27#include "llvm/IR/IntrinsicsS390.h"
37#define DEBUG_TYPE "systemz-lower"
43 cl::desc(
"Verify that narrow int args are properly extended per the "
50 : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
51 Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
101 if (Subtarget.hasHighWord())
107 if (Subtarget.hasVector()) {
116 if (Subtarget.hasVectorEnhancements1())
121 if (Subtarget.hasVector()) {
131 if (Subtarget.hasVector())
158 for (
unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
159 I <= MVT::LAST_FP_VALUETYPE;
185 for (
unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
186 I <= MVT::LAST_INTEGER_VALUETYPE;
217 if (Subtarget.hasPopulationCount())
243 (!Subtarget.hasFPExtension() && VT == MVT::i32) ?
Promote :
Custom;
264 if (!Subtarget.hasVectorEnhancements3()) {
291 if (Subtarget.hasVectorEnhancements3()) {
334 {MVT::i8, MVT::i16, MVT::i32},
Legal);
336 {MVT::i8, MVT::i16},
Legal);
357 if (Subtarget.hasMiscellaneousExtensions4()) {
364 if (Subtarget.hasMiscellaneousExtensions3()) {
457 if (VT != MVT::v2i64 || Subtarget.hasVectorEnhancements3()) {
462 if (Subtarget.hasVectorEnhancements3() &&
463 VT != MVT::v16i8 && VT != MVT::v8i16) {
473 if (Subtarget.hasVectorEnhancements1())
507 if (Subtarget.hasVector()) {
529 if (Subtarget.hasVectorEnhancements2()) {
555 for (
MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
569 for (
unsigned I = MVT::FIRST_FP_VALUETYPE;
570 I <= MVT::LAST_FP_VALUETYPE;
578 if (Subtarget.hasFPExtension()) {
606 if (Subtarget.hasFPExtension()) {
622 if (Subtarget.hasVector()) {
673 if (Subtarget.hasVectorEnhancements1()) {
680 if (Subtarget.hasVectorEnhancements1()) {
697 for (
MVT Type : {MVT::f64, MVT::v2f64, MVT::f32, MVT::v4f32, MVT::f128}) {
720 for (
auto VT : { MVT::f32, MVT::f64, MVT::f128,
721 MVT::v4f32, MVT::v2f64 }) {
730 if (!Subtarget.hasVectorEnhancements1()) {
736 if (Subtarget.hasVectorEnhancements1())
746 if (Subtarget.hasVectorEnhancements1()) {
758 if (!Subtarget.hasVector()) {
769 if (Subtarget.isTargetzOS()) {
834 return Subtarget.hasSoftFloat();
839 unsigned &NumIntermediates,
MVT &RegisterVT)
const {
842 IntermediateVT = RegisterVT = MVT::v8f16;
843 return NumIntermediates =
847 Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
894 return Subtarget.hasVectorEnhancements1();
907 if (!Subtarget.hasVector() ||
908 (isFP128 && !Subtarget.hasVectorEnhancements1()))
917 uint64_t Byte = IntBits.lshr(
I * 8).trunc(8).getZExtValue();
924 Opcode = SystemZISD::BYTE_MASK;
930 if (SplatBitSize > 64)
937 OpVals.push_back(((
unsigned) SignedValue));
938 Opcode = SystemZISD::REPLICATE;
945 if (
TII->isRxSBGMask(
Value, SplatBitSize, Start, End)) {
949 OpVals.push_back(Start - (64 - SplatBitSize));
950 OpVals.push_back(End - (64 - SplatBitSize));
951 Opcode = SystemZISD::ROTATE_MASK;
963 uint64_t SplatBitsZ = SplatBits.getZExtValue();
964 uint64_t SplatUndefZ = SplatUndef.getZExtValue();
976 return TryValue(SplatBitsZ | Middle);
985 assert(IntBits.getBitWidth() == 128 &&
"Unsupported APInt.");
991 unsigned HalfSize = Width / 2;
996 if (HighValue != LowValue || 8 > HalfSize)
999 SplatBits = HighValue;
1003 SplatBitSize = Width;
1011 BVN->
isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
1015 BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
1020 bool ForCodeSize)
const {
1022 if (Imm.isZero() || Imm.isNegZero())
1043 assert(
TRI->isTypeLegalForClass(*RC, MVT::i32) &&
"Invalid destination!");
1049 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
"Invalid Pointer Size!");
1102 const int64_t FPOffset = 0;
1123 auto *SpecialRegs = Subtarget.getSpecialRegisters();
1124 bool HasFP = Subtarget.getFrameLowering()->hasFP(*MF);
1127 .
addReg(SpecialRegs->getFramePointerRegister())
1135 .
addReg(SpecialRegs->getStackPointerRegister())
1146 .
addReg(SpecialRegs->getStackPointerRegister())
1147 .
addImm(TFL->getBackchainOffset(*MF))
1158 MIB =
BuildMI(*ThisMBB,
MI,
DL,
TII->get(SystemZ::EH_SjLj_Setup))
1162 MIB.
addRegMask(RegInfo->getNoPreservedMask());
1183 MI.eraseFromParent();
1199 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
"Invalid Pointer Size!");
1202 auto *SpecialRegs = Subtarget.getSpecialRegisters();
1209 const int64_t FPOffset = 0;
1221 SpecialRegs->getFramePointerRegister())
1243 SpecialRegs->getStackPointerRegister())
1252 .
addReg(SpecialRegs->getStackPointerRegister())
1253 .
addImm(TFL->getBackchainOffset(*MF))
1259 MI.eraseFromParent();
1290 if (Subtarget.hasInterlockedAccess1() &&
1323 EVT VT =
Y.getValueType();
1326 if (VT == MVT::i32 || VT == MVT::i64)
1327 return Subtarget.hasMiscellaneousExtensions3();
1330 if (VT.
isVector() || VT == MVT::i128)
1331 return Subtarget.hasVector();
1359 bool MVC = Ty->isIntegerTy(8);
1365static AddressingMode
1368 switch (
II->getIntrinsicID()) {
1370 case Intrinsic::memset:
1371 case Intrinsic::memmove:
1372 case Intrinsic::memcpy:
1379 if (SingleUser->getParent() ==
I->getParent()) {
1382 if (
C->getBitWidth() <= 64 &&
1392 if (LoadI->hasOneUse() && LoadI->getParent() ==
I->getParent())
1406 I->getOperand(0)->getType());
1408 bool IsVectorAccess = MemAccessTy->isVectorTy();
1413 Value *DataOp =
I->getOperand(0);
1415 IsVectorAccess =
true;
1421 User *LoadUser = *
I->user_begin();
1423 IsVectorAccess =
true;
1426 if (IsFPAccess || IsVectorAccess)
1445 Subtarget.hasVector() && (Ty->isVectorTy() || Ty->isIntegerTy(128));
1455 return AM.
Scale == 0;
1462 LLVMContext &Context, std::vector<EVT> &MemOps,
unsigned Limit,
1463 const MemOp &
Op,
unsigned DstAS,
unsigned SrcAS,
1464 const AttributeList &FuncAttributes,
EVT *LargestVT)
const {
1467 "Expected EmitTargetCodeForMemXXX() to handle AlwaysInline cases.");
1469 if (
Op.isZeroMemset())
1472 const int MVCFastLen = 16;
1474 if ((
Op.isMemset() ?
Op.size() - 1 :
Op.size()) <= MVCFastLen)
1478 if (!
Op.isAligned(
Align(8)) || (
Op.size() >= 25 &&
Op.size() <= 31))
1482 Context, MemOps, Limit,
Op, DstAS, SrcAS, FuncAttributes, LargestVT);
1487 const AttributeList &FuncAttributes)
const {
1488 return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;
1492 if (!FromType->isIntegerTy() || !ToType->
isIntegerTy())
1494 unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();
1496 return FromBits > ToBits;
1504 return FromBits > ToBits;
1513 if (Constraint.
size() == 1) {
1514 switch (Constraint[0]) {
1540 }
else if (Constraint.
size() == 2 && Constraint[0] ==
'Z') {
1541 switch (Constraint[1]) {
1552 if (
StringRef(
"{@cc}").compare(Constraint) == 0)
1562 Value *CallOperandVal = Info.CallOperandVal;
1565 if (!CallOperandVal)
1569 switch (*Constraint) {
1588 if (Subtarget.hasVector())
1619 if (
C->getZExtValue() == 0x7fffffff)
1629static std::pair<unsigned, const TargetRegisterClass *>
1631 const unsigned *Map,
unsigned Size) {
1632 assert(*(Constraint.
end()-1) ==
'}' &&
"Missing '}'");
1633 if (isdigit(Constraint[2])) {
1638 return std::make_pair(Map[Index], RC);
1640 return std::make_pair(0U,
nullptr);
1643std::pair<unsigned, const TargetRegisterClass *>
1646 if (Constraint.
size() == 1) {
1648 switch (Constraint[0]) {
1653 return std::make_pair(0U, &SystemZ::GR64BitRegClass);
1655 return std::make_pair(0U, &SystemZ::GR128BitRegClass);
1656 return std::make_pair(0U, &SystemZ::GR32BitRegClass);
1660 return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
1661 else if (VT == MVT::i128)
1662 return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
1663 return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
1666 return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
1671 return std::make_pair(0U, &SystemZ::FP16BitRegClass);
1673 return std::make_pair(0U, &SystemZ::FP64BitRegClass);
1675 return std::make_pair(0U, &SystemZ::FP128BitRegClass);
1676 return std::make_pair(0U, &SystemZ::FP32BitRegClass);
1681 if (Subtarget.hasVector()) {
1683 return std::make_pair(0U, &SystemZ::VR16BitRegClass);
1685 return std::make_pair(0U, &SystemZ::VR32BitRegClass);
1687 return std::make_pair(0U, &SystemZ::VR64BitRegClass);
1688 return std::make_pair(0U, &SystemZ::VR128BitRegClass);
1697 auto getVTSizeInBits = [&VT]() {
1705 if (Constraint[1] ==
'r') {
1706 if (getVTSizeInBits() == 32)
1709 if (getVTSizeInBits() == 128)
1715 if (Constraint[1] ==
'f') {
1717 return std::make_pair(
1719 if (getVTSizeInBits() == 16)
1722 if (getVTSizeInBits() == 32)
1725 if (getVTSizeInBits() == 128)
1731 if (Constraint[1] ==
'v') {
1732 if (!Subtarget.hasVector())
1733 return std::make_pair(
1735 if (getVTSizeInBits() == 16)
1738 if (getVTSizeInBits() == 32)
1741 if (getVTSizeInBits() == 64)
1747 if (Constraint[1] ==
'@') {
1748 if (
StringRef(
"{@cc}").compare(Constraint) == 0)
1749 return std::make_pair(SystemZ::CC, &SystemZ::CCRRegClass);
1762 .
Case(
"r4", Subtarget.isTargetXPLINK64() ? SystemZ::R4D
1763 : SystemZ::NoRegister)
1765 Subtarget.isTargetELF() ? SystemZ::R15D : SystemZ::NoRegister)
1772 const Constant *PersonalityFn)
const {
1773 return Subtarget.isTargetXPLINK64() ? SystemZ::R1D : SystemZ::R6D;
1777 const Constant *PersonalityFn)
const {
1778 return Subtarget.isTargetXPLINK64() ? SystemZ::R2D : SystemZ::R7D;
1793 if (
StringRef(
"{@cc}").compare(OpInfo.ConstraintCode) != 0)
1797 if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
1798 OpInfo.ConstraintVT.getSizeInBits() < 8)
1813 if (Constraint.
size() == 1) {
1814 switch (Constraint[0]) {
1819 Op.getValueType()));
1826 Op.getValueType()));
1833 C->getSExtValue(),
SDLoc(
Op),
Op.getValueType()));
1840 C->getSExtValue(),
SDLoc(
Op),
Op.getValueType()));
1845 if (
C->getZExtValue() == 0x7fffffff)
1847 Op.getValueType()));
1858#include "SystemZGenCallingConv.inc"
1862 static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
1868 Type *ToType)
const {
1931 if (BitCastToType == MVT::v2i64)
1958 MVT::Untyped,
Hi,
Lo);
1982 unsigned NumParts,
MVT PartVT, std::optional<CallingConv::ID> CC)
const {
1984 if (ValueVT.
getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
1995 MVT PartVT,
EVT ValueVT, std::optional<CallingConv::ID> CC)
const {
1996 if (ValueVT.
getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
2007template <
class ArgTy>
2010 MVT &PartVT,
unsigned &NumParts) {
2011 if (!Args[
I].Flags.isSplit())
2015 PartVT = ArgLocs[
I].getValVT();
2017 for (
unsigned PartIdx =
I + 1;; ++PartIdx) {
2018 assert(PartIdx != ArgLocs.
size() &&
"SplitEnd not found.");
2019 assert(ArgLocs[PartIdx].getValVT() == PartVT &&
"Unsupported split.");
2021 if (Args[PartIdx].Flags.isSplitEnd())
2045 unsigned NumFixedGPRs = 0;
2046 unsigned NumFixedFPRs = 0;
2047 for (
unsigned I = 0, E = ArgLocs.
size();
I != E; ++
I) {
2060 RC = &SystemZ::GR32BitRegClass;
2064 RC = &SystemZ::GR64BitRegClass;
2068 RC = &SystemZ::FP16BitRegClass;
2072 RC = &SystemZ::FP32BitRegClass;
2076 RC = &SystemZ::FP64BitRegClass;
2080 RC = &SystemZ::FP128BitRegClass;
2089 RC = &SystemZ::VR128BitRegClass;
2103 if (Subtarget.isTargetXPLINK64()) {
2106 ArgSPOffset += XPRegs.getCallFrameSize();
2117 unsigned SlotOffs = VA.
getLocVT() == MVT::f16 ? 6 : 4;
2121 ArgValue = DAG.
getLoad(LocVT,
DL, Chain, FIN,
2135 for (
unsigned PartIdx = 1; PartIdx < NumParts; ++PartIdx) {
2138 unsigned PartOffset = Ins[
I].PartOffset;
2143 assert(PartOffset &&
"Offset should be non-zero.");
2150 if (IsVarArg && Subtarget.isTargetXPLINK64()) {
2156 Subtarget.getSpecialRegisters());
2162 int64_t VarArgOffset = CCInfo.
getStackSize() + Regs->getCallFrameSize();
2167 if (IsVarArg && Subtarget.isTargetELF()) {
2180 int64_t RegSaveOffset =
2195 &SystemZ::FP64BitRegClass);
2207 if (Subtarget.isTargetXPLINK64()) {
2212 Subtarget.getSpecialRegisters());
2213 MRI.
addLiveIn(Regs->getADARegister(), ADAvReg);
2225 for (
unsigned I = 0,
E = ArgLocs.
size();
I !=
E; ++
I) {
2232 if (
Reg == SystemZ::R6H ||
Reg == SystemZ::R6L ||
Reg == SystemZ::R6D)
2234 if (Outs[
I].Flags.isSwiftSelf() || Outs[
I].Flags.isSwiftError())
2241 unsigned Offset,
bool LoadAdr =
false) {
2264 bool LoadAddr =
false;
2286 unsigned ADADelta = 0;
2287 unsigned EPADelta = 8;
2293 bool IsInternal = (
G->getGlobal()->hasInternalLinkage() ||
2294 G->getGlobal()->hasPrivateLinkage());
2301 Callee = DAG.
getNode(SystemZISD::PCREL_WRAPPER,
DL, PtrVT, Callee);
2347 if (Subtarget.isTargetXPLINK64())
2351 verifyNarrowIntegerArgs_Call(Outs, &MF.
getFunction(), Callee);
2355 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, Ctx);
2374 for (
unsigned I = 0, E = ArgLocs.
size();
I != E; ++
I) {
2382 unsigned NumParts = 1;
2386 SlotVT = Outs[
I].VT;
2393 DAG.
getStore(Chain,
DL, ArgValue, SpillSlot, StackPtrInfo));
2396 assert(Outs[
I].PartOffset == 0);
2397 for (
unsigned PartIdx = 1; PartIdx < NumParts; ++PartIdx) {
2400 unsigned PartOffset = Outs[
I].PartOffset;
2406 assert(PartOffset &&
"Offset should be non-zero.");
2408 SlotVT.
getStoreSize()) &&
"Not enough space for argument part!");
2410 ArgValue = SpillSlot;
2427 if (!StackPtr.getNode())
2434 else if (VA.
getLocVT() == MVT::f16)
2447 if (Subtarget.isTargetXPLINK64() && VA.
needsCustom()) {
2451 RegsToPass.
push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));
2457 if (!MemOpChains.
empty())
2465 if (Subtarget.isTargetXPLINK64()) {
2470 ->getAddressOfCalleeRegister();
2473 Callee = DAG.
getRegister(CalleeReg, Callee.getValueType());
2480 Callee = DAG.
getNode(SystemZISD::PCREL_WRAPPER,
DL, PtrVT, Callee);
2483 Callee = DAG.
getNode(SystemZISD::PCREL_WRAPPER,
DL, PtrVT, Callee);
2484 }
else if (IsTailCall) {
2487 Callee = DAG.
getRegister(SystemZ::R1D, Callee.getValueType());
2492 for (
const auto &[Reg,
N] : RegsToPass) {
2499 Ops.push_back(Chain);
2500 Ops.push_back(Callee);
2504 for (
const auto &[Reg,
N] : RegsToPass)
2509 const uint32_t *Mask =
TRI->getCallPreservedMask(MF, CallConv);
2510 assert(Mask &&
"Missing call preserved mask for calling convention");
2515 Ops.push_back(Glue);
2524 Chain = DAG.
getNode(SystemZISD::CALL,
DL, NodeTys,
Ops);
2534 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);
2541 VA.getLocVT(), Glue);
2558 bool DoesNotReturn,
bool IsReturnValueUsed)
const {
2560 Args.reserve(
Ops.size());
2566 Entry.IsZExt = !Entry.IsSExt;
2567 Args.push_back(Entry);
2578 .
setCallee(CallConv, RetTy, Callee, std::move(Args))
2589 const Type *RetTy)
const {
2592 for (
auto &Out : Outs)
2593 if (Out.ArgVT.isScalarInteger() && Out.ArgVT.getSizeInBits() > 64)
2597 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Context);
2598 return RetCCInfo.
CheckReturn(Outs, RetCC_SystemZ);
2610 verifyNarrowIntegerArgs_Ret(Outs, &MF.
getFunction());
2618 if (RetLocs.
empty())
2619 return DAG.
getNode(SystemZISD::RET_GLUE,
DL, MVT::Other, Chain);
2628 for (
unsigned I = 0, E = RetLocs.
size();
I != E; ++
I) {
2650 return DAG.
getNode(SystemZISD::RET_GLUE,
DL, MVT::Other, RetOps);
2657 unsigned &CCValid) {
2658 unsigned Id =
Op.getConstantOperandVal(1);
2660 case Intrinsic::s390_tbegin:
2661 Opcode = SystemZISD::TBEGIN;
2665 case Intrinsic::s390_tbegin_nofloat:
2666 Opcode = SystemZISD::TBEGIN_NOFLOAT;
2670 case Intrinsic::s390_tend:
2671 Opcode = SystemZISD::TEND;
2684 unsigned Id =
Op.getConstantOperandVal(0);
2686 case Intrinsic::s390_vpkshs:
2687 case Intrinsic::s390_vpksfs:
2688 case Intrinsic::s390_vpksgs:
2689 Opcode = SystemZISD::PACKS_CC;
2693 case Intrinsic::s390_vpklshs:
2694 case Intrinsic::s390_vpklsfs:
2695 case Intrinsic::s390_vpklsgs:
2696 Opcode = SystemZISD::PACKLS_CC;
2700 case Intrinsic::s390_vceqbs:
2701 case Intrinsic::s390_vceqhs:
2702 case Intrinsic::s390_vceqfs:
2703 case Intrinsic::s390_vceqgs:
2704 case Intrinsic::s390_vceqqs:
2705 Opcode = SystemZISD::VICMPES;
2709 case Intrinsic::s390_vchbs:
2710 case Intrinsic::s390_vchhs:
2711 case Intrinsic::s390_vchfs:
2712 case Intrinsic::s390_vchgs:
2713 case Intrinsic::s390_vchqs:
2714 Opcode = SystemZISD::VICMPHS;
2718 case Intrinsic::s390_vchlbs:
2719 case Intrinsic::s390_vchlhs:
2720 case Intrinsic::s390_vchlfs:
2721 case Intrinsic::s390_vchlgs:
2722 case Intrinsic::s390_vchlqs:
2723 Opcode = SystemZISD::VICMPHLS;
2727 case Intrinsic::s390_vtm:
2728 Opcode = SystemZISD::VTM;
2732 case Intrinsic::s390_vfaebs:
2733 case Intrinsic::s390_vfaehs:
2734 case Intrinsic::s390_vfaefs:
2735 Opcode = SystemZISD::VFAE_CC;
2739 case Intrinsic::s390_vfaezbs:
2740 case Intrinsic::s390_vfaezhs:
2741 case Intrinsic::s390_vfaezfs:
2742 Opcode = SystemZISD::VFAEZ_CC;
2746 case Intrinsic::s390_vfeebs:
2747 case Intrinsic::s390_vfeehs:
2748 case Intrinsic::s390_vfeefs:
2749 Opcode = SystemZISD::VFEE_CC;
2753 case Intrinsic::s390_vfeezbs:
2754 case Intrinsic::s390_vfeezhs:
2755 case Intrinsic::s390_vfeezfs:
2756 Opcode = SystemZISD::VFEEZ_CC;
2760 case Intrinsic::s390_vfenebs:
2761 case Intrinsic::s390_vfenehs:
2762 case Intrinsic::s390_vfenefs:
2763 Opcode = SystemZISD::VFENE_CC;
2767 case Intrinsic::s390_vfenezbs:
2768 case Intrinsic::s390_vfenezhs:
2769 case Intrinsic::s390_vfenezfs:
2770 Opcode = SystemZISD::VFENEZ_CC;
2774 case Intrinsic::s390_vistrbs:
2775 case Intrinsic::s390_vistrhs:
2776 case Intrinsic::s390_vistrfs:
2777 Opcode = SystemZISD::VISTR_CC;
2781 case Intrinsic::s390_vstrcbs:
2782 case Intrinsic::s390_vstrchs:
2783 case Intrinsic::s390_vstrcfs:
2784 Opcode = SystemZISD::VSTRC_CC;
2788 case Intrinsic::s390_vstrczbs:
2789 case Intrinsic::s390_vstrczhs:
2790 case Intrinsic::s390_vstrczfs:
2791 Opcode = SystemZISD::VSTRCZ_CC;
2795 case Intrinsic::s390_vstrsb:
2796 case Intrinsic::s390_vstrsh:
2797 case Intrinsic::s390_vstrsf:
2798 Opcode = SystemZISD::VSTRS_CC;
2802 case Intrinsic::s390_vstrszb:
2803 case Intrinsic::s390_vstrszh:
2804 case Intrinsic::s390_vstrszf:
2805 Opcode = SystemZISD::VSTRSZ_CC;
2809 case Intrinsic::s390_vfcedbs:
2810 case Intrinsic::s390_vfcesbs:
2811 Opcode = SystemZISD::VFCMPES;
2815 case Intrinsic::s390_vfchdbs:
2816 case Intrinsic::s390_vfchsbs:
2817 Opcode = SystemZISD::VFCMPHS;
2821 case Intrinsic::s390_vfchedbs:
2822 case Intrinsic::s390_vfchesbs:
2823 Opcode = SystemZISD::VFCMPHES;
2827 case Intrinsic::s390_vftcidb:
2828 case Intrinsic::s390_vftcisb:
2829 Opcode = SystemZISD::VFTCI;
2833 case Intrinsic::s390_tdc:
2834 Opcode = SystemZISD::TDC;
2847 unsigned NumOps =
Op.getNumOperands();
2850 Ops.push_back(
Op.getOperand(0));
2852 Ops.push_back(
Op.getOperand(
I));
2854 assert(
Op->getNumValues() == 2 &&
"Expected only CC result and chain");
2868 unsigned NumOps =
Op.getNumOperands();
2874 assert((
Op.getConstantOperandVal(0) == Intrinsic::s390_tdc &&
I == 1) &&
2875 "Unhandled intrinsic with f16 operand.");
2878 Ops.push_back(CurrOper);
2892 case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
2893 case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
2894 case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
2920 if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
2923 int64_t
Value = ConstOp1->getSExtValue();
2939 if (!
C.Op0.hasOneUse() ||
2946 unsigned NumBits = Load->getMemoryVT().getSizeInBits();
2947 if ((NumBits != 8 && NumBits != 16) ||
2948 NumBits != Load->getMemoryVT().getStoreSizeInBits())
2954 if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
2957 uint64_t Mask = (1 << NumBits) - 1;
2960 int64_t SignedValue = ConstOp1->getSExtValue();
2967 }
else if (NumBits == 8) {
2993 if (
C.Op0.getValueType() != MVT::i32 ||
2994 Load->getExtensionType() != ExtType) {
2996 Load->getBasePtr(), Load->getPointerInfo(),
2997 Load->getMemoryVT(), Load->getAlign(),
2998 Load->getMemOperand()->getFlags());
3004 if (
C.Op1.getValueType() != MVT::i32 ||
3005 Value != ConstOp1->getZExtValue())
3015 if (Load->getMemoryVT() == MVT::i8)
3018 switch (Load->getExtensionType()) {
3035 if (
C.Op0.getValueType() == MVT::i128)
3037 if (
C.Op0.getValueType() == MVT::f128)
3049 if (ConstOp1 && ConstOp1->getZExtValue() == 0)
3078 unsigned Opcode0 =
C.Op0.getOpcode();
3085 C.Op0.getConstantOperandVal(1) == 0xffffffff)
3100 ((
N->getOperand(0) ==
C.Op0 &&
N->getOperand(1) ==
C.Op1) ||
3101 (
N->getOperand(0) ==
C.Op1 &&
N->getOperand(1) ==
C.Op0))) {
3123 if (C1 && C1->isZero()) {
3142 if (
C.Op0.getOpcode() ==
ISD::SHL &&
C.Op0.getValueType() == MVT::i64 &&
3145 if (C1 && C1->getZExtValue() == 32) {
3146 SDValue ShlOp0 =
C.Op0.getOperand(0);
3165 C.Op0.getOperand(0).getOpcode() ==
ISD::LOAD &&
3168 C.Op1->getAsZExtVal() == 0) {
3170 if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
3171 C.Op0.getValueSizeInBits().getFixedValue()) {
3172 unsigned Type = L->getExtensionType();
3175 C.Op0 =
C.Op0.getOperand(0);
3189 uint64_t Amount = Shift->getZExtValue();
3190 if (Amount >=
N.getValueSizeInBits())
3205 unsigned ICmpType) {
3206 assert(Mask != 0 &&
"ANDs with zero should have been removed by now");
3228 if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <=
Low) {
3234 if (EffectivelyUnsigned && CmpVal <
Low) {
3242 if (CmpVal == Mask) {
3248 if (EffectivelyUnsigned && CmpVal >= Mask -
Low && CmpVal < Mask) {
3254 if (EffectivelyUnsigned && CmpVal > Mask -
Low && CmpVal <= Mask) {
3262 if (EffectivelyUnsigned && CmpVal >= Mask -
High && CmpVal <
High) {
3268 if (EffectivelyUnsigned && CmpVal > Mask -
High && CmpVal <=
High) {
3297 if (
C.Op0.getValueType() == MVT::i128) {
3303 if (Mask && Mask->getAPIntValue() == 0) {
3304 C.Opcode = SystemZISD::VTM;
3321 uint64_t CmpVal = ConstOp1->getZExtValue();
3328 NewC.Op0 =
C.Op0.getOperand(0);
3329 NewC.Op1 =
C.Op0.getOperand(1);
3333 MaskVal = Mask->getZExtValue();
3353 MaskVal = -(CmpVal & -CmpVal);
3362 unsigned NewCCMask, ShiftVal;
3366 (MaskVal >> ShiftVal != 0) &&
3367 ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
3369 MaskVal >> ShiftVal,
3373 MaskVal >>= ShiftVal;
3377 (MaskVal << ShiftVal != 0) &&
3378 ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
3380 MaskVal << ShiftVal,
3384 MaskVal <<= ShiftVal;
3393 C.Opcode = SystemZISD::TM;
3395 if (Mask && Mask->getZExtValue() == MaskVal)
3400 C.CCMask = NewCCMask;
3406 if (
C.Opcode != SystemZISD::ICMP)
3408 if (
C.Op0.getValueType() != MVT::i128)
3419 Src = Src.getOperand(0);
3422 unsigned Opcode = 0;
3423 if (Src.hasOneUse()) {
3424 switch (Src.getOpcode()) {
3425 case SystemZISD::VICMPE: Opcode = SystemZISD::VICMPES;
break;
3426 case SystemZISD::VICMPH: Opcode = SystemZISD::VICMPHS;
break;
3427 case SystemZISD::VICMPHL: Opcode = SystemZISD::VICMPHLS;
break;
3428 case SystemZISD::VFCMPE: Opcode = SystemZISD::VFCMPES;
break;
3429 case SystemZISD::VFCMPH: Opcode = SystemZISD::VFCMPHS;
break;
3430 case SystemZISD::VFCMPHE: Opcode = SystemZISD::VFCMPHES;
break;
3436 C.Op0 = Src->getOperand(0);
3437 C.Op1 = Src->getOperand(1);
3441 C.CCMask ^=
C.CCValid;
3453 C.Opcode = SystemZISD::VICMPES;
3465 bool Swap =
false, Invert =
false;
3477 C.Opcode = SystemZISD::UCMP128HI;
3479 C.Opcode = SystemZISD::SCMP128HI;
3484 C.CCMask ^=
C.CCValid;
3495 if (!Mask || Mask->getValueSizeInBits(0) > 64)
3498 if ((~Known.
Zero).getZExtValue() & ~Mask->getZExtValue())
3501 C.Op0 =
C.Op0.getOperand(0);
3513 C.CCValid = CCValid;
3516 C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
3519 C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
3523 C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
3526 C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
3530 C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
3533 C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
3536 C.CCMask &= CCValid;
3544 bool IsSignaling =
false) {
3547 unsigned Opcode, CCValid;
3559 Comparison
C(CmpOp0, CmpOp1, Chain);
3561 if (
C.Op0.getValueType().isFloatingPoint()) {
3564 C.Opcode = SystemZISD::FCMP;
3565 else if (!IsSignaling)
3566 C.Opcode = SystemZISD::STRICT_FCMP;
3568 C.Opcode = SystemZISD::STRICT_FCMPS;
3573 C.Opcode = SystemZISD::ICMP;
3608 if (!
C.Op1.getNode()) {
3610 switch (
C.Op0.getOpcode()) {
3621 if (
C.Opcode == SystemZISD::ICMP)
3622 return DAG.
getNode(SystemZISD::ICMP,
DL, MVT::i32,
C.Op0,
C.Op1,
3624 if (
C.Opcode == SystemZISD::TM) {
3627 return DAG.
getNode(SystemZISD::TM,
DL, MVT::i32,
C.Op0,
C.Op1,
3630 if (
C.Opcode == SystemZISD::VICMPES ||
3631 C.Opcode == SystemZISD::VICMPHS ||
3632 C.Opcode == SystemZISD::VICMPHLS ||
3633 C.Opcode == SystemZISD::VFCMPES ||
3634 C.Opcode == SystemZISD::VFCMPHS ||
3635 C.Opcode == SystemZISD::VFCMPHES) {
3636 EVT IntVT =
C.Op0.getValueType().changeVectorElementTypeToInteger();
3643 return DAG.
getNode(
C.Opcode,
DL, VTs,
C.Chain,
C.Op0,
C.Op1);
3645 return DAG.
getNode(
C.Opcode,
DL, MVT::i32,
C.Op0,
C.Op1);
3654 Op0 = DAG.
getNode(Extend,
DL, MVT::i64, Op0);
3655 Op1 = DAG.
getNode(Extend,
DL, MVT::i64, Op1);
3680 unsigned CCValid,
unsigned CCMask) {
3685 return DAG.
getNode(SystemZISD::SELECT_CCMASK,
DL, MVT::i32,
Ops);
3763 int Mask[] = { Start, -1, Start + 1, -1 };
3767 return DAG.
getNode(SystemZISD::STRICT_VEXTEND,
DL, VTs, Chain,
Op);
3769 return DAG.
getNode(SystemZISD::VEXTEND,
DL, MVT::v2f64,
Op);
3783 !Subtarget.hasVectorEnhancements1()) {
3789 SDVTList VTs = DAG.
getVTList(MVT::v2i64, MVT::Other);
3802 return DAG.
getNode(SystemZISD::PACK,
DL, VT, HRes, LRes);
3805 SDVTList VTs = DAG.
getVTList(VT, MVT::Other);
3806 return DAG.
getNode(Opcode,
DL, VTs, Chain, CmpOp0, CmpOp1);
3808 return DAG.
getNode(Opcode,
DL, VT, CmpOp0, CmpOp1);
3821 bool IsSignaling)
const {
3824 assert (!IsSignaling || Chain);
3827 bool Invert =
false;
3835 assert(IsFP &&
"Unexpected integer comparison");
3837 DL, VT, CmpOp1, CmpOp0, Chain);
3839 DL, VT, CmpOp0, CmpOp1, Chain);
3843 LT.getValue(1),
GE.getValue(1));
3852 assert(IsFP &&
"Unexpected integer comparison");
3854 DL, VT, CmpOp1, CmpOp0, Chain);
3856 DL, VT, CmpOp0, CmpOp1, Chain);
3860 LT.getValue(1),
GT.getValue(1));
3881 Cmp = getVectorCmp(DAG, Opcode,
DL, VT, CmpOp0, CmpOp1, Chain);
3885 Cmp = getVectorCmp(DAG, Opcode,
DL, VT, CmpOp1, CmpOp0, Chain);
3890 Chain =
Cmp.getValue(1);
3898 if (Chain && Chain.
getNode() !=
Cmp.getNode()) {
3911 EVT VT =
Op.getValueType();
3913 return lowerVectorSETCC(DAG,
DL, VT, CC, CmpOp0, CmpOp1);
3922 bool IsSignaling)
const {
3928 EVT VT =
Op.getNode()->getValueType(0);
3930 SDValue Res = lowerVectorSETCC(DAG,
DL, VT, CC, CmpOp0, CmpOp1,
3931 Chain, IsSignaling);
3953 SystemZISD::BR_CCMASK,
DL,
Op.getValueType(),
Op.getOperand(0),
3987 C.CCMask ^=
C.CCValid;
3995 Op = SystemZISD::VICMPE;
3999 Op = SystemZISD::VICMPHL;
4001 Op = SystemZISD::VICMPH;
4040 C.Op1->getAsZExtVal() == 0) {
4047 if (Subtarget.hasVectorEnhancements3() &&
4048 C.Opcode == SystemZISD::ICMP &&
4049 C.Op0.getValueType() == MVT::i128 &&
4059 return DAG.
getNode(SystemZISD::SELECT_CCMASK,
DL,
Op.getValueType(),
Ops);
4065 const GlobalValue *GV =
Node->getGlobal();
4071 if (Subtarget.isPC32DBLSymbol(GV, CM)) {
4074 uint64_t Anchor =
Offset & ~uint64_t(0xfff);
4093 }
else if (Subtarget.isTargetELF()) {
4098 }
else if (Subtarget.isTargetzOS()) {
4129 Chain = DAG.
getCopyToReg(Chain,
DL, SystemZ::R2D, GOTOffset, Glue);
4134 Ops.push_back(Chain);
4136 Node->getValueType(0),
4145 const TargetRegisterInfo *
TRI = Subtarget.getRegisterInfo();
4146 const uint32_t *
Mask =
4148 assert(Mask &&
"Missing call preserved mask for calling convention");
4152 Ops.push_back(Glue);
4155 SDVTList NodeTys = DAG.
getVTList(MVT::Other, MVT::Glue);
4163SDValue SystemZTargetLowering::lowerThreadPointer(
const SDLoc &
DL,
4187 const GlobalValue *GV =
Node->getGlobal();
4195 SDValue TP = lowerThreadPointer(
DL, DAG);
4202 SystemZConstantPoolValue *CPV =
4211 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL,
Offset);
4217 SystemZConstantPoolValue *CPV =
4226 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL,
Offset);
4231 SystemZMachineFunctionInfo* MFI =
4260 SystemZConstantPoolValue *CPV =
4294 return DAG.
getNode(SystemZISD::PCREL_WRAPPER,
DL, PtrVT, Result);
4311 return DAG.
getNode(SystemZISD::PCREL_WRAPPER,
DL, PtrVT, Result);
4316 auto *TFL = Subtarget.getFrameLowering<SystemZFrameLowering>();
4318 MachineFrameInfo &MFI = MF.getFrameInfo();
4322 unsigned Depth =
Op.getConstantOperandVal(0);
4329 int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);
4334 if (!MF.getSubtarget<SystemZSubtarget>().hasBackChain())
4340 MachinePointerInfo());
4355 unsigned Depth =
Op.getConstantOperandVal(0);
4360 if (!MF.
getSubtarget<SystemZSubtarget>().hasBackChain())
4363 SDValue FrameAddr = lowerFRAMEADDR(
Op, DAG);
4364 const auto *TFL = Subtarget.getFrameLowering<SystemZFrameLowering>();
4365 int Offset = TFL->getReturnAddressOffset(MF);
4369 MachinePointerInfo());
4374 SystemZCallingConventionRegisters *CCR = Subtarget.getSpecialRegisters();
4376 &SystemZ::GR64BitRegClass);
4384 EVT InVT =
In.getValueType();
4385 EVT ResVT =
Op.getValueType();
4393 LoadN->getBasePtr(), LoadN->getMemOperand());
4399 if (InVT == MVT::i32 && ResVT == MVT::f32) {
4401 if (Subtarget.hasHighWord()) {
4405 MVT::i64,
SDValue(U64, 0), In);
4413 DL, MVT::f32, Out64);
4415 if (InVT == MVT::f32 && ResVT == MVT::i32) {
4418 MVT::f64,
SDValue(U64, 0), In);
4420 if (Subtarget.hasHighWord())
4433 if (Subtarget.isTargetXPLINK64())
4434 return lowerVASTART_XPLINK(
Op, DAG);
4436 return lowerVASTART_ELF(
Op, DAG);
4442 SystemZMachineFunctionInfo *FuncInfo =
4443 MF.
getInfo<SystemZMachineFunctionInfo>();
4453 MachinePointerInfo(SV));
4459 SystemZMachineFunctionInfo *FuncInfo =
4460 MF.
getInfo<SystemZMachineFunctionInfo>();
4469 const unsigned NumFields = 4;
4480 for (
unsigned I = 0;
I < NumFields; ++
I) {
4485 MemOps[
I] = DAG.
getStore(Chain,
DL, Fields[
I], FieldAddr,
4486 MachinePointerInfo(SV,
Offset));
4504 Align(8),
false,
false,
4505 nullptr, std::nullopt, MachinePointerInfo(DstSV),
4506 MachinePointerInfo(SrcSV));
4510SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(
SDValue Op,
4512 if (Subtarget.isTargetXPLINK64())
4513 return lowerDYNAMIC_STACKALLOC_XPLINK(
Op, DAG);
4515 return lowerDYNAMIC_STACKALLOC_ELF(
Op, DAG);
4519SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(
SDValue Op,
4521 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
4531 uint64_t AlignVal = (RealignOpt ?
Align->getAsZExtVal() : 0);
4534 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
4535 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
4541 if (ExtraAlignSpace)
4545 bool IsSigned =
false;
4546 bool DoesNotReturn =
false;
4547 bool IsReturnValueUsed =
false;
4548 EVT VT =
Op.getValueType();
4558 auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>();
4570 if (ExtraAlignSpace) {
4582SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(
SDValue Op,
4584 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
4587 bool StoreBackchain = MF.
getSubtarget<SystemZSubtarget>().hasBackChain();
4596 uint64_t AlignVal = (RealignOpt ?
Align->getAsZExtVal() : 0);
4599 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
4600 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
4611 Backchain = DAG.
getLoad(MVT::i64,
DL, Chain, getBackchainAddress(OldSP, DAG),
4612 MachinePointerInfo());
4615 if (ExtraAlignSpace)
4622 NewSP = DAG.
getNode(SystemZISD::PROBED_ALLOCA,
DL,
4623 DAG.
getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace);
4639 if (RequiredAlign > StackAlign) {
4649 Chain = DAG.
getStore(Chain,
DL, Backchain, getBackchainAddress(NewSP, DAG),
4650 MachinePointerInfo());
4656SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
4660 return DAG.
getNode(SystemZISD::ADJDYNALLOC,
DL, MVT::i64);
4665 unsigned Opcode)
const {
4666 EVT VT =
Op.getValueType();
4672 assert(Subtarget.hasMiscellaneousExtensions2());
4677 Op.getOperand(0),
Op.getOperand(1), Even, Odd);
4683 EVT VT =
Op.getValueType();
4691 else if (Subtarget.hasMiscellaneousExtensions2())
4696 Op.getOperand(0),
Op.getOperand(1),
Ops[1],
Ops[0]);
4731 EVT VT =
Op.getValueType();
4744 Op.getOperand(0),
Op.getOperand(1),
Ops[1],
Ops[0]);
4752 EVT VT =
Op.getValueType();
4772 EVT VT =
Op.getValueType();
4779 Op.getOperand(0),
Op.getOperand(1),
Ops[1],
Ops[0]);
4784 assert(
Op.getValueType() == MVT::i64 &&
"Should be 64-bit operation");
4796 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
4798 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
4835 MVT::i64, HighOp, Low32);
4841 SDNode *
N =
Op.getNode();
4846 if (
N->getValueType(0) == MVT::i128) {
4847 unsigned BaseOp = 0;
4848 unsigned FlagOp = 0;
4849 bool IsBorrow =
false;
4850 switch (
Op.getOpcode()) {
4854 FlagOp = SystemZISD::VACC;
4858 FlagOp = SystemZISD::VSCBI;
4873 unsigned BaseOp = 0;
4874 unsigned CCValid = 0;
4875 unsigned CCMask = 0;
4877 switch (
Op.getOpcode()) {
4880 BaseOp = SystemZISD::SADDO;
4885 BaseOp = SystemZISD::SSUBO;
4890 BaseOp = SystemZISD::UADDO;
4895 BaseOp = SystemZISD::USUBO;
4901 SDVTList VTs = DAG.
getVTList(
N->getValueType(0), MVT::i32);
4905 if (
N->getValueType(1) == MVT::i1)
4931 SDNode *
N =
Op.getNode();
4932 MVT VT =
N->getSimpleValueType(0);
4943 if (VT == MVT::i128) {
4944 unsigned BaseOp = 0;
4945 unsigned FlagOp = 0;
4946 bool IsBorrow =
false;
4947 switch (
Op.getOpcode()) {
4950 BaseOp = SystemZISD::VAC;
4951 FlagOp = SystemZISD::VACCC;
4954 BaseOp = SystemZISD::VSBI;
4955 FlagOp = SystemZISD::VSBCBI;
4974 unsigned BaseOp = 0;
4975 unsigned CCValid = 0;
4976 unsigned CCMask = 0;
4978 switch (
Op.getOpcode()) {
4984 BaseOp = SystemZISD::ADDCARRY;
4992 BaseOp = SystemZISD::SUBCARRY;
5003 SDVTList VTs = DAG.
getVTList(VT, MVT::i32);
5007 if (
N->getValueType(1) == MVT::i1)
5015 EVT VT =
Op.getValueType();
5017 Op =
Op.getOperand(0);
5040 Op = DAG.
getNode(SystemZISD::VSRL_BY_SCALAR,
DL, VT,
Op, Shift);
5052 Op = DAG.
getNode(SystemZISD::VSUM,
DL, MVT::v4i32,
Op, Tmp);
5065 if (NumSignificantBits == 0)
5071 BitSize = std::min(BitSize, OrigBitSize);
5080 for (int64_t
I = BitSize / 2;
I >= 8;
I =
I / 2) {
5082 if (BitSize != OrigBitSize)
5119 EVT RegVT =
Op.getValueType();
5121 return lowerATOMIC_LDST_I128(
Op, DAG);
5122 return lowerLoadF16(
Op, DAG);
5128 if (
Node->getMemoryVT().getSizeInBits() == 128)
5129 return lowerATOMIC_LDST_I128(
Op, DAG);
5130 return lowerStoreF16(
Op, DAG);
5137 (
Node->getMemoryVT() == MVT::i128 ||
Node->getMemoryVT() == MVT::f128) &&
5138 "Only custom lowering i128 or f128.");
5151 EVT WideVT = MVT::i32;
5174 unsigned Opcode)
const {
5178 EVT NarrowVT =
Node->getMemoryVT();
5179 EVT WideVT = MVT::i32;
5180 if (NarrowVT == WideVT)
5187 MachineMemOperand *MMO =
Node->getMemOperand();
5191 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
5193 Opcode = SystemZISD::ATOMIC_LOADW_ADD;
5198 SDValue AlignedAddr, BitShift, NegBitShift;
5206 if (Opcode != SystemZISD::ATOMIC_SWAPW)
5209 if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
5210 Opcode == SystemZISD::ATOMIC_LOADW_NAND)
5215 SDVTList VTList = DAG.
getVTList(WideVT, MVT::Other);
5216 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
5236 EVT MemVT =
Node->getMemoryVT();
5237 if (MemVT == MVT::i32 || MemVT == MVT::i64) {
5239 assert(
Op.getValueType() == MemVT &&
"Mismatched VTs");
5240 assert(Subtarget.hasInterlockedAccess1() &&
5241 "Should have been expanded by AtomicExpand pass.");
5247 Node->getChain(),
Node->getBasePtr(), NegSrc2,
5248 Node->getMemOperand());
5251 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
5262 MachineMemOperand *MMO =
Node->getMemOperand();
5265 if (
Node->getMemoryVT() == MVT::i128) {
5274 EVT NarrowVT =
Node->getMemoryVT();
5275 EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
5276 if (NarrowVT == WideVT) {
5277 SDVTList Tys = DAG.
getVTList(WideVT, MVT::i32, MVT::Other);
5278 SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
5280 DL, Tys,
Ops, NarrowVT, MMO);
5294 SDValue AlignedAddr, BitShift, NegBitShift;
5298 SDVTList VTList = DAG.
getVTList(WideVT, MVT::i32, MVT::Other);
5299 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
5302 VTList,
Ops, NarrowVT, MMO);
5316SystemZTargetLowering::getTargetMMOFlags(
const Instruction &
I)
const {
5339 auto *Regs = Subtarget.getSpecialRegisters();
5342 "in GHC calling convention");
5344 Regs->getStackPointerRegister(),
Op.getValueType());
5350 auto *Regs = Subtarget.getSpecialRegisters();
5351 bool StoreBackchain = MF.
getSubtarget<SystemZSubtarget>().hasBackChain();
5355 "in GHC calling convention");
5362 if (StoreBackchain) {
5364 Chain,
DL, Regs->getStackPointerRegister(), MVT::i64);
5365 Backchain = DAG.
getLoad(MVT::i64,
DL, Chain, getBackchainAddress(OldSP, DAG),
5366 MachinePointerInfo());
5369 Chain = DAG.
getCopyToReg(Chain,
DL, Regs->getStackPointerRegister(), NewSP);
5372 Chain = DAG.
getStore(Chain,
DL, Backchain, getBackchainAddress(NewSP, DAG),
5373 MachinePointerInfo());
5380 bool IsData =
Op.getConstantOperandVal(4);
5383 return Op.getOperand(0);
5386 bool IsWrite =
Op.getConstantOperandVal(2);
5393 Node->getMemoryVT(),
Node->getMemOperand());
5397SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(
SDValue Op,
5399 unsigned Opcode, CCValid;
5401 assert(
Op->getNumValues() == 2 &&
"Expected only CC result and chain");
5412SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(
SDValue Op,
5414 unsigned Opcode, CCValid;
5417 if (
Op->getNumValues() == 1)
5419 assert(
Op->getNumValues() == 2 &&
"Expected a CC and non-CC result");
5424 unsigned Id =
Op.getConstantOperandVal(0);
5426 case Intrinsic::thread_pointer:
5427 return lowerThreadPointer(SDLoc(
Op), DAG);
5429 case Intrinsic::s390_vpdi:
5430 return DAG.
getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(
Op),
Op.getValueType(),
5431 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5433 case Intrinsic::s390_vperm:
5434 return DAG.
getNode(SystemZISD::PERMUTE, SDLoc(
Op),
Op.getValueType(),
5435 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5437 case Intrinsic::s390_vuphb:
5438 case Intrinsic::s390_vuphh:
5439 case Intrinsic::s390_vuphf:
5440 case Intrinsic::s390_vuphg:
5441 return DAG.
getNode(SystemZISD::UNPACK_HIGH, SDLoc(
Op),
Op.getValueType(),
5444 case Intrinsic::s390_vuplhb:
5445 case Intrinsic::s390_vuplhh:
5446 case Intrinsic::s390_vuplhf:
5447 case Intrinsic::s390_vuplhg:
5448 return DAG.
getNode(SystemZISD::UNPACKL_HIGH, SDLoc(
Op),
Op.getValueType(),
5451 case Intrinsic::s390_vuplb:
5452 case Intrinsic::s390_vuplhw:
5453 case Intrinsic::s390_vuplf:
5454 case Intrinsic::s390_vuplg:
5455 return DAG.
getNode(SystemZISD::UNPACK_LOW, SDLoc(
Op),
Op.getValueType(),
5458 case Intrinsic::s390_vupllb:
5459 case Intrinsic::s390_vupllh:
5460 case Intrinsic::s390_vupllf:
5461 case Intrinsic::s390_vupllg:
5462 return DAG.
getNode(SystemZISD::UNPACKL_LOW, SDLoc(
Op),
Op.getValueType(),
5465 case Intrinsic::s390_vsumb:
5466 case Intrinsic::s390_vsumh:
5467 case Intrinsic::s390_vsumgh:
5468 case Intrinsic::s390_vsumgf:
5469 case Intrinsic::s390_vsumqf:
5470 case Intrinsic::s390_vsumqg:
5471 return DAG.
getNode(SystemZISD::VSUM, SDLoc(
Op),
Op.getValueType(),
5472 Op.getOperand(1),
Op.getOperand(2));
5474 case Intrinsic::s390_vaq:
5476 Op.getOperand(1),
Op.getOperand(2));
5477 case Intrinsic::s390_vaccb:
5478 case Intrinsic::s390_vacch:
5479 case Intrinsic::s390_vaccf:
5480 case Intrinsic::s390_vaccg:
5481 case Intrinsic::s390_vaccq:
5482 return DAG.
getNode(SystemZISD::VACC, SDLoc(
Op),
Op.getValueType(),
5483 Op.getOperand(1),
Op.getOperand(2));
5484 case Intrinsic::s390_vacq:
5485 return DAG.
getNode(SystemZISD::VAC, SDLoc(
Op),
Op.getValueType(),
5486 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5487 case Intrinsic::s390_vacccq:
5488 return DAG.
getNode(SystemZISD::VACCC, SDLoc(
Op),
Op.getValueType(),
5489 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5491 case Intrinsic::s390_vsq:
5493 Op.getOperand(1),
Op.getOperand(2));
5494 case Intrinsic::s390_vscbib:
5495 case Intrinsic::s390_vscbih:
5496 case Intrinsic::s390_vscbif:
5497 case Intrinsic::s390_vscbig:
5498 case Intrinsic::s390_vscbiq:
5499 return DAG.
getNode(SystemZISD::VSCBI, SDLoc(
Op),
Op.getValueType(),
5500 Op.getOperand(1),
Op.getOperand(2));
5501 case Intrinsic::s390_vsbiq:
5502 return DAG.
getNode(SystemZISD::VSBI, SDLoc(
Op),
Op.getValueType(),
5503 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5504 case Intrinsic::s390_vsbcbiq:
5505 return DAG.
getNode(SystemZISD::VSBCBI, SDLoc(
Op),
Op.getValueType(),
5506 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5508 case Intrinsic::s390_vmhb:
5509 case Intrinsic::s390_vmhh:
5510 case Intrinsic::s390_vmhf:
5511 case Intrinsic::s390_vmhg:
5512 case Intrinsic::s390_vmhq:
5514 Op.getOperand(1),
Op.getOperand(2));
5515 case Intrinsic::s390_vmlhb:
5516 case Intrinsic::s390_vmlhh:
5517 case Intrinsic::s390_vmlhf:
5518 case Intrinsic::s390_vmlhg:
5519 case Intrinsic::s390_vmlhq:
5521 Op.getOperand(1),
Op.getOperand(2));
5523 case Intrinsic::s390_vmahb:
5524 case Intrinsic::s390_vmahh:
5525 case Intrinsic::s390_vmahf:
5526 case Intrinsic::s390_vmahg:
5527 case Intrinsic::s390_vmahq:
5528 return DAG.
getNode(SystemZISD::VMAH, SDLoc(
Op),
Op.getValueType(),
5529 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5530 case Intrinsic::s390_vmalhb:
5531 case Intrinsic::s390_vmalhh:
5532 case Intrinsic::s390_vmalhf:
5533 case Intrinsic::s390_vmalhg:
5534 case Intrinsic::s390_vmalhq:
5535 return DAG.
getNode(SystemZISD::VMALH, SDLoc(
Op),
Op.getValueType(),
5536 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5538 case Intrinsic::s390_vmeb:
5539 case Intrinsic::s390_vmeh:
5540 case Intrinsic::s390_vmef:
5541 case Intrinsic::s390_vmeg:
5542 return DAG.
getNode(SystemZISD::VME, SDLoc(
Op),
Op.getValueType(),
5543 Op.getOperand(1),
Op.getOperand(2));
5544 case Intrinsic::s390_vmleb:
5545 case Intrinsic::s390_vmleh:
5546 case Intrinsic::s390_vmlef:
5547 case Intrinsic::s390_vmleg:
5548 return DAG.
getNode(SystemZISD::VMLE, SDLoc(
Op),
Op.getValueType(),
5549 Op.getOperand(1),
Op.getOperand(2));
5550 case Intrinsic::s390_vmob:
5551 case Intrinsic::s390_vmoh:
5552 case Intrinsic::s390_vmof:
5553 case Intrinsic::s390_vmog:
5554 return DAG.
getNode(SystemZISD::VMO, SDLoc(
Op),
Op.getValueType(),
5555 Op.getOperand(1),
Op.getOperand(2));
5556 case Intrinsic::s390_vmlob:
5557 case Intrinsic::s390_vmloh:
5558 case Intrinsic::s390_vmlof:
5559 case Intrinsic::s390_vmlog:
5560 return DAG.
getNode(SystemZISD::VMLO, SDLoc(
Op),
Op.getValueType(),
5561 Op.getOperand(1),
Op.getOperand(2));
5563 case Intrinsic::s390_vmaeb:
5564 case Intrinsic::s390_vmaeh:
5565 case Intrinsic::s390_vmaef:
5566 case Intrinsic::s390_vmaeg:
5568 DAG.
getNode(SystemZISD::VME, SDLoc(
Op),
Op.getValueType(),
5569 Op.getOperand(1),
Op.getOperand(2)),
5571 case Intrinsic::s390_vmaleb:
5572 case Intrinsic::s390_vmaleh:
5573 case Intrinsic::s390_vmalef:
5574 case Intrinsic::s390_vmaleg:
5576 DAG.
getNode(SystemZISD::VMLE, SDLoc(
Op),
Op.getValueType(),
5577 Op.getOperand(1),
Op.getOperand(2)),
5579 case Intrinsic::s390_vmaob:
5580 case Intrinsic::s390_vmaoh:
5581 case Intrinsic::s390_vmaof:
5582 case Intrinsic::s390_vmaog:
5584 DAG.
getNode(SystemZISD::VMO, SDLoc(
Op),
Op.getValueType(),
5585 Op.getOperand(1),
Op.getOperand(2)),
5587 case Intrinsic::s390_vmalob:
5588 case Intrinsic::s390_vmaloh:
5589 case Intrinsic::s390_vmalof:
5590 case Intrinsic::s390_vmalog:
5592 DAG.
getNode(SystemZISD::VMLO, SDLoc(
Op),
Op.getValueType(),
5593 Op.getOperand(1),
Op.getOperand(2)),
5614 { SystemZISD::MERGE_HIGH, 8,
5615 { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
5617 { SystemZISD::MERGE_HIGH, 4,
5618 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
5620 { SystemZISD::MERGE_HIGH, 2,
5621 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
5623 { SystemZISD::MERGE_HIGH, 1,
5624 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
5626 { SystemZISD::MERGE_LOW, 8,
5627 { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
5629 { SystemZISD::MERGE_LOW, 4,
5630 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
5632 { SystemZISD::MERGE_LOW, 2,
5633 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
5635 { SystemZISD::MERGE_LOW, 1,
5636 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
5638 { SystemZISD::PACK, 4,
5639 { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
5641 { SystemZISD::PACK, 2,
5642 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
5644 { SystemZISD::PACK, 1,
5645 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
5647 { SystemZISD::PERMUTE_DWORDS, 4,
5648 { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
5650 { SystemZISD::PERMUTE_DWORDS, 1,
5651 { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
5665 OpNo0 = OpNo1 = OpNos[1];
5666 }
else if (OpNos[1] < 0) {
5667 OpNo0 = OpNo1 = OpNos[0];
5685 unsigned &OpNo0,
unsigned &OpNo1) {
5686 int OpNos[] = { -1, -1 };
5699 if (OpNos[ModelOpNo] == 1 - RealOpNo)
5701 OpNos[ModelOpNo] = RealOpNo;
5709 unsigned &OpNo0,
unsigned &OpNo1) {
5726 int Elt = Bytes[From];
5729 Transform[From] = -1;
5731 while (
P.Bytes[To] != Elt) {
5736 Transform[From] = To;
5760 Bytes.
resize(NumElements * BytesPerElement, -1);
5761 for (
unsigned I = 0;
I < NumElements; ++
I) {
5762 int Index = VSN->getMaskElt(
I);
5764 for (
unsigned J = 0; J < BytesPerElement; ++J)
5765 Bytes[
I * BytesPerElement + J] = Index * BytesPerElement + J;
5769 if (SystemZISD::SPLAT == ShuffleOp.
getOpcode() &&
5772 Bytes.
resize(NumElements * BytesPerElement, -1);
5773 for (
unsigned I = 0;
I < NumElements; ++
I)
5774 for (
unsigned J = 0; J < BytesPerElement; ++J)
5775 Bytes[
I * BytesPerElement + J] = Index * BytesPerElement + J;
5786 unsigned BytesPerElement,
int &
Base) {
5788 for (
unsigned I = 0;
I < BytesPerElement; ++
I) {
5789 if (Bytes[Start +
I] >= 0) {
5790 unsigned Elem = Bytes[Start +
I];
5794 if (
unsigned(
Base) % Bytes.
size() + BytesPerElement > Bytes.
size())
5796 }
else if (
unsigned(
Base) != Elem -
I)
5809 unsigned &StartIndex,
unsigned &OpNo0,
5811 int OpNos[] = { -1, -1 };
5813 for (
unsigned I = 0;
I < 16; ++
I) {
5814 int Index = Bytes[
I];
5820 Shift = ExpectedShift;
5821 else if (Shift != ExpectedShift)
5825 if (OpNos[ModelOpNo] == 1 - RealOpNo)
5827 OpNos[ModelOpNo] = RealOpNo;
5840 unsigned InBytes = (
P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 :
5841 P.Opcode == SystemZISD::PACK ?
P.Operand * 2 :
5849 if (
P.Opcode == SystemZISD::PERMUTE_DWORDS) {
5851 Op = DAG.
getNode(SystemZISD::PERMUTE_DWORDS,
DL, InVT, Op0, Op1, Op2);
5852 }
else if (
P.Opcode == SystemZISD::PACK) {
5855 Op = DAG.
getNode(SystemZISD::PACK,
DL, OutVT, Op0, Op1);
5864 N =
N->getOperand(0);
5867 return Op->getZExtValue() == 0;
5873 for (
unsigned I = 0;
I < Num ;
I++)
5885 for (
unsigned I = 0;
I < 2; ++
I)
5889 unsigned StartIndex, OpNo0, OpNo1;
5891 return DAG.
getNode(SystemZISD::SHL_DOUBLE,
DL, MVT::v16i8,
Ops[OpNo0],
5898 if (ZeroVecIdx != UINT32_MAX) {
5899 bool MaskFirst =
true;
5904 if (OpNo == ZeroVecIdx &&
I == 0) {
5909 if (OpNo != ZeroVecIdx && Byte == 0) {
5916 if (ZeroIdx != -1) {
5919 if (Bytes[
I] >= 0) {
5922 if (OpNo == ZeroVecIdx)
5934 return DAG.
getNode(SystemZISD::PERMUTE,
DL, MVT::v16i8, Mask, Src,
5937 return DAG.
getNode(SystemZISD::PERMUTE,
DL, MVT::v16i8, Src, Mask,
5949 return DAG.
getNode(SystemZISD::PERMUTE,
DL, MVT::v16i8,
Ops[0],
5955struct GeneralShuffle {
5956 GeneralShuffle(EVT vt)
5957 : VT(vt), UnpackFromEltSize(UINT_MAX), UnpackLow(
false) {}
5961 void tryPrepareForUnpack();
5962 bool unpackWasPrepared() {
return UnpackFromEltSize <= 4; }
5977 unsigned UnpackFromEltSize;
5984void GeneralShuffle::addUndef() {
5986 for (
unsigned I = 0;
I < BytesPerElement; ++
I)
5987 Bytes.push_back(-1);
5996bool GeneralShuffle::add(
SDValue Op,
unsigned Elem) {
6002 EVT FromVT =
Op.getNode() ?
Op.getValueType() : VT;
6007 if (FromBytesPerElement < BytesPerElement)
6011 (FromBytesPerElement - BytesPerElement));
6014 while (
Op.getNode()) {
6016 Op =
Op.getOperand(0);
6032 }
else if (
Op.isUndef()) {
6041 for (; OpNo <
Ops.size(); ++OpNo)
6042 if (
Ops[OpNo] ==
Op)
6044 if (OpNo ==
Ops.size())
6049 for (
unsigned I = 0;
I < BytesPerElement; ++
I)
6050 Bytes.push_back(
Base +
I);
6059 if (
Ops.size() == 0)
6063 tryPrepareForUnpack();
6066 if (
Ops.size() == 1)
6078 unsigned Stride = 1;
6079 for (; Stride * 2 <
Ops.size(); Stride *= 2) {
6080 for (
unsigned I = 0;
I <
Ops.size() - Stride;
I += Stride * 2) {
6090 else if (OpNo ==
I + Stride)
6101 if (NewBytes[J] >= 0) {
6103 "Invalid double permute");
6106 assert(NewBytesMap[J] < 0 &&
"Invalid double permute");
6112 if (NewBytes[J] >= 0)
6128 unsigned OpNo0, OpNo1;
6132 else if (
const Permute *
P =
matchPermute(Bytes, OpNo0, OpNo1))
6137 Op = insertUnpackIfPrepared(DAG,
DL,
Op);
6144 dbgs() << Msg.c_str() <<
" { ";
6145 for (
unsigned I = 0;
I < Bytes.
size();
I++)
6146 dbgs() << Bytes[
I] <<
" ";
6154void GeneralShuffle::tryPrepareForUnpack() {
6156 if (ZeroVecOpNo == UINT32_MAX ||
Ops.size() == 1)
6161 if (
Ops.size() > 2 &&
6166 UnpackFromEltSize = 1;
6167 for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) {
6168 bool MatchUnpack =
true;
6171 unsigned ToEltSize = UnpackFromEltSize * 2;
6172 bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize;
6175 if (Bytes[Elt] != -1) {
6177 if (IsZextByte != (OpNo == ZeroVecOpNo)) {
6178 MatchUnpack =
false;
6184 if (
Ops.size() == 2) {
6186 bool CanUseUnpackLow =
true, CanUseUnpackHigh =
true;
6188 if (SrcBytes[i] == -1)
6190 if (SrcBytes[i] % 16 !=
int(i))
6191 CanUseUnpackHigh =
false;
6193 CanUseUnpackLow =
false;
6194 if (!CanUseUnpackLow && !CanUseUnpackHigh) {
6195 UnpackFromEltSize = UINT_MAX;
6199 if (!CanUseUnpackHigh)
6205 if (UnpackFromEltSize > 4)
6208 LLVM_DEBUG(
dbgs() <<
"Preparing for final unpack of element size "
6209 << UnpackFromEltSize <<
". Zero vector is Op#" << ZeroVecOpNo
6211 dumpBytes(Bytes,
"Original Bytes vector:"););
6220 Elt += UnpackFromEltSize;
6221 for (
unsigned i = 0; i < UnpackFromEltSize; i++, Elt++,
B++)
6222 Bytes[
B] = Bytes[Elt];
6230 Ops.erase(&
Ops[ZeroVecOpNo]);
6232 if (Bytes[
I] >= 0) {
6234 if (OpNo > ZeroVecOpNo)
6245 if (!unpackWasPrepared())
6247 unsigned InBits = UnpackFromEltSize * 8;
6251 unsigned OutBits = InBits * 2;
6254 return DAG.
getNode(UnpackLow ? SystemZISD::UNPACKL_LOW
6255 : SystemZISD::UNPACKL_HIGH,
6256 DL, OutVT, PackedOp);
6261 for (
unsigned I = 1,
E =
Op.getNumOperands();
I !=
E; ++
I)
6262 if (!
Op.getOperand(
I).isUndef())
6278 if (
Value.isUndef())
6290 return DAG.
getNode(SystemZISD::REPLICATE,
DL, VT, Op1);
6293 return DAG.
getNode(SystemZISD::REPLICATE,
DL, VT, Op0);
6294 return DAG.
getNode(SystemZISD::MERGE_HIGH,
DL, VT,
6315 return DAG.
getNode(SystemZISD::JOIN_DWORDS,
DL, MVT::v2i64, Op0, Op1);
6331 GeneralShuffle GS(VT);
6333 bool FoundOne =
false;
6334 for (
unsigned I = 0;
I < NumElements; ++
I) {
6337 Op =
Op.getOperand(0);
6340 unsigned Elem =
Op.getConstantOperandVal(1);
6341 if (!GS.add(
Op.getOperand(0), Elem))
6344 }
else if (
Op.isUndef()) {
6358 if (!ResidueOps.
empty()) {
6359 while (ResidueOps.
size() < NumElements)
6361 for (
auto &
Op : GS.Ops) {
6362 if (!
Op.getNode()) {
6368 return GS.getNode(DAG,
SDLoc(BVN));
6371bool SystemZTargetLowering::isVectorElementLoad(
SDValue Op)
const {
6377 if (Subtarget.hasVectorEnhancements2() &&
Op.getOpcode() == SystemZISD::LRV)
6388 "Handling full vectors only.");
6408 if (Op01.
getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
6420 unsigned int NumElements = Elems.
size();
6421 unsigned int Count = 0;
6422 for (
auto Elem : Elems) {
6423 if (!Elem.isUndef()) {
6426 else if (Elem != Single) {
6446 if (
Single.getNode() && (
Count > 1 || isVectorElementLoad(Single)))
6447 return DAG.
getNode(SystemZISD::REPLICATE,
DL, VT, Single);
6450 bool AllLoads =
true;
6451 for (
auto Elem : Elems)
6452 if (!isVectorElementLoad(Elem)) {
6458 if (VT == MVT::v2i64 && !AllLoads)
6462 if (VT == MVT::v2f64 && !AllLoads)
6472 if (VT == MVT::v4f32 && !AllLoads)
6476 if (VT == MVT::v8f16 && !AllLoads) {
6485 if (Op0123.
getOpcode() == SystemZISD::REPLICATE && Op0123 == Op4567)
6494 unsigned NumConstants = 0;
6495 for (
unsigned I = 0;
I < NumElements; ++
I) {
6509 if (NumConstants > 0) {
6510 for (
unsigned I = 0;
I < NumElements; ++
I)
6521 std::map<const SDNode*, unsigned> UseCounts;
6522 SDNode *LoadMaxUses =
nullptr;
6523 for (
unsigned I = 0;
I < NumElements; ++
I)
6524 if (isVectorElementLoad(Elems[
I])) {
6525 SDNode *Ld = Elems[
I].getNode();
6526 unsigned Count = ++UseCounts[Ld];
6527 if (LoadMaxUses ==
nullptr || UseCounts[LoadMaxUses] <
Count)
6530 if (LoadMaxUses !=
nullptr) {
6531 ReplicatedVal =
SDValue(LoadMaxUses, 0);
6535 unsigned I1 = NumElements / 2 - 1;
6536 unsigned I2 = NumElements - 1;
6537 bool Def1 = !Elems[
I1].isUndef();
6538 bool Def2 = !Elems[I2].isUndef();
6552 for (
unsigned I = 0;
I < NumElements; ++
I)
6553 if (!
Done[
I] && !Elems[
I].
isUndef() && Elems[
I] != ReplicatedVal)
6563 EVT VT =
Op.getValueType();
6565 if (BVN->isConstant()) {
6566 if (SystemZVectorConstantInfo(BVN).isVectorConstantLegal(Subtarget))
6584 for (
unsigned I = 0;
I < NumElements; ++
I)
6586 return buildVector(DAG,
DL, VT,
Ops);
6593 EVT VT =
Op.getValueType();
6596 if (VSN->isSplat()) {
6598 unsigned Index = VSN->getSplatIndex();
6600 "Splat index should be defined and in first operand");
6606 return DAG.
getNode(SystemZISD::SPLAT,
DL, VT,
Op.getOperand(0),
6610 GeneralShuffle
GS(VT);
6611 for (
unsigned I = 0;
I < NumElements; ++
I) {
6612 int Elt = VSN->getMaskElt(
I);
6615 else if (!
GS.add(
Op.getOperand(
unsigned(Elt) / NumElements),
6616 unsigned(Elt) % NumElements))
6619 return GS.getNode(DAG, SDLoc(VSN));
6634 assert(
Op.getSimpleValueType() == MVT::i64 &&
6635 "Expexted to convert i64 to f16.");
6647 assert(
Op.getSimpleValueType() == MVT::f16 &&
6648 "Expected to convert f16 to i64.");
6665 EVT VT =
Op.getValueType();
6670 if (VT == MVT::v2f64 &&
6694SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(
SDValue Op,
6700 EVT VT =
Op.getValueType();
6705 uint64_t
Index = CIndexN->getZExtValue();
6714 MVT ExtrVT = IntVT == MVT::i16 ? MVT::i32 : IntVT;
6722SDValue SystemZTargetLowering::
6725 EVT OutVT =
Op.getValueType();
6729 unsigned StartOffset = 0;
6736 ArrayRef<int> ShuffleMask = SVN->
getMask();
6741 if (ToBits == 64 && OutNumElts == 2) {
6742 int NumElem = ToBits / FromBits;
6743 if (ShuffleMask[0] == NumElem - 1 && ShuffleMask[1] == 2 * NumElem - 1)
6749 int StartOffsetCandidate = -1;
6750 for (
int Elt = 0; Elt < OutNumElts; Elt++) {
6751 if (ShuffleMask[Elt] == -1)
6753 if (ShuffleMask[Elt] % OutNumElts == Elt) {
6754 if (StartOffsetCandidate == -1)
6755 StartOffsetCandidate = ShuffleMask[Elt] - Elt;
6756 if (StartOffsetCandidate == ShuffleMask[Elt] - Elt)
6759 StartOffsetCandidate = -1;
6762 if (StartOffsetCandidate != -1) {
6763 StartOffset = StartOffsetCandidate;
6772 unsigned Opcode = SystemZISD::UNPACK_HIGH;
6773 if (StartOffset >= OutNumElts) {
6774 Opcode = SystemZISD::UNPACK_LOW;
6775 StartOffset -= OutNumElts;
6777 PackedOp = DAG.
getNode(Opcode, SDLoc(PackedOp), OutVT, PackedOp);
6778 }
while (FromBits != ToBits);
6783SDValue SystemZTargetLowering::
6787 EVT OutVT =
Op.getValueType();
6791 unsigned NumInPerOut = InNumElts / OutNumElts;
6796 SmallVector<int, 16>
Mask(InNumElts);
6797 unsigned ZeroVecElt = InNumElts;
6798 for (
unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) {
6799 unsigned MaskElt = PackedElt * NumInPerOut;
6800 unsigned End = MaskElt + NumInPerOut - 1;
6801 for (; MaskElt < End; MaskElt++)
6802 Mask[MaskElt] = ZeroVecElt++;
6803 Mask[MaskElt] = PackedElt;
6810 unsigned ByScalar)
const {
6815 EVT VT =
Op.getValueType();
6820 APInt SplatBits, SplatUndef;
6821 unsigned SplatBitSize;
6825 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
6826 ElemBitSize,
true) &&
6827 SplatBitSize == ElemBitSize) {
6830 return DAG.
getNode(ByScalar,
DL, VT, Op0, Shift);
6833 BitVector UndefElements;
6839 return DAG.
getNode(ByScalar,
DL, VT, Op0, Shift);
6846 if (VSN->isSplat()) {
6847 SDValue VSNOp0 = VSN->getOperand(0);
6848 unsigned Index = VSN->getSplatIndex();
6850 "Splat index should be defined and in first operand");
6857 return DAG.
getNode(ByScalar,
DL, VT, Op0, Shift);
6875 uint64_t ShiftAmt = ShiftAmtNode->getZExtValue() & 127;
6876 if ((ShiftAmt & 7) == 0 || Subtarget.hasVectorEnhancements2()) {
6879 if (ShiftAmt > 120) {
6883 DAG.
getNode(SystemZISD::SHR_DOUBLE_BIT,
DL, MVT::v16i8, Op0, Op1,
6887 SmallVector<int, 16>
Mask(16);
6888 for (
unsigned Elt = 0; Elt < 16; Elt++)
6889 Mask[Elt] = (ShiftAmt >> 3) + Elt;
6891 if ((ShiftAmt & 7) == 0)
6895 DAG.
getNode(SystemZISD::SHL_DOUBLE_BIT,
DL, MVT::v16i8, Shuf1, Shuf2,
6913 uint64_t ShiftAmt = ShiftAmtNode->getZExtValue() & 127;
6914 if ((ShiftAmt & 7) == 0 || Subtarget.hasVectorEnhancements2()) {
6917 if (ShiftAmt > 120) {
6921 DAG.
getNode(SystemZISD::SHL_DOUBLE_BIT,
DL, MVT::v16i8, Op0, Op1,
6925 SmallVector<int, 16>
Mask(16);
6926 for (
unsigned Elt = 0; Elt < 16; Elt++)
6927 Mask[Elt] = 16 - (ShiftAmt >> 3) + Elt;
6929 if ((ShiftAmt & 7) == 0)
6933 DAG.
getNode(SystemZISD::SHR_DOUBLE_BIT,
DL, MVT::v16i8, Shuf2, Shuf1,
6945 MVT DstVT =
Op.getSimpleValueType();
6948 unsigned SrcAS =
N->getSrcAddressSpace();
6950 assert(SrcAS !=
N->getDestAddressSpace() &&
6951 "addrspacecast must be between different address spaces");
6959 }
else if (DstVT == MVT::i32) {
6973 if (
In.getSimpleValueType() != MVT::f16)
6980 SDValue Chain,
bool IsStrict)
const {
6981 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
"Unexpected request for libcall!");
6984 std::tie(Result, Chain) =
6993 bool IsStrict =
Op->isStrictFPOpcode();
6995 MVT VT =
Op.getSimpleValueType();
6996 SDValue InOp =
Op.getOperand(IsStrict ? 1 : 0);
7004 if (!Subtarget.hasFPExtension() && !IsSigned)
7015 if (VT == MVT::i128) {
7018 return useLibCall(DAG, LC, VT, InOp,
DL, Chain, IsStrict);
7028 bool IsStrict =
Op->isStrictFPOpcode();
7030 MVT VT =
Op.getSimpleValueType();
7031 SDValue InOp =
Op.getOperand(IsStrict ? 1 : 0);
7036 if (VT == MVT::f16) {
7043 if (!Subtarget.hasFPExtension() && !IsSigned)
7046 if (InVT == MVT::i128) {
7049 return useLibCall(DAG, LC, VT, InOp,
DL, Chain, IsStrict);
7058 EVT RegVT =
Op.getValueType();
7059 assert(RegVT == MVT::f16 &&
"Expected to lower an f16 load.");
7066 assert(EVT(RegVT) == AtomicLd->getMemoryVT() &&
"Unhandled f16 load");
7068 AtomicLd->getChain(), AtomicLd->getBasePtr(),
7069 AtomicLd->getMemOperand());
7089 Shft, AtomicSt->getBasePtr(),
7090 AtomicSt->getMemOperand());
7100 MVT ResultVT =
Op.getSimpleValueType();
7102 unsigned Check =
Op.getConstantOperandVal(1);
7104 unsigned TDCMask = 0;
7139 MachinePointerInfo MPI =
7145 SystemZISD::STCKF,
DL, DAG.
getVTList(MVT::Other), StoreOps, MVT::i64,
7149 return DAG.
getLoad(MVT::i64,
DL, Chain, StackPtr, MPI);
7154 switch (
Op.getOpcode()) {
7156 return lowerFRAMEADDR(
Op, DAG);
7158 return lowerRETURNADDR(
Op, DAG);
7160 return lowerBR_CC(
Op, DAG);
7162 return lowerSELECT_CC(
Op, DAG);
7164 return lowerSETCC(
Op, DAG);
7166 return lowerSTRICT_FSETCC(
Op, DAG,
false);
7168 return lowerSTRICT_FSETCC(
Op, DAG,
true);
7180 return lowerBITCAST(
Op, DAG);
7182 return lowerVASTART(
Op, DAG);
7184 return lowerVACOPY(
Op, DAG);
7186 return lowerDYNAMIC_STACKALLOC(
Op, DAG);
7188 return lowerGET_DYNAMIC_AREA_OFFSET(
Op, DAG);
7190 return lowerMULH(
Op, DAG, SystemZISD::SMUL_LOHI);
7192 return lowerMULH(
Op, DAG, SystemZISD::UMUL_LOHI);
7194 return lowerSMUL_LOHI(
Op, DAG);
7196 return lowerUMUL_LOHI(
Op, DAG);
7198 return lowerSDIVREM(
Op, DAG);
7200 return lowerUDIVREM(
Op, DAG);
7205 return lowerXALUO(
Op, DAG);
7208 return lowerUADDSUBO_CARRY(
Op, DAG);
7210 return lowerOR(
Op, DAG);
7212 return lowerCTPOP(
Op, DAG);
7214 return lowerVECREDUCE_ADD(
Op, DAG);
7216 return lowerATOMIC_FENCE(
Op, DAG);
7218 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_SWAPW);
7220 return lowerATOMIC_STORE(
Op, DAG);
7222 return lowerATOMIC_LOAD(
Op, DAG);
7224 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
7226 return lowerATOMIC_LOAD_SUB(
Op, DAG);
7228 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
7230 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
7232 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
7234 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
7236 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
7238 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
7240 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
7242 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
7244 return lowerATOMIC_CMP_SWAP(
Op, DAG);
7246 return lowerSTACKSAVE(
Op, DAG);
7248 return lowerSTACKRESTORE(
Op, DAG);
7250 return lowerPREFETCH(
Op, DAG);
7252 return lowerINTRINSIC_W_CHAIN(
Op, DAG);
7254 return lowerINTRINSIC_WO_CHAIN(
Op, DAG);
7256 return lowerBUILD_VECTOR(
Op, DAG);
7258 return lowerVECTOR_SHUFFLE(
Op, DAG);
7260 return lowerSCALAR_TO_VECTOR(
Op, DAG);
7262 return lowerINSERT_VECTOR_ELT(
Op, DAG);
7264 return lowerEXTRACT_VECTOR_ELT(
Op, DAG);
7266 return lowerSIGN_EXTEND_VECTOR_INREG(
Op, DAG);
7268 return lowerZERO_EXTEND_VECTOR_INREG(
Op, DAG);
7270 return lowerShift(
Op, DAG, SystemZISD::VSHL_BY_SCALAR);
7272 return lowerShift(
Op, DAG, SystemZISD::VSRL_BY_SCALAR);
7274 return lowerShift(
Op, DAG, SystemZISD::VSRA_BY_SCALAR);
7278 return lowerShift(
Op, DAG, SystemZISD::VROTL_BY_SCALAR);
7280 return lowerFSHL(
Op, DAG);
7282 return lowerFSHR(
Op, DAG);
7285 return lowerFP_EXTEND(
Op, DAG);
7290 return lower_FP_TO_INT(
Op, DAG);
7295 return lower_INT_TO_FP(
Op, DAG);
7297 return lowerLoadF16(
Op, DAG);
7299 return lowerStoreF16(
Op, DAG);
7301 return lowerIS_FPCLASS(
Op, DAG);
7303 return lowerGET_ROUNDING(
Op, DAG);
7305 return lowerREADCYCLECOUNTER(
Op, DAG);
7327 &SystemZ::FP128BitRegClass);
7336 SystemZ::REG_SEQUENCE, SL, MVT::f128,
7351 &SystemZ::FP128BitRegClass);
7368 switch (
N->getOpcode()) {
7372 SDValue Ops[] = {
N->getOperand(0),
N->getOperand(1) };
7375 DL, Tys,
Ops, MVT::i128, MMO);
7378 if (
N->getValueType(0) == MVT::f128)
7392 SDValue Ops[] = {
N->getOperand(0), Val,
N->getOperand(2)};
7395 DL, Tys,
Ops, MVT::i128, MMO);
7401 MVT::Other, Res), 0);
7413 DL, Tys,
Ops, MVT::i128, MMO);
7427 EVT SrcVT = Src.getValueType();
7428 EVT ResVT =
N->getValueType(0);
7429 if (ResVT == MVT::i128 && SrcVT == MVT::f128)
7431 else if (SrcVT == MVT::i16 && ResVT == MVT::f16) {
7432 if (Subtarget.hasVector()) {
7440 }
else if (SrcVT == MVT::f16 && ResVT == MVT::i16) {
7442 Subtarget.hasVector()
7456 bool IsStrict =
N->isStrictFPOpcode();
7458 SDValue InOp =
N->getOperand(IsStrict ? 1 : 0);
7459 EVT ResVT =
N->getValueType(0);
7461 if (ResVT == MVT::f16) {
7484 bool IsStrict =
N->isStrictFPOpcode();
7486 EVT ResVT =
N->getValueType(0);
7487 SDValue InOp =
N->getOperand(IsStrict ? 1 : 0);
7490 if (InVT == MVT::f16) {
7496 std::tie(InF32, Chain) =
7521bool SystemZTargetLowering::canTreatAsByteVector(
EVT VT)
const {
7522 if (!Subtarget.hasVector())
7536 DAGCombinerInfo &DCI,
7544 unsigned Opcode =
Op.getOpcode();
7547 Op =
Op.getOperand(0);
7549 canTreatAsByteVector(
Op.getValueType())) {
7558 BytesPerElement,
First))
7565 if (Byte % BytesPerElement != 0)
7568 Index = Byte / BytesPerElement;
7572 canTreatAsByteVector(
Op.getValueType())) {
7575 EVT OpVT =
Op.getValueType();
7577 if (OpBytesPerElement < BytesPerElement)
7581 unsigned End = (
Index + 1) * BytesPerElement;
7582 if (End % OpBytesPerElement != 0)
7585 Op =
Op.getOperand(End / OpBytesPerElement - 1);
7586 if (!
Op.getValueType().isInteger()) {
7589 DCI.AddToWorklist(
Op.getNode());
7594 DCI.AddToWorklist(
Op.getNode());
7601 canTreatAsByteVector(
Op.getValueType()) &&
7602 canTreatAsByteVector(
Op.getOperand(0).getValueType())) {
7604 EVT ExtVT =
Op.getValueType();
7605 EVT OpVT =
Op.getOperand(0).getValueType();
7608 unsigned Byte =
Index * BytesPerElement;
7609 unsigned SubByte =
Byte % ExtBytesPerElement;
7610 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
7611 if (SubByte < MinSubByte ||
7612 SubByte + BytesPerElement > ExtBytesPerElement)
7615 Byte =
Byte / ExtBytesPerElement * OpBytesPerElement;
7617 Byte += SubByte - MinSubByte;
7618 if (Byte % BytesPerElement != 0)
7620 Op =
Op.getOperand(0);
7627 if (
Op.getValueType() != VecVT) {
7629 DCI.AddToWorklist(
Op.getNode());
7639SDValue SystemZTargetLowering::combineTruncateExtract(
7648 if (canTreatAsByteVector(VecVT)) {
7652 if (BytesPerElement % TruncBytes == 0) {
7658 unsigned Scale = BytesPerElement / TruncBytes;
7659 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
7666 EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
7667 return combineExtract(
DL, ResVT, VecVT, Vec, NewIndex, DCI,
true);
7675SDValue SystemZTargetLowering::combineZERO_EXTEND(
7676 SDNode *
N, DAGCombinerInfo &DCI)
const {
7678 SelectionDAG &DAG = DCI.DAG;
7680 EVT VT =
N->getValueType(0);
7681 if (N0.
getOpcode() == SystemZISD::SELECT_CCMASK) {
7684 if (TrueOp && FalseOp) {
7694 DCI.CombineTo(N0.
getNode(), TruncSelect);
7737 return DAG.
getNode(SystemZISD::VSCBI, SDLoc(N0), VT, Op0, Op1);
7755SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
7756 SDNode *
N, DAGCombinerInfo &DCI)
const {
7760 SelectionDAG &DAG = DCI.DAG;
7762 EVT VT =
N->getValueType(0);
7776SDValue SystemZTargetLowering::combineSIGN_EXTEND(
7777 SDNode *
N, DAGCombinerInfo &DCI)
const {
7781 SelectionDAG &DAG = DCI.DAG;
7783 EVT VT =
N->getValueType(0);
7790 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
7791 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
7807SDValue SystemZTargetLowering::combineMERGE(
7808 SDNode *
N, DAGCombinerInfo &DCI)
const {
7809 SelectionDAG &DAG = DCI.DAG;
7810 unsigned Opcode =
N->getOpcode();
7818 if (Op1 ==
N->getOperand(0))
7823 if (ElemBytes <= 4) {
7824 Opcode = (Opcode == SystemZISD::MERGE_HIGH ?
7825 SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
7831 DCI.AddToWorklist(Op1.
getNode());
7834 DCI.AddToWorklist(
Op.getNode());
7843 LoPart = HiPart =
nullptr;
7848 if (
Use.getResNo() != 0)
7853 bool IsLoPart =
true;
7878 LoPart = HiPart =
nullptr;
7883 if (
Use.getResNo() != 0)
7889 User->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
7892 switch (
User->getConstantOperandVal(1)) {
7893 case SystemZ::subreg_l64:
7898 case SystemZ::subreg_h64:
7910SDValue SystemZTargetLowering::combineLOAD(
7911 SDNode *
N, DAGCombinerInfo &DCI)
const {
7912 SelectionDAG &DAG = DCI.DAG;
7913 EVT LdVT =
N->getValueType(0);
7917 MVT LoadNodeVT = LN->getBasePtr().getSimpleValueType();
7918 if (PtrVT != LoadNodeVT) {
7922 return DAG.
getExtLoad(LN->getExtensionType(),
DL, LN->getValueType(0),
7923 LN->getChain(), AddrSpaceCast, LN->getMemoryVT(),
7924 LN->getMemOperand());
7934 SDNode *LoPart, *HiPart;
7942 LD->getPointerInfo(),
LD->getBaseAlign(),
7943 LD->getMemOperand()->getFlags(),
LD->getAAInfo());
7945 DCI.CombineTo(HiPart, EltLoad,
true);
7952 LD->getPointerInfo().getWithOffset(8),
LD->getBaseAlign(),
7953 LD->getMemOperand()->getFlags(),
LD->getAAInfo());
7955 DCI.CombineTo(LoPart, EltLoad,
true);
7962 DCI.AddToWorklist(Chain.
getNode());
7977 for (SDUse &Use :
N->uses()) {
7978 if (
Use.getUser()->getOpcode() == SystemZISD::REPLICATE) {
7982 }
else if (
Use.getResNo() == 0)
7985 if (!Replicate || OtherUses.
empty())
7991 for (SDNode *U : OtherUses) {
7994 Ops.push_back((
Op.getNode() ==
N &&
Op.getResNo() == 0) ? Extract0 :
Op);
8000bool SystemZTargetLowering::canLoadStoreByteSwapped(
EVT VT)
const {
8001 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
8003 if (Subtarget.hasVectorEnhancements2())
8004 if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 || VT == MVT::i128)
8016 for (
unsigned i = 0; i < NumElts; ++i) {
8017 if (M[i] < 0)
continue;
8018 if ((
unsigned) M[i] != NumElts - 1 - i)
8026 for (
auto *U : StoredVal->
users()) {
8028 EVT CurrMemVT = ST->getMemoryVT().getScalarType();
8087SDValue SystemZTargetLowering::combineSTORE(
8088 SDNode *
N, DAGCombinerInfo &DCI)
const {
8089 SelectionDAG &DAG = DCI.DAG;
8092 EVT MemVT = SN->getMemoryVT();
8096 MVT StoreNodeVT = SN->getBasePtr().getSimpleValueType();
8097 if (PtrVT != StoreNodeVT) {
8101 return DAG.
getStore(SN->getChain(),
DL, SN->getValue(), AddrSpaceCast,
8102 SN->getPointerInfo(), SN->getBaseAlign(),
8103 SN->getMemOperand()->getFlags(), SN->getAAInfo());
8111 if (MemVT.
isInteger() && SN->isTruncatingStore()) {
8113 combineTruncateExtract(SDLoc(
N), MemVT, SN->getValue(), DCI)) {
8114 DCI.AddToWorklist(
Value.getNode());
8118 SN->getBasePtr(), SN->getMemoryVT(),
8119 SN->getMemOperand());
8123 if (!SN->isTruncatingStore() &&
8139 Ops, MemVT, SN->getMemOperand());
8142 if (!SN->isTruncatingStore() &&
8145 Subtarget.hasVectorEnhancements2()) {
8147 ArrayRef<int> ShuffleMask = SVN->
getMask();
8155 Ops, MemVT, SN->getMemOperand());
8160 if (!SN->isTruncatingStore() &&
8163 N->getOperand(0).reachesChainWithoutSideEffects(
SDValue(Op1.
getNode(), 1))) {
8167 Ops, MemVT, SN->getMemOperand());
8177 SN->getChain(),
DL, HiPart, SN->getBasePtr(), SN->getPointerInfo(),
8178 SN->getBaseAlign(), SN->getMemOperand()->getFlags(), SN->getAAInfo());
8180 SN->getChain(),
DL, LoPart,
8182 SN->getPointerInfo().getWithOffset(8), SN->getBaseAlign(),
8183 SN->getMemOperand()->
getFlags(), SN->getAAInfo());
8201 auto FindReplicatedImm = [&](ConstantSDNode *
C,
unsigned TotBytes) {
8203 if (
C->getAPIntValue().getBitWidth() > 64 ||
C->isAllOnes() ||
8207 APInt Val =
C->getAPIntValue();
8210 assert(SN->isTruncatingStore() &&
8211 "Non-truncating store and immediate value does not fit?");
8212 Val = Val.
trunc(TotBytes * 8);
8215 SystemZVectorConstantInfo VCI(APInt(TotBytes * 8, Val.
getZExtValue()));
8216 if (VCI.isVectorConstantLegal(Subtarget) &&
8217 VCI.Opcode == SystemZISD::REPLICATE) {
8225 auto FindReplicatedReg = [&](
SDValue MulOp) {
8226 EVT MulVT = MulOp.getValueType();
8227 if (MulOp->getOpcode() ==
ISD::MUL &&
8228 (MulVT == MVT::i16 || MulVT == MVT::i32 || MulVT == MVT::i64)) {
8232 WordVT =
LHS->getOperand(0).getValueType();
8239 SystemZVectorConstantInfo VCI(
8241 if (VCI.isVectorConstantLegal(Subtarget) &&
8242 VCI.Opcode == SystemZISD::REPLICATE && VCI.OpVals[0] == 1 &&
8243 WordVT == VCI.VecVT.getScalarType())
8255 FindReplicatedReg(SplatVal);
8260 FindReplicatedReg(Op1);
8265 "Bad type handling");
8269 return DAG.
getStore(SN->getChain(), SDLoc(SN), SplatVal,
8270 SN->getBasePtr(), SN->getMemOperand());
8277SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
8278 SDNode *
N, DAGCombinerInfo &DCI)
const {
8279 SelectionDAG &DAG = DCI.DAG;
8282 N->getOperand(0).hasOneUse() &&
8283 Subtarget.hasVectorEnhancements2()) {
8285 ArrayRef<int> ShuffleMask = SVN->
getMask();
8298 Ops,
LD->getMemoryVT(),
LD->getMemOperand());
8302 DCI.CombineTo(
N, ESLoad);
8306 DCI.CombineTo(
Load.getNode(), ESLoad, ESLoad.
getValue(1));
8316SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
8317 SDNode *
N, DAGCombinerInfo &DCI)
const {
8318 SelectionDAG &DAG = DCI.DAG;
8320 if (!Subtarget.hasVector())
8326 Op.getValueType().isVector() &&
8327 Op.getOperand(0).getValueType().isVector() &&
8328 Op.getValueType().getVectorNumElements() ==
8329 Op.getOperand(0).getValueType().getVectorNumElements())
8330 Op =
Op.getOperand(0);
8334 EVT VecVT =
Op.getValueType();
8337 Op.getOperand(0),
N->getOperand(1));
8338 DCI.AddToWorklist(
Op.getNode());
8340 if (EltVT !=
N->getValueType(0)) {
8341 DCI.AddToWorklist(
Op.getNode());
8351 if (canTreatAsByteVector(VecVT))
8352 return combineExtract(SDLoc(
N),
N->getValueType(0), VecVT, Op0,
8353 IndexN->getZExtValue(), DCI,
false);
8358SDValue SystemZTargetLowering::combineJOIN_DWORDS(
8359 SDNode *
N, DAGCombinerInfo &DCI)
const {
8360 SelectionDAG &DAG = DCI.DAG;
8362 if (
N->getOperand(0) ==
N->getOperand(1))
8363 return DAG.
getNode(SystemZISD::REPLICATE, SDLoc(
N),
N->getValueType(0),
8373 if (Chain1 == Chain2)
8381SDValue SystemZTargetLowering::combineFP_ROUND(
8382 SDNode *
N, DAGCombinerInfo &DCI)
const {
8384 if (!Subtarget.hasVector())
8393 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
8394 SelectionDAG &DAG = DCI.DAG;
8396 if (
N->getValueType(0) == MVT::f32 && Op0.
hasOneUse() &&
8402 for (
auto *U : Vec->
users()) {
8403 if (U != Op0.
getNode() &&
U->hasOneUse() &&
8405 U->getOperand(0) == Vec &&
8407 U->getConstantOperandVal(1) == 1) {
8409 if (OtherRound.
getOpcode() ==
N->getOpcode() &&
8413 if (
N->isStrictFPOpcode()) {
8417 VRound = DAG.
getNode(SystemZISD::STRICT_VROUND, SDLoc(
N),
8418 {MVT::v4f32, MVT::Other}, {Chain, Vec});
8421 VRound = DAG.
getNode(SystemZISD::VROUND, SDLoc(
N),
8423 DCI.AddToWorklist(VRound.
getNode());
8427 DCI.AddToWorklist(Extract1.
getNode());
8433 VRound, DAG.
getConstant(0, SDLoc(Op0), MVT::i32));
8436 N->getVTList(), Extract0, Chain);
8445SDValue SystemZTargetLowering::combineFP_EXTEND(
8446 SDNode *
N, DAGCombinerInfo &DCI)
const {
8448 if (!Subtarget.hasVector())
8457 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
8458 SelectionDAG &DAG = DCI.DAG;
8460 if (
N->getValueType(0) == MVT::f64 && Op0.
hasOneUse() &&
8466 for (
auto *U : Vec->
users()) {
8467 if (U != Op0.
getNode() &&
U->hasOneUse() &&
8469 U->getOperand(0) == Vec &&
8471 U->getConstantOperandVal(1) == 2) {
8473 if (OtherExtend.
getOpcode() ==
N->getOpcode() &&
8477 if (
N->isStrictFPOpcode()) {
8481 VExtend = DAG.
getNode(SystemZISD::STRICT_VEXTEND, SDLoc(
N),
8482 {MVT::v2f64, MVT::Other}, {Chain, Vec});
8485 VExtend = DAG.
getNode(SystemZISD::VEXTEND, SDLoc(
N),
8487 DCI.AddToWorklist(VExtend.
getNode());
8491 DCI.AddToWorklist(Extract1.
getNode());
8497 VExtend, DAG.
getConstant(0, SDLoc(Op0), MVT::i32));
8500 N->getVTList(), Extract0, Chain);
8509SDValue SystemZTargetLowering::combineINT_TO_FP(
8510 SDNode *
N, DAGCombinerInfo &DCI)
const {
8513 SelectionDAG &DAG = DCI.DAG;
8515 unsigned Opcode =
N->getOpcode();
8516 EVT OutVT =
N->getValueType(0);
8520 unsigned InScalarBits =
Op->getValueType(0).getScalarSizeInBits();
8526 if (OutLLVMTy->
isVectorTy() && OutScalarBits > InScalarBits &&
8527 OutScalarBits <= 64) {
8531 unsigned ExtOpcode =
8534 return DAG.
getNode(Opcode, SDLoc(
N), OutVT, ExtOp);
8539SDValue SystemZTargetLowering::combineFCOPYSIGN(
8540 SDNode *
N, DAGCombinerInfo &DCI)
const {
8541 SelectionDAG &DAG = DCI.DAG;
8542 EVT VT =
N->getValueType(0);
8555SDValue SystemZTargetLowering::combineBSWAP(
8556 SDNode *
N, DAGCombinerInfo &DCI)
const {
8557 SelectionDAG &DAG = DCI.DAG;
8560 N->getOperand(0).hasOneUse() &&
8561 canLoadStoreByteSwapped(
N->getValueType(0))) {
8570 EVT LoadVT =
N->getValueType(0);
8571 if (LoadVT == MVT::i16)
8576 Ops,
LD->getMemoryVT(),
LD->getMemOperand());
8580 if (
N->getValueType(0) == MVT::i16)
8585 DCI.CombineTo(
N, ResVal);
8589 DCI.CombineTo(
Load.getNode(), ResVal, BSLoad.
getValue(1));
8598 Op.getValueType().isVector() &&
8599 Op.getOperand(0).getValueType().isVector() &&
8600 Op.getValueType().getVectorNumElements() ==
8601 Op.getOperand(0).getValueType().getVectorNumElements())
8602 Op =
Op.getOperand(0);
8614 (canLoadStoreByteSwapped(
N->getValueType(0)) &&
8616 EVT VecVT =
N->getValueType(0);
8620 DCI.AddToWorklist(Vec.
getNode());
8624 DCI.AddToWorklist(Elt.
getNode());
8627 DCI.AddToWorklist(Vec.
getNode());
8629 DCI.AddToWorklist(Elt.
getNode());
8637 if (SV &&
Op.hasOneUse()) {
8645 EVT VecVT =
N->getValueType(0);
8648 DCI.AddToWorklist(Op0.
getNode());
8652 DCI.AddToWorklist(Op1.
getNode());
8655 DCI.AddToWorklist(Op0.
getNode());
8657 DCI.AddToWorklist(Op1.
getNode());
8665SDValue SystemZTargetLowering::combineSETCC(
8666 SDNode *
N, DAGCombinerInfo &DCI)
const {
8667 SelectionDAG &DAG = DCI.DAG;
8673 EVT VT =
N->getValueType(0);
8683 Src.getValueType().isFixedLengthVector() &&
8684 Src.getValueType().getScalarType() == MVT::i1) {
8685 EVT CmpVT = Src.getOperand(0).getValueType();
8702 unsigned Depth = 0) {
8710 case SystemZISD::IPM:
8715 case SystemZISD::SELECT_CCMASK: {
8717 if (Op4CCReg.
getOpcode() == SystemZISD::ICMP ||
8718 Op4CCReg.
getOpcode() == SystemZISD::TM) {
8721 return std::make_pair(OpCC, OpCCValid);
8726 int CCValidVal = CCValid->getZExtValue();
8727 return std::make_pair(Op4CCReg, CCValidVal);
8738 return std::make_pair(Op0CC, Op0CCValid);
8754 return {Val, Val, Val, Val};
8755 case SystemZISD::IPM: {
8760 for (
auto CC : {0, 1, 2, 3})
8763 return ShiftedCCVals;
8765 case SystemZISD::SELECT_CCMASK: {
8769 if (!CCValid || !CCMask)
8772 int CCValidVal = CCValid->getZExtValue();
8773 int CCMaskVal = CCMask->getZExtValue();
8783 if (TrueSDVals.empty() || FalseSDVals.empty())
8786 for (
auto &CCVal : {0, 1, 2, 3})
8787 MergedSDVals.
emplace_back(((CCMaskVal & (1 << (3 - CCVal))) != 0)
8789 : FalseSDVals[CCVal]);
8790 return MergedSDVals;
8807 if (Op0SDVals.empty() || Op1SDVals.empty())
8810 for (
auto CCVal : {0, 1, 2, 3})
8812 Opcode,
DL, Val.
getValueType(), Op0SDVals[CCVal], Op1SDVals[CCVal]));
8813 return BinaryOpSDVals;
8824 auto *CCNode = CCReg.
getNode();
8828 if (CCNode->getOpcode() == SystemZISD::TM) {
8831 auto emulateTMCCMask = [](
const SDValue &Op0Val,
const SDValue &Op1Val) {
8834 if (!Op0Node || !Op1Node)
8836 auto Op0APVal = Op0Node->getAPIntValue();
8837 auto Op1APVal = Op1Node->getAPIntValue();
8838 auto Result = Op0APVal & Op1APVal;
8839 bool AllOnes = Result == Op1APVal;
8840 bool AllZeros = Result == 0;
8841 bool IsLeftMostBitSet = Result[Op1APVal.getActiveBits()] != 0;
8842 return AllZeros ? 0 :
AllOnes ? 3 : IsLeftMostBitSet ? 2 : 1;
8846 auto [Op0CC, Op0CCValid] =
findCCUse(Op0);
8851 if (Op0SDVals.empty() || Op1SDVals.empty())
8854 for (
auto CC : {0, 1, 2, 3}) {
8855 auto CCVal = emulateTMCCMask(Op0SDVals[CC], Op1SDVals[CC]);
8859 NewCCMask |= (CCMask & (1 << (3 - CCVal))) != 0;
8861 NewCCMask &= Op0CCValid;
8864 CCValid = Op0CCValid;
8867 if (CCNode->getOpcode() != SystemZISD::ICMP ||
8874 auto [Op0CC, Op0CCValid] =
findCCUse(CmpOp0);
8878 if (Op0SDVals.empty() || Op1SDVals.empty())
8882 auto CmpTypeVal = CmpType->getZExtValue();
8883 const auto compareCCSigned = [&CmpTypeVal](
const SDValue &Op0Val,
8887 if (!Op0Node || !Op1Node)
8889 auto Op0APVal = Op0Node->getAPIntValue();
8890 auto Op1APVal = Op1Node->getAPIntValue();
8892 return Op0APVal == Op1APVal ? 0 : Op0APVal.slt(Op1APVal) ? 1 : 2;
8893 return Op0APVal == Op1APVal ? 0 : Op0APVal.ult(Op1APVal) ? 1 : 2;
8896 for (
auto CC : {0, 1, 2, 3}) {
8897 auto CCVal = compareCCSigned(Op0SDVals[CC], Op1SDVals[CC]);
8901 NewCCMask |= (CCMask & (1 << (3 - CCVal))) != 0;
8903 NewCCMask &= Op0CCValid;
8906 CCValid = Op0CCValid;
8917 const Value *Rhs)
const {
8918 const auto isFlagOutOpCC = [](
const Value *V) {
8920 const Value *RHSVal;
8927 if (CB->isInlineAsm()) {
8929 return IA && IA->getConstraintString().contains(
"{@cc}");
8940 if (isFlagOutOpCC(Lhs) && isFlagOutOpCC(Rhs))
8943 return {-1, -1, -1};
8947 DAGCombinerInfo &DCI)
const {
8953 if (!CCValid || !CCMask)
8956 int CCValidVal = CCValid->getZExtValue();
8957 int CCMaskVal = CCMask->getZExtValue();
8964 if (
combineCCMask(CCReg, CCValidVal, CCMaskVal, DAG) && CCMaskVal != 0 &&
8965 CCMaskVal != CCValidVal)
8966 return DAG.
getNode(SystemZISD::BR_CCMASK,
SDLoc(
N),
N->getValueType(0),
8970 N->getOperand(3), CCReg);
8974SDValue SystemZTargetLowering::combineSELECT_CCMASK(
8975 SDNode *
N, DAGCombinerInfo &DCI)
const {
8981 if (!CCValid || !CCMask)
8984 int CCValidVal = CCValid->getZExtValue();
8985 int CCMaskVal = CCMask->getZExtValue();
8988 bool IsCombinedCCReg =
combineCCMask(CCReg, CCValidVal, CCMaskVal, DAG);
8992 const auto constructCCSDValsFromSELECT = [&CCReg](
SDValue &Val) {
8993 if (Val.getOpcode() == SystemZISD::SELECT_CCMASK) {
8995 if (Val.getOperand(4) != CCReg)
9002 int CCMaskVal = CCMask->getZExtValue();
9003 for (
auto &CC : {0, 1, 2, 3})
9004 Res.
emplace_back(((CCMaskVal & (1 << (3 - CC))) != 0) ? TrueVal
9018 if (TrueSDVals.empty())
9019 TrueSDVals = constructCCSDValsFromSELECT(TrueVal);
9020 if (FalseSDVals.empty())
9021 FalseSDVals = constructCCSDValsFromSELECT(FalseVal);
9022 if (!TrueSDVals.empty() && !FalseSDVals.empty()) {
9023 SmallSet<SDValue, 4> MergedSDValsSet;
9025 for (
auto CC : {0, 1, 2, 3}) {
9026 if ((CCValidVal & ((1 << (3 - CC)))) != 0)
9027 MergedSDValsSet.
insert(((CCMaskVal & (1 << (3 - CC))) != 0)
9031 if (MergedSDValsSet.
size() == 1)
9032 return *MergedSDValsSet.
begin();
9033 if (MergedSDValsSet.
size() == 2) {
9034 auto BeginIt = MergedSDValsSet.
begin();
9035 SDValue NewTrueVal = *BeginIt, NewFalseVal = *next(BeginIt);
9036 if (NewTrueVal == FalseVal || NewFalseVal == TrueVal)
9039 for (
auto CC : {0, 1, 2, 3}) {
9041 NewCCMask |= ((CCMaskVal & (1 << (3 - CC))) != 0)
9042 ? (TrueSDVals[CC] == NewTrueVal)
9043 : (FalseSDVals[CC] == NewTrueVal);
9045 CCMaskVal = NewCCMask;
9046 CCMaskVal &= CCValidVal;
9049 IsCombinedCCReg =
true;
9057 if (CCMaskVal == CCValidVal)
9060 if (IsCombinedCCReg)
9062 SystemZISD::SELECT_CCMASK, SDLoc(
N),
N->getValueType(0), TrueVal,
9069SDValue SystemZTargetLowering::combineGET_CCMASK(
9070 SDNode *
N, DAGCombinerInfo &DCI)
const {
9075 if (!CCValid || !CCMask)
9077 int CCValidVal = CCValid->getZExtValue();
9078 int CCMaskVal = CCMask->getZExtValue();
9083 if (
Select->getOpcode() != SystemZISD::SELECT_CCMASK)
9088 if (!SelectCCValid || !SelectCCMask)
9090 int SelectCCValidVal = SelectCCValid->getZExtValue();
9091 int SelectCCMaskVal = SelectCCMask->getZExtValue();
9095 if (!TrueVal || !FalseVal)
9099 else if (
TrueVal->getZExtValue() == 0 &&
FalseVal->getZExtValue() == 1)
9100 SelectCCMaskVal ^= SelectCCValidVal;
9104 if (SelectCCValidVal & ~CCValidVal)
9106 if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
9109 return Select->getOperand(4);
9112SDValue SystemZTargetLowering::combineIntDIVREM(
9113 SDNode *
N, DAGCombinerInfo &DCI)
const {
9114 SelectionDAG &DAG = DCI.DAG;
9115 EVT VT =
N->getValueType(0);
9132SDValue SystemZTargetLowering::combineShiftToMulAddHigh(
9133 SDNode *
N, DAGCombinerInfo &DCI)
const {
9134 SelectionDAG &DAG = DCI.DAG;
9138 "SRL or SRA node is required here!");
9140 if (!Subtarget.hasVector())
9150 SDValue ShiftOperand =
N->getOperand(0);
9170 if (!IsSignExt && !IsZeroExt)
9178 unsigned ActiveBits = IsSignExt
9179 ?
Constant->getAPIntValue().getSignificantBits()
9180 :
Constant->getAPIntValue().getActiveBits();
9181 if (ActiveBits > NarrowVTSize)
9197 unsigned ActiveBits = IsSignExt
9198 ?
Constant->getAPIntValue().getSignificantBits()
9199 :
Constant->getAPIntValue().getActiveBits();
9200 if (ActiveBits > NarrowVTSize)
9217 "Cannot have a multiply node with two different operand types.");
9219 "Cannot have an add node with two different operand types.");
9230 if (ShiftAmt != NarrowVTSize)
9234 if (!(NarrowVT == MVT::v16i8 || NarrowVT == MVT::v8i16 ||
9235 NarrowVT == MVT::v4i32 ||
9236 (Subtarget.hasVectorEnhancements3() &&
9237 (NarrowVT == MVT::v2i64 || NarrowVT == MVT::i128))))
9243 MulhRightOp, MulhAddOp);
9244 bool IsSigned =
N->getOpcode() ==
ISD::SRA;
9255 EVT VT =
Op.getValueType();
9264 Op =
Op.getOperand(0);
9265 if (
Op.getValueType().getVectorNumElements() == 2 * NumElts &&
9269 bool CanUseEven =
true, CanUseOdd =
true;
9270 for (
unsigned Elt = 0; Elt < NumElts; Elt++) {
9271 if (ShuffleMask[Elt] == -1)
9273 if (
unsigned(ShuffleMask[Elt]) != 2 * Elt)
9275 if (
unsigned(ShuffleMask[Elt]) != 2 * Elt + 1)
9278 Op =
Op.getOperand(0);
9280 return IsSigned ? SystemZISD::VME : SystemZISD::VMLE;
9282 return IsSigned ? SystemZISD::VMO : SystemZISD::VMLO;
9288 if (VT == MVT::i128 && Subtarget.hasVectorEnhancements3() &&
9292 Op =
Op.getOperand(0);
9294 Op.getOperand(0).getValueType() == MVT::v2i64 &&
9296 unsigned Elem =
Op.getConstantOperandVal(1);
9297 Op =
Op.getOperand(0);
9299 return IsSigned ? SystemZISD::VME : SystemZISD::VMLE;
9301 return IsSigned ? SystemZISD::VMO : SystemZISD::VMLO;
9308SDValue SystemZTargetLowering::combineMUL(
9309 SDNode *
N, DAGCombinerInfo &DCI)
const {
9310 SelectionDAG &DAG = DCI.DAG;
9317 if (OpcodeCand0 && OpcodeCand0 == OpcodeCand1)
9318 return DAG.
getNode(OpcodeCand0, SDLoc(
N),
N->getValueType(0), Op0, Op1);
9323SDValue SystemZTargetLowering::combineINTRINSIC(
9324 SDNode *
N, DAGCombinerInfo &DCI)
const {
9325 SelectionDAG &DAG = DCI.DAG;
9327 unsigned Id =
N->getConstantOperandVal(1);
9331 case Intrinsic::s390_vll:
9332 case Intrinsic::s390_vlrl:
9334 if (
C->getZExtValue() >= 15)
9335 return DAG.
getLoad(
N->getValueType(0), SDLoc(
N),
N->getOperand(0),
9336 N->getOperand(3), MachinePointerInfo());
9339 case Intrinsic::s390_vstl:
9340 case Intrinsic::s390_vstrl:
9342 if (
C->getZExtValue() >= 15)
9343 return DAG.
getStore(
N->getOperand(0), SDLoc(
N),
N->getOperand(2),
9344 N->getOperand(4), MachinePointerInfo());
9352 if (
N->getOpcode() == SystemZISD::PCREL_WRAPPER)
9359 switch(
N->getOpcode()) {
9364 case SystemZISD::MERGE_HIGH:
9365 case SystemZISD::MERGE_LOW:
return combineMERGE(
N, DCI);
9370 case SystemZISD::JOIN_DWORDS:
return combineJOIN_DWORDS(
N, DCI);
9380 case SystemZISD::BR_CCMASK:
return combineBR_CCMASK(
N, DCI);
9381 case SystemZISD::SELECT_CCMASK:
return combineSELECT_CCMASK(
N, DCI);
9384 case ISD::SRA:
return combineShiftToMulAddHigh(
N, DCI);
9385 case ISD::MUL:
return combineMUL(
N, DCI);
9389 case ISD::UREM:
return combineIntDIVREM(
N, DCI);
9401 EVT VT =
Op.getValueType();
9404 unsigned Opcode =
Op.getOpcode();
9406 unsigned Id =
Op.getConstantOperandVal(0);
9408 case Intrinsic::s390_vpksh:
9409 case Intrinsic::s390_vpksf:
9410 case Intrinsic::s390_vpksg:
9411 case Intrinsic::s390_vpkshs:
9412 case Intrinsic::s390_vpksfs:
9413 case Intrinsic::s390_vpksgs:
9414 case Intrinsic::s390_vpklsh:
9415 case Intrinsic::s390_vpklsf:
9416 case Intrinsic::s390_vpklsg:
9417 case Intrinsic::s390_vpklshs:
9418 case Intrinsic::s390_vpklsfs:
9419 case Intrinsic::s390_vpklsgs:
9421 SrcDemE = DemandedElts;
9424 SrcDemE = SrcDemE.
trunc(NumElts / 2);
9427 case Intrinsic::s390_vuphb:
9428 case Intrinsic::s390_vuphh:
9429 case Intrinsic::s390_vuphf:
9430 case Intrinsic::s390_vuplhb:
9431 case Intrinsic::s390_vuplhh:
9432 case Intrinsic::s390_vuplhf:
9433 SrcDemE =
APInt(NumElts * 2, 0);
9436 case Intrinsic::s390_vuplb:
9437 case Intrinsic::s390_vuplhw:
9438 case Intrinsic::s390_vuplf:
9439 case Intrinsic::s390_vupllb:
9440 case Intrinsic::s390_vupllh:
9441 case Intrinsic::s390_vupllf:
9442 SrcDemE =
APInt(NumElts * 2, 0);
9445 case Intrinsic::s390_vpdi: {
9447 SrcDemE =
APInt(NumElts, 0);
9448 if (!DemandedElts[OpNo - 1])
9450 unsigned Mask =
Op.getConstantOperandVal(3);
9451 unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
9453 SrcDemE.
setBit((Mask & MaskBit)? 1 : 0);
9456 case Intrinsic::s390_vsldb: {
9458 assert(VT == MVT::v16i8 &&
"Unexpected type.");
9459 unsigned FirstIdx =
Op.getConstantOperandVal(3);
9460 assert (FirstIdx > 0 && FirstIdx < 16 &&
"Unused operand.");
9461 unsigned NumSrc0Els = 16 - FirstIdx;
9462 SrcDemE =
APInt(NumElts, 0);
9464 APInt DemEls = DemandedElts.
trunc(NumSrc0Els);
9467 APInt DemEls = DemandedElts.
lshr(NumSrc0Els);
9472 case Intrinsic::s390_vperm:
9481 case SystemZISD::JOIN_DWORDS:
9483 SrcDemE =
APInt(1, 1);
9485 case SystemZISD::SELECT_CCMASK:
9486 SrcDemE = DemandedElts;
9497 const APInt &DemandedElts,
9512 const APInt &DemandedElts,
9514 unsigned Depth)
const {
9518 unsigned Tmp0, Tmp1;
9523 EVT VT =
Op.getValueType();
9524 if (
Op.getResNo() != 0 || VT == MVT::Untyped)
9527 "KnownBits does not match VT in bitwidth");
9530 "DemandedElts does not match VT number of elements");
9532 unsigned Opcode =
Op.getOpcode();
9534 bool IsLogical =
false;
9535 unsigned Id =
Op.getConstantOperandVal(0);
9537 case Intrinsic::s390_vpksh:
9538 case Intrinsic::s390_vpksf:
9539 case Intrinsic::s390_vpksg:
9540 case Intrinsic::s390_vpkshs:
9541 case Intrinsic::s390_vpksfs:
9542 case Intrinsic::s390_vpksgs:
9543 case Intrinsic::s390_vpklsh:
9544 case Intrinsic::s390_vpklsf:
9545 case Intrinsic::s390_vpklsg:
9546 case Intrinsic::s390_vpklshs:
9547 case Intrinsic::s390_vpklsfs:
9548 case Intrinsic::s390_vpklsgs:
9549 case Intrinsic::s390_vpdi:
9550 case Intrinsic::s390_vsldb:
9551 case Intrinsic::s390_vperm:
9554 case Intrinsic::s390_vuplhb:
9555 case Intrinsic::s390_vuplhh:
9556 case Intrinsic::s390_vuplhf:
9557 case Intrinsic::s390_vupllb:
9558 case Intrinsic::s390_vupllh:
9559 case Intrinsic::s390_vupllf:
9562 case Intrinsic::s390_vuphb:
9563 case Intrinsic::s390_vuphh:
9564 case Intrinsic::s390_vuphf:
9565 case Intrinsic::s390_vuplb:
9566 case Intrinsic::s390_vuplhw:
9567 case Intrinsic::s390_vuplf: {
9582 case SystemZISD::JOIN_DWORDS:
9583 case SystemZISD::SELECT_CCMASK:
9586 case SystemZISD::REPLICATE: {
9609 if (
LHS == 1)
return 1;
9612 if (
RHS == 1)
return 1;
9613 unsigned Common = std::min(
LHS,
RHS);
9614 unsigned SrcBitWidth =
Op.getOperand(OpNo).getScalarValueSizeInBits();
9615 EVT VT =
Op.getValueType();
9617 if (SrcBitWidth > VTBits) {
9618 unsigned SrcExtraBits = SrcBitWidth - VTBits;
9619 if (Common > SrcExtraBits)
9620 return (Common - SrcExtraBits);
9623 assert (SrcBitWidth == VTBits &&
"Expected operands of same bitwidth.");
9630 unsigned Depth)
const {
9631 if (
Op.getResNo() != 0)
9633 unsigned Opcode =
Op.getOpcode();
9635 unsigned Id =
Op.getConstantOperandVal(0);
9637 case Intrinsic::s390_vpksh:
9638 case Intrinsic::s390_vpksf:
9639 case Intrinsic::s390_vpksg:
9640 case Intrinsic::s390_vpkshs:
9641 case Intrinsic::s390_vpksfs:
9642 case Intrinsic::s390_vpksgs:
9643 case Intrinsic::s390_vpklsh:
9644 case Intrinsic::s390_vpklsf:
9645 case Intrinsic::s390_vpklsg:
9646 case Intrinsic::s390_vpklshs:
9647 case Intrinsic::s390_vpklsfs:
9648 case Intrinsic::s390_vpklsgs:
9649 case Intrinsic::s390_vpdi:
9650 case Intrinsic::s390_vsldb:
9651 case Intrinsic::s390_vperm:
9653 case Intrinsic::s390_vuphb:
9654 case Intrinsic::s390_vuphh:
9655 case Intrinsic::s390_vuphf:
9656 case Intrinsic::s390_vuplb:
9657 case Intrinsic::s390_vuplhw:
9658 case Intrinsic::s390_vuplf: {
9662 EVT VT =
Op.getValueType();
9672 case SystemZISD::SELECT_CCMASK:
9685 switch (
Op->getOpcode()) {
9686 case SystemZISD::PCREL_WRAPPER:
9687 case SystemZISD::PCREL_OFFSET:
9698 "Unexpected stack alignment");
9701 unsigned StackProbeSize =
9704 StackProbeSize &= ~(StackAlign - 1);
9705 return StackProbeSize ? StackProbeSize : StackAlign;
9744 if (
MI.readsRegister(SystemZ::CC,
nullptr))
9746 if (
MI.definesRegister(SystemZ::CC,
nullptr))
9752 if (miI ==
MBB->end()) {
9754 if (Succ->isLiveIn(SystemZ::CC))
9765 switch (
MI.getOpcode()) {
9766 case SystemZ::Select32:
9767 case SystemZ::Select64:
9768 case SystemZ::Select128:
9769 case SystemZ::SelectF32:
9770 case SystemZ::SelectF64:
9771 case SystemZ::SelectF128:
9772 case SystemZ::SelectVR32:
9773 case SystemZ::SelectVR64:
9774 case SystemZ::SelectVR128:
9806 for (
auto *
MI : Selects) {
9807 Register DestReg =
MI->getOperand(0).getReg();
9808 Register TrueReg =
MI->getOperand(1).getReg();
9809 Register FalseReg =
MI->getOperand(2).getReg();
9814 if (
MI->getOperand(4).getImm() == (CCValid ^ CCMask))
9817 if (
auto It = RegRewriteTable.
find(TrueReg); It != RegRewriteTable.
end())
9818 TrueReg = It->second.first;
9820 if (
auto It = RegRewriteTable.
find(FalseReg); It != RegRewriteTable.
end())
9821 FalseReg = It->second.second;
9824 BuildMI(*SinkMBB, SinkInsertionPoint,
DL,
TII->get(SystemZ::PHI), DestReg)
9829 RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
9840 auto *TFL = Subtarget.getFrameLowering<SystemZFrameLowering>();
9841 assert(TFL->hasReservedCallFrame(MF) &&
9842 "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
9847 uint32_t NumBytes =
MI.getOperand(0).getImm();
9852 MI.eraseFromParent();
9861 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
9863 unsigned CCValid =
MI.getOperand(3).getImm();
9864 unsigned CCMask =
MI.getOperand(4).getImm();
9869 SmallVector<MachineInstr*, 8> Selects;
9870 SmallVector<MachineInstr*, 8> DbgValues;
9876 assert(NextMI.getOperand(3).getImm() == CCValid &&
9877 "Bad CCValid operands since CC was not redefined.");
9878 if (NextMI.getOperand(4).getImm() == CCMask ||
9879 NextMI.getOperand(4).getImm() == (CCValid ^ CCMask)) {
9885 if (NextMI.definesRegister(SystemZ::CC,
nullptr) ||
9886 NextMI.usesCustomInsertionHook())
9889 for (
auto *SelMI : Selects)
9890 if (NextMI.readsVirtualRegister(SelMI->getOperand(0).getReg())) {
9894 if (NextMI.isDebugInstr()) {
9896 assert(NextMI.isDebugValue() &&
"Unhandled debug opcode.");
9899 }
else if (User || ++
Count > 20)
9903 MachineInstr *LastMI = Selects.back();
9904 bool CCKilled = (LastMI->
killsRegister(SystemZ::CC,
nullptr) ||
9906 MachineBasicBlock *StartMBB =
MBB;
9936 for (
auto *SelMI : Selects)
9937 SelMI->eraseFromParent();
9940 for (
auto *DbgMI : DbgValues)
9941 MBB->
splice(InsertPos, StartMBB, DbgMI);
9952 unsigned StoreOpcode,
9953 unsigned STOCOpcode,
9954 bool Invert)
const {
9955 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
9958 MachineOperand
Base =
MI.getOperand(1);
9959 int64_t Disp =
MI.getOperand(2).getImm();
9960 Register IndexReg =
MI.getOperand(3).getReg();
9961 unsigned CCValid =
MI.getOperand(4).getImm();
9962 unsigned CCMask =
MI.getOperand(5).getImm();
9965 StoreOpcode =
TII->getOpcodeForOffset(StoreOpcode, Disp);
9969 MachineMemOperand *MMO =
nullptr;
9970 for (
auto *
I :
MI.memoperands())
9979 if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
9991 MI.eraseFromParent();
9999 MachineBasicBlock *StartMBB =
MBB;
10005 if (!
MI.killsRegister(SystemZ::CC,
nullptr) &&
10032 MI.eraseFromParent();
10042 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10050 MachineBasicBlock *StartMBB =
MBB;
10068 int HiOpcode =
Unsigned? SystemZ::VECLG : SystemZ::VECG;
10095 MI.eraseFromParent();
10106 bool Invert)
const {
10108 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10115 int64_t Disp =
MI.getOperand(2).getImm();
10117 Register BitShift =
MI.getOperand(4).getReg();
10118 Register NegBitShift =
MI.getOperand(5).getReg();
10119 unsigned BitSize =
MI.getOperand(6).getImm();
10123 unsigned LOpcode =
TII->getOpcodeForOffset(SystemZ::L, Disp);
10124 unsigned CSOpcode =
TII->getOpcodeForOffset(SystemZ::CS, Disp);
10125 assert(LOpcode && CSOpcode &&
"Displacement out of range");
10135 MachineBasicBlock *StartMBB =
MBB;
10168 }
else if (BinOpcode)
10191 MI.eraseFromParent();
10202 unsigned KeepOldMask)
const {
10204 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10210 int64_t Disp =
MI.getOperand(2).getImm();
10212 Register BitShift =
MI.getOperand(4).getReg();
10213 Register NegBitShift =
MI.getOperand(5).getReg();
10214 unsigned BitSize =
MI.getOperand(6).getImm();
10218 unsigned LOpcode =
TII->getOpcodeForOffset(SystemZ::L, Disp);
10219 unsigned CSOpcode =
TII->getOpcodeForOffset(SystemZ::CS, Disp);
10220 assert(LOpcode && CSOpcode &&
"Displacement out of range");
10231 MachineBasicBlock *StartMBB =
MBB;
10295 MI.eraseFromParent();
10305 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10311 int64_t Disp =
MI.getOperand(2).getImm();
10312 Register CmpVal =
MI.getOperand(3).getReg();
10313 Register OrigSwapVal =
MI.getOperand(4).getReg();
10314 Register BitShift =
MI.getOperand(5).getReg();
10315 Register NegBitShift =
MI.getOperand(6).getReg();
10316 int64_t BitSize =
MI.getOperand(7).getImm();
10319 const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
10322 unsigned LOpcode =
TII->getOpcodeForOffset(SystemZ::L, Disp);
10323 unsigned CSOpcode =
TII->getOpcodeForOffset(SystemZ::CS, Disp);
10324 unsigned ZExtOpcode = BitSize == 8 ? SystemZ::LLCR : SystemZ::LLHR;
10325 assert(LOpcode && CSOpcode &&
"Displacement out of range");
10337 MachineBasicBlock *StartMBB =
MBB;
10409 if (!
MI.registerDefIsDead(SystemZ::CC,
nullptr))
10412 MI.eraseFromParent();
10420 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10425 .
add(
MI.getOperand(1))
10426 .
addImm(SystemZ::subreg_h64)
10427 .
add(
MI.getOperand(2))
10428 .
addImm(SystemZ::subreg_l64);
10429 MI.eraseFromParent();
10438 bool ClearEven)
const {
10440 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10462 MI.eraseFromParent();
10469 unsigned Opcode,
bool IsMemset)
const {
10471 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10476 uint64_t DestDisp =
MI.getOperand(1).getImm();
10481 auto foldDisplIfNeeded = [&](MachineOperand &
Base, uint64_t &Disp) ->
void {
10484 unsigned Opcode =
TII->getOpcodeForOffset(SystemZ::LA, Disp);
10494 SrcDisp =
MI.getOperand(3).getImm();
10496 SrcBase = DestBase;
10497 SrcDisp = DestDisp++;
10498 foldDisplIfNeeded(DestBase, DestDisp);
10501 MachineOperand &LengthMO =
MI.getOperand(IsMemset ? 2 : 4);
10502 bool IsImmForm = LengthMO.
isImm();
10503 bool IsRegForm = !IsImmForm;
10506 auto insertMemMemOp = [&](MachineBasicBlock *InsMBB,
10508 MachineOperand DBase, uint64_t DDisp,
10509 MachineOperand
SBase, uint64_t SDisp,
10510 unsigned Length) ->
void {
10514 if (ByteMO.
isImm())
10529 bool NeedsLoop =
false;
10530 uint64_t ImmLength = 0;
10531 Register LenAdjReg = SystemZ::NoRegister;
10533 ImmLength = LengthMO.
getImm();
10534 ImmLength += IsMemset ? 2 : 1;
10535 if (ImmLength == 0) {
10536 MI.eraseFromParent();
10539 if (Opcode == SystemZ::CLC) {
10540 if (ImmLength > 3 * 256)
10550 }
else if (ImmLength > 6 * 256)
10558 LenAdjReg = LengthMO.
getReg();
10563 MachineBasicBlock *EndMBB =
10564 (Opcode == SystemZ::CLC && (ImmLength > 256 || NeedsLoop)
10572 TII->loadImmediate(*
MBB,
MI, StartCountReg, ImmLength / 256);
10582 auto loadZeroAddress = [&]() -> MachineOperand {
10587 if (DestBase.
isReg() && DestBase.
getReg() == SystemZ::NoRegister)
10588 DestBase = loadZeroAddress();
10589 if (SrcBase.
isReg() && SrcBase.
getReg() == SystemZ::NoRegister)
10590 SrcBase = HaveSingleBase ? DestBase : loadZeroAddress();
10592 MachineBasicBlock *StartMBB =
nullptr;
10593 MachineBasicBlock *LoopMBB =
nullptr;
10594 MachineBasicBlock *NextMBB =
nullptr;
10595 MachineBasicBlock *DoneMBB =
nullptr;
10596 MachineBasicBlock *AllDoneMBB =
nullptr;
10600 (HaveSingleBase ? StartSrcReg :
forceReg(
MI, DestBase,
TII));
10602 const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
10609 RC = &SystemZ::GR64BitRegClass;
10637 MBB = MemsetOneCheckMBB;
10648 MBB = MemsetOneMBB;
10680 if (EndMBB && !ImmLength)
10702 if (!HaveSingleBase)
10709 if (Opcode == SystemZ::MVC)
10736 if (!HaveSingleBase)
10759 Register RemDestReg = HaveSingleBase ? RemSrcReg
10764 if (!HaveSingleBase)
10772 MachineInstrBuilder EXRL_MIB =
10780 if (Opcode != SystemZ::MVC) {
10790 while (ImmLength > 0) {
10791 uint64_t ThisLength = std::min(ImmLength, uint64_t(256));
10794 foldDisplIfNeeded(DestBase, DestDisp);
10795 foldDisplIfNeeded(SrcBase, SrcDisp);
10796 insertMemMemOp(
MBB,
MI, DestBase, DestDisp, SrcBase, SrcDisp, ThisLength);
10797 DestDisp += ThisLength;
10798 SrcDisp += ThisLength;
10799 ImmLength -= ThisLength;
10802 if (EndMBB && ImmLength > 0) {
10818 MI.eraseFromParent();
10827 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10831 uint64_t End1Reg =
MI.getOperand(0).getReg();
10832 uint64_t Start1Reg =
MI.getOperand(1).getReg();
10833 uint64_t Start2Reg =
MI.getOperand(2).getReg();
10834 uint64_t CharReg =
MI.getOperand(3).getReg();
10836 const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
10841 MachineBasicBlock *StartMBB =
MBB;
10877 MI.eraseFromParent();
10884 bool NoFloat)
const {
10886 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
10887 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10890 MI.setDesc(
TII->get(Opcode));
10894 uint64_t Control =
MI.getOperand(2).getImm();
10895 static const unsigned GPRControlBit[16] = {
10896 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
10897 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
10899 Control |= GPRControlBit[15];
10900 if (TFI->
hasFP(MF))
10901 Control |= GPRControlBit[11];
10902 MI.getOperand(2).setImm(Control);
10905 for (
int I = 0;
I < 16;
I++) {
10906 if ((Control & GPRControlBit[
I]) == 0) {
10913 if (!NoFloat && (Control & 4) != 0) {
10914 if (Subtarget.hasVector()) {
10931 MachineRegisterInfo *MRI = &MF.
getRegInfo();
10932 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10935 Register SrcReg =
MI.getOperand(0).getReg();
10938 const TargetRegisterClass *RC = MRI->
getRegClass(SrcReg);
10946 MI.eraseFromParent();
10954 MachineRegisterInfo *MRI = &MF.
getRegInfo();
10955 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10958 Register DstReg =
MI.getOperand(0).getReg();
10959 Register SizeReg =
MI.getOperand(2).getReg();
10961 MachineBasicBlock *StartMBB =
MBB;
11037 MI.eraseFromParent();
11041SDValue SystemZTargetLowering::
11044 auto *TFL = Subtarget.getFrameLowering<SystemZELFFrameLowering>();
11052 switch (
MI.getOpcode()) {
11053 case SystemZ::ADJCALLSTACKDOWN:
11054 case SystemZ::ADJCALLSTACKUP:
11055 return emitAdjCallStack(
MI,
MBB);
11057 case SystemZ::Select32:
11058 case SystemZ::Select64:
11059 case SystemZ::Select128:
11060 case SystemZ::SelectF32:
11061 case SystemZ::SelectF64:
11062 case SystemZ::SelectF128:
11063 case SystemZ::SelectVR32:
11064 case SystemZ::SelectVR64:
11065 case SystemZ::SelectVR128:
11066 return emitSelect(
MI,
MBB);
11068 case SystemZ::CondStore8Mux:
11069 return emitCondStore(
MI,
MBB, SystemZ::STCMux, 0,
false);
11070 case SystemZ::CondStore8MuxInv:
11071 return emitCondStore(
MI,
MBB, SystemZ::STCMux, 0,
true);
11072 case SystemZ::CondStore16Mux:
11073 return emitCondStore(
MI,
MBB, SystemZ::STHMux, 0,
false);
11074 case SystemZ::CondStore16MuxInv:
11075 return emitCondStore(
MI,
MBB, SystemZ::STHMux, 0,
true);
11076 case SystemZ::CondStore32Mux:
11077 return emitCondStore(
MI,
MBB, SystemZ::STMux, SystemZ::STOCMux,
false);
11078 case SystemZ::CondStore32MuxInv:
11079 return emitCondStore(
MI,
MBB, SystemZ::STMux, SystemZ::STOCMux,
true);
11080 case SystemZ::CondStore8:
11081 return emitCondStore(
MI,
MBB, SystemZ::STC, 0,
false);
11082 case SystemZ::CondStore8Inv:
11083 return emitCondStore(
MI,
MBB, SystemZ::STC, 0,
true);
11084 case SystemZ::CondStore16:
11085 return emitCondStore(
MI,
MBB, SystemZ::STH, 0,
false);
11086 case SystemZ::CondStore16Inv:
11087 return emitCondStore(
MI,
MBB, SystemZ::STH, 0,
true);
11088 case SystemZ::CondStore32:
11089 return emitCondStore(
MI,
MBB, SystemZ::ST, SystemZ::STOC,
false);
11090 case SystemZ::CondStore32Inv:
11091 return emitCondStore(
MI,
MBB, SystemZ::ST, SystemZ::STOC,
true);
11092 case SystemZ::CondStore64:
11093 return emitCondStore(
MI,
MBB, SystemZ::STG, SystemZ::STOCG,
false);
11094 case SystemZ::CondStore64Inv:
11095 return emitCondStore(
MI,
MBB, SystemZ::STG, SystemZ::STOCG,
true);
11096 case SystemZ::CondStoreF32:
11097 return emitCondStore(
MI,
MBB, SystemZ::STE, 0,
false);
11098 case SystemZ::CondStoreF32Inv:
11099 return emitCondStore(
MI,
MBB, SystemZ::STE, 0,
true);
11100 case SystemZ::CondStoreF64:
11101 return emitCondStore(
MI,
MBB, SystemZ::STD, 0,
false);
11102 case SystemZ::CondStoreF64Inv:
11103 return emitCondStore(
MI,
MBB, SystemZ::STD, 0,
true);
11105 case SystemZ::SCmp128Hi:
11106 return emitICmp128Hi(
MI,
MBB,
false);
11107 case SystemZ::UCmp128Hi:
11108 return emitICmp128Hi(
MI,
MBB,
true);
11110 case SystemZ::PAIR128:
11111 return emitPair128(
MI,
MBB);
11112 case SystemZ::AEXT128:
11113 return emitExt128(
MI,
MBB,
false);
11114 case SystemZ::ZEXT128:
11115 return emitExt128(
MI,
MBB,
true);
11117 case SystemZ::ATOMIC_SWAPW:
11118 return emitAtomicLoadBinary(
MI,
MBB, 0);
11120 case SystemZ::ATOMIC_LOADW_AR:
11121 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::AR);
11122 case SystemZ::ATOMIC_LOADW_AFI:
11123 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::AFI);
11125 case SystemZ::ATOMIC_LOADW_SR:
11126 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::SR);
11128 case SystemZ::ATOMIC_LOADW_NR:
11129 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NR);
11130 case SystemZ::ATOMIC_LOADW_NILH:
11131 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NILH);
11133 case SystemZ::ATOMIC_LOADW_OR:
11134 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::OR);
11135 case SystemZ::ATOMIC_LOADW_OILH:
11136 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::OILH);
11138 case SystemZ::ATOMIC_LOADW_XR:
11139 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::XR);
11140 case SystemZ::ATOMIC_LOADW_XILF:
11141 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::XILF);
11143 case SystemZ::ATOMIC_LOADW_NRi:
11144 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NR,
true);
11145 case SystemZ::ATOMIC_LOADW_NILHi:
11146 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NILH,
true);
11148 case SystemZ::ATOMIC_LOADW_MIN:
11150 case SystemZ::ATOMIC_LOADW_MAX:
11152 case SystemZ::ATOMIC_LOADW_UMIN:
11154 case SystemZ::ATOMIC_LOADW_UMAX:
11157 case SystemZ::ATOMIC_CMP_SWAPW:
11158 return emitAtomicCmpSwapW(
MI,
MBB);
11159 case SystemZ::MVCImm:
11160 case SystemZ::MVCReg:
11161 return emitMemMemWrapper(
MI,
MBB, SystemZ::MVC);
11162 case SystemZ::NCImm:
11163 return emitMemMemWrapper(
MI,
MBB, SystemZ::NC);
11164 case SystemZ::OCImm:
11165 return emitMemMemWrapper(
MI,
MBB, SystemZ::OC);
11166 case SystemZ::XCImm:
11167 case SystemZ::XCReg:
11168 return emitMemMemWrapper(
MI,
MBB, SystemZ::XC);
11169 case SystemZ::CLCImm:
11170 case SystemZ::CLCReg:
11171 return emitMemMemWrapper(
MI,
MBB, SystemZ::CLC);
11172 case SystemZ::MemsetImmImm:
11173 case SystemZ::MemsetImmReg:
11174 case SystemZ::MemsetRegImm:
11175 case SystemZ::MemsetRegReg:
11176 return emitMemMemWrapper(
MI,
MBB, SystemZ::MVC,
true);
11177 case SystemZ::CLSTLoop:
11178 return emitStringWrapper(
MI,
MBB, SystemZ::CLST);
11179 case SystemZ::MVSTLoop:
11180 return emitStringWrapper(
MI,
MBB, SystemZ::MVST);
11181 case SystemZ::SRSTLoop:
11182 return emitStringWrapper(
MI,
MBB, SystemZ::SRST);
11183 case SystemZ::TBEGIN:
11184 return emitTransactionBegin(
MI,
MBB, SystemZ::TBEGIN,
false);
11185 case SystemZ::TBEGIN_nofloat:
11186 return emitTransactionBegin(
MI,
MBB, SystemZ::TBEGIN,
true);
11187 case SystemZ::TBEGINC:
11188 return emitTransactionBegin(
MI,
MBB, SystemZ::TBEGINC,
true);
11189 case SystemZ::LTEBRCompare_Pseudo:
11190 return emitLoadAndTestCmp0(
MI,
MBB, SystemZ::LTEBR);
11191 case SystemZ::LTDBRCompare_Pseudo:
11192 return emitLoadAndTestCmp0(
MI,
MBB, SystemZ::LTDBR);
11193 case SystemZ::LTXBRCompare_Pseudo:
11194 return emitLoadAndTestCmp0(
MI,
MBB, SystemZ::LTXBR);
11196 case SystemZ::PROBED_ALLOCA:
11197 return emitProbedAlloca(
MI,
MBB);
11198 case SystemZ::EH_SjLj_SetJmp:
11200 case SystemZ::EH_SjLj_LongJmp:
11203 case TargetOpcode::STACKMAP:
11204 case TargetOpcode::PATCHPOINT:
11215SystemZTargetLowering::getRepRegClassFor(
MVT VT)
const {
11216 if (VT == MVT::Untyped)
11217 return &SystemZ::ADDR128BitRegClass;
11243 DAG.
getMachineNode(SystemZ::EFPC, dl, {MVT::i32, MVT::Other}, Chain), 0);
11263 EVT VT =
Op.getValueType();
11264 Op =
Op.getOperand(0);
11265 EVT OpVT =
Op.getValueType();
11267 assert(OpVT.
isVector() &&
"Operand type for VECREDUCE_ADD is not a vector.");
11278 Op = DAG.
getNode(SystemZISD::VSUM,
DL, MVT::v4i32,
Op, Zero);
11298 const AttributeList &Attrs =
F->getAttributes();
11299 if (Attrs.hasRetAttrs())
11300 OS << Attrs.getAsString(AttributeList::ReturnIndex) <<
" ";
11301 OS << *
F->getReturnType() <<
" @" <<
F->getName() <<
"(";
11302 for (
unsigned I = 0,
E = FT->getNumParams();
I !=
E; ++
I) {
11305 OS << *FT->getParamType(
I);
11307 for (
auto A : {Attribute::SExt, Attribute::ZExt, Attribute::NoExt})
11314bool SystemZTargetLowering::isInternal(
const Function *Fn)
const {
11315 std::map<const Function *, bool>::iterator Itr = IsInternalCache.find(Fn);
11316 if (Itr == IsInternalCache.end())
11317 Itr = IsInternalCache
11318 .insert(std::pair<const Function *, bool>(
11321 return Itr->second;
11324void SystemZTargetLowering::
11332 bool IsInternal =
false;
11333 const Function *CalleeFn =
nullptr;
11336 IsInternal = isInternal(CalleeFn);
11337 if (!IsInternal && !verifyNarrowIntegerArgs(Outs)) {
11338 errs() <<
"ERROR: Missing extension attribute of passed "
11339 <<
"value in call to function:\n" <<
"Callee: ";
11340 if (CalleeFn !=
nullptr)
11344 errs() <<
"Caller: ";
11350void SystemZTargetLowering::
11358 if (!isInternal(
F) && !verifyNarrowIntegerArgs(Outs)) {
11359 errs() <<
"ERROR: Missing extension attribute of returned "
11360 <<
"value from function:\n";
11368bool SystemZTargetLowering::verifyNarrowIntegerArgs(
11370 if (!Subtarget.isTargetELF())
11379 for (
unsigned i = 0; i < Outs.
size(); ++i) {
11380 MVT VT = Outs[i].VT;
11381 ISD::ArgFlagsTy
Flags = Outs[i].Flags;
11384 "Unexpected integer argument VT.");
11385 if (VT == MVT::i32 &&
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
AMDGPU Register Bank Select
static bool isZeroVector(SDValue N)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis false
Function Alias Analysis Results
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
const HexagonInstrInfo * TII
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isSelectPseudo(MachineInstr &MI)
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
uint64_t IntrinsicInst * II
static constexpr MCPhysReg SPReg
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file defines the SmallSet class.
static SDValue getI128Select(SelectionDAG &DAG, const SDLoc &DL, Comparison C, SDValue TrueOp, SDValue FalseOp)
static SmallVector< SDValue, 4 > simplifyAssumingCCVal(SDValue &Val, SDValue &CC, SelectionDAG &DAG)
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void printFunctionArgExts(const Function *F, raw_fd_ostream &OS)
static void adjustForLTGFR(Comparison &C)
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, SDValue Op1)
static cl::opt< bool > EnableIntArgExtCheck("argext-abi-check", cl::init(false), cl::desc("Verify that narrow int args are properly extended per the " "SystemZ ABI."))
static bool isOnlyUsedByStores(SDValue StoredVal, SelectionDAG &DAG)
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, unsigned Opcode, SDValue Op0, SDValue Op1, SDValue &Even, SDValue &Odd)
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue lowerAddrSpaceCast(SDValue Op, SelectionDAG &DAG)
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Value)
static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In)
static bool isSimpleShift(SDValue N, unsigned &ShiftVal)
static SDValue mergeHighParts(SelectionDAG &DAG, const SDLoc &DL, unsigned MergedBits, EVT VT, SDValue Op0, SDValue Op1)
static bool isI128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1)
static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num)
static bool isVectorElementSwap(ArrayRef< int > M, EVT VT)
static void getCSAddressAndShifts(SDValue Addr, SelectionDAG &DAG, SDLoc DL, SDValue &AlignedAddr, SDValue &BitShift, SDValue &NegBitShift)
static bool isShlDoublePermute(const SmallVectorImpl< int > &Bytes, unsigned &StartIndex, unsigned &OpNo0, unsigned &OpNo1)
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, const Permute &P, SDValue Op0, SDValue Op1)
static SDNode * emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg)
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, SDValue Op0, SDValue Op1, SDValue &Hi, SDValue &Lo)
static bool isF128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static void createPHIsForSelects(SmallVector< MachineInstr *, 8 > &Selects, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, MachineBasicBlock *SinkMBB)
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, SDValue *Ops, const SmallVectorImpl< int > &Bytes)
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode, bool &Invert)
static unsigned CCMaskForCondCode(ISD::CondCode CC)
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForFNeg(Comparison &C)
static bool isScalarToVector(SDValue Op)
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg, unsigned CCValid, unsigned CCMask)
static bool matchPermute(const SmallVectorImpl< int > &Bytes, const Permute &P, unsigned &OpNo0, unsigned &OpNo1)
static bool isAddCarryChain(SDValue Carry)
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static MachineOperand earlyUseOperand(MachineOperand Op)
static bool canUseSiblingCall(const CCState &ArgCCInfo, SmallVectorImpl< CCValAssign > &ArgLocs, SmallVectorImpl< ISD::OutputArg > &Outs)
static bool getzOSCalleeAndADA(SelectionDAG &DAG, SDValue &Callee, SDValue &ADA, SDLoc &DL, SDValue &Chain)
static SDValue convertToF16(SDValue Op, SelectionDAG &DAG)
static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask, SelectionDAG &DAG)
static bool shouldSwapCmpOperands(const Comparison &C)
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType)
static SDValue getADAEntry(SelectionDAG &DAG, SDValue Val, SDLoc DL, unsigned Offset, bool LoadAdr=false)
static SDNode * emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static bool getVPermMask(SDValue ShuffleOp, SmallVectorImpl< int > &Bytes)
static const Permute PermuteForms[]
static bool isI128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static std::pair< SDValue, int > findCCUse(const SDValue &Val, unsigned Depth=0)
static bool isSubBorrowChain(SDValue Carry)
static void adjustICmp128(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static bool analyzeArgSplit(const SmallVectorImpl< ArgTy > &Args, SmallVector< CCValAssign, 16 > &ArgLocs, unsigned I, MVT &PartVT, unsigned &NumParts)
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, unsigned OpNo)
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, bool IsNegative)
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static SDValue expandBitCastI128ToF128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, BuildVectorSDNode *BVN)
static SDValue convertFromF16(SDValue Op, SDLoc DL, SelectionDAG &DAG)
static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode)
static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In)
static SDValue MergeInputChains(SDNode *N1, SDNode *N2)
static SDValue expandBitCastF128ToI128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, uint64_t Mask, uint64_t CmpVal, unsigned ICmpType)
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, SDValue Op, SDValue Chain)
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, ISD::CondCode Cond, const SDLoc &DL, SDValue Chain=SDValue(), bool IsSignaling=false)
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB)
static Register forceReg(MachineInstr &MI, MachineOperand &Base, const SystemZInstrInfo *TII)
static bool is32Bit(EVT VT)
static std::pair< unsigned, const TargetRegisterClass * > parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC, const unsigned *Map, unsigned Size)
static unsigned detectEvenOddMultiplyOperand(const SelectionDAG &DAG, const SystemZSubtarget &Subtarget, SDValue &Op)
static bool matchDoublePermute(const SmallVectorImpl< int > &Bytes, const Permute &P, SmallVectorImpl< int > &Transform)
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, SDValue Call, unsigned CCValid, uint64_t CC, ISD::CondCode Cond)
static SDValue buildFPVecFromScalars4(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SmallVectorImpl< SDValue > &Elems, unsigned Pos)
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg)
static AddressingMode getLoadStoreAddrMode(bool HasVector, Type *Ty)
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op0, SDValue Op1)
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static bool getShuffleInput(const SmallVectorImpl< int > &Bytes, unsigned Start, unsigned BytesPerElement, int &Base)
static AddressingMode supportedAddressingMode(Instruction *I, bool HasVector)
static bool isF128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSingleWord() const
Determine if this APInt just has one word to store value.
LLVM_ABI void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Represent a constant reference to an array (0 or more elements consecutively in memory),...
an instruction that atomically reads a memory location, combines it with another value,...
BinOp getOperation() const
This class holds the attributes for a particular argument, parameter, function, or return value.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
static LLVM_ABI StringRef getNameFromAttrKind(Attribute::AttrKind AttrKind)
LLVM Basic Block Representation.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
LLVM_ABI bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
LLVM_ABI bool isConstant() const
CCState - This class holds information needed while lowering arguments and return values.
LLVM_ABI void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
LLVM_ABI bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
LLVM_ABI void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
LLVM_ABI void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
LLVM_ABI void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
This class represents a function call, abstracting a target machine's calling convention.
MachineConstantPoolValue * getMachineCPVal() const
bool isMachineConstantPoolEntry() const
const Constant * getConstVal() const
uint64_t getZExtValue() const
This is an important base class in LLVM.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
bool hasAddressTaken(const User **=nullptr, bool IgnoreCallbackUses=false, bool IgnoreAssumeLikeCalls=true, bool IngoreLLVMUsed=false, bool IgnoreARCAttachedCall=false, bool IgnoreCastedDirectCall=false) const
hasAddressTaken - returns true if there are any uses of this function other than direct calls or invo...
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
LLVM_ABI const GlobalObject * getAliaseeObject() const
bool hasLocalLinkage() const
bool hasPrivateLinkage() const
bool hasInternalLinkage() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
static auto integer_fixedlen_vector_valuetypes()
uint64_t getScalarSizeInBits() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setMaxCallFrameSize(uint64_t S)
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setAdjustsStack(bool V)
void setFrameAddressIsTaken(bool T)
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void push_back(MachineBasicBlock *MBB)
reverse_iterator rbegin()
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineFunctionProperties & getProperties() const
Get the function properties.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr kills the specified register.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Register getReg() const
getReg - Returns the register number.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
Align getBaseAlign() const
Returns alignment and volatility of the memory access.
MachineMemOperand * getMemOperand() const
Return the unique MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
iterator_range< user_iterator > users()
void setFlags(SDNodeFlags NewFlags)
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getMachineOpcode() const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI SDValue getAtomicLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT MemVT, EVT VT, SDValue Chain, SDValue Ptr, MachineMemOperand *MMO)
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
LLVM_ABI SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
LLVM_ABI SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI std::pair< SDValue, SDValue > getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT)
Convert Op, which must be a STRICT operation of float type, to the float type VT, by either extending...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
LLVM_ABI SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
LLVM_ABI SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
ArrayRef< int > getMask() const
const_iterator begin() const
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
Represent a constant reference to a string, i.e.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
A SystemZ-specific class detailing special use registers particular for calling conventions.
virtual int getStackPointerBias()=0
virtual int getReturnFunctionAddressRegister()=0
virtual int getCallFrameSize()=0
virtual int getStackPointerRegister()=0
static SystemZConstantPoolValue * Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier)
unsigned getVarArgsFrameIndex() const
void setVarArgsFrameIndex(unsigned FI)
void setRegSaveFrameIndex(unsigned FI)
void incNumLocalDynamicTLSAccesses()
Register getVarArgsFirstGPR() const
void setADAVirtualRegister(Register Reg)
void setVarArgsFirstGPR(Register GPR)
Register getADAVirtualRegister() const
void setSizeOfFnParams(unsigned Size)
void setVarArgsFirstFPR(Register FPR)
unsigned getRegSaveFrameIndex() const
Register getVarArgsFirstFPR() const
const SystemZInstrInfo * getInstrInfo() const override
SystemZCallingConventionRegisters * getSpecialRegisters() const
AtomicExpansionKind shouldExpandAtomicRMWInIR(const AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool hasInlineStackProbe(const MachineFunction &MF) const override
Returns true if stack probing through inline assembly is requested.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &, EVT) const override
Return the ValueType of the result of SETCC operations.
bool allowTruncateForTailCall(Type *, Type *) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag, const SDLoc &DL, const AsmOperandInfo &Constraint, SelectionDAG &DAG) const override
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
CondMergingParams getJumpConditionMergingParams(Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs) const override
bool useSoftFloat() const override
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
std::pair< SDValue, SDValue > makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName, EVT RetVT, ArrayRef< SDValue > Ops, CallingConv::ID CallConv, bool IsSigned, SDLoc DL, bool DoesNotReturn, bool IsReturnValueUsed) const
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able to emit the call instruction as a tail call.
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, UndefPoisonKind Kind, unsigned Depth) const override
Return true if this function can prove that Op is never poison and, Kind can be used to track poison ...
SystemZTargetLowering(const TargetMachine &TM, const SystemZSubtarget &STI)
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
TargetLowering::ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
TargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const override
Target-specific combining of register parts into its original value.
bool isTruncateFree(Type *, Type *) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
SDValue useLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, MVT VT, SDValue Arg, SDLoc DL, SDValue Chain, bool IsStrict) const
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine the number of bits in the operation that are sign bits.
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes, EVT *LargestVT=nullptr) const override
Determines the optimal series of memory ops to replace the memset / memcpy.
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass into.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
unsigned getStackProbeSize(const MachineFunction &MF) const
XPLINK64 calling convention specific use registers Particular to z/OS when in 64 bit mode.
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes, EVT *LargestVT=nullptr) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::LibcallImpl LibcallImpl, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
unsigned getPointerSize(unsigned AS) const
Get the pointer size for this target.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
user_iterator user_begin()
bool hasOneUse() const
Return true if there is exactly one use of this value.
int getNumOccurrences() const
constexpr ScalarTy getFixedValue() const
A raw_ostream that writes to a file descriptor.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ CTTZ_ZERO_POISON
Bit counting operators with a poisoned result for zero inputs.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
LLVM_ABI bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 o...
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
LLVM_ABI bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
auto m_Cmp()
Matches any compare instruction and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
auto m_Value()
Match an arbitrary value and ignore it.
LLVM_ABI Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
@ System
Synchronized with respect to all concurrently executing threads.
@ MO_ADA_DATA_SYMBOL_ADDR
@ MO_ADA_DIRECT_FUNC_DESC
@ MO_ADA_INDIRECT_FUNC_DESC
const unsigned GR64Regs[16]
const unsigned VR128Regs[32]
const unsigned VR16Regs[32]
const unsigned GR128Regs[16]
const unsigned FP32Regs[16]
const unsigned FP16Regs[16]
const unsigned GR32Regs[16]
const unsigned FP64Regs[16]
const int64_t ELFCallFrameSize
const unsigned VR64Regs[32]
const unsigned FP128Regs[16]
const unsigned VR32Regs[32]
unsigned odd128(bool Is32bit)
const unsigned CCMASK_CMP_GE
static bool isImmHH(uint64_t Val)
const unsigned CCMASK_TEND
const unsigned CCMASK_CS_EQ
const unsigned CCMASK_TBEGIN
const MCPhysReg ELFArgFPRs[ELFNumArgFPRs]
MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_TM_SOME_1
const unsigned CCMASK_LOGICAL_CARRY
const unsigned TDCMASK_NORMAL_MINUS
const unsigned CCMASK_TDC
const unsigned CCMASK_FCMP
const unsigned CCMASK_TM_SOME_0
static bool isImmHL(uint64_t Val)
const unsigned TDCMASK_SUBNORMAL_MINUS
const unsigned TDCMASK_NORMAL_PLUS
const unsigned CCMASK_CMP_GT
const unsigned TDCMASK_QNAN_MINUS
const unsigned CCMASK_ANY
const unsigned CCMASK_ARITH
const unsigned CCMASK_TM_MIXED_MSB_0
const unsigned TDCMASK_SUBNORMAL_PLUS
static bool isImmLL(uint64_t Val)
const unsigned VectorBits
static bool isImmLH(uint64_t Val)
MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
const unsigned TDCMASK_INFINITY_PLUS
unsigned reverseCCMask(unsigned CCMask)
const unsigned CCMASK_TM_ALL_0
const unsigned CCMASK_CMP_LE
const unsigned CCMASK_CMP_O
const unsigned CCMASK_CMP_EQ
const unsigned VectorBytes
const unsigned TDCMASK_INFINITY_MINUS
const unsigned CCMASK_ICMP
const unsigned CCMASK_VCMP_ALL
const unsigned CCMASK_VCMP_NONE
MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_VCMP
const unsigned CCMASK_TM_MIXED_MSB_1
const unsigned CCMASK_TM_MSB_0
const unsigned CCMASK_ARITH_OVERFLOW
const unsigned CCMASK_CS_NE
const unsigned TDCMASK_SNAN_PLUS
const unsigned CCMASK_NONE
const unsigned CCMASK_CMP_LT
const unsigned CCMASK_CMP_NE
const unsigned TDCMASK_ZERO_PLUS
const unsigned TDCMASK_QNAN_PLUS
const unsigned TDCMASK_ZERO_MINUS
unsigned even128(bool Is32bit)
const unsigned CCMASK_TM_ALL_1
const unsigned CCMASK_LOGICAL_BORROW
const unsigned ELFNumArgFPRs
const unsigned CCMASK_CMP_UO
const unsigned CCMASK_LOGICAL
const unsigned CCMASK_TM_MSB_1
const unsigned TDCMASK_SNAN_MINUS
initializer< Ty > init(const Ty &Val)
support::ulittle32_t Word
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
NodeAddr< NodeBase * > Node
NodeAddr< CodeNode * > Code
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
FunctionAddr VTableAddr Value
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
@ Define
Register definition.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
testing::Matcher< const detail::ErrorHolder & > Failed()
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
constexpr T maskLeadingOnes(unsigned N)
Create a bitmask with the N left-most bits set to 1, and all other bits set to 0.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI void dumpBytes(ArrayRef< uint8_t > Bytes, raw_ostream &OS)
Convert ‘Bytes’ to a hex string and output to ‘OS’.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
LLVM_ABI bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
FunctionAddr VTableAddr Count
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
@ Success
The lock was released successfully.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
UndefPoisonKind
Enumeration to track whether we are interested in Undef, Poison, or both.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AddressingMode(bool LongDispl, bool IdxReg)
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SystemZVectorConstantInfo(APInt IntImm)
SmallVector< unsigned, 2 > OpVals
bool isVectorConstantLegal(const SystemZSubtarget &Subtarget)
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.