27#include "llvm/IR/IntrinsicsS390.h"
37#define DEBUG_TYPE "systemz-lower"
43 cl::desc(
"Verify that narrow int args are properly extended per the "
50 : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
51 Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
101 if (Subtarget.hasHighWord())
107 if (Subtarget.hasVector()) {
116 if (Subtarget.hasVectorEnhancements1())
121 if (Subtarget.hasVector()) {
131 if (Subtarget.hasVector())
158 for (
unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
159 I <= MVT::LAST_FP_VALUETYPE;
185 for (
unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
186 I <= MVT::LAST_INTEGER_VALUETYPE;
217 if (Subtarget.hasPopulationCount())
243 (!Subtarget.hasFPExtension() && VT == MVT::i32) ?
Promote :
Custom;
264 if (!Subtarget.hasVectorEnhancements3()) {
291 if (Subtarget.hasVectorEnhancements3()) {
334 {MVT::i8, MVT::i16, MVT::i32},
Legal);
336 {MVT::i8, MVT::i16},
Legal);
357 if (Subtarget.hasMiscellaneousExtensions4()) {
364 if (Subtarget.hasMiscellaneousExtensions3()) {
457 if (VT != MVT::v2i64 || Subtarget.hasVectorEnhancements3()) {
462 if (Subtarget.hasVectorEnhancements3() &&
463 VT != MVT::v16i8 && VT != MVT::v8i16) {
473 if (Subtarget.hasVectorEnhancements1())
507 if (Subtarget.hasVector()) {
529 if (Subtarget.hasVectorEnhancements2()) {
555 for (
MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
569 for (
unsigned I = MVT::FIRST_FP_VALUETYPE;
570 I <= MVT::LAST_FP_VALUETYPE;
578 if (Subtarget.hasFPExtension()) {
606 if (Subtarget.hasFPExtension()) {
622 if (Subtarget.hasVector()) {
673 if (Subtarget.hasVectorEnhancements1()) {
680 if (Subtarget.hasVectorEnhancements1()) {
697 for (
MVT Type : {MVT::f64, MVT::v2f64, MVT::f32, MVT::v4f32, MVT::f128}) {
720 for (
auto VT : { MVT::f32, MVT::f64, MVT::f128,
721 MVT::v4f32, MVT::v2f64 }) {
730 if (!Subtarget.hasVectorEnhancements1()) {
736 if (Subtarget.hasVectorEnhancements1())
746 if (Subtarget.hasVectorEnhancements1()) {
758 if (!Subtarget.hasVector()) {
769 if (Subtarget.isTargetzOS()) {
830 return Subtarget.hasSoftFloat();
835 unsigned &NumIntermediates,
MVT &RegisterVT)
const {
838 IntermediateVT = RegisterVT = MVT::v8f16;
839 return NumIntermediates =
843 Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
890 return Subtarget.hasVectorEnhancements1();
903 if (!Subtarget.hasVector() ||
904 (isFP128 && !Subtarget.hasVectorEnhancements1()))
913 uint64_t Byte = IntBits.lshr(
I * 8).trunc(8).getZExtValue();
920 Opcode = SystemZISD::BYTE_MASK;
926 if (SplatBitSize > 64)
933 OpVals.push_back(((
unsigned) SignedValue));
934 Opcode = SystemZISD::REPLICATE;
941 if (
TII->isRxSBGMask(
Value, SplatBitSize, Start, End)) {
945 OpVals.push_back(Start - (64 - SplatBitSize));
946 OpVals.push_back(End - (64 - SplatBitSize));
947 Opcode = SystemZISD::ROTATE_MASK;
959 uint64_t SplatBitsZ = SplatBits.getZExtValue();
960 uint64_t SplatUndefZ = SplatUndef.getZExtValue();
972 return TryValue(SplatBitsZ | Middle);
981 assert(IntBits.getBitWidth() == 128 &&
"Unsupported APInt.");
987 unsigned HalfSize = Width / 2;
992 if (HighValue != LowValue || 8 > HalfSize)
995 SplatBits = HighValue;
999 SplatBitSize = Width;
1007 BVN->
isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
1011 BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
1016 bool ForCodeSize)
const {
1018 if (Imm.isZero() || Imm.isNegZero())
1039 assert(
TRI->isTypeLegalForClass(*RC, MVT::i32) &&
"Invalid destination!");
1045 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
"Invalid Pointer Size!");
1098 const int64_t FPOffset = 0;
1119 auto *SpecialRegs = Subtarget.getSpecialRegisters();
1120 bool HasFP = Subtarget.getFrameLowering()->hasFP(*MF);
1123 .
addReg(SpecialRegs->getFramePointerRegister())
1131 .
addReg(SpecialRegs->getStackPointerRegister())
1142 .
addReg(SpecialRegs->getStackPointerRegister())
1143 .
addImm(TFL->getBackchainOffset(*MF))
1154 MIB =
BuildMI(*ThisMBB,
MI,
DL,
TII->get(SystemZ::EH_SjLj_Setup))
1158 MIB.
addRegMask(RegInfo->getNoPreservedMask());
1179 MI.eraseFromParent();
1195 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
"Invalid Pointer Size!");
1198 auto *SpecialRegs = Subtarget.getSpecialRegisters();
1205 const int64_t FPOffset = 0;
1217 SpecialRegs->getFramePointerRegister())
1239 SpecialRegs->getStackPointerRegister())
1248 .
addReg(SpecialRegs->getStackPointerRegister())
1249 .
addImm(TFL->getBackchainOffset(*MF))
1255 MI.eraseFromParent();
1286 if (Subtarget.hasInterlockedAccess1() &&
1319 EVT VT =
Y.getValueType();
1322 if (VT == MVT::i32 || VT == MVT::i64)
1323 return Subtarget.hasMiscellaneousExtensions3();
1326 if (VT.
isVector() || VT == MVT::i128)
1327 return Subtarget.hasVector();
1355 bool MVC = Ty->isIntegerTy(8);
1361static AddressingMode
1364 switch (
II->getIntrinsicID()) {
1366 case Intrinsic::memset:
1367 case Intrinsic::memmove:
1368 case Intrinsic::memcpy:
1375 if (SingleUser->getParent() ==
I->getParent()) {
1378 if (
C->getBitWidth() <= 64 &&
1388 if (LoadI->hasOneUse() && LoadI->getParent() ==
I->getParent())
1402 I->getOperand(0)->getType());
1404 bool IsVectorAccess = MemAccessTy->isVectorTy();
1409 Value *DataOp =
I->getOperand(0);
1411 IsVectorAccess =
true;
1417 User *LoadUser = *
I->user_begin();
1419 IsVectorAccess =
true;
1422 if (IsFPAccess || IsVectorAccess)
1441 Subtarget.hasVector() && (Ty->isVectorTy() || Ty->isIntegerTy(128));
1451 return AM.
Scale == 0;
1458 LLVMContext &Context, std::vector<EVT> &MemOps,
unsigned Limit,
1459 const MemOp &
Op,
unsigned DstAS,
unsigned SrcAS,
1460 const AttributeList &FuncAttributes,
EVT *LargestVT)
const {
1461 const int MVCFastLen = 16;
1463 if (Limit != ~
unsigned(0)) {
1465 if (
Op.isMemcpy() &&
Op.allowOverlap() &&
Op.size() <= MVCFastLen)
1467 if (
Op.isMemset() &&
Op.size() - 1 <= MVCFastLen)
1469 if (
Op.isZeroMemset())
1474 Context, MemOps, Limit,
Op, DstAS, SrcAS, FuncAttributes, LargestVT);
1479 const AttributeList &FuncAttributes)
const {
1480 return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;
1484 if (!FromType->isIntegerTy() || !ToType->
isIntegerTy())
1486 unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();
1488 return FromBits > ToBits;
1496 return FromBits > ToBits;
1505 if (Constraint.
size() == 1) {
1506 switch (Constraint[0]) {
1532 }
else if (Constraint.
size() == 2 && Constraint[0] ==
'Z') {
1533 switch (Constraint[1]) {
1544 if (
StringRef(
"{@cc}").compare(Constraint) == 0)
1554 Value *CallOperandVal = Info.CallOperandVal;
1557 if (!CallOperandVal)
1561 switch (*Constraint) {
1580 if (Subtarget.hasVector())
1611 if (
C->getZExtValue() == 0x7fffffff)
1621static std::pair<unsigned, const TargetRegisterClass *>
1623 const unsigned *Map,
unsigned Size) {
1624 assert(*(Constraint.
end()-1) ==
'}' &&
"Missing '}'");
1625 if (isdigit(Constraint[2])) {
1630 return std::make_pair(Map[Index], RC);
1632 return std::make_pair(0U,
nullptr);
1635std::pair<unsigned, const TargetRegisterClass *>
1638 if (Constraint.
size() == 1) {
1640 switch (Constraint[0]) {
1645 return std::make_pair(0U, &SystemZ::GR64BitRegClass);
1647 return std::make_pair(0U, &SystemZ::GR128BitRegClass);
1648 return std::make_pair(0U, &SystemZ::GR32BitRegClass);
1652 return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
1653 else if (VT == MVT::i128)
1654 return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
1655 return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
1658 return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
1663 return std::make_pair(0U, &SystemZ::FP16BitRegClass);
1665 return std::make_pair(0U, &SystemZ::FP64BitRegClass);
1667 return std::make_pair(0U, &SystemZ::FP128BitRegClass);
1668 return std::make_pair(0U, &SystemZ::FP32BitRegClass);
1673 if (Subtarget.hasVector()) {
1675 return std::make_pair(0U, &SystemZ::VR16BitRegClass);
1677 return std::make_pair(0U, &SystemZ::VR32BitRegClass);
1679 return std::make_pair(0U, &SystemZ::VR64BitRegClass);
1680 return std::make_pair(0U, &SystemZ::VR128BitRegClass);
1689 auto getVTSizeInBits = [&VT]() {
1697 if (Constraint[1] ==
'r') {
1698 if (getVTSizeInBits() == 32)
1701 if (getVTSizeInBits() == 128)
1707 if (Constraint[1] ==
'f') {
1709 return std::make_pair(
1711 if (getVTSizeInBits() == 16)
1714 if (getVTSizeInBits() == 32)
1717 if (getVTSizeInBits() == 128)
1723 if (Constraint[1] ==
'v') {
1724 if (!Subtarget.hasVector())
1725 return std::make_pair(
1727 if (getVTSizeInBits() == 16)
1730 if (getVTSizeInBits() == 32)
1733 if (getVTSizeInBits() == 64)
1739 if (Constraint[1] ==
'@') {
1740 if (
StringRef(
"{@cc}").compare(Constraint) == 0)
1741 return std::make_pair(SystemZ::CC, &SystemZ::CCRRegClass);
1754 .
Case(
"r4", Subtarget.isTargetXPLINK64() ? SystemZ::R4D
1755 : SystemZ::NoRegister)
1757 Subtarget.isTargetELF() ? SystemZ::R15D : SystemZ::NoRegister)
1764 const Constant *PersonalityFn)
const {
1765 return Subtarget.isTargetXPLINK64() ? SystemZ::R1D : SystemZ::R6D;
1769 const Constant *PersonalityFn)
const {
1770 return Subtarget.isTargetXPLINK64() ? SystemZ::R2D : SystemZ::R7D;
1785 if (
StringRef(
"{@cc}").compare(OpInfo.ConstraintCode) != 0)
1789 if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
1790 OpInfo.ConstraintVT.getSizeInBits() < 8)
1805 if (Constraint.
size() == 1) {
1806 switch (Constraint[0]) {
1811 Op.getValueType()));
1818 Op.getValueType()));
1825 C->getSExtValue(),
SDLoc(
Op),
Op.getValueType()));
1832 C->getSExtValue(),
SDLoc(
Op),
Op.getValueType()));
1837 if (
C->getZExtValue() == 0x7fffffff)
1839 Op.getValueType()));
1850#include "SystemZGenCallingConv.inc"
1854 static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
1860 Type *ToType)
const {
1923 if (BitCastToType == MVT::v2i64)
1950 MVT::Untyped,
Hi,
Lo);
1974 unsigned NumParts,
MVT PartVT, std::optional<CallingConv::ID> CC)
const {
1976 if (ValueVT.
getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
1987 MVT PartVT,
EVT ValueVT, std::optional<CallingConv::ID> CC)
const {
1988 if (ValueVT.
getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
1999template <
class ArgTy>
2002 MVT &PartVT,
unsigned &NumParts) {
2003 if (!Args[
I].Flags.isSplit())
2007 PartVT = ArgLocs[
I].getValVT();
2009 for (
unsigned PartIdx =
I + 1;; ++PartIdx) {
2010 assert(PartIdx != ArgLocs.
size() &&
"SplitEnd not found.");
2011 assert(ArgLocs[PartIdx].getValVT() == PartVT &&
"Unsupported split.");
2013 if (Args[PartIdx].Flags.isSplitEnd())
2037 unsigned NumFixedGPRs = 0;
2038 unsigned NumFixedFPRs = 0;
2039 for (
unsigned I = 0, E = ArgLocs.
size();
I != E; ++
I) {
2052 RC = &SystemZ::GR32BitRegClass;
2056 RC = &SystemZ::GR64BitRegClass;
2060 RC = &SystemZ::FP16BitRegClass;
2064 RC = &SystemZ::FP32BitRegClass;
2068 RC = &SystemZ::FP64BitRegClass;
2072 RC = &SystemZ::FP128BitRegClass;
2081 RC = &SystemZ::VR128BitRegClass;
2095 if (Subtarget.isTargetXPLINK64()) {
2098 ArgSPOffset += XPRegs.getCallFrameSize();
2109 unsigned SlotOffs = VA.
getLocVT() == MVT::f16 ? 6 : 4;
2113 ArgValue = DAG.
getLoad(LocVT,
DL, Chain, FIN,
2127 for (
unsigned PartIdx = 1; PartIdx < NumParts; ++PartIdx) {
2130 unsigned PartOffset = Ins[
I].PartOffset;
2135 assert(PartOffset &&
"Offset should be non-zero.");
2142 if (IsVarArg && Subtarget.isTargetXPLINK64()) {
2148 Subtarget.getSpecialRegisters());
2154 int64_t VarArgOffset = CCInfo.
getStackSize() + Regs->getCallFrameSize();
2159 if (IsVarArg && Subtarget.isTargetELF()) {
2172 int64_t RegSaveOffset =
2187 &SystemZ::FP64BitRegClass);
2199 if (Subtarget.isTargetXPLINK64()) {
2204 Subtarget.getSpecialRegisters());
2205 MRI.
addLiveIn(Regs->getADARegister(), ADAvReg);
2217 for (
unsigned I = 0,
E = ArgLocs.
size();
I !=
E; ++
I) {
2224 if (
Reg == SystemZ::R6H ||
Reg == SystemZ::R6L ||
Reg == SystemZ::R6D)
2226 if (Outs[
I].Flags.isSwiftSelf() || Outs[
I].Flags.isSwiftError())
2233 unsigned Offset,
bool LoadAdr =
false) {
2256 bool LoadAddr =
false;
2278 unsigned ADADelta = 0;
2279 unsigned EPADelta = 8;
2285 bool IsInternal = (
G->getGlobal()->hasInternalLinkage() ||
2286 G->getGlobal()->hasPrivateLinkage());
2293 Callee = DAG.
getNode(SystemZISD::PCREL_WRAPPER,
DL, PtrVT, Callee);
2339 if (Subtarget.isTargetXPLINK64())
2343 verifyNarrowIntegerArgs_Call(Outs, &MF.
getFunction(), Callee);
2347 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, Ctx);
2366 for (
unsigned I = 0, E = ArgLocs.
size();
I != E; ++
I) {
2374 unsigned NumParts = 1;
2378 SlotVT = Outs[
I].VT;
2385 DAG.
getStore(Chain,
DL, ArgValue, SpillSlot, StackPtrInfo));
2388 assert(Outs[
I].PartOffset == 0);
2389 for (
unsigned PartIdx = 1; PartIdx < NumParts; ++PartIdx) {
2392 unsigned PartOffset = Outs[
I].PartOffset;
2398 assert(PartOffset &&
"Offset should be non-zero.");
2400 SlotVT.
getStoreSize()) &&
"Not enough space for argument part!");
2402 ArgValue = SpillSlot;
2419 if (!StackPtr.getNode())
2426 else if (VA.
getLocVT() == MVT::f16)
2439 if (Subtarget.isTargetXPLINK64() && VA.
needsCustom()) {
2443 RegsToPass.
push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));
2449 if (!MemOpChains.
empty())
2457 if (Subtarget.isTargetXPLINK64()) {
2462 ->getAddressOfCalleeRegister();
2465 Callee = DAG.
getRegister(CalleeReg, Callee.getValueType());
2472 Callee = DAG.
getNode(SystemZISD::PCREL_WRAPPER,
DL, PtrVT, Callee);
2475 Callee = DAG.
getNode(SystemZISD::PCREL_WRAPPER,
DL, PtrVT, Callee);
2476 }
else if (IsTailCall) {
2479 Callee = DAG.
getRegister(SystemZ::R1D, Callee.getValueType());
2484 for (
const auto &[Reg,
N] : RegsToPass) {
2491 Ops.push_back(Chain);
2492 Ops.push_back(Callee);
2496 for (
const auto &[Reg,
N] : RegsToPass)
2501 const uint32_t *Mask =
TRI->getCallPreservedMask(MF, CallConv);
2502 assert(Mask &&
"Missing call preserved mask for calling convention");
2507 Ops.push_back(Glue);
2516 Chain = DAG.
getNode(SystemZISD::CALL,
DL, NodeTys,
Ops);
2526 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);
2533 VA.getLocVT(), Glue);
2550 bool DoesNotReturn,
bool IsReturnValueUsed)
const {
2552 Args.reserve(
Ops.size());
2558 Entry.IsZExt = !Entry.IsSExt;
2559 Args.push_back(Entry);
2570 .
setCallee(CallConv, RetTy, Callee, std::move(Args))
2581 const Type *RetTy)
const {
2584 for (
auto &Out : Outs)
2585 if (Out.ArgVT.isScalarInteger() && Out.ArgVT.getSizeInBits() > 64)
2589 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Context);
2590 return RetCCInfo.
CheckReturn(Outs, RetCC_SystemZ);
2602 verifyNarrowIntegerArgs_Ret(Outs, &MF.
getFunction());
2610 if (RetLocs.
empty())
2611 return DAG.
getNode(SystemZISD::RET_GLUE,
DL, MVT::Other, Chain);
2620 for (
unsigned I = 0, E = RetLocs.
size();
I != E; ++
I) {
2642 return DAG.
getNode(SystemZISD::RET_GLUE,
DL, MVT::Other, RetOps);
2649 unsigned &CCValid) {
2650 unsigned Id =
Op.getConstantOperandVal(1);
2652 case Intrinsic::s390_tbegin:
2653 Opcode = SystemZISD::TBEGIN;
2657 case Intrinsic::s390_tbegin_nofloat:
2658 Opcode = SystemZISD::TBEGIN_NOFLOAT;
2662 case Intrinsic::s390_tend:
2663 Opcode = SystemZISD::TEND;
2676 unsigned Id =
Op.getConstantOperandVal(0);
2678 case Intrinsic::s390_vpkshs:
2679 case Intrinsic::s390_vpksfs:
2680 case Intrinsic::s390_vpksgs:
2681 Opcode = SystemZISD::PACKS_CC;
2685 case Intrinsic::s390_vpklshs:
2686 case Intrinsic::s390_vpklsfs:
2687 case Intrinsic::s390_vpklsgs:
2688 Opcode = SystemZISD::PACKLS_CC;
2692 case Intrinsic::s390_vceqbs:
2693 case Intrinsic::s390_vceqhs:
2694 case Intrinsic::s390_vceqfs:
2695 case Intrinsic::s390_vceqgs:
2696 case Intrinsic::s390_vceqqs:
2697 Opcode = SystemZISD::VICMPES;
2701 case Intrinsic::s390_vchbs:
2702 case Intrinsic::s390_vchhs:
2703 case Intrinsic::s390_vchfs:
2704 case Intrinsic::s390_vchgs:
2705 case Intrinsic::s390_vchqs:
2706 Opcode = SystemZISD::VICMPHS;
2710 case Intrinsic::s390_vchlbs:
2711 case Intrinsic::s390_vchlhs:
2712 case Intrinsic::s390_vchlfs:
2713 case Intrinsic::s390_vchlgs:
2714 case Intrinsic::s390_vchlqs:
2715 Opcode = SystemZISD::VICMPHLS;
2719 case Intrinsic::s390_vtm:
2720 Opcode = SystemZISD::VTM;
2724 case Intrinsic::s390_vfaebs:
2725 case Intrinsic::s390_vfaehs:
2726 case Intrinsic::s390_vfaefs:
2727 Opcode = SystemZISD::VFAE_CC;
2731 case Intrinsic::s390_vfaezbs:
2732 case Intrinsic::s390_vfaezhs:
2733 case Intrinsic::s390_vfaezfs:
2734 Opcode = SystemZISD::VFAEZ_CC;
2738 case Intrinsic::s390_vfeebs:
2739 case Intrinsic::s390_vfeehs:
2740 case Intrinsic::s390_vfeefs:
2741 Opcode = SystemZISD::VFEE_CC;
2745 case Intrinsic::s390_vfeezbs:
2746 case Intrinsic::s390_vfeezhs:
2747 case Intrinsic::s390_vfeezfs:
2748 Opcode = SystemZISD::VFEEZ_CC;
2752 case Intrinsic::s390_vfenebs:
2753 case Intrinsic::s390_vfenehs:
2754 case Intrinsic::s390_vfenefs:
2755 Opcode = SystemZISD::VFENE_CC;
2759 case Intrinsic::s390_vfenezbs:
2760 case Intrinsic::s390_vfenezhs:
2761 case Intrinsic::s390_vfenezfs:
2762 Opcode = SystemZISD::VFENEZ_CC;
2766 case Intrinsic::s390_vistrbs:
2767 case Intrinsic::s390_vistrhs:
2768 case Intrinsic::s390_vistrfs:
2769 Opcode = SystemZISD::VISTR_CC;
2773 case Intrinsic::s390_vstrcbs:
2774 case Intrinsic::s390_vstrchs:
2775 case Intrinsic::s390_vstrcfs:
2776 Opcode = SystemZISD::VSTRC_CC;
2780 case Intrinsic::s390_vstrczbs:
2781 case Intrinsic::s390_vstrczhs:
2782 case Intrinsic::s390_vstrczfs:
2783 Opcode = SystemZISD::VSTRCZ_CC;
2787 case Intrinsic::s390_vstrsb:
2788 case Intrinsic::s390_vstrsh:
2789 case Intrinsic::s390_vstrsf:
2790 Opcode = SystemZISD::VSTRS_CC;
2794 case Intrinsic::s390_vstrszb:
2795 case Intrinsic::s390_vstrszh:
2796 case Intrinsic::s390_vstrszf:
2797 Opcode = SystemZISD::VSTRSZ_CC;
2801 case Intrinsic::s390_vfcedbs:
2802 case Intrinsic::s390_vfcesbs:
2803 Opcode = SystemZISD::VFCMPES;
2807 case Intrinsic::s390_vfchdbs:
2808 case Intrinsic::s390_vfchsbs:
2809 Opcode = SystemZISD::VFCMPHS;
2813 case Intrinsic::s390_vfchedbs:
2814 case Intrinsic::s390_vfchesbs:
2815 Opcode = SystemZISD::VFCMPHES;
2819 case Intrinsic::s390_vftcidb:
2820 case Intrinsic::s390_vftcisb:
2821 Opcode = SystemZISD::VFTCI;
2825 case Intrinsic::s390_tdc:
2826 Opcode = SystemZISD::TDC;
2839 unsigned NumOps =
Op.getNumOperands();
2842 Ops.push_back(
Op.getOperand(0));
2844 Ops.push_back(
Op.getOperand(
I));
2846 assert(
Op->getNumValues() == 2 &&
"Expected only CC result and chain");
2860 unsigned NumOps =
Op.getNumOperands();
2866 assert((
Op.getConstantOperandVal(0) == Intrinsic::s390_tdc &&
I == 1) &&
2867 "Unhandled intrinsic with f16 operand.");
2870 Ops.push_back(CurrOper);
2884 case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
2885 case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
2886 case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
2912 if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
2915 int64_t
Value = ConstOp1->getSExtValue();
2931 if (!
C.Op0.hasOneUse() ||
2938 unsigned NumBits = Load->getMemoryVT().getSizeInBits();
2939 if ((NumBits != 8 && NumBits != 16) ||
2940 NumBits != Load->getMemoryVT().getStoreSizeInBits())
2946 if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
2949 uint64_t Mask = (1 << NumBits) - 1;
2952 int64_t SignedValue = ConstOp1->getSExtValue();
2959 }
else if (NumBits == 8) {
2985 if (
C.Op0.getValueType() != MVT::i32 ||
2986 Load->getExtensionType() != ExtType) {
2988 Load->getBasePtr(), Load->getPointerInfo(),
2989 Load->getMemoryVT(), Load->getAlign(),
2990 Load->getMemOperand()->getFlags());
2996 if (
C.Op1.getValueType() != MVT::i32 ||
2997 Value != ConstOp1->getZExtValue())
3007 if (Load->getMemoryVT() == MVT::i8)
3010 switch (Load->getExtensionType()) {
3027 if (
C.Op0.getValueType() == MVT::i128)
3029 if (
C.Op0.getValueType() == MVT::f128)
3041 if (ConstOp1 && ConstOp1->getZExtValue() == 0)
3070 unsigned Opcode0 =
C.Op0.getOpcode();
3077 C.Op0.getConstantOperandVal(1) == 0xffffffff)
3092 ((
N->getOperand(0) ==
C.Op0 &&
N->getOperand(1) ==
C.Op1) ||
3093 (
N->getOperand(0) ==
C.Op1 &&
N->getOperand(1) ==
C.Op0))) {
3115 if (C1 && C1->isZero()) {
3134 if (
C.Op0.getOpcode() ==
ISD::SHL &&
C.Op0.getValueType() == MVT::i64 &&
3137 if (C1 && C1->getZExtValue() == 32) {
3138 SDValue ShlOp0 =
C.Op0.getOperand(0);
3157 C.Op0.getOperand(0).getOpcode() ==
ISD::LOAD &&
3160 C.Op1->getAsZExtVal() == 0) {
3162 if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
3163 C.Op0.getValueSizeInBits().getFixedValue()) {
3164 unsigned Type = L->getExtensionType();
3167 C.Op0 =
C.Op0.getOperand(0);
3181 uint64_t Amount = Shift->getZExtValue();
3182 if (Amount >=
N.getValueSizeInBits())
3197 unsigned ICmpType) {
3198 assert(Mask != 0 &&
"ANDs with zero should have been removed by now");
3220 if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <=
Low) {
3226 if (EffectivelyUnsigned && CmpVal <
Low) {
3234 if (CmpVal == Mask) {
3240 if (EffectivelyUnsigned && CmpVal >= Mask -
Low && CmpVal < Mask) {
3246 if (EffectivelyUnsigned && CmpVal > Mask -
Low && CmpVal <= Mask) {
3254 if (EffectivelyUnsigned && CmpVal >= Mask -
High && CmpVal <
High) {
3260 if (EffectivelyUnsigned && CmpVal > Mask -
High && CmpVal <=
High) {
3289 if (
C.Op0.getValueType() == MVT::i128) {
3295 if (Mask && Mask->getAPIntValue() == 0) {
3296 C.Opcode = SystemZISD::VTM;
3313 uint64_t CmpVal = ConstOp1->getZExtValue();
3320 NewC.Op0 =
C.Op0.getOperand(0);
3321 NewC.Op1 =
C.Op0.getOperand(1);
3325 MaskVal = Mask->getZExtValue();
3345 MaskVal = -(CmpVal & -CmpVal);
3354 unsigned NewCCMask, ShiftVal;
3358 (MaskVal >> ShiftVal != 0) &&
3359 ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
3361 MaskVal >> ShiftVal,
3365 MaskVal >>= ShiftVal;
3369 (MaskVal << ShiftVal != 0) &&
3370 ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
3372 MaskVal << ShiftVal,
3376 MaskVal <<= ShiftVal;
3385 C.Opcode = SystemZISD::TM;
3387 if (Mask && Mask->getZExtValue() == MaskVal)
3392 C.CCMask = NewCCMask;
3398 if (
C.Opcode != SystemZISD::ICMP)
3400 if (
C.Op0.getValueType() != MVT::i128)
3411 Src = Src.getOperand(0);
3414 unsigned Opcode = 0;
3415 if (Src.hasOneUse()) {
3416 switch (Src.getOpcode()) {
3417 case SystemZISD::VICMPE: Opcode = SystemZISD::VICMPES;
break;
3418 case SystemZISD::VICMPH: Opcode = SystemZISD::VICMPHS;
break;
3419 case SystemZISD::VICMPHL: Opcode = SystemZISD::VICMPHLS;
break;
3420 case SystemZISD::VFCMPE: Opcode = SystemZISD::VFCMPES;
break;
3421 case SystemZISD::VFCMPH: Opcode = SystemZISD::VFCMPHS;
break;
3422 case SystemZISD::VFCMPHE: Opcode = SystemZISD::VFCMPHES;
break;
3428 C.Op0 = Src->getOperand(0);
3429 C.Op1 = Src->getOperand(1);
3433 C.CCMask ^=
C.CCValid;
3445 C.Opcode = SystemZISD::VICMPES;
3457 bool Swap =
false, Invert =
false;
3469 C.Opcode = SystemZISD::UCMP128HI;
3471 C.Opcode = SystemZISD::SCMP128HI;
3476 C.CCMask ^=
C.CCValid;
3487 if (!Mask || Mask->getValueSizeInBits(0) > 64)
3490 if ((~Known.
Zero).getZExtValue() & ~Mask->getZExtValue())
3493 C.Op0 =
C.Op0.getOperand(0);
3505 C.CCValid = CCValid;
3508 C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
3511 C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
3515 C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
3518 C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
3522 C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
3525 C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
3528 C.CCMask &= CCValid;
3536 bool IsSignaling =
false) {
3539 unsigned Opcode, CCValid;
3551 Comparison
C(CmpOp0, CmpOp1, Chain);
3553 if (
C.Op0.getValueType().isFloatingPoint()) {
3556 C.Opcode = SystemZISD::FCMP;
3557 else if (!IsSignaling)
3558 C.Opcode = SystemZISD::STRICT_FCMP;
3560 C.Opcode = SystemZISD::STRICT_FCMPS;
3565 C.Opcode = SystemZISD::ICMP;
3600 if (!
C.Op1.getNode()) {
3602 switch (
C.Op0.getOpcode()) {
3613 if (
C.Opcode == SystemZISD::ICMP)
3614 return DAG.
getNode(SystemZISD::ICMP,
DL, MVT::i32,
C.Op0,
C.Op1,
3616 if (
C.Opcode == SystemZISD::TM) {
3619 return DAG.
getNode(SystemZISD::TM,
DL, MVT::i32,
C.Op0,
C.Op1,
3622 if (
C.Opcode == SystemZISD::VICMPES ||
3623 C.Opcode == SystemZISD::VICMPHS ||
3624 C.Opcode == SystemZISD::VICMPHLS ||
3625 C.Opcode == SystemZISD::VFCMPES ||
3626 C.Opcode == SystemZISD::VFCMPHS ||
3627 C.Opcode == SystemZISD::VFCMPHES) {
3628 EVT IntVT =
C.Op0.getValueType().changeVectorElementTypeToInteger();
3635 return DAG.
getNode(
C.Opcode,
DL, VTs,
C.Chain,
C.Op0,
C.Op1);
3637 return DAG.
getNode(
C.Opcode,
DL, MVT::i32,
C.Op0,
C.Op1);
3646 Op0 = DAG.
getNode(Extend,
DL, MVT::i64, Op0);
3647 Op1 = DAG.
getNode(Extend,
DL, MVT::i64, Op1);
3672 unsigned CCValid,
unsigned CCMask) {
3677 return DAG.
getNode(SystemZISD::SELECT_CCMASK,
DL, MVT::i32,
Ops);
3755 int Mask[] = { Start, -1, Start + 1, -1 };
3759 return DAG.
getNode(SystemZISD::STRICT_VEXTEND,
DL, VTs, Chain,
Op);
3761 return DAG.
getNode(SystemZISD::VEXTEND,
DL, MVT::v2f64,
Op);
3775 !Subtarget.hasVectorEnhancements1()) {
3781 SDVTList VTs = DAG.
getVTList(MVT::v2i64, MVT::Other);
3794 return DAG.
getNode(SystemZISD::PACK,
DL, VT, HRes, LRes);
3797 SDVTList VTs = DAG.
getVTList(VT, MVT::Other);
3798 return DAG.
getNode(Opcode,
DL, VTs, Chain, CmpOp0, CmpOp1);
3800 return DAG.
getNode(Opcode,
DL, VT, CmpOp0, CmpOp1);
3813 bool IsSignaling)
const {
3816 assert (!IsSignaling || Chain);
3819 bool Invert =
false;
3827 assert(IsFP &&
"Unexpected integer comparison");
3829 DL, VT, CmpOp1, CmpOp0, Chain);
3831 DL, VT, CmpOp0, CmpOp1, Chain);
3835 LT.getValue(1),
GE.getValue(1));
3844 assert(IsFP &&
"Unexpected integer comparison");
3846 DL, VT, CmpOp1, CmpOp0, Chain);
3848 DL, VT, CmpOp0, CmpOp1, Chain);
3852 LT.getValue(1),
GT.getValue(1));
3873 Cmp = getVectorCmp(DAG, Opcode,
DL, VT, CmpOp0, CmpOp1, Chain);
3877 Cmp = getVectorCmp(DAG, Opcode,
DL, VT, CmpOp1, CmpOp0, Chain);
3882 Chain =
Cmp.getValue(1);
3890 if (Chain && Chain.
getNode() !=
Cmp.getNode()) {
3903 EVT VT =
Op.getValueType();
3905 return lowerVectorSETCC(DAG,
DL, VT, CC, CmpOp0, CmpOp1);
3914 bool IsSignaling)
const {
3920 EVT VT =
Op.getNode()->getValueType(0);
3922 SDValue Res = lowerVectorSETCC(DAG,
DL, VT, CC, CmpOp0, CmpOp1,
3923 Chain, IsSignaling);
3945 SystemZISD::BR_CCMASK,
DL,
Op.getValueType(),
Op.getOperand(0),
3979 C.CCMask ^=
C.CCValid;
3987 Op = SystemZISD::VICMPE;
3991 Op = SystemZISD::VICMPHL;
3993 Op = SystemZISD::VICMPH;
4032 C.Op1->getAsZExtVal() == 0) {
4039 if (Subtarget.hasVectorEnhancements3() &&
4040 C.Opcode == SystemZISD::ICMP &&
4041 C.Op0.getValueType() == MVT::i128 &&
4051 return DAG.
getNode(SystemZISD::SELECT_CCMASK,
DL,
Op.getValueType(),
Ops);
4057 const GlobalValue *GV =
Node->getGlobal();
4063 if (Subtarget.isPC32DBLSymbol(GV, CM)) {
4066 uint64_t Anchor =
Offset & ~uint64_t(0xfff);
4085 }
else if (Subtarget.isTargetELF()) {
4090 }
else if (Subtarget.isTargetzOS()) {
4121 Chain = DAG.
getCopyToReg(Chain,
DL, SystemZ::R2D, GOTOffset, Glue);
4126 Ops.push_back(Chain);
4128 Node->getValueType(0),
4137 const TargetRegisterInfo *
TRI = Subtarget.getRegisterInfo();
4138 const uint32_t *
Mask =
4140 assert(Mask &&
"Missing call preserved mask for calling convention");
4144 Ops.push_back(Glue);
4147 SDVTList NodeTys = DAG.
getVTList(MVT::Other, MVT::Glue);
4155SDValue SystemZTargetLowering::lowerThreadPointer(
const SDLoc &
DL,
4179 const GlobalValue *GV =
Node->getGlobal();
4187 SDValue TP = lowerThreadPointer(
DL, DAG);
4194 SystemZConstantPoolValue *CPV =
4203 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL,
Offset);
4209 SystemZConstantPoolValue *CPV =
4218 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL,
Offset);
4223 SystemZMachineFunctionInfo* MFI =
4252 SystemZConstantPoolValue *CPV =
4286 return DAG.
getNode(SystemZISD::PCREL_WRAPPER,
DL, PtrVT, Result);
4303 return DAG.
getNode(SystemZISD::PCREL_WRAPPER,
DL, PtrVT, Result);
4308 auto *TFL = Subtarget.getFrameLowering<SystemZFrameLowering>();
4310 MachineFrameInfo &MFI = MF.getFrameInfo();
4314 unsigned Depth =
Op.getConstantOperandVal(0);
4321 int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);
4326 if (!MF.getSubtarget<SystemZSubtarget>().hasBackChain())
4332 MachinePointerInfo());
4347 unsigned Depth =
Op.getConstantOperandVal(0);
4352 if (!MF.
getSubtarget<SystemZSubtarget>().hasBackChain())
4355 SDValue FrameAddr = lowerFRAMEADDR(
Op, DAG);
4356 const auto *TFL = Subtarget.getFrameLowering<SystemZFrameLowering>();
4357 int Offset = TFL->getReturnAddressOffset(MF);
4361 MachinePointerInfo());
4366 SystemZCallingConventionRegisters *CCR = Subtarget.getSpecialRegisters();
4368 &SystemZ::GR64BitRegClass);
4376 EVT InVT =
In.getValueType();
4377 EVT ResVT =
Op.getValueType();
4385 LoadN->getBasePtr(), LoadN->getMemOperand());
4391 if (InVT == MVT::i32 && ResVT == MVT::f32) {
4393 if (Subtarget.hasHighWord()) {
4397 MVT::i64,
SDValue(U64, 0), In);
4405 DL, MVT::f32, Out64);
4407 if (InVT == MVT::f32 && ResVT == MVT::i32) {
4410 MVT::f64,
SDValue(U64, 0), In);
4412 if (Subtarget.hasHighWord())
4425 if (Subtarget.isTargetXPLINK64())
4426 return lowerVASTART_XPLINK(
Op, DAG);
4428 return lowerVASTART_ELF(
Op, DAG);
4434 SystemZMachineFunctionInfo *FuncInfo =
4435 MF.
getInfo<SystemZMachineFunctionInfo>();
4445 MachinePointerInfo(SV));
4451 SystemZMachineFunctionInfo *FuncInfo =
4452 MF.
getInfo<SystemZMachineFunctionInfo>();
4461 const unsigned NumFields = 4;
4472 for (
unsigned I = 0;
I < NumFields; ++
I) {
4477 MemOps[
I] = DAG.
getStore(Chain,
DL, Fields[
I], FieldAddr,
4478 MachinePointerInfo(SV,
Offset));
4496 Align(8),
false,
false,
4497 nullptr, std::nullopt, MachinePointerInfo(DstSV),
4498 MachinePointerInfo(SrcSV));
4502SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(
SDValue Op,
4504 if (Subtarget.isTargetXPLINK64())
4505 return lowerDYNAMIC_STACKALLOC_XPLINK(
Op, DAG);
4507 return lowerDYNAMIC_STACKALLOC_ELF(
Op, DAG);
4511SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(
SDValue Op,
4513 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
4523 uint64_t AlignVal = (RealignOpt ?
Align->getAsZExtVal() : 0);
4526 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
4527 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
4533 if (ExtraAlignSpace)
4537 bool IsSigned =
false;
4538 bool DoesNotReturn =
false;
4539 bool IsReturnValueUsed =
false;
4540 EVT VT =
Op.getValueType();
4550 auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>();
4562 if (ExtraAlignSpace) {
4574SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(
SDValue Op,
4576 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
4579 bool StoreBackchain = MF.
getSubtarget<SystemZSubtarget>().hasBackChain();
4588 uint64_t AlignVal = (RealignOpt ?
Align->getAsZExtVal() : 0);
4591 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
4592 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
4603 Backchain = DAG.
getLoad(MVT::i64,
DL, Chain, getBackchainAddress(OldSP, DAG),
4604 MachinePointerInfo());
4607 if (ExtraAlignSpace)
4614 NewSP = DAG.
getNode(SystemZISD::PROBED_ALLOCA,
DL,
4615 DAG.
getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace);
4631 if (RequiredAlign > StackAlign) {
4641 Chain = DAG.
getStore(Chain,
DL, Backchain, getBackchainAddress(NewSP, DAG),
4642 MachinePointerInfo());
4648SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
4652 return DAG.
getNode(SystemZISD::ADJDYNALLOC,
DL, MVT::i64);
4657 unsigned Opcode)
const {
4658 EVT VT =
Op.getValueType();
4664 assert(Subtarget.hasMiscellaneousExtensions2());
4669 Op.getOperand(0),
Op.getOperand(1), Even, Odd);
4675 EVT VT =
Op.getValueType();
4683 else if (Subtarget.hasMiscellaneousExtensions2())
4688 Op.getOperand(0),
Op.getOperand(1),
Ops[1],
Ops[0]);
4723 EVT VT =
Op.getValueType();
4736 Op.getOperand(0),
Op.getOperand(1),
Ops[1],
Ops[0]);
4744 EVT VT =
Op.getValueType();
4764 EVT VT =
Op.getValueType();
4771 Op.getOperand(0),
Op.getOperand(1),
Ops[1],
Ops[0]);
4776 assert(
Op.getValueType() == MVT::i64 &&
"Should be 64-bit operation");
4788 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
4790 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
4827 MVT::i64, HighOp, Low32);
4833 SDNode *
N =
Op.getNode();
4838 if (
N->getValueType(0) == MVT::i128) {
4839 unsigned BaseOp = 0;
4840 unsigned FlagOp = 0;
4841 bool IsBorrow =
false;
4842 switch (
Op.getOpcode()) {
4846 FlagOp = SystemZISD::VACC;
4850 FlagOp = SystemZISD::VSCBI;
4865 unsigned BaseOp = 0;
4866 unsigned CCValid = 0;
4867 unsigned CCMask = 0;
4869 switch (
Op.getOpcode()) {
4872 BaseOp = SystemZISD::SADDO;
4877 BaseOp = SystemZISD::SSUBO;
4882 BaseOp = SystemZISD::UADDO;
4887 BaseOp = SystemZISD::USUBO;
4893 SDVTList VTs = DAG.
getVTList(
N->getValueType(0), MVT::i32);
4897 if (
N->getValueType(1) == MVT::i1)
4923 SDNode *
N =
Op.getNode();
4924 MVT VT =
N->getSimpleValueType(0);
4935 if (VT == MVT::i128) {
4936 unsigned BaseOp = 0;
4937 unsigned FlagOp = 0;
4938 bool IsBorrow =
false;
4939 switch (
Op.getOpcode()) {
4942 BaseOp = SystemZISD::VAC;
4943 FlagOp = SystemZISD::VACCC;
4946 BaseOp = SystemZISD::VSBI;
4947 FlagOp = SystemZISD::VSBCBI;
4966 unsigned BaseOp = 0;
4967 unsigned CCValid = 0;
4968 unsigned CCMask = 0;
4970 switch (
Op.getOpcode()) {
4976 BaseOp = SystemZISD::ADDCARRY;
4984 BaseOp = SystemZISD::SUBCARRY;
4995 SDVTList VTs = DAG.
getVTList(VT, MVT::i32);
4999 if (
N->getValueType(1) == MVT::i1)
5007 EVT VT =
Op.getValueType();
5009 Op =
Op.getOperand(0);
5032 Op = DAG.
getNode(SystemZISD::VSRL_BY_SCALAR,
DL, VT,
Op, Shift);
5044 Op = DAG.
getNode(SystemZISD::VSUM,
DL, MVT::v4i32,
Op, Tmp);
5057 if (NumSignificantBits == 0)
5063 BitSize = std::min(BitSize, OrigBitSize);
5072 for (int64_t
I = BitSize / 2;
I >= 8;
I =
I / 2) {
5074 if (BitSize != OrigBitSize)
5111 EVT RegVT =
Op.getValueType();
5113 return lowerATOMIC_LDST_I128(
Op, DAG);
5114 return lowerLoadF16(
Op, DAG);
5120 if (
Node->getMemoryVT().getSizeInBits() == 128)
5121 return lowerATOMIC_LDST_I128(
Op, DAG);
5122 return lowerStoreF16(
Op, DAG);
5129 (
Node->getMemoryVT() == MVT::i128 ||
Node->getMemoryVT() == MVT::f128) &&
5130 "Only custom lowering i128 or f128.");
5143 EVT WideVT = MVT::i32;
5166 unsigned Opcode)
const {
5170 EVT NarrowVT =
Node->getMemoryVT();
5171 EVT WideVT = MVT::i32;
5172 if (NarrowVT == WideVT)
5179 MachineMemOperand *MMO =
Node->getMemOperand();
5183 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
5185 Opcode = SystemZISD::ATOMIC_LOADW_ADD;
5190 SDValue AlignedAddr, BitShift, NegBitShift;
5198 if (Opcode != SystemZISD::ATOMIC_SWAPW)
5201 if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
5202 Opcode == SystemZISD::ATOMIC_LOADW_NAND)
5207 SDVTList VTList = DAG.
getVTList(WideVT, MVT::Other);
5208 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
5228 EVT MemVT =
Node->getMemoryVT();
5229 if (MemVT == MVT::i32 || MemVT == MVT::i64) {
5231 assert(
Op.getValueType() == MemVT &&
"Mismatched VTs");
5232 assert(Subtarget.hasInterlockedAccess1() &&
5233 "Should have been expanded by AtomicExpand pass.");
5239 Node->getChain(),
Node->getBasePtr(), NegSrc2,
5240 Node->getMemOperand());
5243 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
5254 MachineMemOperand *MMO =
Node->getMemOperand();
5257 if (
Node->getMemoryVT() == MVT::i128) {
5266 EVT NarrowVT =
Node->getMemoryVT();
5267 EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
5268 if (NarrowVT == WideVT) {
5269 SDVTList Tys = DAG.
getVTList(WideVT, MVT::i32, MVT::Other);
5270 SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
5272 DL, Tys,
Ops, NarrowVT, MMO);
5286 SDValue AlignedAddr, BitShift, NegBitShift;
5290 SDVTList VTList = DAG.
getVTList(WideVT, MVT::i32, MVT::Other);
5291 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
5294 VTList,
Ops, NarrowVT, MMO);
5308SystemZTargetLowering::getTargetMMOFlags(
const Instruction &
I)
const {
5331 auto *Regs = Subtarget.getSpecialRegisters();
5334 "in GHC calling convention");
5336 Regs->getStackPointerRegister(),
Op.getValueType());
5342 auto *Regs = Subtarget.getSpecialRegisters();
5343 bool StoreBackchain = MF.
getSubtarget<SystemZSubtarget>().hasBackChain();
5347 "in GHC calling convention");
5354 if (StoreBackchain) {
5356 Chain,
DL, Regs->getStackPointerRegister(), MVT::i64);
5357 Backchain = DAG.
getLoad(MVT::i64,
DL, Chain, getBackchainAddress(OldSP, DAG),
5358 MachinePointerInfo());
5361 Chain = DAG.
getCopyToReg(Chain,
DL, Regs->getStackPointerRegister(), NewSP);
5364 Chain = DAG.
getStore(Chain,
DL, Backchain, getBackchainAddress(NewSP, DAG),
5365 MachinePointerInfo());
5372 bool IsData =
Op.getConstantOperandVal(4);
5375 return Op.getOperand(0);
5378 bool IsWrite =
Op.getConstantOperandVal(2);
5385 Node->getMemoryVT(),
Node->getMemOperand());
5389SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(
SDValue Op,
5391 unsigned Opcode, CCValid;
5393 assert(
Op->getNumValues() == 2 &&
"Expected only CC result and chain");
5404SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(
SDValue Op,
5406 unsigned Opcode, CCValid;
5409 if (
Op->getNumValues() == 1)
5411 assert(
Op->getNumValues() == 2 &&
"Expected a CC and non-CC result");
5416 unsigned Id =
Op.getConstantOperandVal(0);
5418 case Intrinsic::thread_pointer:
5419 return lowerThreadPointer(SDLoc(
Op), DAG);
5421 case Intrinsic::s390_vpdi:
5422 return DAG.
getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(
Op),
Op.getValueType(),
5423 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5425 case Intrinsic::s390_vperm:
5426 return DAG.
getNode(SystemZISD::PERMUTE, SDLoc(
Op),
Op.getValueType(),
5427 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5429 case Intrinsic::s390_vuphb:
5430 case Intrinsic::s390_vuphh:
5431 case Intrinsic::s390_vuphf:
5432 case Intrinsic::s390_vuphg:
5433 return DAG.
getNode(SystemZISD::UNPACK_HIGH, SDLoc(
Op),
Op.getValueType(),
5436 case Intrinsic::s390_vuplhb:
5437 case Intrinsic::s390_vuplhh:
5438 case Intrinsic::s390_vuplhf:
5439 case Intrinsic::s390_vuplhg:
5440 return DAG.
getNode(SystemZISD::UNPACKL_HIGH, SDLoc(
Op),
Op.getValueType(),
5443 case Intrinsic::s390_vuplb:
5444 case Intrinsic::s390_vuplhw:
5445 case Intrinsic::s390_vuplf:
5446 case Intrinsic::s390_vuplg:
5447 return DAG.
getNode(SystemZISD::UNPACK_LOW, SDLoc(
Op),
Op.getValueType(),
5450 case Intrinsic::s390_vupllb:
5451 case Intrinsic::s390_vupllh:
5452 case Intrinsic::s390_vupllf:
5453 case Intrinsic::s390_vupllg:
5454 return DAG.
getNode(SystemZISD::UNPACKL_LOW, SDLoc(
Op),
Op.getValueType(),
5457 case Intrinsic::s390_vsumb:
5458 case Intrinsic::s390_vsumh:
5459 case Intrinsic::s390_vsumgh:
5460 case Intrinsic::s390_vsumgf:
5461 case Intrinsic::s390_vsumqf:
5462 case Intrinsic::s390_vsumqg:
5463 return DAG.
getNode(SystemZISD::VSUM, SDLoc(
Op),
Op.getValueType(),
5464 Op.getOperand(1),
Op.getOperand(2));
5466 case Intrinsic::s390_vaq:
5468 Op.getOperand(1),
Op.getOperand(2));
5469 case Intrinsic::s390_vaccb:
5470 case Intrinsic::s390_vacch:
5471 case Intrinsic::s390_vaccf:
5472 case Intrinsic::s390_vaccg:
5473 case Intrinsic::s390_vaccq:
5474 return DAG.
getNode(SystemZISD::VACC, SDLoc(
Op),
Op.getValueType(),
5475 Op.getOperand(1),
Op.getOperand(2));
5476 case Intrinsic::s390_vacq:
5477 return DAG.
getNode(SystemZISD::VAC, SDLoc(
Op),
Op.getValueType(),
5478 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5479 case Intrinsic::s390_vacccq:
5480 return DAG.
getNode(SystemZISD::VACCC, SDLoc(
Op),
Op.getValueType(),
5481 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5483 case Intrinsic::s390_vsq:
5485 Op.getOperand(1),
Op.getOperand(2));
5486 case Intrinsic::s390_vscbib:
5487 case Intrinsic::s390_vscbih:
5488 case Intrinsic::s390_vscbif:
5489 case Intrinsic::s390_vscbig:
5490 case Intrinsic::s390_vscbiq:
5491 return DAG.
getNode(SystemZISD::VSCBI, SDLoc(
Op),
Op.getValueType(),
5492 Op.getOperand(1),
Op.getOperand(2));
5493 case Intrinsic::s390_vsbiq:
5494 return DAG.
getNode(SystemZISD::VSBI, SDLoc(
Op),
Op.getValueType(),
5495 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5496 case Intrinsic::s390_vsbcbiq:
5497 return DAG.
getNode(SystemZISD::VSBCBI, SDLoc(
Op),
Op.getValueType(),
5498 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5500 case Intrinsic::s390_vmhb:
5501 case Intrinsic::s390_vmhh:
5502 case Intrinsic::s390_vmhf:
5503 case Intrinsic::s390_vmhg:
5504 case Intrinsic::s390_vmhq:
5506 Op.getOperand(1),
Op.getOperand(2));
5507 case Intrinsic::s390_vmlhb:
5508 case Intrinsic::s390_vmlhh:
5509 case Intrinsic::s390_vmlhf:
5510 case Intrinsic::s390_vmlhg:
5511 case Intrinsic::s390_vmlhq:
5513 Op.getOperand(1),
Op.getOperand(2));
5515 case Intrinsic::s390_vmahb:
5516 case Intrinsic::s390_vmahh:
5517 case Intrinsic::s390_vmahf:
5518 case Intrinsic::s390_vmahg:
5519 case Intrinsic::s390_vmahq:
5520 return DAG.
getNode(SystemZISD::VMAH, SDLoc(
Op),
Op.getValueType(),
5521 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5522 case Intrinsic::s390_vmalhb:
5523 case Intrinsic::s390_vmalhh:
5524 case Intrinsic::s390_vmalhf:
5525 case Intrinsic::s390_vmalhg:
5526 case Intrinsic::s390_vmalhq:
5527 return DAG.
getNode(SystemZISD::VMALH, SDLoc(
Op),
Op.getValueType(),
5528 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
5530 case Intrinsic::s390_vmeb:
5531 case Intrinsic::s390_vmeh:
5532 case Intrinsic::s390_vmef:
5533 case Intrinsic::s390_vmeg:
5534 return DAG.
getNode(SystemZISD::VME, SDLoc(
Op),
Op.getValueType(),
5535 Op.getOperand(1),
Op.getOperand(2));
5536 case Intrinsic::s390_vmleb:
5537 case Intrinsic::s390_vmleh:
5538 case Intrinsic::s390_vmlef:
5539 case Intrinsic::s390_vmleg:
5540 return DAG.
getNode(SystemZISD::VMLE, SDLoc(
Op),
Op.getValueType(),
5541 Op.getOperand(1),
Op.getOperand(2));
5542 case Intrinsic::s390_vmob:
5543 case Intrinsic::s390_vmoh:
5544 case Intrinsic::s390_vmof:
5545 case Intrinsic::s390_vmog:
5546 return DAG.
getNode(SystemZISD::VMO, SDLoc(
Op),
Op.getValueType(),
5547 Op.getOperand(1),
Op.getOperand(2));
5548 case Intrinsic::s390_vmlob:
5549 case Intrinsic::s390_vmloh:
5550 case Intrinsic::s390_vmlof:
5551 case Intrinsic::s390_vmlog:
5552 return DAG.
getNode(SystemZISD::VMLO, SDLoc(
Op),
Op.getValueType(),
5553 Op.getOperand(1),
Op.getOperand(2));
5555 case Intrinsic::s390_vmaeb:
5556 case Intrinsic::s390_vmaeh:
5557 case Intrinsic::s390_vmaef:
5558 case Intrinsic::s390_vmaeg:
5560 DAG.
getNode(SystemZISD::VME, SDLoc(
Op),
Op.getValueType(),
5561 Op.getOperand(1),
Op.getOperand(2)),
5563 case Intrinsic::s390_vmaleb:
5564 case Intrinsic::s390_vmaleh:
5565 case Intrinsic::s390_vmalef:
5566 case Intrinsic::s390_vmaleg:
5568 DAG.
getNode(SystemZISD::VMLE, SDLoc(
Op),
Op.getValueType(),
5569 Op.getOperand(1),
Op.getOperand(2)),
5571 case Intrinsic::s390_vmaob:
5572 case Intrinsic::s390_vmaoh:
5573 case Intrinsic::s390_vmaof:
5574 case Intrinsic::s390_vmaog:
5576 DAG.
getNode(SystemZISD::VMO, SDLoc(
Op),
Op.getValueType(),
5577 Op.getOperand(1),
Op.getOperand(2)),
5579 case Intrinsic::s390_vmalob:
5580 case Intrinsic::s390_vmaloh:
5581 case Intrinsic::s390_vmalof:
5582 case Intrinsic::s390_vmalog:
5584 DAG.
getNode(SystemZISD::VMLO, SDLoc(
Op),
Op.getValueType(),
5585 Op.getOperand(1),
Op.getOperand(2)),
5606 { SystemZISD::MERGE_HIGH, 8,
5607 { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
5609 { SystemZISD::MERGE_HIGH, 4,
5610 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
5612 { SystemZISD::MERGE_HIGH, 2,
5613 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
5615 { SystemZISD::MERGE_HIGH, 1,
5616 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
5618 { SystemZISD::MERGE_LOW, 8,
5619 { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
5621 { SystemZISD::MERGE_LOW, 4,
5622 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
5624 { SystemZISD::MERGE_LOW, 2,
5625 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
5627 { SystemZISD::MERGE_LOW, 1,
5628 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
5630 { SystemZISD::PACK, 4,
5631 { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
5633 { SystemZISD::PACK, 2,
5634 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
5636 { SystemZISD::PACK, 1,
5637 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
5639 { SystemZISD::PERMUTE_DWORDS, 4,
5640 { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
5642 { SystemZISD::PERMUTE_DWORDS, 1,
5643 { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
5657 OpNo0 = OpNo1 = OpNos[1];
5658 }
else if (OpNos[1] < 0) {
5659 OpNo0 = OpNo1 = OpNos[0];
5677 unsigned &OpNo0,
unsigned &OpNo1) {
5678 int OpNos[] = { -1, -1 };
5691 if (OpNos[ModelOpNo] == 1 - RealOpNo)
5693 OpNos[ModelOpNo] = RealOpNo;
5701 unsigned &OpNo0,
unsigned &OpNo1) {
5718 int Elt = Bytes[From];
5721 Transform[From] = -1;
5723 while (
P.Bytes[To] != Elt) {
5728 Transform[From] = To;
5752 Bytes.
resize(NumElements * BytesPerElement, -1);
5753 for (
unsigned I = 0;
I < NumElements; ++
I) {
5754 int Index = VSN->getMaskElt(
I);
5756 for (
unsigned J = 0; J < BytesPerElement; ++J)
5757 Bytes[
I * BytesPerElement + J] = Index * BytesPerElement + J;
5761 if (SystemZISD::SPLAT == ShuffleOp.
getOpcode() &&
5764 Bytes.
resize(NumElements * BytesPerElement, -1);
5765 for (
unsigned I = 0;
I < NumElements; ++
I)
5766 for (
unsigned J = 0; J < BytesPerElement; ++J)
5767 Bytes[
I * BytesPerElement + J] = Index * BytesPerElement + J;
5778 unsigned BytesPerElement,
int &
Base) {
5780 for (
unsigned I = 0;
I < BytesPerElement; ++
I) {
5781 if (Bytes[Start +
I] >= 0) {
5782 unsigned Elem = Bytes[Start +
I];
5786 if (
unsigned(
Base) % Bytes.
size() + BytesPerElement > Bytes.
size())
5788 }
else if (
unsigned(
Base) != Elem -
I)
5801 unsigned &StartIndex,
unsigned &OpNo0,
5803 int OpNos[] = { -1, -1 };
5805 for (
unsigned I = 0;
I < 16; ++
I) {
5806 int Index = Bytes[
I];
5812 Shift = ExpectedShift;
5813 else if (Shift != ExpectedShift)
5817 if (OpNos[ModelOpNo] == 1 - RealOpNo)
5819 OpNos[ModelOpNo] = RealOpNo;
5832 unsigned InBytes = (
P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 :
5833 P.Opcode == SystemZISD::PACK ?
P.Operand * 2 :
5841 if (
P.Opcode == SystemZISD::PERMUTE_DWORDS) {
5843 Op = DAG.
getNode(SystemZISD::PERMUTE_DWORDS,
DL, InVT, Op0, Op1, Op2);
5844 }
else if (
P.Opcode == SystemZISD::PACK) {
5847 Op = DAG.
getNode(SystemZISD::PACK,
DL, OutVT, Op0, Op1);
5856 N =
N->getOperand(0);
5859 return Op->getZExtValue() == 0;
5865 for (
unsigned I = 0;
I < Num ;
I++)
5877 for (
unsigned I = 0;
I < 2; ++
I)
5881 unsigned StartIndex, OpNo0, OpNo1;
5883 return DAG.
getNode(SystemZISD::SHL_DOUBLE,
DL, MVT::v16i8,
Ops[OpNo0],
5890 if (ZeroVecIdx != UINT32_MAX) {
5891 bool MaskFirst =
true;
5896 if (OpNo == ZeroVecIdx &&
I == 0) {
5901 if (OpNo != ZeroVecIdx && Byte == 0) {
5908 if (ZeroIdx != -1) {
5911 if (Bytes[
I] >= 0) {
5914 if (OpNo == ZeroVecIdx)
5926 return DAG.
getNode(SystemZISD::PERMUTE,
DL, MVT::v16i8, Mask, Src,
5929 return DAG.
getNode(SystemZISD::PERMUTE,
DL, MVT::v16i8, Src, Mask,
5941 return DAG.
getNode(SystemZISD::PERMUTE,
DL, MVT::v16i8,
Ops[0],
5947struct GeneralShuffle {
5948 GeneralShuffle(EVT vt)
5949 : VT(vt), UnpackFromEltSize(UINT_MAX), UnpackLow(
false) {}
5953 void tryPrepareForUnpack();
5954 bool unpackWasPrepared() {
return UnpackFromEltSize <= 4; }
5969 unsigned UnpackFromEltSize;
5976void GeneralShuffle::addUndef() {
5978 for (
unsigned I = 0;
I < BytesPerElement; ++
I)
5979 Bytes.push_back(-1);
5988bool GeneralShuffle::add(
SDValue Op,
unsigned Elem) {
5994 EVT FromVT =
Op.getNode() ?
Op.getValueType() : VT;
5999 if (FromBytesPerElement < BytesPerElement)
6003 (FromBytesPerElement - BytesPerElement));
6006 while (
Op.getNode()) {
6008 Op =
Op.getOperand(0);
6024 }
else if (
Op.isUndef()) {
6033 for (; OpNo <
Ops.size(); ++OpNo)
6034 if (
Ops[OpNo] ==
Op)
6036 if (OpNo ==
Ops.size())
6041 for (
unsigned I = 0;
I < BytesPerElement; ++
I)
6042 Bytes.push_back(
Base +
I);
6051 if (
Ops.size() == 0)
6055 tryPrepareForUnpack();
6058 if (
Ops.size() == 1)
6070 unsigned Stride = 1;
6071 for (; Stride * 2 <
Ops.size(); Stride *= 2) {
6072 for (
unsigned I = 0;
I <
Ops.size() - Stride;
I += Stride * 2) {
6082 else if (OpNo ==
I + Stride)
6093 if (NewBytes[J] >= 0) {
6095 "Invalid double permute");
6098 assert(NewBytesMap[J] < 0 &&
"Invalid double permute");
6104 if (NewBytes[J] >= 0)
6120 unsigned OpNo0, OpNo1;
6124 else if (
const Permute *
P =
matchPermute(Bytes, OpNo0, OpNo1))
6129 Op = insertUnpackIfPrepared(DAG,
DL,
Op);
6136 dbgs() << Msg.c_str() <<
" { ";
6137 for (
unsigned I = 0;
I < Bytes.
size();
I++)
6138 dbgs() << Bytes[
I] <<
" ";
6146void GeneralShuffle::tryPrepareForUnpack() {
6148 if (ZeroVecOpNo == UINT32_MAX ||
Ops.size() == 1)
6153 if (
Ops.size() > 2 &&
6158 UnpackFromEltSize = 1;
6159 for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) {
6160 bool MatchUnpack =
true;
6163 unsigned ToEltSize = UnpackFromEltSize * 2;
6164 bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize;
6167 if (Bytes[Elt] != -1) {
6169 if (IsZextByte != (OpNo == ZeroVecOpNo)) {
6170 MatchUnpack =
false;
6176 if (
Ops.size() == 2) {
6178 bool CanUseUnpackLow =
true, CanUseUnpackHigh =
true;
6180 if (SrcBytes[i] == -1)
6182 if (SrcBytes[i] % 16 !=
int(i))
6183 CanUseUnpackHigh =
false;
6185 CanUseUnpackLow =
false;
6186 if (!CanUseUnpackLow && !CanUseUnpackHigh) {
6187 UnpackFromEltSize = UINT_MAX;
6191 if (!CanUseUnpackHigh)
6197 if (UnpackFromEltSize > 4)
6200 LLVM_DEBUG(
dbgs() <<
"Preparing for final unpack of element size "
6201 << UnpackFromEltSize <<
". Zero vector is Op#" << ZeroVecOpNo
6203 dumpBytes(Bytes,
"Original Bytes vector:"););
6212 Elt += UnpackFromEltSize;
6213 for (
unsigned i = 0; i < UnpackFromEltSize; i++, Elt++,
B++)
6214 Bytes[
B] = Bytes[Elt];
6222 Ops.erase(&
Ops[ZeroVecOpNo]);
6224 if (Bytes[
I] >= 0) {
6226 if (OpNo > ZeroVecOpNo)
6237 if (!unpackWasPrepared())
6239 unsigned InBits = UnpackFromEltSize * 8;
6243 unsigned OutBits = InBits * 2;
6246 return DAG.
getNode(UnpackLow ? SystemZISD::UNPACKL_LOW
6247 : SystemZISD::UNPACKL_HIGH,
6248 DL, OutVT, PackedOp);
6253 for (
unsigned I = 1,
E =
Op.getNumOperands();
I !=
E; ++
I)
6254 if (!
Op.getOperand(
I).isUndef())
6270 if (
Value.isUndef())
6282 return DAG.
getNode(SystemZISD::REPLICATE,
DL, VT, Op1);
6285 return DAG.
getNode(SystemZISD::REPLICATE,
DL, VT, Op0);
6286 return DAG.
getNode(SystemZISD::MERGE_HIGH,
DL, VT,
6307 return DAG.
getNode(SystemZISD::JOIN_DWORDS,
DL, MVT::v2i64, Op0, Op1);
6323 GeneralShuffle GS(VT);
6325 bool FoundOne =
false;
6326 for (
unsigned I = 0;
I < NumElements; ++
I) {
6329 Op =
Op.getOperand(0);
6332 unsigned Elem =
Op.getConstantOperandVal(1);
6333 if (!GS.add(
Op.getOperand(0), Elem))
6336 }
else if (
Op.isUndef()) {
6350 if (!ResidueOps.
empty()) {
6351 while (ResidueOps.
size() < NumElements)
6353 for (
auto &
Op : GS.Ops) {
6354 if (!
Op.getNode()) {
6360 return GS.getNode(DAG,
SDLoc(BVN));
6363bool SystemZTargetLowering::isVectorElementLoad(
SDValue Op)
const {
6369 if (Subtarget.hasVectorEnhancements2() &&
Op.getOpcode() == SystemZISD::LRV)
6380 "Handling full vectors only.");
6400 if (Op01.
getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
6412 unsigned int NumElements = Elems.
size();
6413 unsigned int Count = 0;
6414 for (
auto Elem : Elems) {
6415 if (!Elem.isUndef()) {
6418 else if (Elem != Single) {
6438 if (
Single.getNode() && (
Count > 1 || isVectorElementLoad(Single)))
6439 return DAG.
getNode(SystemZISD::REPLICATE,
DL, VT, Single);
6442 bool AllLoads =
true;
6443 for (
auto Elem : Elems)
6444 if (!isVectorElementLoad(Elem)) {
6450 if (VT == MVT::v2i64 && !AllLoads)
6454 if (VT == MVT::v2f64 && !AllLoads)
6464 if (VT == MVT::v4f32 && !AllLoads)
6468 if (VT == MVT::v8f16 && !AllLoads) {
6477 if (Op0123.
getOpcode() == SystemZISD::REPLICATE && Op0123 == Op4567)
6486 unsigned NumConstants = 0;
6487 for (
unsigned I = 0;
I < NumElements; ++
I) {
6501 if (NumConstants > 0) {
6502 for (
unsigned I = 0;
I < NumElements; ++
I)
6513 std::map<const SDNode*, unsigned> UseCounts;
6514 SDNode *LoadMaxUses =
nullptr;
6515 for (
unsigned I = 0;
I < NumElements; ++
I)
6516 if (isVectorElementLoad(Elems[
I])) {
6517 SDNode *Ld = Elems[
I].getNode();
6518 unsigned Count = ++UseCounts[Ld];
6519 if (LoadMaxUses ==
nullptr || UseCounts[LoadMaxUses] <
Count)
6522 if (LoadMaxUses !=
nullptr) {
6523 ReplicatedVal =
SDValue(LoadMaxUses, 0);
6527 unsigned I1 = NumElements / 2 - 1;
6528 unsigned I2 = NumElements - 1;
6529 bool Def1 = !Elems[
I1].isUndef();
6530 bool Def2 = !Elems[I2].isUndef();
6544 for (
unsigned I = 0;
I < NumElements; ++
I)
6545 if (!
Done[
I] && !Elems[
I].
isUndef() && Elems[
I] != ReplicatedVal)
6555 EVT VT =
Op.getValueType();
6557 if (BVN->isConstant()) {
6558 if (SystemZVectorConstantInfo(BVN).isVectorConstantLegal(Subtarget))
6576 for (
unsigned I = 0;
I < NumElements; ++
I)
6578 return buildVector(DAG,
DL, VT,
Ops);
6585 EVT VT =
Op.getValueType();
6588 if (VSN->isSplat()) {
6590 unsigned Index = VSN->getSplatIndex();
6592 "Splat index should be defined and in first operand");
6598 return DAG.
getNode(SystemZISD::SPLAT,
DL, VT,
Op.getOperand(0),
6602 GeneralShuffle
GS(VT);
6603 for (
unsigned I = 0;
I < NumElements; ++
I) {
6604 int Elt = VSN->getMaskElt(
I);
6607 else if (!
GS.add(
Op.getOperand(
unsigned(Elt) / NumElements),
6608 unsigned(Elt) % NumElements))
6611 return GS.getNode(DAG, SDLoc(VSN));
6626 assert(
Op.getSimpleValueType() == MVT::i64 &&
6627 "Expexted to convert i64 to f16.");
6639 assert(
Op.getSimpleValueType() == MVT::f16 &&
6640 "Expected to convert f16 to i64.");
6657 EVT VT =
Op.getValueType();
6662 if (VT == MVT::v2f64 &&
6686SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(
SDValue Op,
6692 EVT VT =
Op.getValueType();
6697 uint64_t
Index = CIndexN->getZExtValue();
6706 MVT ExtrVT = IntVT == MVT::i16 ? MVT::i32 : IntVT;
6714SDValue SystemZTargetLowering::
6717 EVT OutVT =
Op.getValueType();
6721 unsigned StartOffset = 0;
6728 ArrayRef<int> ShuffleMask = SVN->
getMask();
6733 if (ToBits == 64 && OutNumElts == 2) {
6734 int NumElem = ToBits / FromBits;
6735 if (ShuffleMask[0] == NumElem - 1 && ShuffleMask[1] == 2 * NumElem - 1)
6741 int StartOffsetCandidate = -1;
6742 for (
int Elt = 0; Elt < OutNumElts; Elt++) {
6743 if (ShuffleMask[Elt] == -1)
6745 if (ShuffleMask[Elt] % OutNumElts == Elt) {
6746 if (StartOffsetCandidate == -1)
6747 StartOffsetCandidate = ShuffleMask[Elt] - Elt;
6748 if (StartOffsetCandidate == ShuffleMask[Elt] - Elt)
6751 StartOffsetCandidate = -1;
6754 if (StartOffsetCandidate != -1) {
6755 StartOffset = StartOffsetCandidate;
6764 unsigned Opcode = SystemZISD::UNPACK_HIGH;
6765 if (StartOffset >= OutNumElts) {
6766 Opcode = SystemZISD::UNPACK_LOW;
6767 StartOffset -= OutNumElts;
6769 PackedOp = DAG.
getNode(Opcode, SDLoc(PackedOp), OutVT, PackedOp);
6770 }
while (FromBits != ToBits);
6775SDValue SystemZTargetLowering::
6779 EVT OutVT =
Op.getValueType();
6783 unsigned NumInPerOut = InNumElts / OutNumElts;
6788 SmallVector<int, 16>
Mask(InNumElts);
6789 unsigned ZeroVecElt = InNumElts;
6790 for (
unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) {
6791 unsigned MaskElt = PackedElt * NumInPerOut;
6792 unsigned End = MaskElt + NumInPerOut - 1;
6793 for (; MaskElt < End; MaskElt++)
6794 Mask[MaskElt] = ZeroVecElt++;
6795 Mask[MaskElt] = PackedElt;
6802 unsigned ByScalar)
const {
6807 EVT VT =
Op.getValueType();
6812 APInt SplatBits, SplatUndef;
6813 unsigned SplatBitSize;
6817 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
6818 ElemBitSize,
true) &&
6819 SplatBitSize == ElemBitSize) {
6822 return DAG.
getNode(ByScalar,
DL, VT, Op0, Shift);
6825 BitVector UndefElements;
6831 return DAG.
getNode(ByScalar,
DL, VT, Op0, Shift);
6838 if (VSN->isSplat()) {
6839 SDValue VSNOp0 = VSN->getOperand(0);
6840 unsigned Index = VSN->getSplatIndex();
6842 "Splat index should be defined and in first operand");
6849 return DAG.
getNode(ByScalar,
DL, VT, Op0, Shift);
6867 uint64_t ShiftAmt = ShiftAmtNode->getZExtValue() & 127;
6868 if ((ShiftAmt & 7) == 0 || Subtarget.hasVectorEnhancements2()) {
6871 if (ShiftAmt > 120) {
6875 DAG.
getNode(SystemZISD::SHR_DOUBLE_BIT,
DL, MVT::v16i8, Op0, Op1,
6879 SmallVector<int, 16>
Mask(16);
6880 for (
unsigned Elt = 0; Elt < 16; Elt++)
6881 Mask[Elt] = (ShiftAmt >> 3) + Elt;
6883 if ((ShiftAmt & 7) == 0)
6887 DAG.
getNode(SystemZISD::SHL_DOUBLE_BIT,
DL, MVT::v16i8, Shuf1, Shuf2,
6905 uint64_t ShiftAmt = ShiftAmtNode->getZExtValue() & 127;
6906 if ((ShiftAmt & 7) == 0 || Subtarget.hasVectorEnhancements2()) {
6909 if (ShiftAmt > 120) {
6913 DAG.
getNode(SystemZISD::SHL_DOUBLE_BIT,
DL, MVT::v16i8, Op0, Op1,
6917 SmallVector<int, 16>
Mask(16);
6918 for (
unsigned Elt = 0; Elt < 16; Elt++)
6919 Mask[Elt] = 16 - (ShiftAmt >> 3) + Elt;
6921 if ((ShiftAmt & 7) == 0)
6925 DAG.
getNode(SystemZISD::SHR_DOUBLE_BIT,
DL, MVT::v16i8, Shuf2, Shuf1,
6937 MVT DstVT =
Op.getSimpleValueType();
6940 unsigned SrcAS =
N->getSrcAddressSpace();
6942 assert(SrcAS !=
N->getDestAddressSpace() &&
6943 "addrspacecast must be between different address spaces");
6951 }
else if (DstVT == MVT::i32) {
6965 if (
In.getSimpleValueType() != MVT::f16)
6972 SDValue Chain,
bool IsStrict)
const {
6973 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
"Unexpected request for libcall!");
6976 std::tie(Result, Chain) =
6985 bool IsStrict =
Op->isStrictFPOpcode();
6987 MVT VT =
Op.getSimpleValueType();
6988 SDValue InOp =
Op.getOperand(IsStrict ? 1 : 0);
6996 if (!Subtarget.hasFPExtension() && !IsSigned)
7007 if (VT == MVT::i128) {
7010 return useLibCall(DAG, LC, VT, InOp,
DL, Chain, IsStrict);
7020 bool IsStrict =
Op->isStrictFPOpcode();
7022 MVT VT =
Op.getSimpleValueType();
7023 SDValue InOp =
Op.getOperand(IsStrict ? 1 : 0);
7028 if (VT == MVT::f16) {
7035 if (!Subtarget.hasFPExtension() && !IsSigned)
7038 if (InVT == MVT::i128) {
7041 return useLibCall(DAG, LC, VT, InOp,
DL, Chain, IsStrict);
7050 EVT RegVT =
Op.getValueType();
7051 assert(RegVT == MVT::f16 &&
"Expected to lower an f16 load.");
7058 assert(EVT(RegVT) == AtomicLd->getMemoryVT() &&
"Unhandled f16 load");
7060 AtomicLd->getChain(), AtomicLd->getBasePtr(),
7061 AtomicLd->getMemOperand());
7081 Shft, AtomicSt->getBasePtr(),
7082 AtomicSt->getMemOperand());
7092 MVT ResultVT =
Op.getSimpleValueType();
7094 unsigned Check =
Op.getConstantOperandVal(1);
7096 unsigned TDCMask = 0;
7133 MachinePointerInfo MPI =
7139 SystemZISD::STCKF,
DL, DAG.
getVTList(MVT::Other), StoreOps, MVT::i64,
7143 return DAG.
getLoad(MVT::i64,
DL, Chain, StackPtr, MPI);
7148 switch (
Op.getOpcode()) {
7150 return lowerFRAMEADDR(
Op, DAG);
7152 return lowerRETURNADDR(
Op, DAG);
7154 return lowerBR_CC(
Op, DAG);
7156 return lowerSELECT_CC(
Op, DAG);
7158 return lowerSETCC(
Op, DAG);
7160 return lowerSTRICT_FSETCC(
Op, DAG,
false);
7162 return lowerSTRICT_FSETCC(
Op, DAG,
true);
7174 return lowerBITCAST(
Op, DAG);
7176 return lowerVASTART(
Op, DAG);
7178 return lowerVACOPY(
Op, DAG);
7180 return lowerDYNAMIC_STACKALLOC(
Op, DAG);
7182 return lowerGET_DYNAMIC_AREA_OFFSET(
Op, DAG);
7184 return lowerMULH(
Op, DAG, SystemZISD::SMUL_LOHI);
7186 return lowerMULH(
Op, DAG, SystemZISD::UMUL_LOHI);
7188 return lowerSMUL_LOHI(
Op, DAG);
7190 return lowerUMUL_LOHI(
Op, DAG);
7192 return lowerSDIVREM(
Op, DAG);
7194 return lowerUDIVREM(
Op, DAG);
7199 return lowerXALUO(
Op, DAG);
7202 return lowerUADDSUBO_CARRY(
Op, DAG);
7204 return lowerOR(
Op, DAG);
7206 return lowerCTPOP(
Op, DAG);
7208 return lowerVECREDUCE_ADD(
Op, DAG);
7210 return lowerATOMIC_FENCE(
Op, DAG);
7212 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_SWAPW);
7214 return lowerATOMIC_STORE(
Op, DAG);
7216 return lowerATOMIC_LOAD(
Op, DAG);
7218 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
7220 return lowerATOMIC_LOAD_SUB(
Op, DAG);
7222 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
7224 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
7226 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
7228 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
7230 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
7232 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
7234 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
7236 return lowerATOMIC_LOAD_OP(
Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
7238 return lowerATOMIC_CMP_SWAP(
Op, DAG);
7240 return lowerSTACKSAVE(
Op, DAG);
7242 return lowerSTACKRESTORE(
Op, DAG);
7244 return lowerPREFETCH(
Op, DAG);
7246 return lowerINTRINSIC_W_CHAIN(
Op, DAG);
7248 return lowerINTRINSIC_WO_CHAIN(
Op, DAG);
7250 return lowerBUILD_VECTOR(
Op, DAG);
7252 return lowerVECTOR_SHUFFLE(
Op, DAG);
7254 return lowerSCALAR_TO_VECTOR(
Op, DAG);
7256 return lowerINSERT_VECTOR_ELT(
Op, DAG);
7258 return lowerEXTRACT_VECTOR_ELT(
Op, DAG);
7260 return lowerSIGN_EXTEND_VECTOR_INREG(
Op, DAG);
7262 return lowerZERO_EXTEND_VECTOR_INREG(
Op, DAG);
7264 return lowerShift(
Op, DAG, SystemZISD::VSHL_BY_SCALAR);
7266 return lowerShift(
Op, DAG, SystemZISD::VSRL_BY_SCALAR);
7268 return lowerShift(
Op, DAG, SystemZISD::VSRA_BY_SCALAR);
7272 return lowerShift(
Op, DAG, SystemZISD::VROTL_BY_SCALAR);
7274 return lowerFSHL(
Op, DAG);
7276 return lowerFSHR(
Op, DAG);
7279 return lowerFP_EXTEND(
Op, DAG);
7284 return lower_FP_TO_INT(
Op, DAG);
7289 return lower_INT_TO_FP(
Op, DAG);
7291 return lowerLoadF16(
Op, DAG);
7293 return lowerStoreF16(
Op, DAG);
7295 return lowerIS_FPCLASS(
Op, DAG);
7297 return lowerGET_ROUNDING(
Op, DAG);
7299 return lowerREADCYCLECOUNTER(
Op, DAG);
7321 &SystemZ::FP128BitRegClass);
7330 SystemZ::REG_SEQUENCE, SL, MVT::f128,
7345 &SystemZ::FP128BitRegClass);
7362 switch (
N->getOpcode()) {
7366 SDValue Ops[] = {
N->getOperand(0),
N->getOperand(1) };
7369 DL, Tys,
Ops, MVT::i128, MMO);
7372 if (
N->getValueType(0) == MVT::f128)
7386 SDValue Ops[] = {
N->getOperand(0), Val,
N->getOperand(2)};
7389 DL, Tys,
Ops, MVT::i128, MMO);
7395 MVT::Other, Res), 0);
7407 DL, Tys,
Ops, MVT::i128, MMO);
7421 EVT SrcVT = Src.getValueType();
7422 EVT ResVT =
N->getValueType(0);
7423 if (ResVT == MVT::i128 && SrcVT == MVT::f128)
7425 else if (SrcVT == MVT::i16 && ResVT == MVT::f16) {
7426 if (Subtarget.hasVector()) {
7434 }
else if (SrcVT == MVT::f16 && ResVT == MVT::i16) {
7436 Subtarget.hasVector()
7450 bool IsStrict =
N->isStrictFPOpcode();
7452 SDValue InOp =
N->getOperand(IsStrict ? 1 : 0);
7453 EVT ResVT =
N->getValueType(0);
7455 if (ResVT == MVT::f16) {
7478 bool IsStrict =
N->isStrictFPOpcode();
7480 EVT ResVT =
N->getValueType(0);
7481 SDValue InOp =
N->getOperand(IsStrict ? 1 : 0);
7484 if (InVT == MVT::f16) {
7490 std::tie(InF32, Chain) =
7515bool SystemZTargetLowering::canTreatAsByteVector(
EVT VT)
const {
7516 if (!Subtarget.hasVector())
7530 DAGCombinerInfo &DCI,
7538 unsigned Opcode =
Op.getOpcode();
7541 Op =
Op.getOperand(0);
7543 canTreatAsByteVector(
Op.getValueType())) {
7552 BytesPerElement,
First))
7559 if (Byte % BytesPerElement != 0)
7562 Index = Byte / BytesPerElement;
7566 canTreatAsByteVector(
Op.getValueType())) {
7569 EVT OpVT =
Op.getValueType();
7571 if (OpBytesPerElement < BytesPerElement)
7575 unsigned End = (
Index + 1) * BytesPerElement;
7576 if (End % OpBytesPerElement != 0)
7579 Op =
Op.getOperand(End / OpBytesPerElement - 1);
7580 if (!
Op.getValueType().isInteger()) {
7583 DCI.AddToWorklist(
Op.getNode());
7588 DCI.AddToWorklist(
Op.getNode());
7595 canTreatAsByteVector(
Op.getValueType()) &&
7596 canTreatAsByteVector(
Op.getOperand(0).getValueType())) {
7598 EVT ExtVT =
Op.getValueType();
7599 EVT OpVT =
Op.getOperand(0).getValueType();
7602 unsigned Byte =
Index * BytesPerElement;
7603 unsigned SubByte =
Byte % ExtBytesPerElement;
7604 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
7605 if (SubByte < MinSubByte ||
7606 SubByte + BytesPerElement > ExtBytesPerElement)
7609 Byte =
Byte / ExtBytesPerElement * OpBytesPerElement;
7611 Byte += SubByte - MinSubByte;
7612 if (Byte % BytesPerElement != 0)
7614 Op =
Op.getOperand(0);
7621 if (
Op.getValueType() != VecVT) {
7623 DCI.AddToWorklist(
Op.getNode());
7633SDValue SystemZTargetLowering::combineTruncateExtract(
7642 if (canTreatAsByteVector(VecVT)) {
7646 if (BytesPerElement % TruncBytes == 0) {
7652 unsigned Scale = BytesPerElement / TruncBytes;
7653 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
7660 EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
7661 return combineExtract(
DL, ResVT, VecVT, Vec, NewIndex, DCI,
true);
7669SDValue SystemZTargetLowering::combineZERO_EXTEND(
7670 SDNode *
N, DAGCombinerInfo &DCI)
const {
7672 SelectionDAG &DAG = DCI.DAG;
7674 EVT VT =
N->getValueType(0);
7675 if (N0.
getOpcode() == SystemZISD::SELECT_CCMASK) {
7678 if (TrueOp && FalseOp) {
7688 DCI.CombineTo(N0.
getNode(), TruncSelect);
7731 return DAG.
getNode(SystemZISD::VSCBI, SDLoc(N0), VT, Op0, Op1);
7749SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
7750 SDNode *
N, DAGCombinerInfo &DCI)
const {
7754 SelectionDAG &DAG = DCI.DAG;
7756 EVT VT =
N->getValueType(0);
7770SDValue SystemZTargetLowering::combineSIGN_EXTEND(
7771 SDNode *
N, DAGCombinerInfo &DCI)
const {
7775 SelectionDAG &DAG = DCI.DAG;
7777 EVT VT =
N->getValueType(0);
7784 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
7785 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
7801SDValue SystemZTargetLowering::combineMERGE(
7802 SDNode *
N, DAGCombinerInfo &DCI)
const {
7803 SelectionDAG &DAG = DCI.DAG;
7804 unsigned Opcode =
N->getOpcode();
7812 if (Op1 ==
N->getOperand(0))
7817 if (ElemBytes <= 4) {
7818 Opcode = (Opcode == SystemZISD::MERGE_HIGH ?
7819 SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
7825 DCI.AddToWorklist(Op1.
getNode());
7828 DCI.AddToWorklist(
Op.getNode());
7837 LoPart = HiPart =
nullptr;
7842 if (
Use.getResNo() != 0)
7847 bool IsLoPart =
true;
7872 LoPart = HiPart =
nullptr;
7877 if (
Use.getResNo() != 0)
7883 User->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
7886 switch (
User->getConstantOperandVal(1)) {
7887 case SystemZ::subreg_l64:
7892 case SystemZ::subreg_h64:
7904SDValue SystemZTargetLowering::combineLOAD(
7905 SDNode *
N, DAGCombinerInfo &DCI)
const {
7906 SelectionDAG &DAG = DCI.DAG;
7907 EVT LdVT =
N->getValueType(0);
7911 MVT LoadNodeVT = LN->getBasePtr().getSimpleValueType();
7912 if (PtrVT != LoadNodeVT) {
7916 return DAG.
getExtLoad(LN->getExtensionType(),
DL, LN->getValueType(0),
7917 LN->getChain(), AddrSpaceCast, LN->getMemoryVT(),
7918 LN->getMemOperand());
7928 SDNode *LoPart, *HiPart;
7936 LD->getPointerInfo(),
LD->getBaseAlign(),
7937 LD->getMemOperand()->getFlags(),
LD->getAAInfo());
7939 DCI.CombineTo(HiPart, EltLoad,
true);
7946 LD->getPointerInfo().getWithOffset(8),
LD->getBaseAlign(),
7947 LD->getMemOperand()->getFlags(),
LD->getAAInfo());
7949 DCI.CombineTo(LoPart, EltLoad,
true);
7956 DCI.AddToWorklist(Chain.
getNode());
7971 for (SDUse &Use :
N->uses()) {
7972 if (
Use.getUser()->getOpcode() == SystemZISD::REPLICATE) {
7976 }
else if (
Use.getResNo() == 0)
7979 if (!Replicate || OtherUses.
empty())
7985 for (SDNode *U : OtherUses) {
7988 Ops.push_back((
Op.getNode() ==
N &&
Op.getResNo() == 0) ? Extract0 :
Op);
7994bool SystemZTargetLowering::canLoadStoreByteSwapped(
EVT VT)
const {
7995 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
7997 if (Subtarget.hasVectorEnhancements2())
7998 if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 || VT == MVT::i128)
8010 for (
unsigned i = 0; i < NumElts; ++i) {
8011 if (M[i] < 0)
continue;
8012 if ((
unsigned) M[i] != NumElts - 1 - i)
8020 for (
auto *U : StoredVal->
users()) {
8022 EVT CurrMemVT = ST->getMemoryVT().getScalarType();
8081SDValue SystemZTargetLowering::combineSTORE(
8082 SDNode *
N, DAGCombinerInfo &DCI)
const {
8083 SelectionDAG &DAG = DCI.DAG;
8086 EVT MemVT = SN->getMemoryVT();
8090 MVT StoreNodeVT = SN->getBasePtr().getSimpleValueType();
8091 if (PtrVT != StoreNodeVT) {
8095 return DAG.
getStore(SN->getChain(),
DL, SN->getValue(), AddrSpaceCast,
8096 SN->getPointerInfo(), SN->getBaseAlign(),
8097 SN->getMemOperand()->getFlags(), SN->getAAInfo());
8105 if (MemVT.
isInteger() && SN->isTruncatingStore()) {
8107 combineTruncateExtract(SDLoc(
N), MemVT, SN->getValue(), DCI)) {
8108 DCI.AddToWorklist(
Value.getNode());
8112 SN->getBasePtr(), SN->getMemoryVT(),
8113 SN->getMemOperand());
8117 if (!SN->isTruncatingStore() &&
8133 Ops, MemVT, SN->getMemOperand());
8136 if (!SN->isTruncatingStore() &&
8139 Subtarget.hasVectorEnhancements2()) {
8141 ArrayRef<int> ShuffleMask = SVN->
getMask();
8149 Ops, MemVT, SN->getMemOperand());
8154 if (!SN->isTruncatingStore() &&
8157 N->getOperand(0).reachesChainWithoutSideEffects(
SDValue(Op1.
getNode(), 1))) {
8161 Ops, MemVT, SN->getMemOperand());
8171 SN->getChain(),
DL, HiPart, SN->getBasePtr(), SN->getPointerInfo(),
8172 SN->getBaseAlign(), SN->getMemOperand()->getFlags(), SN->getAAInfo());
8174 SN->getChain(),
DL, LoPart,
8176 SN->getPointerInfo().getWithOffset(8), SN->getBaseAlign(),
8177 SN->getMemOperand()->
getFlags(), SN->getAAInfo());
8195 auto FindReplicatedImm = [&](ConstantSDNode *
C,
unsigned TotBytes) {
8197 if (
C->getAPIntValue().getBitWidth() > 64 ||
C->isAllOnes() ||
8201 APInt Val =
C->getAPIntValue();
8204 assert(SN->isTruncatingStore() &&
8205 "Non-truncating store and immediate value does not fit?");
8206 Val = Val.
trunc(TotBytes * 8);
8209 SystemZVectorConstantInfo VCI(APInt(TotBytes * 8, Val.
getZExtValue()));
8210 if (VCI.isVectorConstantLegal(Subtarget) &&
8211 VCI.Opcode == SystemZISD::REPLICATE) {
8219 auto FindReplicatedReg = [&](
SDValue MulOp) {
8220 EVT MulVT = MulOp.getValueType();
8221 if (MulOp->getOpcode() ==
ISD::MUL &&
8222 (MulVT == MVT::i16 || MulVT == MVT::i32 || MulVT == MVT::i64)) {
8226 WordVT =
LHS->getOperand(0).getValueType();
8233 SystemZVectorConstantInfo VCI(
8235 if (VCI.isVectorConstantLegal(Subtarget) &&
8236 VCI.Opcode == SystemZISD::REPLICATE && VCI.OpVals[0] == 1 &&
8237 WordVT == VCI.VecVT.getScalarType())
8249 FindReplicatedReg(SplatVal);
8254 FindReplicatedReg(Op1);
8259 "Bad type handling");
8263 return DAG.
getStore(SN->getChain(), SDLoc(SN), SplatVal,
8264 SN->getBasePtr(), SN->getMemOperand());
8271SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
8272 SDNode *
N, DAGCombinerInfo &DCI)
const {
8273 SelectionDAG &DAG = DCI.DAG;
8276 N->getOperand(0).hasOneUse() &&
8277 Subtarget.hasVectorEnhancements2()) {
8279 ArrayRef<int> ShuffleMask = SVN->
getMask();
8292 Ops,
LD->getMemoryVT(),
LD->getMemOperand());
8296 DCI.CombineTo(
N, ESLoad);
8300 DCI.CombineTo(
Load.getNode(), ESLoad, ESLoad.
getValue(1));
8310SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
8311 SDNode *
N, DAGCombinerInfo &DCI)
const {
8312 SelectionDAG &DAG = DCI.DAG;
8314 if (!Subtarget.hasVector())
8320 Op.getValueType().isVector() &&
8321 Op.getOperand(0).getValueType().isVector() &&
8322 Op.getValueType().getVectorNumElements() ==
8323 Op.getOperand(0).getValueType().getVectorNumElements())
8324 Op =
Op.getOperand(0);
8328 EVT VecVT =
Op.getValueType();
8331 Op.getOperand(0),
N->getOperand(1));
8332 DCI.AddToWorklist(
Op.getNode());
8334 if (EltVT !=
N->getValueType(0)) {
8335 DCI.AddToWorklist(
Op.getNode());
8345 if (canTreatAsByteVector(VecVT))
8346 return combineExtract(SDLoc(
N),
N->getValueType(0), VecVT, Op0,
8347 IndexN->getZExtValue(), DCI,
false);
8352SDValue SystemZTargetLowering::combineJOIN_DWORDS(
8353 SDNode *
N, DAGCombinerInfo &DCI)
const {
8354 SelectionDAG &DAG = DCI.DAG;
8356 if (
N->getOperand(0) ==
N->getOperand(1))
8357 return DAG.
getNode(SystemZISD::REPLICATE, SDLoc(
N),
N->getValueType(0),
8367 if (Chain1 == Chain2)
8375SDValue SystemZTargetLowering::combineFP_ROUND(
8376 SDNode *
N, DAGCombinerInfo &DCI)
const {
8378 if (!Subtarget.hasVector())
8387 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
8388 SelectionDAG &DAG = DCI.DAG;
8390 if (
N->getValueType(0) == MVT::f32 && Op0.
hasOneUse() &&
8396 for (
auto *U : Vec->
users()) {
8397 if (U != Op0.
getNode() &&
U->hasOneUse() &&
8399 U->getOperand(0) == Vec &&
8401 U->getConstantOperandVal(1) == 1) {
8403 if (OtherRound.
getOpcode() ==
N->getOpcode() &&
8407 if (
N->isStrictFPOpcode()) {
8411 VRound = DAG.
getNode(SystemZISD::STRICT_VROUND, SDLoc(
N),
8412 {MVT::v4f32, MVT::Other}, {Chain, Vec});
8415 VRound = DAG.
getNode(SystemZISD::VROUND, SDLoc(
N),
8417 DCI.AddToWorklist(VRound.
getNode());
8421 DCI.AddToWorklist(Extract1.
getNode());
8427 VRound, DAG.
getConstant(0, SDLoc(Op0), MVT::i32));
8430 N->getVTList(), Extract0, Chain);
8439SDValue SystemZTargetLowering::combineFP_EXTEND(
8440 SDNode *
N, DAGCombinerInfo &DCI)
const {
8442 if (!Subtarget.hasVector())
8451 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
8452 SelectionDAG &DAG = DCI.DAG;
8454 if (
N->getValueType(0) == MVT::f64 && Op0.
hasOneUse() &&
8460 for (
auto *U : Vec->
users()) {
8461 if (U != Op0.
getNode() &&
U->hasOneUse() &&
8463 U->getOperand(0) == Vec &&
8465 U->getConstantOperandVal(1) == 2) {
8467 if (OtherExtend.
getOpcode() ==
N->getOpcode() &&
8471 if (
N->isStrictFPOpcode()) {
8475 VExtend = DAG.
getNode(SystemZISD::STRICT_VEXTEND, SDLoc(
N),
8476 {MVT::v2f64, MVT::Other}, {Chain, Vec});
8479 VExtend = DAG.
getNode(SystemZISD::VEXTEND, SDLoc(
N),
8481 DCI.AddToWorklist(VExtend.
getNode());
8485 DCI.AddToWorklist(Extract1.
getNode());
8491 VExtend, DAG.
getConstant(0, SDLoc(Op0), MVT::i32));
8494 N->getVTList(), Extract0, Chain);
8503SDValue SystemZTargetLowering::combineINT_TO_FP(
8504 SDNode *
N, DAGCombinerInfo &DCI)
const {
8507 SelectionDAG &DAG = DCI.DAG;
8509 unsigned Opcode =
N->getOpcode();
8510 EVT OutVT =
N->getValueType(0);
8514 unsigned InScalarBits =
Op->getValueType(0).getScalarSizeInBits();
8520 if (OutLLVMTy->
isVectorTy() && OutScalarBits > InScalarBits &&
8521 OutScalarBits <= 64) {
8525 unsigned ExtOpcode =
8528 return DAG.
getNode(Opcode, SDLoc(
N), OutVT, ExtOp);
8533SDValue SystemZTargetLowering::combineFCOPYSIGN(
8534 SDNode *
N, DAGCombinerInfo &DCI)
const {
8535 SelectionDAG &DAG = DCI.DAG;
8536 EVT VT =
N->getValueType(0);
8549SDValue SystemZTargetLowering::combineBSWAP(
8550 SDNode *
N, DAGCombinerInfo &DCI)
const {
8551 SelectionDAG &DAG = DCI.DAG;
8554 N->getOperand(0).hasOneUse() &&
8555 canLoadStoreByteSwapped(
N->getValueType(0))) {
8564 EVT LoadVT =
N->getValueType(0);
8565 if (LoadVT == MVT::i16)
8570 Ops,
LD->getMemoryVT(),
LD->getMemOperand());
8574 if (
N->getValueType(0) == MVT::i16)
8579 DCI.CombineTo(
N, ResVal);
8583 DCI.CombineTo(
Load.getNode(), ResVal, BSLoad.
getValue(1));
8592 Op.getValueType().isVector() &&
8593 Op.getOperand(0).getValueType().isVector() &&
8594 Op.getValueType().getVectorNumElements() ==
8595 Op.getOperand(0).getValueType().getVectorNumElements())
8596 Op =
Op.getOperand(0);
8608 (canLoadStoreByteSwapped(
N->getValueType(0)) &&
8610 EVT VecVT =
N->getValueType(0);
8614 DCI.AddToWorklist(Vec.
getNode());
8618 DCI.AddToWorklist(Elt.
getNode());
8621 DCI.AddToWorklist(Vec.
getNode());
8623 DCI.AddToWorklist(Elt.
getNode());
8631 if (SV &&
Op.hasOneUse()) {
8639 EVT VecVT =
N->getValueType(0);
8642 DCI.AddToWorklist(Op0.
getNode());
8646 DCI.AddToWorklist(Op1.
getNode());
8649 DCI.AddToWorklist(Op0.
getNode());
8651 DCI.AddToWorklist(Op1.
getNode());
8659SDValue SystemZTargetLowering::combineSETCC(
8660 SDNode *
N, DAGCombinerInfo &DCI)
const {
8661 SelectionDAG &DAG = DCI.DAG;
8667 EVT VT =
N->getValueType(0);
8677 Src.getValueType().isFixedLengthVector() &&
8678 Src.getValueType().getScalarType() == MVT::i1) {
8679 EVT CmpVT = Src.getOperand(0).getValueType();
8699 case SystemZISD::IPM:
8704 case SystemZISD::SELECT_CCMASK: {
8706 if (Op4CCReg.
getOpcode() == SystemZISD::ICMP ||
8707 Op4CCReg.
getOpcode() == SystemZISD::TM) {
8710 return std::make_pair(OpCC, OpCCValid);
8715 int CCValidVal = CCValid->getZExtValue();
8716 return std::make_pair(Op4CCReg, CCValidVal);
8727 return std::make_pair(Op0CC, Op0CCValid);
8743 return {Val, Val, Val, Val};
8744 case SystemZISD::IPM: {
8749 for (
auto CC : {0, 1, 2, 3})
8752 return ShiftedCCVals;
8754 case SystemZISD::SELECT_CCMASK: {
8758 if (!CCValid || !CCMask)
8761 int CCValidVal = CCValid->getZExtValue();
8762 int CCMaskVal = CCMask->getZExtValue();
8772 if (TrueSDVals.empty() || FalseSDVals.empty())
8775 for (
auto &CCVal : {0, 1, 2, 3})
8776 MergedSDVals.
emplace_back(((CCMaskVal & (1 << (3 - CCVal))) != 0)
8778 : FalseSDVals[CCVal]);
8779 return MergedSDVals;
8796 if (Op0SDVals.empty() || Op1SDVals.empty())
8799 for (
auto CCVal : {0, 1, 2, 3})
8801 Opcode,
DL, Val.
getValueType(), Op0SDVals[CCVal], Op1SDVals[CCVal]));
8802 return BinaryOpSDVals;
8813 auto *CCNode = CCReg.
getNode();
8817 if (CCNode->getOpcode() == SystemZISD::TM) {
8820 auto emulateTMCCMask = [](
const SDValue &Op0Val,
const SDValue &Op1Val) {
8823 if (!Op0Node || !Op1Node)
8825 auto Op0APVal = Op0Node->getAPIntValue();
8826 auto Op1APVal = Op1Node->getAPIntValue();
8827 auto Result = Op0APVal & Op1APVal;
8828 bool AllOnes = Result == Op1APVal;
8829 bool AllZeros = Result == 0;
8830 bool IsLeftMostBitSet = Result[Op1APVal.getActiveBits()] != 0;
8831 return AllZeros ? 0 :
AllOnes ? 3 : IsLeftMostBitSet ? 2 : 1;
8835 auto [Op0CC, Op0CCValid] =
findCCUse(Op0);
8840 if (Op0SDVals.empty() || Op1SDVals.empty())
8843 for (
auto CC : {0, 1, 2, 3}) {
8844 auto CCVal = emulateTMCCMask(Op0SDVals[CC], Op1SDVals[CC]);
8848 NewCCMask |= (CCMask & (1 << (3 - CCVal))) != 0;
8850 NewCCMask &= Op0CCValid;
8853 CCValid = Op0CCValid;
8856 if (CCNode->getOpcode() != SystemZISD::ICMP ||
8863 auto [Op0CC, Op0CCValid] =
findCCUse(CmpOp0);
8867 if (Op0SDVals.empty() || Op1SDVals.empty())
8871 auto CmpTypeVal = CmpType->getZExtValue();
8872 const auto compareCCSigned = [&CmpTypeVal](
const SDValue &Op0Val,
8876 if (!Op0Node || !Op1Node)
8878 auto Op0APVal = Op0Node->getAPIntValue();
8879 auto Op1APVal = Op1Node->getAPIntValue();
8881 return Op0APVal == Op1APVal ? 0 : Op0APVal.slt(Op1APVal) ? 1 : 2;
8882 return Op0APVal == Op1APVal ? 0 : Op0APVal.ult(Op1APVal) ? 1 : 2;
8885 for (
auto CC : {0, 1, 2, 3}) {
8886 auto CCVal = compareCCSigned(Op0SDVals[CC], Op1SDVals[CC]);
8890 NewCCMask |= (CCMask & (1 << (3 - CCVal))) != 0;
8892 NewCCMask &= Op0CCValid;
8895 CCValid = Op0CCValid;
8906 const Value *Rhs)
const {
8907 const auto isFlagOutOpCC = [](
const Value *V) {
8909 const Value *RHSVal;
8916 if (CB->isInlineAsm()) {
8918 return IA && IA->getConstraintString().contains(
"{@cc}");
8929 if (isFlagOutOpCC(Lhs) && isFlagOutOpCC(Rhs))
8932 return {-1, -1, -1};
8936 DAGCombinerInfo &DCI)
const {
8942 if (!CCValid || !CCMask)
8945 int CCValidVal = CCValid->getZExtValue();
8946 int CCMaskVal = CCMask->getZExtValue();
8953 if (
combineCCMask(CCReg, CCValidVal, CCMaskVal, DAG) && CCMaskVal != 0 &&
8954 CCMaskVal != CCValidVal)
8955 return DAG.
getNode(SystemZISD::BR_CCMASK,
SDLoc(
N),
N->getValueType(0),
8959 N->getOperand(3), CCReg);
8963SDValue SystemZTargetLowering::combineSELECT_CCMASK(
8964 SDNode *
N, DAGCombinerInfo &DCI)
const {
8970 if (!CCValid || !CCMask)
8973 int CCValidVal = CCValid->getZExtValue();
8974 int CCMaskVal = CCMask->getZExtValue();
8977 bool IsCombinedCCReg =
combineCCMask(CCReg, CCValidVal, CCMaskVal, DAG);
8981 const auto constructCCSDValsFromSELECT = [&CCReg](
SDValue &Val) {
8982 if (Val.getOpcode() == SystemZISD::SELECT_CCMASK) {
8984 if (Val.getOperand(4) != CCReg)
8991 int CCMaskVal = CCMask->getZExtValue();
8992 for (
auto &CC : {0, 1, 2, 3})
8993 Res.
emplace_back(((CCMaskVal & (1 << (3 - CC))) != 0) ? TrueVal
9007 if (TrueSDVals.empty())
9008 TrueSDVals = constructCCSDValsFromSELECT(TrueVal);
9009 if (FalseSDVals.empty())
9010 FalseSDVals = constructCCSDValsFromSELECT(FalseVal);
9011 if (!TrueSDVals.empty() && !FalseSDVals.empty()) {
9012 SmallSet<SDValue, 4> MergedSDValsSet;
9014 for (
auto CC : {0, 1, 2, 3}) {
9015 if ((CCValidVal & ((1 << (3 - CC)))) != 0)
9016 MergedSDValsSet.
insert(((CCMaskVal & (1 << (3 - CC))) != 0)
9020 if (MergedSDValsSet.
size() == 1)
9021 return *MergedSDValsSet.
begin();
9022 if (MergedSDValsSet.
size() == 2) {
9023 auto BeginIt = MergedSDValsSet.
begin();
9024 SDValue NewTrueVal = *BeginIt, NewFalseVal = *next(BeginIt);
9025 if (NewTrueVal == FalseVal || NewFalseVal == TrueVal)
9028 for (
auto CC : {0, 1, 2, 3}) {
9030 NewCCMask |= ((CCMaskVal & (1 << (3 - CC))) != 0)
9031 ? (TrueSDVals[CC] == NewTrueVal)
9032 : (FalseSDVals[CC] == NewTrueVal);
9034 CCMaskVal = NewCCMask;
9035 CCMaskVal &= CCValidVal;
9038 IsCombinedCCReg =
true;
9046 if (CCMaskVal == CCValidVal)
9049 if (IsCombinedCCReg)
9051 SystemZISD::SELECT_CCMASK, SDLoc(
N),
N->getValueType(0), TrueVal,
9058SDValue SystemZTargetLowering::combineGET_CCMASK(
9059 SDNode *
N, DAGCombinerInfo &DCI)
const {
9064 if (!CCValid || !CCMask)
9066 int CCValidVal = CCValid->getZExtValue();
9067 int CCMaskVal = CCMask->getZExtValue();
9072 if (
Select->getOpcode() != SystemZISD::SELECT_CCMASK)
9077 if (!SelectCCValid || !SelectCCMask)
9079 int SelectCCValidVal = SelectCCValid->getZExtValue();
9080 int SelectCCMaskVal = SelectCCMask->getZExtValue();
9084 if (!TrueVal || !FalseVal)
9088 else if (
TrueVal->getZExtValue() == 0 &&
FalseVal->getZExtValue() == 1)
9089 SelectCCMaskVal ^= SelectCCValidVal;
9093 if (SelectCCValidVal & ~CCValidVal)
9095 if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
9098 return Select->getOperand(4);
9101SDValue SystemZTargetLowering::combineIntDIVREM(
9102 SDNode *
N, DAGCombinerInfo &DCI)
const {
9103 SelectionDAG &DAG = DCI.DAG;
9104 EVT VT =
N->getValueType(0);
9121SDValue SystemZTargetLowering::combineShiftToMulAddHigh(
9122 SDNode *
N, DAGCombinerInfo &DCI)
const {
9123 SelectionDAG &DAG = DCI.DAG;
9127 "SRL or SRA node is required here!");
9129 if (!Subtarget.hasVector())
9139 SDValue ShiftOperand =
N->getOperand(0);
9159 if (!IsSignExt && !IsZeroExt)
9167 unsigned ActiveBits = IsSignExt
9168 ?
Constant->getAPIntValue().getSignificantBits()
9169 :
Constant->getAPIntValue().getActiveBits();
9170 if (ActiveBits > NarrowVTSize)
9186 unsigned ActiveBits = IsSignExt
9187 ?
Constant->getAPIntValue().getSignificantBits()
9188 :
Constant->getAPIntValue().getActiveBits();
9189 if (ActiveBits > NarrowVTSize)
9206 "Cannot have a multiply node with two different operand types.");
9208 "Cannot have an add node with two different operand types.");
9219 if (ShiftAmt != NarrowVTSize)
9223 if (!(NarrowVT == MVT::v16i8 || NarrowVT == MVT::v8i16 ||
9224 NarrowVT == MVT::v4i32 ||
9225 (Subtarget.hasVectorEnhancements3() &&
9226 (NarrowVT == MVT::v2i64 || NarrowVT == MVT::i128))))
9232 MulhRightOp, MulhAddOp);
9233 bool IsSigned =
N->getOpcode() ==
ISD::SRA;
9244 EVT VT =
Op.getValueType();
9253 Op =
Op.getOperand(0);
9254 if (
Op.getValueType().getVectorNumElements() == 2 * NumElts &&
9258 bool CanUseEven =
true, CanUseOdd =
true;
9259 for (
unsigned Elt = 0; Elt < NumElts; Elt++) {
9260 if (ShuffleMask[Elt] == -1)
9262 if (
unsigned(ShuffleMask[Elt]) != 2 * Elt)
9264 if (
unsigned(ShuffleMask[Elt]) != 2 * Elt + 1)
9267 Op =
Op.getOperand(0);
9269 return IsSigned ? SystemZISD::VME : SystemZISD::VMLE;
9271 return IsSigned ? SystemZISD::VMO : SystemZISD::VMLO;
9277 if (VT == MVT::i128 && Subtarget.hasVectorEnhancements3() &&
9281 Op =
Op.getOperand(0);
9283 Op.getOperand(0).getValueType() == MVT::v2i64 &&
9285 unsigned Elem =
Op.getConstantOperandVal(1);
9286 Op =
Op.getOperand(0);
9288 return IsSigned ? SystemZISD::VME : SystemZISD::VMLE;
9290 return IsSigned ? SystemZISD::VMO : SystemZISD::VMLO;
9297SDValue SystemZTargetLowering::combineMUL(
9298 SDNode *
N, DAGCombinerInfo &DCI)
const {
9299 SelectionDAG &DAG = DCI.DAG;
9306 if (OpcodeCand0 && OpcodeCand0 == OpcodeCand1)
9307 return DAG.
getNode(OpcodeCand0, SDLoc(
N),
N->getValueType(0), Op0, Op1);
9312SDValue SystemZTargetLowering::combineINTRINSIC(
9313 SDNode *
N, DAGCombinerInfo &DCI)
const {
9314 SelectionDAG &DAG = DCI.DAG;
9316 unsigned Id =
N->getConstantOperandVal(1);
9320 case Intrinsic::s390_vll:
9321 case Intrinsic::s390_vlrl:
9323 if (
C->getZExtValue() >= 15)
9324 return DAG.
getLoad(
N->getValueType(0), SDLoc(
N),
N->getOperand(0),
9325 N->getOperand(3), MachinePointerInfo());
9328 case Intrinsic::s390_vstl:
9329 case Intrinsic::s390_vstrl:
9331 if (
C->getZExtValue() >= 15)
9332 return DAG.
getStore(
N->getOperand(0), SDLoc(
N),
N->getOperand(2),
9333 N->getOperand(4), MachinePointerInfo());
9341 if (
N->getOpcode() == SystemZISD::PCREL_WRAPPER)
9348 switch(
N->getOpcode()) {
9353 case SystemZISD::MERGE_HIGH:
9354 case SystemZISD::MERGE_LOW:
return combineMERGE(
N, DCI);
9359 case SystemZISD::JOIN_DWORDS:
return combineJOIN_DWORDS(
N, DCI);
9369 case SystemZISD::BR_CCMASK:
return combineBR_CCMASK(
N, DCI);
9370 case SystemZISD::SELECT_CCMASK:
return combineSELECT_CCMASK(
N, DCI);
9373 case ISD::SRA:
return combineShiftToMulAddHigh(
N, DCI);
9374 case ISD::MUL:
return combineMUL(
N, DCI);
9378 case ISD::UREM:
return combineIntDIVREM(
N, DCI);
9390 EVT VT =
Op.getValueType();
9393 unsigned Opcode =
Op.getOpcode();
9395 unsigned Id =
Op.getConstantOperandVal(0);
9397 case Intrinsic::s390_vpksh:
9398 case Intrinsic::s390_vpksf:
9399 case Intrinsic::s390_vpksg:
9400 case Intrinsic::s390_vpkshs:
9401 case Intrinsic::s390_vpksfs:
9402 case Intrinsic::s390_vpksgs:
9403 case Intrinsic::s390_vpklsh:
9404 case Intrinsic::s390_vpklsf:
9405 case Intrinsic::s390_vpklsg:
9406 case Intrinsic::s390_vpklshs:
9407 case Intrinsic::s390_vpklsfs:
9408 case Intrinsic::s390_vpklsgs:
9410 SrcDemE = DemandedElts;
9413 SrcDemE = SrcDemE.
trunc(NumElts / 2);
9416 case Intrinsic::s390_vuphb:
9417 case Intrinsic::s390_vuphh:
9418 case Intrinsic::s390_vuphf:
9419 case Intrinsic::s390_vuplhb:
9420 case Intrinsic::s390_vuplhh:
9421 case Intrinsic::s390_vuplhf:
9422 SrcDemE =
APInt(NumElts * 2, 0);
9425 case Intrinsic::s390_vuplb:
9426 case Intrinsic::s390_vuplhw:
9427 case Intrinsic::s390_vuplf:
9428 case Intrinsic::s390_vupllb:
9429 case Intrinsic::s390_vupllh:
9430 case Intrinsic::s390_vupllf:
9431 SrcDemE =
APInt(NumElts * 2, 0);
9434 case Intrinsic::s390_vpdi: {
9436 SrcDemE =
APInt(NumElts, 0);
9437 if (!DemandedElts[OpNo - 1])
9439 unsigned Mask =
Op.getConstantOperandVal(3);
9440 unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
9442 SrcDemE.
setBit((Mask & MaskBit)? 1 : 0);
9445 case Intrinsic::s390_vsldb: {
9447 assert(VT == MVT::v16i8 &&
"Unexpected type.");
9448 unsigned FirstIdx =
Op.getConstantOperandVal(3);
9449 assert (FirstIdx > 0 && FirstIdx < 16 &&
"Unused operand.");
9450 unsigned NumSrc0Els = 16 - FirstIdx;
9451 SrcDemE =
APInt(NumElts, 0);
9453 APInt DemEls = DemandedElts.
trunc(NumSrc0Els);
9456 APInt DemEls = DemandedElts.
lshr(NumSrc0Els);
9461 case Intrinsic::s390_vperm:
9470 case SystemZISD::JOIN_DWORDS:
9472 SrcDemE =
APInt(1, 1);
9474 case SystemZISD::SELECT_CCMASK:
9475 SrcDemE = DemandedElts;
9486 const APInt &DemandedElts,
9501 const APInt &DemandedElts,
9503 unsigned Depth)
const {
9507 unsigned Tmp0, Tmp1;
9512 EVT VT =
Op.getValueType();
9513 if (
Op.getResNo() != 0 || VT == MVT::Untyped)
9516 "KnownBits does not match VT in bitwidth");
9519 "DemandedElts does not match VT number of elements");
9521 unsigned Opcode =
Op.getOpcode();
9523 bool IsLogical =
false;
9524 unsigned Id =
Op.getConstantOperandVal(0);
9526 case Intrinsic::s390_vpksh:
9527 case Intrinsic::s390_vpksf:
9528 case Intrinsic::s390_vpksg:
9529 case Intrinsic::s390_vpkshs:
9530 case Intrinsic::s390_vpksfs:
9531 case Intrinsic::s390_vpksgs:
9532 case Intrinsic::s390_vpklsh:
9533 case Intrinsic::s390_vpklsf:
9534 case Intrinsic::s390_vpklsg:
9535 case Intrinsic::s390_vpklshs:
9536 case Intrinsic::s390_vpklsfs:
9537 case Intrinsic::s390_vpklsgs:
9538 case Intrinsic::s390_vpdi:
9539 case Intrinsic::s390_vsldb:
9540 case Intrinsic::s390_vperm:
9543 case Intrinsic::s390_vuplhb:
9544 case Intrinsic::s390_vuplhh:
9545 case Intrinsic::s390_vuplhf:
9546 case Intrinsic::s390_vupllb:
9547 case Intrinsic::s390_vupllh:
9548 case Intrinsic::s390_vupllf:
9551 case Intrinsic::s390_vuphb:
9552 case Intrinsic::s390_vuphh:
9553 case Intrinsic::s390_vuphf:
9554 case Intrinsic::s390_vuplb:
9555 case Intrinsic::s390_vuplhw:
9556 case Intrinsic::s390_vuplf: {
9571 case SystemZISD::JOIN_DWORDS:
9572 case SystemZISD::SELECT_CCMASK:
9575 case SystemZISD::REPLICATE: {
9598 if (
LHS == 1)
return 1;
9601 if (
RHS == 1)
return 1;
9602 unsigned Common = std::min(
LHS,
RHS);
9603 unsigned SrcBitWidth =
Op.getOperand(OpNo).getScalarValueSizeInBits();
9604 EVT VT =
Op.getValueType();
9606 if (SrcBitWidth > VTBits) {
9607 unsigned SrcExtraBits = SrcBitWidth - VTBits;
9608 if (Common > SrcExtraBits)
9609 return (Common - SrcExtraBits);
9612 assert (SrcBitWidth == VTBits &&
"Expected operands of same bitwidth.");
9619 unsigned Depth)
const {
9620 if (
Op.getResNo() != 0)
9622 unsigned Opcode =
Op.getOpcode();
9624 unsigned Id =
Op.getConstantOperandVal(0);
9626 case Intrinsic::s390_vpksh:
9627 case Intrinsic::s390_vpksf:
9628 case Intrinsic::s390_vpksg:
9629 case Intrinsic::s390_vpkshs:
9630 case Intrinsic::s390_vpksfs:
9631 case Intrinsic::s390_vpksgs:
9632 case Intrinsic::s390_vpklsh:
9633 case Intrinsic::s390_vpklsf:
9634 case Intrinsic::s390_vpklsg:
9635 case Intrinsic::s390_vpklshs:
9636 case Intrinsic::s390_vpklsfs:
9637 case Intrinsic::s390_vpklsgs:
9638 case Intrinsic::s390_vpdi:
9639 case Intrinsic::s390_vsldb:
9640 case Intrinsic::s390_vperm:
9642 case Intrinsic::s390_vuphb:
9643 case Intrinsic::s390_vuphh:
9644 case Intrinsic::s390_vuphf:
9645 case Intrinsic::s390_vuplb:
9646 case Intrinsic::s390_vuplhw:
9647 case Intrinsic::s390_vuplf: {
9651 EVT VT =
Op.getValueType();
9661 case SystemZISD::SELECT_CCMASK:
9675 switch (
Op->getOpcode()) {
9676 case SystemZISD::PCREL_WRAPPER:
9677 case SystemZISD::PCREL_OFFSET:
9688 "Unexpected stack alignment");
9691 unsigned StackProbeSize =
9694 StackProbeSize &= ~(StackAlign - 1);
9695 return StackProbeSize ? StackProbeSize : StackAlign;
9734 if (
MI.readsRegister(SystemZ::CC,
nullptr))
9736 if (
MI.definesRegister(SystemZ::CC,
nullptr))
9742 if (miI ==
MBB->end()) {
9744 if (Succ->isLiveIn(SystemZ::CC))
9755 switch (
MI.getOpcode()) {
9756 case SystemZ::Select32:
9757 case SystemZ::Select64:
9758 case SystemZ::Select128:
9759 case SystemZ::SelectF32:
9760 case SystemZ::SelectF64:
9761 case SystemZ::SelectF128:
9762 case SystemZ::SelectVR32:
9763 case SystemZ::SelectVR64:
9764 case SystemZ::SelectVR128:
9796 for (
auto *
MI : Selects) {
9797 Register DestReg =
MI->getOperand(0).getReg();
9798 Register TrueReg =
MI->getOperand(1).getReg();
9799 Register FalseReg =
MI->getOperand(2).getReg();
9804 if (
MI->getOperand(4).getImm() == (CCValid ^ CCMask))
9807 if (
auto It = RegRewriteTable.
find(TrueReg); It != RegRewriteTable.
end())
9808 TrueReg = It->second.first;
9810 if (
auto It = RegRewriteTable.
find(FalseReg); It != RegRewriteTable.
end())
9811 FalseReg = It->second.second;
9814 BuildMI(*SinkMBB, SinkInsertionPoint,
DL,
TII->get(SystemZ::PHI), DestReg)
9819 RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
9830 auto *TFL = Subtarget.getFrameLowering<SystemZFrameLowering>();
9831 assert(TFL->hasReservedCallFrame(MF) &&
9832 "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
9837 uint32_t NumBytes =
MI.getOperand(0).getImm();
9842 MI.eraseFromParent();
9851 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
9853 unsigned CCValid =
MI.getOperand(3).getImm();
9854 unsigned CCMask =
MI.getOperand(4).getImm();
9859 SmallVector<MachineInstr*, 8> Selects;
9860 SmallVector<MachineInstr*, 8> DbgValues;
9866 assert(NextMI.getOperand(3).getImm() == CCValid &&
9867 "Bad CCValid operands since CC was not redefined.");
9868 if (NextMI.getOperand(4).getImm() == CCMask ||
9869 NextMI.getOperand(4).getImm() == (CCValid ^ CCMask)) {
9875 if (NextMI.definesRegister(SystemZ::CC,
nullptr) ||
9876 NextMI.usesCustomInsertionHook())
9879 for (
auto *SelMI : Selects)
9880 if (NextMI.readsVirtualRegister(SelMI->getOperand(0).getReg())) {
9884 if (NextMI.isDebugInstr()) {
9886 assert(NextMI.isDebugValue() &&
"Unhandled debug opcode.");
9889 }
else if (User || ++
Count > 20)
9893 MachineInstr *LastMI = Selects.back();
9894 bool CCKilled = (LastMI->
killsRegister(SystemZ::CC,
nullptr) ||
9896 MachineBasicBlock *StartMBB =
MBB;
9926 for (
auto *SelMI : Selects)
9927 SelMI->eraseFromParent();
9930 for (
auto *DbgMI : DbgValues)
9931 MBB->
splice(InsertPos, StartMBB, DbgMI);
9942 unsigned StoreOpcode,
9943 unsigned STOCOpcode,
9944 bool Invert)
const {
9945 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
9948 MachineOperand
Base =
MI.getOperand(1);
9949 int64_t Disp =
MI.getOperand(2).getImm();
9950 Register IndexReg =
MI.getOperand(3).getReg();
9951 unsigned CCValid =
MI.getOperand(4).getImm();
9952 unsigned CCMask =
MI.getOperand(5).getImm();
9955 StoreOpcode =
TII->getOpcodeForOffset(StoreOpcode, Disp);
9959 MachineMemOperand *MMO =
nullptr;
9960 for (
auto *
I :
MI.memoperands())
9969 if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
9981 MI.eraseFromParent();
9989 MachineBasicBlock *StartMBB =
MBB;
9995 if (!
MI.killsRegister(SystemZ::CC,
nullptr) &&
10022 MI.eraseFromParent();
10032 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10040 MachineBasicBlock *StartMBB =
MBB;
10058 int HiOpcode =
Unsigned? SystemZ::VECLG : SystemZ::VECG;
10085 MI.eraseFromParent();
10096 bool Invert)
const {
10098 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10105 int64_t Disp =
MI.getOperand(2).getImm();
10107 Register BitShift =
MI.getOperand(4).getReg();
10108 Register NegBitShift =
MI.getOperand(5).getReg();
10109 unsigned BitSize =
MI.getOperand(6).getImm();
10113 unsigned LOpcode =
TII->getOpcodeForOffset(SystemZ::L, Disp);
10114 unsigned CSOpcode =
TII->getOpcodeForOffset(SystemZ::CS, Disp);
10115 assert(LOpcode && CSOpcode &&
"Displacement out of range");
10125 MachineBasicBlock *StartMBB =
MBB;
10158 }
else if (BinOpcode)
10181 MI.eraseFromParent();
10192 unsigned KeepOldMask)
const {
10194 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10200 int64_t Disp =
MI.getOperand(2).getImm();
10202 Register BitShift =
MI.getOperand(4).getReg();
10203 Register NegBitShift =
MI.getOperand(5).getReg();
10204 unsigned BitSize =
MI.getOperand(6).getImm();
10208 unsigned LOpcode =
TII->getOpcodeForOffset(SystemZ::L, Disp);
10209 unsigned CSOpcode =
TII->getOpcodeForOffset(SystemZ::CS, Disp);
10210 assert(LOpcode && CSOpcode &&
"Displacement out of range");
10221 MachineBasicBlock *StartMBB =
MBB;
10285 MI.eraseFromParent();
10295 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10301 int64_t Disp =
MI.getOperand(2).getImm();
10302 Register CmpVal =
MI.getOperand(3).getReg();
10303 Register OrigSwapVal =
MI.getOperand(4).getReg();
10304 Register BitShift =
MI.getOperand(5).getReg();
10305 Register NegBitShift =
MI.getOperand(6).getReg();
10306 int64_t BitSize =
MI.getOperand(7).getImm();
10309 const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
10312 unsigned LOpcode =
TII->getOpcodeForOffset(SystemZ::L, Disp);
10313 unsigned CSOpcode =
TII->getOpcodeForOffset(SystemZ::CS, Disp);
10314 unsigned ZExtOpcode = BitSize == 8 ? SystemZ::LLCR : SystemZ::LLHR;
10315 assert(LOpcode && CSOpcode &&
"Displacement out of range");
10327 MachineBasicBlock *StartMBB =
MBB;
10399 if (!
MI.registerDefIsDead(SystemZ::CC,
nullptr))
10402 MI.eraseFromParent();
10410 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10415 .
add(
MI.getOperand(1))
10416 .
addImm(SystemZ::subreg_h64)
10417 .
add(
MI.getOperand(2))
10418 .
addImm(SystemZ::subreg_l64);
10419 MI.eraseFromParent();
10428 bool ClearEven)
const {
10430 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10452 MI.eraseFromParent();
10459 unsigned Opcode,
bool IsMemset)
const {
10461 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10466 uint64_t DestDisp =
MI.getOperand(1).getImm();
10471 auto foldDisplIfNeeded = [&](MachineOperand &
Base, uint64_t &Disp) ->
void {
10474 unsigned Opcode =
TII->getOpcodeForOffset(SystemZ::LA, Disp);
10484 SrcDisp =
MI.getOperand(3).getImm();
10486 SrcBase = DestBase;
10487 SrcDisp = DestDisp++;
10488 foldDisplIfNeeded(DestBase, DestDisp);
10491 MachineOperand &LengthMO =
MI.getOperand(IsMemset ? 2 : 4);
10492 bool IsImmForm = LengthMO.
isImm();
10493 bool IsRegForm = !IsImmForm;
10496 auto insertMemMemOp = [&](MachineBasicBlock *InsMBB,
10498 MachineOperand DBase, uint64_t DDisp,
10499 MachineOperand
SBase, uint64_t SDisp,
10500 unsigned Length) ->
void {
10504 if (ByteMO.
isImm())
10519 bool NeedsLoop =
false;
10520 uint64_t ImmLength = 0;
10521 Register LenAdjReg = SystemZ::NoRegister;
10523 ImmLength = LengthMO.
getImm();
10524 ImmLength += IsMemset ? 2 : 1;
10525 if (ImmLength == 0) {
10526 MI.eraseFromParent();
10529 if (Opcode == SystemZ::CLC) {
10530 if (ImmLength > 3 * 256)
10540 }
else if (ImmLength > 6 * 256)
10548 LenAdjReg = LengthMO.
getReg();
10553 MachineBasicBlock *EndMBB =
10554 (Opcode == SystemZ::CLC && (ImmLength > 256 || NeedsLoop)
10562 TII->loadImmediate(*
MBB,
MI, StartCountReg, ImmLength / 256);
10572 auto loadZeroAddress = [&]() -> MachineOperand {
10577 if (DestBase.
isReg() && DestBase.
getReg() == SystemZ::NoRegister)
10578 DestBase = loadZeroAddress();
10579 if (SrcBase.
isReg() && SrcBase.
getReg() == SystemZ::NoRegister)
10580 SrcBase = HaveSingleBase ? DestBase : loadZeroAddress();
10582 MachineBasicBlock *StartMBB =
nullptr;
10583 MachineBasicBlock *LoopMBB =
nullptr;
10584 MachineBasicBlock *NextMBB =
nullptr;
10585 MachineBasicBlock *DoneMBB =
nullptr;
10586 MachineBasicBlock *AllDoneMBB =
nullptr;
10590 (HaveSingleBase ? StartSrcReg :
forceReg(
MI, DestBase,
TII));
10592 const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
10599 RC = &SystemZ::GR64BitRegClass;
10627 MBB = MemsetOneCheckMBB;
10638 MBB = MemsetOneMBB;
10670 if (EndMBB && !ImmLength)
10692 if (!HaveSingleBase)
10699 if (Opcode == SystemZ::MVC)
10726 if (!HaveSingleBase)
10749 Register RemDestReg = HaveSingleBase ? RemSrcReg
10754 if (!HaveSingleBase)
10762 MachineInstrBuilder EXRL_MIB =
10770 if (Opcode != SystemZ::MVC) {
10780 while (ImmLength > 0) {
10781 uint64_t ThisLength = std::min(ImmLength, uint64_t(256));
10784 foldDisplIfNeeded(DestBase, DestDisp);
10785 foldDisplIfNeeded(SrcBase, SrcDisp);
10786 insertMemMemOp(
MBB,
MI, DestBase, DestDisp, SrcBase, SrcDisp, ThisLength);
10787 DestDisp += ThisLength;
10788 SrcDisp += ThisLength;
10789 ImmLength -= ThisLength;
10792 if (EndMBB && ImmLength > 0) {
10808 MI.eraseFromParent();
10817 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10821 uint64_t End1Reg =
MI.getOperand(0).getReg();
10822 uint64_t Start1Reg =
MI.getOperand(1).getReg();
10823 uint64_t Start2Reg =
MI.getOperand(2).getReg();
10824 uint64_t CharReg =
MI.getOperand(3).getReg();
10826 const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
10831 MachineBasicBlock *StartMBB =
MBB;
10867 MI.eraseFromParent();
10874 bool NoFloat)
const {
10876 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
10877 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10880 MI.setDesc(
TII->get(Opcode));
10884 uint64_t Control =
MI.getOperand(2).getImm();
10885 static const unsigned GPRControlBit[16] = {
10886 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
10887 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
10889 Control |= GPRControlBit[15];
10890 if (TFI->
hasFP(MF))
10891 Control |= GPRControlBit[11];
10892 MI.getOperand(2).setImm(Control);
10895 for (
int I = 0;
I < 16;
I++) {
10896 if ((Control & GPRControlBit[
I]) == 0) {
10903 if (!NoFloat && (Control & 4) != 0) {
10904 if (Subtarget.hasVector()) {
10921 MachineRegisterInfo *MRI = &MF.
getRegInfo();
10922 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10925 Register SrcReg =
MI.getOperand(0).getReg();
10928 const TargetRegisterClass *RC = MRI->
getRegClass(SrcReg);
10936 MI.eraseFromParent();
10944 MachineRegisterInfo *MRI = &MF.
getRegInfo();
10945 const SystemZInstrInfo *
TII = Subtarget.getInstrInfo();
10948 Register DstReg =
MI.getOperand(0).getReg();
10949 Register SizeReg =
MI.getOperand(2).getReg();
10951 MachineBasicBlock *StartMBB =
MBB;
11027 MI.eraseFromParent();
11031SDValue SystemZTargetLowering::
11034 auto *TFL = Subtarget.getFrameLowering<SystemZELFFrameLowering>();
11042 switch (
MI.getOpcode()) {
11043 case SystemZ::ADJCALLSTACKDOWN:
11044 case SystemZ::ADJCALLSTACKUP:
11045 return emitAdjCallStack(
MI,
MBB);
11047 case SystemZ::Select32:
11048 case SystemZ::Select64:
11049 case SystemZ::Select128:
11050 case SystemZ::SelectF32:
11051 case SystemZ::SelectF64:
11052 case SystemZ::SelectF128:
11053 case SystemZ::SelectVR32:
11054 case SystemZ::SelectVR64:
11055 case SystemZ::SelectVR128:
11056 return emitSelect(
MI,
MBB);
11058 case SystemZ::CondStore8Mux:
11059 return emitCondStore(
MI,
MBB, SystemZ::STCMux, 0,
false);
11060 case SystemZ::CondStore8MuxInv:
11061 return emitCondStore(
MI,
MBB, SystemZ::STCMux, 0,
true);
11062 case SystemZ::CondStore16Mux:
11063 return emitCondStore(
MI,
MBB, SystemZ::STHMux, 0,
false);
11064 case SystemZ::CondStore16MuxInv:
11065 return emitCondStore(
MI,
MBB, SystemZ::STHMux, 0,
true);
11066 case SystemZ::CondStore32Mux:
11067 return emitCondStore(
MI,
MBB, SystemZ::STMux, SystemZ::STOCMux,
false);
11068 case SystemZ::CondStore32MuxInv:
11069 return emitCondStore(
MI,
MBB, SystemZ::STMux, SystemZ::STOCMux,
true);
11070 case SystemZ::CondStore8:
11071 return emitCondStore(
MI,
MBB, SystemZ::STC, 0,
false);
11072 case SystemZ::CondStore8Inv:
11073 return emitCondStore(
MI,
MBB, SystemZ::STC, 0,
true);
11074 case SystemZ::CondStore16:
11075 return emitCondStore(
MI,
MBB, SystemZ::STH, 0,
false);
11076 case SystemZ::CondStore16Inv:
11077 return emitCondStore(
MI,
MBB, SystemZ::STH, 0,
true);
11078 case SystemZ::CondStore32:
11079 return emitCondStore(
MI,
MBB, SystemZ::ST, SystemZ::STOC,
false);
11080 case SystemZ::CondStore32Inv:
11081 return emitCondStore(
MI,
MBB, SystemZ::ST, SystemZ::STOC,
true);
11082 case SystemZ::CondStore64:
11083 return emitCondStore(
MI,
MBB, SystemZ::STG, SystemZ::STOCG,
false);
11084 case SystemZ::CondStore64Inv:
11085 return emitCondStore(
MI,
MBB, SystemZ::STG, SystemZ::STOCG,
true);
11086 case SystemZ::CondStoreF32:
11087 return emitCondStore(
MI,
MBB, SystemZ::STE, 0,
false);
11088 case SystemZ::CondStoreF32Inv:
11089 return emitCondStore(
MI,
MBB, SystemZ::STE, 0,
true);
11090 case SystemZ::CondStoreF64:
11091 return emitCondStore(
MI,
MBB, SystemZ::STD, 0,
false);
11092 case SystemZ::CondStoreF64Inv:
11093 return emitCondStore(
MI,
MBB, SystemZ::STD, 0,
true);
11095 case SystemZ::SCmp128Hi:
11096 return emitICmp128Hi(
MI,
MBB,
false);
11097 case SystemZ::UCmp128Hi:
11098 return emitICmp128Hi(
MI,
MBB,
true);
11100 case SystemZ::PAIR128:
11101 return emitPair128(
MI,
MBB);
11102 case SystemZ::AEXT128:
11103 return emitExt128(
MI,
MBB,
false);
11104 case SystemZ::ZEXT128:
11105 return emitExt128(
MI,
MBB,
true);
11107 case SystemZ::ATOMIC_SWAPW:
11108 return emitAtomicLoadBinary(
MI,
MBB, 0);
11110 case SystemZ::ATOMIC_LOADW_AR:
11111 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::AR);
11112 case SystemZ::ATOMIC_LOADW_AFI:
11113 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::AFI);
11115 case SystemZ::ATOMIC_LOADW_SR:
11116 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::SR);
11118 case SystemZ::ATOMIC_LOADW_NR:
11119 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NR);
11120 case SystemZ::ATOMIC_LOADW_NILH:
11121 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NILH);
11123 case SystemZ::ATOMIC_LOADW_OR:
11124 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::OR);
11125 case SystemZ::ATOMIC_LOADW_OILH:
11126 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::OILH);
11128 case SystemZ::ATOMIC_LOADW_XR:
11129 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::XR);
11130 case SystemZ::ATOMIC_LOADW_XILF:
11131 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::XILF);
11133 case SystemZ::ATOMIC_LOADW_NRi:
11134 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NR,
true);
11135 case SystemZ::ATOMIC_LOADW_NILHi:
11136 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NILH,
true);
11138 case SystemZ::ATOMIC_LOADW_MIN:
11140 case SystemZ::ATOMIC_LOADW_MAX:
11142 case SystemZ::ATOMIC_LOADW_UMIN:
11144 case SystemZ::ATOMIC_LOADW_UMAX:
11147 case SystemZ::ATOMIC_CMP_SWAPW:
11148 return emitAtomicCmpSwapW(
MI,
MBB);
11149 case SystemZ::MVCImm:
11150 case SystemZ::MVCReg:
11151 return emitMemMemWrapper(
MI,
MBB, SystemZ::MVC);
11152 case SystemZ::NCImm:
11153 return emitMemMemWrapper(
MI,
MBB, SystemZ::NC);
11154 case SystemZ::OCImm:
11155 return emitMemMemWrapper(
MI,
MBB, SystemZ::OC);
11156 case SystemZ::XCImm:
11157 case SystemZ::XCReg:
11158 return emitMemMemWrapper(
MI,
MBB, SystemZ::XC);
11159 case SystemZ::CLCImm:
11160 case SystemZ::CLCReg:
11161 return emitMemMemWrapper(
MI,
MBB, SystemZ::CLC);
11162 case SystemZ::MemsetImmImm:
11163 case SystemZ::MemsetImmReg:
11164 case SystemZ::MemsetRegImm:
11165 case SystemZ::MemsetRegReg:
11166 return emitMemMemWrapper(
MI,
MBB, SystemZ::MVC,
true);
11167 case SystemZ::CLSTLoop:
11168 return emitStringWrapper(
MI,
MBB, SystemZ::CLST);
11169 case SystemZ::MVSTLoop:
11170 return emitStringWrapper(
MI,
MBB, SystemZ::MVST);
11171 case SystemZ::SRSTLoop:
11172 return emitStringWrapper(
MI,
MBB, SystemZ::SRST);
11173 case SystemZ::TBEGIN:
11174 return emitTransactionBegin(
MI,
MBB, SystemZ::TBEGIN,
false);
11175 case SystemZ::TBEGIN_nofloat:
11176 return emitTransactionBegin(
MI,
MBB, SystemZ::TBEGIN,
true);
11177 case SystemZ::TBEGINC:
11178 return emitTransactionBegin(
MI,
MBB, SystemZ::TBEGINC,
true);
11179 case SystemZ::LTEBRCompare_Pseudo:
11180 return emitLoadAndTestCmp0(
MI,
MBB, SystemZ::LTEBR);
11181 case SystemZ::LTDBRCompare_Pseudo:
11182 return emitLoadAndTestCmp0(
MI,
MBB, SystemZ::LTDBR);
11183 case SystemZ::LTXBRCompare_Pseudo:
11184 return emitLoadAndTestCmp0(
MI,
MBB, SystemZ::LTXBR);
11186 case SystemZ::PROBED_ALLOCA:
11187 return emitProbedAlloca(
MI,
MBB);
11188 case SystemZ::EH_SjLj_SetJmp:
11190 case SystemZ::EH_SjLj_LongJmp:
11193 case TargetOpcode::STACKMAP:
11194 case TargetOpcode::PATCHPOINT:
11205SystemZTargetLowering::getRepRegClassFor(
MVT VT)
const {
11206 if (VT == MVT::Untyped)
11207 return &SystemZ::ADDR128BitRegClass;
11233 DAG.
getMachineNode(SystemZ::EFPC, dl, {MVT::i32, MVT::Other}, Chain), 0);
11253 EVT VT =
Op.getValueType();
11254 Op =
Op.getOperand(0);
11255 EVT OpVT =
Op.getValueType();
11257 assert(OpVT.
isVector() &&
"Operand type for VECREDUCE_ADD is not a vector.");
11268 Op = DAG.
getNode(SystemZISD::VSUM,
DL, MVT::v4i32,
Op, Zero);
11288 const AttributeList &Attrs =
F->getAttributes();
11289 if (Attrs.hasRetAttrs())
11290 OS << Attrs.getAsString(AttributeList::ReturnIndex) <<
" ";
11291 OS << *
F->getReturnType() <<
" @" <<
F->getName() <<
"(";
11292 for (
unsigned I = 0,
E = FT->getNumParams();
I !=
E; ++
I) {
11295 OS << *FT->getParamType(
I);
11297 for (
auto A : {Attribute::SExt, Attribute::ZExt, Attribute::NoExt})
11304bool SystemZTargetLowering::isInternal(
const Function *Fn)
const {
11305 std::map<const Function *, bool>::iterator Itr = IsInternalCache.find(Fn);
11306 if (Itr == IsInternalCache.end())
11307 Itr = IsInternalCache
11308 .insert(std::pair<const Function *, bool>(
11311 return Itr->second;
11314void SystemZTargetLowering::
11322 bool IsInternal =
false;
11323 const Function *CalleeFn =
nullptr;
11326 IsInternal = isInternal(CalleeFn);
11327 if (!IsInternal && !verifyNarrowIntegerArgs(Outs)) {
11328 errs() <<
"ERROR: Missing extension attribute of passed "
11329 <<
"value in call to function:\n" <<
"Callee: ";
11330 if (CalleeFn !=
nullptr)
11334 errs() <<
"Caller: ";
11340void SystemZTargetLowering::
11348 if (!isInternal(
F) && !verifyNarrowIntegerArgs(Outs)) {
11349 errs() <<
"ERROR: Missing extension attribute of returned "
11350 <<
"value from function:\n";
11358bool SystemZTargetLowering::verifyNarrowIntegerArgs(
11360 if (!Subtarget.isTargetELF())
11369 for (
unsigned i = 0; i < Outs.
size(); ++i) {
11370 MVT VT = Outs[i].VT;
11371 ISD::ArgFlagsTy
Flags = Outs[i].Flags;
11374 "Unexpected integer argument VT.");
11375 if (VT == MVT::i32 &&
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
AMDGPU Register Bank Select
static bool isZeroVector(SDValue N)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis false
Function Alias Analysis Results
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
const HexagonInstrInfo * TII
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isSelectPseudo(MachineInstr &MI)
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
uint64_t IntrinsicInst * II
static constexpr MCPhysReg SPReg
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file defines the SmallSet class.
static SDValue getI128Select(SelectionDAG &DAG, const SDLoc &DL, Comparison C, SDValue TrueOp, SDValue FalseOp)
static SmallVector< SDValue, 4 > simplifyAssumingCCVal(SDValue &Val, SDValue &CC, SelectionDAG &DAG)
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void printFunctionArgExts(const Function *F, raw_fd_ostream &OS)
static void adjustForLTGFR(Comparison &C)
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, SDValue Op1)
static cl::opt< bool > EnableIntArgExtCheck("argext-abi-check", cl::init(false), cl::desc("Verify that narrow int args are properly extended per the " "SystemZ ABI."))
static bool isOnlyUsedByStores(SDValue StoredVal, SelectionDAG &DAG)
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, unsigned Opcode, SDValue Op0, SDValue Op1, SDValue &Even, SDValue &Odd)
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue lowerAddrSpaceCast(SDValue Op, SelectionDAG &DAG)
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Value)
static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In)
static bool isSimpleShift(SDValue N, unsigned &ShiftVal)
static SDValue mergeHighParts(SelectionDAG &DAG, const SDLoc &DL, unsigned MergedBits, EVT VT, SDValue Op0, SDValue Op1)
static bool isI128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1)
static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num)
static bool isVectorElementSwap(ArrayRef< int > M, EVT VT)
static void getCSAddressAndShifts(SDValue Addr, SelectionDAG &DAG, SDLoc DL, SDValue &AlignedAddr, SDValue &BitShift, SDValue &NegBitShift)
static bool isShlDoublePermute(const SmallVectorImpl< int > &Bytes, unsigned &StartIndex, unsigned &OpNo0, unsigned &OpNo1)
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, const Permute &P, SDValue Op0, SDValue Op1)
static SDNode * emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg)
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, SDValue Op0, SDValue Op1, SDValue &Hi, SDValue &Lo)
static bool isF128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static void createPHIsForSelects(SmallVector< MachineInstr *, 8 > &Selects, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, MachineBasicBlock *SinkMBB)
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, SDValue *Ops, const SmallVectorImpl< int > &Bytes)
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode, bool &Invert)
static unsigned CCMaskForCondCode(ISD::CondCode CC)
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForFNeg(Comparison &C)
static bool isScalarToVector(SDValue Op)
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg, unsigned CCValid, unsigned CCMask)
static bool matchPermute(const SmallVectorImpl< int > &Bytes, const Permute &P, unsigned &OpNo0, unsigned &OpNo1)
static bool isAddCarryChain(SDValue Carry)
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static MachineOperand earlyUseOperand(MachineOperand Op)
static bool canUseSiblingCall(const CCState &ArgCCInfo, SmallVectorImpl< CCValAssign > &ArgLocs, SmallVectorImpl< ISD::OutputArg > &Outs)
static bool getzOSCalleeAndADA(SelectionDAG &DAG, SDValue &Callee, SDValue &ADA, SDLoc &DL, SDValue &Chain)
static SDValue convertToF16(SDValue Op, SelectionDAG &DAG)
static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask, SelectionDAG &DAG)
static bool shouldSwapCmpOperands(const Comparison &C)
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType)
static SDValue getADAEntry(SelectionDAG &DAG, SDValue Val, SDLoc DL, unsigned Offset, bool LoadAdr=false)
static SDNode * emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static bool getVPermMask(SDValue ShuffleOp, SmallVectorImpl< int > &Bytes)
static const Permute PermuteForms[]
static std::pair< SDValue, int > findCCUse(const SDValue &Val)
static bool isI128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static bool isSubBorrowChain(SDValue Carry)
static void adjustICmp128(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static bool analyzeArgSplit(const SmallVectorImpl< ArgTy > &Args, SmallVector< CCValAssign, 16 > &ArgLocs, unsigned I, MVT &PartVT, unsigned &NumParts)
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, unsigned OpNo)
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, bool IsNegative)
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static SDValue expandBitCastI128ToF128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, BuildVectorSDNode *BVN)
static SDValue convertFromF16(SDValue Op, SDLoc DL, SelectionDAG &DAG)
static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode)
static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In)
static SDValue MergeInputChains(SDNode *N1, SDNode *N2)
static SDValue expandBitCastF128ToI128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, uint64_t Mask, uint64_t CmpVal, unsigned ICmpType)
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, SDValue Op, SDValue Chain)
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, ISD::CondCode Cond, const SDLoc &DL, SDValue Chain=SDValue(), bool IsSignaling=false)
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB)
static Register forceReg(MachineInstr &MI, MachineOperand &Base, const SystemZInstrInfo *TII)
static bool is32Bit(EVT VT)
static std::pair< unsigned, const TargetRegisterClass * > parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC, const unsigned *Map, unsigned Size)
static unsigned detectEvenOddMultiplyOperand(const SelectionDAG &DAG, const SystemZSubtarget &Subtarget, SDValue &Op)
static bool matchDoublePermute(const SmallVectorImpl< int > &Bytes, const Permute &P, SmallVectorImpl< int > &Transform)
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, SDValue Call, unsigned CCValid, uint64_t CC, ISD::CondCode Cond)
static SDValue buildFPVecFromScalars4(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SmallVectorImpl< SDValue > &Elems, unsigned Pos)
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg)
static AddressingMode getLoadStoreAddrMode(bool HasVector, Type *Ty)
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op0, SDValue Op1)
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static bool getShuffleInput(const SmallVectorImpl< int > &Bytes, unsigned Start, unsigned BytesPerElement, int &Base)
static AddressingMode supportedAddressingMode(Instruction *I, bool HasVector)
static bool isF128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSingleWord() const
Determine if this APInt just has one word to store value.
LLVM_ABI void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
an instruction that atomically reads a memory location, combines it with another value,...
BinOp getOperation() const
This class holds the attributes for a particular argument, parameter, function, or return value.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
static LLVM_ABI StringRef getNameFromAttrKind(Attribute::AttrKind AttrKind)
LLVM Basic Block Representation.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
LLVM_ABI bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
LLVM_ABI bool isConstant() const
CCState - This class holds information needed while lowering arguments and return values.
LLVM_ABI void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
LLVM_ABI bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
LLVM_ABI void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
LLVM_ABI void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
LLVM_ABI void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
This class represents a function call, abstracting a target machine's calling convention.
MachineConstantPoolValue * getMachineCPVal() const
bool isMachineConstantPoolEntry() const
const Constant * getConstVal() const
uint64_t getZExtValue() const
This is an important base class in LLVM.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
bool hasAddressTaken(const User **=nullptr, bool IgnoreCallbackUses=false, bool IgnoreAssumeLikeCalls=true, bool IngoreLLVMUsed=false, bool IgnoreARCAttachedCall=false, bool IgnoreCastedDirectCall=false) const
hasAddressTaken - returns true if there are any uses of this function other than direct calls or invo...
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
LLVM_ABI const GlobalObject * getAliaseeObject() const
bool hasLocalLinkage() const
bool hasPrivateLinkage() const
bool hasInternalLinkage() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
static auto integer_fixedlen_vector_valuetypes()
uint64_t getScalarSizeInBits() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setMaxCallFrameSize(uint64_t S)
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setAdjustsStack(bool V)
void setFrameAddressIsTaken(bool T)
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void push_back(MachineBasicBlock *MBB)
reverse_iterator rbegin()
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineFunctionProperties & getProperties() const
Get the function properties.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr kills the specified register.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value,.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Register getReg() const
getReg - Returns the register number.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
Align getBaseAlign() const
Returns alignment and volatility of the memory access.
MachineMemOperand * getMemOperand() const
Return the unique MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
iterator_range< user_iterator > users()
void setFlags(SDNodeFlags NewFlags)
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getMachineOpcode() const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI SDValue getAtomicLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT MemVT, EVT VT, SDValue Chain, SDValue Ptr, MachineMemOperand *MMO)
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
LLVM_ABI SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
LLVM_ABI SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI std::pair< SDValue, SDValue > getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT)
Convert Op, which must be a STRICT operation of float type, to the float type VT, by either extending...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
LLVM_ABI SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
LLVM_ABI SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
ArrayRef< int > getMask() const
const_iterator begin() const
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
StringRef - Represent a constant reference to a string, i.e.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
A SystemZ-specific class detailing special use registers particular for calling conventions.
virtual int getStackPointerBias()=0
virtual int getReturnFunctionAddressRegister()=0
virtual int getCallFrameSize()=0
virtual int getStackPointerRegister()=0
static SystemZConstantPoolValue * Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier)
unsigned getVarArgsFrameIndex() const
void setVarArgsFrameIndex(unsigned FI)
void setRegSaveFrameIndex(unsigned FI)
void incNumLocalDynamicTLSAccesses()
Register getVarArgsFirstGPR() const
void setADAVirtualRegister(Register Reg)
void setVarArgsFirstGPR(Register GPR)
Register getADAVirtualRegister() const
void setSizeOfFnParams(unsigned Size)
void setVarArgsFirstFPR(Register FPR)
unsigned getRegSaveFrameIndex() const
Register getVarArgsFirstFPR() const
const SystemZInstrInfo * getInstrInfo() const override
SystemZCallingConventionRegisters * getSpecialRegisters() const
AtomicExpansionKind shouldExpandAtomicRMWInIR(const AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool hasInlineStackProbe(const MachineFunction &MF) const override
Returns true if stack probing through inline assembly is requested.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &, EVT) const override
Return the ValueType of the result of SETCC operations.
bool allowTruncateForTailCall(Type *, Type *) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag, const SDLoc &DL, const AsmOperandInfo &Constraint, SelectionDAG &DAG) const override
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
CondMergingParams getJumpConditionMergingParams(Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs) const override
bool useSoftFloat() const override
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
std::pair< SDValue, SDValue > makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName, EVT RetVT, ArrayRef< SDValue > Ops, CallingConv::ID CallConv, bool IsSigned, SDLoc DL, bool DoesNotReturn, bool IsReturnValueUsed) const
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
SystemZTargetLowering(const TargetMachine &TM, const SystemZSubtarget &STI)
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
TargetLowering::ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
TargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const override
Target-specific combining of register parts into its original value.
bool isTruncateFree(Type *, Type *) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
SDValue useLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, MVT VT, SDValue Arg, SDLoc DL, SDValue Chain, bool IsStrict) const
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine the number of bits in the operation that are sign bits.
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes, EVT *LargestVT=nullptr) const override
Determines the optimal series of memory ops to replace the memset / memcpy.
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const override
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass into.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
unsigned getStackProbeSize(const MachineFunction &MF) const
XPLINK64 calling convention specific use registers, particular to z/OS when in 64-bit mode.
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes, EVT *LargestVT=nullptr) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::LibcallImpl LibcallImpl, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
unsigned getPointerSize(unsigned AS) const
Get the pointer size for this target.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
user_iterator user_begin()
bool hasOneUse() const
Return true if there is exactly one use of this value.
int getNumOccurrences() const
constexpr ScalarTy getFixedValue() const
A raw_ostream that writes to a file descriptor.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
LLVM_ABI bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 o...
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
LLVM_ABI bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
LLVM_ABI Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
@ System
Synchronized with respect to all concurrently executing threads.
@ MO_ADA_DATA_SYMBOL_ADDR
@ MO_ADA_DIRECT_FUNC_DESC
@ MO_ADA_INDIRECT_FUNC_DESC
const unsigned GR64Regs[16]
const unsigned VR128Regs[32]
const unsigned VR16Regs[32]
const unsigned GR128Regs[16]
const unsigned FP32Regs[16]
const unsigned FP16Regs[16]
const unsigned GR32Regs[16]
const unsigned FP64Regs[16]
const int64_t ELFCallFrameSize
const unsigned VR64Regs[32]
const unsigned FP128Regs[16]
const unsigned VR32Regs[32]
unsigned odd128(bool Is32bit)
const unsigned CCMASK_CMP_GE
static bool isImmHH(uint64_t Val)
const unsigned CCMASK_TEND
const unsigned CCMASK_CS_EQ
const unsigned CCMASK_TBEGIN
const MCPhysReg ELFArgFPRs[ELFNumArgFPRs]
MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_TM_SOME_1
const unsigned CCMASK_LOGICAL_CARRY
const unsigned TDCMASK_NORMAL_MINUS
const unsigned CCMASK_TDC
const unsigned CCMASK_FCMP
const unsigned CCMASK_TM_SOME_0
static bool isImmHL(uint64_t Val)
const unsigned TDCMASK_SUBNORMAL_MINUS
const unsigned TDCMASK_NORMAL_PLUS
const unsigned CCMASK_CMP_GT
const unsigned TDCMASK_QNAN_MINUS
const unsigned CCMASK_ANY
const unsigned CCMASK_ARITH
const unsigned CCMASK_TM_MIXED_MSB_0
const unsigned TDCMASK_SUBNORMAL_PLUS
static bool isImmLL(uint64_t Val)
const unsigned VectorBits
static bool isImmLH(uint64_t Val)
MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
const unsigned TDCMASK_INFINITY_PLUS
unsigned reverseCCMask(unsigned CCMask)
const unsigned CCMASK_TM_ALL_0
const unsigned CCMASK_CMP_LE
const unsigned CCMASK_CMP_O
const unsigned CCMASK_CMP_EQ
const unsigned VectorBytes
const unsigned TDCMASK_INFINITY_MINUS
const unsigned CCMASK_ICMP
const unsigned CCMASK_VCMP_ALL
const unsigned CCMASK_VCMP_NONE
MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_VCMP
const unsigned CCMASK_TM_MIXED_MSB_1
const unsigned CCMASK_TM_MSB_0
const unsigned CCMASK_ARITH_OVERFLOW
const unsigned CCMASK_CS_NE
const unsigned TDCMASK_SNAN_PLUS
const unsigned CCMASK_NONE
const unsigned CCMASK_CMP_LT
const unsigned CCMASK_CMP_NE
const unsigned TDCMASK_ZERO_PLUS
const unsigned TDCMASK_QNAN_PLUS
const unsigned TDCMASK_ZERO_MINUS
unsigned even128(bool Is32bit)
const unsigned CCMASK_TM_ALL_1
const unsigned CCMASK_LOGICAL_BORROW
const unsigned ELFNumArgFPRs
const unsigned CCMASK_CMP_UO
const unsigned CCMASK_LOGICAL
const unsigned CCMASK_TM_MSB_1
const unsigned TDCMASK_SNAN_MINUS
initializer< Ty > init(const Ty &Val)
support::ulittle32_t Word
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
NodeAddr< NodeBase * > Node
NodeAddr< CodeNode * > Code
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
FunctionAddr VTableAddr Value
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
@ Define
Register definition.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
testing::Matcher< const detail::ErrorHolder & > Failed()
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
constexpr T maskLeadingOnes(unsigned N)
Create a bitmask with the N left-most bits set to 1, and all other bits set to 0.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI void dumpBytes(ArrayRef< uint8_t > Bytes, raw_ostream &OS)
Convert ‘Bytes’ to a hex string and output to ‘OS’.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
LLVM_ABI bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
FunctionAddr VTableAddr Count
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
@ Success
The lock was released successfully.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AddressingMode(bool LongDispl, bool IdxReg)
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SystemZVectorConstantInfo(APInt IntImm)
SmallVector< unsigned, 2 > OpVals
bool isVectorConstantLegal(const SystemZSubtarget &Subtarget)
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.