#include "llvm/IR/IntrinsicsS390.h"

#define DEBUG_TYPE "systemz-lower"
      : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
        Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
  if (Subtarget.hasHighWord())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVector())
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
  if (Subtarget.hasPopulationCount())
  if (!Subtarget.hasFPExtension())
  if (Subtarget.hasFPExtension())
  if (Subtarget.hasFPExtension())
  if (!Subtarget.hasFPExtension()) {
  if (Subtarget.hasMiscellaneousExtensions3()) {
  if (VT != MVT::v2i64)
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements2()) {
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
  if (Subtarget.hasFPExtension()) {
  if (Subtarget.hasFPExtension()) {
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements1()) {
  if (Subtarget.hasVectorEnhancements1()) {
  for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                   MVT::v4f32, MVT::v2f64 }) {
  if (!Subtarget.hasVectorEnhancements1()) {
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVectorEnhancements1()) {
  if (!Subtarget.hasVector()) {
  struct RTLibCallMapping {
  static RTLibCallMapping RTLibCallCommon[] = {
#define HANDLE_LIBCALL(code, name) {RTLIB::code, name},
#include "ZOSLibcallNames.def"
  for (auto &E : RTLibCallCommon)
  return Subtarget.hasSoftFloat();
  return Subtarget.hasVectorEnhancements1();
  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))
  if (SplatBitSize > 64)
  if (isInt<16>(SignedValue)) {
  if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
  uint64_t Lower = SplatUndefZ & maskTrailingOnes<uint64_t>(LowerBits);
  uint64_t Upper = SplatUndefZ & maskLeadingOnes<uint64_t>(UpperBits);
  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);
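// Illustrative sketch (not part of the original file): how the Lower/Upper/
// Middle decomposition above partitions the undefined bits of a splat.
// maskTrailingOnes/maskLeadingOnes are LLVM helpers; this restates them for
// the general case, assuming 0 < LowerBits, UpperBits < 64:
#include <cstdint>
static uint64_t middleUndefBits(uint64_t SplatUndefZ, unsigned LowerBits,
                                unsigned UpperBits) {
  uint64_t Lower = SplatUndefZ & ((1ULL << LowerBits) - 1);          // trailing run
  uint64_t Upper = SplatUndefZ & ~((1ULL << (64 - UpperBits)) - 1);  // leading run
  return SplatUndefZ & ~Upper & ~Lower;  // undef bits between the two runs
}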
  unsigned HalfSize = Width / 2;
  if (HighValue != LowValue || 8 > HalfSize)
  SplatBits = HighValue;
  SplatBitSize = Width;
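// Illustrative sketch (not part of the original file): the halving step above
// searches for the smallest chunk whose replication reproduces the splat.
// A standalone version that keeps splitting while both halves agree:
#include <cstdint>
static unsigned minimalSplatWidth(uint64_t Bits, unsigned Width) {
  while (Width > 8) {
    unsigned HalfSize = Width / 2;
    uint64_t HalfMask = (1ULL << HalfSize) - 1;   // HalfSize <= 32 here
    uint64_t LowValue = Bits & HalfMask;
    uint64_t HighValue = (Bits >> HalfSize) & HalfMask;
    if (HighValue != LowValue)
      break;                                      // halves differ: stop
    Bits = LowValue;
    Width = HalfSize;
  }
  return Width;        // e.g. 0x0101010101010101 with Width 64 yields 8
}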
  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
                                       bool ForCodeSize) const {
  if (Imm.isZero() || Imm.isNegZero())
  if (SI->getValueOperand()->getType()->isFP128Ty())
  if (Subtarget.hasInterlockedAccess1() &&
  return isInt<32>(Imm) || isUInt<32>(Imm);
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
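// Illustrative sketch (not part of the original file): the two returns above
// capture which 64-bit immediates are cheap for add/sub. A standalone
// equivalent of the isInt<32>/isUInt<32> checks (the instruction pairing is
// an assumption based on the SystemZ add-immediate forms):
#include <cstdint>
static bool isLegalAddImmediate64(int64_t Imm) {
  bool SignedOk = Imm >= INT32_MIN && Imm <= INT32_MAX;      // signed 32-bit imm
  bool UnsignedOk = Imm >= 0 && Imm <= (int64_t)UINT32_MAX;  // logical 32-bit imm
  return SignedOk || UnsignedOk;
}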
        LongDisplacement(LongDispl), IndexReg(IdxReg) {}
  switch (II->getIntrinsicID()) {
  case Intrinsic::memset:
  case Intrinsic::memmove:
  case Intrinsic::memcpy:
  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
      } else if (isa<StoreInst>(SingleUser))
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {
    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());
    bool IsVectorAccess = MemAccessTy->isVectorTy();
    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;
    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;
    if (IsFPAccess || IsVectorAccess)
  return AM.Scale == 0;
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  const int MVCFastLen = 16;
  if (Limit != ~unsigned(0)) {
    if (Op.isMemcpy() && Op.allowOverlap() && Op.size() <= MVCFastLen)
    if (Op.isMemset() && Op.size() - 1 <= MVCFastLen)
    if (Op.isZeroMemset())
                                                  SrcAS, FuncAttributes);
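// Illustrative sketch (not part of the original file): the MVCFastLen tests
// above decide when a small memory operation should stay inline. This
// stand-in assumes, as the checks suggest, that one MVC profitably covers up
// to 16 bytes, and that a memset is emitted as a one-byte store followed by
// an overlapping MVC of Size - 1 bytes.
#include <cstdint>
static bool useInlineMVC(bool IsMemcpy, bool AllowOverlap, uint64_t Size) {
  const uint64_t MVCFastLen = 16;
  if (Size == 0)
    return true;                       // nothing to move
  if (IsMemcpy)
    return AllowOverlap && Size <= MVCFastLen;
  return Size - 1 <= MVCFastLen;       // memset: store 1 byte, MVC the rest
}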
  return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
  unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();
  return FromBits > ToBits;
  return FromBits > ToBits;
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
  } else if (Constraint.size() == 2 && Constraint[0] == 'Z') {
    switch (Constraint[1]) {
                                        const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  if (!CallOperandVal)
  switch (*constraint) {
  if (Subtarget.hasVector())
  if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
    if (isUInt<8>(C->getZExtValue()))
  if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
    if (isUInt<12>(C->getZExtValue()))
  if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
    if (isInt<16>(C->getSExtValue()))
  if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
    if (isInt<20>(C->getSExtValue()))
  if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
    if (C->getZExtValue() == 0x7fffffff)
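// Illustrative sketch (not part of the original file): the checks above
// implement the SystemZ inline-asm immediate letters. A standalone classifier
// with the same ranges ('I' = uint8, 'J' = uint12, 'K' = int16, 'L' = int20,
// 'M' = exactly 0x7fffffff):
#include <cstdint>
static bool matchesImmConstraint(char Letter, int64_t V) {
  switch (Letter) {
  case 'I': return V >= 0 && V <= 0xff;
  case 'J': return V >= 0 && V <= 0xfff;
  case 'K': return V >= -32768 && V <= 32767;
  case 'L': return V >= -524288 && V <= 524287;   // 20-bit signed
  case 'M': return V == 0x7fffffff;
  default:  return false;
  }
}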
static std::pair<unsigned, const TargetRegisterClass *>
                        const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    return std::make_pair(Map[Index], RC);
  return std::make_pair(0U, nullptr);
std::pair<unsigned, const TargetRegisterClass *>
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
      return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
      if (Subtarget.hasVector()) {
        return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);
  auto getVTSizeInBits = [&VT]() {
  if (Constraint[1] == 'r') {
    if (getVTSizeInBits() == 32)
    if (getVTSizeInBits() == 128)
  if (Constraint[1] == 'f') {
      return std::make_pair(
    if (getVTSizeInBits() == 32)
    if (getVTSizeInBits() == 128)
  if (Constraint[1] == 'v') {
    if (!Subtarget.hasVector())
      return std::make_pair(
    if (getVTSizeInBits() == 32)
    if (getVTSizeInBits() == 64)
                                       const Constant *PersonalityFn) const {
                                       const Constant *PersonalityFn) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
                                              Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
                                              Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
                                              Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
                                              Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
                                              Op.getValueType()));
#include "SystemZGenCallingConv.inc"

  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
                                                  Type *ToType) const {
  if (BitCastToType == MVT::v2i64)
                     MVT::Untyped, Hi, Lo);
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
        RC = &SystemZ::GR32BitRegClass;
        RC = &SystemZ::GR64BitRegClass;
        RC = &SystemZ::FP32BitRegClass;
        RC = &SystemZ::FP64BitRegClass;
        RC = &SystemZ::FP128BitRegClass;
        RC = &SystemZ::VR128BitRegClass;
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert (Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        unsigned PartOffset = Ins[I + 1].PartOffset;
    int64_t VarArgOffset = CCInfo.getStackSize() + Regs->getCallFrameSize();
    int64_t RegSaveOffset =
                                 &SystemZ::FP64BitRegClass);
  MRI.addLiveIn(Regs->getADARegister(), ADAvReg);
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
                     unsigned Offset, bool LoadAdr = false) {
  bool LoadAddr = false;
  const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV);
  unsigned ADADelta = 0;
  unsigned EPADelta = 8;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    bool IsInternal = (G->getGlobal()->hasInternalLinkage() ||
                       G->getGlobal()->hasPrivateLinkage());
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
      unsigned ArgIndex = Outs[I].OrigArgIndex;
      if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty;
        SlotVT = Outs[I].VT;
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      assert (Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
                SlotVT.getStoreSize()) && "Not enough space for argument part!");
      ArgValue = SpillSlot;
  if (!StackPtr.getNode())
  RegsToPass.push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));
  if (!MemOpChains.empty())
                     ->getAddressOfCalleeRegister();
    Callee = DAG.getRegister(CalleeReg, Callee.getValueType());
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
  } else if (IsTailCall) {
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
                             RegsToPass[I].second, Glue);
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
                                  RegsToPass[I].second.getValueType()));
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
                             bool DoesNotReturn, bool IsReturnValueUsed) const {
  Args.reserve(Ops.size());
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Args.push_back(Entry);
  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
  if (RetLocs.empty())
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
                           unsigned &CCValid) {
  unsigned Id = Op.getConstantOperandVal(1);
  case Intrinsic::s390_tbegin:
  case Intrinsic::s390_tbegin_nofloat:
  case Intrinsic::s390_tend:
  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:
  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:
  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:
  case Intrinsic::s390_vtm:
  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:
  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:
  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:
  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:
  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:
  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:
  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:
  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:
  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:
  case Intrinsic::s390_vstrsb:
  case Intrinsic::s390_vstrsh:
  case Intrinsic::s390_vstrsf:
  case Intrinsic::s390_vstrszb:
  case Intrinsic::s390_vstrszh:
  case Intrinsic::s390_vstrszf:
  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:
  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:
  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:
  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:
  case Intrinsic::s390_tdc:
  for (unsigned I = 2; I < NumOps; ++I)
  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
  return Intr.getNode();
  for (unsigned I = 1; I < NumOps; ++I)
  return Intr.getNode();
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
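// Illustrative note (not part of the original file): for X = EQ the macro
// block above expands to the following three cases, mapping plain, ordered,
// and unordered-or-equal compares onto condition-code masks:
//
//   case ISD::SETEQ:  return SystemZ::CCMASK_CMP_EQ;
//   case ISD::SETOEQ: return SystemZ::CCMASK_CMP_EQ;
//   case ISD::SETUEQ: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_EQ;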
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
  int64_t Value = ConstOp1->getSExtValue();
  if (!C.Op0.hasOneUse() ||
  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getSizeInBits();
  if ((NumBits != 8 && NumBits != 16) ||
      NumBits != Load->getMemoryVT().getStoreSizeInBits())
  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
  uint64_t Mask = (1 << NumBits) - 1;
  int64_t SignedValue = ConstOp1->getSExtValue();
  } else if (NumBits == 8) {
  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType) {
                           Load->getBasePtr(), Load->getPointerInfo(),
                           Load->getMemoryVT(), Load->getAlign(),
                           Load->getMemOperand()->getFlags());
  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())
  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load->getMemoryVT() == MVT::i8)
  switch (Load->getExtensionType()) {
  if (C.Op0.getValueType() == MVT::i128)
  if (C.Op0.getValueType() == MVT::f128)
  if (isa<ConstantFPSDNode>(C.Op1))
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
      isUInt<16>(ConstOp1->getZExtValue()))
      isInt<16>(ConstOp1->getSExtValue()))
  unsigned Opcode0 = C.Op0.getOpcode();
      C.Op0.getConstantOperandVal(1) == 0xffffffff)
      ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
       (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
      Flags.setNoSignedWrap(false);
      Flags.setNoUnsignedWrap(false);
  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {
  if (C.Op0.getOpcode() == ISD::SHL && C.Op0.getValueType() == MVT::i64 &&
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
          cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
      C.Op1->getAsZExtVal() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
        C.Op0.getValueSizeInBits().getFixedValue()) {
      unsigned Type = L->getExtensionType();
        C.Op0 = C.Op0.getOperand(0);
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
                        unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");
  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
  if (EffectivelyUnsigned && CmpVal < Low) {
  if (CmpVal == Mask) {
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
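// Illustrative sketch (not part of the original file): in the ladder above,
// Low and High are the lowest and highest bits of the mask, so a TEST UNDER
// MASK result can stand in for the compare exactly when CmpVal sits on one
// of the boundaries 0, Low, Mask - Low, Mask - High, High or Mask. A
// standalone helper for those two bit positions:
#include <cstdint>
struct MaskBounds { uint64_t Low, High; };
static MaskBounds maskBounds(uint64_t Mask) {
  MaskBounds B;
  B.Low = Mask & (~Mask + 1);        // lowest set bit: 0b0110 -> 0b0010
  B.High = Mask;
  while (B.High & (B.High - 1))      // clear low bits until one remains
    B.High &= B.High - 1;            // highest set bit: 0b0110 -> 0b0100
  return B;
}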
  if (C.Op0.getValueType() == MVT::i128) {
    auto *Mask = dyn_cast<ConstantSDNode>(C.Op1);
    if (Mask && Mask->getAPIntValue() == 0) {
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  uint64_t CmpVal = ConstOp1->getZExtValue();
    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
      MaskVal = Mask->getZExtValue();
  if (NewC.Op0.getValueType() != MVT::i64 ||
    MaskVal = -(CmpVal & -CmpVal);
  unsigned BitSize = NewC.Op0.getValueSizeInBits();
  unsigned NewCCMask, ShiftVal;
      NewC.Op0.getOpcode() == ISD::SHL &&
      (MaskVal >> ShiftVal != 0) &&
      ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
                            MaskVal >> ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;
      NewC.Op0.getOpcode() == ISD::SRL &&
      (MaskVal << ShiftVal != 0) &&
      ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
                            MaskVal << ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;
  if (Mask && Mask->getZExtValue() == MaskVal)
  C.CCMask = NewCCMask;
  if (C.Op0.getValueType() != MVT::i128)
  bool Swap = false, Invert = false;
    C.CCMask ^= C.CCValid;
  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
  if (!Mask || Mask->getValueSizeInBits(0) > 64)
  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
  C.Op0 = C.Op0.getOperand(0);
  C.CCValid = CCValid;
    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
  C.CCMask &= CCValid;
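// Illustrative sketch (not part of the original file): the expressions above
// build 4-bit masks over condition codes 0-3, with bit (3 - CC) meaning
// "condition code CC". Two of the patterns, restated standalone (the
// original returns -1, i.e. all codes, for the out-of-range CC >= 4 case):
static unsigned ccMaskEQ(unsigned CC) {      // exactly code CC
  return CC < 4 ? 1u << (3 - CC) : 0;
}
static unsigned ccMaskLT(unsigned CC) {      // any code strictly below CC
  return CC < 4 ? (~0u << (4 - CC)) & 0xfu : 0xfu;
}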
                         bool IsSignaling = false) {
  unsigned Opcode, CCValid;
  Comparison C(CmpOp0, CmpOp1, Chain);
  if (C.Op0.getValueType().isFloatingPoint()) {
    else if (!IsSignaling)
    C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
  if (!C.Op1.getNode()) {
    switch (C.Op0.getOpcode()) {
    return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);
  return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
                     unsigned CCValid, unsigned CCMask) {
  case CmpMode::Int: return 0;
  case CmpMode::FP: return 0;
  case CmpMode::StrictFP: return 0;
  case CmpMode::SignalingFP: return 0;
  int Mask[] = { Start, -1, Start + 1, -1 };
      !Subtarget.hasVectorEnhancements1()) {
  SDValue Ops[2] = { Res, NewChain };
    return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1);
  return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
                                           bool IsSignaling) const {
  assert (!IsSignaling || Chain);
  CmpMode Mode = IsSignaling ? CmpMode::SignalingFP :
                 Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int;
  bool Invert = false;
      assert(IsFP && "Unexpected integer comparison");
                               DL, VT, CmpOp1, CmpOp0, Chain);
                               DL, VT, CmpOp0, CmpOp1, Chain);
                            LT.getValue(1), GE.getValue(1));
      assert(IsFP && "Unexpected integer comparison");
                               DL, VT, CmpOp1, CmpOp0, Chain);
                               DL, VT, CmpOp0, CmpOp1, Chain);
                            LT.getValue(1), GT.getValue(1));
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);
    Chain = Cmp.getValue(1);
  if (Chain && Chain.getNode() != Cmp.getNode()) {
  EVT VT = Op.getValueType();
    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);
  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
                                                  bool IsSignaling) const {
  EVT VT = Op.getNode()->getValueType(0);
    SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
                                   Chain, IsSignaling);
  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL, Chain, IsSignaling));
  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
      cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
      C.Op1->getAsZExtVal() == 0) {
  SDValue Ops[] = {TrueOp, FalseOp,
  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
                          Node->getValueType(0),
  assert(Mask && "Missing call preserved mask for calling convention");
  Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);

SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
  SDValue TP = lowerThreadPointer(DL, DAG);
  if (CP->isMachineConstantPoolEntry())
  unsigned Depth = Op.getConstantOperandVal(0);
  int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);
  unsigned Depth = Op.getConstantOperandVal(0);
  SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
  int Offset = (TFL->usePackedStack(MF) ? -2 : 14) *
  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();
  if (auto *LoadN = dyn_cast<LoadSDNode>(In))
                         LoadN->getBasePtr(), LoadN->getMemOperand());
  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    if (Subtarget.hasHighWord()) {
                                         MVT::i64, SDValue(U64, 0), In);
                       DL, MVT::f32, Out64);
  if (InVT == MVT::f32 && ResVT == MVT::i32) {
                                       MVT::f64, SDValue(U64, 0), In);
    if (Subtarget.hasHighWord())
    return lowerVASTART_XPLINK(Op, DAG);
  return lowerVASTART_ELF(Op, DAG);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  const unsigned NumFields = 4;
  for (unsigned I = 0; I < NumFields; ++I) {
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
                       Align(8), false, false,
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op,
    return lowerDYNAMIC_STACKALLOC_XPLINK(Op, DAG);
  return lowerDYNAMIC_STACKALLOC_ELF(Op, DAG);

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op,
  uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);
  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
  if (ExtraAlignSpace)
  bool IsSigned = false;
  bool DoesNotReturn = false;
  bool IsReturnValueUsed = false;
  EVT VT = Op.getValueType();
  Register SPReg = Regs.getStackPointerRegister();
  if (ExtraAlignSpace) {
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op,
  uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);
  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
    Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),
  if (ExtraAlignSpace)
                  DAG.getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace);
  if (RequiredAlign > StackAlign) {
    Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),
SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
  EVT VT = Op.getValueType();
              Op.getOperand(1), Ops[1], Ops[0]);
  else if (Subtarget.hasMiscellaneousExtensions2())
              Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
              LL, RL, Ops[1], Ops[0]);
  EVT VT = Op.getValueType();
              Op.getOperand(1), Ops[1], Ops[0]);
              Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  EVT VT = Op.getValueType();
  EVT VT = Op.getValueType();
              Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
  SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
  if (!isInt<16>(Value))
                     MVT::i64, HighOp, Low32);
  if (N->getValueType(0) == MVT::i128) {
    unsigned BaseOp = 0;
    unsigned FlagOp = 0;
    bool IsBorrow = false;
    switch (Op.getOpcode()) {
  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;
  switch (Op.getOpcode()) {
  if (N->getValueType(1) == MVT::i1)
  MVT VT = N->getSimpleValueType(0);
  if (VT == MVT::i128) {
    unsigned BaseOp = 0;
    unsigned FlagOp = 0;
    bool IsBorrow = false;
    switch (Op.getOpcode()) {
  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;
  switch (Op.getOpcode()) {
  if (N->getValueType(1) == MVT::i1)
  EVT VT = Op.getValueType();
  Op = Op.getOperand(0);
  if (NumSignificantBits == 0)
  BitSize = std::min(BitSize, OrigBitSize);
  for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
  if (BitSize != OrigBitSize)
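// Illustrative sketch (not part of the original file): the halving loop above
// sums per-byte population counts into a single total by repeatedly adding
// the value to a shifted copy of itself. A scalar model of that reduction
// for a 64-bit value whose eight bytes each hold an independent count:
#include <cstdint>
static unsigned sumBytePopcounts(uint64_t PerByteCounts) {
  for (unsigned Shift = 32; Shift >= 8; Shift /= 2)
    PerByteCounts += PerByteCounts << Shift;
  return unsigned(PerByteCounts >> 56);   // grand total lands in the top byte
}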
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  assert(Node->getMemoryVT() == MVT::i128 && "Only custom lowering i128.");
  EVT PtrVT = Addr.getValueType();
  EVT WideVT = MVT::i32;
                                            unsigned Opcode) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)
  if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
  SDValue AlignedAddr, BitShift, NegBitShift;
  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = Node->getMemoryVT();
  if (MemVT == MVT::i32 || MemVT == MVT::i64) {
    assert(Op.getValueType() == MemVT && "Mismatched VTs");
    assert(Subtarget.hasInterlockedAccess1() &&
           "Should have been expanded by AtomicExpand pass.");
                         Node->getChain(), Node->getBasePtr(), NegSrc2,
                         Node->getMemOperand());
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  if (Node->getMemoryVT() == MVT::i128) {
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
  if (NarrowVT == WideVT) {
    SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
                                   DL, Tys, Ops, NarrowVT, MMO);
  SDValue AlignedAddr, BitShift, NegBitShift;
  SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
                                             VTList, Ops, NarrowVT, MMO);
SystemZTargetLowering::getTargetMMOFlags(const Instruction &I) const {
  if (auto *SI = dyn_cast<StoreInst>(&I))
  if (auto *LI = dyn_cast<LoadInst>(&I))
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
4739 "in GHC calling convention");
4741 Regs->getStackPointerRegister(),
Op.getValueType());
4752 "in GHC calling convention");
4759 if (StoreBackchain) {
4761 Chain,
DL, Regs->getStackPointerRegister(), MVT::i64);
4762 Backchain = DAG.
getLoad(MVT::i64,
DL, Chain, getBackchainAddress(OldSP, DAG),
4766 Chain = DAG.
getCopyToReg(Chain,
DL, Regs->getStackPointerRegister(), NewSP);
4769 Chain = DAG.
getStore(Chain,
DL, Backchain, getBackchainAddress(NewSP, DAG),
  bool IsData = Op.getConstantOperandVal(4);
    return Op.getOperand(0);
  bool IsWrite = Op.getConstantOperandVal(2);
  auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
                                 Node->getVTList(), Ops,
                                 Node->getMemoryVT(), Node->getMemOperand());

SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
  unsigned Opcode, CCValid;
    assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
  unsigned Opcode, CCValid;
    if (Op->getNumValues() == 1)
    assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::thread_pointer:
    return lowerThreadPointer(SDLoc(Op), DAG);
  case Intrinsic::s390_vpdi:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vperm:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:
  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
  case Intrinsic::s390_vsumb:
  case Intrinsic::s390_vsumh:
  case Intrinsic::s390_vsumgh:
  case Intrinsic::s390_vsumgf:
  case Intrinsic::s390_vsumqf:
  case Intrinsic::s390_vsumqg:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vaq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vaccb:
  case Intrinsic::s390_vacch:
  case Intrinsic::s390_vaccf:
  case Intrinsic::s390_vaccg:
  case Intrinsic::s390_vaccq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vacq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vacccq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vsq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vscbib:
  case Intrinsic::s390_vscbih:
  case Intrinsic::s390_vscbif:
  case Intrinsic::s390_vscbig:
  case Intrinsic::s390_vscbiq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vsbiq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vsbcbiq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
    { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
    { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
    { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
    { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
    { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
    { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
    { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
    { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
    { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
    { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
    { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
    { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
    { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
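// Illustrative sketch (not part of the original file): each table above is a
// byte-selection mask over the 32-byte concatenation of two v16i8 operands;
// indices 0-15 take bytes from the first operand and 16-31 from the second
// (VPERM-style). A standalone model of how such a mask is applied:
#include <cstdint>
static void applyBytePermute(const uint8_t Sel[16], const uint8_t A[16],
                             const uint8_t B[16], uint8_t Out[16]) {
  for (int I = 0; I < 16; ++I)
    Out[I] = Sel[I] < 16 ? A[Sel[I]] : B[Sel[I] - 16];
}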
    OpNo0 = OpNo1 = OpNos[1];
  } else if (OpNos[1] < 0) {
    OpNo0 = OpNo1 = OpNos[0];
                         unsigned &OpNo0, unsigned &OpNo1) {
  int OpNos[] = { -1, -1 };
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
      OpNos[ModelOpNo] = RealOpNo;
                        unsigned &OpNo0, unsigned &OpNo1) {
    int Elt = Bytes[From];
      Transform[From] = -1;
      while (P.Bytes[To] != Elt) {
      Transform[From] = To;
  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I) {
      int Index = VSN->getMaskElt(I);
        for (unsigned J = 0; J < BytesPerElement; ++J)
          Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
      isa<ConstantSDNode>(ShuffleOp.getOperand(1))) {
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I)
      for (unsigned J = 0; J < BytesPerElement; ++J)
        Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
                        unsigned BytesPerElement, int &Base) {
  for (unsigned I = 0; I < BytesPerElement; ++I) {
    if (Bytes[Start + I] >= 0) {
      unsigned Elem = Bytes[Start + I];
        if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
      } else if (unsigned(Base) != Elem - I)
                         unsigned &StartIndex, unsigned &OpNo0,
  int OpNos[] = { -1, -1 };
  for (unsigned I = 0; I < 16; ++I) {
        Shift = ExpectedShift;
      else if (Shift != ExpectedShift)
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
      OpNos[ModelOpNo] = RealOpNo;
    N = N->getOperand(0);
  if (auto *Op = dyn_cast<ConstantSDNode>(N->getOperand(0)))
    return Op->getZExtValue() == 0;
  for (unsigned I = 0; I < Num; I++)
  for (unsigned I = 0; I < 2; ++I)
  unsigned StartIndex, OpNo0, OpNo1;
  if (ZeroVecIdx != UINT32_MAX) {
    bool MaskFirst = true;
      if (OpNo == ZeroVecIdx && I == 0) {
      if (OpNo != ZeroVecIdx && Byte == 0) {
    if (ZeroIdx != -1) {
      if (Bytes[I] >= 0) {
        if (OpNo == ZeroVecIdx)
      SDValue Src = ZeroVecIdx == 0 ? Ops[1] : Ops[0];
                    (!Ops[1].isUndef() ? Ops[1] : Ops[0]), Op2);
struct GeneralShuffle {
  GeneralShuffle(EVT vt) : VT(vt), UnpackFromEltSize(UINT_MAX) {}
  void tryPrepareForUnpack();
  bool unpackWasPrepared() { return UnpackFromEltSize <= 4; }
  unsigned UnpackFromEltSize;

void GeneralShuffle::addUndef() {
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(-1);
bool GeneralShuffle::add(SDValue Op, unsigned Elem) {
  EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
  if (FromBytesPerElement < BytesPerElement)
           (FromBytesPerElement - BytesPerElement));
  while (Op.getNode()) {
      Op = Op.getOperand(0);
    } else if (Op.isUndef()) {
  for (; OpNo < Ops.size(); ++OpNo)
    if (Ops[OpNo] == Op)
  if (OpNo == Ops.size())
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(Base + I);
  if (Ops.size() == 0)
  tryPrepareForUnpack();
  if (Ops.size() == 1)
    Ops.push_back(DAG.getUNDEF(MVT::v16i8));
  unsigned Stride = 1;
  for (; Stride * 2 < Ops.size(); Stride *= 2) {
    for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
      SDValue SubOps[] = { Ops[I], Ops[I + Stride] };
        else if (OpNo == I + Stride)
          if (NewBytes[J] >= 0) {
                   "Invalid double permute");
            assert(NewBytesMap[J] < 0 && "Invalid double permute");
          if (NewBytes[J] >= 0)
    Ops[1] = Ops[Stride];
  unsigned OpNo0, OpNo1;
  if (unpackWasPrepared() && Ops[1].isUndef())
  else if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))
  Op = insertUnpackIfPrepared(DAG, DL, Op);

  dbgs() << Msg.c_str() << " { ";
  for (unsigned i = 0; i < Bytes.size(); i++)
    dbgs() << Bytes[i] << " ";
void GeneralShuffle::tryPrepareForUnpack() {
  if (ZeroVecOpNo == UINT32_MAX || Ops.size() == 1)
  if (Ops.size() > 2 &&
  UnpackFromEltSize = 1;
  for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) {
    bool MatchUnpack = true;
      unsigned ToEltSize = UnpackFromEltSize * 2;
      bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize;
      if (Bytes[Elt] != -1) {
        if (IsZextByte != (OpNo == ZeroVecOpNo)) {
          MatchUnpack = false;
    if (Ops.size() == 2) {
        if (SrcBytes[i] != -1 && SrcBytes[i] % 16 != int(i)) {
          UnpackFromEltSize = UINT_MAX;
  if (UnpackFromEltSize > 4)

  LLVM_DEBUG(dbgs() << "Preparing for final unpack of element size "
                    << UnpackFromEltSize << ". Zero vector is Op#" << ZeroVecOpNo
             dumpBytes(Bytes, "Original Bytes vector:"););

    Elt += UnpackFromEltSize;
    for (unsigned i = 0; i < UnpackFromEltSize; i++, Elt++, B++)
      Bytes[B] = Bytes[Elt];
  Ops.erase(&Ops[ZeroVecOpNo]);
    if (Bytes[I] >= 0) {
      if (OpNo > ZeroVecOpNo)
  if (!unpackWasPrepared())
  unsigned InBits = UnpackFromEltSize * 8;
  unsigned OutBits = InBits * 2;
    if (!Op.getOperand(I).isUndef())
  if (Value.isUndef())
  GeneralShuffle GS(VT);
  bool FoundOne = false;
  for (unsigned I = 0; I < NumElements; ++I) {
      Op = Op.getOperand(0);
      unsigned Elem = Op.getConstantOperandVal(1);
      if (!GS.add(Op.getOperand(0), Elem))
    } else if (Op.isUndef()) {
  if (!ResidueOps.empty()) {
    while (ResidueOps.size() < NumElements)
    for (auto &Op : GS.Ops) {
      if (!Op.getNode()) {
  return GS.getNode(DAG, SDLoc(BVN));
bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const {
  if (Op.getOpcode() == ISD::LOAD && cast<LoadSDNode>(Op)->isUnindexed())
  if (auto *AL = dyn_cast<AtomicSDNode>(Op))
  unsigned int NumElements = Elems.size();
  unsigned int Count = 0;
  for (auto Elem : Elems) {
    if (!Elem.isUndef()) {
      else if (Elem != Single) {
  if (Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))
  bool AllLoads = true;
  for (auto Elem : Elems)
    if (!isVectorElementLoad(Elem)) {
  if (VT == MVT::v2i64 && !AllLoads)
  if (VT == MVT::v2f64 && !AllLoads)
  if (VT == MVT::v4f32 && !AllLoads) {
                       DL, MVT::v2i64, Op01, Op23);
  unsigned NumConstants = 0;
  for (unsigned I = 0; I < NumElements; ++I) {
  if (NumConstants > 0) {
    for (unsigned I = 0; I < NumElements; ++I)
  std::map<const SDNode*, unsigned> UseCounts;
  SDNode *LoadMaxUses = nullptr;
  for (unsigned I = 0; I < NumElements; ++I)
    if (isVectorElementLoad(Elems[I])) {
      SDNode *Ld = Elems[I].getNode();
      if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
  if (LoadMaxUses != nullptr) {
    ReplicatedVal = SDValue(LoadMaxUses, 0);
  unsigned I1 = NumElements / 2 - 1;
  unsigned I2 = NumElements - 1;
  bool Def1 = !Elems[I1].isUndef();
  bool Def2 = !Elems[I2].isUndef();
  for (unsigned I = 0; I < NumElements; ++I)
    if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)
  auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
  EVT VT = Op.getValueType();
  if (BVN->isConstant()) {
  for (unsigned I = 0; I < NumElements; ++I)
    Ops[I] = Op.getOperand(I);
  return buildVector(DAG, DL, VT, Ops);

  auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
  EVT VT = Op.getValueType();
  if (VSN->isSplat()) {
    unsigned Index = VSN->getSplatIndex();
           "Splat index should be defined and in first operand");
  GeneralShuffle GS(VT);
  for (unsigned I = 0; I < NumElements; ++I) {
    int Elt = VSN->getMaskElt(I);
    else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
                     unsigned(Elt) % NumElements))
  return GS.getNode(DAG, SDLoc(VSN));
  EVT VT = Op.getValueType();
  if (VT == MVT::v2f64 &&

SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
  EVT VT = Op.getValueType();
  if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {

SDValue SystemZTargetLowering::
  EVT OutVT = Op.getValueType();
  } while (FromBits != ToBits);

SDValue SystemZTargetLowering::
  EVT OutVT = Op.getValueType();
  unsigned NumInPerOut = InNumElts / OutNumElts;
  unsigned ZeroVecElt = InNumElts;
  for (unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) {
    unsigned MaskElt = PackedElt * NumInPerOut;
    unsigned End = MaskElt + NumInPerOut - 1;
    for (; MaskElt < End; MaskElt++)
      Mask[MaskElt] = ZeroVecElt++;
    Mask[MaskElt] = PackedElt;
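// Illustrative sketch (not part of the original file): a standalone version
// of the mask construction above, assuming it feeds a shuffle whose second
// operand is an all-zeroes vector (indices >= InNumElts select zeroes), so
// each narrow element ends up zero-padded from the left:
#include <vector>
static std::vector<int> zextInRegShuffleMask(unsigned InNumElts,
                                             unsigned OutNumElts) {
  std::vector<int> Mask(InNumElts);
  unsigned NumInPerOut = InNumElts / OutNumElts;
  unsigned ZeroVecElt = InNumElts;          // first element of the zero vector
  for (unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) {
    unsigned MaskElt = PackedElt * NumInPerOut;
    unsigned End = MaskElt + NumInPerOut - 1;
    for (; MaskElt < End; MaskElt++)
      Mask[MaskElt] = ZeroVecElt++;         // zero fill
    Mask[MaskElt] = PackedElt;              // the payload element
  }
  return Mask; // InNumElts=8, OutNumElts=4 -> { 8, 0, 9, 1, 10, 2, 11, 3 }
}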
                                          unsigned ByScalar) const {
  EVT VT = Op.getValueType();
  if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
    APInt SplatBits, SplatUndef;
    unsigned SplatBitSize;
    if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                             ElemBitSize, true) &&
        SplatBitSize == ElemBitSize) {
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
    if (VSN->isSplat()) {
      unsigned Index = VSN->getSplatIndex();
             "Splat index should be defined and in first operand");
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
  MVT ResultVT = Op.getSimpleValueType();
  unsigned Check = Op.getConstantOperandVal(1);
  unsigned TDCMask = 0;
  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
  return DAG.getLoad(MVT::i64, DL, Chain, StackPtr, MPI);

  switch (Op.getOpcode()) {
    return lowerFRAMEADDR(Op, DAG);
    return lowerRETURNADDR(Op, DAG);
    return lowerBR_CC(Op, DAG);
    return lowerSELECT_CC(Op, DAG);
    return lowerSETCC(Op, DAG);
    return lowerSTRICT_FSETCC(Op, DAG, false);
    return lowerSTRICT_FSETCC(Op, DAG, true);
    return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
    return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
    return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
    return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
    return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
    return lowerBITCAST(Op, DAG);
    return lowerVASTART(Op, DAG);
    return lowerVACOPY(Op, DAG);
    return lowerDYNAMIC_STACKALLOC(Op, DAG);
    return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
    return lowerSMUL_LOHI(Op, DAG);
    return lowerUMUL_LOHI(Op, DAG);
    return lowerSDIVREM(Op, DAG);
    return lowerUDIVREM(Op, DAG);
    return lowerXALUO(Op, DAG);
    return lowerUADDSUBO_CARRY(Op, DAG);
    return lowerOR(Op, DAG);
    return lowerCTPOP(Op, DAG);
    return lowerATOMIC_FENCE(Op, DAG);
    return lowerATOMIC_LDST_I128(Op, DAG);
    return lowerATOMIC_LOAD_SUB(Op, DAG);
    return lowerATOMIC_CMP_SWAP(Op, DAG);
    return lowerSTACKSAVE(Op, DAG);
    return lowerSTACKRESTORE(Op, DAG);
    return lowerPREFETCH(Op, DAG);
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
    return lowerBUILD_VECTOR(Op, DAG);
    return lowerVECTOR_SHUFFLE(Op, DAG);
    return lowerSCALAR_TO_VECTOR(Op, DAG);
    return lowerINSERT_VECTOR_ELT(Op, DAG);
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
    return lowerSIGN_EXTEND_VECTOR_INREG(Op, DAG);
    return lowerZERO_EXTEND_VECTOR_INREG(Op, DAG);
    return lowerIS_FPCLASS(Op, DAG);
    return lowerGET_ROUNDING(Op, DAG);
    return lowerREADCYCLECOUNTER(Op, DAG);
  switch (N->getOpcode()) {
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
                                          DL, Tys, Ops, MVT::i128, MMO);
                                          DL, Tys, Ops, MVT::i128, MMO);
    if (cast<AtomicSDNode>(N)->getSuccessOrdering() ==
                                         MVT::Other, Res), 0);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                                          DL, Tys, Ops, MVT::i128, MMO);
    if (N->getValueType(0) == MVT::i128 && Src.getValueType() == MVT::f128 &&
      if (getRepRegClassFor(MVT::f128) == &SystemZ::VR128BitRegClass) {
      assert(getRepRegClassFor(MVT::f128) == &SystemZ::FP128BitRegClass &&
             "Unrecognized register class for f128.");
#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
    OPCODE(ATOMIC_LOADW_ADD);
    OPCODE(ATOMIC_LOADW_SUB);
    OPCODE(ATOMIC_LOADW_AND);
    OPCODE(ATOMIC_LOADW_XOR);
    OPCODE(ATOMIC_LOADW_NAND);
    OPCODE(ATOMIC_LOADW_MIN);
    OPCODE(ATOMIC_LOADW_MAX);
    OPCODE(ATOMIC_LOADW_UMIN);
    OPCODE(ATOMIC_LOADW_UMAX);
    OPCODE(ATOMIC_CMP_SWAPW);
    OPCODE(ATOMIC_STORE_128);
    OPCODE(ATOMIC_CMP_SWAP_128);
bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
  if (!Subtarget.hasVector())
                                          DAGCombinerInfo &DCI,
  unsigned Opcode = Op.getOpcode();
    Op = Op.getOperand(0);
        canTreatAsByteVector(Op.getValueType())) {
                        BytesPerElement, First))
      if (Byte % BytesPerElement != 0)
      Index = Byte / BytesPerElement;
               canTreatAsByteVector(Op.getValueType())) {
      EVT OpVT = Op.getValueType();
      if (OpBytesPerElement < BytesPerElement)
      unsigned End = (Index + 1) * BytesPerElement;
      if (End % OpBytesPerElement != 0)
      Op = Op.getOperand(End / OpBytesPerElement - 1);
      if (!Op.getValueType().isInteger()) {
        DCI.AddToWorklist(Op.getNode());
        DCI.AddToWorklist(Op.getNode());
               canTreatAsByteVector(Op.getValueType()) &&
               canTreatAsByteVector(Op.getOperand(0).getValueType())) {
      EVT ExtVT = Op.getValueType();
      EVT OpVT = Op.getOperand(0).getValueType();
      unsigned Byte = Index * BytesPerElement;
      unsigned SubByte = Byte % ExtBytesPerElement;
      unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
      if (SubByte < MinSubByte ||
          SubByte + BytesPerElement > ExtBytesPerElement)
      Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
      Byte += SubByte - MinSubByte;
      if (Byte % BytesPerElement != 0)
      Op = Op.getOperand(0);
  if (Op.getValueType() != VecVT) {
    DCI.AddToWorklist(Op.getNode());
SDValue SystemZTargetLowering::combineTruncateExtract(
  if (canTreatAsByteVector(VecVT)) {
    if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      if (BytesPerElement % TruncBytes == 0) {
        unsigned Scale = BytesPerElement / TruncBytes;
        unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
        EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
        return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);

  auto *NewALoad = dyn_cast<AtomicSDNode>(DAG.getAtomic(
  NewALoad->setExtensionType(ETy);
SDValue SystemZTargetLowering::combineZERO_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
    auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0));
    auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (TrueOp && FalseOp) {
        DCI.CombineTo(N0.getNode(), TruncSelect);
  if (auto *ALoad = dyn_cast<AtomicSDNode>(N0))
SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
    SDNode *N, DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();

SDValue SystemZTargetLowering::combineSIGN_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
      unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
      unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
  if (auto *ALoad = dyn_cast<AtomicSDNode>(N0))
SDValue SystemZTargetLowering::combineMERGE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  unsigned Opcode = N->getOpcode();
  if (Op1 == N->getOperand(0))
    if (ElemBytes <= 4) {
      DCI.AddToWorklist(Op1.getNode());
      DCI.AddToWorklist(Op.getNode());
SDValue SystemZTargetLowering::combineLOAD(
    SDNode *N, DAGCombinerInfo &DCI) const {
  EVT LdVT = N->getValueType(0);
  if (LdVT == MVT::i128) {
    int UsedElements = 0;
         UI != UIEnd; ++UI) {
      if (UI.getUse().getResNo() != 0)
          User->getValueType(0) != MVT::i64)
      if (UsedElements & (1 << Index))
      UsedElements |= 1 << Index;
    for (auto UserAndIndex : Users) {
      unsigned Offset = User->getValueType(0).getStoreSize() * UserAndIndex.second;
          LD->getPointerInfo().getWithOffset(Offset),
          LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
      DCI.CombineTo(User, EltLoad, true);
    DCI.AddToWorklist(Chain.getNode());
    else if (UI.getUse().getResNo() == 0)
  if (!Replicate || OtherUses.empty())
  for (SDNode *U : OtherUses) {
bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const {
  if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
  if (Subtarget.hasVectorEnhancements2())
    if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 ||
        VT == MVT::i128)
  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0) continue;
    if ((unsigned) M[i] != NumElts - 1 - i)
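// Illustrative sketch (not part of the original file): the loop above accepts
// a shuffle mask as an element reversal (modulo undef lanes). Standalone form:
static bool isElementReversal(const int *M, unsigned NumElts) {
  for (unsigned I = 0; I < NumElts; ++I) {
    if (M[I] < 0)
      continue;                       // undef lane matches anything
    if ((unsigned)M[I] != NumElts - 1 - I)
      return false;
  }
  return true;
}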
  for (auto *U : StoredVal->uses()) {
      EVT CurrMemVT = ST->getMemoryVT().getScalarType();
    } else if (isa<BuildVectorSDNode>(U)) {
SDValue SystemZTargetLowering::combineSTORE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  auto *SN = cast<StoreSDNode>(N);
  auto &Op1 = N->getOperand(1);
  EVT MemVT = SN->getMemoryVT();
  if (MemVT.isInteger() && SN->isTruncatingStore()) {
            combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
      DCI.AddToWorklist(Value.getNode());
                               SN->getBasePtr(), SN->getMemoryVT(),
                               SN->getMemOperand());
  if (!SN->isTruncatingStore() &&
      N->getOperand(0), BSwapOp, N->getOperand(2)
        Ops, MemVT, SN->getMemOperand());
  if (!SN->isTruncatingStore() &&
      Subtarget.hasVectorEnhancements2()) {
        Ops, MemVT, SN->getMemOperand());
  if (!SN->isTruncatingStore() &&
      N->getOperand(0).reachesChainWithoutSideEffects(SDValue(Op1.getNode(), 1))) {
        Ops, MemVT, SN->getMemOperand());
        DAG.getStore(SN->getChain(), DL, HiPart, SN->getBasePtr(),
                     SN->getPointerInfo(), SN->getOriginalAlign(),
                     SN->getMemOperand()->getFlags(), SN->getAAInfo());
                     SN->getPointerInfo().getWithOffset(8),
                     SN->getOriginalAlign(),
                     SN->getMemOperand()->getFlags(), SN->getAAInfo());
    if (C->getAPIntValue().getBitWidth() > 64 || C->isAllOnes() ||
    if (VCI.isVectorConstantLegal(Subtarget) &&
  auto FindReplicatedReg = [&](SDValue MulOp) {
    EVT MulVT = MulOp.getValueType();
    if (MulOp->getOpcode() == ISD::MUL &&
        (MulVT == MVT::i16 || MulVT == MVT::i32 || MulVT == MVT::i64)) {
        WordVT = LHS->getOperand(0).getValueType();
        WordVT = cast<VTSDNode>(LHS->getOperand(1))->getVT();
      if (auto *C = dyn_cast<ConstantSDNode>(MulOp->getOperand(1))) {
            APInt(MulVT.getSizeInBits(), C->getZExtValue()));
        if (VCI.isVectorConstantLegal(Subtarget) &&
            WordVT == VCI.VecVT.getScalarType())
  if (isa<BuildVectorSDNode>(Op1) &&
    if (auto *C = dyn_cast<ConstantSDNode>(SplatVal))
      FindReplicatedReg(SplatVal);
  if (auto *C = dyn_cast<ConstantSDNode>(Op1))
    FindReplicatedReg(Op1);
         "Bad type handling");
                        SN->getBasePtr(), SN->getMemOperand());
SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
    SDNode *N, DAGCombinerInfo &DCI) const {
      N->getOperand(0).hasOneUse() &&
      Subtarget.hasVectorEnhancements2()) {
        Ops, LD->getMemoryVT(), LD->getMemOperand());
    DCI.CombineTo(N, ESLoad);
    DCI.CombineTo(Load.getNode(), ESLoad, ESLoad.getValue(1));
SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (!Subtarget.hasVector())
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
          Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);
    EVT VecVT = Op.getValueType();
                     Op.getOperand(0), N->getOperand(1));
    DCI.AddToWorklist(Op.getNode());
    if (EltVT != N->getValueType(0)) {
      DCI.AddToWorklist(Op.getNode());
  if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
      return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
                            IndexN->getZExtValue(), DCI, false);
SDValue SystemZTargetLowering::combineJOIN_DWORDS(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (N->getOperand(0) == N->getOperand(1))
  if (Chain1 == Chain2)
SDValue SystemZTargetLowering::combineFP_ROUND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (!Subtarget.hasVector())
  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  if (N->getValueType(0) == MVT::f32 && Op0.hasOneUse() &&
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() && U->hasOneUse() &&
          U->getOperand(0) == Vec &&
          U->getConstantOperandVal(1) == 1) {
        if (OtherRound.getOpcode() == N->getOpcode() &&
          if (N->isStrictFPOpcode()) {
                {MVT::v4f32, MVT::Other}, {Chain, Vec});
          DCI.AddToWorklist(VRound.getNode());
          DCI.AddToWorklist(Extract1.getNode());
                               N->getVTList(), Extract0, Chain);
SDValue SystemZTargetLowering::combineFP_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (!Subtarget.hasVector())
  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  if (N->getValueType(0) == MVT::f64 && Op0.hasOneUse() &&
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() && U->hasOneUse() &&
          U->getOperand(0) == Vec &&
          U->getConstantOperandVal(1) == 2) {
        if (OtherExtend.getOpcode() == N->getOpcode() &&
          if (N->isStrictFPOpcode()) {
                {MVT::v2f64, MVT::Other}, {Chain, Vec});
          DCI.AddToWorklist(VExtend.getNode());
          DCI.AddToWorklist(Extract1.getNode());
                               N->getVTList(), Extract0, Chain);
SDValue SystemZTargetLowering::combineINT_TO_FP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  unsigned Opcode = N->getOpcode();
  EVT OutVT = N->getValueType(0);
  unsigned InScalarBits = Op->getValueType(0).getScalarSizeInBits();
  if (OutLLVMTy->isVectorTy() && OutScalarBits > InScalarBits &&
      OutScalarBits <= 64) {
    unsigned NumElts = cast<FixedVectorType>(OutLLVMTy)->getNumElements();
    unsigned ExtOpcode =
SDValue SystemZTargetLowering::combineBSWAP(
    SDNode *N, DAGCombinerInfo &DCI) const {
      N->getOperand(0).hasOneUse() &&
      canLoadStoreByteSwapped(N->getValueType(0))) {
    EVT LoadVT = N->getValueType(0);
    if (LoadVT == MVT::i16)
        Ops, LD->getMemoryVT(), LD->getMemOperand());
    if (N->getValueType(0) == MVT::i16)
    DCI.CombineTo(N, ResVal);
    DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
          Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);
      (canLoadStoreByteSwapped(N->getValueType(0)) &&
    EVT VecVT = N->getValueType(0);
    EVT EltVT = N->getValueType(0).getVectorElementType();
    DCI.AddToWorklist(Vec.getNode());
    DCI.AddToWorklist(Elt.getNode());
    DCI.AddToWorklist(Vec.getNode());
    DCI.AddToWorklist(Elt.getNode());
  if (SV && Op.hasOneUse()) {
    EVT VecVT = N->getValueType(0);
    DCI.AddToWorklist(Op0.getNode());
    DCI.AddToWorklist(Op1.getNode());
    DCI.AddToWorklist(Op0.getNode());
    DCI.AddToWorklist(Op1.getNode());
7511 auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
7518 bool Invert =
false;
7525 auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
7528 auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
7531 if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
7533 else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
7537 auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
7538 auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
7539 if (!NewCCValid || !NewCCMask)
7541 CCValid = NewCCValid->getZExtValue();
7542 CCMask = NewCCMask->getZExtValue();
7552 if (CompareLHS->getOpcode() ==
ISD::SRA) {
7553 auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
7554 if (!SRACount || SRACount->getZExtValue() != 30)
7556 auto *SHL = CompareLHS->getOperand(0).getNode();
7559 auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
7562 auto *IPM = SHL->getOperand(0).getNode();
7567 if (!CompareLHS->hasOneUse())
7570 if (CompareRHS->getZExtValue() != 0)
7577 CCReg = IPM->getOperand(0);
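// combineBR_CCMASK: re-fetch the CCValid/CCMask constants from the branch
// and let combineCCMask above fold intervening SELECT_CCMASK / IPM sequences
// back into a direct test of CC, rebuilding the branch with the adjusted
// masks when that succeeds.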
7584SDValue SystemZTargetLowering::combineBR_CCMASK(
7585 SDNode *N, DAGCombinerInfo &DCI) const {
7589 auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
7590 auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
7591 if (!CCValid || !CCMask)
7594 int CCValidVal = CCValid->getZExtValue();
7595 int CCMaskVal = CCMask->getZExtValue();
7604 N->getOperand(3), CCReg);
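// combineSELECT_CCMASK: same idea as combineBR_CCMASK, but for conditional
// selects; if the CC producer can be reached directly, the select is rebuilt
// with the simplified CC operand and masks.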
7608SDValue SystemZTargetLowering::combineSELECT_CCMASK(
7609 SDNode *N, DAGCombinerInfo &DCI) const {
7613 auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
7614 auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
7615 if (!CCValid || !CCMask)
7618 int CCValidVal = CCValid->getZExtValue();
7619 int CCMaskVal = CCMask->getZExtValue();
7624 N->getOperand(0), N->getOperand(1),
7632SDValue SystemZTargetLowering::combineGET_CCMASK(
7633 SDNode *N, DAGCombinerInfo &DCI) const {
7636 auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
7637 auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
7638 if (!CCValid || !CCMask)
7640 int CCValidVal = CCValid->getZExtValue();
7641 int CCMaskVal = CCMask->getZExtValue();
7649 auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
7650 auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
7651 if (!SelectCCValid || !SelectCCMask)
7653 int SelectCCValidVal = SelectCCValid->getZExtValue();
7654 int SelectCCMaskVal = SelectCCMask->getZExtValue();
7656 auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
7657 auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
7658 if (!TrueVal || !FalseVal)
7662 else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() == 1)
7663 SelectCCMaskVal ^= SelectCCValidVal;
7667 if (SelectCCValidVal & ~CCValidVal)
7669 if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
7672 return Select->getOperand(4);
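// combineIntDIVREM: scalarize a vector division/remainder by a build_vector
// of constants so that the target-independent BuildSDIV/BuildUDIV logic can
// turn each scalar divide into a multiply-based sequence.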
7675SDValue SystemZTargetLowering::combineIntDIVREM(
7676 SDNode *N, DAGCombinerInfo &DCI) const {
7678 EVT VT = N->getValueType(0);
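// combineINTRINSIC: a vll/vlrl (load with length) or vstl/vstrl (store with
// length) whose constant length covers the whole vector (>= 15) is replaced
// by an ordinary full-width load or store.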
7692SDValue SystemZTargetLowering::combineINTRINSIC(
7693 SDNode *N, DAGCombinerInfo &DCI) const {
7696 unsigned Id = N->getConstantOperandVal(1);
7700 case Intrinsic::s390_vll:
7701 case Intrinsic::s390_vlrl:
7702 if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2)))
7703 if (C->getZExtValue() >= 15)
7708 case Intrinsic::s390_vstl:
7709 case Intrinsic::s390_vstrl:
7710 if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(3)))
7711 if (C->getZExtValue() >= 15)
7722 return N->getOperand(0);
7728 switch(N->getOpcode()) {
7753 case ISD::UREM: return combineIntDIVREM(N, DCI);
7765 EVT VT = Op.getValueType();
7768 unsigned Opcode = Op.getOpcode();
7770 unsigned Id = Op.getConstantOperandVal(0);
7772 case Intrinsic::s390_vpksh:
7773 case Intrinsic::s390_vpksf:
7774 case Intrinsic::s390_vpksg:
7775 case Intrinsic::s390_vpkshs:
7776 case Intrinsic::s390_vpksfs:
7777 case Intrinsic::s390_vpksgs:
7778 case Intrinsic::s390_vpklsh:
7779 case Intrinsic::s390_vpklsf:
7780 case Intrinsic::s390_vpklsg:
7781 case Intrinsic::s390_vpklshs:
7782 case Intrinsic::s390_vpklsfs:
7783 case Intrinsic::s390_vpklsgs:
7785 SrcDemE = DemandedElts;
7788 SrcDemE = SrcDemE.trunc(NumElts / 2);
7791 case Intrinsic::s390_vuphb:
7792 case Intrinsic::s390_vuphh:
7793 case Intrinsic::s390_vuphf:
7794 case Intrinsic::s390_vuplhb:
7795 case Intrinsic::s390_vuplhh:
7796 case Intrinsic::s390_vuplhf:
7797 SrcDemE = APInt(NumElts * 2, 0);
7800 case Intrinsic::s390_vuplb:
7801 case Intrinsic::s390_vuplhw:
7802 case Intrinsic::s390_vuplf:
7803 case Intrinsic::s390_vupllb:
7804 case Intrinsic::s390_vupllh:
7805 case Intrinsic::s390_vupllf:
7806 SrcDemE = APInt(NumElts * 2, 0);
7809 case Intrinsic::s390_vpdi: {
7811 SrcDemE = APInt(NumElts, 0);
7812 if (!DemandedElts[OpNo - 1])
7814 unsigned Mask = Op.getConstantOperandVal(3);
7815 unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
7817 SrcDemE.setBit((Mask & MaskBit) ? 1 : 0);
7820 case Intrinsic::s390_vsldb: {
7822 assert(VT == MVT::v16i8 && "Unexpected type.");
7823 unsigned FirstIdx = Op.getConstantOperandVal(3);
7824 assert(FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
7825 unsigned NumSrc0Els = 16 - FirstIdx;
7826 SrcDemE = APInt(NumElts, 0);
7828 APInt DemEls = DemandedElts.trunc(NumSrc0Els);
7831 APInt DemEls = DemandedElts.lshr(NumSrc0Els);
7836 case Intrinsic::s390_vperm:
7837 SrcDemE = APInt(NumElts, 1);
7847 SrcDemE = APInt(1, 1);
7850 SrcDemE = DemandedElts;
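// getDemandedSrcElements above maps the demanded elements of a SystemZ
// vector intrinsic's result onto the demanded elements of one operand; the
// known-bits and sign-bits hooks below use it so that pack, unpack and
// permute intrinsics propagate information from only the lanes that matter.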
7861 const APInt &DemandedElts,
7876 const APInt &DemandedElts,
7878 unsigned Depth) const {
7882 unsigned tmp0, tmp1;
7887 EVT VT = Op.getValueType();
7888 if (Op.getResNo() != 0 || VT == MVT::Untyped)
7891 "KnownBits does not match VT in bitwidth");
7894 "DemandedElts does not match VT number of elements");
7896 unsigned Opcode = Op.getOpcode();
7898 bool IsLogical = false;
7899 unsigned Id = Op.getConstantOperandVal(0);
7901 case Intrinsic::s390_vpksh:
7902 case Intrinsic::s390_vpksf:
7903 case Intrinsic::s390_vpksg:
7904 case Intrinsic::s390_vpkshs:
7905 case Intrinsic::s390_vpksfs:
7906 case Intrinsic::s390_vpksgs:
7907 case Intrinsic::s390_vpklsh:
7908 case Intrinsic::s390_vpklsf:
7909 case Intrinsic::s390_vpklsg:
7910 case Intrinsic::s390_vpklshs:
7911 case Intrinsic::s390_vpklsfs:
7912 case Intrinsic::s390_vpklsgs:
7913 case Intrinsic::s390_vpdi:
7914 case Intrinsic::s390_vsldb:
7915 case Intrinsic::s390_vperm:
7918 case Intrinsic::s390_vuplhb:
7919 case Intrinsic::s390_vuplhh:
7920 case Intrinsic::s390_vuplhf:
7921 case Intrinsic::s390_vupllb:
7922 case Intrinsic::s390_vupllh:
7923 case Intrinsic::s390_vupllf:
7926 case Intrinsic::s390_vuphb:
7927 case Intrinsic::s390_vuphh:
7928 case Intrinsic::s390_vuphf:
7929 case Intrinsic::s390_vuplb:
7930 case Intrinsic::s390_vuplhw:
7931 case Intrinsic::s390_vuplf: {
7973 if (LHS == 1) return 1;
7976 if (RHS == 1) return 1;
7977 unsigned Common = std::min(LHS, RHS);
7978 unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
7979 EVT VT = Op.getValueType();
7981 if (SrcBitWidth > VTBits) {
7982 unsigned SrcExtraBits = SrcBitWidth - VTBits;
7983 if (Common > SrcExtraBits)
7984 return (Common - SrcExtraBits);
7987 assert(SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
7994 unsigned Depth) const {
7995 if (Op.getResNo() != 0)
7997 unsigned Opcode = Op.getOpcode();
7999 unsigned Id = Op.getConstantOperandVal(0);
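// For the intrinsics listed below, the result's sign-bit count is derived
// from the operands via computeNumSignBitsBinOp above, adjusting when the
// source elements are wider than the result elements.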
8001 case Intrinsic::s390_vpksh:
8002 case Intrinsic::s390_vpksf:
8003 case Intrinsic::s390_vpksg:
8004 case Intrinsic::s390_vpkshs:
8005 case Intrinsic::s390_vpksfs:
8006 case Intrinsic::s390_vpksgs:
8007 case Intrinsic::s390_vpklsh:
8008 case Intrinsic::s390_vpklsf:
8009 case Intrinsic::s390_vpklsg:
8010 case Intrinsic::s390_vpklshs:
8011 case Intrinsic::s390_vpklsfs:
8012 case Intrinsic::s390_vpklsgs:
8013 case Intrinsic::s390_vpdi:
8014 case Intrinsic::s390_vsldb:
8015 case Intrinsic::s390_vperm:
8017 case Intrinsic::s390_vuphb:
8018 case Intrinsic::s390_vuphh:
8019 case Intrinsic::s390_vuphf:
8020 case Intrinsic::s390_vuplb:
8021 case Intrinsic::s390_vuplhw:
8022 case Intrinsic::s390_vuplf: {
8026 EVT VT = Op.getValueType();
8050 switch (Op->getOpcode()) {
8063 "Unexpected stack alignment");
8066 unsigned StackProbeSize =
8069 StackProbeSize &= ~(StackAlign - 1);
8070 return StackProbeSize ? StackProbeSize : StackAlign;
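// Example of the rounding above: with SystemZ's 8-byte stack alignment, a
// "stack-probe-size" attribute of 4095 is rounded down to 4095 & ~7 = 4088;
// a value smaller than the alignment rounds to 0 and falls back to
// StackAlign.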
8087 Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
8093 Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
8119 if (Succ->isLiveIn(SystemZ::CC))
8130 switch (MI.getOpcode()) {
8131 case SystemZ::Select32:
8132 case SystemZ::Select64:
8133 case SystemZ::Select128:
8134 case SystemZ::SelectF32:
8135 case SystemZ::SelectF64:
8136 case SystemZ::SelectF128:
8137 case SystemZ::SelectVR32:
8138 case SystemZ::SelectVR64:
8139 case SystemZ::SelectVR128:
8171 for (auto *MI : Selects) {
8172 Register DestReg = MI->getOperand(0).getReg();
8173 Register TrueReg = MI->getOperand(1).getReg();
8174 Register FalseReg = MI->getOperand(2).getReg();
8179 if (MI->getOperand(4).getImm() == (CCValid ^ CCMask))
8182 if (RegRewriteTable.contains(TrueReg))
8183 TrueReg = RegRewriteTable[TrueReg].first;
8185 if (RegRewriteTable.contains(FalseReg))
8186 FalseReg = RegRewriteTable[FalseReg].second;
8189 BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
8194 RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
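// emitSelect: expands the Select* pseudos into a diamond of blocks around a
// conditional branch on CC. Consecutive selects that test the same CC value
// (scanning ahead while CC is not clobbered) are grouped so they share one
// branch, and createPHIsForSelects above builds the PHIs in the join block.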
8207 unsigned CCValid = MI.getOperand(3).getImm();
8208 unsigned CCMask = MI.getOperand(4).getImm();
8220 assert(NextMI.getOperand(3).getImm() == CCValid &&
8221 "Bad CCValid operands since CC was not redefined.");
8222 if (NextMI.getOperand(4).getImm() == CCMask ||
8223 NextMI.getOperand(4).getImm() == (CCValid ^ CCMask)) {
8229 if (NextMI.definesRegister(SystemZ::CC) || NextMI.usesCustomInsertionHook())
8232 for (auto *SelMI : Selects)
8233 if (NextMI.readsVirtualRegister(SelMI->getOperand(0).getReg())) {
8237 if (NextMI.isDebugInstr()) {
8239 assert(NextMI.isDebugValue() && "Unhandled debug opcode.");
8242 } else if (User || ++Count > 20)
8279 for (auto *SelMI : Selects)
8280 SelMI->eraseFromParent();
8283 for (auto *DbgMI : DbgValues)
8284 MBB->splice(InsertPos, StartMBB, DbgMI);
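// emitCondStore: a conditional store either becomes a store-on-condition
// instruction (STOC family) when available and no index register is needed,
// or a plain store that is branched around when the condition is false.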
8295 unsigned StoreOpcode,
8296 unsigned STOCOpcode,
8297 bool Invert) const {
8302 int64_t Disp = MI.getOperand(2).getImm();
8303 Register IndexReg = MI.getOperand(3).getReg();
8304 unsigned CCValid = MI.getOperand(4).getImm();
8305 unsigned CCMask = MI.getOperand(5).getImm();
8308 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
8313 for (auto *I : MI.memoperands())
8322 if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
8334 MI.eraseFromParent();
8374 MI.eraseFromParent();
8410 int HiOpcode = Unsigned ? SystemZ::VECLG : SystemZ::VECG;
8429 Register Temp = MRI.createVirtualRegister(&SystemZ::VR128BitRegClass);
8437 MI.eraseFromParent();
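// emitAtomicLoadBinary: implements 8- and 16-bit atomic read-modify-write by
// operating on the containing aligned 32-bit word: load it, rotate the field
// into position, apply BinOpcode (optionally inverting the result), rotate
// back, and retry with compare-and-swap (CS) until no other byte changed.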
8448 bool Invert) const {
8457 int64_t Disp = MI.getOperand(2).getImm();
8459 Register BitShift = MI.getOperand(4).getReg();
8460 Register NegBitShift = MI.getOperand(5).getReg();
8461 unsigned BitSize = MI.getOperand(6).getImm();
8465 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
8466 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
8467 assert(LOpcode && CSOpcode && "Displacement out of range");
8470 Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8471 Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8472 Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8473 Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8474 Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8505 Register Tmp = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8510 } else if (BinOpcode)
8533 MI.eraseFromParent();
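// emitAtomicLoadMinMax: same CS-loop structure as above, but each iteration
// compares the rotated old value with the operand and branches on
// KeepOldMask to decide whether to keep the old field or install the new
// one, giving atomic signed/unsigned min and max on a subword field.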
8544 unsigned KeepOldMask) const {
8552 int64_t Disp = MI.getOperand(2).getImm();
8554 Register BitShift = MI.getOperand(4).getReg();
8555 Register NegBitShift = MI.getOperand(5).getReg();
8556 unsigned BitSize = MI.getOperand(6).getImm();
8560 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
8561 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
8562 assert(LOpcode && CSOpcode && "Displacement out of range");
8565 Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8566 Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8567 Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8568 Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8569 Register RotatedAltVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8570 Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8637 MI.eraseFromParent();
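// emitAtomicCmpSwapW: 8- and 16-bit compare-and-swap, again performed on the
// containing aligned word: the expected and replacement values are rotated
// into position, CS retries while the surrounding bytes change, and the CC
// result of the final comparison is kept if the pseudo's CC def is live.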
8653 int64_t Disp = MI.getOperand(2).getImm();
8655 Register OrigSwapVal = MI.getOperand(4).getReg();
8656 Register BitShift = MI.getOperand(5).getReg();
8657 Register NegBitShift = MI.getOperand(6).getReg();
8658 int64_t BitSize = MI.getOperand(7).getImm();
8664 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
8665 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
8666 unsigned ZExtOpcode = BitSize == 8 ? SystemZ::LLCR : SystemZ::LLHR;
8667 assert(LOpcode && CSOpcode && "Displacement out of range");
8670 Register OrigOldVal = MRI.createVirtualRegister(RC);
8673 Register StoreVal = MRI.createVirtualRegister(RC);
8674 Register OldValRot = MRI.createVirtualRegister(RC);
8675 Register RetryOldVal = MRI.createVirtualRegister(RC);
8676 Register RetrySwapVal = MRI.createVirtualRegister(RC);
8751 if (!MI.registerDefIsDead(SystemZ::CC))
8754 MI.eraseFromParent();
8770 Register Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
8771 Register Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
8779 MI.eraseFromParent();
8788 bool ClearEven) const {
8796 Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
8800 Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
8801 Register Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
8812 MI.eraseFromParent();
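// emitMemMemWrapper: expands the MVC/NC/OC/XC/CLC and memset pseudos. Small
// constant lengths become a short run of the underlying instruction (up to
// 256 bytes per instruction); larger or register lengths build a loop that
// processes 256-byte blocks and then handles the remainder, with extra
// blocks for the memset special cases and for CLC's early exit on
// inequality.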
8819 unsigned Opcode, bool IsMemset) const {
8826 uint64_t DestDisp = MI.getOperand(1).getImm();
8832 if (!isUInt<12>(Disp)) {
8833 Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
8834 unsigned Opcode = TII->getOpcodeForOffset(SystemZ::LA, Disp);
8844 SrcDisp = MI.getOperand(3).getImm();
8847 SrcDisp = DestDisp++;
8848 foldDisplIfNeeded(DestBase, DestDisp);
8852 bool IsImmForm = LengthMO.isImm();
8853 bool IsRegForm = !IsImmForm;
8860 unsigned Length) -> void {
8879 bool NeedsLoop = false;
8881 Register LenAdjReg = SystemZ::NoRegister;
8883 ImmLength = LengthMO.getImm();
8884 ImmLength += IsMemset ? 2 : 1;
8885 if (ImmLength == 0) {
8886 MI.eraseFromParent();
8889 if (Opcode == SystemZ::CLC) {
8890 if (ImmLength > 3 * 256)
8900 } else if (ImmLength > 6 * 256)
8908 LenAdjReg = LengthMO.getReg();
8914 (Opcode == SystemZ::CLC && (ImmLength > 256 || NeedsLoop)
8920 MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
8922 TII->loadImmediate(*MBB, MI, StartCountReg, ImmLength / 256);
8933 Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
8937 if (DestBase.isReg() && DestBase.getReg() == SystemZ::NoRegister)
8938 DestBase = loadZeroAddress();
8939 if (SrcBase.isReg() && SrcBase.getReg() == SystemZ::NoRegister)
8940 SrcBase = HaveSingleBase ? DestBase : loadZeroAddress();
8950 (HaveSingleBase ? StartSrcReg : forceReg(MI, DestBase, TII));
8953 Register ThisSrcReg = MRI.createVirtualRegister(RC);
8955 (HaveSingleBase ? ThisSrcReg : MRI.createVirtualRegister(RC));
8956 Register NextSrcReg = MRI.createVirtualRegister(RC);
8958 (HaveSingleBase ? NextSrcReg : MRI.createVirtualRegister(RC));
8959 RC = &SystemZ::GR64BitRegClass;
8960 Register ThisCountReg = MRI.createVirtualRegister(RC);
8961 Register NextCountReg = MRI.createVirtualRegister(RC);
8987 MBB = MemsetOneCheckMBB;
9030 if (EndMBB && !ImmLength)
9052 if (!HaveSingleBase)
9059 if (Opcode == SystemZ::MVC)
9086 if (!HaveSingleBase)
9108 Register RemSrcReg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
9109 Register RemDestReg = HaveSingleBase ? RemSrcReg
9110 : MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
9114 if (!HaveSingleBase)
9130 if (Opcode != SystemZ::MVC) {
9140 while (ImmLength > 0) {
9144 foldDisplIfNeeded(DestBase, DestDisp);
9145 foldDisplIfNeeded(SrcBase, SrcDisp);
9146 insertMemMemOp(MBB, MI, DestBase, DestDisp, SrcBase, SrcDisp, ThisLength);
9147 DestDisp += ThisLength;
9148 SrcDisp += ThisLength;
9149 ImmLength -= ThisLength;
9152 if (EndMBB && ImmLength > 0) {
9168 MI.eraseFromParent();
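// emitStringWrapper: CLST/MVST/SRST operate on data until they either finish
// or are interrupted with CC == 3, so the pseudo expands to a loop that
// re-issues the instruction while CC is 3 and falls through once it
// completes.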
9181 uint64_t End1Reg = MI.getOperand(0).getReg();
9182 uint64_t Start1Reg = MI.getOperand(1).getReg();
9183 uint64_t Start2Reg = MI.getOperand(2).getReg();
9184 uint64_t CharReg = MI.getOperand(3).getReg();
9187 uint64_t This1Reg = MRI.createVirtualRegister(RC);
9188 uint64_t This2Reg = MRI.createVirtualRegister(RC);
9227 MI.eraseFromParent();
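// emitTransactionBegin: TBEGIN/TBEGINC carry a control mask describing which
// register pairs are saved across the transaction; GPRs whose control bit is
// clear (plus FPRs/vector registers unless masked off or NoFloat is set) are
// added as implicit definitions so they are treated as clobbered on abort.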
9234 bool NoFloat) const {
9240 MI.setDesc(TII->get(Opcode));
9244 uint64_t Control = MI.getOperand(2).getImm();
9245 static const unsigned GPRControlBit[16] = {
9246 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
9247 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
9249 Control |= GPRControlBit[15];
9251 Control |= GPRControlBit[11];
9252 MI.getOperand(2).setImm(Control);
9255 for (int I = 0; I < 16; I++) {
9256 if ((Control & GPRControlBit[I]) == 0) {
9263 if (!NoFloat && (Control & 4) != 0) {
9264 if (Subtarget.hasVector()) {
9296 MI.eraseFromParent();
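// emitProbedAlloca: a dynamic stack allocation larger than the probe size is
// carved out in probe-size chunks inside a loop, touching each new page as
// it is allocated, and any remainder is allocated and probed afterwards.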
9309 Register SizeReg = MI.getOperand(2).getReg();
9321 Register PHIReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
9322 Register IncReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
9387 MI.eraseFromParent();
9391SDValue SystemZTargetLowering::
9402 switch (MI.getOpcode()) {
9403 case SystemZ::Select32:
9404 case SystemZ::Select64:
9405 case SystemZ::Select128:
9406 case SystemZ::SelectF32:
9407 case SystemZ::SelectF64:
9408 case SystemZ::SelectF128:
9409 case SystemZ::SelectVR32:
9410 case SystemZ::SelectVR64:
9411 case SystemZ::SelectVR128:
9412 return emitSelect(MI, MBB);
9414 case SystemZ::CondStore8Mux:
9415 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
9416 case SystemZ::CondStore8MuxInv:
9417 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
9418 case SystemZ::CondStore16Mux:
9419 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
9420 case SystemZ::CondStore16MuxInv:
9421 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
9422 case SystemZ::CondStore32Mux:
9423 return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
9424 case SystemZ::CondStore32MuxInv:
9425 return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
9426 case SystemZ::CondStore8:
9427 return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
9428 case SystemZ::CondStore8Inv:
9429 return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
9430 case SystemZ::CondStore16:
9431 return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
9432 case SystemZ::CondStore16Inv:
9433 return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
9434 case SystemZ::CondStore32:
9435 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
9436 case SystemZ::CondStore32Inv:
9437 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
9438 case SystemZ::CondStore64:
9439 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
9440 case SystemZ::CondStore64Inv:
9441 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
9442 case SystemZ::CondStoreF32:
9443 return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
9444 case SystemZ::CondStoreF32Inv:
9445 return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
9446 case SystemZ::CondStoreF64:
9447 return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
9448 case SystemZ::CondStoreF64Inv:
9449 return emitCondStore(MI, MBB, SystemZ::STD, 0, true);
9451 case SystemZ::SCmp128Hi:
9452 return emitICmp128Hi(MI, MBB, false);
9453 case SystemZ::UCmp128Hi:
9454 return emitICmp128Hi(MI, MBB, true);
9456 case SystemZ::PAIR128:
9457 return emitPair128(MI, MBB);
9458 case SystemZ::AEXT128:
9459 return emitExt128(MI, MBB, false);
9460 case SystemZ::ZEXT128:
9461 return emitExt128(MI, MBB, true);
9463 case SystemZ::ATOMIC_SWAPW:
9464 return emitAtomicLoadBinary(MI, MBB, 0);
9466 case SystemZ::ATOMIC_LOADW_AR:
9467 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR);
9468 case SystemZ::ATOMIC_LOADW_AFI:
9469 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI);
9471 case SystemZ::ATOMIC_LOADW_SR:
9472 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR);
9474 case SystemZ::ATOMIC_LOADW_NR:
9475 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR);
9476 case SystemZ::ATOMIC_LOADW_NILH:
9477 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH);
9479 case SystemZ::ATOMIC_LOADW_OR:
9480 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR);
9481 case SystemZ::ATOMIC_LOADW_OILH:
9482 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH);
9484 case SystemZ::ATOMIC_LOADW_XR:
9485 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR);
9486 case SystemZ::ATOMIC_LOADW_XILF:
9487 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF);
9489 case SystemZ::ATOMIC_LOADW_NRi:
9490 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, true);
9491 case SystemZ::ATOMIC_LOADW_NILHi:
9492 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, true);
9494 case SystemZ::ATOMIC_LOADW_MIN:
9496 case SystemZ::ATOMIC_LOADW_MAX:
9498 case SystemZ::ATOMIC_LOADW_UMIN:
9500 case SystemZ::ATOMIC_LOADW_UMAX:
9503 case SystemZ::ATOMIC_CMP_SWAPW:
9504 return emitAtomicCmpSwapW(MI, MBB);
9505 case SystemZ::MVCImm:
9506 case SystemZ::MVCReg:
9507 return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
9508 case SystemZ::NCImm:
9509 return emitMemMemWrapper(MI, MBB, SystemZ::NC);
9510 case SystemZ::OCImm:
9511 return emitMemMemWrapper(MI, MBB, SystemZ::OC);
9512 case SystemZ::XCImm:
9513 case SystemZ::XCReg:
9514 return emitMemMemWrapper(MI, MBB, SystemZ::XC);
9515 case SystemZ::CLCImm:
9516 case SystemZ::CLCReg:
9517 return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
9518 case SystemZ::MemsetImmImm:
9519 case SystemZ::MemsetImmReg:
9520 case SystemZ::MemsetRegImm:
9521 case SystemZ::MemsetRegReg:
9522 return emitMemMemWrapper(MI, MBB, SystemZ::MVC, true);
9523 case SystemZ::CLSTLoop:
9524 return emitStringWrapper(MI, MBB, SystemZ::CLST);
9525 case SystemZ::MVSTLoop:
9526 return emitStringWrapper(MI, MBB, SystemZ::MVST);
9527 case SystemZ::SRSTLoop:
9528 return emitStringWrapper(MI, MBB, SystemZ::SRST);
9529 case SystemZ::TBEGIN:
9530 return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
9531 case SystemZ::TBEGIN_nofloat:
9532 return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
9533 case SystemZ::TBEGINC:
9534 return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
9535 case SystemZ::LTEBRCompare_Pseudo:
9536 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
9537 case SystemZ::LTDBRCompare_Pseudo:
9538 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
9539 case SystemZ::LTXBRCompare_Pseudo:
9540 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);
9542 case SystemZ::PROBED_ALLOCA:
9543 return emitProbedAlloca(MI, MBB);
9545 case TargetOpcode::STACKMAP:
9546 case TargetOpcode::PATCHPOINT:
9557SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
9558 if (VT == MVT::Untyped)
9559 return &SystemZ::ADDR128BitRegClass;
9585 DAG.getMachineNode(SystemZ::EFPC, dl, {MVT::i32, MVT::Other}, Chain), 0);
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu AMDGPU Register Bank Select
static bool isZeroVector(SDValue N)
Function Alias Analysis Results
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
static bool isUndef(ArrayRef< int > Mask)
iv Induction Variable Users
unsigned const TargetRegisterInfo * TRI
const char LLVMTargetMachineRef TM
static bool isSelectPseudo(MachineInstr &MI)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForLTGFR(Comparison &C)
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, SDValue Op1)
static bool isOnlyUsedByStores(SDValue StoredVal, SelectionDAG &DAG)
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, unsigned Opcode, SDValue Op0, SDValue Op1, SDValue &Even, SDValue &Odd)
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Value)
static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In)
static bool isSimpleShift(SDValue N, unsigned &ShiftVal)
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1)
static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num)
static bool isVectorElementSwap(ArrayRef< int > M, EVT VT)
static void getCSAddressAndShifts(SDValue Addr, SelectionDAG &DAG, SDLoc DL, SDValue &AlignedAddr, SDValue &BitShift, SDValue &NegBitShift)
static bool isShlDoublePermute(const SmallVectorImpl< int > &Bytes, unsigned &StartIndex, unsigned &OpNo0, unsigned &OpNo1)
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, const Permute &P, SDValue Op0, SDValue Op1)
static SDNode * emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg)
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, SDValue Op0, SDValue Op1, SDValue &Hi, SDValue &Lo)
static void createPHIsForSelects(SmallVector< MachineInstr *, 8 > &Selects, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, MachineBasicBlock *SinkMBB)
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, SDValue *Ops, const SmallVectorImpl< int > &Bytes)
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode, bool &Invert)
static unsigned CCMaskForCondCode(ISD::CondCode CC)
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForFNeg(Comparison &C)
static bool isScalarToVector(SDValue Op)
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg, unsigned CCValid, unsigned CCMask)
static bool matchPermute(const SmallVectorImpl< int > &Bytes, const Permute &P, unsigned &OpNo0, unsigned &OpNo1)
static bool isAddCarryChain(SDValue Carry)
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static MachineOperand earlyUseOperand(MachineOperand Op)
static bool canUseSiblingCall(const CCState &ArgCCInfo, SmallVectorImpl< CCValAssign > &ArgLocs, SmallVectorImpl< ISD::OutputArg > &Outs)
static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask)
static bool getzOSCalleeAndADA(SelectionDAG &DAG, SDValue &Callee, SDValue &ADA, SDLoc &DL, SDValue &Chain)
static bool shouldSwapCmpOperands(const Comparison &C)
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType)
static SDValue getADAEntry(SelectionDAG &DAG, SDValue Val, SDLoc DL, unsigned Offset, bool LoadAdr=false)
static SDNode * emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static bool getVPermMask(SDValue ShuffleOp, SmallVectorImpl< int > &Bytes)
static const Permute PermuteForms[]
static bool isSubBorrowChain(SDValue Carry)
static void adjustICmp128(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, unsigned OpNo)
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, bool IsNegative)
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, BuildVectorSDNode *BVN)
static bool isMovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode)
static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In)
static SDValue MergeInputChains(SDNode *N1, SDNode *N2)
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, uint64_t Mask, uint64_t CmpVal, unsigned ICmpType)
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, SDValue Op, SDValue Chain)
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, ISD::CondCode Cond, const SDLoc &DL, SDValue Chain=SDValue(), bool IsSignaling=false)
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB)
static Register forceReg(MachineInstr &MI, MachineOperand &Base, const SystemZInstrInfo *TII)
static bool is32Bit(EVT VT)
static std::pair< unsigned, const TargetRegisterClass * > parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC, const unsigned *Map, unsigned Size)
static bool matchDoublePermute(const SmallVectorImpl< int > &Bytes, const Permute &P, SmallVectorImpl< int > &Transform)
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, SDValue Call, unsigned CCValid, uint64_t CC, ISD::CondCode Cond)
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg)
static AddressingMode getLoadStoreAddrMode(bool HasVector, Type *Ty)
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op0, SDValue Op1)
static SDValue extendAtomicLoad(AtomicSDNode *ALoad, EVT VT, SelectionDAG &DAG, ISD::LoadExtType ETy)
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static bool getShuffleInput(const SmallVectorImpl< int > &Bytes, unsigned Start, unsigned BytesPerElement, int &Base)
static AddressingMode supportedAddressingMode(Instruction *I, bool HasVector)
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
Class for arbitrary precision integers.
APInt zext(unsigned width) const
Zero extend to a new width.
uint64_t getZExtValue() const
Get zero extended value.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSingleWord() const
Determine if this APInt just has one word to store value.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
an instruction that atomically reads a memory location, combines it with another value,...
BinOp getOperation() const
This is an SDNode representing atomic operations.
const SDValue & getBasePtr() const
StringRef getValueAsString() const
Return the attribute's value as a string.
The address of a basic block.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
CCState - This class holds information needed while lowering arguments and return values.
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
const GlobalObject * getAliaseeObject() const
bool hasPrivateLinkage() const
bool hasInternalLinkage() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
static auto integer_fixedlen_vector_valuetypes()
bool isVector() const
Return true if this is a vector value type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
static MVT getVectorVT(MVT VT, unsigned NumElements)
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setFrameAddressIsTaken(bool T)
void setReturnAddressIsTaken(bool s)
MachineFunctionProperties & reset(Property P)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
reverse_iterator rbegin()
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineFunctionProperties & getProperties() const
Get the function properties.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr kills the specified register.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr fully defines the specified register.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr reads the specified register.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
bool hasOneUse() const
Return true if there is exactly one use of this node.
iterator_range< use_iterator > uses()
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDNode * isConstantIntBuildVectorOrConstantInt(SDValue N) const
Test whether the given value is a constant int or similar node.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
A SystemZ-specific class detailing special use registers particular for calling conventions.
virtual int getStackPointerBias()=0
virtual int getCallFrameSize()=0
virtual int getStackPointerRegister()=0
A SystemZ-specific constant pool value.
static SystemZConstantPoolValue * Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier)
unsigned getVarArgsFrameIndex() const
void setVarArgsFrameIndex(unsigned FI)
void setRegSaveFrameIndex(unsigned FI)
void incNumLocalDynamicTLSAccesses()
Register getVarArgsFirstGPR() const
void setADAVirtualRegister(Register Reg)
void setVarArgsFirstGPR(Register GPR)
Register getADAVirtualRegister() const
void setSizeOfFnParams(unsigned Size)
void setVarArgsFirstFPR(Register FPR)
unsigned getRegSaveFrameIndex() const
Register getVarArgsFirstFPR() const
const SystemZInstrInfo * getInstrInfo() const override
bool isPC32DBLSymbol(const GlobalValue *GV, CodeModel::Model CM) const
const TargetFrameLowering * getFrameLowering() const override
bool isTargetXPLINK64() const
SystemZCallingConventionRegisters * getSpecialRegisters() const
const SystemZRegisterInfo * getRegisterInfo() const override
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
bool hasInlineStackProbe(const MachineFunction &MF) const override
Returns true if stack probing through inline assembly is requested.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &, EVT) const override
Return the ValueType of the result of SETCC operations.
bool allowTruncateForTailCall(Type *, Type *) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const override
Determines the optimal series of memory ops to replace the memset / memcpy.
bool useSoftFloat() const override
std::pair< SDValue, SDValue > makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName, EVT RetVT, ArrayRef< SDValue > Ops, CallingConv::ID CallConv, bool IsSigned, SDLoc DL, bool DoesNotReturn, bool IsReturnValueUsed) const
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
SystemZTargetLowering(const TargetMachine &TM, const SystemZSubtarget &STI)
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
TargetLowering::ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
TargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const override
Target-specific combining of register parts into its original value.
bool isTruncateFree(Type *, Type *) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine the number of bits in the operation that are sign bits.
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but legal result types.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target has registered an interest in via setTargetDAGCombine.
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is a legal add immediate, that is, the target has add instructions which can add a register with the immediate without having to materialize the immediate into a register.
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was registered to use 'custom' lowering for that type.
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const override
Return true if this function can prove that Op is never poison and, if PoisonOnly is false, never undef.
AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array, into the specified DAG.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in the Known bitset.
unsigned getStackProbeSize(const MachineFunction &MF) const
XPLINK64 calling convention specific use registers. Particular to z/OS when in 64-bit mode.
int getCallFrameSize() final
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligned on entry to a function.
virtual bool hasFP(const MachineFunction &MF) const =0
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider type.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
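These per-operation actions are typically set up in bulk in the target's TargetLowering constructor. A minimal sketch, with illustrative opcode/type choices rather than this file's actual configuration:

  setOperationAction(ISD::BSWAP, MVT::i64, Legal);      // native instruction
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);     // open-code from shifts/ands
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); // handled in LowerOperation()
  setOperationAction(ISD::SDIV, MVT::i8, Promote);      // widen to a larger type first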
virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that is recognized by PrologEpilogInserter.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating-point boolean values from i1 to a wider type.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
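The usual ordering, sketched here with hypothetical register-class and subtarget names: every addRegisterClass call comes first, then computeRegisterProperties derives the per-type defaults.

  // Hypothetical target constructor fragment; class names are assumptions.
  addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);
  addRegisterClass(MVT::i64, &MyTarget::GPR64RegClass);
  computeRegisterProperties(Subtarget.getRegisterInfo());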
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
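For example (the routine name below is hypothetical):

  // Redirect the 64-bit signed-remainder libcall to a platform-specific entry.
  setLibcallName(RTLIB::SREM_I64, "__my_srem64");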
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
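A common use is guarding a DAG combine so it only fires when the combined node is selectable. A sketch, where TLI, DAG, DL, VT and the operands A, B, C are assumed surrounding context:

  // Only fold (fadd (fmul A, B), C) into FMA when FMA is legal for VT.
  if (TLI.isOperationLegal(ISD::FMA, VT))
    return DAG.getNode(ISD::FMA, DL, VT, A, B, C);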
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom DAG combiner for by implementing the PerformDAGCombine virtual method.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
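A sketch of typical constructor configuration for truncating stores and extending loads (illustrative choices, not this file's exact settings):

  // No direct f64 -> f32 truncating store: emit FP_ROUND plus a plain store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  // Sign-extending i1 loads are promoted rather than handled natively.
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);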
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the data layout.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
unsigned getPointerSize(unsigned AS) const
Get the pointer size for this target.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
virtual const TargetInstrInfo * getInstrInfo() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isFP128Ty() const
Return true if this is 'fp128'.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
constexpr ScalarTy getFixedValue() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual results.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to; it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with side effects that does not return a result.
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b. this is still a strong cmpxchg operation, so Success == "Val == cmp".
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low lanes of an integer vector.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the resulting vector type.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined; 0 Round to 0; 1 Round to nearest, ties to even; 2 Round to +inf; 3 Round to -inf; 4 Round to nearest, ties to zero.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN, the high half of the full 2N-bit product.
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on comparing the operands (ops #0 and #1) with a given condition code (op #4, a CondCodeSDNode).
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low lanes of an integer vector.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a larger integer register.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt); Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt). These correspond to the atomicrmw instruction.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low lanes of an integer vector.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified, possibly variable, elements.
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
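Concretely:

  ISD::CondCode CC  = ISD::SETLT;                          // X < Y
  ISD::CondCode Inv = ISD::getSetCCInverse(CC, MVT::i32);  // SETGE: !(X < Y)
  ISD::CondCode Swp = ISD::getSetCCSwappedOperands(CC);    // SETGT: Y > X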
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Define
Register definition.
@ System
Synchronized with respect to all concurrently executing threads.
@ MO_ADA_DATA_SYMBOL_ADDR
@ MO_ADA_DIRECT_FUNC_DESC
@ MO_ADA_INDIRECT_FUNC_DESC
const unsigned GR64Regs[16]
const unsigned VR128Regs[32]
const unsigned GR128Regs[16]
const unsigned FP32Regs[16]
const unsigned GR32Regs[16]
const unsigned FP64Regs[16]
const int64_t ELFCallFrameSize
const unsigned VR64Regs[32]
const unsigned FP128Regs[16]
const unsigned VR32Regs[32]
unsigned odd128(bool Is32bit)
const unsigned CCMASK_CMP_GE
static bool isImmHH(uint64_t Val)
const unsigned CCMASK_TEND
const unsigned CCMASK_CS_EQ
const unsigned CCMASK_TBEGIN
const MCPhysReg ELFArgFPRs[ELFNumArgFPRs]
MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_TM_SOME_1
const unsigned CCMASK_LOGICAL_CARRY
const unsigned TDCMASK_NORMAL_MINUS
const unsigned CCMASK_TDC
const unsigned CCMASK_FCMP
const unsigned CCMASK_TM_SOME_0
static bool isImmHL(uint64_t Val)
const unsigned TDCMASK_SUBNORMAL_MINUS
const unsigned TDCMASK_NORMAL_PLUS
const unsigned CCMASK_CMP_GT
const unsigned TDCMASK_QNAN_MINUS
const unsigned CCMASK_ANY
const unsigned CCMASK_ARITH
const unsigned CCMASK_TM_MIXED_MSB_0
const unsigned TDCMASK_SUBNORMAL_PLUS
static bool isImmLL(uint64_t Val)
const unsigned VectorBits
static bool isImmLH(uint64_t Val)
MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
const unsigned TDCMASK_INFINITY_PLUS
unsigned reverseCCMask(unsigned CCMask)
const unsigned CCMASK_TM_ALL_0
const unsigned CCMASK_CMP_LE
const unsigned CCMASK_CMP_O
const unsigned CCMASK_CMP_EQ
const unsigned VectorBytes
const unsigned TDCMASK_INFINITY_MINUS
const unsigned CCMASK_ICMP
const unsigned CCMASK_VCMP_ALL
MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_VCMP
const unsigned CCMASK_TM_MIXED_MSB_1
const unsigned CCMASK_TM_MSB_0
const unsigned CCMASK_ARITH_OVERFLOW
const unsigned CCMASK_CS_NE
const unsigned TDCMASK_SNAN_PLUS
const unsigned CCMASK_CMP_LT
const unsigned CCMASK_CMP_NE
const unsigned TDCMASK_ZERO_PLUS
const unsigned TDCMASK_QNAN_PLUS
const unsigned TDCMASK_ZERO_MINUS
unsigned even128(bool Is32bit)
const unsigned CCMASK_TM_ALL_1
const unsigned CCMASK_LOGICAL_BORROW
const unsigned ELFNumArgFPRs
const unsigned CCMASK_CMP_UO
const unsigned CCMASK_LOGICAL
const unsigned CCMASK_TM_MSB_1
const unsigned TDCMASK_SNAN_MINUS
Reg
All possible values of the reg field in the ModR/M byte.
support::ulittle32_t Word
NodeAddr< CodeNode * > Code
constexpr const char32_t SBase
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
testing::Matcher< const detail::ErrorHolder & > Failed()
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void dumpBytes(ArrayRef< uint8_t > Bytes, raw_ostream &OS)
Convert 'Bytes' to a hex string and output to 'OS'.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most, stopping at the first 1.
int countl_zero(T Val)
Count the number of 0's from the most significant bit to the least, stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
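A quick illustration of these bit utilities on a 32-bit unsigned input (the function name bitDemo is just a placeholder):

  #include "llvm/ADT/bit.h"
  #include "llvm/Support/MathExtras.h"

  void bitDemo() {
    unsigned V = 40;               // 0b101000
    (void)llvm::countr_zero(V);    // 3: trailing zero bits
    (void)llvm::countl_zero(V);    // 26: leading zero bits in a 32-bit width
    (void)llvm::bit_ceil(V);       // 64: next power of two
    (void)llvm::isPowerOf2_32(V);  // false
    (void)llvm::Log2_32_Ceil(V);   // 6: ceil(log2(40))
  }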
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Mul
Product of integers.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
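For instance:

  // Interpret the low 16 bits of a mask as a signed immediate.
  int64_t Imm = llvm::SignExtend64<16>(0xFFF0u); // -16
  // Largest power of two not exceeding 40.
  unsigned F = llvm::bit_floor(40u);             // 32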
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AddressingMode(bool LongDispl, bool IdxReg)
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
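A short sketch of this EVT query API in action (self-contained except for the usual LLVM headers):

  #include "llvm/CodeGen/ValueTypes.h"
  #include "llvm/IR/LLVMContext.h"
  #include <cassert>

  void evtDemo() {
    llvm::LLVMContext Ctx;
    llvm::EVT V = llvm::EVT::getVectorVT(Ctx, llvm::MVT::i32, 4); // v4i32
    assert(V.isVector() && V.isInteger());   // a vector of integers
    assert(V.getVectorNumElements() == 4);
    assert(V.getScalarSizeInBits() == 32);
    assert(V.getSizeInBits() == llvm::TypeSize::getFixed(128));
  }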
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
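A sketch of the KnownBits flow these methods support:

  llvm::KnownBits K(16);
  K.Zero.setHighBits(8);              // top half known to be zero
  llvm::KnownBits Z = K.zext(32);     // zero extension adds 16 more known-zero bits
  llvm::APInt Max = Z.getMaxValue();  // at most 0x00FF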
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SystemZVectorConstantInfo(APInt IntImm)
SmallVector< unsigned, 2 > OpVals
bool isVectorConstantLegal(const SystemZSubtarget &Subtarget)
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
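These setters chain, so building and issuing a call from inside a TargetLowering subclass typically looks like this sketch, where Chain, DL, RetTy, Callee, Args, DoesNotReturn and IsReturnValueUsed are assumed surrounding context:

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL)
      .setChain(Chain)
      .setCallee(CallingConv::C, RetTy, Callee, std::move(Args))
      .setNoReturn(DoesNotReturn)
      .setDiscardResult(!IsReturnValueUsed);
  // first = the return value (if any), second = the output chain.
  std::pair<SDValue, SDValue> Result = LowerCallTo(CLI);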