#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H
class FunctionLoweringInfo;
class LegacyDivergenceAnalysis;
class MachineBasicBlock;
class MachineFunction;
class MachineJumpTableInfo;
class MachineRegisterInfo;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetTransformInfo;
  bool DstAlignCanChange;
                    bool MemcpyStrSrc = false) {
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.ZeroMemset = false;
    Op.MemcpyStrSrc = MemcpyStrSrc;
    Op.SrcAlign = SrcAlign;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.ZeroMemset = IsZeroMemset;
    Op.MemcpyStrSrc = false;
    assert(!DstAlignCanChange);
    return isMemcpy() && !DstAlignCanChange;
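  // Illustrative sketch (not part of the original header), assuming the usual
  // MemOp::Copy factory signature (Size, DstAlignCanChange, DstAlign, SrcAlign,
  // IsVolatile, MemcpyStrSrc): describing a 16-byte memcpy whose destination
  // alignment is fixed, then querying it:
  //
  //   MemOp Op = MemOp::Copy(/*Size=*/16, /*DstAlignCanChange=*/false,
  //                          /*DstAlign=*/Align(4), /*SrcAlign=*/Align(4),
  //                          /*IsVolatile=*/false);
  //   if (Op.isMemcpyWithFixedDstAlign())
  //     Align DstAlign = Op.getDstAlign(); // Valid: the assert above holds.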
                                 bool LegalTypes = true) const;
    return HasMultipleConditionRegisters;
                                                   unsigned DefinedValues) const {
    return DefinedValues < 3;
    return BypassSlowDivWidths;
                                  unsigned AddrSpace) const {
                                          unsigned KeptBits) const {
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
                                        unsigned &Cost) const {
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
    return DAG.getNode(ExtendCode, dl, BoolVT, Bool);
    return SchedPreferenceInfo;
    assert(RC && "This value type is not natively supported!");
                                    const Value *) const {
    return RepRegClassCostForVT[VT.SimpleTy];
    return ValueTypeActions[VT.SimpleTy];
    ValueTypeActions[VT.SimpleTy] = Action;
    return ValueTypeActions;
    return getTypeConversion(Context, VT).first;
    return getTypeConversion(Context, VT).second;
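  // Illustrative sketch (not part of the original header): getTypeAction() and
  // getTypeToTransformTo() expose the two halves of getTypeConversion(). On a
  // target whose widest legal integer type is i32 (TLI and Ctx assumed):
  //
  //   auto Action = TLI.getTypeAction(Ctx, MVT::i64);       // TypeExpandInteger
  //   EVT Next    = TLI.getTypeToTransformTo(Ctx, MVT::i64); // MVT::i32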
                                          EVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT) const;
      unsigned &NumIntermediates, MVT &RegisterVT) const {
                              bool ForCodeSize = false) const {
                                               unsigned Scale) const {
                                          unsigned Scale) const {
    if (Action != Legal)
    return Supported ? Action : Expand;
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
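  // For illustration (not part of the original header): expanding
  // "llvm/IR/ConstrainedOps.def" through the two macros above produces cases
  // such as
  //
  //   case ISD::STRICT_FADD:   EqOpc = ISD::FADD;  break;
  //   case ISD::STRICT_FSETCC: EqOpc = ISD::SETCC; break;
  //
  // i.e. every constrained (strict) FP node is mapped to the ordinary node
  // whose legality information it should reuse.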
                                    bool LegalOnly = false) const {
                                    bool LegalOnly = false) const {
                                    bool LegalOnly = false) const {
                                                EVT ConditionVT) const;
    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
           (NumDests == 3 && NumCmps >= 6);
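  // Worked example (illustrative): one destination reached by four comparisons
  // satisfies the first clause (NumDests == 1 && NumCmps >= 3), so the cluster
  // is considered profitable to lower this way; two destinations need at least
  // five comparisons and three destinations at least six.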
    unsigned Shift = 4 * ExtType;
           "Table isn't big enough!");
    return TruncStoreActions[ValI][MemI];
                             bool LegalOnly) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Load);
    return getIndexedModeAction(IdxMode, VT, IMAB_Store);
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
           "Table isn't big enough!");
    assert(Action != Promote && "Can't promote condition code!");
           "This operation isn't promoted!");
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
        PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;
           "Cannot autopromote this type, add it with AddPromotedToType.");
           "Didn't find type to promote to!");
                   bool AllowUnknown = false) const {
                      bool AllowUnknown = false) const {
    if (auto *PTy = dyn_cast<PointerType>(Ty))
    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      Type *EltTy = VTy->getElementType();
      if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
                             VTy->getElementCount());
                         bool AllowUnknown = false) const {
    else if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
      Type *Elm = VTy->getElementType();
      if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
                             VTy->getElementCount());
                            bool AllowUnknown = false) const {
    return RegisterTypeForVT[VT.SimpleTy];
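  // Illustrative sketch (not part of the original header): how the helpers
  // above map IR types, assuming 64-bit pointers and locals TLI, DL and Ctx:
  //
  //   EVT A = TLI.getValueType(DL, Type::getInt32Ty(Ctx));   // i32
  //   EVT B = TLI.getValueType(DL, Type::getInt8PtrTy(Ctx)); // i64 (pointer)
  //   // A vector of pointers is lowered element-wise, e.g. <4 x ptr> -> <4 x i64>.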
      unsigned NumIntermediates;
                                      NumIntermediates, RegisterVT);
      unsigned NumIntermediates;
    return (BitWidth + RegWidth - 1) / RegWidth;
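  // Worked example (illustrative): with RegWidth = 32, an i64 value needs
  // (64 + 32 - 1) / 32 = 2 registers, while an i1 needs (1 + 31) / 32 = 1.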
    return DL.getABITypeAlign(ArgTy);
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT & 7));
                                              bool * = nullptr) const {
                                              bool * = nullptr) const {
                                      unsigned AddrSpace = 0, Align Alignment = Align(1),
                                      bool *Fast = nullptr) const;
                                      bool *Fast = nullptr) const;
                          unsigned AddrSpace = 0, Align Alignment = Align(1),
                          bool *Fast = nullptr) const;
                          bool *Fast = nullptr) const;
                          bool *Fast = nullptr) const;
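  // Illustrative sketch (hypothetical target, not part of the original
  // header): a backend with fast unaligned 32-bit accesses might override the
  // hook roughly like this, assuming the bool *Fast form shown above:
  //
  //   bool MyTargetLowering::allowsMisalignedMemoryAccesses(
  //       EVT VT, unsigned AddrSpace, Align Alignment,
  //       MachineMemOperand::Flags Flags, bool *Fast) const {
  //     if (VT != MVT::i32)
  //       return false;
  //     if (Fast)
  //       *Fast = true; // Unaligned i32 accesses are as cheap as aligned ones.
  //     return true;
  //   }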
    return StackPointerRegisterToSaveRestore;
    return MinStackArgumentAlignment;
                                  unsigned &) const {
    return MaxAtomicSizeInBitsSupported;
    llvm_unreachable(
        "Masked atomicrmw expansion unimplemented on this target");
        "Bit test atomicrmw expansion unimplemented on this target");
    if (SI->getValueOperand()->getType()->isFloatingPointTy())
                               bool IsSigned) const {
    BooleanContents = Ty;
    BooleanFloatContents = Ty;
    BooleanContents = IntTy;
    BooleanFloatContents = FloatTy;
    BooleanVectorContents = Ty;
    SchedPreferenceInfo = Pref;
    StackPointerRegisterToSaveRestore = R;
    HasMultipleConditionRegisters = hasManyRegs;
    HasExtractBitsInsn = hasExtractInsn;
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
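  // Illustrative sketch (hypothetical target constructor, not part of the
  // original header): typical uses of the protected setters above; MyISA::SP
  // is a made-up register:
  //
  //   setBooleanContents(ZeroOrOneBooleanContent);
  //   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  //   setSchedulingPreference(Sched::RegPressure);
  //   setStackPointerRegisterToSaveRestore(MyISA::SP);
  //   addBypassSlowDiv(64, 32); // Prefer a 32-bit divide when values fit.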
  virtual std::pair<const TargetRegisterClass *, uint8_t>
    for (auto Op : Ops) {
      OpActions[(unsigned)VT.SimpleTy][Op] = Action;
    for (auto ExtType : ExtTypes) {
             MemVT.isValid() && "Table isn't big enough!");
      assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
      unsigned Shift = 4 * ExtType;
    for (auto MemVT : MemVTs)
    for (auto IdxMode : IdxModes)
      setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
    for (auto IdxMode : IdxModes)
      setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
      setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
      setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
    for (auto CC : CCs) {
             "Table isn't big enough!");
      assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
    for (auto NT : NTs) {
      TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7);
    MinFunctionAlignment = Alignment;
    PrefFunctionAlignment = Alignment;
    MaxBytesForAlignment = MaxBytes;
    MinStackArgumentAlignment = Alignment;
    MaxAtomicSizeInBitsSupported = SizeInBits;
    MinCmpXchgSizeInBits = SizeInBits;
    SupportsUnalignedAtomics = UnalignedSupported;
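  // Illustrative sketch (hypothetical target constructor, not part of the
  // original header): how the action tables maintained above are typically
  // populated:
  //
  //   setOperationAction(ISD::SELECT, MVT::f32, Custom);
  //   setOperationAction({ISD::SDIVREM, ISD::UDIVREM}, MVT::i32, Expand);
  //   setLoadExtAction(ISD::EXTLOAD, MVT::i32, MVT::i1, Promote);
  //   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  //   setCondCodeAction(ISD::SETUGT, MVT::i32, Expand);
  //   setTargetDAGCombine(ISD::ADD);
  //   setMinFunctionAlignment(Align(4));
  //   setMaxAtomicSizeInBitsSupported(64);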
                                     Type *Ty, unsigned AddrSpace,
                              unsigned AS = 0) const {
    return (From->isIntegerTy() || From->isFloatingPointTy()) &&
    default: return false;
    switch (I->getOpcode()) {
    case Instruction::FPExt:
    case Instruction::ZExt:
      if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
    case Instruction::SExt:
    if (isa<ZExtInst>(Ext))
    assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
                                    unsigned Factor) const {
                                     unsigned Factor) const {
           "invalid fpext types");
                                 LLT DestTy, LLT SrcTy) const {
                               EVT DestVT, EVT SrcVT) const {
           "invalid fpext types");
    assert((MI.getOpcode() == TargetOpcode::G_FADD ||
            MI.getOpcode() == TargetOpcode::G_FSUB ||
            MI.getOpcode() == TargetOpcode::G_FMUL) &&
           "unexpected node in FMAD forming combine");
           "unexpected node in FMAD forming combine");
                                       unsigned Index) const {
                                      bool MathUsed) const {
    for (auto Call : Calls)
      LibcallRoutineNames[Call] = Name;
    return LibcallRoutineNames[Call];
    CmpLibcallCCs[Call] = CC;
    return CmpLibcallCCs[Call];
    LibcallCallingConvs[Call] = CC;
    return LibcallCallingConvs[Call];
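  // Illustrative sketch (not part of the original header; "__my_divdi3" is a
  // made-up routine name): retargeting a runtime call from a target's
  // constructor:
  //
  //   setLibcallName(RTLIB::SDIV_I64, "__my_divdi3");
  //   setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::C);
  //   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETEQ); // Result tested with SETEQ.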
  bool HasMultipleConditionRegisters;
  bool HasExtractBitsInsn;
  bool JumpIsExpensive;
  Align MinStackArgumentAlignment;
  Align MinFunctionAlignment;
  Align PrefFunctionAlignment;
  Align PrefLoopAlignment;
  unsigned MaxBytesForAlignment;
  unsigned MaxAtomicSizeInBitsSupported;
  unsigned MinCmpXchgSizeInBits;
  bool SupportsUnalignedAtomics;
  Register StackPointerRegisterToSaveRestore;
  ValueTypeActionImpl ValueTypeActions;
  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];
  void InitLibcalls(const Triple &TT);
  enum IndexedModeActionsBits {
    IMAB_MaskedStore = 8,
    IMAB_MaskedLoad = 12
  void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
           (unsigned)Action < 0xf && "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
    IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
                                      unsigned Shift) const {
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
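  // Worked example (illustrative): each indexed-mode action is a 4-bit field
  // of a uint16_t selected by the IMAB_* shift, so storing a masked-load
  // action (shift 12) clears bits 12-15 and ORs the new value in:
  //
  //   Entry = (Entry & ~(0xf << 12)) | ((uint16_t)Action << 12);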
                           bool IsSignaling = false) const;
                          MakeLibCallOptions CallOptions,
                                  const uint32_t *CallerPreservedMask,
                                   const MemOp &Op, unsigned DstAS, unsigned SrcAS,
                               const APInt &DemandedElts,
                               TargetLoweringOpt &TLO) const;
                               TargetLoweringOpt &TLO) const;
                        const APInt &DemandedElts,
                        TargetLoweringOpt &TLO) const;
                            TargetLoweringOpt &TLO, unsigned Depth = 0,
                            bool AssumeSingleUse = false) const;
                            KnownBits &Known, TargetLoweringOpt &TLO,
                            bool AssumeSingleUse = false) const;
                            DAGCombinerInfo &DCI) const;
                            const APInt &DemandedElts,
                            DAGCombinerInfo &DCI) const;
                                 const APInt &DemandedElts,
                                 unsigned Depth = 0) const;
                                 unsigned Depth = 0) const;
                                  const APInt &DemandedElts,
                                  unsigned Depth = 0) const;
                                  TargetLoweringOpt &TLO, unsigned Depth = 0,
                                  bool AssumeSingleUse = false) const;
                                  DAGCombinerInfo &DCI) const;
                                     const APInt &DemandedElts,
                                     unsigned Depth = 0) const;
                                      const APInt &DemandedElts,
                                      unsigned Depth = 0) const;
                                      unsigned Depth = 0) const;
                                           const APInt &DemandedElts,
                                           unsigned Depth = 0) const;
                                            const APInt &DemandedElts,
                                            unsigned Depth = 0) const;
      APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
                                        const APInt &DemandedElts,
                                        TargetLoweringOpt &TLO,
                                        unsigned Depth = 0) const;
                                          bool PoisonOnly, unsigned Depth) const;
                                  unsigned Depth = 0) const;
                                  unsigned Depth = 0) const;
                        bool foldBooleans, DAGCombinerInfo &DCI,
                        const SDLoc &dl) const;
                                       bool LegalOps, bool OptForSize,
                                       unsigned Depth = 0) const;
                               bool LegalOps, bool OptForSize,
                               unsigned Depth = 0) const {
                            bool OptForSize, unsigned Depth = 0) const {
                                        unsigned NumParts, MVT PartVT,
                                           const SDValue *Parts, unsigned NumParts,
      IsInReg = Call.hasRetAttr(Attribute::InReg);
          Call.doesNotReturn() ||
          (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode()));
      RetSExt = Call.hasRetAttr(Attribute::SExt);
      RetZExt = Call.hasRetAttr(Attribute::ZExt);
      NoMerge = Call.hasFnAttr(Attribute::NoMerge);
                      bool Value = true) {
  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
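  // Illustrative sketch (not part of the original header): the usual pattern
  // for emitting a call through CallLoweringInfo and LowerCallTo, assuming a
  // SelectionDAG DAG, a chain Chain, a callee SDValue Callee, its return type
  // RetTy and a prepared ArgListTy Args:
  //
  //   TargetLowering::CallLoweringInfo CLI(DAG);
  //   CLI.setDebugLoc(dl)
  //       .setChain(Chain)
  //       .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
  //   std::pair<SDValue, SDValue> Result = LowerCallTo(CLI);
  //   // Result.first is the returned value, Result.second the output chain.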
    return "__clear_cache";
    return VT.bitsLT(MinVT) ? MinVT : VT;
    return DL.isLittleEndian();
    assert(SI.isAtomic() && "violated precondition");
  virtual std::pair<unsigned, const TargetRegisterClass *>
    if (ConstraintCode == "m")
    if (ConstraintCode == "o")
    if (ConstraintCode == "X")
    if (ConstraintCode == "p")
                                            std::vector<SDValue> &Ops,
                                    const AsmOperandInfo &OpInfo,