using namespace TargetOpcode;
using namespace LegalizeActions;
static void addAndInterleaveWithUnsupported(
    LegacyLegalizerInfo::SizeAndActionsVec &result,
    const LegacyLegalizerInfo::SizeAndActionsVec &v) {
  for (unsigned i = 0; i < v.size(); ++i) {
    result.push_back(v[i]);
    if (i + 1 < v[i].first && i + 1 < v.size() &&
        v[i + 1].first != v[i].first + 1)
      result.push_back({v[i].first + 1, LegacyLegalizeActions::Unsupported});
  }
}

static LegacyLegalizerInfo::SizeAndActionsVec
widen_1(const LegacyLegalizerInfo::SizeAndActionsVec &v) {
  // s1 is widened; sizes falling between entries of v, and past the largest
  // entry, are unsupported.
  LegacyLegalizerInfo::SizeAndActionsVec result = {
      {1, LegacyLegalizeActions::WidenScalar},
      {2, LegacyLegalizeActions::Unsupported}};
  addAndInterleaveWithUnsupported(result, v);
  auto Largest = result.back().first;
  result.push_back({Largest + 1, LegacyLegalizeActions::Unsupported});
  return result;
}
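// Worked example (illustrative comment, not part of the original source):
// given legal sizes v = {{8, Legal}, {16, Legal}, {32, Legal}}, widen_1
// produces
//   {{1, WidenScalar}, {2, Unsupported}, {8, Legal}, {9, Unsupported},
//    {16, Legal}, {17, Unsupported}, {32, Legal}, {33, Unsupported}}.
// Since a size is governed by the last entry not larger than it, an s1 value
// is widened, the listed sizes stay legal, and every other size is rejected.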
X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
                                   const X86TargetMachine &TM)
    : Subtarget(STI), TM(TM) {
  setLegalizerInfo32bit();
  setLegalizerInfo64bit();
  setLegalizerInfoSSE1();
  setLegalizerInfoSSE2();
  setLegalizerInfoSSE41();
  setLegalizerInfoAVX();
  setLegalizerInfoAVX2();
  setLegalizerInfoAVX512();
  setLegalizerInfoAVX512DQ();
  setLegalizerInfoAVX512BW();
  auto &LegacyInfo = getLegacyLegalizerInfo();

  LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(G_PHI, 0, widen_1);
  for (unsigned BinOp : {G_SUB, G_MUL, G_AND, G_OR, G_XOR})
    LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(BinOp, 0, widen_1);
  for (unsigned MemOp : {G_LOAD, G_STORE})
    LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(
        MemOp, 0, narrowToSmallerAndWidenToSmallest);
  LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(
      G_PTR_ADD, 1, widenToLargerTypesUnsupportedOtherwise);
  LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(
      G_CONSTANT, 0, widenToLargerTypesAndNarrowToLargest);

  LegacyInfo.computeTables();
  verify(*STI.getInstrInfo());
}
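// Net effect (illustrative note, not from the original source): once
// computeTables() has run, a query such as {G_SUB, 0, s1} resolves through
// the widen_1 strategy registered above, so a 1-bit subtract is widened to
// s8 before selection, while the s8/s16/s32 cases marked Legal in
// setLegalizerInfo32bit() are selected directly.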
bool X86LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                         MachineInstr &MI) const {
  return true;
}

void X86LegalizerInfo::setLegalizerInfo32bit() {
  const LLT p0 = LLT::pointer(0, TM.getPointerSizeInBits(0));
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (auto Ty : {p0, s1, s8, s16, s32})
    LegacyInfo.setAction({G_IMPLICIT_DEF, Ty}, LegacyLegalizeActions::Legal);

  for (auto Ty : {s8, s16, s32, p0})
    LegacyInfo.setAction({G_PHI, Ty}, LegacyLegalizeActions::Legal);

  for (unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
    for (auto Ty : {s8, s16, s32})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  for (unsigned Op : {G_UADDE}) {
    LegacyInfo.setAction({Op, s32}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({Op, 1, s1}, LegacyLegalizeActions::Legal);
  }

  for (unsigned MemOp : {G_LOAD, G_STORE}) {
    for (auto Ty : {s8, s16, s32, p0})
      LegacyInfo.setAction({MemOp, Ty}, LegacyLegalizeActions::Legal);
  }
  if (!Subtarget.is64Bit()) {
    // ...

    getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UDIV, G_UREM})
        .legalFor({s8, s16, s32})
        .clampScalar(0, s8, s32);

    // Shifts take an s8 shift amount on x86.
    getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
        .legalFor({{s8, s8}, {s16, s8}, {s32, s8}})
        .clampScalar(0, s8, s32)
        .clampScalar(1, s8, s8);

    // Comparison
    getActionDefinitionsBuilder(G_ICMP)
        .legalForCartesianProduct({s8}, {s8, s16, s32, p0})
        .clampScalar(0, s8, s8);
  }
  // Constants
  for (auto Ty : {s8, s16, s32, p0})
    LegacyInfo.setAction({TargetOpcode::G_CONSTANT, Ty},
                         LegacyLegalizeActions::Legal);

  // Extensions
  for (auto Ty : {s8, s16, s32}) {
    LegacyInfo.setAction({G_ZEXT, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_SEXT, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_ANYEXT, Ty}, LegacyLegalizeActions::Legal);
  }

  // Merge/Unmerge
  for (const auto &Ty : {s16, s32, s64}) {
    LegacyInfo.setAction({G_MERGE_VALUES, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
  }
  for (const auto &Ty : {s8, s16, s32}) {
    LegacyInfo.setAction({G_MERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, Ty}, LegacyLegalizeActions::Legal);
  }
}
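// Note on the setAction key (illustrative, not from the original source):
// the small integer before the type selects the type index of the rule, so
// in the G_UADDE rules above {Op, s32} constrains type index 0 (the result
// and addends) while {Op, 1, s1} constrains type index 1 (the carry flag).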
void X86LegalizerInfo::setLegalizerInfo64bit() {
  if (!Subtarget.is64Bit())
    return;

  const LLT p0 = LLT::pointer(0, TM.getPointerSizeInBits(0));
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s128 = LLT::scalar(128);

  auto &LegacyInfo = getLegacyLegalizerInfo();
  for (unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
    LegacyInfo.setAction({BinOp, s64}, LegacyLegalizeActions::Legal);

  for (unsigned MemOp : {G_LOAD, G_STORE})
    LegacyInfo.setAction({MemOp, s64}, LegacyLegalizeActions::Legal);

  // Constants
  LegacyInfo.setAction({TargetOpcode::G_CONSTANT, s64},
                       LegacyLegalizeActions::Legal);

  // Extensions
  for (unsigned extOp : {G_ZEXT, G_SEXT, G_ANYEXT}) {
    LegacyInfo.setAction({extOp, s64}, LegacyLegalizeActions::Legal);
  }
  // Int <-> FP conversions
  getActionDefinitionsBuilder(G_SITOFP)
      .legalForCartesianProduct({s32, s64})
      .clampScalar(1, s32, s64);

  getActionDefinitionsBuilder(G_FPTOSI)
      .legalForCartesianProduct({s32, s64})
      .clampScalar(1, s32, s64);
  // Comparison
  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s8}, {s8, s16, s32, s64, p0})
      .clampScalar(0, s8, s8);

  getActionDefinitionsBuilder(G_FCMP)
      .legalForCartesianProduct({s8}, {s32, s64})
      .clampScalar(0, s8, s8)
      .clampScalar(1, s32, s64);
  // Divisions
  getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UDIV, G_UREM})
      .legalFor({s8, s16, s32, s64})
      .clampScalar(0, s8, s64);

  // Shifts
  getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
      .legalFor({{s8, s8}, {s16, s8}, {s32, s8}, {s64, s8}})
      .clampScalar(0, s8, s64)
      .clampScalar(1, s8, s8);
  // Merge/Unmerge
  LegacyInfo.setAction({G_UNMERGE_VALUES, 1, s128},
                       LegacyLegalizeActions::Legal);
}
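// Illustrative note (not from the original source): with the rules above, an
// s128 G_SDIV is first narrowed by clampScalar(0, s8, s64) to s64, and an s1
// G_SDIV is widened to s8; only then does legalFor({s8, s16, s32, s64}) mark
// the operation selectable.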
void X86LegalizerInfo::setLegalizerInfoSSE1() {
  if (!Subtarget.hasSSE1())
    return;

  const LLT s32 = LLT::scalar(32);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned BinOp : {G_FADD, G_FSUB, G_FMUL, G_FDIV})
    for (auto Ty : {s32, v4s32})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  for (unsigned MemOp : {G_LOAD, G_STORE})
    for (auto Ty : {v4s32, v2s64})
      LegacyInfo.setAction({MemOp, Ty}, LegacyLegalizeActions::Legal);

  // Constants
  LegacyInfo.setAction({TargetOpcode::G_FCONSTANT, s32},
                       LegacyLegalizeActions::Legal);

  // Merge/Unmerge
  for (const auto &Ty : {v4s32, v2s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
  }
}
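// For reference (illustrative, not from the original source): the vector
// types in these SSE/AVX sections are fixed-width LLTs, e.g.
//   const LLT v4s32 = LLT::fixed_vector(4, 32); // <4 x s32>, 128-bit XMM
//   const LLT v8s32 = LLT::fixed_vector(8, 32); // <8 x s32>, 256-bit YMM
// so each feature level legalizes exactly the register widths it adds.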
void X86LegalizerInfo::setLegalizerInfoSSE2() {
  if (!Subtarget.hasSSE2())
    return;

  // ...

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned BinOp : {G_FADD, G_FSUB, G_FMUL, G_FDIV})
    for (auto Ty : {s64, v2s64})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v16s8, v8s16, v4s32, v2s64})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  // ...

  // Constants
  LegacyInfo.setAction({TargetOpcode::G_FCONSTANT, s64},
                       LegacyLegalizeActions::Legal);

  // Merge/Unmerge
  for (const auto &Ty :
       {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
  }
  for (const auto &Ty : {v16s8, v8s16, v4s32, v2s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, 1, Ty},
                         LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, Ty}, LegacyLegalizeActions::Legal);
  }
}
void X86LegalizerInfo::setLegalizerInfoSSE41() {
  if (!Subtarget.hasSSE41())
    return;
  // ...
}
void X86LegalizerInfo::setLegalizerInfoAVX() {
  if (!Subtarget.hasAVX())
    return;

  // ...

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned MemOp : {G_LOAD, G_STORE})
    for (auto Ty : {v8s32, v4s64})
      LegacyInfo.setAction({MemOp, Ty}, LegacyLegalizeActions::Legal);

  for (auto Ty : {v32s8, v16s16, v8s32, v4s64}) {
    LegacyInfo.setAction({G_INSERT, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_EXTRACT, 1, Ty}, LegacyLegalizeActions::Legal);
  }
  for (auto Ty : {v16s8, v8s16, v4s32, v2s64}) {
    LegacyInfo.setAction({G_INSERT, 1, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_EXTRACT, Ty}, LegacyLegalizeActions::Legal);
  }

  // Merge/Unmerge
  for (const auto &Ty :
       {v32s8, v64s8, v16s16, v32s16, v8s32, v16s32, v4s64, v8s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
  }
  for (const auto &Ty :
       {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, 1, Ty},
                         LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, Ty}, LegacyLegalizeActions::Legal);
  }
}
void X86LegalizerInfo::setLegalizerInfoAVX2() {
  if (!Subtarget.hasAVX2())
    return;

  // ...

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v32s8, v16s16, v8s32, v4s64})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  for (auto Ty : {v16s16, v8s32})
    LegacyInfo.setAction({G_MUL, Ty}, LegacyLegalizeActions::Legal);

  // Merge/Unmerge
  for (const auto &Ty : {v64s8, v32s16, v16s32, v8s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
  }
  for (const auto &Ty : {v32s8, v16s16, v8s32, v4s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, 1, Ty},
                         LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, Ty}, LegacyLegalizeActions::Legal);
  }
}
void X86LegalizerInfo::setLegalizerInfoAVX512() {
  if (!Subtarget.hasAVX512())
    return;

  // ...

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v16s32, v8s64})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  LegacyInfo.setAction({G_MUL, v16s32}, LegacyLegalizeActions::Legal);

  for (unsigned MemOp : {G_LOAD, G_STORE})
    for (auto Ty : {v16s32, v8s64})
      LegacyInfo.setAction({MemOp, Ty}, LegacyLegalizeActions::Legal);

  for (auto Ty : {v64s8, v32s16, v16s32, v8s64}) {
    LegacyInfo.setAction({G_INSERT, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_EXTRACT, 1, Ty}, LegacyLegalizeActions::Legal);
  }
  for (auto Ty : {v32s8, v16s16, v8s32, v4s64, v16s8, v8s16, v4s32, v2s64}) {
    LegacyInfo.setAction({G_INSERT, 1, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_EXTRACT, Ty}, LegacyLegalizeActions::Legal);
  }

  // VLX
  if (!Subtarget.hasVLX())
    return;

  for (auto Ty : {v4s32, v8s32})
    LegacyInfo.setAction({G_MUL, Ty}, LegacyLegalizeActions::Legal);
}
void X86LegalizerInfo::setLegalizerInfoAVX512DQ() {
  if (!(Subtarget.hasAVX512() && Subtarget.hasDQI()))
    return;

  // ...

  // VLX
  if (!Subtarget.hasVLX())
    return;

  for (auto Ty : {v2s64, v4s64})
    LegacyInfo.setAction({G_MUL, Ty}, LegacyLegalizeActions::Legal);
}
void X86LegalizerInfo::setLegalizerInfoAVX512BW() {
  if (!(Subtarget.hasAVX512() && Subtarget.hasBWI()))
    return;

  // ...

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v64s8, v32s16})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  // VLX
  if (!Subtarget.hasVLX())
    return;

  for (auto Ty : {v8s16, v16s16})
    LegacyInfo.setAction({G_MUL, Ty}, LegacyLegalizeActions::Legal);
}
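// Pattern note (illustrative, not from the original source): each AVX512*
// helper returns early unless its feature bits are present, so e.g. G_MUL on
// v8s16/v16s16 only becomes Legal when both BWI and VLX are available; on a
// machine without VLX the same operation keeps whatever status the earlier
// SSE2/AVX2 sections gave it.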
static LegacyLegalizerInfo::SizeAndActionsVec widen_1(const LegacyLegalizerInfo::SizeAndActionsVec &v)
static void addAndInterleaveWithUnsupported(LegacyLegalizerInfo::SizeAndActionsVec &result, const LegacyLegalizerInfo::SizeAndActionsVec &v)
FIXME: The following static functions are SizeChangeStrategy functions that are meant to temporarily ...
This file declares the targeting of the MachineLegalizer class for X86.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
static SizeAndActionsVec narrowToSmallerAndWidenToSmallest(const SizeAndActionsVec &v)
static SizeAndActionsVec widenToLargerTypesUnsupportedOtherwise(const SizeAndActionsVec &v)
std::vector< SizeAndAction > SizeAndActionsVec
static SizeAndActionsVec widenToLargerTypesAndNarrowToLargest(const SizeAndActionsVec &v)
A SizeChangeStrategy for the common case where legalization for a particular operation consists of wi...
LegalizeRuleSet & minScalar(unsigned TypeIdx, const LLT Ty)
Ensure the scalar is at least as wide as Ty.
LegalizeRuleSet & legalFor(std::initializer_list< LLT > Types)
The instruction is legal when type index 0 is any type in the given list.
LegalizeRuleSet & libcall()
The instruction is emitted as a library call.
LegalizeRuleSet & lower()
The instruction is lowered.
LegalizeRuleSet & clampScalar(unsigned TypeIdx, const LLT MinTy, const LLT MaxTy)
Limit the range of scalar sizes to MinTy and MaxTy.
LegalizeRuleSet & widenScalarToNextPow2(unsigned TypeIdx, unsigned MinSize=0)
Widen the scalar to the next power of two that is at least MinSize.
LegalizeRuleSet & scalarize(unsigned TypeIdx)
LegalizeRuleSet & legalForCartesianProduct(std::initializer_list< LLT > Types)
The instruction is legal when type indexes 0 and 1 are both in the given list.
LegalizeRuleSet & getActionDefinitionsBuilder(unsigned Opcode)
Get the action definition builder for the given opcode.
const LegacyLegalizerInfo & getLegacyLegalizerInfo() const
Representation of each machine instruction.
unsigned getPointerSizeInBits(unsigned AS) const
bool legalizeIntrinsic(LegalizerHelper &Helper, MachineInstr &MI) const override
X86LegalizerInfo(const X86Subtarget &STI, const X86TargetMachine &TM)
const X86InstrInfo * getInstrInfo() const override
@ Legal
The operation is expected to be selectable directly by the target, and no transformation is necessary...
@ Unsupported
This operation is completely unsupported on the target.
@ WidenScalar
The operation should be implemented in terms of a wider scalar base-type.
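Taken together, the entries above describe the two APIs this file mixes: the legacy per-size action tables and the rule-based LegalizeRuleSet builders. A minimal sketch of the builder API in isolation (hypothetical target, class, and init() member chosen for illustration; not code from this file):

  void MyTargetLegalizerInfo::init() {
    const LLT s8 = LLT::scalar(8);
    const LLT s32 = LLT::scalar(32);

    getActionDefinitionsBuilder(G_ADD)
        .legalFor({s32})           // selectable as-is
        .clampScalar(0, s8, s32)   // clamp stray widths into range
        .widenScalarToNextPow2(0); // then round up to a power of two

    getActionDefinitionsBuilder(G_FREM).libcall(); // emitted as a libm call

    getLegacyLegalizerInfo().computeTables();
  }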