23 using namespace TargetOpcode;
24 using namespace LegalizeActions;
// NOTE(review): fragment of one or more file-local SizeAndActionsVec helper
// functions; the enclosing signatures and many interior lines are missing
// from this extraction (the leading numerals are fused original line
// numbers, not code). Confirm against the upstream file before editing.
// Walks `v` (pairs whose .first is a bit size). The short-circuit order
// keeps the v[i + 1] access guarded by the i + 1 < v.size() bounds test.
38 for (
unsigned i = 0;
i < v.size(); ++
i) {
40 if (
i + 1 < v[
i].first &&
i + 1 < v.size() &&
41 v[
i + 1].first != v[
i].first + 1)
// NOTE(review): the statement below appears to come from a different helper
// (it reads the largest size already recorded in `result`) — verify upstream.
54 auto Largest =
result.back().first;
// NOTE(review): constructor fragment — the signature line and several
// interior lines are missing from this extraction; the leading numerals are
// fused original line numbers. Confirm against upstream before editing.
// Stores the subtarget/target-machine references, then registers legality
// rules in feature order from baseline 32-bit up through AVX-512 extensions.
61 : Subtarget(STI),
TM(
TM) {
63 setLegalizerInfo32bit();
64 setLegalizerInfo64bit();
65 setLegalizerInfoSSE1();
66 setLegalizerInfoSSE2();
67 setLegalizerInfoSSE41();
68 setLegalizerInfoAVX();
69 setLegalizerInfoAVX2();
70 setLegalizerInfoAVX512();
71 setLegalizerInfoAVX512DQ();
72 setLegalizerInfoAVX512BW();
// Scalar-widening strategies: widen 1-bit scalars (widen_1) for G_PHI and
// for the listed binary and memory opcodes.
80 LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(G_PHI, 0,
widen_1);
81 for (
unsigned BinOp : {G_SUB, G_MUL, G_AND, G_OR, G_XOR})
82 LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(BinOp, 0,
widen_1);
83 for (
unsigned MemOp : {G_LOAD, G_STORE})
// NOTE(review): the arguments of the next three calls are missing from this
// extraction — the strategies they install cannot be determined here.
84 LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(
86 LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(
89 LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(
// Finalize the legacy legalizer tables after all rules are registered.
95 LegacyInfo.computeTables();
// Registers baseline legality rules for scalar/pointer operations available
// on all x86 targets (32-bit widths and below).
// NOTE(review): heavily truncated extraction — most action bodies inside the
// loops are missing; the leading numerals are fused original line numbers.
104 void X86LegalizerInfo::setLegalizerInfo32bit() {
116 for (
auto Ty : {p0,
s1, s8, s16, s32})
119 for (
auto Ty : {s8, s16, s32, p0})
122 for (
unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
123 for (
auto Ty : {s8, s16, s32})
126 for (
unsigned Op : {G_UADDE}) {
131 for (
unsigned MemOp : {G_LOAD, G_STORE}) {
132 for (
auto Ty : {s8, s16, s32, p0})
// Rules below only apply when compiling for a 32-bit target.
146 if (!Subtarget.is64Bit()) {
// Division/remainder: legal for s8..s32, narrower/wider scalars clamped.
155 {G_SDIV, G_SREM, G_UDIV, G_UREM})
156 .legalFor({s8, s16, s32})
157 .clampScalar(0, s8, s32);
// Shifts: shift amount (operand 1) is always s8 on x86.
160 {G_SHL, G_LSHR, G_ASHR})
161 .legalFor({{s8, s8}, {s16, s8}, {s32, s8}})
162 .clampScalar(0, s8, s32)
168 .clampScalar(0, s8, s8);
175 for (
auto Ty : {s8, s16, s32, p0})
176 LegacyInfo.setAction({TargetOpcode::G_CONSTANT, Ty},
180 for (
auto Ty : {s8, s16, s32}) {
// Merge/unmerge rules: operand index 1 is the source of G_UNMERGE_VALUES.
189 for (
const auto &Ty : {s16, s32, s64}) {
191 LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
194 for (
const auto &Ty : {s8, s16, s32}) {
// Registers the additional legality rules for 64-bit scalars; a no-op on
// non-64-bit subtargets (early return below).
// NOTE(review): truncated extraction — the rule builders the dangling
// .legalFor/.clampScalar lines attach to are missing from this view.
200 void X86LegalizerInfo::setLegalizerInfo64bit() {
202 if (!Subtarget.is64Bit())
222 for (
unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
225 for (
unsigned MemOp : {G_LOAD, G_STORE})
237 LegacyInfo.setAction({TargetOpcode::G_CONSTANT, s64},
241 for (
unsigned extOp : {G_ZEXT, G_SEXT, G_ANYEXT}) {
247 .clampScalar(1, s32, s64)
254 .clampScalar(1, s32, s64)
262 .clampScalar(0, s8, s8);
266 .clampScalar(0, s8, s8)
// Division/remainder extended to s64 on 64-bit targets.
272 {G_SDIV, G_SREM, G_UDIV, G_UREM})
273 .legalFor({s8, s16, s32, s64})
274 .clampScalar(0, s8, s64);
// Shifts: amount operand stays s8; value operand may now be s64.
278 {G_SHL, G_LSHR, G_ASHR})
279 .legalFor({{s8, s8}, {s16, s8}, {s32, s8}, {s64, s8}})
280 .clampScalar(0, s8, s64)
285 LegacyInfo.setAction({G_UNMERGE_VALUES, 1, s128},
// Registers legality rules enabled by SSE1: scalar/128-bit-vector float
// arithmetic, 128-bit vector load/store, and s32 FP constants.
// NOTE(review): truncated extraction — loop bodies and the actions passed to
// setAction are missing; leading numerals are fused original line numbers.
291 void X86LegalizerInfo::setLegalizerInfoSSE1() {
302 for (
unsigned BinOp : {G_FADD, G_FSUB, G_FMUL, G_FDIV})
303 for (
auto Ty : {s32, v4s32})
306 for (
unsigned MemOp : {G_LOAD, G_STORE})
307 for (
auto Ty : {v4s32, v2s64})
311 LegacyInfo.setAction({TargetOpcode::G_FCONSTANT, s32},
315 for (
const auto &Ty : {v4s32, v2s64}) {
317 LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
// Registers legality rules enabled by SSE2: s64/v2s64 float arithmetic,
// 128-bit integer vector add/sub, s64 FP constants, and merge/unmerge and
// concat-vector rules for the listed vector types.
// NOTE(review): truncated extraction — the action arguments are missing.
324 void X86LegalizerInfo::setLegalizerInfoSSE2() {
342 for (
unsigned BinOp : {G_FADD, G_FSUB, G_FMUL, G_FDIV})
343 for (
auto Ty : {s64, v2s64})
346 for (
unsigned BinOp : {G_ADD, G_SUB})
347 for (
auto Ty : {v16s8, v8s16, v4s32, v2s64})
359 LegacyInfo.setAction({TargetOpcode::G_FCONSTANT, s64},
363 for (
const auto &Ty :
364 {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) {
366 LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
369 for (
const auto &Ty : {v16s8, v8s16, v4s32, v2s64}) {
370 LegacyInfo.setAction({G_CONCAT_VECTORS, 1, Ty},
// Registers legality rules enabled by SSE4.1.
// NOTE(review): only the signature survived this extraction — the entire
// body (original lines after 376) is missing; confirm against upstream.
376 void X86LegalizerInfo::setLegalizerInfoSSE41() {
// Registers legality rules enabled by AVX: 256-bit vector load/store and
// insert/extract plus merge/unmerge/concat rules for 256-bit vector types.
// NOTE(review): truncated extraction — loop bodies and setAction arguments
// are missing; leading numerals are fused original line numbers.
387 void X86LegalizerInfo::setLegalizerInfoAVX() {
407 for (
unsigned MemOp : {G_LOAD, G_STORE})
408 for (
auto Ty : {v8s32, v4s64})
411 for (
auto Ty : {v32s8, v16s16, v8s32, v4s64}) {
415 for (
auto Ty : {v16s8, v8s16, v4s32, v2s64}) {
420 for (
const auto &Ty :
421 {v32s8, v64s8, v16s16, v32s16, v8s32, v16s32, v4s64, v8s64}) {
423 LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
426 for (
const auto &Ty :
427 {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) {
428 LegacyInfo.setAction({G_CONCAT_VECTORS, 1, Ty},
// Registers legality rules enabled by AVX2: 256-bit integer vector add/sub
// and merge/unmerge/concat rules for wider vector types.
// NOTE(review): truncated extraction — loop bodies and setAction arguments
// are missing from this view.
434 void X86LegalizerInfo::setLegalizerInfoAVX2() {
450 for (
unsigned BinOp : {G_ADD, G_SUB})
451 for (
auto Ty : {v32s8, v16s16, v8s32, v4s64})
454 for (
auto Ty : {v16s16, v8s32})
458 for (
const auto &Ty : {v64s8, v32s16, v16s32, v8s64}) {
460 LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
463 for (
const auto &Ty : {v32s8, v16s16, v8s32, v4s64}) {
464 LegacyInfo.setAction({G_CONCAT_VECTORS, 1, Ty},
// Registers legality rules enabled by AVX-512: 512-bit vector add/sub and
// load/store, plus extra rules for smaller vectors; the tail guarded by
// hasVLX() applies only when the VLX (vector-length) extension is absent.
// NOTE(review): truncated extraction — loop bodies are missing.
470 void X86LegalizerInfo::setLegalizerInfoAVX512() {
491 for (
unsigned BinOp : {G_ADD, G_SUB})
492 for (
auto Ty : {v16s32, v8s64})
497 for (
unsigned MemOp : {G_LOAD, G_STORE})
498 for (
auto Ty : {v16s32, v8s64})
501 for (
auto Ty : {v64s8, v32s16, v16s32, v8s64}) {
505 for (
auto Ty : {v32s8, v16s16, v8s32, v4s64, v16s8, v8s16, v4s32, v2s64}) {
// Fallback rules for sub-512-bit vectors when VLX is not available.
511 if (!Subtarget.hasVLX())
514 for (
auto Ty : {v4s32, v8s32})
// Registers legality rules requiring both AVX-512 and the DQ extension;
// returns early (guard below) when either feature is missing. The second
// hasVLX() guard covers the sub-512-bit vector rules.
// NOTE(review): truncated extraction — rule bodies are missing.
518 void X86LegalizerInfo::setLegalizerInfoAVX512DQ() {
519 if (!(Subtarget.
hasAVX512() && Subtarget.hasDQI()))
529 if (!Subtarget.hasVLX())
535 for (
auto Ty : {v2s64, v4s64})
// Registers legality rules requiring both AVX-512 and the BW (byte/word)
// extension: 512-bit s8/s16 vector add/sub, plus sub-512-bit rules behind
// the hasVLX() guard. Returns early when either feature is missing.
// NOTE(review): truncated extraction — rule bodies are missing.
539 void X86LegalizerInfo::setLegalizerInfoAVX512BW() {
540 if (!(Subtarget.
hasAVX512() && Subtarget.hasBWI()))
548 for (
unsigned BinOp : {G_ADD, G_SUB})
549 for (
auto Ty : {v64s8, v32s16})
555 if (!Subtarget.hasVLX())
561 for (
auto Ty : {v8s16, v16s16})