X86LegalizerInfo.cpp
//===- X86LegalizerInfo.cpp --------------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "X86LegalizerInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

using namespace llvm;
using namespace TargetOpcode;
using namespace LegalizeActions;

/// FIXME: The following static functions are SizeChangeStrategy functions
/// that are meant to temporarily mimic the behaviour of the old legalization
/// based on doubling/halving non-legal types as closely as possible. This is
/// not entirely possible, as only legalizing the types that are exactly a
/// power of 2 times the size of the legal types would require specifying all
/// those sizes explicitly.
/// In practice, not specifying those isn't a problem, and the below functions
/// should disappear quickly as we add further support for legalizing
/// non-power-of-2 sized types.
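///
/// As an illustration (for a hypothetical set of legal sizes {8, 16, 32}),
/// widen_1 below would produce roughly:
///   {1, WidenScalar}, {2, Unsupported}, {8, Legal}, {9, Unsupported},
///   {16, Legal}, {17, Unsupported}, {32, Legal}, {33, Unsupported}
/// i.e. 1-bit scalars are widened, and sizes that are not explicitly listed
/// as legal are rejected.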
static void addAndInterleaveWithUnsupported(
    LegacyLegalizerInfo::SizeAndActionsVec &result,
    const LegacyLegalizerInfo::SizeAndActionsVec &v) {
  for (unsigned i = 0; i < v.size(); ++i) {
    result.push_back(v[i]);
    if (i + 1 < v[i].first && i + 1 < v.size() &&
        v[i + 1].first != v[i].first + 1)
      result.push_back({v[i].first + 1, LegacyLegalizeActions::Unsupported});
  }
}

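/// Build a size-and-actions table that widens 1-bit scalars, keeps the sizes
/// in \p v as-is, and marks the gaps between listed sizes (as well as
/// everything above the largest size) as Unsupported.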
static LegacyLegalizerInfo::SizeAndActionsVec
widen_1(const LegacyLegalizerInfo::SizeAndActionsVec &v) {
  assert(v.size() >= 1);
  assert(v[0].first > 1);
  LegacyLegalizerInfo::SizeAndActionsVec result = {
      {1, LegacyLegalizeActions::WidenScalar},
      {2, LegacyLegalizeActions::Unsupported}};
  addAndInterleaveWithUnsupported(result, v);
  auto Largest = result.back().first;
  result.push_back({Largest + 1, LegacyLegalizeActions::Unsupported});
  return result;
}

X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
                                   const X86TargetMachine &TM)
    : Subtarget(STI), TM(TM) {

  setLegalizerInfo32bit();
  setLegalizerInfo64bit();
  setLegalizerInfoSSE1();
  setLegalizerInfoSSE2();
  setLegalizerInfoSSE41();
  setLegalizerInfoAVX();
  setLegalizerInfoAVX2();
  setLegalizerInfoAVX512();
  setLegalizerInfoAVX512DQ();
  setLegalizerInfoAVX512BW();

  getActionDefinitionsBuilder(G_INTRINSIC_ROUNDEVEN)
      .scalarize(0)
      .minScalar(0, LLT::scalar(32))
      .libcall();

  auto &LegacyInfo = getLegacyLegalizerInfo();
  LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(G_PHI, 0, widen_1);
  for (unsigned BinOp : {G_SUB, G_MUL, G_AND, G_OR, G_XOR})
    LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(BinOp, 0, widen_1);
  for (unsigned MemOp : {G_LOAD, G_STORE})
    LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(
        MemOp, 0, LegacyLegalizerInfo::narrowToSmallerAndWidenToSmallest);
  LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(
      G_PTR_ADD, 1,
      LegacyLegalizerInfo::widenToLargerTypesUnsupportedOtherwise);
  LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(
      G_CONSTANT, 0,
      LegacyLegalizerInfo::widenToLargerTypesAndNarrowToLargest);

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  LegacyInfo.computeTables();
  verify(*STI.getInstrInfo());
}

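// Target intrinsics currently require no X86-specific legalization; report
// them all as handled.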
bool X86LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                         MachineInstr &MI) const {
  return true;
}

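// Baseline rules shared by all subtargets; the !is64Bit() block below adds
// the variants that only apply when compiling for 32-bit mode.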
void X86LegalizerInfo::setLegalizerInfo32bit() {

  const LLT p0 = LLT::pointer(0, TM.getPointerSizeInBits(0));
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s128 = LLT::scalar(128);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (auto Ty : {p0, s1, s8, s16, s32})
    LegacyInfo.setAction({G_IMPLICIT_DEF, Ty}, LegacyLegalizeActions::Legal);

  for (auto Ty : {s8, s16, s32, p0})
    LegacyInfo.setAction({G_PHI, Ty}, LegacyLegalizeActions::Legal);

  for (unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
    for (auto Ty : {s8, s16, s32})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  for (unsigned Op : {G_UADDE}) {
    LegacyInfo.setAction({Op, s32}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({Op, 1, s1}, LegacyLegalizeActions::Legal);
  }

  for (unsigned MemOp : {G_LOAD, G_STORE}) {
    for (auto Ty : {s8, s16, s32, p0})
      LegacyInfo.setAction({MemOp, Ty}, LegacyLegalizeActions::Legal);

    // And everything's fine in addrspace 0.
    LegacyInfo.setAction({MemOp, 1, p0}, LegacyLegalizeActions::Legal);
  }

  // Pointer-handling
  LegacyInfo.setAction({G_FRAME_INDEX, p0}, LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_GLOBAL_VALUE, p0}, LegacyLegalizeActions::Legal);

  LegacyInfo.setAction({G_PTR_ADD, p0}, LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_PTR_ADD, 1, s32}, LegacyLegalizeActions::Legal);

  if (!Subtarget.is64Bit()) {
    getActionDefinitionsBuilder(G_PTRTOINT)
        .legalForCartesianProduct({s1, s8, s16, s32}, {p0})
        .maxScalar(0, s32)
        .widenScalarToNextPow2(0, /*Min*/ 8);
    getActionDefinitionsBuilder(G_INTTOPTR).legalFor({{p0, s32}});

    // Shifts and SDIV
    getActionDefinitionsBuilder(
        {G_SDIV, G_SREM, G_UDIV, G_UREM})
        .legalFor({s8, s16, s32})
        .clampScalar(0, s8, s32);

    getActionDefinitionsBuilder(
        {G_SHL, G_LSHR, G_ASHR})
        .legalFor({{s8, s8}, {s16, s8}, {s32, s8}})
        .clampScalar(0, s8, s32)
        .clampScalar(1, s8, s8);

    // Comparison
    getActionDefinitionsBuilder(G_ICMP)
        .legalForCartesianProduct({s8}, {s8, s16, s32, p0})
        .clampScalar(0, s8, s8);
  }

  // Control-flow
  LegacyInfo.setAction({G_BRCOND, s1}, LegacyLegalizeActions::Legal);

  // Constants
  for (auto Ty : {s8, s16, s32, p0})
    LegacyInfo.setAction({TargetOpcode::G_CONSTANT, Ty},
                         LegacyLegalizeActions::Legal);

  // Extensions
  for (auto Ty : {s8, s16, s32}) {
    LegacyInfo.setAction({G_ZEXT, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_SEXT, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_ANYEXT, Ty}, LegacyLegalizeActions::Legal);
  }
  LegacyInfo.setAction({G_ANYEXT, s128}, LegacyLegalizeActions::Legal);
  getActionDefinitionsBuilder(G_SEXT_INREG).lower();

  // Merge/Unmerge
  for (const auto &Ty : {s16, s32, s64}) {
    LegacyInfo.setAction({G_MERGE_VALUES, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
  }
  for (const auto &Ty : {s8, s16, s32}) {
    LegacyInfo.setAction({G_MERGE_VALUES, 1, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, Ty}, LegacyLegalizeActions::Legal);
  }
}

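// Additional rules that are only registered for 64-bit subtargets (64-bit
// scalars and pointers, s128 merge/unmerge, int/FP conversions, etc.).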
void X86LegalizerInfo::setLegalizerInfo64bit() {

  if (!Subtarget.is64Bit())
    return;

  const LLT p0 = LLT::pointer(0, TM.getPointerSizeInBits(0));
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s128 = LLT::scalar(128);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  LegacyInfo.setAction({G_IMPLICIT_DEF, s64}, LegacyLegalizeActions::Legal);
  // This is needed, as tryFoldImplicitDef will create this pattern:
  // s128 = EXTEND (G_IMPLICIT_DEF s32/s64) -> s128 = G_IMPLICIT_DEF
  LegacyInfo.setAction({G_IMPLICIT_DEF, s128}, LegacyLegalizeActions::Legal);

  LegacyInfo.setAction({G_PHI, s64}, LegacyLegalizeActions::Legal);

  for (unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
    LegacyInfo.setAction({BinOp, s64}, LegacyLegalizeActions::Legal);

  for (unsigned MemOp : {G_LOAD, G_STORE})
    LegacyInfo.setAction({MemOp, s64}, LegacyLegalizeActions::Legal);

  // Pointer-handling
  LegacyInfo.setAction({G_PTR_ADD, 1, s64}, LegacyLegalizeActions::Legal);
  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalForCartesianProduct({s1, s8, s16, s32, s64}, {p0})
      .maxScalar(0, s64)
      .widenScalarToNextPow2(0, /*Min*/ 8);
  getActionDefinitionsBuilder(G_INTTOPTR).legalFor({{p0, s64}});

  // Constants
  LegacyInfo.setAction({TargetOpcode::G_CONSTANT, s64},
                       LegacyLegalizeActions::Legal);

  // Extensions
  for (unsigned extOp : {G_ZEXT, G_SEXT, G_ANYEXT}) {
    LegacyInfo.setAction({extOp, s64}, LegacyLegalizeActions::Legal);
  }

  getActionDefinitionsBuilder(G_SITOFP)
      .legalForCartesianProduct({s32, s64})
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1)
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder(G_FPTOSI)
      .legalForCartesianProduct({s32, s64})
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(1);

  // Comparison
  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s8}, {s8, s16, s32, s64, p0})
      .clampScalar(0, s8, s8);

  getActionDefinitionsBuilder(G_FCMP)
      .legalForCartesianProduct({s8}, {s32, s64})
      .clampScalar(0, s8, s8)
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1);

  // Divisions
  getActionDefinitionsBuilder(
      {G_SDIV, G_SREM, G_UDIV, G_UREM})
      .legalFor({s8, s16, s32, s64})
      .clampScalar(0, s8, s64);

  // Shifts
  getActionDefinitionsBuilder(
      {G_SHL, G_LSHR, G_ASHR})
      .legalFor({{s8, s8}, {s16, s8}, {s32, s8}, {s64, s8}})
      .clampScalar(0, s8, s64)
      .clampScalar(1, s8, s8);

  // Merge/Unmerge
  LegacyInfo.setAction({G_MERGE_VALUES, s128}, LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_UNMERGE_VALUES, 1, s128},
                       LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_MERGE_VALUES, 1, s128}, LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_UNMERGE_VALUES, s128}, LegacyLegalizeActions::Legal);
}

void X86LegalizerInfo::setLegalizerInfoSSE1() {
  if (!Subtarget.hasSSE1())
    return;

  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned BinOp : {G_FADD, G_FSUB, G_FMUL, G_FDIV})
    for (auto Ty : {s32, v4s32})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  for (unsigned MemOp : {G_LOAD, G_STORE})
    for (auto Ty : {v4s32, v2s64})
      LegacyInfo.setAction({MemOp, Ty}, LegacyLegalizeActions::Legal);

  // Constants
  LegacyInfo.setAction({TargetOpcode::G_FCONSTANT, s32},
                       LegacyLegalizeActions::Legal);

  // Merge/Unmerge
  for (const auto &Ty : {v4s32, v2s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
  }
  LegacyInfo.setAction({G_MERGE_VALUES, 1, s64}, LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_UNMERGE_VALUES, s64}, LegacyLegalizeActions::Legal);
}

void X86LegalizerInfo::setLegalizerInfoSSE2() {
  if (!Subtarget.hasSSE2())
    return;

  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT v16s8 = LLT::fixed_vector(16, 8);
  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);

  const LLT v32s8 = LLT::fixed_vector(32, 8);
  const LLT v16s16 = LLT::fixed_vector(16, 16);
  const LLT v8s32 = LLT::fixed_vector(8, 32);
  const LLT v4s64 = LLT::fixed_vector(4, 64);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned BinOp : {G_FADD, G_FSUB, G_FMUL, G_FDIV})
    for (auto Ty : {s64, v2s64})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v16s8, v8s16, v4s32, v2s64})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  LegacyInfo.setAction({G_MUL, v8s16}, LegacyLegalizeActions::Legal);

  LegacyInfo.setAction({G_FPEXT, s64}, LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_FPEXT, 1, s32}, LegacyLegalizeActions::Legal);

  LegacyInfo.setAction({G_FPTRUNC, s32}, LegacyLegalizeActions::Legal);
  LegacyInfo.setAction({G_FPTRUNC, 1, s64}, LegacyLegalizeActions::Legal);

  // Constants
  LegacyInfo.setAction({TargetOpcode::G_FCONSTANT, s64},
                       LegacyLegalizeActions::Legal);

  // Merge/Unmerge
  for (const auto &Ty :
       {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
  }
  for (const auto &Ty : {v16s8, v8s16, v4s32, v2s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, 1, Ty},
                         LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, Ty}, LegacyLegalizeActions::Legal);
  }
}

void X86LegalizerInfo::setLegalizerInfoSSE41() {
  if (!Subtarget.hasSSE41())
    return;

  const LLT v4s32 = LLT::fixed_vector(4, 32);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  LegacyInfo.setAction({G_MUL, v4s32}, LegacyLegalizeActions::Legal);
}

void X86LegalizerInfo::setLegalizerInfoAVX() {
  if (!Subtarget.hasAVX())
    return;

  const LLT v16s8 = LLT::fixed_vector(16, 8);
  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);

  const LLT v32s8 = LLT::fixed_vector(32, 8);
  const LLT v64s8 = LLT::fixed_vector(64, 8);
  const LLT v16s16 = LLT::fixed_vector(16, 16);
  const LLT v32s16 = LLT::fixed_vector(32, 16);
  const LLT v8s32 = LLT::fixed_vector(8, 32);
  const LLT v16s32 = LLT::fixed_vector(16, 32);
  const LLT v4s64 = LLT::fixed_vector(4, 64);
  const LLT v8s64 = LLT::fixed_vector(8, 64);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned MemOp : {G_LOAD, G_STORE})
    for (auto Ty : {v8s32, v4s64})
      LegacyInfo.setAction({MemOp, Ty}, LegacyLegalizeActions::Legal);

  for (auto Ty : {v32s8, v16s16, v8s32, v4s64}) {
    LegacyInfo.setAction({G_INSERT, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_EXTRACT, 1, Ty}, LegacyLegalizeActions::Legal);
  }
  for (auto Ty : {v16s8, v8s16, v4s32, v2s64}) {
    LegacyInfo.setAction({G_INSERT, 1, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_EXTRACT, Ty}, LegacyLegalizeActions::Legal);
  }
  // Merge/Unmerge
  for (const auto &Ty :
       {v32s8, v64s8, v16s16, v32s16, v8s32, v16s32, v4s64, v8s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
  }
  for (const auto &Ty :
       {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, 1, Ty},
                         LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, Ty}, LegacyLegalizeActions::Legal);
  }
}

void X86LegalizerInfo::setLegalizerInfoAVX2() {
  if (!Subtarget.hasAVX2())
    return;

  const LLT v32s8 = LLT::fixed_vector(32, 8);
  const LLT v16s16 = LLT::fixed_vector(16, 16);
  const LLT v8s32 = LLT::fixed_vector(8, 32);
  const LLT v4s64 = LLT::fixed_vector(4, 64);

  const LLT v64s8 = LLT::fixed_vector(64, 8);
  const LLT v32s16 = LLT::fixed_vector(32, 16);
  const LLT v16s32 = LLT::fixed_vector(16, 32);
  const LLT v8s64 = LLT::fixed_vector(8, 64);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v32s8, v16s16, v8s32, v4s64})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  for (auto Ty : {v16s16, v8s32})
    LegacyInfo.setAction({G_MUL, Ty}, LegacyLegalizeActions::Legal);

  // Merge/Unmerge
  for (const auto &Ty : {v64s8, v32s16, v16s32, v8s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, 1, Ty},
                         LegacyLegalizeActions::Legal);
  }
  for (const auto &Ty : {v32s8, v16s16, v8s32, v4s64}) {
    LegacyInfo.setAction({G_CONCAT_VECTORS, 1, Ty},
                         LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_UNMERGE_VALUES, Ty}, LegacyLegalizeActions::Legal);
  }
}

void X86LegalizerInfo::setLegalizerInfoAVX512() {
  if (!Subtarget.hasAVX512())
    return;

  const LLT v16s8 = LLT::fixed_vector(16, 8);
  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);

  const LLT v32s8 = LLT::fixed_vector(32, 8);
  const LLT v16s16 = LLT::fixed_vector(16, 16);
  const LLT v8s32 = LLT::fixed_vector(8, 32);
  const LLT v4s64 = LLT::fixed_vector(4, 64);

  const LLT v64s8 = LLT::fixed_vector(64, 8);
  const LLT v32s16 = LLT::fixed_vector(32, 16);
  const LLT v16s32 = LLT::fixed_vector(16, 32);
  const LLT v8s64 = LLT::fixed_vector(8, 64);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v16s32, v8s64})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  LegacyInfo.setAction({G_MUL, v16s32}, LegacyLegalizeActions::Legal);

  for (unsigned MemOp : {G_LOAD, G_STORE})
    for (auto Ty : {v16s32, v8s64})
      LegacyInfo.setAction({MemOp, Ty}, LegacyLegalizeActions::Legal);

  for (auto Ty : {v64s8, v32s16, v16s32, v8s64}) {
    LegacyInfo.setAction({G_INSERT, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_EXTRACT, 1, Ty}, LegacyLegalizeActions::Legal);
  }
  for (auto Ty : {v32s8, v16s16, v8s32, v4s64, v16s8, v8s16, v4s32, v2s64}) {
    LegacyInfo.setAction({G_INSERT, 1, Ty}, LegacyLegalizeActions::Legal);
    LegacyInfo.setAction({G_EXTRACT, Ty}, LegacyLegalizeActions::Legal);
  }

  /************ VLX *******************/
  if (!Subtarget.hasVLX())
    return;

  for (auto Ty : {v4s32, v8s32})
    LegacyInfo.setAction({G_MUL, Ty}, LegacyLegalizeActions::Legal);
}

void X86LegalizerInfo::setLegalizerInfoAVX512DQ() {
  if (!(Subtarget.hasAVX512() && Subtarget.hasDQI()))
    return;

  const LLT v8s64 = LLT::fixed_vector(8, 64);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  LegacyInfo.setAction({G_MUL, v8s64}, LegacyLegalizeActions::Legal);

  /************ VLX *******************/
  if (!Subtarget.hasVLX())
    return;

  const LLT v2s64 = LLT::fixed_vector(2, 64);
  const LLT v4s64 = LLT::fixed_vector(4, 64);

  for (auto Ty : {v2s64, v4s64})
    LegacyInfo.setAction({G_MUL, Ty}, LegacyLegalizeActions::Legal);
}

void X86LegalizerInfo::setLegalizerInfoAVX512BW() {
  if (!(Subtarget.hasAVX512() && Subtarget.hasBWI()))
    return;

  const LLT v64s8 = LLT::fixed_vector(64, 8);
  const LLT v32s16 = LLT::fixed_vector(32, 16);

  auto &LegacyInfo = getLegacyLegalizerInfo();

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v64s8, v32s16})
      LegacyInfo.setAction({BinOp, Ty}, LegacyLegalizeActions::Legal);

  LegacyInfo.setAction({G_MUL, v32s16}, LegacyLegalizeActions::Legal);

  /************ VLX *******************/
  if (!Subtarget.hasVLX())
    return;

  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v16s16 = LLT::fixed_vector(16, 16);

  for (auto Ty : {v8s16, v16s16})
    LegacyInfo.setAction({G_MUL, Ty}, LegacyLegalizeActions::Legal);
}