RISCVLegalizerInfo.cpp (LLVM 19.0.0git)
//===-- RISCVLegalizerInfo.cpp ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "RISCVLegalizerInfo.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

using namespace llvm;
using namespace LegalityPredicates;
using namespace LegalizeMutations;

// Is this type supported by scalar FP arithmetic operations given the current
// subtarget.
static LegalityPredicate typeIsScalarFPArith(unsigned TypeIdx,
                                             const RISCVSubtarget &ST) {
  return [=, &ST](const LegalityQuery &Query) {
    return Query.Types[TypeIdx].isScalar() &&
           ((ST.hasStdExtF() && Query.Types[TypeIdx].getSizeInBits() == 32) ||
            (ST.hasStdExtD() && Query.Types[TypeIdx].getSizeInBits() == 64));
  };
}

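// A vector type is legal only if the subtarget has vector instructions at
// all; s64 elements additionally require Zve64*; and a known-minimum element
// count of 1 (the smallest fractional-LMUL containers given the 64-bit RVV
// block size) is only supported when ELEN is 64.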
static LegalityPredicate
typeIsLegalIntOrFPVec(unsigned TypeIdx,
                      std::initializer_list<LLT> IntOrFPVecTys,
                      const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getScalarSizeInBits() != 64 ||
            ST.hasVInstructionsI64()) &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64);
  };

  return all(typeInSet(TypeIdx, IntOrFPVecTys), P);
}

static LegalityPredicate
typeIsLegalBoolVec(unsigned TypeIdx, std::initializer_list<LLT> BoolVecTys,
                   const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64);
  };
  return all(typeInSet(TypeIdx, BoolVecTys), P);
}

RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
    : STI(ST), XLen(STI.getXLen()), sXLen(LLT::scalar(XLen)) {
  const LLT sDoubleXLen = LLT::scalar(2 * XLen);
  const LLT p0 = LLT::pointer(0, XLen);
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

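  // Scalable vector types: nxvNsM abbreviates <vscale x N x sM>, i.e.
  // LLT::scalable_vector(N, sM).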
  const LLT nxv1s1 = LLT::scalable_vector(1, s1);
  const LLT nxv2s1 = LLT::scalable_vector(2, s1);
  const LLT nxv4s1 = LLT::scalable_vector(4, s1);
  const LLT nxv8s1 = LLT::scalable_vector(8, s1);
  const LLT nxv16s1 = LLT::scalable_vector(16, s1);
  const LLT nxv32s1 = LLT::scalable_vector(32, s1);
  const LLT nxv64s1 = LLT::scalable_vector(64, s1);

  const LLT nxv1s8 = LLT::scalable_vector(1, s8);
  const LLT nxv2s8 = LLT::scalable_vector(2, s8);
  const LLT nxv4s8 = LLT::scalable_vector(4, s8);
  const LLT nxv8s8 = LLT::scalable_vector(8, s8);
  const LLT nxv16s8 = LLT::scalable_vector(16, s8);
  const LLT nxv32s8 = LLT::scalable_vector(32, s8);
  const LLT nxv64s8 = LLT::scalable_vector(64, s8);

  const LLT nxv1s16 = LLT::scalable_vector(1, s16);
  const LLT nxv2s16 = LLT::scalable_vector(2, s16);
  const LLT nxv4s16 = LLT::scalable_vector(4, s16);
  const LLT nxv8s16 = LLT::scalable_vector(8, s16);
  const LLT nxv16s16 = LLT::scalable_vector(16, s16);
  const LLT nxv32s16 = LLT::scalable_vector(32, s16);

  const LLT nxv1s32 = LLT::scalable_vector(1, s32);
  const LLT nxv2s32 = LLT::scalable_vector(2, s32);
  const LLT nxv4s32 = LLT::scalable_vector(4, s32);
  const LLT nxv8s32 = LLT::scalable_vector(8, s32);
  const LLT nxv16s32 = LLT::scalable_vector(16, s32);

  const LLT nxv1s64 = LLT::scalable_vector(1, s64);
  const LLT nxv2s64 = LLT::scalable_vector(2, s64);
  const LLT nxv4s64 = LLT::scalable_vector(4, s64);
  const LLT nxv8s64 = LLT::scalable_vector(8, s64);

  using namespace TargetOpcode;

  auto BoolVecTys = {nxv1s1, nxv2s1, nxv4s1, nxv8s1, nxv16s1, nxv32s1, nxv64s1};

  auto IntOrFPVecTys = {nxv1s8,   nxv2s8,  nxv4s8,  nxv8s8,  nxv16s8, nxv32s8,
                        nxv64s8,  nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
                        nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
                        nxv1s64,  nxv2s64, nxv4s64, nxv8s64};

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
      .legalFor({s32, sXLen})
      .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);

  getActionDefinitionsBuilder(
      {G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();

  getActionDefinitionsBuilder({G_SADDO, G_SSUBO}).minScalar(0, sXLen).lower();

  // TODO: Use Vector Single-Width Saturating Instructions for vector types.
  getActionDefinitionsBuilder({G_UADDSAT, G_SADDSAT, G_USUBSAT, G_SSUBSAT})
      .lower();

  auto &ShiftActions = getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL});
  if (ST.is64Bit())
    ShiftActions.customFor({{s32, s32}});
  ShiftActions.legalFor({{s32, s32}, {s32, sXLen}, {sXLen, sXLen}})
      .widenScalarToNextPow2(0)
      .clampScalar(1, s32, sXLen)
      .clampScalar(0, s32, sXLen)
      .minScalarSameAs(1, 0)
      .widenScalarToNextPow2(1);

  auto &ExtActions =
      getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
          .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                       typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)));
  if (ST.is64Bit()) {
    ExtActions.legalFor({{sXLen, s32}});
    getActionDefinitionsBuilder(G_SEXT_INREG)
        .customFor({sXLen})
        .maxScalar(0, sXLen)
        .lower();
  } else {
    getActionDefinitionsBuilder(G_SEXT_INREG).maxScalar(0, sXLen).lower();
  }
  ExtActions.customIf(typeIsLegalBoolVec(1, BoolVecTys, ST))
      .maxScalar(0, sXLen);

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    auto &MergeUnmergeActions = getActionDefinitionsBuilder(Op);
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
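    // On RV32 with D, an s64 value can live in an FPR, so merging two s32
    // halves into an s64 (and the reverse unmerge) has a directly selectable
    // form and is kept legal.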
    if (XLen == 32 && ST.hasStdExtD()) {
      MergeUnmergeActions.legalIf(
          all(typeIs(BigTyIdx, s64), typeIs(LitTyIdx, s32)));
    }
    MergeUnmergeActions.widenScalarToNextPow2(LitTyIdx, XLen)
        .widenScalarToNextPow2(BigTyIdx, XLen)
        .clampScalar(LitTyIdx, sXLen, sXLen)
        .clampScalar(BigTyIdx, sXLen, sXLen);
  }

  getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower();

  auto &RotateActions = getActionDefinitionsBuilder({G_ROTL, G_ROTR});
  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb()) {
    RotateActions.legalFor({{s32, sXLen}, {sXLen, sXLen}});
    // Widen s32 rotate amount to s64 so SDAG patterns will match.
    if (ST.is64Bit())
      RotateActions.widenScalarIf(all(typeIs(0, s32), typeIs(1, s32)),
                                  changeTo(1, sXLen));
  }
  RotateActions.lower();

  getActionDefinitionsBuilder(G_BITREVERSE).maxScalar(0, sXLen).lower();

  getActionDefinitionsBuilder(G_BITCAST).legalIf(
      all(LegalityPredicates::any(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                                  typeIsLegalBoolVec(0, BoolVecTys, ST)),
          LegalityPredicates::any(typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST),
                                  typeIsLegalBoolVec(1, BoolVecTys, ST))));

  auto &BSWAPActions = getActionDefinitionsBuilder(G_BSWAP);
  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb())
    BSWAPActions.legalFor({sXLen}).clampScalar(0, sXLen, sXLen);
  else
    BSWAPActions.maxScalar(0, sXLen).lower();

  auto &CountZerosActions = getActionDefinitionsBuilder({G_CTLZ, G_CTTZ});
  auto &CountZerosUndefActions =
      getActionDefinitionsBuilder({G_CTLZ_ZERO_UNDEF, G_CTTZ_ZERO_UNDEF});
  if (ST.hasStdExtZbb()) {
    CountZerosActions.legalFor({{s32, s32}, {sXLen, sXLen}})
        .clampScalar(0, s32, sXLen)
        .widenScalarToNextPow2(0)
        .scalarSameSizeAs(1, 0);
  } else {
    CountZerosActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
    CountZerosUndefActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0);
  }
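  // The ZERO_UNDEF variants can always be rewritten in terms of the plain
  // CTLZ/CTTZ operations handled above.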
  CountZerosUndefActions.lower();

  auto &CTPOPActions = getActionDefinitionsBuilder(G_CTPOP);
  if (ST.hasStdExtZbb()) {
    CTPOPActions.legalFor({{s32, s32}, {sXLen, sXLen}})
        .clampScalar(0, s32, sXLen)
        .widenScalarToNextPow2(0)
        .scalarSameSizeAs(1, 0);
  } else {
    CTPOPActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
  }

  auto &ConstantActions = getActionDefinitionsBuilder(G_CONSTANT);
  ConstantActions.legalFor({s32, p0});
  if (ST.is64Bit())
    ConstantActions.customFor({s64});
  ConstantActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen);

  // TODO: transform illegal vector types into legal vector type
  getActionDefinitionsBuilder(
      {G_IMPLICIT_DEF, G_CONSTANT_FOLD_BARRIER, G_FREEZE})
      .legalFor({s32, sXLen, p0})
      .legalIf(typeIsLegalBoolVec(0, BoolVecTys, ST))
      .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);

  getActionDefinitionsBuilder(G_ICMP)
      .legalFor({{sXLen, sXLen}, {sXLen, p0}})
      .legalIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
                   typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)))
      .widenScalarOrEltToNextPow2OrMinSize(1, 8)
      .clampScalar(1, sXLen, sXLen)
      .clampScalar(0, sXLen, sXLen);

  auto &SelectActions =
      getActionDefinitionsBuilder(G_SELECT)
          .legalFor({{s32, sXLen}, {p0, sXLen}})
          .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                       typeIsLegalBoolVec(1, BoolVecTys, ST)));
  if (XLen == 64 || ST.hasStdExtD())
    SelectActions.legalFor({{s64, sXLen}});
  SelectActions.widenScalarToNextPow2(0)
      .clampScalar(0, s32, (XLen == 64 || ST.hasStdExtD()) ? s64 : s32)
      .clampScalar(1, sXLen, sXLen);

  auto &LoadStoreActions =
      getActionDefinitionsBuilder({G_LOAD, G_STORE})
          .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                     {s32, p0, s16, 16},
                                     {s32, p0, s32, 32},
                                     {p0, p0, sXLen, XLen}});
  auto &ExtLoadActions =
      getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
          .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 16}});
  if (XLen == 64) {
    LoadStoreActions.legalForTypesWithMemDesc({{s64, p0, s8, 8},
                                               {s64, p0, s16, 16},
                                               {s64, p0, s32, 32},
                                               {s64, p0, s64, 64}});
    ExtLoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s8, 8}, {s64, p0, s16, 16}, {s64, p0, s32, 32}});
  } else if (ST.hasStdExtD()) {
    LoadStoreActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
  }
  LoadStoreActions.clampScalar(0, s32, sXLen).lower();
  ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen).lower();

  getActionDefinitionsBuilder({G_PTR_ADD, G_PTRMASK}).legalFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{sXLen, p0}})
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(G_INTTOPTR)
      .legalFor({{p0, sXLen}})
      .clampScalar(1, sXLen, sXLen);

  getActionDefinitionsBuilder(G_BRCOND).legalFor({sXLen}).minScalar(0, sXLen);

  getActionDefinitionsBuilder(G_BRJT).legalFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, sXLen})
      .widenScalarToNextPow2(0)
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE, G_CONSTANT_POOL})
      .legalFor({p0});

  if (ST.hasStdExtM() || ST.hasStdExtZmmul()) {
    getActionDefinitionsBuilder(G_MUL)
        .legalFor({s32, sXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, s32, sXLen);

    // clang-format off
    getActionDefinitionsBuilder({G_SMULH, G_UMULH})
        .legalFor({sXLen})
        .lower();
    // clang-format on

    getActionDefinitionsBuilder({G_SMULO, G_UMULO}).minScalar(0, sXLen).lower();
  } else {
    getActionDefinitionsBuilder(G_MUL)
        .libcallFor({sXLen, sDoubleXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, sXLen, sDoubleXLen);

    getActionDefinitionsBuilder({G_SMULH, G_UMULH}).lowerFor({sXLen});

    getActionDefinitionsBuilder({G_SMULO, G_UMULO})
        .minScalar(0, sXLen)
        // Widen sXLen to sDoubleXLen so we can use a single libcall to get
        // the low bits for the mul result and high bits to do the overflow
        // check.
        .widenScalarIf(typeIs(0, sXLen),
                       LegalizeMutations::changeTo(0, sDoubleXLen))
        .lower();
  }

  if (ST.hasStdExtM()) {
    getActionDefinitionsBuilder({G_UDIV, G_SDIV, G_UREM, G_SREM})
        .legalFor({s32, sXLen})
        .libcallFor({sDoubleXLen})
        .clampScalar(0, s32, sDoubleXLen)
        .widenScalarToNextPow2(0);
  } else {
    getActionDefinitionsBuilder({G_UDIV, G_SDIV, G_UREM, G_SREM})
        .libcallFor({sXLen, sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
        .widenScalarToNextPow2(0);
  }

  // TODO: Use libcall for sDoubleXLen.
  getActionDefinitionsBuilder({G_UDIVREM, G_SDIVREM}).lower();

  auto &AbsActions = getActionDefinitionsBuilder(G_ABS);
  if (ST.hasStdExtZbb())
    AbsActions.customFor({s32, sXLen}).minScalar(0, sXLen);
  AbsActions.lower();

  auto &MinMaxActions =
      getActionDefinitionsBuilder({G_UMAX, G_UMIN, G_SMAX, G_SMIN});
  if (ST.hasStdExtZbb())
    MinMaxActions.legalFor({sXLen}).minScalar(0, sXLen);
  MinMaxActions.lower();

  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  getActionDefinitionsBuilder(G_DYN_STACKALLOC).lower();

  // FP Operations

  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMA, G_FNEG,
                               G_FABS, G_FSQRT, G_FMAXNUM, G_FMINNUM})
      .legalIf(typeIsScalarFPArith(0, ST));

  getActionDefinitionsBuilder(G_FREM)
      .libcallFor({s32, s64})
      .minScalar(0, s32)
      .scalarize(0);

  getActionDefinitionsBuilder(G_FCOPYSIGN)
      .legalIf(all(typeIsScalarFPArith(0, ST), typeIsScalarFPArith(1, ST)));

  getActionDefinitionsBuilder(G_FPTRUNC).legalIf(
      [=, &ST](const LegalityQuery &Query) -> bool {
        return (ST.hasStdExtD() && typeIs(0, s32)(Query) &&
                typeIs(1, s64)(Query));
      });
  getActionDefinitionsBuilder(G_FPEXT).legalIf(
      [=, &ST](const LegalityQuery &Query) -> bool {
        return (ST.hasStdExtD() && typeIs(0, s64)(Query) &&
                typeIs(1, s32)(Query));
      });

  getActionDefinitionsBuilder(G_FCMP)
      .legalIf(all(typeIs(0, sXLen), typeIsScalarFPArith(1, ST)))
      .clampScalar(0, sXLen, sXLen);

  // TODO: Support vector version of G_IS_FPCLASS.
  getActionDefinitionsBuilder(G_IS_FPCLASS)
      .customIf(all(typeIs(0, s1), typeIsScalarFPArith(1, ST)));

  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalIf(typeIsScalarFPArith(0, ST))
      .lowerFor({s32, s64});

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
      .legalIf(all(typeInSet(0, {s32, sXLen}), typeIsScalarFPArith(1, ST)))
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen)
      .libcall();

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
      .legalIf(all(typeIsScalarFPArith(0, ST), typeInSet(1, {s32, sXLen})))
      .widenScalarToNextPow2(1)
      .clampScalar(1, s32, sXLen);

  // FIXME: We can do custom inline expansion like SelectionDAG.
  // FIXME: Legal with Zfa.
  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR})
      .libcallFor({s32, s64});

  getActionDefinitionsBuilder(G_VASTART).customFor({p0});

  // va_list must be a pointer, but most sized types are pretty easy to handle
  // as the destination.
  getActionDefinitionsBuilder(G_VAARG)
      // TODO: Implement narrowScalar and widenScalar for G_VAARG for types
      // outside the [s32, sXLen] range.
      .clampScalar(0, s32, sXLen)
      .lowerForCartesianProduct({s32, sXLen, p0}, {p0});

  getActionDefinitionsBuilder(G_VSCALE)
      .clampScalar(0, sXLen, sXLen)
      .customFor({sXLen});

  auto &SplatActions =
      getActionDefinitionsBuilder(G_SPLAT_VECTOR)
          .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                       typeIs(1, sXLen)))
          .customIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST), typeIs(1, s1)));
  // Handle case of s64 element vectors on RV32. If the subtarget does not have
  // f64, then try to lower it to G_SPLAT_VECTOR_SPLIT_I64_VL. If the subtarget
  // does have f64, then we don't know whether the type is an f64 or an i64,
  // so mark the G_SPLAT_VECTOR as legal and decide later what to do with it,
  // depending on how the instructions that consume it are legalized. They are
  // not legalized yet since legalization is in reverse postorder, so we cannot
  // make the decision at this moment.
  if (XLen == 32) {
    if (ST.hasVInstructionsF64() && ST.hasStdExtD())
      SplatActions.legalIf(all(
          typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));
    else if (ST.hasVInstructionsI64())
      SplatActions.customIf(all(
          typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));
  }

  SplatActions.clampScalar(1, sXLen, sXLen);

  getLegacyLegalizerInfo().computeTables();
}

static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
  if (Ty.isVector())
    return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
                                Ty.getNumElements());
  return IntegerType::get(C, Ty.getSizeInBits());
}

bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                           MachineInstr &MI) const {
  Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
  switch (IntrinsicID) {
  default:
    return false;
  case Intrinsic::vacopy: {
    // vacopy arguments must be legal because of the intrinsic signature.
    // No need to check here.

    MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
    MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
    MachineFunction &MF = *MI.getMF();
    const DataLayout &DL = MIRBuilder.getDataLayout();
    LLVMContext &Ctx = MF.getFunction().getContext();

    Register DstLst = MI.getOperand(1).getReg();
    LLT PtrTy = MRI.getType(DstLst);

    // Load the source va_list
    Align Alignment = DL.getABITypeAlign(getTypeForLLT(PtrTy, Ctx));
    MachineMemOperand *LoadMMO = MF.getMachineMemOperand(
        MachinePointerInfo(), MachineMemOperand::MOLoad, PtrTy, Alignment);
    auto Tmp = MIRBuilder.buildLoad(PtrTy, MI.getOperand(2), *LoadMMO);

    // Store the result in the destination va_list
    MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
        MachinePointerInfo(), MachineMemOperand::MOStore, PtrTy, Alignment);
    MIRBuilder.buildStore(Tmp, DstLst, *StoreMMO);

    MI.eraseFromParent();
    return true;
  }
  }
}

bool RISCVLegalizerInfo::legalizeShlAshrLshr(
    MachineInstr &MI, MachineIRBuilder &MIRBuilder,
    GISelChangeObserver &Observer) const {
  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
         MI.getOpcode() == TargetOpcode::G_LSHR ||
         MI.getOpcode() == TargetOpcode::G_SHL);
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  // If the shift amount is a G_CONSTANT, promote it to a 64 bit type so the
  // imported patterns can select it later. Either way, it will be legal.
  Register AmtReg = MI.getOperand(2).getReg();
  auto VRegAndVal = getIConstantVRegValWithLookThrough(AmtReg, MRI);
  if (!VRegAndVal)
    return true;
  // Check the shift amount is in range for an immediate form.
  uint64_t Amount = VRegAndVal->Value.getZExtValue();
  if (Amount > 31)
    return true; // This will have to remain a register variant.
  auto ExtCst = MIRBuilder.buildConstant(LLT::scalar(64), Amount);
  Observer.changingInstr(MI);
  MI.getOperand(2).setReg(ExtCst.getReg(0));
  Observer.changedInstr(MI);
  return true;
}

bool RISCVLegalizerInfo::legalizeVAStart(MachineInstr &MI,
                                         MachineIRBuilder &MIRBuilder) const {
  // Stores the address of the VarArgsFrameIndex slot into the memory location
  assert(MI.getOpcode() == TargetOpcode::G_VASTART);
  MachineFunction *MF = MI.getParent()->getParent();
  RISCVMachineFunctionInfo *FuncInfo = MF->getInfo<RISCVMachineFunctionInfo>();
  int FI = FuncInfo->getVarArgsFrameIndex();
  LLT AddrTy = MIRBuilder.getMRI()->getType(MI.getOperand(0).getReg());
  auto FINAddr = MIRBuilder.buildFrameIndex(AddrTy, FI);
  assert(MI.hasOneMemOperand());
  MIRBuilder.buildStore(FINAddr, MI.getOperand(0).getReg(),
                        *MI.memoperands()[0]);
  MI.eraseFromParent();
  return true;
}

bool RISCVLegalizerInfo::shouldBeInConstantPool(APInt APImm,
                                                bool ShouldOptForSize) const {
  assert(APImm.getBitWidth() == 32 || APImm.getBitWidth() == 64);
  int64_t Imm = APImm.getSExtValue();
  // All simm32 constants should be handled by isel.
  // NOTE: The getMaxBuildIntsCost call below should return a value >= 2 making
  // this check redundant, but small immediates are common so this check
  // should have better compile time.
  if (isInt<32>(Imm))
    return false;

  // We only need to cost the immediate if constant pool lowering is enabled.
  if (!STI.useConstantPoolForLargeInts())
    return false;

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, STI);
  if (Seq.size() <= STI.getMaxBuildIntsCost())
    return false;

  // Optimizations below are disabled for opt size. If we're optimizing for
  // size, use a constant pool.
  if (ShouldOptForSize)
    return true;
  //
  // Special case. See if we can build the constant as (ADD (SLLI X, C), X);
  // do that if it will avoid a constant pool.
  // It will require an extra temporary register though.
  // If we have Zba we can use (ADD_UW X, (SLLI X, 32)) to handle cases where
  // the low and high 32 bits are the same and bits 31 and 63 are set.
  unsigned ShiftAmt, AddOpc;
  RISCVMatInt::InstSeq SeqLo =
      RISCVMatInt::generateTwoRegInstSeq(Imm, STI, ShiftAmt, AddOpc);
  return !(!SeqLo.empty() && (SeqLo.size() + 2) <= STI.getMaxBuildIntsCost());
}

bool RISCVLegalizerInfo::legalizeVScale(MachineInstr &MI,
                                        MachineIRBuilder &MIB) const {
  const LLT XLenTy(STI.getXLenVT());
  Register Dst = MI.getOperand(0).getReg();

  // We define our scalable vector types for lmul=1 to use a 64 bit known
  // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
  // vscale as VLENB / 8.
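  // For example, G_VSCALE 4 becomes (VLENB >> 1), since 4 * (VLENB / 8) is
  // VLENB / 2.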
  static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
  if (STI.getRealMinVLen() < RISCV::RVVBitsPerBlock)
    // Support for VLEN==32 is incomplete.
    return false;

  // We assume VLENB is a multiple of 8. We manually choose the best shift
  // here because SimplifyDemandedBits isn't always able to simplify it.
  uint64_t Val = MI.getOperand(1).getCImm()->getZExtValue();
  if (isPowerOf2_64(Val)) {
    uint64_t Log2 = Log2_64(Val);
    if (Log2 < 3) {
      auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
      MIB.buildLShr(Dst, VLENB, MIB.buildConstant(XLenTy, 3 - Log2));
    } else if (Log2 > 3) {
      auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
      MIB.buildShl(Dst, VLENB, MIB.buildConstant(XLenTy, Log2 - 3));
    } else {
      MIB.buildInstr(RISCV::G_READ_VLENB, {Dst}, {});
    }
  } else if ((Val % 8) == 0) {
    // If the multiplier is a multiple of 8, scale it down to avoid needing
    // to shift the VLENB value.
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
    MIB.buildMul(Dst, VLENB, MIB.buildConstant(XLenTy, Val / 8));
  } else {
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
    auto VScale = MIB.buildLShr(XLenTy, VLENB, MIB.buildConstant(XLenTy, 3));
    MIB.buildMul(Dst, VScale, MIB.buildConstant(XLenTy, Val));
  }
  MI.eraseFromParent();
  return true;
}

// Custom-lower extensions from mask vectors by using a vselect either with 1
// for zero/any-extension or -1 for sign-extension:
//   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
// Note that any-extension is lowered identically to zero-extension.
bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
                                     MachineIRBuilder &MIB) const {

  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_SEXT ||
         Opc == TargetOpcode::G_ANYEXT);

  MachineRegisterInfo &MRI = *MIB.getMRI();
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  LLT DstTy = MRI.getType(Dst);
  int64_t ExtTrueVal = Opc == TargetOpcode::G_SEXT ? -1 : 1;
  LLT DstEltTy = DstTy.getElementType();
  auto SplatZero = MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, 0));
  auto SplatTrue =
      MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, ExtTrueVal));
  MIB.buildSelect(Dst, Src, SplatTrue, SplatZero);

  MI.eraseFromParent();
  return true;
}

/// Return the mask type suitable for masking the provided vector type. This
/// is simply an i1 element type vector of the same (possibly scalable)
/// length.
static LLT getMaskTypeFor(LLT VecTy) {
  assert(VecTy.isVector());
  ElementCount EC = VecTy.getElementCount();
  return LLT::vector(EC, LLT::scalar(1));
}

/// Creates an all ones mask suitable for masking a vector of type VecTy with
/// vector length VL.
static MachineInstrBuilder buildAllOnesMask(LLT VecTy, const SrcOp &VL,
                                            MachineIRBuilder &MIB,
                                            MachineRegisterInfo &MRI) {
  LLT MaskTy = getMaskTypeFor(VecTy);
  return MIB.buildInstr(RISCV::G_VMSET_VL, {MaskTy}, {VL});
}

/// Gets the two common "VL" operands: an all-ones mask and the vector length.
/// VecTy is a scalable vector type.
static std::pair<MachineInstrBuilder, Register>
buildDefaultVLOps(const DstOp &Dst, MachineIRBuilder &MIB,
                  MachineRegisterInfo &MRI) {
  LLT VecTy = Dst.getLLTTy(MRI);
  assert(VecTy.isScalableVector() && "Expecting scalable container type");
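  // Using X0 as the VL operand encodes VLMAX, i.e. "operate on all elements".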
  Register VL(RISCV::X0);
  MachineInstrBuilder Mask = buildAllOnesMask(VecTy, VL, MIB, MRI);
  return {Mask, VL};
}

static MachineInstrBuilder
buildSplatPartsS64WithVL(const DstOp &Dst, const SrcOp &Passthru, Register Lo,
                         Register Hi, Register VL, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI) {
  // TODO: If the Hi bits of the splat are undefined, then it's fine to just
  // splat Lo even if it might be sign extended. I don't think we have
  // introduced a case where we're building an s64 where the upper bits are
  // undef yet.

  // Fall back to a stack store and stride x0 vector load.
  // TODO: need to lower G_SPLAT_VECTOR_SPLIT_I64. This is done in
  // preprocessDAG in SDAG.
  return MIB.buildInstr(RISCV::G_SPLAT_VECTOR_SPLIT_I64_VL, {Dst},
                        {Passthru, Lo, Hi, VL});
}

static MachineInstrBuilder
buildSplatSplitS64WithVL(const DstOp &Dst, const SrcOp &Passthru,
                         const SrcOp &Scalar, Register VL,
                         MachineIRBuilder &MIB, MachineRegisterInfo &MRI) {
  assert(Scalar.getLLTTy(MRI) == LLT::scalar(64) && "Unexpected VecTy!");
  auto Unmerge = MIB.buildUnmerge(LLT::scalar(32), Scalar);
  return buildSplatPartsS64WithVL(Dst, Passthru, Unmerge.getReg(0),
                                  Unmerge.getReg(1), VL, MIB, MRI);
}

// Lower splats of s1 types to G_ICMP. For each mask vector type, we have a
// legal equivalently-sized i8 type, so we can use that as a go-between.
// Splats of s1 types that have constant value can be legalized as VMSET_VL or
// VMCLR_VL.
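// For example, a non-constant splat of x:s1 becomes
//   G_ICMP ne, splat((zext(x) & 1):s8), splat(0:s8)
// as built below.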
bool RISCVLegalizerInfo::legalizeSplatVector(MachineInstr &MI,
                                             MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR);

  MachineRegisterInfo &MRI = *MIB.getMRI();

  Register Dst = MI.getOperand(0).getReg();
  Register SplatVal = MI.getOperand(1).getReg();

  LLT VecTy = MRI.getType(Dst);
  LLT XLenTy(STI.getXLenVT());

  // Handle case of s64 element vectors on RV32
  if (XLenTy.getSizeInBits() == 32 &&
      VecTy.getElementType().getSizeInBits() == 64) {
    auto [_, VL] = buildDefaultVLOps(Dst, MIB, MRI);
    buildSplatSplitS64WithVL(Dst, MIB.buildUndef(VecTy), SplatVal, VL, MIB,
                             MRI);
    MI.eraseFromParent();
    return true;
  }

  // All-zeros or all-ones splats are handled specially.
  MachineInstr &SplatValMI = *MRI.getVRegDef(SplatVal);
  if (isAllOnesOrAllOnesSplat(SplatValMI, MRI)) {
    auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
    MIB.buildInstr(RISCV::G_VMSET_VL, {Dst}, {VL});
    MI.eraseFromParent();
    return true;
  }
  if (isNullOrNullSplat(SplatValMI, MRI)) {
    auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
    MIB.buildInstr(RISCV::G_VMCLR_VL, {Dst}, {VL});
    MI.eraseFromParent();
    return true;
  }

  // Handle non-constant mask splat (i.e. not sure if it's all zeros or all
  // ones) by promoting it to an s8 splat.
  LLT InterEltTy = LLT::scalar(8);
  LLT InterTy = VecTy.changeElementType(InterEltTy);
  auto ZExtSplatVal = MIB.buildZExt(InterEltTy, SplatVal);
  auto And =
      MIB.buildAnd(InterEltTy, ZExtSplatVal, MIB.buildConstant(InterEltTy, 1));
  auto LHS = MIB.buildSplatVector(InterTy, And);
  auto ZeroSplat =
      MIB.buildSplatVector(InterTy, MIB.buildConstant(InterEltTy, 0));
  MIB.buildICmp(CmpInst::Predicate::ICMP_NE, Dst, LHS, ZeroSplat);
  MI.eraseFromParent();
  return true;
}

bool RISCVLegalizerInfo::legalizeCustom(
    LegalizerHelper &Helper, MachineInstr &MI,
    LostDebugLocObserver &LocObserver) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  GISelChangeObserver &Observer = Helper.Observer;
  MachineFunction &MF = *MI.getParent()->getParent();
  switch (MI.getOpcode()) {
  default:
    // No idea what to do.
    return false;
  case TargetOpcode::G_ABS:
    return Helper.lowerAbsToMaxNeg(MI);
  // TODO: G_FCONSTANT
  case TargetOpcode::G_CONSTANT: {
    const Function &F = MF.getFunction();
    // TODO: if PSI and BFI are present, add " ||
    // llvm::shouldOptForSize(*CurMBB, PSI, BFI)".
    bool ShouldOptForSize = F.hasOptSize() || F.hasMinSize();
    const ConstantInt *ConstVal = MI.getOperand(1).getCImm();
    if (!shouldBeInConstantPool(ConstVal->getValue(), ShouldOptForSize))
      return true;
    return Helper.lowerConstant(MI);
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return legalizeShlAshrLshr(MI, MIRBuilder, Observer);
  case TargetOpcode::G_SEXT_INREG: {
    // Source size of 32 is sext.w.
    int64_t SizeInBits = MI.getOperand(2).getImm();
    if (SizeInBits == 32)
      return true;

    return Helper.lower(MI, 0, /* Unused hint type */ LLT()) ==
           LegalizerHelper::LegalizeResult::Legalized;
  }
  case TargetOpcode::G_IS_FPCLASS: {
    Register GISFPCLASS = MI.getOperand(0).getReg();
    Register Src = MI.getOperand(1).getReg();
    const MachineOperand &ImmOp = MI.getOperand(2);
    MachineIRBuilder MIB(MI);

    // Turn LLVM IR's floating point classes into RISC-V's, by simply rotating
    // the 10-bit immediate right by two bits.
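    // (LLVM's mask orders the classes sNaN, qNaN, -inf ... +inf from bit 0,
    // while fclass reports -inf ... +inf, sNaN, qNaN: the same list rotated
    // by two positions.)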
    APInt GFpClassImm(10, static_cast<uint64_t>(ImmOp.getImm()));
    auto FClassMask = MIB.buildConstant(sXLen, GFpClassImm.rotr(2).zext(XLen));
    auto ConstZero = MIB.buildConstant(sXLen, 0);

    auto GFClass = MIB.buildInstr(RISCV::G_FCLASS, {sXLen}, {Src});
    auto And = MIB.buildAnd(sXLen, GFClass, FClassMask);
    MIB.buildICmp(CmpInst::ICMP_NE, GISFPCLASS, And, ConstZero);

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_VASTART:
    return legalizeVAStart(MI, MIRBuilder);
  case TargetOpcode::G_VSCALE:
    return legalizeVScale(MI, MIRBuilder);
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ANYEXT:
    return legalizeExt(MI, MIRBuilder);
  case TargetOpcode::G_SPLAT_VECTOR:
    return legalizeSplatVector(MI, MIRBuilder);
  }

  llvm_unreachable("expected switch to return");
}