RISCVCallingConv.cpp
//===-- RISCVCallingConv.cpp - RISC-V Custom CC Routines ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the custom routines for the RISC-V Calling Convention.
//
//===----------------------------------------------------------------------===//

#include "RISCVCallingConv.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCRegister.h"

using namespace llvm;

// This does not have the regular `CCAssignFn` signature; it takes an extra
// `bool IsRet` parameter.
static bool CC_RISCV_Impl(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
                          CCState &State, bool IsRet);

/// Used for assigning arguments with CallingConvention::GHC.
static CCAssignFn CC_RISCV_GHC;

/// Used for assigning arguments with CallingConvention::Fast.
static CCAssignFn CC_RISCV_FastCC;

bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                    Type *OrigTy, CCState &State) {
  if (State.getCallingConv() == CallingConv::GHC)
    return CC_RISCV_GHC(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy, State);

  if (State.getCallingConv() == CallingConv::Fast)
    return CC_RISCV_FastCC(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy,
                           State);

  // For all other cases, use the standard calling convention.
  return CC_RISCV_Impl(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy, State,
                       /*IsRet=*/false);
}

bool llvm::RetCC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
                       CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                       Type *OrigTy, CCState &State) {
  // Always use the standard calling convention.
  return CC_RISCV_Impl(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy, State,
                       /*IsRet=*/true);
}

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
//   passed in a pair of registers (fp+fp, int+fp), and both registers are
//   available, then pass as two separate arguments. If either the GPRs or
//   FPRs are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
//   slot (as it is larger than 2*XLEN and the floating point rules don't
//   apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
//   word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
//   not based on its size and fields. If it will be returned by reference,
//   the frontend must modify the prototype so a pointer with the sret
//   annotation is passed as the first argument. This is not necessary for
//   large scalar returns.
// * Struct return values and varargs should be coerced to structs containing
//   register-size fields in the same situations they would be for fixed
//   arguments.

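// Illustrative sketch of these rules (hypothetical C declarations, not part
// of the original file), assuming RV64 with the LP64D hardfloat ABI:
//
//   struct S1 { double d; int i; };        // fp+int pair: passed as two
//                                          // separate FPR/GPR arguments while
//                                          // both register files have space.
//   struct S2 { long a; long b; };         // <= 2*XLEN: coerced to a
//                                          // two-element XLEN-sized array.
//   struct S3 { long a; long b; long c; }; // > 2*XLEN: passed via a pointer
//                                          // with the byval attribute.
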
static const MCPhysReg ArgFPR16s[] = {RISCV::F10_H, RISCV::F11_H, RISCV::F12_H,
                                      RISCV::F13_H, RISCV::F14_H, RISCV::F15_H,
                                      RISCV::F16_H, RISCV::F17_H};
static const MCPhysReg ArgFPR32s[] = {RISCV::F10_F, RISCV::F11_F, RISCV::F12_F,
                                      RISCV::F13_F, RISCV::F14_F, RISCV::F15_F,
                                      RISCV::F16_F, RISCV::F17_F};
static const MCPhysReg ArgFPR64s[] = {RISCV::F10_D, RISCV::F11_D, RISCV::F12_D,
                                      RISCV::F13_D, RISCV::F14_D, RISCV::F15_D,
                                      RISCV::F16_D, RISCV::F17_D};
// This is an interim calling convention and it may be changed in the future.
static const MCPhysReg ArgVRs[] = {
    RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
    RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
    RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
                                     RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
                                     RISCV::V20M2, RISCV::V22M2};
static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
                                     RISCV::V20M4};
static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
static const MCPhysReg ArgVRN2M1s[] = {
    RISCV::V8_V9,   RISCV::V9_V10,  RISCV::V10_V11, RISCV::V11_V12,
    RISCV::V12_V13, RISCV::V13_V14, RISCV::V14_V15, RISCV::V15_V16,
    RISCV::V16_V17, RISCV::V17_V18, RISCV::V18_V19, RISCV::V19_V20,
    RISCV::V20_V21, RISCV::V21_V22, RISCV::V22_V23};
static const MCPhysReg ArgVRN3M1s[] = {
    RISCV::V8_V9_V10,   RISCV::V9_V10_V11,  RISCV::V10_V11_V12,
    RISCV::V11_V12_V13, RISCV::V12_V13_V14, RISCV::V13_V14_V15,
    RISCV::V14_V15_V16, RISCV::V15_V16_V17, RISCV::V16_V17_V18,
    RISCV::V17_V18_V19, RISCV::V18_V19_V20, RISCV::V19_V20_V21,
    RISCV::V20_V21_V22, RISCV::V21_V22_V23};
static const MCPhysReg ArgVRN4M1s[] = {
    RISCV::V8_V9_V10_V11,   RISCV::V9_V10_V11_V12,  RISCV::V10_V11_V12_V13,
    RISCV::V11_V12_V13_V14, RISCV::V12_V13_V14_V15, RISCV::V13_V14_V15_V16,
    RISCV::V14_V15_V16_V17, RISCV::V15_V16_V17_V18, RISCV::V16_V17_V18_V19,
    RISCV::V17_V18_V19_V20, RISCV::V18_V19_V20_V21, RISCV::V19_V20_V21_V22,
    RISCV::V20_V21_V22_V23};
static const MCPhysReg ArgVRN5M1s[] = {
    RISCV::V8_V9_V10_V11_V12,   RISCV::V9_V10_V11_V12_V13,
    RISCV::V10_V11_V12_V13_V14, RISCV::V11_V12_V13_V14_V15,
    RISCV::V12_V13_V14_V15_V16, RISCV::V13_V14_V15_V16_V17,
    RISCV::V14_V15_V16_V17_V18, RISCV::V15_V16_V17_V18_V19,
    RISCV::V16_V17_V18_V19_V20, RISCV::V17_V18_V19_V20_V21,
    RISCV::V18_V19_V20_V21_V22, RISCV::V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN6M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13,   RISCV::V9_V10_V11_V12_V13_V14,
    RISCV::V10_V11_V12_V13_V14_V15, RISCV::V11_V12_V13_V14_V15_V16,
    RISCV::V12_V13_V14_V15_V16_V17, RISCV::V13_V14_V15_V16_V17_V18,
    RISCV::V14_V15_V16_V17_V18_V19, RISCV::V15_V16_V17_V18_V19_V20,
    RISCV::V16_V17_V18_V19_V20_V21, RISCV::V17_V18_V19_V20_V21_V22,
    RISCV::V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN7M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13_V14,   RISCV::V9_V10_V11_V12_V13_V14_V15,
    RISCV::V10_V11_V12_V13_V14_V15_V16, RISCV::V11_V12_V13_V14_V15_V16_V17,
    RISCV::V12_V13_V14_V15_V16_V17_V18, RISCV::V13_V14_V15_V16_V17_V18_V19,
    RISCV::V14_V15_V16_V17_V18_V19_V20, RISCV::V15_V16_V17_V18_V19_V20_V21,
    RISCV::V16_V17_V18_V19_V20_V21_V22, RISCV::V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN8M1s[] = {RISCV::V8_V9_V10_V11_V12_V13_V14_V15,
                                       RISCV::V9_V10_V11_V12_V13_V14_V15_V16,
                                       RISCV::V10_V11_V12_V13_V14_V15_V16_V17,
                                       RISCV::V11_V12_V13_V14_V15_V16_V17_V18,
                                       RISCV::V12_V13_V14_V15_V16_V17_V18_V19,
                                       RISCV::V13_V14_V15_V16_V17_V18_V19_V20,
                                       RISCV::V14_V15_V16_V17_V18_V19_V20_V21,
                                       RISCV::V15_V16_V17_V18_V19_V20_V21_V22,
                                       RISCV::V16_V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN2M2s[] = {RISCV::V8M2_V10M2,  RISCV::V10M2_V12M2,
                                       RISCV::V12M2_V14M2, RISCV::V14M2_V16M2,
                                       RISCV::V16M2_V18M2, RISCV::V18M2_V20M2,
                                       RISCV::V20M2_V22M2};
static const MCPhysReg ArgVRN3M2s[] = {
    RISCV::V8M2_V10M2_V12M2,  RISCV::V10M2_V12M2_V14M2,
    RISCV::V12M2_V14M2_V16M2, RISCV::V14M2_V16M2_V18M2,
    RISCV::V16M2_V18M2_V20M2, RISCV::V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN4M2s[] = {
    RISCV::V8M2_V10M2_V12M2_V14M2,  RISCV::V10M2_V12M2_V14M2_V16M2,
    RISCV::V12M2_V14M2_V16M2_V18M2, RISCV::V14M2_V16M2_V18M2_V20M2,
    RISCV::V16M2_V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN2M4s[] = {RISCV::V8M4_V12M4, RISCV::V12M4_V16M4,
                                       RISCV::V16M4_V20M4};

ArrayRef<MCPhysReg> RISCV::getArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15,
                                       RISCV::X16, RISCV::X17};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABI.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}
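
// Minimal usage sketch (hypothetical, for illustration): CCState hands out
// registers from these tables in order, so under LP64 the first two integer
// arguments land in a0/a1:
//
//   ArrayRef<MCPhysReg> GPRs = RISCV::getArgGPRs(RISCVABI::ABI_LP64);
//   MCRegister A0 = State.AllocateReg(GPRs); // RISCV::X10 (a0)
//   MCRegister A1 = State.AllocateReg(GPRs); // RISCV::X11 (a1)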

static ArrayRef<MCPhysReg> getArgGPR16s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {
      RISCV::X10_H, RISCV::X11_H, RISCV::X12_H, RISCV::X13_H,
      RISCV::X14_H, RISCV::X15_H, RISCV::X16_H, RISCV::X17_H};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABI.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
                                       RISCV::X12_H, RISCV::X13_H,
                                       RISCV::X14_H, RISCV::X15_H};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getArgGPR32s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {
      RISCV::X10_W, RISCV::X11_W, RISCV::X12_W, RISCV::X13_W,
      RISCV::X14_W, RISCV::X15_W, RISCV::X16_W, RISCV::X17_W};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABI.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
                                       RISCV::X12_W, RISCV::X13_W,
                                       RISCV::X14_W, RISCV::X15_W};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be
  // used for the save-restore libcalls, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, RISCV::X15,
      RISCV::X16, RISCV::X17, RISCV::X28, RISCV::X29, RISCV::X30, RISCV::X31};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                          RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRF16s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be
  // used for the save-restore libcalls, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10_H, RISCV::X11_H, RISCV::X12_H, RISCV::X13_H,
      RISCV::X14_H, RISCV::X15_H, RISCV::X16_H, RISCV::X17_H,
      RISCV::X28_H, RISCV::X29_H, RISCV::X30_H, RISCV::X31_H};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
                                          RISCV::X12_H, RISCV::X13_H,
                                          RISCV::X14_H, RISCV::X15_H};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRF32s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be
  // used for the save-restore libcalls, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10_W, RISCV::X11_W, RISCV::X12_W, RISCV::X13_W,
      RISCV::X14_W, RISCV::X15_W, RISCV::X16_W, RISCV::X17_W,
      RISCV::X28_W, RISCV::X29_W, RISCV::X30_W, RISCV::X31_W};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
                                          RISCV::X12_W, RISCV::X13_W,
                                          RISCV::X14_W, RISCV::X15_W};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
static bool CC_RISCVAssign2XLen(CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2,
                                const RISCVSubtarget &Subtarget) {
  unsigned XLen = Subtarget.getXLen();
  unsigned XLenInBytes = XLen / 8;
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  bool EABI = ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E;

  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    // TODO: To be compatible with GCC's behaviors, we force them to have
    // 4-byte alignment. This behavior may be changed when RV32E/ILP32E is
    // ratified.
    Align StackAlign(XLenInBytes);
    if (!EABI || XLen != 32)
      StackAlign = std::max(StackAlign, ArgFlags1.getNonZeroOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
    return false;
  }

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
  }

  return false;
}
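
// Worked example (a sketch, assuming RV32/ILP32): an i64 argument is
// legalised into two i32 halves. With two free argument GPRs the halves land
// in, e.g., a2/a3; with only a7 free the low half goes to a7 and the high
// half to the first stack slot; with no GPRs free both halves go to the
// stack, with the first slot aligned to the argument's original alignment.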

static MCRegister allocateRVVReg(MVT LocVT, unsigned ValNo, CCState &State,
                                 const RISCVTargetLowering &TLI) {
  const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT);
  if (RC == &RISCV::VRRegClass) {
    // Assign the first mask argument to V0.
    // This is an interim calling convention and it may be changed in the
    // future.
    if (LocVT.getVectorElementType() == MVT::i1)
      if (MCRegister Reg = State.AllocateReg(RISCV::V0))
        return Reg;
    return State.AllocateReg(ArgVRs);
  }
  if (RC == &RISCV::VRM2RegClass)
    return State.AllocateReg(ArgVRM2s);
  if (RC == &RISCV::VRM4RegClass)
    return State.AllocateReg(ArgVRM4s);
  if (RC == &RISCV::VRM8RegClass)
    return State.AllocateReg(ArgVRM8s);
  if (RC == &RISCV::VRN2M1RegClass)
    return State.AllocateReg(ArgVRN2M1s);
  if (RC == &RISCV::VRN3M1RegClass)
    return State.AllocateReg(ArgVRN3M1s);
  if (RC == &RISCV::VRN4M1RegClass)
    return State.AllocateReg(ArgVRN4M1s);
  if (RC == &RISCV::VRN5M1RegClass)
    return State.AllocateReg(ArgVRN5M1s);
  if (RC == &RISCV::VRN6M1RegClass)
    return State.AllocateReg(ArgVRN6M1s);
  if (RC == &RISCV::VRN7M1RegClass)
    return State.AllocateReg(ArgVRN7M1s);
  if (RC == &RISCV::VRN8M1RegClass)
    return State.AllocateReg(ArgVRN8M1s);
  if (RC == &RISCV::VRN2M2RegClass)
    return State.AllocateReg(ArgVRN2M2s);
  if (RC == &RISCV::VRN3M2RegClass)
    return State.AllocateReg(ArgVRN3M2s);
  if (RC == &RISCV::VRN4M2RegClass)
    return State.AllocateReg(ArgVRN4M2s);
  if (RC == &RISCV::VRN2M4RegClass)
    return State.AllocateReg(ArgVRN2M4s);
  llvm_unreachable("Unhandled register class for ValueType");
}
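
// Sketch of the mapping above (illustrative; e.g. nxv2i32 maps to the
// LMUL=1 VR class and nxv4i32 to the LMUL=2 VRM2 class): successive
// allocations respect register overlap, since AllocateReg also marks
// aliasing registers as used:
//
//   MCRegister A = allocateRVVReg(MVT::nxv2i32, 0, State, TLI); // v8
//   MCRegister B = allocateRVVReg(MVT::nxv4i32, 1, State, TLI); // v10m2,
//   // because v8m2 overlaps the already-allocated v8.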

// Implements the RISC-V calling convention. Returns true upon failure.
//
// This has a slightly different signature from CCAssignFn - it takes an
// extra `bool IsRet` parameter.
static bool CC_RISCV_Impl(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
                          CCState &State, bool IsRet) {
  assert(ValVT == LocVT && "Expected ValVT and LocVT to match");
  const MachineFunction &MF = State.getMachineFunction();
  const DataLayout &DL = MF.getDataLayout();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();

  unsigned XLen = Subtarget.getXLen();
  MVT XLenVT = Subtarget.getXLenVT();

  if (ArgFlags.isNest()) {
    // The static chain parameter must not be passed in normal argument
    // registers, so we assign t2/t3 for it as done in GCC's
    // __builtin_call_with_static_chain.
    bool HasCFBranch =
        MF.getInfo<RISCVMachineFunctionInfo>()->hasCFProtectionBranch();

    // Normal: t2, branch control flow protection: t3.
    const auto StaticChainReg = HasCFBranch ? RISCV::X28 : RISCV::X7;

    RISCVABI::ABI ABI = Subtarget.getTargetABI();
    if (HasCFBranch &&
        (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E))
      reportFatalUsageError(
          "Nested functions with control flow protection are not "
          "usable with ILP32E or LP64E ABI.");
    if (MCRegister Reg = State.AllocateReg(StaticChainReg)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
  if ((!LocVT.isVector() || Subtarget.isPExtPackedType(LocVT)) && IsRet &&
      ValNo > 1)
    return true;

  // AllowFPRForF16_F32 is true if targeting an FLEN>=32 ABI and the argument
  // isn't variadic.
  bool AllowFPRForF16_F32 = false;
  // AllowFPRForF64 is true if targeting an FLEN>=64 ABI and the argument
  // isn't variadic.
  bool AllowFPRForF64 = false;

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  switch (ABI) {
  default:
    llvm_unreachable("Unexpected ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64E:
    break;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    AllowFPRForF64 = !ArgFlags.isVarArg();
    [[fallthrough]];
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    AllowFPRForF16_F32 = !ArgFlags.isVarArg();
    break;
  }
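
  // Concretely (a sketch of the switch above): under ilp32d/lp64d both flags
  // are true for non-variadic arguments; under ilp32f/lp64f only
  // AllowFPRForF16_F32 is true; under the soft-float and E ABIs both stay
  // false, so FP values travel through GPRs or the stack.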

  if ((LocVT == MVT::f16 || LocVT == MVT::bf16) && AllowFPRForF16_F32) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR16s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && AllowFPRForF16_F32) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR32s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && AllowFPRForF64) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR64s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) {
    if (MCRegister Reg = State.AllocateReg(getArgGPR16s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    if (MCRegister Reg = State.AllocateReg(getArgGPR32s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);

  // Zdinx uses GPRs without a bitcast when possible.
  if (LocVT == MVT::f64 && XLen == 64 && Subtarget.hasStdExtZdinx()) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // FP types smaller than XLen are passed in a GPR via a custom assignment.
  if (LocVT == MVT::f16 || LocVT == MVT::bf16 ||
      (LocVT == MVT::f32 && XLen == 64)) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      LocVT = XLenVT;
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Bitcast FP to GPR if we can use a GPR register.
  if ((XLen == 32 && LocVT == MVT::f32) || (XLen == 64 && LocVT == MVT::f64)) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      LocVT = XLenVT;
      LocInfo = CCValAssign::BCvt;
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
  // TODO: To be compatible with GCC's behaviors, we don't align registers
  // currently if we are using ILP32E calling convention. This behavior may be
  // changed when RV32E/ILP32E is ratified.
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (ArgFlags.isVarArg() &&
      ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes &&
      ABI != RISCVABI::ABI_ILP32E) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }
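
  // Example (a sketch, assuming RV32/ILP32D): for printf("%f", x) the f64
  // vararg has 8-byte alignment. The format pointer takes a0, leaving
  // RegIdx == 1 (odd), so a1 is skipped and the two halves of x are passed
  // in the aligned pair a2/a3.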

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Handle passing f64 on RV32D with a soft float ABI or when floating point
  // registers are exhausted.
  if (XLen == 32 && LocVT == MVT::f64) {
    assert(PendingLocs.empty() && "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
    // GPRs, split between a GPR and the stack, or passed completely on the
    // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
    // cases.
    MCRegister Reg = State.AllocateReg(ArgGPRs);
    if (!Reg) {
      int64_t StackOffset = State.AllocateStack(8, Align(8));
      State.addLoc(
          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      return false;
    }
    LocVT = MVT::i32;
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    MCRegister HiReg = State.AllocateReg(ArgGPRs);
    if (HiReg) {
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
    } else {
      int64_t StackOffset = State.AllocateStack(4, Align(4));
      State.addLoc(
          CCValAssign::getCustomMem(ValNo, ValVT, StackOffset, LocVT,
                                    LocInfo));
    }
    return false;
  }
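
  // The three shapes this can produce (an illustrative sketch, assuming
  // ILP32 with six of the eight argument GPRs already taken):
  //   - two GPRs free: lo half in a6, hi half in a7;
  //   - one GPR free:  lo half in a7, hi half in a 4-byte stack slot;
  //   - no GPRs free:  the whole f64 in an 8-byte-aligned stack slot.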

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
  if ((LocVT.isScalarInteger() || Subtarget.isPExtPackedType(LocVT)) &&
      ArgFlags.isSplitEnd() && PendingLocs.size() <= 1) {
    assert(PendingLocs.size() == 1 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(State, VA, AF, ValNo, ValVT, LocVT, ArgFlags,
                               Subtarget);
  }

  // Split arguments might be passed indirectly, so keep track of the pending
  // values. Split vectors, excluding P extension packed vectors (see
  // isPExtPackedType), are passed via a mix of registers and indirectly, so
  // treat them as we would any other argument.
  if ((LocVT.isScalarInteger() || Subtarget.isPExtPackedType(LocVT)) &&
      (ArgFlags.isSplit() || !PendingLocs.empty())) {
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // Allocate to a register if possible, or else a stack slot.
  MCRegister Reg;
  unsigned StoreSizeBytes = XLen / 8;
  Align StackAlign = Align(XLen / 8);

  // FIXME: If the P extension and the V extension are enabled at the same
  // time, which should be checked first?
  if (!Subtarget.isPExtPackedType(LocVT) &&
      (LocVT.isVector() || LocVT.isRISCVVectorTuple())) {
    Reg = allocateRVVReg(LocVT, ValNo, State, TLI);
    if (Reg) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (LocVT.isFixedLengthVector()) {
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
    } else {
      // For return values, the vector must be passed fully via registers or
      // via the stack.
      if (IsRet)
        return true;
      // Try using a GPR to pass the address.
      if ((Reg = State.AllocateReg(ArgGPRs))) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else if (LocVT.isScalableVector()) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else {
        StoreSizeBytes = LocVT.getStoreSize();
        // Align vectors to their element sizes, being careful for vXi1
        // vectors.
        StackAlign = MaybeAlign(LocVT.getScalarSizeInBits() / 8).valueOrOne();
      }
    }
  } else {
    Reg = State.AllocateReg(ArgGPRs);
  }

  int64_t StackOffset =
      Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        State.addLoc(CCValAssign::getReg(It.getValNo(), It.getValVT(), Reg,
                                         XLenVT, CCValAssign::Indirect));
      else
        State.addLoc(CCValAssign::getMem(It.getValNo(), It.getValVT(),
                                         StackOffset, XLenVT,
                                         CCValAssign::Indirect));
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }
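
  // Example (a sketch, assuming RV64): an i256 argument is legalised into
  // four i64 pieces, so PendingLocs reaches size 4 at the split end. Every
  // piece is then given the same GPR or stack slot with
  // CCValAssign::Indirect, and the lowering code passes the value's address
  // instead of the value itself.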

  assert(((LocVT.isFloatingPoint() && !LocVT.isVector()) || LocVT == XLenVT ||
          Subtarget.isPExtPackedType(LocVT) ||
          (TLI.getSubtarget().hasVInstructions() &&
           (LocVT.isVector() || LocVT.isRISCVVectorTuple()))) &&
         "Expected an XLenVT or vector types at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  return false;
}
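
// Hypothetical driver loop (a sketch; the real callers live in the RISC-V
// lowering code, and the names Outs/OrigTys/CCInfo are illustrative only):
//
//   for (unsigned I = 0, E = Outs.size(); I != E; ++I)
//     if (CC_RISCV(I, Outs[I].VT, Outs[I].VT, CCValAssign::Full,
//                  Outs[I].Flags, OrigTys[I], CCInfo))
//       llvm_unreachable("argument should always be assignable");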

// FastCC shows less than a 1% performance improvement on particular
// benchmarks, but in theory it may still benefit some cases.
static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
                            CCState &State) {
  const MachineFunction &MF = State.getMachineFunction();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();
  RISCVABI::ABI ABI = Subtarget.getTargetABI();

  if ((LocVT == MVT::f16 && Subtarget.hasStdExtZfhmin()) ||
      (LocVT == MVT::bf16 && Subtarget.hasStdExtZfbfmin())) {
    static const MCPhysReg FPR16List[] = {
        RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
        RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
        RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
        RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
    if (MCRegister Reg = State.AllocateReg(FPR16List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    static const MCPhysReg FPR32List[] = {
        RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
        RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
        RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
        RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    static const MCPhysReg FPR64List[] = {
        RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
        RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
        RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
        RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Check if there is an available GPRF16 before hitting the stack.
  if (LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF16s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Check if there is an available GPRF32 before hitting the stack.
  if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF32s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Check if there is an available GPR before hitting the stack.
  if (LocVT == MVT::f64 && Subtarget.is64Bit() && Subtarget.hasStdExtZdinx()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      if (LocVT.getSizeInBits() != Subtarget.getXLen()) {
        LocVT = XLenVT;
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  ArrayRef<MCPhysReg> ArgGPRs = getFastCCArgGPRs(ABI);

  if (LocVT.isVector()) {
    if (MCRegister Reg = allocateRVVReg(ValVT, ValNo, State, TLI)) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (LocVT.isFixedLengthVector()) {
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }

    // Pass scalable vectors indirectly. Pass fixed vectors indirectly if we
    // have a free GPR.
    if (LocVT.isScalableVector() ||
        State.getFirstUnallocated(ArgGPRs) != ArgGPRs.size()) {
      LocInfo = CCValAssign::Indirect;
      LocVT = XLenVT;
    }
  }

  if (LocVT == XLenVT) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == XLenVT || LocVT == MVT::f16 || LocVT == MVT::bf16 ||
      LocVT == MVT::f32 || LocVT == MVT::f64 || LocVT.isFixedLengthVector()) {
    Align StackAlign =
        MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
    int64_t Offset = State.AllocateStack(LocVT.getStoreSize(), StackAlign);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }

  return true; // CC didn't match.
}

static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                         CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                         Type *OrigTy, CCState &State) {
  if (ArgFlags.isNest()) {
    report_fatal_error(
        "Attribute 'nest' is not supported in GHC calling convention");
  }

  static const MCPhysReg GPRList[] = {
      RISCV::X9,  RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
      RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};

  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
    //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  const RISCVSubtarget &Subtarget =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    // Pass in STG registers: F1, ..., F6
    //                        fs0 ... fs5
    static const MCPhysReg FPR32List[] = {RISCV::F8_F,  RISCV::F9_F,
                                          RISCV::F18_F, RISCV::F19_F,
                                          RISCV::F20_F, RISCV::F21_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    // Pass in STG registers: D1, ..., D6
    //                        fs6 ... fs11
    static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
                                          RISCV::F24_D, RISCV::F25_D,
                                          RISCV::F26_D, RISCV::F27_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    static const MCPhysReg GPR32List[] = {
        RISCV::X9_W,  RISCV::X18_W, RISCV::X19_W, RISCV::X20_W,
        RISCV::X21_W, RISCV::X22_W, RISCV::X23_W, RISCV::X24_W,
        RISCV::X25_W, RISCV::X26_W, RISCV::X27_W};
    if (MCRegister Reg = State.AllocateReg(GPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtZdinx() && Subtarget.is64Bit()) {
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  report_fatal_error("No registers left in GHC calling convention");
  return true;
}