//===-- RISCVCallingConv.cpp - RISC-V Custom CC Routines ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the custom routines for the RISC-V Calling Convention.
//
//===----------------------------------------------------------------------===//

#include "RISCVCallingConv.h"
#include "RISCVSubtarget.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCRegister.h"

using namespace llvm;

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
// passed in a pair of registers (fp+fp, int+fp), and both registers are
// available, then pass as two separate arguments. If either the GPRs or FPRs
// are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
// slot (as it is larger than 2*XLEN and the floating point rules don't
// apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
// word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
// not based on its size and fields. If it will be returned by reference, the
// frontend must modify the prototype so a pointer with the sret annotation is
// passed as the first argument. This is not necessary for large scalar
// returns.
// * Struct return values and varargs should be coerced to structs containing
// register-size fields in the same situations they would be for fixed
// arguments.

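// As an illustrative reading of these rules on RV32 with a hardfloat ABI:
// struct { int32_t a; float b; } may be passed as two separate arguments (one
// GPR, one FPR) while both register classes have registers free, whereas
// struct { int64_t a; int64_t b; } is larger than 2*XLEN and is therefore
// passed byval via a pointer.
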
static const MCPhysReg ArgFPR16s[] = {RISCV::F10_H, RISCV::F11_H, RISCV::F12_H,
                                      RISCV::F13_H, RISCV::F14_H, RISCV::F15_H,
                                      RISCV::F16_H, RISCV::F17_H};
static const MCPhysReg ArgFPR32s[] = {RISCV::F10_F, RISCV::F11_F, RISCV::F12_F,
                                      RISCV::F13_F, RISCV::F14_F, RISCV::F15_F,
                                      RISCV::F16_F, RISCV::F17_F};
static const MCPhysReg ArgFPR64s[] = {RISCV::F10_D, RISCV::F11_D, RISCV::F12_D,
                                      RISCV::F13_D, RISCV::F14_D, RISCV::F15_D,
                                      RISCV::F16_D, RISCV::F17_D};
// This is an interim calling convention and it may be changed in the future.
static const MCPhysReg ArgVRs[] = {
    RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
    RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
    RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
                                     RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
                                     RISCV::V20M2, RISCV::V22M2};
static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
                                     RISCV::V20M4};
static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
static const MCPhysReg ArgVRN2M1s[] = {
    RISCV::V8_V9,   RISCV::V9_V10,  RISCV::V10_V11, RISCV::V11_V12,
    RISCV::V12_V13, RISCV::V13_V14, RISCV::V14_V15, RISCV::V15_V16,
    RISCV::V16_V17, RISCV::V17_V18, RISCV::V18_V19, RISCV::V19_V20,
    RISCV::V20_V21, RISCV::V21_V22, RISCV::V22_V23};
static const MCPhysReg ArgVRN3M1s[] = {
    RISCV::V8_V9_V10,   RISCV::V9_V10_V11,  RISCV::V10_V11_V12,
    RISCV::V11_V12_V13, RISCV::V12_V13_V14, RISCV::V13_V14_V15,
    RISCV::V14_V15_V16, RISCV::V15_V16_V17, RISCV::V16_V17_V18,
    RISCV::V17_V18_V19, RISCV::V18_V19_V20, RISCV::V19_V20_V21,
    RISCV::V20_V21_V22, RISCV::V21_V22_V23};
static const MCPhysReg ArgVRN4M1s[] = {
    RISCV::V8_V9_V10_V11,   RISCV::V9_V10_V11_V12,  RISCV::V10_V11_V12_V13,
    RISCV::V11_V12_V13_V14, RISCV::V12_V13_V14_V15, RISCV::V13_V14_V15_V16,
    RISCV::V14_V15_V16_V17, RISCV::V15_V16_V17_V18, RISCV::V16_V17_V18_V19,
    RISCV::V17_V18_V19_V20, RISCV::V18_V19_V20_V21, RISCV::V19_V20_V21_V22,
    RISCV::V20_V21_V22_V23};
static const MCPhysReg ArgVRN5M1s[] = {
    RISCV::V8_V9_V10_V11_V12,   RISCV::V9_V10_V11_V12_V13,
    RISCV::V10_V11_V12_V13_V14, RISCV::V11_V12_V13_V14_V15,
    RISCV::V12_V13_V14_V15_V16, RISCV::V13_V14_V15_V16_V17,
    RISCV::V14_V15_V16_V17_V18, RISCV::V15_V16_V17_V18_V19,
    RISCV::V16_V17_V18_V19_V20, RISCV::V17_V18_V19_V20_V21,
    RISCV::V18_V19_V20_V21_V22, RISCV::V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN6M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13,   RISCV::V9_V10_V11_V12_V13_V14,
    RISCV::V10_V11_V12_V13_V14_V15, RISCV::V11_V12_V13_V14_V15_V16,
    RISCV::V12_V13_V14_V15_V16_V17, RISCV::V13_V14_V15_V16_V17_V18,
    RISCV::V14_V15_V16_V17_V18_V19, RISCV::V15_V16_V17_V18_V19_V20,
    RISCV::V16_V17_V18_V19_V20_V21, RISCV::V17_V18_V19_V20_V21_V22,
    RISCV::V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN7M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13_V14,   RISCV::V9_V10_V11_V12_V13_V14_V15,
    RISCV::V10_V11_V12_V13_V14_V15_V16, RISCV::V11_V12_V13_V14_V15_V16_V17,
    RISCV::V12_V13_V14_V15_V16_V17_V18, RISCV::V13_V14_V15_V16_V17_V18_V19,
    RISCV::V14_V15_V16_V17_V18_V19_V20, RISCV::V15_V16_V17_V18_V19_V20_V21,
    RISCV::V16_V17_V18_V19_V20_V21_V22, RISCV::V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN8M1s[] = {RISCV::V8_V9_V10_V11_V12_V13_V14_V15,
                                       RISCV::V9_V10_V11_V12_V13_V14_V15_V16,
                                       RISCV::V10_V11_V12_V13_V14_V15_V16_V17,
                                       RISCV::V11_V12_V13_V14_V15_V16_V17_V18,
                                       RISCV::V12_V13_V14_V15_V16_V17_V18_V19,
                                       RISCV::V13_V14_V15_V16_V17_V18_V19_V20,
                                       RISCV::V14_V15_V16_V17_V18_V19_V20_V21,
                                       RISCV::V15_V16_V17_V18_V19_V20_V21_V22,
                                       RISCV::V16_V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN2M2s[] = {RISCV::V8M2_V10M2,  RISCV::V10M2_V12M2,
                                       RISCV::V12M2_V14M2, RISCV::V14M2_V16M2,
                                       RISCV::V16M2_V18M2, RISCV::V18M2_V20M2,
                                       RISCV::V20M2_V22M2};
static const MCPhysReg ArgVRN3M2s[] = {
    RISCV::V8M2_V10M2_V12M2,  RISCV::V10M2_V12M2_V14M2,
    RISCV::V12M2_V14M2_V16M2, RISCV::V14M2_V16M2_V18M2,
    RISCV::V16M2_V18M2_V20M2, RISCV::V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN4M2s[] = {
    RISCV::V8M2_V10M2_V12M2_V14M2,  RISCV::V10M2_V12M2_V14M2_V16M2,
    RISCV::V12M2_V14M2_V16M2_V18M2, RISCV::V14M2_V16M2_V18M2_V20M2,
    RISCV::V16M2_V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN2M4s[] = {RISCV::V8M4_V12M4, RISCV::V12M4_V16M4,
                                       RISCV::V16M4_V20M4};

ArrayRef<MCPhysReg> RISCV::getArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15,
                                       RISCV::X16, RISCV::X17};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABI.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getArgGPR16s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10_H, RISCV::X11_H, RISCV::X12_H,
                                       RISCV::X13_H, RISCV::X14_H, RISCV::X15_H,
                                       RISCV::X16_H, RISCV::X17_H};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABI.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
                                       RISCV::X12_H, RISCV::X13_H,
                                       RISCV::X14_H, RISCV::X15_H};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getArgGPR32s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10_W, RISCV::X11_W, RISCV::X12_W,
                                       RISCV::X13_W, RISCV::X14_W, RISCV::X15_W,
                                       RISCV::X16_W, RISCV::X17_W};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABI.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
                                       RISCV::X12_W, RISCV::X13_W,
                                       RISCV::X14_W, RISCV::X15_W};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be
  // used for the save-restore libcall, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, RISCV::X15,
      RISCV::X16, RISCV::X17, RISCV::X28, RISCV::X29, RISCV::X30, RISCV::X31};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                          RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRF16s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be
  // used for the save-restore libcall, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10_H, RISCV::X11_H, RISCV::X12_H, RISCV::X13_H,
      RISCV::X14_H, RISCV::X15_H, RISCV::X16_H, RISCV::X17_H,
      RISCV::X28_H, RISCV::X29_H, RISCV::X30_H, RISCV::X31_H};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
                                          RISCV::X12_H, RISCV::X13_H,
                                          RISCV::X14_H, RISCV::X15_H};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRF32s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be
  // used for the save-restore libcall, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10_W, RISCV::X11_W, RISCV::X12_W, RISCV::X13_W,
      RISCV::X14_W, RISCV::X15_W, RISCV::X16_W, RISCV::X17_W,
      RISCV::X28_W, RISCV::X29_W, RISCV::X30_W, RISCV::X31_W};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
                                          RISCV::X12_W, RISCV::X13_W,
                                          RISCV::X14_W, RISCV::X15_W};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
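// For example, on ILP32 the two halves of a split i64 may land in a GPR pair
// such as a0/a1, be split between the last free GPR and a stack slot, or be
// placed entirely on the stack once all argument GPRs have been taken.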
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2, bool EABI) {
  unsigned XLenInBytes = XLen / 8;
  const RISCVSubtarget &STI =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();
  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(STI.getTargetABI());

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    // TODO: To be compatible with GCC's behaviors, we force them to have 4-byte
    // alignment. This behavior may be changed when RV32E/ILP32E is ratified.
    Align StackAlign(XLenInBytes);
    if (!EABI || XLen != 32)
      StackAlign = std::max(StackAlign, ArgFlags1.getNonZeroOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
    return false;
  }

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
  }

  return false;
}

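// Allocate a vector register or register group for ValVT based on its
// register class. Note that larger LMUL groups have fewer legal starting
// positions: an LMUL=4 value, for example, can only be assigned v8, v12, v16,
// or v20 (the ArgVRM4s list), since an M4 group must start at a register
// number that is a multiple of four.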
static MCRegister allocateRVVReg(MVT ValVT, unsigned ValNo, CCState &State,
                                 const RISCVTargetLowering &TLI) {
  const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
  if (RC == &RISCV::VRRegClass) {
    // Assign the first mask argument to V0.
    // This is an interim calling convention and it may be changed in the
    // future.
    if (ValVT.getVectorElementType() == MVT::i1)
      if (MCRegister Reg = State.AllocateReg(RISCV::V0))
        return Reg;
    return State.AllocateReg(ArgVRs);
  }
  if (RC == &RISCV::VRM2RegClass)
    return State.AllocateReg(ArgVRM2s);
  if (RC == &RISCV::VRM4RegClass)
    return State.AllocateReg(ArgVRM4s);
  if (RC == &RISCV::VRM8RegClass)
    return State.AllocateReg(ArgVRM8s);
  if (RC == &RISCV::VRN2M1RegClass)
    return State.AllocateReg(ArgVRN2M1s);
  if (RC == &RISCV::VRN3M1RegClass)
    return State.AllocateReg(ArgVRN3M1s);
  if (RC == &RISCV::VRN4M1RegClass)
    return State.AllocateReg(ArgVRN4M1s);
  if (RC == &RISCV::VRN5M1RegClass)
    return State.AllocateReg(ArgVRN5M1s);
  if (RC == &RISCV::VRN6M1RegClass)
    return State.AllocateReg(ArgVRN6M1s);
  if (RC == &RISCV::VRN7M1RegClass)
    return State.AllocateReg(ArgVRN7M1s);
  if (RC == &RISCV::VRN8M1RegClass)
    return State.AllocateReg(ArgVRN8M1s);
  if (RC == &RISCV::VRN2M2RegClass)
    return State.AllocateReg(ArgVRN2M2s);
  if (RC == &RISCV::VRN3M2RegClass)
    return State.AllocateReg(ArgVRN3M2s);
  if (RC == &RISCV::VRN4M2RegClass)
    return State.AllocateReg(ArgVRN4M2s);
  if (RC == &RISCV::VRN2M4RegClass)
    return State.AllocateReg(ArgVRN2M4s);
  llvm_unreachable("Unhandled register class for ValueType");
}

// Implements the RISC-V calling convention. Returns true upon failure.
bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                    CCState &State, bool IsRet, Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const DataLayout &DL = MF.getDataLayout();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();

  unsigned XLen = Subtarget.getXLen();
  MVT XLenVT = Subtarget.getXLenVT();

  if (ArgFlags.isNest()) {
    // The static chain parameter must not be passed in normal argument
    // registers, so we assign t2/t3 to it, as done in GCC's
    // __builtin_call_with_static_chain.
    bool HasCFBranch =
        Subtarget.hasStdExtZicfilp() &&
        MF.getFunction().getParent()->getModuleFlag("cf-protection-branch");

    // Normal: t2, branch control flow protection: t3
    const auto StaticChainReg = HasCFBranch ? RISCV::X28 : RISCV::X7;

    RISCVABI::ABI ABI = Subtarget.getTargetABI();
    if (HasCFBranch &&
        (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E))
      reportFatalUsageError(
          "Nested functions with control flow protection are not "
          "usable with ILP32E or LP64E ABI.");
    if (MCRegister Reg = State.AllocateReg(StaticChainReg)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
  if (!LocVT.isVector() && IsRet && ValNo > 1)
    return true;

  // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
  // variadic argument, or if no F16/F32 argument registers are available.
  bool UseGPRForF16_F32 = true;
  // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
  // variadic argument, or if no F64 argument registers are available.
  bool UseGPRForF64 = true;

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  switch (ABI) {
  default:
    llvm_unreachable("Unexpected ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64E:
    break;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    UseGPRForF16_F32 = ArgFlags.isVarArg();
    break;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    UseGPRForF16_F32 = ArgFlags.isVarArg();
    UseGPRForF64 = ArgFlags.isVarArg();
    break;
  }

  if ((LocVT == MVT::f16 || LocVT == MVT::bf16) && !UseGPRForF16_F32) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR16s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && !UseGPRForF16_F32) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR32s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && !UseGPRForF64) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR64s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (ValVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) {
    if (MCRegister Reg = State.AllocateReg(getArgGPR16s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (ValVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    if (MCRegister Reg = State.AllocateReg(getArgGPR32s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);

  // Zdinx uses GPRs without a bitcast when possible.
  if (LocVT == MVT::f64 && XLen == 64 && Subtarget.hasStdExtZdinx()) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // FP values smaller than XLen are passed in a GPR using a custom location.
  if (LocVT == MVT::f16 || LocVT == MVT::bf16 ||
      (LocVT == MVT::f32 && XLen == 64)) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      LocVT = XLenVT;
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Bitcast FP to GPR if we can use a GPR register.
  if ((XLen == 32 && LocVT == MVT::f32) || (XLen == 64 && LocVT == MVT::f64)) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      LocVT = XLenVT;
      LocInfo = CCValAssign::BCvt;
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
  // TODO: To be compatible with GCC's behaviors, we don't align registers
  // currently if we are using ILP32E calling convention. This behavior may be
  // changed when RV32E/ILP32E is ratified.
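  // For example, under the rule above, a variadic double on RV32 (8-byte size
  // and alignment) must start in an even-numbered GPR: if a0 already holds an
  // earlier argument, a1 is skipped and the double occupies the a2/a3 pair.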
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (ArgFlags.isVarArg() && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes &&
      ABI != RISCVABI::ABI_ILP32E) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Handle passing f64 on RV32D with a soft float ABI or when floating point
  // registers are exhausted.
  if (XLen == 32 && LocVT == MVT::f64) {
    assert(PendingLocs.empty() && "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
    // GPRs, split between a GPR and the stack, or passed completely on the
    // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
    // cases.
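    // For example, if only a7 remains unallocated, the low half of the f64 is
    // assigned to a7 and the high half goes to a 4-byte stack slot.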
    MCRegister Reg = State.AllocateReg(ArgGPRs);
    if (!Reg) {
      int64_t StackOffset = State.AllocateStack(8, Align(8));
      State.addLoc(
          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      return false;
    }
    LocVT = MVT::i32;
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    MCRegister HiReg = State.AllocateReg(ArgGPRs);
    if (HiReg) {
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
    } else {
      int64_t StackOffset = State.AllocateStack(4, Align(4));
      State.addLoc(
          CCValAssign::getCustomMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
    }
    return false;
  }

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
  if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
      PendingLocs.size() <= 1) {
    assert(PendingLocs.size() == 1 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(
        XLen, State, VA, AF, ValNo, ValVT, LocVT, ArgFlags,
        ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E);
  }

  // Split arguments might be passed indirectly, so keep track of the pending
  // values. Split vectors are passed via a mix of registers and indirectly, so
  // treat them as we would any other argument.
  if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // Allocate to a register if possible, or else a stack slot.
  MCRegister Reg;
  unsigned StoreSizeBytes = XLen / 8;
  Align StackAlign = Align(XLen / 8);

  if (ValVT.isVector() || ValVT.isRISCVVectorTuple()) {
    Reg = allocateRVVReg(ValVT, ValNo, State, TLI);
    if (Reg) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (ValVT.isFixedLengthVector()) {
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
    } else {
      // For return values, the vector must be passed fully via registers or
      // via the stack.
      if (IsRet)
        return true;
      // Try using a GPR to pass the address.
      if ((Reg = State.AllocateReg(ArgGPRs))) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else if (ValVT.isScalableVector()) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else {
        StoreSizeBytes = ValVT.getStoreSize();
        // Align vectors to their element sizes, being careful for vXi1
        // vectors.
        StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
      }
    }
  } else {
    Reg = State.AllocateReg(ArgGPRs);
  }

  int64_t StackOffset =
      Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        State.addLoc(CCValAssign::getReg(It.getValNo(), It.getValVT(), Reg,
                                         XLenVT, CCValAssign::Indirect));
      else
        State.addLoc(CCValAssign::getMem(It.getValNo(), It.getValVT(),
                                         StackOffset, XLenVT,
                                         CCValAssign::Indirect));
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert(((ValVT.isFloatingPoint() && !ValVT.isVector()) || LocVT == XLenVT ||
          (TLI.getSubtarget().hasVInstructions() &&
           (ValVT.isVector() || ValVT.isRISCVVectorTuple()))) &&
         "Expected an XLenVT or vector types at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  return false;
}

// FastCC has shown less than 1% performance improvement on some particular
// benchmarks, but theoretically it may benefit some cases.
bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsRet,
                           Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();
  RISCVABI::ABI ABI = Subtarget.getTargetABI();

  if ((LocVT == MVT::f16 && Subtarget.hasStdExtZfhmin()) ||
      (LocVT == MVT::bf16 && Subtarget.hasStdExtZfbfmin())) {
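    // FastCC extends the usual argument FPRs (fa0-fa7) with the temporary
    // registers ft0-ft7 and ft8-ft11.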
    static const MCPhysReg FPR16List[] = {
        RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
        RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
        RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
        RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
    if (MCRegister Reg = State.AllocateReg(FPR16List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    static const MCPhysReg FPR32List[] = {
        RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
        RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
        RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
        RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    static const MCPhysReg FPR64List[] = {
        RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
        RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
        RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
        RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Check if there is an available GPRF16 before hitting the stack.
  if (LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF16s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Check if there is an available GPRF32 before hitting the stack.
  if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF32s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Check if there is an available GPR before hitting the stack.
  if (LocVT == MVT::f64 && Subtarget.is64Bit() && Subtarget.hasStdExtZdinx()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      if (LocVT.getSizeInBits() != Subtarget.getXLen()) {
        LocVT = XLenVT;
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  ArrayRef<MCPhysReg> ArgGPRs = getFastCCArgGPRs(ABI);

  if (LocVT.isVector()) {
    if (MCRegister Reg = allocateRVVReg(ValVT, ValNo, State, TLI)) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (LocVT.isFixedLengthVector()) {
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }

    // Pass scalable vectors indirectly. Pass fixed vectors indirectly if we
    // have a free GPR.
    if (LocVT.isScalableVector() ||
        State.getFirstUnallocated(ArgGPRs) != ArgGPRs.size()) {
      LocInfo = CCValAssign::Indirect;
      LocVT = XLenVT;
    }
  }

  if (LocVT == XLenVT) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == XLenVT || LocVT == MVT::f16 || LocVT == MVT::bf16 ||
      LocVT == MVT::f32 || LocVT == MVT::f64 || LocVT.isFixedLengthVector()) {
    Align StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
    int64_t Offset = State.AllocateStack(LocVT.getStoreSize(), StackAlign);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }

  return true; // CC didn't match.
}

bool llvm::CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                        CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                        Type *OrigTy, CCState &State) {
  if (ArgFlags.isNest()) {
    report_fatal_error(
        "Attribute 'nest' is not supported in GHC calling convention");
  }

  static const MCPhysReg GPRList[] = {
      RISCV::X9,  RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
      RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};

  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
    //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  const RISCVSubtarget &Subtarget =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    // Pass in STG registers: F1, ..., F6
    //                        fs0 ... fs5
    static const MCPhysReg FPR32List[] = {RISCV::F8_F,  RISCV::F9_F,
                                          RISCV::F18_F, RISCV::F19_F,
                                          RISCV::F20_F, RISCV::F21_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    // Pass in STG registers: D1, ..., D6
    //                        fs6 ... fs11
    static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
                                          RISCV::F24_D, RISCV::F25_D,
                                          RISCV::F26_D, RISCV::F27_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    static const MCPhysReg GPR32List[] = {
        RISCV::X9_W,  RISCV::X18_W, RISCV::X19_W, RISCV::X20_W,
        RISCV::X21_W, RISCV::X22_W, RISCV::X23_W, RISCV::X24_W,
        RISCV::X25_W, RISCV::X26_W, RISCV::X27_W};
    if (MCRegister Reg = State.AllocateReg(GPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtZdinx() && Subtarget.is64Bit()) {
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  report_fatal_error("No registers left in GHC calling convention");
  return true;
}