//===-- RISCVRegisterInfo.cpp - RISC-V Register Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISC-V implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//
12
13#include "RISCVRegisterInfo.h"
14#include "RISCV.h"
15#include "RISCVSubtarget.h"
16#include "llvm/ADT/SmallSet.h"
26
27#define GET_REGINFO_TARGET_DESC
28#include "RISCVGenRegisterInfo.inc"
29

using namespace llvm;

static cl::opt<bool> DisableCostPerUse("riscv-disable-cost-per-use",
                                       cl::init(false), cl::Hidden);
static cl::opt<bool>
    DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
                         cl::init(false),
                         cl::desc("Disable two address hints for register "
                                  "allocation"));

static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_Q == RISCV::F0_Q + 1, "Register list not consecutive");
static_assert(RISCV::F31_Q == RISCV::F0_Q + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");

RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
    : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
                           /*PC*/0, HwMode) {}

const MCPhysReg *
RISCVRegisterInfo::getIPRACSRegs(const MachineFunction *MF) const {
  return CSR_IPRA_SaveList;
}

const MCPhysReg *
RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return Subtarget.hasStdExtE() ? CSR_RT_MostRegs_RVE_SaveList
                                  : CSR_RT_MostRegs_SaveList;
  if (MF->getFunction().hasFnAttribute("interrupt")) {
    if (Subtarget.hasVInstructions()) {
      if (Subtarget.hasStdExtD())
        return Subtarget.hasStdExtE() ? CSR_XLEN_F64_V_Interrupt_RVE_SaveList
                                      : CSR_XLEN_F64_V_Interrupt_SaveList;
      if (Subtarget.hasStdExtF())
        return Subtarget.hasStdExtE() ? CSR_XLEN_F32_V_Interrupt_RVE_SaveList
                                      : CSR_XLEN_F32_V_Interrupt_SaveList;
      return Subtarget.hasStdExtE() ? CSR_XLEN_V_Interrupt_RVE_SaveList
                                    : CSR_XLEN_V_Interrupt_SaveList;
    }
    if (Subtarget.hasStdExtD())
      return Subtarget.hasStdExtE() ? CSR_XLEN_F64_Interrupt_RVE_SaveList
                                    : CSR_XLEN_F64_Interrupt_SaveList;
    if (Subtarget.hasStdExtF())
      return Subtarget.hasStdExtE() ? CSR_XLEN_F32_Interrupt_RVE_SaveList
                                    : CSR_XLEN_F32_Interrupt_SaveList;
    return Subtarget.hasStdExtE() ? CSR_Interrupt_RVE_SaveList
                                  : CSR_Interrupt_SaveList;
  }

  bool HasVectorCSR =
      MF->getFunction().getCallingConv() == CallingConv::RISCV_VectorCall &&
      Subtarget.hasVInstructions();

  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64E:
    return CSR_ILP32E_LP64E_SaveList;
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    if (HasVectorCSR)
      return CSR_ILP32_LP64_V_SaveList;
    return CSR_ILP32_LP64_SaveList;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    if (HasVectorCSR)
      return CSR_ILP32F_LP64F_V_SaveList;
    return CSR_ILP32F_LP64F_SaveList;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    if (HasVectorCSR)
      return CSR_ILP32D_LP64D_V_SaveList;
    return CSR_ILP32D_LP64D_SaveList;
  }
}

BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  BitVector Reserved(getNumRegs());
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    // Mark any GPRs requested to be reserved as such.
    if (Subtarget.isRegisterReservedByUser(Reg))
      markSuperRegs(Reserved, Reg);

    // Mark all the registers defined as constant in TableGen as reserved.
    if (isConstantPhysReg(Reg))
      markSuperRegs(Reserved, Reg);
  }

  // Use markSuperRegs to ensure any register aliases are also reserved.
  markSuperRegs(Reserved, RISCV::X2_H); // sp
  markSuperRegs(Reserved, RISCV::X3_H); // gp
  markSuperRegs(Reserved, RISCV::X4_H); // tp
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, RISCV::X8_H); // fp
  // Reserve the base register if we need to realign the stack and allocate
  // variable-sized objects at runtime.
  if (TFI->hasBP(MF))
    markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp

  // Additionally reserve the dummy register used to form the register pair
  // beginning with 'x0' for instructions that take register pairs.
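  // (e.g. the even/odd GPR pairs used by RV32 Zdinx loads/stores and
  // double-width Zacas atomics)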
  markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);

  // There are only 16 GPRs for RVE.
  if (Subtarget.hasStdExtE())
    for (MCPhysReg Reg = RISCV::X16_H; Reg <= RISCV::X31_H; Reg++)
      markSuperRegs(Reserved, Reg);

  // V registers for code generation. We handle them manually.
  markSuperRegs(Reserved, RISCV::VL);
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);

  // Floating point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  // SiFive VCIX state registers.
  markSuperRegs(Reserved, RISCV::SF_VCIX_STATE);

  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    if (Subtarget.hasStdExtE())
      reportFatalUsageError("Graal reserved registers do not exist in RVE");
    markSuperRegs(Reserved, RISCV::X23_H);
    markSuperRegs(Reserved, RISCV::X27_H);
  }

  // Shadow stack pointer.
  markSuperRegs(Reserved, RISCV::SSP);

  // XSfmmbase
  for (MCPhysReg Reg = RISCV::T0; Reg <= RISCV::T15; Reg++)
    markSuperRegs(Reserved, Reg);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                         MCRegister PhysReg) const {
  return !MF.getSubtarget().isRegisterReservedByUser(PhysReg);
}

const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator II,
                                  const DebugLoc &DL, Register DestReg,
                                  Register SrcReg, StackOffset Offset,
                                  MachineInstr::MIFlag Flag,
                                  MaybeAlign RequiredAlign) const {

  if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
    return;

  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = ST.getInstrInfo();

  // Optimize the case where the offset is known at compile time.
  if (Offset.getScalable()) {
    if (auto VLEN = ST.getRealVLen()) {
      // 1. Multiply the number of v-slots by the (constant) length of register.
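      // (e.g. a known VLEN of 128 bits gives VLENB = 16 bytes, so a scalable
      // offset of 16, i.e. two <vscale x 8 x i8> register slots, folds into a
      // fixed offset of 2 * 16 = 32 bytes)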
      const int64_t VLENB = *VLEN / 8;
      assert(Offset.getScalable() % RISCV::RVVBytesPerBlock == 0 &&
             "Reserve the stack by the multiple of one vector size.");
      const int64_t NumOfVReg = Offset.getScalable() / 8;
      const int64_t FixedOffset = NumOfVReg * VLENB;
      if (!isInt<32>(FixedOffset)) {
        reportFatalUsageError(
            "Frame size outside of the signed 32-bit range not supported");
      }
      Offset = StackOffset::getFixed(FixedOffset + Offset.getFixed());
    }
  }

  bool KillSrcReg = false;

  if (Offset.getScalable()) {
    unsigned ScalableAdjOpc = RISCV::ADD;
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // Get vlenb and multiply vlen with the number of vector registers.
    Register ScratchReg = DestReg;
    if (DestReg == SrcReg)
      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    assert(ScalableValue > 0 && "There is no need to get VLEN scaled value.");
    assert(ScalableValue % RISCV::RVVBytesPerBlock == 0 &&
           "Reserve the stack by the multiple of one vector size.");
    assert(isInt<32>(ScalableValue / RISCV::RVVBytesPerBlock) &&
           "Expect the number of vector registers within 32-bits.");
    uint32_t NumOfVReg = ScalableValue / RISCV::RVVBytesPerBlock;
    // Only use vsetvli rather than vlenb if adjusting in the prologue or
    // epilogue, otherwise it may disturb the VTYPE and VL status.
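    // (vsetvli rd, x0, e8, mN with rd != x0 sets rd to VLMAX, i.e. N * VLENB,
    // in a single instruction, but it also rewrites VL and VTYPE, which is
    // only safe around the prologue/epilogue)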
    bool IsPrologueOrEpilogue =
        Flag == MachineInstr::FrameSetup || Flag == MachineInstr::FrameDestroy;
    bool UseVsetvliRatherThanVlenb =
        IsPrologueOrEpilogue && ST.preferVsetvliOverReadVLENB();
    if (UseVsetvliRatherThanVlenb && (NumOfVReg == 1 || NumOfVReg == 2 ||
                                      NumOfVReg == 4 || NumOfVReg == 8)) {
      BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENBViaVSETVLIX0),
              ScratchReg)
          .addImm(NumOfVReg)
          .setMIFlag(Flag);
      BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
          .addReg(SrcReg)
          .addReg(ScratchReg, RegState::Kill)
          .setMIFlag(Flag);
    } else {
      if (UseVsetvliRatherThanVlenb)
        BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENBViaVSETVLIX0),
                ScratchReg)
            .addImm(1)
            .setMIFlag(Flag);
      else
        BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), ScratchReg)
            .setMIFlag(Flag);

      if (ScalableAdjOpc == RISCV::ADD && ST.hasStdExtZba() &&
          (NumOfVReg == 2 || NumOfVReg == 4 || NumOfVReg == 8)) {
        unsigned Opc = NumOfVReg == 2
                           ? RISCV::SH1ADD
                           : (NumOfVReg == 4 ? RISCV::SH2ADD : RISCV::SH3ADD);
        BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
            .addReg(ScratchReg, RegState::Kill)
            .addReg(SrcReg)
            .setMIFlag(Flag);
      } else {
        TII->mulImm(MF, MBB, II, DL, ScratchReg, NumOfVReg, Flag);
        BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
            .addReg(SrcReg)
            .addReg(ScratchReg, RegState::Kill)
            .setMIFlag(Flag);
      }
    }
    SrcReg = DestReg;
    KillSrcReg = true;
  }

  int64_t Val = Offset.getFixed();
  if (DestReg == SrcReg && Val == 0)
    return;

  const uint64_t Align = RequiredAlign.valueOrOne().value();

  if (isInt<12>(Val)) {
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Use the QC_E_ADDI instruction from the Xqcilia extension that can take a
  // signed 26-bit immediate.
  if (ST.hasVendorXqcilia() && isInt<26>(Val)) {
    // The one case where using this instruction is sub-optimal is if Val can
    // be materialized with a single compressible LUI and the following add/sub
    // is also compressible. Avoid using it in that case.
    int Hi20 = (Val & 0xFFFFF000) >> 12;
    bool IsCompressLUI =
        ((Val & 0xFFF) == 0) && (Hi20 != 0) &&
        (isUInt<5>(Hi20) || (Hi20 >= 0xfffe0 && Hi20 <= 0xfffff));
    bool IsCompressAddSub =
        (SrcReg == DestReg) &&
        ((Val > 0 && RISCV::GPRNoX0RegClass.contains(SrcReg)) ||
         (Val < 0 && RISCV::GPRCRegClass.contains(SrcReg)));

    if (!(IsCompressLUI && IsCompressAddSub)) {
      BuildMI(MBB, II, DL, TII->get(RISCV::QC_E_ADDI), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrcReg))
          .addImm(Val)
          .setMIFlag(Flag);
      return;
    }
  }

  // Try to split the offset across two ADDIs. We need to keep the intermediate
  // result aligned after each ADDI. We need to determine the maximum value we
  // can put in each ADDI. In the negative direction, we can use -2048 which is
  // always sufficiently aligned. In the positive direction, we need to find the
  // largest 12-bit immediate that is aligned. Exclude -4096 since it can be
  // created with LUI.
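  // (e.g. with a required alignment of 16, MaxPosAdjStep is 2032; Val = 4000
  // splits into an ADDI of 2032 followed by an ADDI of 1968, and the
  // intermediate value stays 16-byte aligned)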
  assert(Align < 2048 && "Required alignment too large");
  int64_t MaxPosAdjStep = 2048 - Align;
  if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
    int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
    Val -= FirstAdj;
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(FirstAdj)
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Use shNadd if doing so lets us materialize a 12 bit immediate with a single
  // instruction. This saves 1 instruction over the full lui/addi+add fallback
  // path. We avoid anything which can be done with a single lui as it might
  // be compressible. Note that the sh1add case is fully covered by the 2x addi
  // case just above and is thus omitted.
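  // (e.g. Val = 4100 = 1025 << 2: materialize 1025 into a scratch register,
  // then a single SH2ADD computes SrcReg + (1025 << 2))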
  if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) {
    unsigned Opc = 0;
    if (isShiftedInt<12, 3>(Val)) {
      Opc = RISCV::SH3ADD;
      Val = Val >> 3;
    } else if (isShiftedInt<12, 2>(Val)) {
      Opc = RISCV::SH2ADD;
      Val = Val >> 2;
    }
    if (Opc) {
      Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
      BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
          .addReg(ScratchReg, RegState::Kill)
          .addReg(SrcReg, getKillRegState(KillSrcReg))
          .setMIFlag(Flag);
      return;
    }
  }

  unsigned Opc = RISCV::ADD;
  if (Val < 0) {
    Val = -Val;
    Opc = RISCV::SUB;
  }

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
  BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrcReg))
      .addReg(ScratchReg, RegState::Kill)
      .setMIFlag(Flag);
}

static std::tuple<RISCVVType::VLMUL, const TargetRegisterClass &, unsigned>
getSpillReloadInfo(unsigned NumRemaining, uint16_t RegEncoding, bool IsSpill) {
  if (NumRemaining >= 8 && RegEncoding % 8 == 0)
    return {RISCVVType::LMUL_8, RISCV::VRM8RegClass,
            IsSpill ? RISCV::VS8R_V : RISCV::VL8RE8_V};
  if (NumRemaining >= 4 && RegEncoding % 4 == 0)
    return {RISCVVType::LMUL_4, RISCV::VRM4RegClass,
            IsSpill ? RISCV::VS4R_V : RISCV::VL4RE8_V};
  if (NumRemaining >= 2 && RegEncoding % 2 == 0)
    return {RISCVVType::LMUL_2, RISCV::VRM2RegClass,
            IsSpill ? RISCV::VS2R_V : RISCV::VL2RE8_V};
  return {RISCVVType::LMUL_1, RISCV::VRRegClass,
          IsSpill ? RISCV::VS1R_V : RISCV::VL1RE8_V};
}

// Split a VSPILLx_Mx/VRELOADx_Mx pseudo into multiple whole register stores
// or loads separated by LMUL*VLENB bytes.
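// For example, a 7-register segment starting at v1 (an odd encoding) is
// lowered to VS1R_V v1, VS2R_V v2m2, VS4R_V v4m4, stepping the base address
// by VLENB and then 2*VLENB between the stores.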
void RISCVRegisterInfo::lowerSegmentSpillReload(MachineBasicBlock::iterator II,
                                                bool IsSpill) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  unsigned NumRegs = NF * LMUL;
  assert(NumRegs <= 8 && "Invalid NF/LMUL combinations.");

  Register Reg = II->getOperand(0).getReg();
  uint16_t RegEncoding = TRI->getEncodingValue(Reg);
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  auto *OldMMO = *(II->memoperands_begin());
  LocationSize OldLoc = OldMMO->getSize();
  assert(OldLoc.isPrecise() && OldLoc.getValue().isKnownMultipleOf(NF));
  TypeSize VRegSize = OldLoc.getValue().divideCoefficientBy(NumRegs);

  Register VLENB = 0;
  unsigned VLENBShift = 0;
  unsigned PrevHandledNum = 0;
  unsigned I = 0;
  while (I != NumRegs) {
    auto [LMulHandled, RegClass, Opcode] =
        getSpillReloadInfo(NumRegs - I, RegEncoding, IsSpill);
    auto [RegNumHandled, _] = RISCVVType::decodeVLMUL(LMulHandled);
    bool IsLast = I + RegNumHandled == NumRegs;
    if (PrevHandledNum) {
      Register Step;
      // Optimize for constant VLEN.
      if (auto VLEN = STI.getRealVLen()) {
        int64_t Offset = *VLEN / 8 * PrevHandledNum;
        Step = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        STI.getInstrInfo()->movImm(MBB, II, DL, Step, Offset);
      } else {
        if (!VLENB) {
          VLENB = MRI.createVirtualRegister(&RISCV::GPRRegClass);
          BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VLENB);
        }
        uint32_t ShiftAmount = Log2_32(PrevHandledNum);
        // To avoid using an extra register, we shift the VLENB register and
        // remember how much it has been shifted. We can then use relative
        // shifts to adjust to the desired shift amount.
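        // (e.g. once a VS2R_V has been emitted, the next base adjustment is
        // 2 * VLENB; if the register currently holds VLENB unshifted, a
        // single SLLI by 1 produces it without a second scratch register)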
        if (VLENBShift > ShiftAmount) {
          BuildMI(MBB, II, DL, TII->get(RISCV::SRLI), VLENB)
              .addReg(VLENB, RegState::Kill)
              .addImm(VLENBShift - ShiftAmount);
        } else if (VLENBShift < ShiftAmount) {
          BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VLENB)
              .addReg(VLENB, RegState::Kill)
              .addImm(ShiftAmount - VLENBShift);
        }
        VLENBShift = ShiftAmount;
        Step = VLENB;
      }

      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(Step, getKillRegState(Step != VLENB || IsLast));
      Base = NewBase;
    }

    MCRegister ActualReg = findVRegWithEncoding(RegClass, RegEncoding);
    MachineInstrBuilder MIB =
        BuildMI(MBB, II, DL, TII->get(Opcode))
            .addReg(ActualReg, getDefRegState(!IsSpill))
            .addReg(Base, getKillRegState(IsLast))
            .addMemOperand(MF.getMachineMemOperand(OldMMO, OldMMO->getOffset(),
                                                   VRegSize * RegNumHandled));

    // Add an implicit use of the super register to convey that only part of
    // it is used; this keeps the machine verifier from complaining when part
    // of the subreg is undef. See the comment in
    // MachineVerifier::checkLiveness for more detail.
    if (IsSpill)
      MIB.addReg(Reg, RegState::Implicit);

    PrevHandledNum = RegNumHandled;
    RegEncoding += RegNumHandled;
    I += RegNumHandled;
  }
  II->eraseFromParent();
}

bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;
  StackOffset Offset =
      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
  bool IsRVVSpill = RISCV::isRVVSpill(MI);
  if (!IsRVVSpill)
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());

  if (!isInt<32>(Offset.getFixed())) {
    reportFatalUsageError(
        "Frame offsets outside of the signed 32-bit range not supported");
  }

  if (!IsRVVSpill) {
    int64_t Val = Offset.getFixed();
    int64_t Lo12 = SignExtend64<12>(Val);
    unsigned Opc = MI.getOpcode();

    if (Opc == RISCV::ADDI && !isInt<12>(Val)) {
      // We chose to emit the canonical immediate sequence rather than folding
      // the offset into the using add under the theory that doing so doesn't
      // save dynamic instruction count and some target may fuse the canonical
      // 32 bit immediate sequence. We still need to clear the portion of the
      // offset encoded in the immediate.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else if ((Opc == RISCV::PREFETCH_I || Opc == RISCV::PREFETCH_R ||
                Opc == RISCV::PREFETCH_W) &&
               (Lo12 & 0b11111) != 0) {
      // Prefetch instructions require the offset to be 32 byte aligned.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else if (Opc == RISCV::MIPS_PREF && !isUInt<9>(Val)) {
      // The MIPS prefetch instruction requires the offset to fit in 9 bits.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
                Opc == RISCV::PseudoRV32ZdinxSD) &&
               Lo12 >= 2044) {
      // This instruction will be split into 2 instructions. The second
      // instruction will add 4 to the immediate. If that would overflow 12
      // bits, we can't fold the offset.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else {
      // We can encode an add with 12 bit signed immediate in the immediate
      // operand of our user instruction. As a result, the remaining
      // offset can, by construction, be materialized with at worst a LUI
      // and an ADD.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
      Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
                                Offset.getScalable());
    }
  }

  if (Offset.getScalable() || Offset.getFixed()) {
    Register DestReg;
    if (MI.getOpcode() == RISCV::ADDI)
      DestReg = MI.getOperand(0).getReg();
    else
      DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
              MachineInstr::NoFlags, std::nullopt);
    MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/true);
  } else {
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/false);
  }

  // If after materializing the adjustment, we have a pointless ADDI, remove it.
  if (MI.getOpcode() == RISCV::ADDI &&
      MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getOperand(2).getImm() == 0) {
    MI.eraseFromParent();
    return true;
  }

  // Handle spill/fill of synthetic register classes for segment operations to
  // ensure correctness in the edge case one gets spilled.
  switch (MI.getOpcode()) {
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVSPILL8_M1:
    lowerSegmentSpillReload(II, /*IsSpill=*/true);
    return true;
  case RISCV::PseudoVRELOAD2_M1:
  case RISCV::PseudoVRELOAD2_M2:
  case RISCV::PseudoVRELOAD2_M4:
  case RISCV::PseudoVRELOAD3_M1:
  case RISCV::PseudoVRELOAD3_M2:
  case RISCV::PseudoVRELOAD4_M1:
  case RISCV::PseudoVRELOAD4_M2:
  case RISCV::PseudoVRELOAD5_M1:
  case RISCV::PseudoVRELOAD6_M1:
  case RISCV::PseudoVRELOAD7_M1:
  case RISCV::PseudoVRELOAD8_M1:
    lowerSegmentSpillReload(II, /*IsSpill=*/false);
    return true;
  }

  return false;
}

bool RISCVRegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

// Returns true if the instruction's frame index reference would be better
// served by a base register other than FP or SP.
// Used by LocalStackSlotAllocation pass to determine which frame index
// references it should create new base registers for.
bool RISCVRegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  // For RISC-V, the machine instructions that include a FrameIndex operand
  // are load/store and ADDI instructions.
  unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
  if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
    return false;
  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  const MachineFunction &MF = *MI->getMF();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  if (TFI->hasFP(MF) && !shouldRealignStack(MF)) {
    auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
    // Estimate the stack size used to store callee saved registers
    // (excluding reserved registers).
    unsigned CalleeSavedSize = 0;
    for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R;
         ++R) {
      if (Subtarget.isRegisterReservedByUser(Reg))
        continue;

      if (RISCV::GPRRegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::GPRRegClass);
      else if (RISCV::FPR64RegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::FPR64RegClass);
      else if (RISCV::FPR32RegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::FPR32RegClass);
      // Ignore vector registers.
    }

    int64_t MaxFPOffset = Offset - CalleeSavedSize;
    return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);
  }

  // Assume 128 bytes spill slots size to estimate the maximum possible
  // offset relative to the stack pointer.
  // FIXME: The 128 is copied from ARM. We should run some statistics and pick a
  // real one for RISC-V.
  int64_t MaxSPOffset = Offset + 128;
  MaxSPOffset += MFI.getLocalFrameSize();
  return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
}

// Determine whether a given base register plus offset immediate is
// encodable to resolve a frame index.
bool RISCVRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                           Register BaseReg,
                                           int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
  return isInt<12>(Offset);
}

// Insert defining instruction(s) for a pointer to FrameIdx before
// insertion point I.
// Return the materialized frame base register.
Register RISCVRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                         int FrameIdx,
                                                         int64_t Offset) const {
  MachineBasicBlock::iterator MBBI = MBB->begin();
  DebugLoc DL;
  if (MBBI != MBB->end())
    DL = MBBI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MFI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  Register BaseReg = MFI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset);
  return BaseReg;
}

// Resolve a frame index operand of an instruction to reference the
// indicated base register plus offset instead.
void RISCVRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
  // FrameIndex operands are always represented as a
  // register followed by an immediate.
  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

// Get the offset from the referenced frame index in the instruction,
// if there is one.
int64_t RISCVRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                    int Idx) const {
  assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
          RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
         "The MI must be I or S format.");
  assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
                                       "FrameIndex operand");
  return MI->getOperand(Idx + 1).getImm();
}

Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
}

StringRef RISCVRegisterInfo::getRegAsmName(MCRegister Reg) const {
  if (Reg == RISCV::SF_VCIX_STATE)
    return "sf.vcix_state";
  return TargetRegisterInfo::getRegAsmName(Reg);
}

const uint32_t *
RISCVRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID CC) const {
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  if (CC == CallingConv::GHC)
    return CSR_NoRegs_RegMask;
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if (CC == CallingConv::PreserveMost) {
    if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
      return CSR_RT_MostRegs_RVE_RegMask;
    return CSR_RT_MostRegs_RegMask;
  }
  switch (ABI) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64E:
    return CSR_ILP32E_LP64E_RegMask;
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    if (CC == CallingConv::RISCV_VectorCall)
      return CSR_ILP32_LP64_V_RegMask;
    return CSR_ILP32_LP64_RegMask;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    if (CC == CallingConv::RISCV_VectorCall)
      return CSR_ILP32F_LP64F_V_RegMask;
    return CSR_ILP32F_LP64F_RegMask;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    if (CC == CallingConv::RISCV_VectorCall)
      return CSR_ILP32D_LP64D_V_RegMask;
    return CSR_ILP32D_LP64D_RegMask;
  }
}

const TargetRegisterClass *
RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &) const {
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRNoV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRM2NoV0RegClass)
    return &RISCV::VRM2RegClass;
  if (RC == &RISCV::VRM4NoV0RegClass)
    return &RISCV::VRM4RegClass;
  if (RC == &RISCV::VRM8NoV0RegClass)
    return &RISCV::VRM8RegClass;
  return RC;
}

void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
                                         SmallVectorImpl<uint64_t> &Ops) const {
  // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
  // to represent one vector register. The dwarf offset is
  // VLENB * scalable_offset / 8.
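  // For example, StackOffset::get(16, 32) appends the fixed offset 16 first,
  // then DW_OP_constu 4, DW_OP_bregx VLENB 0, DW_OP_mul, DW_OP_plus
  // (32 scalable bytes / 8 = 4 vector registers).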
  assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
  int64_t VLENBSized = Offset.getScalable() / 8;
  if (VLENBSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VLENBSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}

unsigned
RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
  return MF.getSubtarget<RISCVSubtarget>().hasStdExtZca() && !DisableCostPerUse
             ? 1
             : 0;
}

float RISCVRegisterInfo::getSpillWeightScaleFactor(
    const TargetRegisterClass *RC) const {
  return getRegClassWeight(RC).RegWeight;
}

// Add two address hints to improve chances of being able to use a compressed
// instruction.
bool RISCVRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (!VRM || DisableRegAllocHints)
    return BaseImplRetVal;

  // Add any two address hints after any copy hints.
  SmallSet<Register, 4> TwoAddrHints;

  auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
                        bool NeedGPRC) -> void {
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    // TODO: Support GPRPair subregisters? Need to be careful with even/odd
    // registers. If the virtual register is an odd register of a pair and the
    // physical register is even (or vice versa), we should not add the hint.
    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg)) &&
        !MO.getSubReg() && !VRRegMO.getSubReg()) {
      if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
        TwoAddrHints.insert(PhysReg);
    }
  };

  // This is all of the compressible binary instructions. If an instruction
  // needs GPRC register class operands \p NeedGPRC will be set to true.
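  // (e.g. c.sub needs both its source registers in x8-x15, the GPRC class,
  // while c.add and c.slli accept any GPR)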
  auto isCompressible = [&Subtarget](const MachineInstr &MI, bool &NeedGPRC) {
    NeedGPRC = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    case RISCV::SUB:
    case RISCV::ADDW:
    case RISCV::SUBW:
      NeedGPRC = true;
      return true;
    case RISCV::ANDI: {
      NeedGPRC = true;
      if (!MI.getOperand(2).isImm())
        return false;
      int64_t Imm = MI.getOperand(2).getImm();
      if (isInt<6>(Imm))
        return true;
      // c.zext.b
      return Subtarget.hasStdExtZcb() && Imm == 255;
    }
    case RISCV::SRAI:
    case RISCV::SRLI:
      NeedGPRC = true;
      return true;
    case RISCV::ADD:
    case RISCV::SLLI:
      return true;
    case RISCV::ADDI:
    case RISCV::ADDIW:
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    case RISCV::MUL:
    case RISCV::SEXT_B:
    case RISCV::SEXT_H:
    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:
      // c.mul, c.sext.b, c.sext.h, c.zext.h
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb();
    case RISCV::ADD_UW:
      // c.zext.w
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isReg() &&
             MI.getOperand(2).getReg() == RISCV::X0;
    case RISCV::XORI:
      // c.not
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isImm() &&
             MI.getOperand(2).getImm() == -1;
    }
  };

  // Returns true if this operand is compressible. For non-registers it always
  // returns true. Immediate range was already checked in isCompressible.
  // For registers, it checks if the register is a GPRC register. reg-reg
  // instructions that require GPRC need all register operands to be GPRC.
  auto isCompressibleOpnd = [&](const MachineOperand &MO) {
    if (!MO.isReg())
      return true;
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
  };

  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
    const MachineInstr &MI = *MO.getParent();
    unsigned OpIdx = MO.getOperandNo();
    bool NeedGPRC;
    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        if (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
            MI.getOpcode() == RISCV::ADD_UW ||
            isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 && (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
                                isCompressibleOpnd(MI.getOperand(2)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      }
    }

    // Add a hint if it would allow auipc/lui+addi(w) fusion. We do this even
    // without the fusions explicitly enabled as the impact is rarely negative
    // and some cores do implement this fusion.
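    // (e.g. lui a0, %hi(sym) followed by addi a0, a0, %lo(sym): allocating
    // the ADDI's def and use to the same physical register keeps the pair
    // eligible for macro-op fusion on cores that implement it)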
    if ((MI.getOpcode() == RISCV::ADDIW || MI.getOpcode() == RISCV::ADDI) &&
        MI.getOperand(1).isReg()) {
      const MachineBasicBlock &MBB = *MI.getParent();
      MachineBasicBlock::const_iterator I = MI.getIterator();
      // Is the previous instruction a LUI or AUIPC that can be fused?
      if (I != MBB.begin()) {
        I = skipDebugInstructionsBackward(std::prev(I), MBB.begin());
        if ((I->getOpcode() == RISCV::LUI || I->getOpcode() == RISCV::AUIPC) &&
            I->getOperand(0).getReg() == MI.getOperand(1).getReg()) {
          if (OpIdx == 0)
            tryAddHint(MO, MI.getOperand(1), /*NeedGPRC=*/false);
          else
            tryAddHint(MO, MI.getOperand(0), /*NeedGPRC=*/false);
        }
      }
    }
  }

  for (MCPhysReg OrderReg : Order)
    if (TwoAddrHints.count(OrderReg))
      Hints.push_back(OrderReg);

  return BaseImplRetVal;
}

Register
RISCVRegisterInfo::findVRegWithEncoding(const TargetRegisterClass &RegClass,
                                        uint16_t Encoding) const {
  MCRegister Reg = RISCV::V0 + Encoding;
  if (RISCVRI::getLMul(RegClass.TSFlags) == RISCVVType::LMUL_1)
    return Reg;
  return getMatchingSuperReg(Reg, RISCV::sub_vrm1_0, &RegClass);
}