//===-- RISCVRegisterInfo.cpp - RISC-V Register Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISC-V implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVRegisterInfo.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/ErrorHandling.h"

#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"

using namespace llvm;

static cl::opt<bool>
    DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
                         cl::init(false),
                         cl::desc("Disable two address hints for register "
                                  "allocation"));

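// The asserts below check that the TableGen-assigned register (and, later,
// sub-register index) numbering is consecutive; code in this backend relies
// on forming a register by offsetting from the first register of a class.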
static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");

RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
    : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
                           /*PC*/0, HwMode) {}

const MCPhysReg *
RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_NoRegs_SaveList;
  if (MF->getFunction().hasFnAttribute("interrupt")) {
    if (Subtarget.hasStdExtD())
      return CSR_XLEN_F64_Interrupt_SaveList;
    if (Subtarget.hasStdExtF())
      return CSR_XLEN_F32_Interrupt_SaveList;
    return CSR_Interrupt_SaveList;
  }

  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    return CSR_ILP32_LP64_SaveList;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    return CSR_ILP32F_LP64F_SaveList;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    return CSR_ILP32D_LP64D_SaveList;
  }
}

BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  BitVector Reserved(getNumRegs());

  // Mark any registers requested to be reserved as such
  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    if (MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(Reg))
      markSuperRegs(Reserved, Reg);
  }

  // Use markSuperRegs to ensure any register aliases are also reserved
  markSuperRegs(Reserved, RISCV::X0); // zero
  markSuperRegs(Reserved, RISCV::X2); // sp
  markSuperRegs(Reserved, RISCV::X3); // gp
  markSuperRegs(Reserved, RISCV::X4); // tp
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, RISCV::X8); // fp
  // Reserve the base register if we need to realign the stack and allocate
  // variable-sized objects at runtime.
  if (TFI->hasBP(MF))
    markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp

  // Additionally reserve the dummy register used to form the register pair
  // beginning with 'x0' for instructions that take register pairs.
  markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);

  // V registers for code generation. We handle them manually.
  markSuperRegs(Reserved, RISCV::VL);
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);
  markSuperRegs(Reserved, RISCV::VLENB); // vlenb (constant)

  // Floating point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                         MCRegister PhysReg) const {
  return !MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(PhysReg);
}

const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

// Frame indexes representing locations of CSRs which are given a fixed location
// by save/restore libcalls or Zcmp Push/Pop.
static const std::pair<unsigned, int> FixedCSRFIMap[] = {
    {/*ra*/ RISCV::X1, -1},
    {/*s0*/ RISCV::X8, -2},
    {/*s1*/ RISCV::X9, -3},
    {/*s2*/ RISCV::X18, -4},
    {/*s3*/ RISCV::X19, -5},
    {/*s4*/ RISCV::X20, -6},
    {/*s5*/ RISCV::X21, -7},
    {/*s6*/ RISCV::X22, -8},
    {/*s7*/ RISCV::X23, -9},
    {/*s8*/ RISCV::X24, -10},
    {/*s9*/ RISCV::X25, -11},
    {/*s10*/ RISCV::X26, -12},
    {/*s11*/ RISCV::X27, -13}
};

bool RISCVRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                             Register Reg,
                                             int &FrameIdx) const {
  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  if (!RVFI->useSaveRestoreLibCalls(MF) && !RVFI->isPushable(MF))
    return false;

  const auto *FII =
      llvm::find_if(FixedCSRFIMap, [&](auto P) { return P.first == Reg; });
  if (FII == std::end(FixedCSRFIMap))
    return false;

  FrameIdx = FII->second;
  return true;
}

void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator II,
                                  const DebugLoc &DL, Register DestReg,
                                  Register SrcReg, StackOffset Offset,
                                  MachineInstr::MIFlag Flag,
                                  MaybeAlign RequiredAlign) const {

  if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
    return;

  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = ST.getInstrInfo();

  bool KillSrcReg = false;

  if (Offset.getScalable()) {
    unsigned ScalableAdjOpc = RISCV::ADD;
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // Get vlenb and multiply it by the number of vector registers.
    Register ScratchReg = DestReg;
    if (DestReg == SrcReg)
      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    TII->getVLENFactoredAmount(MF, MBB, II, DL, ScratchReg, ScalableValue, Flag);
    BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
        .addReg(SrcReg).addReg(ScratchReg, RegState::Kill)
        .setMIFlag(Flag);
    SrcReg = DestReg;
    KillSrcReg = true;
  }

  int64_t Val = Offset.getFixed();
  if (DestReg == SrcReg && Val == 0)
    return;

  const uint64_t Align = RequiredAlign.valueOrOne().value();

  if (isInt<12>(Val)) {
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Try to split the offset across two ADDIs. We need to keep the intermediate
  // result aligned after each ADDI. We need to determine the maximum value we
  // can put in each ADDI. In the negative direction, we can use -2048 which is
  // always sufficiently aligned. In the positive direction, we need to find the
  // largest 12-bit immediate that is aligned. Exclude -4096 since it can be
  // created with LUI.
  assert(Align < 2048 && "Required alignment too large");
  int64_t MaxPosAdjStep = 2048 - Align;
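  // For example (illustrative values): with RequiredAlign = 16, MaxPosAdjStep
  // is 2032, so a fixed offset of 3000 becomes ADDI +2032 followed by
  // ADDI +968, and the intermediate result stays 16-byte aligned.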
  if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
    int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
    Val -= FirstAdj;
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(FirstAdj)
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  unsigned Opc = RISCV::ADD;
  if (Val < 0) {
    Val = -Val;
    Opc = RISCV::SUB;
  }

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
  BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrcReg))
      .addReg(ScratchReg, RegState::Kill)
      .setMIFlag(Flag);
}

// Split a VSPILLx_Mx pseudo into multiple whole register stores separated by
// LMUL*VLENB bytes.
void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VS1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VS2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VS4R_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
  uint32_t ShiftAmount = Log2_32(LMUL);
  if (ShiftAmount != 0)
    BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
        .addReg(VL)
        .addImm(ShiftAmount);
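  // VL now holds LMUL * VLENB, the byte distance between consecutive
  // whole-register parts of the segment register group.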

  Register SrcReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    // Add an implicit use of the super register to indicate that only part of
    // it is being used; this prevents the machine verifier from complaining
    // when part of the subreg is undef. See the comment in
    // MachineVerifier::checkLiveness for more detail.
    BuildMI(MBB, II, DL, TII->get(Opcode))
        .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()))
        .addReg(SrcReg, RegState::Implicit);
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

// Split a VRELOADx_Mx pseudo into multiple whole register loads separated by
// LMUL*VLENB bytes.
void RISCVRegisterInfo::lowerVRELOAD(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VL1RE8_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VL2RE8_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VL4RE8_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
  uint32_t ShiftAmount = Log2_32(LMUL);
  if (ShiftAmount != 0)
    BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
        .addReg(VL)
        .addImm(ShiftAmount);
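  // As in lowerVSPILL, VL now holds LMUL * VLENB, the stride between the
  // individual whole-register loads emitted below.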

  Register DestReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    BuildMI(MBB, II, DL, TII->get(Opcode),
            TRI->getSubReg(DestReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()));
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  DebugLoc DL = MI.getDebugLoc();

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;
  StackOffset Offset =
      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
  bool IsRVVSpill = RISCV::isRVVSpill(MI);
  if (!IsRVVSpill)
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());

  if (Offset.getScalable() &&
      ST.getRealMinVLen() == ST.getRealMaxVLen()) {
    // For an exact VLEN value, scalable offsets become constant and thus
    // can be converted entirely into fixed offsets.
    int64_t FixedValue = Offset.getFixed();
    int64_t ScalableValue = Offset.getScalable();
    assert(ScalableValue % 8 == 0 &&
           "Scalable offset is not a multiple of a single vector size.");
    int64_t NumOfVReg = ScalableValue / 8;
    int64_t VLENB = ST.getRealMinVLen() / 8;
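    // E.g. if VLEN is known to be exactly 128 bits, VLENB is 16, so a
    // scalable offset of 24 (three vector registers, 3 * 8) folds into
    // 3 * 16 = 48 fixed bytes.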
    Offset = StackOffset::getFixed(FixedValue + NumOfVReg * VLENB);
  }

  if (!isInt<32>(Offset.getFixed())) {
    report_fatal_error(
        "Frame offsets outside of the signed 32-bit range not supported");
  }

  if (!IsRVVSpill) {
    if (MI.getOpcode() == RISCV::ADDI && !isInt<12>(Offset.getFixed())) {
      // We chose to emit the canonical immediate sequence rather than folding
      // the offset into the using add under the theory that doing so doesn't
      // save dynamic instruction count and some target may fuse the canonical
      // 32 bit immediate sequence. We still need to clear the portion of the
      // offset encoded in the immediate.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else {
      // We can encode an add with a 12 bit signed immediate in the immediate
      // operand of our user instruction. As a result, the remaining offset
      // can, by construction, be materialized with at worst a LUI and an ADD.
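      // E.g. a fixed offset of 0x1234 keeps Lo12 = 0x234 in the instruction,
      // while the remaining 0x1000 is added to FrameReg via adjustReg below.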
      int64_t Val = Offset.getFixed();
      int64_t Lo12 = SignExtend64<12>(Val);
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
      Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
                                Offset.getScalable());
    }
  }

  if (Offset.getScalable() || Offset.getFixed()) {
    Register DestReg;
    if (MI.getOpcode() == RISCV::ADDI)
      DestReg = MI.getOperand(0).getReg();
    else
      DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
              MachineInstr::NoFlags, std::nullopt);
    MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/true);
  } else {
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/false);
  }

  // If after materializing the adjustment, we have a pointless ADDI, remove it
  if (MI.getOpcode() == RISCV::ADDI &&
      MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getOperand(2).getImm() == 0) {
    MI.eraseFromParent();
    return true;
  }

  // Handle spill/fill of synthetic register classes for segment operations to
  // ensure correctness in the edge case one gets spilled. There are many
  // possible optimizations here, but given the extreme rarity of such spills,
  // we prefer simplicity of implementation for now.
  switch (MI.getOpcode()) {
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVSPILL8_M1:
    lowerVSPILL(II);
    return true;
  case RISCV::PseudoVRELOAD2_M1:
  case RISCV::PseudoVRELOAD2_M2:
  case RISCV::PseudoVRELOAD2_M4:
  case RISCV::PseudoVRELOAD3_M1:
  case RISCV::PseudoVRELOAD3_M2:
  case RISCV::PseudoVRELOAD4_M1:
  case RISCV::PseudoVRELOAD4_M2:
  case RISCV::PseudoVRELOAD5_M1:
  case RISCV::PseudoVRELOAD6_M1:
  case RISCV::PseudoVRELOAD7_M1:
  case RISCV::PseudoVRELOAD8_M1:
    lowerVRELOAD(II);
    return true;
  }

  return false;
}

bool RISCVRegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

// Returns true if the instruction's frame index reference would be better
// served by a base register other than FP or SP.
// Used by LocalStackSlotAllocation pass to determine which frame index
// references it should create new base registers for.
bool RISCVRegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  // For RISC-V, the machine instructions that include a FrameIndex operand
  // are loads, stores, and ADDI instructions.
  unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
  if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
    return false;
  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  const MachineFunction &MF = *MI->getMF();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned CalleeSavedSize = 0;
  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);

  // Estimate the stack size used to store callee saved registers
  // (excluding reserved registers).
  BitVector ReservedRegs = getReservedRegs(MF);
  for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R; ++R) {
    if (!ReservedRegs.test(Reg))
      CalleeSavedSize += getSpillSize(*getMinimalPhysRegClass(Reg));
  }

  int64_t MaxFPOffset = Offset - CalleeSavedSize;
  if (TFI->hasFP(MF) && !shouldRealignStack(MF))
    return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);

  // Assume 128 bytes spill slots size to estimate the maximum possible
  // offset relative to the stack pointer.
  // FIXME: The 128 is copied from ARM. We should run some statistics and pick a
  // real one for RISC-V.
  int64_t MaxSPOffset = Offset + 128;
  MaxSPOffset += MFI.getLocalFrameSize();
  return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
}

// Determine whether a given base register plus offset immediate is
// encodable to resolve a frame index.
bool RISCVRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                           Register BaseReg,
                                           int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
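  // I- and S-format immediates are 12-bit signed, so the combined offset is
  // legal only if it fits in that range.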
  return isInt<12>(Offset);
}

// Insert defining instruction(s) for a pointer to FrameIdx before
// insertion point I.
// Return materialized frame pointer.
Register RISCVRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                         int FrameIdx,
                                                         int64_t Offset) const {
  MachineBasicBlock::iterator MBBI = MBB->begin();
  DebugLoc DL;
  if (MBBI != MBB->end())
    DL = MBBI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MFI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  Register BaseReg = MFI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset);
  return BaseReg;
}

// Resolve a frame index operand of an instruction to reference the
// indicated base register plus offset instead.
void RISCVRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
  // FrameIndex operands are always represented as a
  // register followed by an immediate.
  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

// Get the offset from the referenced frame index in the instruction,
// if there is one.
int64_t RISCVRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                    int Idx) const {
  assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
          RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
         "The MI must be I or S format.");
  assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
                                       "FrameIndex operand");
  return MI->getOperand(Idx + 1).getImm();
}

Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
}

const uint32_t *
RISCVRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID CC) const {
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  if (CC == CallingConv::GHC)
    return CSR_NoRegs_RegMask;
  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    return CSR_ILP32_LP64_RegMask;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    return CSR_ILP32F_LP64F_RegMask;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    return CSR_ILP32D_LP64D_RegMask;
  }
}

const TargetRegisterClass *
RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &) const {
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  return RC;
}

void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
                                         SmallVectorImpl<uint64_t> &Ops) const {
  // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
  // to represent one vector register. The dwarf offset is
  // VLENB * scalable_offset / 8.
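  // For example, a scalable offset of +16 (two vector registers) is emitted
  // as: DW_OP_constu 2, DW_OP_bregx <vlenb regnum> 0, DW_OP_mul, DW_OP_plus.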
  assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
  int64_t VLENBSized = Offset.getScalable() / 8;
  if (VLENBSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VLENBSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}

unsigned
RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
  return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() ? 1 : 0;
}

// Add two address hints to improve chances of being able to use a compressed
// instruction.
bool RISCVRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (!VRM || DisableRegAllocHints)
    return BaseImplRetVal;

  // Add any two address hints after any copy hints.
  SmallSet<Register, 4> TwoAddrHints;

  auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
                        bool NeedGPRC) -> void {
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg))) {
      assert(!MO.getSubReg() && !VRRegMO.getSubReg() && "Unexpected subreg!");
      if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
        TwoAddrHints.insert(PhysReg);
    }
  };

  // This is all of the compressible binary instructions. If an instruction
  // needs GPRC register class operands \p NeedGPRC will be set to true.
  auto isCompressible = [](const MachineInstr &MI, bool &NeedGPRC) {
    NeedGPRC = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    case RISCV::SUB:
    case RISCV::ADDW:
    case RISCV::SUBW:
      NeedGPRC = true;
      return true;
    case RISCV::ANDI:
      NeedGPRC = true;
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    case RISCV::SRAI:
    case RISCV::SRLI:
      NeedGPRC = true;
      return true;
    case RISCV::ADD:
    case RISCV::SLLI:
      return true;
    case RISCV::ADDI:
    case RISCV::ADDIW:
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    }
  };

  // Returns true if this operand is compressible. For non-registers it always
  // returns true. Immediate range was already checked in isCompressible.
  // For registers, it checks if the register is a GPRC register. reg-reg
  // instructions that require GPRC need all register operands to be GPRC.
  auto isCompressibleOpnd = [&](const MachineOperand &MO) {
    if (!MO.isReg())
      return true;
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
  };

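  // Walk every use/def of VirtReg. When it appears in a compressible
  // instruction, hint it toward the physical register already assigned to the
  // other operand so that, e.g., an ADDI whose destination and source end up
  // identical can later be emitted as C.ADDI.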
  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
    const MachineInstr &MI = *MO.getParent();
    unsigned OpIdx = MO.getOperandNo();
    bool NeedGPRC;
    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        if (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      }
    }
  }

  for (MCPhysReg OrderReg : Order)
    if (TwoAddrHints.count(OrderReg))
      Hints.push_back(OrderReg);

  return BaseImplRetVal;
}