LLVM 23.0.0git
RISCVRegisterInfo.cpp
Go to the documentation of this file.
1//===-- RISCVRegisterInfo.cpp - RISC-V Register Information -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the RISC-V implementation of the TargetRegisterInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "RISCVRegisterInfo.h"
14#include "RISCV.h"
15#include "RISCVSubtarget.h"
16#include "llvm/ADT/SmallSet.h"
26
27#define GET_REGINFO_TARGET_DESC
28#include "RISCVGenRegisterInfo.inc"
29
30using namespace llvm;
31
// Escape hatch: when set, getRegisterCostTableIndex() stops reporting a
// nonzero cost-per-use index for subtargets with the Zca (compressed)
// extension, i.e. the allocator no longer prefers compressible registers.
static cl::opt<bool> DisableCostPerUse("riscv-disable-cost-per-use",
                                       cl::init(false), cl::Hidden);
// Escape hatch: suppresses the two-address / pairing hints added in
// getRegAllocationHints() below.
static cl::opt<bool>
    DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
                         cl::init(false),
                         cl::desc("Disable two address hints for register "
                                  "allocation"));
39
// Code in this file computes one physical register from another by adding a
// delta to its TableGen-generated enum value (e.g. walking X16_H..X31_H when
// reserving the upper GPRs for RVE, or forming the partner register of a
// Zilsd pair with PhysReg +/- 1). These asserts pin down the generated enum
// layout so that such register arithmetic stays valid.
static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_Q == RISCV::F0_Q + 1, "Register list not consecutive");
static_assert(RISCV::F31_Q == RISCV::F0_Q + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");
56
58 : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
59 /*PC*/0, HwMode) {}
60
61const MCPhysReg *
63 return CSR_IPRA_SaveList;
64}
65
66const MCPhysReg *
68 auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
70 return CSR_NoRegs_SaveList;
72 return Subtarget.hasStdExtE() ? CSR_RT_MostRegs_RVE_SaveList
73 : CSR_RT_MostRegs_SaveList;
74 if (MF->getFunction().hasFnAttribute("interrupt")) {
75 if (Subtarget.hasVInstructions()) {
76 if (Subtarget.hasStdExtD())
77 return Subtarget.hasStdExtE() ? CSR_XLEN_F64_V_Interrupt_RVE_SaveList
78 : CSR_XLEN_F64_V_Interrupt_SaveList;
79 if (Subtarget.hasStdExtF())
80 return Subtarget.hasStdExtE() ? CSR_XLEN_F32_V_Interrupt_RVE_SaveList
81 : CSR_XLEN_F32_V_Interrupt_SaveList;
82 return Subtarget.hasStdExtE() ? CSR_XLEN_V_Interrupt_RVE_SaveList
83 : CSR_XLEN_V_Interrupt_SaveList;
84 }
85 if (Subtarget.hasStdExtD())
86 return Subtarget.hasStdExtE() ? CSR_XLEN_F64_Interrupt_RVE_SaveList
87 : CSR_XLEN_F64_Interrupt_SaveList;
88 if (Subtarget.hasStdExtF())
89 return Subtarget.hasStdExtE() ? CSR_XLEN_F32_Interrupt_RVE_SaveList
90 : CSR_XLEN_F32_Interrupt_SaveList;
91 return Subtarget.hasStdExtE() ? CSR_Interrupt_RVE_SaveList
92 : CSR_Interrupt_SaveList;
93 }
94
95 bool HasVectorCSR =
97 Subtarget.hasVInstructions();
98
99 switch (Subtarget.getTargetABI()) {
100 default:
101 llvm_unreachable("Unrecognized ABI");
104 return CSR_ILP32E_LP64E_SaveList;
107 if (HasVectorCSR)
108 return CSR_ILP32_LP64_V_SaveList;
109 return CSR_ILP32_LP64_SaveList;
112 if (HasVectorCSR)
113 return CSR_ILP32F_LP64F_V_SaveList;
114 return CSR_ILP32F_LP64F_SaveList;
117 if (HasVectorCSR)
118 return CSR_ILP32D_LP64D_V_SaveList;
119 return CSR_ILP32D_LP64D_SaveList;
120 }
121}
122
124 const MachineOperand &MO, const MachineRegisterInfo &MRI) const {
125 const RISCVSubtarget &STI = MRI.getMF().getSubtarget<RISCVSubtarget>();
126
127 const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(MO.getReg());
128 if (const RegisterBank *RB = dyn_cast<const RegisterBank *>(RCOrRB))
129 return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB,
130 STI.is64Bit());
131
132 if (const auto *RC = dyn_cast<const TargetRegisterClass *>(RCOrRB)) {
133 return getAllocatableClass(RC);
134 }
135
136 return nullptr;
137}
138
                                              bool Is64Bit) const {
  // Scalar integers: a GPR holds anything up to XLEN bits (32 bits, or
  // 64 bits when Is64Bit).
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (Is64Bit && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  // Scalar floating point: the register class must match the value width
  // exactly (no implicit widening).
  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  // Vectors: select the register-group class (VR / VRM2 / VRM4 / VRM8) from
  // the type's known-minimum size, with <= 64 bits mapping to a single
  // vector register.
  if (RB.getID() == RISCV::VRBRegBankID) {
    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
      return &RISCV::VRRegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 128)
      return &RISCV::VRM2RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 256)
      return &RISCV::VRM4RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 512)
      return &RISCV::VRM8RegClass;
  }

  // Unsupported bank/size combination; the caller must cope with nullptr.
  return nullptr;
}
172
174 const RISCVFrameLowering *TFI = getFrameLowering(MF);
175 BitVector Reserved(getNumRegs());
176 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
177
178 for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
179 // Mark any GPRs requested to be reserved as such
180 if (Subtarget.isRegisterReservedByUser(Reg))
181 markSuperRegs(Reserved, Reg);
182
183 // Mark all the registers defined as constant in TableGen as reserved.
184 if (isConstantPhysReg(Reg))
185 markSuperRegs(Reserved, Reg);
186 }
187
188 // Use markSuperRegs to ensure any register aliases are also reserved
189 markSuperRegs(Reserved, RISCV::X2_H); // sp
190 markSuperRegs(Reserved, RISCV::X3_H); // gp
191 markSuperRegs(Reserved, RISCV::X4_H); // tp
192 if (TFI->hasFP(MF))
193 markSuperRegs(Reserved, RISCV::X8_H); // fp
194 // Reserve the base register if we need to realign the stack and allocate
195 // variable-sized objects at runtime.
196 if (TFI->hasBP(MF))
197 markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp
198
199 // Additionally reserve dummy register used to form the register pair
200 // beginning with 'x0' for instructions that take register pairs.
201 markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);
202
203 // There are only 16 GPRs for RVE.
204 if (Subtarget.hasStdExtE())
205 for (MCPhysReg Reg = RISCV::X16_H; Reg <= RISCV::X31_H; Reg++)
206 markSuperRegs(Reserved, Reg);
207
208 // V registers for code generation. We handle them manually.
209 markSuperRegs(Reserved, RISCV::VL);
210 markSuperRegs(Reserved, RISCV::VTYPE);
211 markSuperRegs(Reserved, RISCV::VXSAT);
212 markSuperRegs(Reserved, RISCV::VXRM);
213
214 // Floating point environment registers.
215 markSuperRegs(Reserved, RISCV::FRM);
216 markSuperRegs(Reserved, RISCV::FFLAGS);
217
218 // SiFive VCIX state registers.
219 markSuperRegs(Reserved, RISCV::SF_VCIX_STATE);
220
222 if (Subtarget.hasStdExtE())
223 reportFatalUsageError("Graal reserved registers do not exist in RVE");
224 markSuperRegs(Reserved, RISCV::X23_H);
225 markSuperRegs(Reserved, RISCV::X27_H);
226 }
227
228 // Shadow stack pointer.
229 markSuperRegs(Reserved, RISCV::SSP);
230
231 // XSfmmbase
232 for (MCPhysReg Reg = RISCV::T0; Reg <= RISCV::T15; Reg++)
233 markSuperRegs(Reserved, Reg);
234
235 assert(checkAllSuperRegsMarked(Reserved));
236 return Reserved;
237}
238
240 MCRegister PhysReg) const {
241 return !MF.getSubtarget().isRegisterReservedByUser(PhysReg);
242}
243
245 return CSR_NoRegs_RegMask;
246}
247
250 const DebugLoc &DL, Register DestReg,
253 MaybeAlign RequiredAlign) const {
254
255 if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
256 return;
257
258 MachineFunction &MF = *MBB.getParent();
261 const RISCVInstrInfo *TII = ST.getInstrInfo();
262
263 // Optimize compile time offset case
264 if (Offset.getScalable()) {
265 if (auto VLEN = ST.getRealVLen()) {
266 // 1. Multiply the number of v-slots by the (constant) length of register
267 const int64_t VLENB = *VLEN / 8;
268 assert(Offset.getScalable() % RISCV::RVVBytesPerBlock == 0 &&
269 "Reserve the stack by the multiple of one vector size.");
270 const int64_t NumOfVReg = Offset.getScalable() / 8;
271 const int64_t FixedOffset = NumOfVReg * VLENB;
272 if (!isInt<32>(FixedOffset)) {
274 "Frame size outside of the signed 32-bit range not supported");
275 }
276 Offset = StackOffset::getFixed(FixedOffset + Offset.getFixed());
277 }
278 }
279
280 bool KillSrcReg = false;
281
282 if (Offset.getScalable()) {
283 unsigned ScalableAdjOpc = RISCV::ADD;
284 int64_t ScalableValue = Offset.getScalable();
285 if (ScalableValue < 0) {
286 ScalableValue = -ScalableValue;
287 ScalableAdjOpc = RISCV::SUB;
288 }
289 // Get vlenb and multiply vlen with the number of vector registers.
290 Register ScratchReg = DestReg;
291 if (DestReg == SrcReg)
292 ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
293
294 assert(ScalableValue > 0 && "There is no need to get VLEN scaled value.");
295 assert(ScalableValue % RISCV::RVVBytesPerBlock == 0 &&
296 "Reserve the stack by the multiple of one vector size.");
297 assert(isInt<32>(ScalableValue / RISCV::RVVBytesPerBlock) &&
298 "Expect the number of vector registers within 32-bits.");
299 uint32_t NumOfVReg = ScalableValue / RISCV::RVVBytesPerBlock;
300 // Only use vsetvli rather than vlenb if adjusting in the prologue or
301 // epilogue, otherwise it may disturb the VTYPE and VL status.
302 bool IsPrologueOrEpilogue =
304 bool UseVsetvliRatherThanVlenb =
305 IsPrologueOrEpilogue && ST.preferVsetvliOverReadVLENB();
306 if (UseVsetvliRatherThanVlenb && (NumOfVReg == 1 || NumOfVReg == 2 ||
307 NumOfVReg == 4 || NumOfVReg == 8)) {
308 BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENBViaVSETVLIX0),
309 ScratchReg)
310 .addImm(NumOfVReg)
311 .setMIFlag(Flag);
312 BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
313 .addReg(SrcReg)
314 .addReg(ScratchReg, RegState::Kill)
315 .setMIFlag(Flag);
316 } else {
317 if (UseVsetvliRatherThanVlenb)
318 BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENBViaVSETVLIX0),
319 ScratchReg)
320 .addImm(1)
321 .setMIFlag(Flag);
322 else
323 BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), ScratchReg)
324 .setMIFlag(Flag);
325
326 if (ScalableAdjOpc == RISCV::ADD && ST.hasStdExtZba() &&
327 (NumOfVReg == 2 || NumOfVReg == 4 || NumOfVReg == 8)) {
328 unsigned Opc = NumOfVReg == 2
329 ? RISCV::SH1ADD
330 : (NumOfVReg == 4 ? RISCV::SH2ADD : RISCV::SH3ADD);
331 BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
332 .addReg(ScratchReg, RegState::Kill)
333 .addReg(SrcReg)
334 .setMIFlag(Flag);
335 } else {
336 TII->mulImm(MF, MBB, II, DL, ScratchReg, NumOfVReg, Flag);
337 BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
338 .addReg(SrcReg)
339 .addReg(ScratchReg, RegState::Kill)
340 .setMIFlag(Flag);
341 }
342 }
343 SrcReg = DestReg;
344 KillSrcReg = true;
345 }
346
347 int64_t Val = Offset.getFixed();
348 if (DestReg == SrcReg && Val == 0)
349 return;
350
351 const uint64_t Align = RequiredAlign.valueOrOne().value();
352
353 if (isInt<12>(Val)) {
354 BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
355 .addReg(SrcReg, getKillRegState(KillSrcReg))
356 .addImm(Val)
357 .setMIFlag(Flag);
358 return;
359 }
360
361 // Use the QC_E_ADDI instruction from the Xqcilia extension that can take a
362 // signed 26-bit immediate.
363 if (ST.hasVendorXqcilia() && isInt<26>(Val)) {
364 // The one case where using this instruction is sub-optimal is if Val can be
365 // materialized with a single compressible LUI and following add/sub is also
366 // compressible. Avoid doing this if that is the case.
367 int Hi20 = (Val & 0xFFFFF000) >> 12;
368 bool IsCompressLUI =
369 ((Val & 0xFFF) == 0) && (Hi20 != 0) &&
370 (isUInt<5>(Hi20) || (Hi20 >= 0xfffe0 && Hi20 <= 0xfffff));
371 bool IsCompressAddSub =
372 (SrcReg == DestReg) &&
373 ((Val > 0 && RISCV::GPRNoX0RegClass.contains(SrcReg)) ||
374 (Val < 0 && RISCV::GPRCRegClass.contains(SrcReg)));
375
376 if (!(IsCompressLUI && IsCompressAddSub)) {
377 BuildMI(MBB, II, DL, TII->get(RISCV::QC_E_ADDI), DestReg)
378 .addReg(SrcReg, getKillRegState(KillSrcReg))
379 .addImm(Val)
380 .setMIFlag(Flag);
381 return;
382 }
383 }
384
385 // Try to split the offset across two ADDIs. We need to keep the intermediate
386 // result aligned after each ADDI. We need to determine the maximum value we
387 // can put in each ADDI. In the negative direction, we can use -2048 which is
388 // always sufficiently aligned. In the positive direction, we need to find the
389 // largest 12-bit immediate that is aligned. Exclude -4096 since it can be
390 // created with LUI.
391 assert(Align < 2048 && "Required alignment too large");
392 int64_t MaxPosAdjStep = 2048 - Align;
393 if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
394 int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
395 Val -= FirstAdj;
396 BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
397 .addReg(SrcReg, getKillRegState(KillSrcReg))
398 .addImm(FirstAdj)
399 .setMIFlag(Flag);
400 BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
401 .addReg(DestReg, RegState::Kill)
402 .addImm(Val)
403 .setMIFlag(Flag);
404 return;
405 }
406
407 // Use shNadd if doing so lets us materialize a 12 bit immediate with a single
408 // instruction. This saves 1 instruction over the full lui/addi+add fallback
409 // path. We avoid anything which can be done with a single lui as it might
410 // be compressible. Note that the sh1add case is fully covered by the 2x addi
411 // case just above and is thus omitted.
412 if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) {
413 unsigned Opc = 0;
414 if (isShiftedInt<12, 3>(Val)) {
415 Opc = RISCV::SH3ADD;
416 Val = Val >> 3;
417 } else if (isShiftedInt<12, 2>(Val)) {
418 Opc = RISCV::SH2ADD;
419 Val = Val >> 2;
420 }
421 if (Opc) {
422 Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
423 TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
424 BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
425 .addReg(ScratchReg, RegState::Kill)
426 .addReg(SrcReg, getKillRegState(KillSrcReg))
427 .setMIFlag(Flag);
428 return;
429 }
430 }
431
432 unsigned Opc = RISCV::ADD;
433 if (Val < 0) {
434 Val = -Val;
435 Opc = RISCV::SUB;
436 }
437
438 Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
439 TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
440 BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
441 .addReg(SrcReg, getKillRegState(KillSrcReg))
442 .addReg(ScratchReg, RegState::Kill)
443 .setMIFlag(Flag);
444}
445
446static std::tuple<RISCVVType::VLMUL, const TargetRegisterClass &, unsigned>
447getSpillReloadInfo(unsigned NumRemaining, uint16_t RegEncoding, bool IsSpill) {
448 if (NumRemaining >= 8 && RegEncoding % 8 == 0)
449 return {RISCVVType::LMUL_8, RISCV::VRM8RegClass,
450 IsSpill ? RISCV::VS8R_V : RISCV::VL8RE8_V};
451 if (NumRemaining >= 4 && RegEncoding % 4 == 0)
452 return {RISCVVType::LMUL_4, RISCV::VRM4RegClass,
453 IsSpill ? RISCV::VS4R_V : RISCV::VL4RE8_V};
454 if (NumRemaining >= 2 && RegEncoding % 2 == 0)
455 return {RISCVVType::LMUL_2, RISCV::VRM2RegClass,
456 IsSpill ? RISCV::VS2R_V : RISCV::VL2RE8_V};
457 return {RISCVVType::LMUL_1, RISCV::VRRegClass,
458 IsSpill ? RISCV::VS1R_V : RISCV::VL1RE8_V};
459}
460
461// Split a VSPILLx_Mx/VSPILLx_Mx pseudo into multiple whole register stores
462// separated by LMUL*VLENB bytes.
464 bool IsSpill) const {
465 DebugLoc DL = II->getDebugLoc();
466 MachineBasicBlock &MBB = *II->getParent();
467 MachineFunction &MF = *MBB.getParent();
469 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
470 const TargetInstrInfo *TII = STI.getInstrInfo();
472
473 auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
474 unsigned NF = ZvlssegInfo->first;
475 unsigned LMUL = ZvlssegInfo->second;
476 unsigned NumRegs = NF * LMUL;
477 assert(NumRegs <= 8 && "Invalid NF/LMUL combinations.");
478
479 Register Reg = II->getOperand(0).getReg();
480 uint16_t RegEncoding = TRI->getEncodingValue(Reg);
481 Register Base = II->getOperand(1).getReg();
482 bool IsBaseKill = II->getOperand(1).isKill();
483 Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
484
485 auto *OldMMO = *(II->memoperands_begin());
486 LocationSize OldLoc = OldMMO->getSize();
487 assert(OldLoc.isPrecise() && OldLoc.getValue().isKnownMultipleOf(NF));
488 TypeSize VRegSize = OldLoc.getValue().divideCoefficientBy(NumRegs);
489
490 Register VLENB = 0;
491 unsigned VLENBShift = 0;
492 unsigned PrevHandledNum = 0;
493 unsigned I = 0;
494 while (I != NumRegs) {
495 auto [LMulHandled, RegClass, Opcode] =
496 getSpillReloadInfo(NumRegs - I, RegEncoding, IsSpill);
497 auto [RegNumHandled, _] = RISCVVType::decodeVLMUL(LMulHandled);
498 bool IsLast = I + RegNumHandled == NumRegs;
499 if (PrevHandledNum) {
500 Register Step;
501 // Optimize for constant VLEN.
502 if (auto VLEN = STI.getRealVLen()) {
503 int64_t Offset = *VLEN / 8 * PrevHandledNum;
504 Step = MRI.createVirtualRegister(&RISCV::GPRRegClass);
505 STI.getInstrInfo()->movImm(MBB, II, DL, Step, Offset);
506 } else {
507 if (!VLENB) {
508 VLENB = MRI.createVirtualRegister(&RISCV::GPRRegClass);
509 BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VLENB);
510 }
511 uint32_t ShiftAmount = Log2_32(PrevHandledNum);
512 // To avoid using an extra register, we shift the VLENB register and
513 // remember how much it has been shifted. We can then use relative
514 // shifts to adjust to the desired shift amount.
515 if (VLENBShift > ShiftAmount) {
516 BuildMI(MBB, II, DL, TII->get(RISCV::SRLI), VLENB)
517 .addReg(VLENB, RegState::Kill)
518 .addImm(VLENBShift - ShiftAmount);
519 } else if (VLENBShift < ShiftAmount) {
520 BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VLENB)
521 .addReg(VLENB, RegState::Kill)
522 .addImm(ShiftAmount - VLENBShift);
523 }
524 VLENBShift = ShiftAmount;
525 Step = VLENB;
526 }
527
528 BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
529 .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
530 .addReg(Step, getKillRegState(Step != VLENB || IsLast));
531 Base = NewBase;
532 }
533
534 MCRegister ActualReg = findVRegWithEncoding(RegClass, RegEncoding);
536 BuildMI(MBB, II, DL, TII->get(Opcode))
537 .addReg(ActualReg, getDefRegState(!IsSpill))
538 .addReg(Base, getKillRegState(IsLast))
539 .addMemOperand(MF.getMachineMemOperand(OldMMO, OldMMO->getOffset(),
540 VRegSize * RegNumHandled));
541
542 // Adding implicit-use of super register to describe we are using part of
543 // super register, that prevents machine verifier complaining when part of
544 // subreg is undef, see comment in MachineVerifier::checkLiveness for more
545 // detail.
546 if (IsSpill)
547 MIB.addReg(Reg, RegState::Implicit);
548
549 PrevHandledNum = RegNumHandled;
550 RegEncoding += RegNumHandled;
551 I += RegNumHandled;
552 }
553 II->eraseFromParent();
554}
555
557 int SPAdj, unsigned FIOperandNum,
558 RegScavenger *RS) const {
559 assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");
560
561 MachineInstr &MI = *II;
562 MachineFunction &MF = *MI.getParent()->getParent();
564 DebugLoc DL = MI.getDebugLoc();
565
566 int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
567 Register FrameReg;
569 getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
570 bool IsRVVSpill = RISCV::isRVVSpill(MI);
571 if (!IsRVVSpill)
572 Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
573
574 if (!isInt<32>(Offset.getFixed())) {
576 "Frame offsets outside of the signed 32-bit range not supported");
577 }
578
579 if (!IsRVVSpill) {
580 int64_t Val = Offset.getFixed();
581 int64_t Lo12 = SignExtend64<12>(Val);
582 unsigned Opc = MI.getOpcode();
583
584 if (Opc == RISCV::ADDI && !isInt<12>(Val)) {
585 // We chose to emit the canonical immediate sequence rather than folding
586 // the offset into the using add under the theory that doing so doesn't
587 // save dynamic instruction count and some target may fuse the canonical
588 // 32 bit immediate sequence. We still need to clear the portion of the
589 // offset encoded in the immediate.
590 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
591 } else if ((Opc == RISCV::PREFETCH_I || Opc == RISCV::PREFETCH_R ||
592 Opc == RISCV::PREFETCH_W) &&
593 (Lo12 & 0b11111) != 0) {
594 // Prefetch instructions require the offset to be 32 byte aligned.
595 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
596 } else if (Opc == RISCV::MIPS_PREF && !isUInt<9>(Val)) {
597 // MIPS Prefetch instructions require the offset to be 9 bits encoded.
598 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
599 } else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
600 Opc == RISCV::PseudoRV32ZdinxSD ||
601 Opc == RISCV::PseudoLD_RV32_OPT ||
602 Opc == RISCV::PseudoSD_RV32_OPT) &&
603 Lo12 >= 2044) {
604 // This instruction will/might be split into 2 instructions. The second
605 // instruction will add 4 to the immediate. If that would overflow 12
606 // bits, we can't fold the offset.
607 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
608 } else {
609 // We can encode an add with 12 bit signed immediate in the immediate
610 // operand of our user instruction. As a result, the remaining
611 // offset can by construction, at worst, a LUI and a ADD.
612 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
614 Offset.getScalable());
615 }
616 }
617
618 if (Offset.getScalable() || Offset.getFixed()) {
619 Register DestReg;
620 if (MI.getOpcode() == RISCV::ADDI)
621 DestReg = MI.getOperand(0).getReg();
622 else
623 DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
624 adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
625 MachineInstr::NoFlags, std::nullopt);
626 MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/false,
627 /*IsImp*/false,
628 /*IsKill*/true);
629 } else {
630 MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
631 /*IsImp*/false,
632 /*IsKill*/false);
633 }
634
635 // If after materializing the adjustment, we have a pointless ADDI, remove it
636 if (MI.getOpcode() == RISCV::ADDI &&
637 MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
638 MI.getOperand(2).getImm() == 0) {
639 MI.eraseFromParent();
640 return true;
641 }
642
643 // Handle spill/fill of synthetic register classes for segment operations to
644 // ensure correctness in the edge case one gets spilled.
645 switch (MI.getOpcode()) {
646 case RISCV::PseudoVSPILL2_M1:
647 case RISCV::PseudoVSPILL2_M2:
648 case RISCV::PseudoVSPILL2_M4:
649 case RISCV::PseudoVSPILL3_M1:
650 case RISCV::PseudoVSPILL3_M2:
651 case RISCV::PseudoVSPILL4_M1:
652 case RISCV::PseudoVSPILL4_M2:
653 case RISCV::PseudoVSPILL5_M1:
654 case RISCV::PseudoVSPILL6_M1:
655 case RISCV::PseudoVSPILL7_M1:
656 case RISCV::PseudoVSPILL8_M1:
657 lowerSegmentSpillReload(II, /*IsSpill=*/true);
658 return true;
659 case RISCV::PseudoVRELOAD2_M1:
660 case RISCV::PseudoVRELOAD2_M2:
661 case RISCV::PseudoVRELOAD2_M4:
662 case RISCV::PseudoVRELOAD3_M1:
663 case RISCV::PseudoVRELOAD3_M2:
664 case RISCV::PseudoVRELOAD4_M1:
665 case RISCV::PseudoVRELOAD4_M2:
666 case RISCV::PseudoVRELOAD5_M1:
667 case RISCV::PseudoVRELOAD6_M1:
668 case RISCV::PseudoVRELOAD7_M1:
669 case RISCV::PseudoVRELOAD8_M1:
670 lowerSegmentSpillReload(II, /*IsSpill=*/false);
671 return true;
672 }
673
674 return false;
675}
676
678 const MachineFunction &MF) const {
679 return true;
680}
681
// Returns true if the instruction's frame index reference would be better
// served by a base register other than FP or SP.
// Used by LocalStackSlotAllocation pass to determine which frame index
// references it should create new base registers for.
                                                  int64_t Offset) const {
  // Locate the FrameIndex operand; the assert runs inside the loop so an
  // instruction without one is caught before reading past the operand list.
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  // For RISC-V, The machine instructions that include a FrameIndex operand
  // are load/store, ADDI instructions.
  unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
  if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
    return false;
  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  const MachineFunction &MF = *MI->getMF();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  if (TFI->hasFP(MF) && !shouldRealignStack(MF)) {
    auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
    // Estimate the stack size used to store callee saved registers(
    // excludes reserved registers).
    unsigned CalleeSavedSize = 0;
    for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R;
         ++R) {
      if (Subtarget.isRegisterReservedByUser(Reg))
        continue;

      if (RISCV::GPRRegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::GPRRegClass);
      else if (RISCV::FPR64RegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::FPR64RegClass);
      else if (RISCV::FPR32RegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::FPR32RegClass);
      // Ignore vector registers.
    }

    // A base register is needed only if the estimated FP (x8) relative
    // offset cannot be encoded in the instruction's immediate.
    int64_t MaxFPOffset = Offset - CalleeSavedSize;
    return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);
  }

  // Assume 128 bytes spill slots size to estimate the maximum possible
  // offset relative to the stack pointer.
  // FIXME: The 128 is copied from ARM. We should run some statistics and pick a
  // real one for RISC-V.
  int64_t MaxSPOffset = Offset + 128;
  MaxSPOffset += MFI.getLocalFrameSize();
  return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
}
739
// Determine whether a given base register plus offset immediate is
// encodable to resolve a frame index.
                                          Register BaseReg,
                                          int64_t Offset) const {
  // Locate the FrameIndex operand on the instruction.
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  // Fold in the immediate the instruction already carries; the combined
  // offset must fit in a signed 12-bit immediate to be encodable.
  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
  return isInt<12>(Offset);
}
755
756// Insert defining instruction(s) for a pointer to FrameIdx before
757// insertion point I.
758// Return materialized frame pointer.
760 int FrameIdx,
761 int64_t Offset) const {
763 DebugLoc DL;
764 if (MBBI != MBB->end())
765 DL = MBBI->getDebugLoc();
766 MachineFunction *MF = MBB->getParent();
767 MachineRegisterInfo &MFI = MF->getRegInfo();
769
770 Register BaseReg = MFI.createVirtualRegister(&RISCV::GPRRegClass);
771 BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
772 .addFrameIndex(FrameIdx)
773 .addImm(Offset);
774 return BaseReg;
775}
776
// Resolve a frame index operand of an instruction to reference the
// indicated base register plus offset instead.
                                         int64_t Offset) const {
  // Locate the FrameIndex operand on the instruction.
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  // Combine the caller's offset with the immediate already encoded in the
  // instruction before rewriting both operands below.
  Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
  // FrameIndex Operands are always represented as a
  // register followed by an immediate.
  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}
794
// Get the offset from the referenced frame index in the instruction,
// if there is one.
                                                int Idx) const {
  // Only I-format and S-format instructions carry a FrameIndex followed by
  // an immediate, so anything else is a caller error.
  assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
          RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
         "The MI must be I or S format.");
  assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
                                       "FrameIndex operand");
  // The immediate offset is the operand immediately after the FrameIndex.
  return MI->getOperand(Idx + 1).getImm();
}
806
808 const TargetFrameLowering *TFI = getFrameLowering(MF);
809 return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
810}
811
813 if (Reg == RISCV::SF_VCIX_STATE)
814 return "sf.vcix_state";
816}
817
818const uint32_t *
820 CallingConv::ID CC) const {
821 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
822
823 if (CC == CallingConv::GHC)
824 return CSR_NoRegs_RegMask;
825 RISCVABI::ABI ABI = Subtarget.getTargetABI();
826 if (CC == CallingConv::PreserveMost) {
827 if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
828 return CSR_RT_MostRegs_RVE_RegMask;
829 return CSR_RT_MostRegs_RegMask;
830 }
831 switch (ABI) {
832 default:
833 llvm_unreachable("Unrecognized ABI");
836 return CSR_ILP32E_LP64E_RegMask;
840 return CSR_ILP32_LP64_V_RegMask;
841 return CSR_ILP32_LP64_RegMask;
845 return CSR_ILP32F_LP64F_V_RegMask;
846 return CSR_ILP32F_LP64F_RegMask;
850 return CSR_ILP32D_LP64D_V_RegMask;
851 return CSR_ILP32D_LP64D_RegMask;
852 }
853}
854
                                  const MachineFunction &) const {
  // Widen the constrained vector classes (the V0-only class and the NoV0
  // variants) to their unconstrained equivalents; any other class is
  // already as large as it gets.
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRNoV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRM2NoV0RegClass)
    return &RISCV::VRM2RegClass;
  if (RC == &RISCV::VRM4NoV0RegClass)
    return &RISCV::VRM4RegClass;
  if (RC == &RISCV::VRM8NoV0RegClass)
    return &RISCV::VRM8RegClass;
  return RC;
}
870
873 // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
874 // to represent one vector register. The dwarf offset is
875 // VLENB * scalable_offset / 8.
876 assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");
877
878 // Add fixed-sized offset using existing DIExpression interface.
880
881 unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
882 int64_t VLENBSized = Offset.getScalable() / 8;
883 if (VLENBSized > 0) {
884 Ops.push_back(dwarf::DW_OP_constu);
885 Ops.push_back(VLENBSized);
886 Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
887 Ops.push_back(dwarf::DW_OP_mul);
888 Ops.push_back(dwarf::DW_OP_plus);
889 } else if (VLENBSized < 0) {
890 Ops.push_back(dwarf::DW_OP_constu);
891 Ops.push_back(-VLENBSized);
892 Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
893 Ops.push_back(dwarf::DW_OP_mul);
894 Ops.push_back(dwarf::DW_OP_minus);
895 }
896}
897
898unsigned
900 return MF.getSubtarget<RISCVSubtarget>().hasStdExtZca() && !DisableCostPerUse
901 ? 1
902 : 0;
903}
904
906 const TargetRegisterClass *RC) const {
907 return getRegClassWeight(RC).RegWeight;
908}
909
910// Add two address hints to improve chances of being able to use a compressed
911// instruction.
913 Register VirtReg, ArrayRef<MCPhysReg> Order,
915 const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
916 const MachineRegisterInfo *MRI = &MF.getRegInfo();
917 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
918
919 // Handle RegPairEven/RegPairOdd hints for Zilsd register pairs
920 std::pair<unsigned, Register> Hint = MRI->getRegAllocationHint(VirtReg);
921 unsigned HintType = Hint.first;
922 Register Partner = Hint.second;
923
924 MCRegister TargetReg;
925 if (HintType == RISCVRI::RegPairEven || HintType == RISCVRI::RegPairOdd) {
926 // Check if we want the even or odd register of a consecutive pair
927 bool WantOdd = (HintType == RISCVRI::RegPairOdd);
928
929 // First priority: Check if partner is already allocated
930 if (Partner.isVirtual() && VRM && VRM->hasPhys(Partner)) {
931 MCRegister PartnerPhys = VRM->getPhys(Partner);
932 // Calculate the exact register we need for consecutive pairing
933 TargetReg = PartnerPhys.id() + (WantOdd ? 1 : -1);
934
935 // Verify it's valid and available
936 if (RISCV::GPRRegClass.contains(TargetReg) &&
937 is_contained(Order, TargetReg))
938 Hints.push_back(TargetReg.id());
939 }
940
941 // Second priority: Try to find consecutive register pairs in the allocation
942 // order
943 for (MCPhysReg PhysReg : Order) {
944 // Don't add the hint if we already added above.
945 if (TargetReg == PhysReg)
946 continue;
947
948 unsigned RegNum = getEncodingValue(PhysReg);
949 // Check if this register matches the even/odd requirement
950 bool IsOdd = (RegNum % 2 != 0);
951
952 // Don't provide hints that are paired to a reserved register.
953 MCRegister Paired = PhysReg + (IsOdd ? -1 : 1);
954 if (WantOdd == IsOdd && !MRI->isReserved(Paired))
955 Hints.push_back(PhysReg);
956 }
957 }
958
959 bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
960 VirtReg, Order, Hints, MF, VRM, Matrix);
961
962 if (!VRM || DisableRegAllocHints)
963 return BaseImplRetVal;
964
965 // Add any two address hints after any copy hints.
966 SmallSet<Register, 4> TwoAddrHints;
967
968 auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
969 bool NeedGPRC) -> void {
970 Register Reg = MO.getReg();
971 Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
972 // TODO: Support GPRPair subregisters? Need to be careful with even/odd
973 // registers. If the virtual register is an odd register of a pair and the
974 // physical register is even (or vice versa), we should not add the hint.
975 if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg)) &&
976 !MO.getSubReg() && !VRRegMO.getSubReg()) {
977 if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
978 TwoAddrHints.insert(PhysReg);
979 }
980 };
981
982 // This is all of the compressible binary instructions. If an instruction
983 // needs GPRC register class operands \p NeedGPRC will be set to true.
984 auto isCompressible = [&Subtarget](const MachineInstr &MI, bool &NeedGPRC) {
985 NeedGPRC = false;
986 switch (MI.getOpcode()) {
987 default:
988 return false;
989 case RISCV::AND:
990 case RISCV::OR:
991 case RISCV::XOR:
992 case RISCV::SUB:
993 case RISCV::ADDW:
994 case RISCV::SUBW:
995 NeedGPRC = true;
996 return true;
997 case RISCV::ANDI: {
998 NeedGPRC = true;
999 if (!MI.getOperand(2).isImm())
1000 return false;
1001 int64_t Imm = MI.getOperand(2).getImm();
1002 if (isInt<6>(Imm))
1003 return true;
1004 // c.zext.b
1005 return Subtarget.hasStdExtZcb() && Imm == 255;
1006 }
1007 case RISCV::SRAI:
1008 case RISCV::SRLI:
1009 NeedGPRC = true;
1010 return true;
1011 case RISCV::ADD:
1012 case RISCV::SLLI:
1013 return true;
1014 case RISCV::ADDI:
1015 case RISCV::ADDIW:
1016 return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
1017 case RISCV::MUL:
1018 // c.mul
1019 NeedGPRC = true;
1020 return Subtarget.hasStdExtZcb();
1021 case RISCV::SEXT_B:
1022 case RISCV::SEXT_H:
1023 case RISCV::ZEXT_H_RV32:
1024 case RISCV::ZEXT_H_RV64:
1025 // c.sext.b, c.sext.h, c.zext.h
1026 NeedGPRC = true;
1027 return Subtarget.hasStdExtZcb() && Subtarget.hasStdExtZbb();
1028 case RISCV::ADD_UW:
1029 // c.zext.w
1030 NeedGPRC = true;
1031 return Subtarget.hasStdExtZcb() && MI.getOperand(2).isReg() &&
1032 MI.getOperand(2).getReg() == RISCV::X0;
1033 case RISCV::XORI:
1034 // c.not
1035 NeedGPRC = true;
1036 return Subtarget.hasStdExtZcb() && MI.getOperand(2).isImm() &&
1037 MI.getOperand(2).getImm() == -1;
1038 case RISCV::QC_EXTU:
1039 return MI.getOperand(2).getImm() >= 6 && MI.getOperand(3).getImm() == 0;
1040 case RISCV::BSETI:
1041 case RISCV::BEXTI:
1042 // qc.c.bseti, qc.c.bexti
1043 NeedGPRC = true;
1044 return Subtarget.hasVendorXqcibm() && MI.getOperand(2).getImm() != 0;
1045 }
1046 };
1047
1048 // Returns true if this operand is compressible. For non-registers it always
1049 // returns true. Immediate range was already checked in isCompressible.
1050 // For registers, it checks if the register is a GPRC register. reg-reg
1051 // instructions that require GPRC need all register operands to be GPRC.
1052 auto isCompressibleOpnd = [&](const MachineOperand &MO) {
1053 if (!MO.isReg())
1054 return true;
1055 Register Reg = MO.getReg();
1056 Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
1057 return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
1058 };
1059
1060 for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
1061 const MachineInstr &MI = *MO.getParent();
1062 unsigned OpIdx = MO.getOperandNo();
1063 bool NeedGPRC;
1064 if (isCompressible(MI, NeedGPRC)) {
1065 if (OpIdx == 0 && MI.getOperand(1).isReg()) {
1066 if (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
1067 MI.getOpcode() == RISCV::ADD_UW ||
1068 isCompressibleOpnd(MI.getOperand(2)))
1069 tryAddHint(MO, MI.getOperand(1), NeedGPRC);
1070 if (MI.isCommutable() && MI.getOperand(2).isReg() &&
1071 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
1072 tryAddHint(MO, MI.getOperand(2), NeedGPRC);
1073 } else if (OpIdx == 1 && (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
1074 isCompressibleOpnd(MI.getOperand(2)))) {
1075 tryAddHint(MO, MI.getOperand(0), NeedGPRC);
1076 } else if (MI.isCommutable() && OpIdx == 2 &&
1077 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
1078 tryAddHint(MO, MI.getOperand(0), NeedGPRC);
1079 }
1080 }
1081
1082 // Add a hint if it would allow auipc/lui+addi(w) fusion. We do this even
1083 // without the fusions explicitly enabled as the impact is rarely negative
1084 // and some cores do implement this fusion.
1085 if ((MI.getOpcode() == RISCV::ADDIW || MI.getOpcode() == RISCV::ADDI) &&
1086 MI.getOperand(1).isReg()) {
1087 const MachineBasicBlock &MBB = *MI.getParent();
1088 MachineBasicBlock::const_iterator I = MI.getIterator();
1089 // Is the previous instruction a LUI or AUIPC that can be fused?
1090 if (I != MBB.begin()) {
1091 I = skipDebugInstructionsBackward(std::prev(I), MBB.begin());
1092 if ((I->getOpcode() == RISCV::LUI || I->getOpcode() == RISCV::AUIPC) &&
1093 I->getOperand(0).getReg() == MI.getOperand(1).getReg()) {
1094 if (OpIdx == 0)
1095 tryAddHint(MO, MI.getOperand(1), /*NeedGPRC=*/false);
1096 else
1097 tryAddHint(MO, MI.getOperand(0), /*NeedGPRC=*/false);
1098 }
1099 }
1100 }
1101 }
1102
1103 for (MCPhysReg OrderReg : Order)
1104 if (TwoAddrHints.count(OrderReg))
1105 Hints.push_back(OrderReg);
1106
1107 return BaseImplRetVal;
1108}
1109
1111 MachineFunction &MF) const {
1112 MachineRegisterInfo *MRI = &MF.getRegInfo();
1113 std::pair<unsigned, Register> Hint = MRI->getRegAllocationHint(Reg);
1114
1115 // Handle RegPairEven/RegPairOdd hints for Zilsd register pairs
1116 if ((Hint.first == RISCVRI::RegPairOdd ||
1117 Hint.first == RISCVRI::RegPairEven) &&
1118 Hint.second.isVirtual()) {
1119 // If 'Reg' is one of the even/odd register pair and it's now changed
1120 // (e.g. coalesced) into a different register, the other register of the
1121 // pair allocation hint must be updated to reflect the relationship change.
1122 Register Partner = Hint.second;
1123 std::pair<unsigned, Register> PartnerHint =
1124 MRI->getRegAllocationHint(Partner);
1125
1126 // Make sure partner still points to us
1127 if (PartnerHint.second == Reg) {
1128 // Update partner to point to NewReg instead of Reg
1129 MRI->setRegAllocationHint(Partner, PartnerHint.first, NewReg);
1130
1131 // If NewReg is virtual, set up the reciprocal hint
1132 // NewReg takes over Reg's role, so it gets the SAME hint type as Reg
1133 if (NewReg.isVirtual())
1134 MRI->setRegAllocationHint(NewReg, Hint.first, Partner);
1135 }
1136 }
1137}
1138
1141 uint16_t Encoding) const {
1142 MCRegister Reg = RISCV::V0 + Encoding;
1144 return Reg;
1145 return getMatchingSuperReg(Reg, RISCV::sub_vrm1_0, &RegClass);
1146}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains constants used for implementing Dwarf debug support.
const HexagonInstrInfo * TII
#define _
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Live Register Matrix
#define I(x, y, z)
Definition MD5.cpp:57
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static cl::opt< bool > DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden, cl::init(false), cl::desc("Disable two address hints for register " "allocation"))
static cl::opt< bool > DisableCostPerUse("riscv-disable-cost-per-use", cl::init(false), cl::Hidden)
static std::tuple< RISCVVType::VLMUL, const TargetRegisterClass &, unsigned > getSpillReloadInfo(unsigned NumRemaining, uint16_t RegEncoding, bool IsSpill)
This file declares the machine register scavenger class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:483
This file defines the SmallSet class.
static unsigned getDwarfRegNum(MCRegister Reg, const TargetRegisterInfo *TRI)
Go up the super-register chain until we hit a valid dwarf register number.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
A debug info location.
Definition DebugLoc.h:123
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:728
TypeSize getValue() const
bool isPrecise() const
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
constexpr unsigned id() const
Definition MCRegister.h:82
MachineInstrBundleIterator< const MachineInstr > const_iterator
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int64_t getLocalFrameSize() const
Get the size of the local object blob.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const RegClassOrRegBank & getRegClassOrRegBank(Register Reg) const
Return the register bank or register class of Reg.
bool isReserved(MCRegister PhysReg) const
isReserved - Returns true when PhysReg is a reserved register.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
LLVM_ABI const MCPhysReg * getCalleeSavedRegs() const
Returns list of callee saved registers.
std::pair< unsigned, Register > getRegAllocationHint(Register VReg) const
getRegAllocationHint - Return the register allocation hint for the specified virtual register.
void setRegAllocationHint(Register VReg, unsigned Type, Register PrefReg)
setRegAllocationHint - Specify a register allocation hint for the specified virtual register.
const MachineFunction & getMF() const
iterator_range< reg_nodbg_iterator > reg_nodbg_operands(Register Reg) const
bool hasBP(const MachineFunction &MF) const
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
std::optional< unsigned > getRealVLen() const
const RISCVRegisterInfo * getRegisterInfo() const override
const RISCVInstrInfo * getInstrInfo() const override
This class implements the register bank concept.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:134
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition SmallSet.h:176
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:184
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:30
int64_t getFixed() const
Returns the fixed component of the stack.
Definition TypeSize.h:46
static StackOffset get(int64_t Fixed, int64_t Scalable)
Definition TypeSize.h:41
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Information about stack frame layout on the target.
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
const uint8_t TSFlags
Configurable target specific flags.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual StringRef getRegAsmName(MCRegister Reg) const
Return the assembly name for Reg.
virtual bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM=nullptr, const LiveRegMatrix *Matrix=nullptr) const
Get a list of 'hint' registers that the register allocator should try first when allocating a physica...
virtual bool isRegisterReservedByUser(Register R) const
virtual const TargetInstrInfo * getInstrInfo() const
MCRegister getPhys(Register virtReg) const
returns the physical register mapped to the specified virtual register
Definition VirtRegMap.h:91
bool hasPhys(Register virtReg) const
returns true if the specified virtual register is mapped to a physical register
Definition VirtRegMap.h:87
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of...
Definition TypeSize.h:180
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ RISCV_VectorCall
Calling convention used for RISC-V V-extension.
@ PreserveMost
Used for runtime calls that preserves most registers.
Definition CallingConv.h:63
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
Definition CallingConv.h:50
@ GRAAL
Used by GraalVM. Two additional registers are reserved.
MCRegister getBPReg()
static unsigned getFormat(uint64_t TSFlags)
static RISCVVType::VLMUL getLMul(uint8_t TSFlags)
LLVM_ABI std::pair< unsigned, bool > decodeVLMUL(VLMUL VLMul)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
bool isRVVSpill(const MachineInstr &MI)
static constexpr unsigned RVVBytesPerBlock
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
PointerUnion< const TargetRegisterClass *, const RegisterBank * > RegClassOrRegBank
Convenient type to represent either a register class or a register bank.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
constexpr RegState getKillRegState(bool B)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
constexpr RegState getDefRegState(bool B)
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
IterT skipDebugInstructionsBackward(IterT It, IterT Begin, bool SkipPseudoOp=true)
Decrement It until it points to a non-debug instruction or to Begin and return the resulting iterator...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N bit number shifted left by S.
Definition MathExtras.h:182
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition MathExtras.h:572
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition Alignment.h:130
bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override
bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override
Register findVRegWithEncoding(const TargetRegisterClass &RegClass, uint16_t Encoding) const
const TargetRegisterClass * getLargestLegalSuperClass(const TargetRegisterClass *RC, const MachineFunction &) const override
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
Register materializeFrameBaseRegister(MachineBasicBlock *MBB, int FrameIdx, int64_t Offset) const override
RISCVRegisterInfo(unsigned HwMode)
void getOffsetOpcodes(const StackOffset &Offset, SmallVectorImpl< uint64_t > &Ops) const override
const TargetRegisterClass * getConstrainedRegClassForOperand(const MachineOperand &MO, const MachineRegisterInfo &MRI) const override
bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg, int64_t Offset) const override
Register getFrameRegister(const MachineFunction &MF) const override
const MCPhysReg * getIPRACSRegs(const MachineFunction *MF) const override
void lowerSegmentSpillReload(MachineBasicBlock::iterator II, bool IsSpill) const
const TargetRegisterClass * getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB, bool Is64Bit) const
void adjustReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, Register SrcReg, StackOffset Offset, MachineInstr::MIFlag Flag, MaybeAlign RequiredAlign) const
void updateRegAllocHint(Register Reg, Register NewReg, MachineFunction &MF) const override
bool isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const override
const uint32_t * getNoPreservedMask() const override
float getSpillWeightScaleFactor(const TargetRegisterClass *RC) const override
void resolveFrameIndex(MachineInstr &MI, Register BaseReg, int64_t Offset) const override
bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const override
int64_t getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const override
unsigned getRegisterCostTableIndex(const MachineFunction &MF) const override
StringRef getRegAsmName(MCRegister Reg) const override
bool eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj, unsigned FIOperandNum, RegScavenger *RS=nullptr) const override