//===-- RISCVRegisterInfo.cpp - RISC-V Register Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISC-V implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//
#include "RISCVRegisterInfo.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/ErrorHandling.h"

#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"

using namespace llvm;

static cl::opt<bool> DisableCostPerUse("riscv-disable-cost-per-use",
                                       cl::init(false), cl::Hidden);

static cl::opt<bool>
    DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
                         cl::init(false),
                         cl::desc("Disable two address hints for register "
                                  "allocation"));
static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");
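// Code below relies on these orderings when it computes a register from a
// base register number plus an offset, e.g., the X16..X31 reservation loop in
// getReservedRegs for RVE.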

RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
    : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/ 0, /*EHFlavor*/ 0,
                           /*PC*/ 0, HwMode) {}

const MCPhysReg *
RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_NoRegs_SaveList;
  if (MF->getFunction().hasFnAttribute("interrupt")) {
    if (Subtarget.hasStdExtD())
      return CSR_XLEN_F64_Interrupt_SaveList;
    if (Subtarget.hasStdExtF())
      return Subtarget.hasStdExtE() ? CSR_XLEN_F32_Interrupt_RVE_SaveList
                                    : CSR_XLEN_F32_Interrupt_SaveList;
    return Subtarget.hasStdExtE() ? CSR_Interrupt_RVE_SaveList
                                  : CSR_Interrupt_SaveList;
  }

  bool HasVectorCSR =
      MF->getFunction().getCallingConv() == CallingConv::RISCV_VectorCall;

  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64E:
    return CSR_ILP32E_LP64E_SaveList;
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    if (HasVectorCSR)
      return CSR_ILP32_LP64_V_SaveList;
    return CSR_ILP32_LP64_SaveList;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    if (HasVectorCSR)
      return CSR_ILP32F_LP64F_V_SaveList;
    return CSR_ILP32F_LP64F_SaveList;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    if (HasVectorCSR)
      return CSR_ILP32D_LP64D_V_SaveList;
    return CSR_ILP32D_LP64D_SaveList;
  }
}

BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  BitVector Reserved(getNumRegs());
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  // Mark any registers requested to be reserved as such.
  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    if (Subtarget.isRegisterReservedByUser(Reg))
      markSuperRegs(Reserved, Reg);
  }

  // Use markSuperRegs to ensure any register aliases are also reserved.
  markSuperRegs(Reserved, RISCV::X0); // zero
  markSuperRegs(Reserved, RISCV::X2); // sp
  markSuperRegs(Reserved, RISCV::X3); // gp
  markSuperRegs(Reserved, RISCV::X4); // tp
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, RISCV::X8); // fp
  // Reserve the base register if we need to realign the stack and allocate
  // variable-sized objects at runtime.
  if (TFI->hasBP(MF))
    markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp

  // Additionally reserve the dummy register used to form the register pair
  // beginning with 'x0' for instructions that take register pairs.
  markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);

  // There are only 16 GPRs for RVE.
  if (Subtarget.hasStdExtE())
    for (MCPhysReg Reg = RISCV::X16; Reg <= RISCV::X31; Reg++)
      markSuperRegs(Reserved, Reg);

  // V registers for code generation. We handle them manually.
  markSuperRegs(Reserved, RISCV::VL);
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);
  markSuperRegs(Reserved, RISCV::VLENB); // vlenb (constant)

  // Floating point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  // SiFive VCIX state registers.
  markSuperRegs(Reserved, RISCV::VCIX_STATE);

  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    if (Subtarget.hasStdExtE())
      report_fatal_error("Graal reserved registers do not exist in RVE");
    markSuperRegs(Reserved, RISCV::X23);
    markSuperRegs(Reserved, RISCV::X27);
  }

  // Shadow stack pointer.
  markSuperRegs(Reserved, RISCV::SSP);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                         MCRegister PhysReg) const {
  return !MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(PhysReg);
}

const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator II,
                                  const DebugLoc &DL, Register DestReg,
                                  Register SrcReg, StackOffset Offset,
                                  MachineInstr::MIFlag Flag,
                                  MaybeAlign RequiredAlign) const {

  if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
    return;

  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = ST.getInstrInfo();

  bool KillSrcReg = false;

  if (Offset.getScalable()) {
    unsigned ScalableAdjOpc = RISCV::ADD;
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // Get vlenb and multiply it by the number of vector registers.
    Register ScratchReg = DestReg;
    if (DestReg == SrcReg)
      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    assert(ScalableValue > 0 && "There is no need to get VLEN scaled value.");
    assert(ScalableValue % 8 == 0 &&
           "Reserve the stack by the multiple of one vector size.");
    assert(isInt<32>(ScalableValue / 8) &&
           "Expect the number of vector registers within 32-bits.");
    uint32_t NumOfVReg = ScalableValue / 8;
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), ScratchReg)
        .setMIFlag(Flag);

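    // With Zba, a positive scalable adjustment of 2, 4, or 8 vector registers
    // folds the multiply into a single shNadd of vlenb; e.g., NumOfVReg == 4
    // emits `sh2add DestReg, ScratchReg, SrcReg` (DestReg = 4*vlenb + SrcReg)
    // instead of a multiply followed by an add.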
    if (ScalableAdjOpc == RISCV::ADD && ST.hasStdExtZba() &&
        (NumOfVReg == 2 || NumOfVReg == 4 || NumOfVReg == 8)) {
      unsigned Opc = NumOfVReg == 2
                         ? RISCV::SH1ADD
                         : (NumOfVReg == 4 ? RISCV::SH2ADD : RISCV::SH3ADD);
      BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
          .addReg(ScratchReg, RegState::Kill)
          .addReg(SrcReg)
          .setMIFlag(Flag);
    } else {
      TII->mulImm(MF, MBB, II, DL, ScratchReg, NumOfVReg, Flag);
      BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
          .addReg(SrcReg)
          .addReg(ScratchReg, RegState::Kill)
          .setMIFlag(Flag);
    }
    SrcReg = DestReg;
    KillSrcReg = true;
  }

  int64_t Val = Offset.getFixed();
  if (DestReg == SrcReg && Val == 0)
    return;

  const uint64_t Align = RequiredAlign.valueOrOne().value();

  if (isInt<12>(Val)) {
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Try to split the offset across two ADDIs. We need to keep the intermediate
  // result aligned after each ADDI. We need to determine the maximum value we
  // can put in each ADDI. In the negative direction, we can use -2048 which is
  // always sufficiently aligned. In the positive direction, we need to find the
  // largest 12-bit immediate that is aligned. Exclude -4096 since it can be
  // created with LUI.
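  // For example, with Val == 4000 and a required alignment of 16,
  // MaxPosAdjStep is 2048 - 16 = 2032, so this emits
  // `addi DestReg, SrcReg, 2032` followed by `addi DestReg, DestReg, 1968`.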
  assert(Align < 2048 && "Required alignment too large");
  int64_t MaxPosAdjStep = 2048 - Align;
  if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
    int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
    Val -= FirstAdj;
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(FirstAdj)
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Use shNadd if doing so lets us materialize a 12-bit immediate with a single
  // instruction. This saves 1 instruction over the full lui/addi+add fallback
  // path. We avoid anything which can be done with a single lui as it might
  // be compressible. Note that the sh1add case is fully covered by the 2x addi
  // case just above and is thus omitted.
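  // For example, Val == 8000 is 1000 << 3, so this materializes 1000 into a
  // scratch register and emits `sh3add DestReg, ScratchReg, SrcReg`.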
  if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) {
    unsigned Opc = 0;
    if (isShiftedInt<12, 3>(Val)) {
      Opc = RISCV::SH3ADD;
      Val = Val >> 3;
    } else if (isShiftedInt<12, 2>(Val)) {
      Opc = RISCV::SH2ADD;
      Val = Val >> 2;
    }
    if (Opc) {
      Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
      BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
          .addReg(ScratchReg, RegState::Kill)
          .addReg(SrcReg, getKillRegState(KillSrcReg))
          .setMIFlag(Flag);
      return;
    }
  }

  unsigned Opc = RISCV::ADD;
  if (Val < 0) {
    Val = -Val;
    Opc = RISCV::SUB;
  }

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
  BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrcReg))
      .addReg(ScratchReg, RegState::Kill)
      .setMIFlag(Flag);
}

// Split a VSPILLx_Mx pseudo into multiple whole register stores separated by
// LMUL*VLENB bytes.
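// For example, PseudoVSPILL3_M2 (NF == 3, LMUL == 2) becomes three VS2R_V
// stores, with the base address advanced by 2*VLENB after each of the first
// two stores.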
void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VS1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VS2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VS4R_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // Optimize for constant VLEN.
  if (auto VLEN = STI.getRealVLen()) {
    const int64_t VLENB = *VLEN / 8;
    int64_t Offset = VLENB * LMUL;
    STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
  } else {
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
    uint32_t ShiftAmount = Log2_32(LMUL);
    if (ShiftAmount != 0)
      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
          .addReg(VL)
          .addImm(ShiftAmount);
  }
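  // At this point VL holds the byte stride LMUL*VLENB: materialized as a
  // constant when the exact VLEN is known, otherwise as PseudoReadVLENB
  // shifted left by Log2(LMUL), e.g., `slli VL, VL, 2` for LMUL == 4.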

  Register SrcReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    // Add an implicit use of the super register to indicate that only part of
    // it is used here; this prevents the machine verifier from complaining
    // when part of the subreg is undef. See the comment in
    // MachineVerifier::checkLiveness for more detail.
    BuildMI(MBB, II, DL, TII->get(Opcode))
        .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()))
        .addReg(SrcReg, RegState::Implicit);
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

// Split a VRELOADx_Mx pseudo into multiple whole register loads separated by
// LMUL*VLENB bytes.
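// For example, PseudoVRELOAD3_M2 (NF == 3, LMUL == 2) becomes three VL2RE8_V
// loads, with the base address advanced by 2*VLENB between loads.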
void RISCVRegisterInfo::lowerVRELOAD(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VL1RE8_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VL2RE8_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VL4RE8_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // Optimize for constant VLEN.
  if (auto VLEN = STI.getRealVLen()) {
    const int64_t VLENB = *VLEN / 8;
    int64_t Offset = VLENB * LMUL;
    STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
  } else {
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
    uint32_t ShiftAmount = Log2_32(LMUL);
    if (ShiftAmount != 0)
      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
          .addReg(VL)
          .addImm(ShiftAmount);
  }

  Register DestReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    BuildMI(MBB, II, DL, TII->get(Opcode),
            TRI->getSubReg(DestReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()));
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  DebugLoc DL = MI.getDebugLoc();

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;
  StackOffset Offset =
      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
  bool IsRVVSpill = RISCV::isRVVSpill(MI);
  if (!IsRVVSpill)
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());

  if (Offset.getScalable() &&
      ST.getRealMinVLen() == ST.getRealMaxVLen()) {
    // For an exact VLEN value, scalable offsets become constant and thus
    // can be converted entirely into fixed offsets.
    int64_t FixedValue = Offset.getFixed();
    int64_t ScalableValue = Offset.getScalable();
    assert(ScalableValue % 8 == 0 &&
           "Scalable offset is not a multiple of a single vector size.");
    int64_t NumOfVReg = ScalableValue / 8;
    int64_t VLENB = ST.getRealMinVLen() / 8;
    Offset = StackOffset::getFixed(FixedValue + NumOfVReg * VLENB);
  }
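  // For example, with a known VLEN of 128 (VLENB == 16), a scalable offset of
  // 8 (one vector register) folds to a fixed offset of 16 bytes.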

  if (!isInt<32>(Offset.getFixed())) {
    report_fatal_error(
        "Frame offsets outside of the signed 32-bit range not supported");
  }

  if (!IsRVVSpill) {
    int64_t Val = Offset.getFixed();
    int64_t Lo12 = SignExtend64<12>(Val);
    unsigned Opc = MI.getOpcode();
    if (Opc == RISCV::ADDI && !isInt<12>(Val)) {
      // We chose to emit the canonical immediate sequence rather than folding
      // the offset into the using add, under the theory that doing so doesn't
      // save dynamic instruction count and some targets may fuse the canonical
      // 32-bit immediate sequence. We still need to clear the portion of the
      // offset encoded in the immediate.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else if ((Opc == RISCV::PREFETCH_I || Opc == RISCV::PREFETCH_R ||
                Opc == RISCV::PREFETCH_W) &&
               (Lo12 & 0b11111) != 0) {
      // Prefetch instructions require the offset to be 32-byte aligned.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
                Opc == RISCV::PseudoRV32ZdinxSD) &&
               Lo12 >= 2044) {
      // This instruction will be split into 2 instructions. The second
      // instruction will add 4 to the immediate. If that would overflow 12
      // bits, we can't fold the offset.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else {
      // We can encode an add with a 12-bit signed immediate in the immediate
      // operand of our user instruction. As a result, the remaining offset
      // can, by construction, be materialized with at worst a LUI and an ADD.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
      Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
                                Offset.getScalable());
    }
  }

  if (Offset.getScalable() || Offset.getFixed()) {
    Register DestReg;
    if (MI.getOpcode() == RISCV::ADDI)
      DestReg = MI.getOperand(0).getReg();
    else
      DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
              MachineInstr::NoFlags, std::nullopt);
    MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/ false,
                                                 /*IsImp*/ false,
                                                 /*IsKill*/ true);
  } else {
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/ false,
                                                 /*IsImp*/ false,
                                                 /*IsKill*/ false);
  }

  // If, after materializing the adjustment, we have a pointless ADDI, remove it.
  if (MI.getOpcode() == RISCV::ADDI &&
      MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getOperand(2).getImm() == 0) {
    MI.eraseFromParent();
    return true;
  }

  // Handle spill/fill of synthetic register classes for segment operations to
  // ensure correctness in the edge case where one gets spilled. There are many
  // possible optimizations here, but given the extreme rarity of such spills,
  // we prefer simplicity of implementation for now.
  switch (MI.getOpcode()) {
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVSPILL8_M1:
    lowerVSPILL(II);
    return true;
  case RISCV::PseudoVRELOAD2_M1:
  case RISCV::PseudoVRELOAD2_M2:
  case RISCV::PseudoVRELOAD2_M4:
  case RISCV::PseudoVRELOAD3_M1:
  case RISCV::PseudoVRELOAD3_M2:
  case RISCV::PseudoVRELOAD4_M1:
  case RISCV::PseudoVRELOAD4_M2:
  case RISCV::PseudoVRELOAD5_M1:
  case RISCV::PseudoVRELOAD6_M1:
  case RISCV::PseudoVRELOAD7_M1:
  case RISCV::PseudoVRELOAD8_M1:
    lowerVRELOAD(II);
    return true;
  }

  return false;
}

bool RISCVRegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

// Returns true if the instruction's frame index reference would be better
// served by a base register other than FP or SP.
// Used by the LocalStackSlotAllocation pass to determine which frame index
// references it should create new base registers for.
bool RISCVRegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  // For RISC-V, the machine instructions that include a FrameIndex operand
  // are loads, stores, and ADDI.
  unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
  if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
    return false;
  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  const MachineFunction &MF = *MI->getMF();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);

  if (TFI->hasFP(MF) && !shouldRealignStack(MF)) {
    // Estimate the stack size used to store callee-saved registers
    // (excluding reserved registers).
    unsigned CalleeSavedSize = 0;
    BitVector ReservedRegs = getReservedRegs(MF);
    for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R;
         ++R) {
      if (!ReservedRegs.test(Reg))
        CalleeSavedSize += getSpillSize(*getMinimalPhysRegClass(Reg));
    }

    int64_t MaxFPOffset = Offset - CalleeSavedSize;
    return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);
  }

  // Assume 128 bytes of spill slots to estimate the maximum possible
  // offset relative to the stack pointer.
  // FIXME: The 128 is copied from ARM. We should run some statistics and pick a
  // real one for RISC-V.
  int64_t MaxSPOffset = Offset + 128;
  MaxSPOffset += MFI.getLocalFrameSize();
  return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
}

// Determine whether a given base register plus offset immediate is
// encodable to resolve a frame index.
bool RISCVRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                           Register BaseReg,
                                           int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
  return isInt<12>(Offset);
}

// Insert defining instruction(s) for a pointer to FrameIdx at the start of
// the basic block. Returns the materialized frame base register.
Register RISCVRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                         int FrameIdx,
                                                         int64_t Offset) const {
  MachineBasicBlock::iterator MBBI = MBB->begin();
  DebugLoc DL;
  if (MBBI != MBB->end())
    DL = MBBI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MFI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  Register BaseReg = MFI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset);
  return BaseReg;
}

// Resolve a frame index operand of an instruction to reference the
// indicated base register plus offset instead.
void RISCVRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
  // FrameIndex operands are always represented as a
  // register followed by an immediate.
  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

// Get the offset from the referenced frame index in the instruction,
// if there is one.
int64_t RISCVRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                    int Idx) const {
  assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
          RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
         "The MI must be I or S format.");
  assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
                                       "FrameIndex operand");
  return MI->getOperand(Idx + 1).getImm();
}

Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
}

const uint32_t *
RISCVRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID CC) const {
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  if (CC == CallingConv::GHC)
    return CSR_NoRegs_RegMask;
  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64E:
    return CSR_ILP32E_LP64E_RegMask;
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    if (CC == CallingConv::RISCV_VectorCall)
      return CSR_ILP32_LP64_V_RegMask;
    return CSR_ILP32_LP64_RegMask;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    if (CC == CallingConv::RISCV_VectorCall)
      return CSR_ILP32F_LP64F_V_RegMask;
    return CSR_ILP32F_LP64F_RegMask;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    if (CC == CallingConv::RISCV_VectorCall)
      return CSR_ILP32D_LP64D_V_RegMask;
    return CSR_ILP32D_LP64D_RegMask;
  }
}

const TargetRegisterClass *
RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &) const {
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRNoV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRM2NoV0RegClass)
    return &RISCV::VRM2RegClass;
  if (RC == &RISCV::VRM4NoV0RegClass)
    return &RISCV::VRM4RegClass;
  if (RC == &RISCV::VRM8NoV0RegClass)
    return &RISCV::VRM8RegClass;
  return RC;
}

void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
                                         SmallVectorImpl<uint64_t> &Ops) const {
  // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
  // to represent one vector register. The dwarf offset is
  // VLENB * scalable_offset / 8.
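  // For example, a scalable offset of +16 (two vector registers) appends
  // DW_OP_constu 2, DW_OP_bregx vlenb 0, DW_OP_mul, DW_OP_plus.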
  assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
  int64_t VLENBSized = Offset.getScalable() / 8;
  if (VLENBSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VLENBSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}

unsigned
RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
  return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() &&
                 !DisableCostPerUse
             ? 1
             : 0;
}

// Add two address hints to improve chances of being able to use a compressed
// instruction.
bool RISCVRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (!VRM || DisableRegAllocHints)
    return BaseImplRetVal;

  // Add any two address hints after any copy hints.
  SmallSet<Register, 4> TwoAddrHints;

  auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
                        bool NeedGPRC) -> void {
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    // TODO: Support GPRPair subregisters? Need to be careful with even/odd
    // registers. If the virtual register is an odd register of a pair and the
    // physical register is even (or vice versa), we should not add the hint.
    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg)) &&
        !MO.getSubReg() && !VRRegMO.getSubReg()) {
      if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
        TwoAddrHints.insert(PhysReg);
    }
  };

  // This is all of the compressible binary instructions. If an instruction
  // needs GPRC register class operands, \p NeedGPRC will be set to true.
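  // For example, `and rd, rd, rs` can compress to c.and only when both
  // registers are in GPRC (x8-x15), which is why NeedGPRC is set for AND.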
  auto isCompressible = [&Subtarget](const MachineInstr &MI, bool &NeedGPRC) {
    NeedGPRC = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    case RISCV::SUB:
    case RISCV::ADDW:
    case RISCV::SUBW:
      NeedGPRC = true;
      return true;
    case RISCV::ANDI: {
      NeedGPRC = true;
      if (!MI.getOperand(2).isImm())
        return false;
      int64_t Imm = MI.getOperand(2).getImm();
      if (isInt<6>(Imm))
        return true;
      // c.zext.b
      return Subtarget.hasStdExtZcb() && Imm == 255;
    }
    case RISCV::SRAI:
    case RISCV::SRLI:
      NeedGPRC = true;
      return true;
    case RISCV::ADD:
    case RISCV::SLLI:
      return true;
    case RISCV::ADDI:
    case RISCV::ADDIW:
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    case RISCV::MUL:
    case RISCV::SEXT_B:
    case RISCV::SEXT_H:
    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:
      // c.mul, c.sext.b, c.sext.h, c.zext.h
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb();
    case RISCV::ADD_UW:
      // c.zext.w
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isReg() &&
             MI.getOperand(2).getReg() == RISCV::X0;
    case RISCV::XORI:
      // c.not
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isImm() &&
             MI.getOperand(2).getImm() == -1;
    }
  };

  // Returns true if this operand is compressible. For non-registers it always
  // returns true. Immediate range was already checked in isCompressible.
  // For registers, it checks if the register is a GPRC register. Reg-reg
  // instructions that require GPRC need all register operands to be GPRC.
  auto isCompressibleOpnd = [&](const MachineOperand &MO) {
    if (!MO.isReg())
      return true;
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
  };

  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
    const MachineInstr &MI = *MO.getParent();
    unsigned OpIdx = MO.getOperandNo();
    bool NeedGPRC;
    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        if (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
            MI.getOpcode() == RISCV::ADD_UW ||
            isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 && (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
                                isCompressibleOpnd(MI.getOperand(2)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      }
    }
  }

  for (MCPhysReg OrderReg : Order)
    if (TwoAddrHints.count(OrderReg))
      Hints.push_back(OrderReg);

  return BaseImplRetVal;
}