//===-- RISCVRegisterInfo.cpp - RISC-V Register Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISC-V implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVRegisterInfo.h"
#include "RISCV.h"
#include "RISCVSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/ErrorHandling.h"

#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"

using namespace llvm;

static cl::opt<bool> DisableCostPerUse("riscv-disable-cost-per-use",
                                       cl::init(false), cl::Hidden);
static cl::opt<bool>
    DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
                         cl::init(false),
                         cl::desc("Disable two address hints for register "
                                  "allocation"));
static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_Q == RISCV::F0_Q + 1, "Register list not consecutive");
static_assert(RISCV::F31_Q == RISCV::F0_Q + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");
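// Note: code below relies on arithmetic over the generated register
// enumerators (e.g. RISCV::V0 + Encoding in findVRegWithEncoding and the
// X16_H-X31_H loop in getReservedRegs), which is only sound because the
// register lists are consecutive, as checked above.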

RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
    : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
                           /*PC*/0, HwMode) {}

const MCPhysReg *
RISCVRegisterInfo::getIPRACSRegs(const MachineFunction *MF) const {
  return CSR_IPRA_SaveList;
}

const MCPhysReg *
RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return Subtarget.hasStdExtE() ? CSR_RT_MostRegs_RVE_SaveList
                                  : CSR_RT_MostRegs_SaveList;
  if (MF->getFunction().hasFnAttribute("interrupt")) {
    if (Subtarget.hasVInstructions()) {
      if (Subtarget.hasStdExtD())
        return Subtarget.hasStdExtE() ? CSR_XLEN_F64_V_Interrupt_RVE_SaveList
                                      : CSR_XLEN_F64_V_Interrupt_SaveList;
      if (Subtarget.hasStdExtF())
        return Subtarget.hasStdExtE() ? CSR_XLEN_F32_V_Interrupt_RVE_SaveList
                                      : CSR_XLEN_F32_V_Interrupt_SaveList;
      return Subtarget.hasStdExtE() ? CSR_XLEN_V_Interrupt_RVE_SaveList
                                    : CSR_XLEN_V_Interrupt_SaveList;
    }
    if (Subtarget.hasStdExtD())
      return Subtarget.hasStdExtE() ? CSR_XLEN_F64_Interrupt_RVE_SaveList
                                    : CSR_XLEN_F64_Interrupt_SaveList;
    if (Subtarget.hasStdExtF())
      return Subtarget.hasStdExtE() ? CSR_XLEN_F32_Interrupt_RVE_SaveList
                                    : CSR_XLEN_F32_Interrupt_SaveList;
    return Subtarget.hasStdExtE() ? CSR_Interrupt_RVE_SaveList
                                  : CSR_Interrupt_SaveList;
  }

  bool HasVectorCSR =
      MF->getFunction().getCallingConv() == CallingConv::RISCV_VectorCall &&
      Subtarget.hasVInstructions();

  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64E:
    return CSR_ILP32E_LP64E_SaveList;
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    if (HasVectorCSR)
      return CSR_ILP32_LP64_V_SaveList;
    return CSR_ILP32_LP64_SaveList;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    if (HasVectorCSR)
      return CSR_ILP32F_LP64F_V_SaveList;
    return CSR_ILP32F_LP64F_SaveList;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    if (HasVectorCSR)
      return CSR_ILP32D_LP64D_V_SaveList;
    return CSR_ILP32D_LP64D_SaveList;
  }
}

BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  BitVector Reserved(getNumRegs());
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    // Mark any GPRs requested to be reserved as such.
    if (Subtarget.isRegisterReservedByUser(Reg))
      markSuperRegs(Reserved, Reg);

    // Mark all the registers defined as constant in TableGen as reserved.
    if (isConstantPhysReg(Reg))
      markSuperRegs(Reserved, Reg);
  }

  // Use markSuperRegs to ensure any register aliases are also reserved.
  markSuperRegs(Reserved, RISCV::X2_H); // sp
  markSuperRegs(Reserved, RISCV::X3_H); // gp
  markSuperRegs(Reserved, RISCV::X4_H); // tp
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, RISCV::X8_H); // fp
  // Reserve the base register if we need to realign the stack and allocate
  // variable-sized objects at runtime.
  if (TFI->hasBP(MF))
    markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp

  // Additionally reserve the dummy register used to form the register pair
  // beginning with 'x0' for instructions that take register pairs.
  markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);

  // There are only 16 GPRs for RVE.
  if (Subtarget.hasStdExtE())
    for (MCPhysReg Reg = RISCV::X16_H; Reg <= RISCV::X31_H; Reg++)
      markSuperRegs(Reserved, Reg);

  // V registers for code generation. We handle them manually.
  markSuperRegs(Reserved, RISCV::VL);
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);

  // Floating point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  // SiFive VCIX state registers.
  markSuperRegs(Reserved, RISCV::SF_VCIX_STATE);

  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    if (Subtarget.hasStdExtE())
      reportFatalUsageError("Graal reserved registers do not exist in RVE");
    markSuperRegs(Reserved, RISCV::X23_H);
    markSuperRegs(Reserved, RISCV::X27_H);
  }

  // Shadow stack pointer.
  markSuperRegs(Reserved, RISCV::SSP);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                         MCRegister PhysReg) const {
  return !MF.getSubtarget().isRegisterReservedByUser(PhysReg);
}

const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator II,
                                  const DebugLoc &DL, Register DestReg,
                                  Register SrcReg, StackOffset Offset,
                                  MachineInstr::MIFlag Flag,
                                  MaybeAlign RequiredAlign) const {

  if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
    return;

  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = ST.getInstrInfo();

  // Optimize the case where the offset is a compile-time constant.
  if (Offset.getScalable()) {
    if (auto VLEN = ST.getRealVLen()) {
      // 1. Multiply the number of v-slots by the (constant) length of register
      const int64_t VLENB = *VLEN / 8;
      assert(Offset.getScalable() % RISCV::RVVBytesPerBlock == 0 &&
             "Reserve the stack by the multiple of one vector size.");
      const int64_t NumOfVReg = Offset.getScalable() / 8;
      const int64_t FixedOffset = NumOfVReg * VLENB;
      if (!isInt<32>(FixedOffset)) {
        reportFatalUsageError(
            "Frame size outside of the signed 32-bit range not supported");
      }
      Offset = StackOffset::getFixed(FixedOffset + Offset.getFixed());
    }
  }
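  // Worked example for the folding above: with a known VLEN of 128 bits,
  // VLENB is 16 bytes, so a scalable offset of 16 (two vector registers;
  // one register is RVVBytesPerBlock = 8 scalable bytes) folds into a fixed
  // offset of 2 * 16 = 32 bytes.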

  bool KillSrcReg = false;

  if (Offset.getScalable()) {
    unsigned ScalableAdjOpc = RISCV::ADD;
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // Get vlenb and multiply vlen with the number of vector registers.
    Register ScratchReg = DestReg;
    if (DestReg == SrcReg)
      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    assert(ScalableValue > 0 && "There is no need to get VLEN scaled value.");
    assert(ScalableValue % RISCV::RVVBytesPerBlock == 0 &&
           "Reserve the stack by the multiple of one vector size.");
    assert(isInt<32>(ScalableValue / RISCV::RVVBytesPerBlock) &&
           "Expect the number of vector registers within 32-bits.");
    uint32_t NumOfVReg = ScalableValue / RISCV::RVVBytesPerBlock;
    // Only use vsetvli rather than vlenb if adjusting in the prologue or
    // epilogue, otherwise it may disturb the VTYPE and VL status.
    bool IsPrologueOrEpilogue =
        Flag == MachineInstr::FrameSetup || Flag == MachineInstr::FrameDestroy;
    bool UseVsetvliRatherThanVlenb =
        IsPrologueOrEpilogue && ST.preferVsetvliOverReadVLENB();
    if (UseVsetvliRatherThanVlenb && (NumOfVReg == 1 || NumOfVReg == 2 ||
                                      NumOfVReg == 4 || NumOfVReg == 8)) {
      BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENBViaVSETVLIX0),
              ScratchReg)
          .addImm(NumOfVReg)
          .setMIFlag(Flag);
      BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
          .addReg(SrcReg)
          .addReg(ScratchReg, RegState::Kill)
          .setMIFlag(Flag);
    } else {
      if (UseVsetvliRatherThanVlenb)
        BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENBViaVSETVLIX0),
                ScratchReg)
            .addImm(1)
            .setMIFlag(Flag);
      else
        BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), ScratchReg)
            .setMIFlag(Flag);

      if (ScalableAdjOpc == RISCV::ADD && ST.hasStdExtZba() &&
          (NumOfVReg == 2 || NumOfVReg == 4 || NumOfVReg == 8)) {
        unsigned Opc = NumOfVReg == 2
                           ? RISCV::SH1ADD
                           : (NumOfVReg == 4 ? RISCV::SH2ADD : RISCV::SH3ADD);
        BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
            .addReg(ScratchReg, RegState::Kill)
            .addReg(SrcReg)
            .setMIFlag(Flag);
      } else {
        TII->mulImm(MF, MBB, II, DL, ScratchReg, NumOfVReg, Flag);
        BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
            .addReg(SrcReg)
            .addReg(ScratchReg, RegState::Kill)
            .setMIFlag(Flag);
      }
    }
    SrcReg = DestReg;
    KillSrcReg = true;
  }
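  // E.g. an adjustment of three vector registers outside a prologue/epilogue
  // lowers to: PseudoReadVLENB scratch; multiply scratch by 3 (via mulImm);
  // ADD dest, src, scratch. For counts of 2, 4 or 8 with Zba, the multiply
  // folds into a single shNadd above.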

  int64_t Val = Offset.getFixed();
  if (DestReg == SrcReg && Val == 0)
    return;

  const uint64_t Align = RequiredAlign.valueOrOne().value();

  if (isInt<12>(Val)) {
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Use the QC_E_ADDI instruction from the Xqcilia extension that can take a
  // signed 26-bit immediate.
  if (ST.hasVendorXqcilia() && isInt<26>(Val)) {
    // The one case where using this instruction is sub-optimal is if Val can
    // be materialized with a single compressible LUI and the following add/sub
    // is also compressible. Avoid doing this if that is the case.
    int Hi20 = (Val & 0xFFFFF000) >> 12;
    bool IsCompressLUI =
        ((Val & 0xFFF) == 0) && (Hi20 != 0) &&
        (isUInt<5>(Hi20) || (Hi20 >= 0xfffe0 && Hi20 <= 0xfffff));
    bool IsCompressAddSub =
        (SrcReg == DestReg) &&
        ((Val > 0 && RISCV::GPRNoX0RegClass.contains(SrcReg)) ||
         (Val < 0 && RISCV::GPRCRegClass.contains(SrcReg)));

    if (!(IsCompressLUI && IsCompressAddSub)) {
      BuildMI(MBB, II, DL, TII->get(RISCV::QC_E_ADDI), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrcReg))
          .addImm(Val)
          .setMIFlag(Flag);
      return;
    }
  }

  // Try to split the offset across two ADDIs. We need to keep the intermediate
  // result aligned after each ADDI. We need to determine the maximum value we
  // can put in each ADDI. In the negative direction, we can use -2048 which is
  // always sufficiently aligned. In the positive direction, we need to find the
  // largest 12-bit immediate that is aligned. Exclude -4096 since it can be
  // created with LUI.
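  // E.g. for Val = 3000 with a required alignment of 16: MaxPosAdjStep is
  // 2048 - 16 = 2032, so we emit ADDI +2032 followed by ADDI +968, keeping
  // the intermediate result 16-byte aligned.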
  assert(Align < 2048 && "Required alignment too large");
  int64_t MaxPosAdjStep = 2048 - Align;
  if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
    int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
    Val -= FirstAdj;
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(FirstAdj)
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Use shNadd if doing so lets us materialize a 12-bit immediate with a single
  // instruction. This saves 1 instruction over the full lui/addi+add fallback
  // path. We avoid anything which can be done with a single lui as it might
  // be compressible. Note that the sh1add case is fully covered by the 2x addi
  // case just above and is thus omitted.
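  // E.g. Val = 8180 = 2045 << 2: materialize 2045 with a single ADDI, then
  // SH2ADD computes (2045 << 2) + SrcReg, two instructions instead of the
  // three-instruction lui+addi+add sequence.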
  if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) {
    unsigned Opc = 0;
    if (isShiftedInt<12, 3>(Val)) {
      Opc = RISCV::SH3ADD;
      Val = Val >> 3;
    } else if (isShiftedInt<12, 2>(Val)) {
      Opc = RISCV::SH2ADD;
      Val = Val >> 2;
    }
    if (Opc) {
      Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
      BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
          .addReg(ScratchReg, RegState::Kill)
          .addReg(SrcReg, getKillRegState(KillSrcReg))
          .setMIFlag(Flag);
      return;
    }
  }

  unsigned Opc = RISCV::ADD;
  if (Val < 0) {
    Val = -Val;
    Opc = RISCV::SUB;
  }

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
  BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrcReg))
      .addReg(ScratchReg, RegState::Kill)
      .setMIFlag(Flag);
}

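// Pick the largest whole vector register move (VS8R/VS4R/VS2R/VS1R for
// spills, or the corresponding VL*RE8 loads) that both fits in the registers
// still to be handled and is legal for the alignment of the current register
// encoding. E.g. six registers starting at v2 are handled as v2-v3 (LMUL 2)
// followed by v4-v7 (LMUL 4).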
static std::tuple<RISCVVType::VLMUL, const TargetRegisterClass &, unsigned>
getSpillReloadInfo(unsigned NumRemaining, uint16_t RegEncoding, bool IsSpill) {
  if (NumRemaining >= 8 && RegEncoding % 8 == 0)
    return {RISCVVType::LMUL_8, RISCV::VRM8RegClass,
            IsSpill ? RISCV::VS8R_V : RISCV::VL8RE8_V};
  if (NumRemaining >= 4 && RegEncoding % 4 == 0)
    return {RISCVVType::LMUL_4, RISCV::VRM4RegClass,
            IsSpill ? RISCV::VS4R_V : RISCV::VL4RE8_V};
  if (NumRemaining >= 2 && RegEncoding % 2 == 0)
    return {RISCVVType::LMUL_2, RISCV::VRM2RegClass,
            IsSpill ? RISCV::VS2R_V : RISCV::VL2RE8_V};
  return {RISCVVType::LMUL_1, RISCV::VRRegClass,
          IsSpill ? RISCV::VS1R_V : RISCV::VL1RE8_V};
}

// Split a VSPILLx_Mx/VRELOADx_Mx pseudo into multiple whole register stores
// or loads separated by LMUL*VLENB bytes.
void RISCVRegisterInfo::lowerSegmentSpillReload(MachineBasicBlock::iterator II,
                                                bool IsSpill) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  unsigned NumRegs = NF * LMUL;
  assert(NumRegs <= 8 && "Invalid NF/LMUL combinations.");

  Register Reg = II->getOperand(0).getReg();
  uint16_t RegEncoding = TRI->getEncodingValue(Reg);
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  auto *OldMMO = *(II->memoperands_begin());
  LocationSize OldLoc = OldMMO->getSize();
  assert(OldLoc.isPrecise() && OldLoc.getValue().isKnownMultipleOf(NF));
  TypeSize VRegSize = OldLoc.getValue().divideCoefficientBy(NumRegs);

  Register VLENB = 0;
  unsigned PreHandledNum = 0;
  unsigned I = 0;
  while (I != NumRegs) {
    auto [LMulHandled, RegClass, Opcode] =
        getSpillReloadInfo(NumRegs - I, RegEncoding, IsSpill);
    auto [RegNumHandled, _] = RISCVVType::decodeVLMUL(LMulHandled);
    bool IsLast = I + RegNumHandled == NumRegs;
    if (PreHandledNum) {
      Register Step;
      // Optimize for constant VLEN.
      if (auto VLEN = STI.getRealVLen()) {
        int64_t Offset = *VLEN / 8 * PreHandledNum;
        Step = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        STI.getInstrInfo()->movImm(MBB, II, DL, Step, Offset);
      } else {
        if (!VLENB) {
          VLENB = MRI.createVirtualRegister(&RISCV::GPRRegClass);
          BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VLENB);
        }
        uint32_t ShiftAmount = Log2_32(PreHandledNum);
        if (ShiftAmount == 0)
          Step = VLENB;
        else {
          Step = MRI.createVirtualRegister(&RISCV::GPRRegClass);
          BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), Step)
              .addReg(VLENB, getKillRegState(IsLast))
              .addImm(ShiftAmount);
        }
      }

      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(Step, getKillRegState(Step != VLENB || IsLast));
      Base = NewBase;
    }

    MCRegister ActualReg = findVRegWithEncoding(RegClass, RegEncoding);
    MachineInstrBuilder MIB =
        BuildMI(MBB, II, DL, TII->get(Opcode))
            .addReg(ActualReg, getDefRegState(!IsSpill))
            .addReg(Base, getKillRegState(IsLast))
            .addMemOperand(MF.getMachineMemOperand(OldMMO, OldMMO->getOffset(),
                                                   VRegSize * RegNumHandled));

    // Add an implicit use of the super register to indicate that only part of
    // it is used. This prevents the machine verifier from complaining when
    // part of the subreg is undef; see the comment in
    // MachineVerifier::checkLiveness for more detail.
    if (IsSpill)
      MIB.addReg(Reg, RegState::Implicit);

    PreHandledNum = RegNumHandled;
    RegEncoding += RegNumHandled;
    I += RegNumHandled;
  }
  II->eraseFromParent();
}

bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;
  StackOffset Offset =
      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
  bool IsRVVSpill = RISCV::isRVVSpill(MI);
  if (!IsRVVSpill)
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());

  if (!isInt<32>(Offset.getFixed())) {
    reportFatalUsageError(
        "Frame offsets outside of the signed 32-bit range not supported");
  }

  if (!IsRVVSpill) {
    int64_t Val = Offset.getFixed();
    int64_t Lo12 = SignExtend64<12>(Val);
    unsigned Opc = MI.getOpcode();

    if (Opc == RISCV::ADDI && !isInt<12>(Val)) {
      // We chose to emit the canonical immediate sequence rather than folding
      // the offset into the using add under the theory that doing so doesn't
      // save dynamic instruction count and some target may fuse the canonical
      // 32 bit immediate sequence. We still need to clear the portion of the
      // offset encoded in the immediate.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else if ((Opc == RISCV::PREFETCH_I || Opc == RISCV::PREFETCH_R ||
                Opc == RISCV::PREFETCH_W) &&
               (Lo12 & 0b11111) != 0) {
      // Prefetch instructions require the offset to be 32-byte aligned.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else if (Opc == RISCV::MIPS_PREF && !isUInt<9>(Val)) {
      // MIPS prefetch instructions require the offset to fit in 9 bits.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
                Opc == RISCV::PseudoRV32ZdinxSD) &&
               Lo12 >= 2044) {
      // This instruction will be split into 2 instructions. The second
      // instruction will add 4 to the immediate. If that would overflow 12
      // bits, we can't fold the offset.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else {
      // We can encode an add with a 12-bit signed immediate in the immediate
      // operand of our user instruction. As a result, the remaining offset
      // can, by construction, be materialized with at worst an LUI and an ADD.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
      Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
                                Offset.getScalable());
    }
  }
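  // E.g. in the final case above, a fixed offset of 6000 keeps Lo12 = 1904 in
  // the instruction's immediate operand; the remaining 4096 is materialized
  // into a register by adjustReg below.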

  if (Offset.getScalable() || Offset.getFixed()) {
    Register DestReg;
    if (MI.getOpcode() == RISCV::ADDI)
      DestReg = MI.getOperand(0).getReg();
    else
      DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
              MachineInstr::NoFlags, std::nullopt);
    MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/true);
  } else {
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/false);
  }

  // If after materializing the adjustment, we have a pointless ADDI, remove it
  if (MI.getOpcode() == RISCV::ADDI &&
      MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getOperand(2).getImm() == 0) {
    MI.eraseFromParent();
    return true;
  }

  // Handle spill/fill of synthetic register classes for segment operations to
  // ensure correctness in the edge case one gets spilled.
  switch (MI.getOpcode()) {
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVSPILL8_M1:
    lowerSegmentSpillReload(II, /*IsSpill=*/true);
    return true;
  case RISCV::PseudoVRELOAD2_M1:
  case RISCV::PseudoVRELOAD2_M2:
  case RISCV::PseudoVRELOAD2_M4:
  case RISCV::PseudoVRELOAD3_M1:
  case RISCV::PseudoVRELOAD3_M2:
  case RISCV::PseudoVRELOAD4_M1:
  case RISCV::PseudoVRELOAD4_M2:
  case RISCV::PseudoVRELOAD5_M1:
  case RISCV::PseudoVRELOAD6_M1:
  case RISCV::PseudoVRELOAD7_M1:
  case RISCV::PseudoVRELOAD8_M1:
    lowerSegmentSpillReload(II, /*IsSpill=*/false);
    return true;
  }

  return false;
}

bool RISCVRegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

// Returns true if the instruction's frame index reference would be better
// served by a base register other than FP or SP.
// Used by LocalStackSlotAllocation pass to determine which frame index
// references it should create new base registers for.
bool RISCVRegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  // For RISC-V, the machine instructions that include a FrameIndex operand
  // are loads, stores, and ADDI instructions.
  unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
  if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
    return false;
  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  const MachineFunction &MF = *MI->getMF();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  if (TFI->hasFP(MF) && !shouldRealignStack(MF)) {
    auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
    // Estimate the stack size used to store callee saved registers
    // (excluding reserved registers).
    unsigned CalleeSavedSize = 0;
    for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R;
         ++R) {
      if (Subtarget.isRegisterReservedByUser(Reg))
        continue;

      if (RISCV::GPRRegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::GPRRegClass);
      else if (RISCV::FPR64RegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::FPR64RegClass);
      else if (RISCV::FPR32RegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::FPR32RegClass);
      // Ignore vector registers.
    }

    int64_t MaxFPOffset = Offset - CalleeSavedSize;
    return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);
  }

  // Assume a spill slot size of 128 bytes to estimate the maximum possible
  // offset relative to the stack pointer.
  // FIXME: The 128 is copied from ARM. We should run some statistics and pick a
  // real one for RISC-V.
  int64_t MaxSPOffset = Offset + 128;
  MaxSPOffset += MFI.getLocalFrameSize();
  return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
}

// Determine whether a given base register plus offset immediate is
// encodable to resolve a frame index.
bool RISCVRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                           Register BaseReg,
                                           int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
  return isInt<12>(Offset);
}

// Insert defining instruction(s) for a pointer to FrameIdx before the
// insertion point.
// Return the materialized frame base register.
Register RISCVRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                         int FrameIdx,
                                                         int64_t Offset) const {
  MachineBasicBlock::iterator MBBI = MBB->begin();
  DebugLoc DL;
  if (MBBI != MBB->end())
    DL = MBBI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MFI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  Register BaseReg = MFI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset);
  return BaseReg;
}

// Resolve a frame index operand of an instruction to reference the
// indicated base register plus offset instead.
void RISCVRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
  // FrameIndex operands are always represented as a
  // register followed by an immediate.
  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

// Get the offset from the referenced frame index in the instruction,
// if there is one.
int64_t RISCVRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                    int Idx) const {
  assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
          RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
         "The MI must be I or S format.");
  assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
                                       "FrameIndex operand");
  return MI->getOperand(Idx + 1).getImm();
}

Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
}

StringRef RISCVRegisterInfo::getRegAsmName(MCRegister Reg) const {
  if (Reg == RISCV::SF_VCIX_STATE)
    return "sf.vcix_state";
  return TargetRegisterInfo::getRegAsmName(Reg);
}

const uint32_t *
RISCVRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID CC) const {
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  if (CC == CallingConv::GHC)
    return CSR_NoRegs_RegMask;
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if (CC == CallingConv::PreserveMost) {
    if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
      return CSR_RT_MostRegs_RVE_RegMask;
    return CSR_RT_MostRegs_RegMask;
  }
  switch (ABI) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64E:
    return CSR_ILP32E_LP64E_RegMask;
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    if (CC == CallingConv::RISCV_VectorCall && Subtarget.hasVInstructions())
      return CSR_ILP32_LP64_V_RegMask;
    return CSR_ILP32_LP64_RegMask;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    if (CC == CallingConv::RISCV_VectorCall && Subtarget.hasVInstructions())
      return CSR_ILP32F_LP64F_V_RegMask;
    return CSR_ILP32F_LP64F_RegMask;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    if (CC == CallingConv::RISCV_VectorCall && Subtarget.hasVInstructions())
      return CSR_ILP32D_LP64D_V_RegMask;
    return CSR_ILP32D_LP64D_RegMask;
  }
}

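// Map the constrained vector allocation classes (VMV0, which contains only
// v0 for mask operands, and the *NoV0 variants, which exclude it) back to
// the plain VR* classes, which are the largest legal superclasses with the
// same spill size.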
const TargetRegisterClass *
RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &) const {
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRNoV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRM2NoV0RegClass)
    return &RISCV::VRM2RegClass;
  if (RC == &RISCV::VRM4NoV0RegClass)
    return &RISCV::VRM4RegClass;
  if (RC == &RISCV::VRM8NoV0RegClass)
    return &RISCV::VRM8RegClass;
  return RC;
}

void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
                                         SmallVectorImpl<uint64_t> &Ops) const {
  // VLENB is the length of a vector register in bytes. We use
  // <vscale x 8 x i8> to represent one vector register. The dwarf offset is
  // VLENB * scalable_offset / 8.
  assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

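  // E.g. a scalable offset of +16 (two vector registers) appends
  // DW_OP_constu 2, DW_OP_bregx vlenb 0, DW_OP_mul, DW_OP_plus below,
  // i.e. base + 2 * VLENB.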
  unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
  int64_t VLENBSized = Offset.getScalable() / 8;
  if (VLENBSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VLENBSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}

unsigned
RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
  return MF.getSubtarget<RISCVSubtarget>().hasStdExtZca() && !DisableCostPerUse
             ? 1
             : 0;
}

float RISCVRegisterInfo::getSpillWeightScaleFactor(
    const TargetRegisterClass *RC) const {
  return getRegClassWeight(RC).RegWeight;
}

// Add two address hints to improve chances of being able to use a compressed
// instruction.
bool RISCVRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (!VRM || DisableRegAllocHints)
    return BaseImplRetVal;

  // Add any two address hints after any copy hints.
  SmallSet<Register, 4> TwoAddrHints;

  auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
                        bool NeedGPRC) -> void {
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    // TODO: Support GPRPair subregisters? Need to be careful with even/odd
    // registers. If the virtual register is an odd register of a pair and the
    // physical register is even (or vice versa), we should not add the hint.
    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg)) &&
        !MO.getSubReg() && !VRRegMO.getSubReg()) {
      if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
        TwoAddrHints.insert(PhysReg);
    }
  };
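  // Most compressed arithmetic is destructive: the destination must equal a
  // source, and for many opcodes both registers must be in x8-x15 (GPRC).
  // Hinting the allocator toward the physical register of the tied operand
  // makes the compressed encoding applicable.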

  // This is all of the compressible binary instructions. If an instruction
  // needs GPRC register class operands \p NeedGPRC will be set to true.
  auto isCompressible = [&Subtarget](const MachineInstr &MI, bool &NeedGPRC) {
    NeedGPRC = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    case RISCV::SUB:
    case RISCV::ADDW:
    case RISCV::SUBW:
      NeedGPRC = true;
      return true;
    case RISCV::ANDI: {
      NeedGPRC = true;
      if (!MI.getOperand(2).isImm())
        return false;
      int64_t Imm = MI.getOperand(2).getImm();
      if (isInt<6>(Imm))
        return true;
      // c.zext.b
      return Subtarget.hasStdExtZcb() && Imm == 255;
    }
    case RISCV::SRAI:
    case RISCV::SRLI:
      NeedGPRC = true;
      return true;
    case RISCV::ADD:
    case RISCV::SLLI:
      return true;
    case RISCV::ADDI:
    case RISCV::ADDIW:
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    case RISCV::MUL:
    case RISCV::SEXT_B:
    case RISCV::SEXT_H:
    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:
      // c.mul, c.sext.b, c.sext.h, c.zext.h
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb();
    case RISCV::ADD_UW:
      // c.zext.w
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isReg() &&
             MI.getOperand(2).getReg() == RISCV::X0;
    case RISCV::XORI:
      // c.not
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isImm() &&
             MI.getOperand(2).getImm() == -1;
    }
  };

  // Returns true if this operand is compressible. For non-registers it always
  // returns true. Immediate range was already checked in isCompressible.
  // For registers, it checks if the register is a GPRC register. reg-reg
  // instructions that require GPRC need all register operands to be GPRC.
  auto isCompressibleOpnd = [&](const MachineOperand &MO) {
    if (!MO.isReg())
      return true;
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
  };

  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
    const MachineInstr &MI = *MO.getParent();
    unsigned OpIdx = MO.getOperandNo();
    bool NeedGPRC;
    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        if (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
            MI.getOpcode() == RISCV::ADD_UW ||
            isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 && (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
                                isCompressibleOpnd(MI.getOperand(2)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      }
    }

    // Add a hint if it would allow auipc/lui+addi(w) fusion. We do this even
    // without the fusions explicitly enabled as the impact is rarely negative
    // and some cores do implement this fusion.
    if ((MI.getOpcode() == RISCV::ADDIW || MI.getOpcode() == RISCV::ADDI) &&
        MI.getOperand(1).isReg()) {
      const MachineBasicBlock &MBB = *MI.getParent();
      MachineBasicBlock::const_iterator I = MI.getIterator();
      // Is the previous instruction a LUI or AUIPC that can be fused?
      if (I != MBB.begin()) {
        I = skipDebugInstructionsBackward(std::prev(I), MBB.begin());
        if ((I->getOpcode() == RISCV::LUI || I->getOpcode() == RISCV::AUIPC) &&
            I->getOperand(0).getReg() == MI.getOperand(1).getReg()) {
          if (OpIdx == 0)
            tryAddHint(MO, MI.getOperand(1), /*NeedGPRC=*/false);
          else
            tryAddHint(MO, MI.getOperand(0), /*NeedGPRC=*/false);
        }
      }
    }
  }

  for (MCPhysReg OrderReg : Order)
    if (TwoAddrHints.count(OrderReg))
      Hints.push_back(OrderReg);

  return BaseImplRetVal;
}

Register
RISCVRegisterInfo::findVRegWithEncoding(const TargetRegisterClass &RegClass,
                                        uint16_t Encoding) const {
  MCRegister Reg = RISCV::V0 + Encoding;
  if (&RegClass == &RISCV::VRRegClass)
    return Reg;
  return getMatchingSuperReg(Reg, RISCV::sub_vrm1_0, &RegClass);
}