LLVM 15.0.0git — RISCVInstrInfo.cpp (doxygen-rendered source listing; some original lines were lost in extraction)
1 //===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the RISCV implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVInstrInfo.h"
15 #include "RISCV.h"
17 #include "RISCVSubtarget.h"
18 #include "RISCVTargetMachine.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/MC/MCInstBuilder.h"
29 #include "llvm/MC/TargetRegistry.h"
31 
32 using namespace llvm;
33 
34 #define GEN_CHECK_COMPRESS_INSTR
35 #include "RISCVGenCompressInstEmitter.inc"
36 
37 #define GET_INSTRINFO_CTOR_DTOR
38 #define GET_INSTRINFO_NAMED_OPS
39 #include "RISCVGenInstrInfo.inc"
40 
// Hidden command-line flag: when set, keep whole-register vector moves
// (vmv<N>r.v) instead of converting COPYs to vmv.v.v.
// NOTE(review): the opening `static cl::opt<bool> PreferWholeRegisterMove(`
// line is missing from this rendered listing — confirm against the original.
42  "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
43  cl::desc("Prefer whole register move for vector registers."));
44 
45 namespace llvm {
46 namespace RISCVVPseudosTable {
47 
48 using namespace RISCV;
49 
// Materialize the RVV pseudo-instruction lookup table generated by TableGen
// from RISCVGenSearchableTables.inc.
50 #define GET_RISCVVPseudosTable_IMPL
51 #include "RISCVGenSearchableTables.inc"
52 
53 } // namespace RISCVVPseudosTable
54 } // namespace llvm
55 
// RISCVInstrInfo constructor (signature line missing from this listing):
// registers the call-frame setup/destroy pseudo opcodes with the generated
// base class and stashes the subtarget reference.
57  : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
58  STI(STI) {}
59 
// getNop() body (signature line missing from this listing): prefer the
// compressed c.nop when the C extension is available, otherwise emit the
// canonical `addi x0, x0, 0` NOP.
61  if (STI.getFeatureBits()[RISCV::FeatureStdExtC])
62  return MCInstBuilder(RISCV::C_NOP);
63  return MCInstBuilder(RISCV::ADDI)
64  .addReg(RISCV::X0)
65  .addReg(RISCV::X0)
66  .addImm(0);
67 }
68 
// isLoadFromStackSlot body (opening signature line missing from this
// listing): if MI is a plain load whose address is a frame index with a
// zero offset, report that frame index and return the loaded register;
// otherwise return 0 (no register).
70  int &FrameIndex) const {
71  switch (MI.getOpcode()) {
72  default:
73  return 0;
74  case RISCV::LB:
75  case RISCV::LBU:
76  case RISCV::LH:
77  case RISCV::LHU:
78  case RISCV::FLH:
79  case RISCV::LW:
80  case RISCV::FLW:
81  case RISCV::LWU:
82  case RISCV::LD:
83  case RISCV::FLD:
84  break;
85  }
86 
// Only the simple reg+0(frame-index) addressing form qualifies.
87  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
88  MI.getOperand(2).getImm() == 0) {
89  FrameIndex = MI.getOperand(1).getIndex();
90  return MI.getOperand(0).getReg();
91  }
92 
93  return 0;
94 }
95 
// isStoreToStackSlot body (opening signature line missing from this
// listing): mirror of isLoadFromStackSlot for stores — recognize a store
// to frame-index+0 and return the stored source register, else 0.
97  int &FrameIndex) const {
98  switch (MI.getOpcode()) {
99  default:
100  return 0;
101  case RISCV::SB:
102  case RISCV::SH:
103  case RISCV::SW:
104  case RISCV::FSH:
105  case RISCV::FSW:
106  case RISCV::SD:
107  case RISCV::FSD:
108  break;
109  }
110 
111  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
112  MI.getOperand(2).getImm() == 0) {
113  FrameIndex = MI.getOperand(1).getIndex();
114  return MI.getOperand(0).getReg();
115  }
116 
117  return 0;
118 }
119 
/// Returns true when copying a register tuple of \p NumRegs registers in
/// forward order would overwrite source registers before they are read.
/// That happens exactly when the destination range starts strictly above the
/// source range and the two ranges overlap.
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  if (DstReg <= SrcReg)
    return false;
  return DstReg - SrcReg < NumRegs;
}
124 
// Decide whether a whole-register vector COPY can be lowered to vmv.v.v by
// scanning backwards from the COPY for the defining instruction and the
// governing vsetvli, checking that LMUL/SEW/tail policy make the element-wise
// move equivalent to the whole-register move.
// NOTE(review): the MBBI/DefMBBI parameter lines and the early-out condition
// guarding the first `return false` are missing from this rendered listing —
// confirm against the original file.
125 static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
126  const MachineBasicBlock &MBB,
129  RISCVII::VLMUL LMul) {
131  return false;
132 
133  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
134  "Unexpected COPY instruction.");
135  Register SrcReg = MBBI->getOperand(1).getReg();
136  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
137 
138  bool FoundDef = false;
139  bool FirstVSetVLI = false;
140  unsigned FirstSEW = 0;
// Walk backwards from the COPY towards the start of the block.
141  while (MBBI != MBB.begin()) {
142  --MBBI;
143  if (MBBI->isMetaInstruction())
144  continue;
145 
146  if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
147  MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
148  MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
149  // There is a vsetvli between COPY and source define instruction.
150  // vy = def_vop ... (producing instruction)
151  // ...
152  // vsetvli
153  // ...
154  // vx = COPY vy
155  if (!FoundDef) {
156  if (!FirstVSetVLI) {
157  FirstVSetVLI = true;
158  unsigned FirstVType = MBBI->getOperand(2).getImm();
159  RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
160  FirstSEW = RISCVVType::getSEW(FirstVType);
161  // The first encountered vsetvli must have the same lmul as the
162  // register class of COPY.
163  if (FirstLMul != LMul)
164  return false;
165  }
166  // Only permit `vsetvli x0, x0, vtype` between COPY and the source
167  // define instruction.
168  if (MBBI->getOperand(0).getReg() != RISCV::X0)
169  return false;
170  if (MBBI->getOperand(1).isImm())
171  return false;
172  if (MBBI->getOperand(1).getReg() != RISCV::X0)
173  return false;
174  continue;
175  }
176 
177  // MBBI is the first vsetvli before the producing instruction.
178  unsigned VType = MBBI->getOperand(2).getImm();
179  // If there is a vsetvli between COPY and the producing instruction.
180  if (FirstVSetVLI) {
181  // If SEW is different, return false.
182  if (RISCVVType::getSEW(VType) != FirstSEW)
183  return false;
184  }
185 
186  // If the vsetvli is tail undisturbed, keep the whole register move.
187  if (!RISCVVType::isTailAgnostic(VType))
188  return false;
189 
190  // The checking is conservative. We only have register classes for
191  // LMUL = 1/2/4/8. We should be able to convert vmv1r.v to vmv.v.v
192  // for fractional LMUL operations. However, we could not use the vsetvli
193  // lmul for widening operations. The result of widening operation is
194  // 2 x LMUL.
195  return LMul == RISCVVType::getVLMUL(VType);
196  } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
197  return false;
198  } else if (MBBI->getNumDefs()) {
199  // Check all the instructions which will change VL.
200  // For example, vleff has implicit def VL.
201  if (MBBI->modifiesRegister(RISCV::VL))
202  return false;
203 
204  // Only converting whole register copies to vmv.v.v when the defining
205  // value appears in the explicit operands.
206  for (const MachineOperand &MO : MBBI->explicit_operands()) {
207  if (!MO.isReg() || !MO.isDef())
208  continue;
209  if (!FoundDef && TRI->isSubRegisterEq(MO.getReg(), SrcReg)) {
210  // We only permit the source of COPY has the same LMUL as the defined
211  // operand.
212  // There are cases we need to keep the whole register copy if the LMUL
213  // is different.
214  // For example,
215  // $x0 = PseudoVSETIVLI 4, 73 // vsetivli zero, 4, e16,m2,ta,m
216  // $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2
217  // # The COPY may be created by vlmul_trunc intrinsic.
218  // $v26m2 = COPY renamable $v28m2, implicit killed $v28m4
219  //
220  // After widening, the valid value will be 4 x e32 elements. If we
221  // convert the COPY to vmv.v.v, it will only copy 4 x e16 elements.
222  // FIXME: The COPY of subregister of Zvlsseg register will not be able
223  // to convert to vmv.v.[v|i] under the constraint.
224  if (MO.getReg() != SrcReg)
225  return false;
226 
227  // In widening reduction instructions with LMUL_1 input vector case,
228  // only checking the LMUL is insufficient due to reduction result is
229  // always LMUL_1.
230  // For example,
231  // $x11 = PseudoVSETIVLI 1, 64 // vsetivli a1, 1, e8, m1, ta, mu
232  // $v8m1 = PseudoVWREDSUM_VS_M1 $v26, $v27
233  // $v26 = COPY killed renamable $v8
234  // After widening, The valid value will be 1 x e16 elements. If we
235  // convert the COPY to vmv.v.v, it will only copy 1 x e8 elements.
236  uint64_t TSFlags = MBBI->getDesc().TSFlags;
237  if (RISCVII::isRVVWideningReduction(TSFlags))
238  return false;
239 
240  // Found the definition.
241  FoundDef = true;
242  DefMBBI = MBBI;
243  // If the producing instruction does not depend on vsetvli, do not
244  // convert COPY to vmv.v.v. For example, VL1R_V or PseudoVRELOAD.
245  if (!RISCVII::hasSEWOp(TSFlags))
246  return false;
247  break;
248  }
249  }
250  }
251  }
252 
// No qualifying definition/vsetvli pair found before the block start.
253  return false;
254 }
255 
// copyPhysReg body (opening signature lines truncated in this listing):
// emits GPR copies as `addi rd, rs, 0`, vector-CSR reads via csrrs, FP
// copies as the canonical `fsgnj rd, rs, rs`, and vector copies as
// vmv<N>r.v — downgraded to vmv.v.v / vmv.v.i when isConvertibleToVMV_V_V
// proves that is equivalent.  Zvlsseg tuples (NF > 1) are copied one
// sub-register at a time, iterating backwards when a forward copy would
// clobber overlapping source registers.
258  const DebugLoc &DL, MCRegister DstReg,
259  MCRegister SrcReg, bool KillSrc) const {
260  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
261  BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
262  .addReg(SrcReg, getKillRegState(KillSrc))
263  .addImm(0);
264  return;
265  }
266 
267  // Handle copy from csr
268  if (RISCV::VCSRRegClass.contains(SrcReg) &&
269  RISCV::GPRRegClass.contains(DstReg)) {
271  BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
273  .addReg(RISCV::X0);
274  return;
275  }
276 
277  // FPR->FPR copies and VR->VR copies.
278  unsigned Opc;
279  bool IsScalableVector = true;
280  unsigned NF = 1;
282  unsigned SubRegIdx = RISCV::sub_vrm1_0;
283  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
284  Opc = RISCV::FSGNJ_H;
285  IsScalableVector = false;
286  } else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
287  Opc = RISCV::FSGNJ_S;
288  IsScalableVector = false;
289  } else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
290  Opc = RISCV::FSGNJ_D;
291  IsScalableVector = false;
292  } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
293  Opc = RISCV::PseudoVMV1R_V;
294  LMul = RISCVII::LMUL_1;
295  } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
296  Opc = RISCV::PseudoVMV2R_V;
297  LMul = RISCVII::LMUL_2;
298  } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
299  Opc = RISCV::PseudoVMV4R_V;
300  LMul = RISCVII::LMUL_4;
301  } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
302  Opc = RISCV::PseudoVMV8R_V;
303  LMul = RISCVII::LMUL_8;
304  } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
305  Opc = RISCV::PseudoVMV1R_V;
306  SubRegIdx = RISCV::sub_vrm1_0;
307  NF = 2;
308  LMul = RISCVII::LMUL_1;
309  } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
310  Opc = RISCV::PseudoVMV2R_V;
311  SubRegIdx = RISCV::sub_vrm2_0;
312  NF = 2;
313  LMul = RISCVII::LMUL_2;
314  } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
315  Opc = RISCV::PseudoVMV4R_V;
316  SubRegIdx = RISCV::sub_vrm4_0;
317  NF = 2;
318  LMul = RISCVII::LMUL_4;
319  } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
320  Opc = RISCV::PseudoVMV1R_V;
321  SubRegIdx = RISCV::sub_vrm1_0;
322  NF = 3;
323  LMul = RISCVII::LMUL_1;
324  } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
325  Opc = RISCV::PseudoVMV2R_V;
326  SubRegIdx = RISCV::sub_vrm2_0;
327  NF = 3;
328  LMul = RISCVII::LMUL_2;
329  } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
330  Opc = RISCV::PseudoVMV1R_V;
331  SubRegIdx = RISCV::sub_vrm1_0;
332  NF = 4;
333  LMul = RISCVII::LMUL_1;
334  } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
335  Opc = RISCV::PseudoVMV2R_V;
336  SubRegIdx = RISCV::sub_vrm2_0;
337  NF = 4;
338  LMul = RISCVII::LMUL_2;
339  } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
340  Opc = RISCV::PseudoVMV1R_V;
341  SubRegIdx = RISCV::sub_vrm1_0;
342  NF = 5;
343  LMul = RISCVII::LMUL_1;
344  } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
345  Opc = RISCV::PseudoVMV1R_V;
346  SubRegIdx = RISCV::sub_vrm1_0;
347  NF = 6;
348  LMul = RISCVII::LMUL_1;
349  } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
350  Opc = RISCV::PseudoVMV1R_V;
351  SubRegIdx = RISCV::sub_vrm1_0;
352  NF = 7;
353  LMul = RISCVII::LMUL_1;
354  } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
355  Opc = RISCV::PseudoVMV1R_V;
356  SubRegIdx = RISCV::sub_vrm1_0;
357  NF = 8;
358  LMul = RISCVII::LMUL_1;
359  } else {
360  llvm_unreachable("Impossible reg-to-reg copy");
361  }
362 
363  if (IsScalableVector) {
364  bool UseVMV_V_V = false;
366  unsigned DefExplicitOpNum;
367  unsigned VIOpc;
368  if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
369  UseVMV_V_V = true;
370  DefExplicitOpNum = DefMBBI->getNumExplicitOperands();
371  // We only need to handle LMUL = 1/2/4/8 here because we only define
372  // vector register classes for LMUL = 1/2/4/8.
373  switch (LMul) {
374  default:
375  llvm_unreachable("Impossible LMUL for vector register copy.");
376  case RISCVII::LMUL_1:
377  Opc = RISCV::PseudoVMV_V_V_M1;
378  VIOpc = RISCV::PseudoVMV_V_I_M1;
379  break;
380  case RISCVII::LMUL_2:
381  Opc = RISCV::PseudoVMV_V_V_M2;
382  VIOpc = RISCV::PseudoVMV_V_I_M2;
383  break;
384  case RISCVII::LMUL_4:
385  Opc = RISCV::PseudoVMV_V_V_M4;
386  VIOpc = RISCV::PseudoVMV_V_I_M4;
387  break;
388  case RISCVII::LMUL_8:
389  Opc = RISCV::PseudoVMV_V_V_M8;
390  VIOpc = RISCV::PseudoVMV_V_I_M8;
391  break;
392  }
393  }
394 
// If the defining instruction was itself a vmv.v.i, propagate the
// immediate form instead of the register form.
395  bool UseVMV_V_I = false;
396  if (UseVMV_V_V && (DefMBBI->getOpcode() == VIOpc)) {
397  UseVMV_V_I = true;
398  Opc = VIOpc;
399  }
400 
401  if (NF == 1) {
402  auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), DstReg);
403  if (UseVMV_V_I)
404  MIB = MIB.add(DefMBBI->getOperand(1));
405  else
406  MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc));
407  if (UseVMV_V_V) {
408  // The last two arguments of vector instructions are
409  // AVL, SEW. We also need to append the implicit-use vl and vtype.
410  MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 2)); // AVL
411  MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 1)); // SEW
412  MIB.addReg(RISCV::VL, RegState::Implicit);
413  MIB.addReg(RISCV::VTYPE, RegState::Implicit);
414  }
415  } else {
417 
418  int I = 0, End = NF, Incr = 1;
419  unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
420  unsigned DstEncoding = TRI->getEncodingValue(DstReg);
421  unsigned LMulVal;
422  bool Fractional;
423  std::tie(LMulVal, Fractional) = RISCVVType::decodeVLMUL(LMul);
424  assert(!Fractional && "It is impossible be fractional lmul here.");
// Copy the tuple backwards if a forward copy would overwrite not-yet-read
// source sub-registers.
425  if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
426  I = NF - 1;
427  End = -1;
428  Incr = -1;
429  }
430 
431  for (; I != End; I += Incr) {
432  auto MIB = BuildMI(MBB, MBBI, DL, get(Opc),
433  TRI->getSubReg(DstReg, SubRegIdx + I));
434  if (UseVMV_V_I)
435  MIB = MIB.add(DefMBBI->getOperand(1));
436  else
437  MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
438  getKillRegState(KillSrc));
439  if (UseVMV_V_V) {
440  MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 2)); // AVL
441  MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 1)); // SEW
442  MIB.addReg(RISCV::VL, RegState::Implicit);
443  MIB.addReg(RISCV::VTYPE, RegState::Implicit);
444  }
445  }
446  }
447  } else {
// Scalar FP copy: canonical move is fsgnj rd, rs, rs.
448  BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
449  .addReg(SrcReg, getKillRegState(KillSrc))
450  .addReg(SrcReg, getKillRegState(KillSrc));
451  }
452 }
453 
// storeRegToStackSlot body (opening signature lines truncated in this
// listing): pick a spill opcode by register class.  Scalar classes use
// SW/SD/FSH/FSW/FSD with an explicit 0 offset; scalable-vector classes use
// the PseudoVSPILL* pseudos (no immediate offset), and Zvlsseg tuple spills
// additionally append a dummy X0 operand for the scaled vector length.
456  Register SrcReg, bool IsKill, int FI,
457  const TargetRegisterClass *RC,
458  const TargetRegisterInfo *TRI) const {
459  DebugLoc DL;
460  if (I != MBB.end())
461  DL = I->getDebugLoc();
462 
463  MachineFunction *MF = MBB.getParent();
464  MachineFrameInfo &MFI = MF->getFrameInfo();
465 
466  unsigned Opcode;
467  bool IsScalableVector = true;
468  bool IsZvlsseg = true;
469  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
// GPR width decides SW (RV32) vs SD (RV64).
470  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
471  RISCV::SW : RISCV::SD;
472  IsScalableVector = false;
473  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
474  Opcode = RISCV::FSH;
475  IsScalableVector = false;
476  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
477  Opcode = RISCV::FSW;
478  IsScalableVector = false;
479  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
480  Opcode = RISCV::FSD;
481  IsScalableVector = false;
482  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
483  Opcode = RISCV::PseudoVSPILL_M1;
484  IsZvlsseg = false;
485  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
486  Opcode = RISCV::PseudoVSPILL_M2;
487  IsZvlsseg = false;
488  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
489  Opcode = RISCV::PseudoVSPILL_M4;
490  IsZvlsseg = false;
491  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
492  Opcode = RISCV::PseudoVSPILL_M8;
493  IsZvlsseg = false;
494  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
495  Opcode = RISCV::PseudoVSPILL2_M1;
496  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
497  Opcode = RISCV::PseudoVSPILL2_M2;
498  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
499  Opcode = RISCV::PseudoVSPILL2_M4;
500  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
501  Opcode = RISCV::PseudoVSPILL3_M1;
502  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
503  Opcode = RISCV::PseudoVSPILL3_M2;
504  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
505  Opcode = RISCV::PseudoVSPILL4_M1;
506  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
507  Opcode = RISCV::PseudoVSPILL4_M2;
508  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
509  Opcode = RISCV::PseudoVSPILL5_M1;
510  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
511  Opcode = RISCV::PseudoVSPILL6_M1;
512  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
513  Opcode = RISCV::PseudoVSPILL7_M1;
514  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
515  Opcode = RISCV::PseudoVSPILL8_M1;
516  else
517  llvm_unreachable("Can't store this register to stack slot");
518 
519  if (IsScalableVector) {
// NOTE(review): the MachineMemOperand construction lines are missing from
// this rendered listing — `MMO` below is created there.
523 
525  auto MIB = BuildMI(MBB, I, DL, get(Opcode))
526  .addReg(SrcReg, getKillRegState(IsKill))
527  .addFrameIndex(FI)
528  .addMemOperand(MMO);
529  if (IsZvlsseg) {
530  // For spilling/reloading Zvlsseg registers, append the dummy field for
531  // the scaled vector length. The argument will be used when expanding
532  // these pseudo instructions.
533  MIB.addReg(RISCV::X0);
534  }
535  } else {
538  MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
539 
540  BuildMI(MBB, I, DL, get(Opcode))
541  .addReg(SrcReg, getKillRegState(IsKill))
542  .addFrameIndex(FI)
543  .addImm(0)
544  .addMemOperand(MMO);
545  }
546 }
547 
// loadRegFromStackSlot body (opening signature lines truncated in this
// listing): mirror of storeRegToStackSlot — select a reload opcode by
// register class (LW/LD/FLH/FLW/FLD for scalars, PseudoVRELOAD* for
// scalable vectors), with the dummy X0 vector-length operand for Zvlsseg
// tuple reloads.
550  Register DstReg, int FI,
551  const TargetRegisterClass *RC,
552  const TargetRegisterInfo *TRI) const {
553  DebugLoc DL;
554  if (I != MBB.end())
555  DL = I->getDebugLoc();
556 
557  MachineFunction *MF = MBB.getParent();
558  MachineFrameInfo &MFI = MF->getFrameInfo();
559 
560  unsigned Opcode;
561  bool IsScalableVector = true;
562  bool IsZvlsseg = true;
563  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
564  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
565  RISCV::LW : RISCV::LD;
566  IsScalableVector = false;
567  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
568  Opcode = RISCV::FLH;
569  IsScalableVector = false;
570  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
571  Opcode = RISCV::FLW;
572  IsScalableVector = false;
573  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
574  Opcode = RISCV::FLD;
575  IsScalableVector = false;
576  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
577  Opcode = RISCV::PseudoVRELOAD_M1;
578  IsZvlsseg = false;
579  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
580  Opcode = RISCV::PseudoVRELOAD_M2;
581  IsZvlsseg = false;
582  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
583  Opcode = RISCV::PseudoVRELOAD_M4;
584  IsZvlsseg = false;
585  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
586  Opcode = RISCV::PseudoVRELOAD_M8;
587  IsZvlsseg = false;
588  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
589  Opcode = RISCV::PseudoVRELOAD2_M1;
590  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
591  Opcode = RISCV::PseudoVRELOAD2_M2;
592  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
593  Opcode = RISCV::PseudoVRELOAD2_M4;
594  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
595  Opcode = RISCV::PseudoVRELOAD3_M1;
596  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
597  Opcode = RISCV::PseudoVRELOAD3_M2;
598  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
599  Opcode = RISCV::PseudoVRELOAD4_M1;
600  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
601  Opcode = RISCV::PseudoVRELOAD4_M2;
602  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
603  Opcode = RISCV::PseudoVRELOAD5_M1;
604  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
605  Opcode = RISCV::PseudoVRELOAD6_M1;
606  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
607  Opcode = RISCV::PseudoVRELOAD7_M1;
608  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
609  Opcode = RISCV::PseudoVRELOAD8_M1;
610  else
611  llvm_unreachable("Can't load this register from stack slot");
612 
613  if (IsScalableVector) {
// NOTE(review): the MachineMemOperand construction lines are missing from
// this rendered listing — `MMO` below is created there.
617 
619  auto MIB = BuildMI(MBB, I, DL, get(Opcode), DstReg)
620  .addFrameIndex(FI)
621  .addMemOperand(MMO);
622  if (IsZvlsseg) {
623  // For spilling/reloading Zvlsseg registers, append the dummy field for
624  // the scaled vector length. The argument will be used when expanding
625  // these pseudo instructions.
626  MIB.addReg(RISCV::X0);
627  }
628  } else {
631  MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
632 
633  BuildMI(MBB, I, DL, get(Opcode), DstReg)
634  .addFrameIndex(FI)
635  .addImm(0)
636  .addMemOperand(MMO);
637  }
638 }
639 
// movImm body (opening signature line truncated in this listing):
// materialize the 64-bit constant Val into DstReg using the instruction
// sequence computed by RISCVMatInt.  Only the first instruction reads X0;
// every subsequent one chains on the partially-built value in DstReg.
642  const DebugLoc &DL, Register DstReg, uint64_t Val,
643  MachineInstr::MIFlag Flag) const {
644  Register SrcReg = RISCV::X0;
645 
// RV32 can only materialize values representable in 32 bits.
646  if (!STI.is64Bit() && !isInt<32>(Val))
647  report_fatal_error("Should only materialize 32-bit constants for RV32");
648 
650  RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());
651  assert(!Seq.empty());
652 
653  for (RISCVMatInt::Inst &Inst : Seq) {
654  if (Inst.Opc == RISCV::LUI) {
655  BuildMI(MBB, MBBI, DL, get(RISCV::LUI), DstReg)
656  .addImm(Inst.Imm)
657  .setMIFlag(Flag);
658  } else if (Inst.Opc == RISCV::ADD_UW) {
659  BuildMI(MBB, MBBI, DL, get(RISCV::ADD_UW), DstReg)
660  .addReg(SrcReg, RegState::Kill)
661  .addReg(RISCV::X0)
662  .setMIFlag(Flag);
663  } else if (Inst.Opc == RISCV::SH1ADD || Inst.Opc == RISCV::SH2ADD ||
664  Inst.Opc == RISCV::SH3ADD) {
665  BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
666  .addReg(SrcReg, RegState::Kill)
667  .addReg(SrcReg, RegState::Kill)
668  .setMIFlag(Flag);
669  } else {
670  BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
671  .addReg(SrcReg, RegState::Kill)
672  .addImm(Inst.Imm)
673  .setMIFlag(Flag);
674  }
675  // Only the first instruction has X0 as its source.
676  SrcReg = DstReg;
677  }
678 }
679 
// getCondFromBranchOpc body (signature line missing from this listing):
// maps a conditional-branch opcode to its RISCVCC condition code, or
// COND_INVALID when the opcode is not a conditional branch.
681  switch (Opc) {
682  default:
683  return RISCVCC::COND_INVALID;
684  case RISCV::BEQ:
685  return RISCVCC::COND_EQ;
686  case RISCV::BNE:
687  return RISCVCC::COND_NE;
688  case RISCV::BLT:
689  return RISCVCC::COND_LT;
690  case RISCV::BGE:
691  return RISCVCC::COND_GE;
692  case RISCV::BLTU:
693  return RISCVCC::COND_LTU;
694  case RISCV::BGEU:
695  return RISCVCC::COND_GEU;
696  }
697 }
698 
699 // The contents of values added to Cond are not examined outside of
700 // RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
701 // push BranchOpcode, Reg1, Reg2.
// parseCondBranch body (signature line missing from this listing): decode a
// conditional branch into its target block plus a 3-element Cond vector
// {condition code, lhs reg, rhs reg}.
704  // Block ends with fall-through condbranch.
705  assert(LastInst.getDesc().isConditionalBranch() &&
706  "Unknown conditional branch");
707  Target = LastInst.getOperand(2).getMBB();
708  unsigned CC = getCondFromBranchOpc(LastInst.getOpcode());
709  Cond.push_back(MachineOperand::CreateImm(CC));
710  Cond.push_back(LastInst.getOperand(0));
711  Cond.push_back(LastInst.getOperand(1));
712 }
713 
// getBrCond body (signature line missing from this listing): the inverse of
// getCondFromBranchOpc — return the branch instruction descriptor for a
// RISCVCC condition code.
715  switch (CC) {
716  default:
717  llvm_unreachable("Unknown condition code!");
718  case RISCVCC::COND_EQ:
719  return get(RISCV::BEQ);
720  case RISCVCC::COND_NE:
721  return get(RISCV::BNE);
722  case RISCVCC::COND_LT:
723  return get(RISCV::BLT);
724  case RISCVCC::COND_GE:
725  return get(RISCV::BGE);
726  case RISCVCC::COND_LTU:
727  return get(RISCV::BLTU);
728  case RISCVCC::COND_GEU:
729  return get(RISCV::BGEU);
730  }
731 }
732 
// getOppositeBranchCondition body (signature line missing from this
// listing): return the logical negation of a condition code (EQ<->NE,
// LT<->GE, LTU<->GEU).
734  switch (CC) {
735  default:
736  llvm_unreachable("Unrecognized conditional branch");
737  case RISCVCC::COND_EQ:
738  return RISCVCC::COND_NE;
739  case RISCVCC::COND_NE:
740  return RISCVCC::COND_EQ;
741  case RISCVCC::COND_LT:
742  return RISCVCC::COND_GE;
743  case RISCVCC::COND_GE:
744  return RISCVCC::COND_LT;
745  case RISCVCC::COND_LTU:
746  return RISCVCC::COND_GEU;
747  case RISCVCC::COND_GEU:
748  return RISCVCC::COND_LTU;
749  }
750 }
751 
// analyzeBranch body (opening signature line truncated in this listing):
// classify the block terminators into the TargetInstrInfo contract —
// fall-through, single unconditional, single conditional, or
// conditional+unconditional pair; return true when the pattern is not one
// this hook can describe (e.g. indirect branch, >2 terminators).
753  MachineBasicBlock *&TBB,
754  MachineBasicBlock *&FBB,
756  bool AllowModify) const {
757  TBB = FBB = nullptr;
758  Cond.clear();
759 
760  // If the block has no terminators, it just falls into the block after it.
762  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
763  return false;
764 
765  // Count the number of terminators and find the first unconditional or
766  // indirect branch.
767  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
768  int NumTerminators = 0;
769  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
770  J++) {
771  NumTerminators++;
772  if (J->getDesc().isUnconditionalBranch() ||
773  J->getDesc().isIndirectBranch()) {
774  FirstUncondOrIndirectBr = J.getReverse();
775  }
776  }
777 
778  // If AllowModify is true, we can erase any terminators after
779  // FirstUncondOrIndirectBR.
780  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
781  while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
782  std::next(FirstUncondOrIndirectBr)->eraseFromParent();
783  NumTerminators--;
784  }
785  I = FirstUncondOrIndirectBr;
786  }
787 
788  // We can't handle blocks that end in an indirect branch.
789  if (I->getDesc().isIndirectBranch())
790  return true;
791 
792  // We can't handle blocks with more than 2 terminators.
793  if (NumTerminators > 2)
794  return true;
795 
796  // Handle a single unconditional branch.
797  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
798  TBB = getBranchDestBlock(*I);
799  return false;
800  }
801 
802  // Handle a single conditional branch.
803  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
804  parseCondBranch(*I, TBB, Cond);
805  return false;
806  }
807 
808  // Handle a conditional branch followed by an unconditional branch.
809  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
810  I->getDesc().isUnconditionalBranch()) {
811  parseCondBranch(*std::prev(I), TBB, Cond);
812  FBB = getBranchDestBlock(*I);
813  return false;
814  }
815 
816  // Otherwise, we can't handle this.
817  return true;
818 }
819 
// removeBranch body (opening signature line truncated in this listing):
// erase up to two trailing branches (an unconditional and/or a conditional
// one), returning how many were removed and, optionally, their total size
// in bytes.
821  int *BytesRemoved) const {
822  if (BytesRemoved)
823  *BytesRemoved = 0;
825  if (I == MBB.end())
826  return 0;
827 
828  if (!I->getDesc().isUnconditionalBranch() &&
829  !I->getDesc().isConditionalBranch())
830  return 0;
831 
832  // Remove the branch.
833  if (BytesRemoved)
834  *BytesRemoved += getInstSizeInBytes(*I);
835  I->eraseFromParent();
836 
837  I = MBB.end();
838 
839  if (I == MBB.begin())
840  return 1;
841  --I;
// A second branch can only be the conditional part of a two-way branch.
842  if (!I->getDesc().isConditionalBranch())
843  return 1;
844 
845  // Remove the branch.
846  if (BytesRemoved)
847  *BytesRemoved += getInstSizeInBytes(*I);
848  I->eraseFromParent();
849  return 2;
850 }
851 
852 // Inserts a branch into the end of the specific MachineBasicBlock, returning
853 // the number of instructions inserted.
// insertBranch body (opening signature lines truncated in this listing):
// empty Cond -> one PseudoBR; otherwise a conditional branch built from
// {cc, reg, reg}, plus a trailing PseudoBR when FBB is given.
856  ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
857  if (BytesAdded)
858  *BytesAdded = 0;
859 
860  // Shouldn't be a fall through.
861  assert(TBB && "insertBranch must not be told to insert a fallthrough");
862  assert((Cond.size() == 3 || Cond.size() == 0) &&
863  "RISCV branch conditions have two components!");
864 
865  // Unconditional branch.
866  if (Cond.empty()) {
867  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
868  if (BytesAdded)
869  *BytesAdded += getInstSizeInBytes(MI);
870  return 1;
871  }
872 
873  // Either a one or two-way conditional branch.
874  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
875  MachineInstr &CondMI =
876  *BuildMI(&MBB, DL, getBrCond(CC)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
877  if (BytesAdded)
878  *BytesAdded += getInstSizeInBytes(CondMI);
879 
880  // One-way conditional branch.
881  if (!FBB)
882  return 1;
883 
884  // Two-way conditional branch.
885  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
886  if (BytesAdded)
887  *BytesAdded += getInstSizeInBytes(MI);
888  return 2;
889 }
890 
// insertIndirectBranch body (opening signature line truncated in this
// listing): expand an out-of-range unconditional branch into a PseudoJump
// (auipc+jalr pair) through a scratch GPR obtained from the register
// scavenger.
892  MachineBasicBlock &DestBB,
893  MachineBasicBlock &RestoreBB,
894  const DebugLoc &DL, int64_t BrOffset,
895  RegScavenger *RS) const {
896  assert(RS && "RegScavenger required for long branching");
897  assert(MBB.empty() &&
898  "new block should be inserted for expanding unconditional branch");
899  assert(MBB.pred_size() == 1);
900 
901  MachineFunction *MF = MBB.getParent();
903 
904  if (!isInt<32>(BrOffset))
906  "Branch offsets outside of the signed 32-bit range not supported");
907 
908  // FIXME: A virtual register must be used initially, as the register
909  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
910  // uses the same workaround).
911  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
912  auto II = MBB.end();
913 
914  MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
915  .addReg(ScratchReg, RegState::Define | RegState::Dead)
916  .addMBB(&DestBB, RISCVII::MO_CALL);
917 
// Replace the placeholder virtual register with a scavenged physical one.
918  RS->enterBasicBlockEnd(MBB);
919  Register Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
920  MI.getIterator(), false, 0);
921  // TODO: The case when there is no scavenged register needs special handling.
922  assert(Scav != RISCV::NoRegister && "No register is scavenged!");
923  MRI.replaceRegWith(ScratchReg, Scav);
924  MRI.clearVirtRegs();
925  RS->setRegUsed(Scav);
926 }
927 
// reverseBranchCondition body (signature lines missing from this listing):
// negate the condition code in-place; returns false meaning the condition
// was successfully reversed.
930  assert((Cond.size() == 3) && "Invalid branch condition!");
931  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
932  Cond[0].setImm(getOppositeBranchCondition(CC));
933  return false;
934 }
935 
// getBranchDestBlock body (signature lines missing from this listing):
// the destination basic block of any RISCV branch is its last explicit
// operand.
938  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
939  // The branch target is always the last operand.
940  int NumOp = MI.getNumExplicitOperands();
941  return MI.getOperand(NumOp - 1).getMBB();
942 }
943 
// isBranchOffsetInRange body (opening signature line truncated in this
// listing): range check per branch kind — 13-bit for conditional branches,
// 21-bit for JAL/PseudoBR, 32-bit (after auipc rounding) for PseudoJump.
945  int64_t BrOffset) const {
946  unsigned XLen = STI.getXLen();
947  // Ideally we could determine the supported branch offset from the
948  // RISCVII::FormMask, but this can't be used for Pseudo instructions like
949  // PseudoBR.
950  switch (BranchOp) {
951  default:
952  llvm_unreachable("Unexpected opcode!");
953  case RISCV::BEQ:
954  case RISCV::BNE:
955  case RISCV::BLT:
956  case RISCV::BGE:
957  case RISCV::BLTU:
958  case RISCV::BGEU:
959  return isIntN(13, BrOffset);
960  case RISCV::JAL:
961  case RISCV::PseudoBR:
962  return isIntN(21, BrOffset);
963  case RISCV::PseudoJump:
// The +0x800 accounts for auipc's rounding of the low 12 bits.
964  return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
965  }
966 }
967 
// getInstSizeInBytes body (signature line missing from this listing):
// 0 for meta instructions, an MCAsmInfo-based estimate for inline asm,
// 2 bytes when the instruction is compressible, otherwise the MCInstrDesc
// size.
969  if (MI.isMetaInstruction())
970  return 0;
971 
972  unsigned Opcode = MI.getOpcode();
973 
974  if (Opcode == TargetOpcode::INLINEASM ||
975  Opcode == TargetOpcode::INLINEASM_BR) {
976  const MachineFunction &MF = *MI.getParent()->getParent();
977  const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
978  return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
979  *TM.getMCAsmInfo());
980  }
981 
982  if (MI.getParent() && MI.getParent()->getParent()) {
983  const auto MF = MI.getMF();
984  const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
985  const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
986  const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
987  const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
988  if (isCompressibleInst(MI, &ST, MRI, STI))
989  return 2;
990  }
991  return get(Opcode).getSize();
992 }
993 
// isAsCheapAsAMove body (signature line missing from this listing): treat
// canonical FP moves (fsgnj rd, rs, rs) and ADDI/ORI/XORI with an x0
// source or zero immediate as move-cost instructions.
995  const unsigned Opcode = MI.getOpcode();
996  switch (Opcode) {
997  default:
998  break;
999  case RISCV::FSGNJ_D:
1000  case RISCV::FSGNJ_S:
1001  case RISCV::FSGNJ_H:
1002  // The canonical floating-point move is fsgnj rd, rs, rs.
1003  return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
1004  MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
1005  case RISCV::ADDI:
1006  case RISCV::ORI:
1007  case RISCV::XORI:
1008  return (MI.getOperand(1).isReg() &&
1009  MI.getOperand(1).getReg() == RISCV::X0) ||
1010  (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
1011  }
1012  return MI.isAsCheapAsAMove();
1013 }
1014 
// isCopyInstrImpl body (opening signature lines truncated in this listing):
// recognize instructions that behave as register copies — `addi rd, rs, 0`
// and `fsgnj rd, rs, rs` — and return their dest/source operand pair; None
// otherwise.
1017  if (MI.isMoveReg())
1018  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1019  switch (MI.getOpcode()) {
1020  default:
1021  break;
1022  case RISCV::ADDI:
1023  // Operand 1 can be a frameindex but callers expect registers
1024  if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
1025  MI.getOperand(2).getImm() == 0)
1026  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1027  break;
1028  case RISCV::FSGNJ_D:
1029  case RISCV::FSGNJ_S:
1030  case RISCV::FSGNJ_H:
1031  // The canonical floating-point move is fsgnj rd, rs, rs.
1032  if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
1033  MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
1034  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1035  break;
1036  }
1037  return None;
1038 }
1039 
1041  StringRef &ErrInfo) const {
1042  const MCInstrInfo *MCII = STI.getInstrInfo();
1043  MCInstrDesc const &Desc = MCII->get(MI.getOpcode());
1044 
1045  for (auto &OI : enumerate(Desc.operands())) {
1046  unsigned OpType = OI.value().OperandType;
1047  if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
1048  OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
1049  const MachineOperand &MO = MI.getOperand(OI.index());
1050  if (MO.isImm()) {
1051  int64_t Imm = MO.getImm();
1052  bool Ok;
1053  switch (OpType) {
1054  default:
1055  llvm_unreachable("Unexpected operand type");
1057  Ok = isUInt<2>(Imm);
1058  break;
1060  Ok = isUInt<3>(Imm);
1061  break;
1063  Ok = isUInt<4>(Imm);
1064  break;
1066  Ok = isUInt<5>(Imm);
1067  break;
1069  Ok = isUInt<7>(Imm);
1070  break;
1072  Ok = isUInt<12>(Imm);
1073  break;
1075  Ok = isInt<12>(Imm);
1076  break;
1078  Ok = isUInt<20>(Imm);
1079  break;
1081  if (STI.getTargetTriple().isArch64Bit())
1082  Ok = isUInt<6>(Imm);
1083  else
1084  Ok = isUInt<5>(Imm);
1085  break;
1087  Ok = Imm >= 0 && Imm <= 10;
1088  break;
1089  }
1090  if (!Ok) {
1091  ErrInfo = "Invalid immediate";
1092  return false;
1093  }
1094  }
1095  }
1096  }
1097 
1098  return true;
1099 }
1100 
1101 // Return true if get the base operand, byte offset of an instruction and the
1102 // memory width. Width is the size of memory that is being loaded/stored.
1104  const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
1105  unsigned &Width, const TargetRegisterInfo *TRI) const {
1106  if (!LdSt.mayLoadOrStore())
1107  return false;
1108 
1109  // Here we assume the standard RISC-V ISA, which uses a base+offset
1110  // addressing mode. You'll need to relax these conditions to support custom
1111  // load/stores instructions.
1112  if (LdSt.getNumExplicitOperands() != 3)
1113  return false;
1114  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
1115  return false;
1116 
1117  if (!LdSt.hasOneMemOperand())
1118  return false;
1119 
1120  Width = (*LdSt.memoperands_begin())->getSize();
1121  BaseReg = &LdSt.getOperand(1);
1122  Offset = LdSt.getOperand(2).getImm();
1123  return true;
1124 }
1125 
1127  const MachineInstr &MIa, const MachineInstr &MIb) const {
1128  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
1129  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
1130 
1131  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
1133  return false;
1134 
1135  // Retrieve the base register, offset from the base register and width. Width
1136  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
1137  // base registers are identical, and the offset of a lower memory access +
1138  // the width doesn't overlap the offset of a higher memory access,
1139  // then the memory accesses are different.
1141  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
1142  int64_t OffsetA = 0, OffsetB = 0;
1143  unsigned int WidthA = 0, WidthB = 0;
1144  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
1145  getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
1146  if (BaseOpA->isIdenticalTo(*BaseOpB)) {
1147  int LowOffset = std::min(OffsetA, OffsetB);
1148  int HighOffset = std::max(OffsetA, OffsetB);
1149  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
1150  if (LowOffset + LowWidth <= HighOffset)
1151  return true;
1152  }
1153  }
1154  return false;
1155 }
1156 
1157 std::pair<unsigned, unsigned>
1159  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
1160  return std::make_pair(TF & Mask, TF & ~Mask);
1161 }
1162 
1165  using namespace RISCVII;
1166  static const std::pair<unsigned, const char *> TargetFlags[] = {
1167  {MO_CALL, "riscv-call"},
1168  {MO_PLT, "riscv-plt"},
1169  {MO_LO, "riscv-lo"},
1170  {MO_HI, "riscv-hi"},
1171  {MO_PCREL_LO, "riscv-pcrel-lo"},
1172  {MO_PCREL_HI, "riscv-pcrel-hi"},
1173  {MO_GOT_HI, "riscv-got-hi"},
1174  {MO_TPREL_LO, "riscv-tprel-lo"},
1175  {MO_TPREL_HI, "riscv-tprel-hi"},
1176  {MO_TPREL_ADD, "riscv-tprel-add"},
1177  {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
1178  {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
1179  return makeArrayRef(TargetFlags);
1180 }
1182  MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
1183  const Function &F = MF.getFunction();
1184 
1185  // Can F be deduplicated by the linker? If it can, don't outline from it.
1186  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
1187  return false;
1188 
1189  // Don't outline from functions with section markings; the program could
1190  // expect that all the code is in the named section.
1191  if (F.hasSection())
1192  return false;
1193 
1194  // It's safe to outline from MF.
1195  return true;
1196 }
1197 
1199  unsigned &Flags) const {
1200  // More accurate safety checking is done in getOutliningCandidateInfo.
1202 }
1203 
// Enum values indicating how an outlined call should be constructed.
enum MachineOutlinerConstructionID {
  MachineOutlinerDefault // call t0, <function>; return via jr t0.
};
1208 
1210  MachineFunction &MF) const {
1211  return MF.getFunction().hasMinSize();
1212 }
1213 
1215  std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
1216 
1217  // First we need to filter out candidates where the X5 register (IE t0) can't
1218  // be used to setup the function call.
1219  auto CannotInsertCall = [](outliner::Candidate &C) {
1220  const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
1221  return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
1222  };
1223 
1224  llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
1225 
1226  // If the sequence doesn't have enough candidates left, then we're done.
1227  if (RepeatedSequenceLocs.size() < 2)
1228  return outliner::OutlinedFunction();
1229 
1230  unsigned SequenceSize = 0;
1231 
1232  auto I = RepeatedSequenceLocs[0].front();
1233  auto E = std::next(RepeatedSequenceLocs[0].back());
1234  for (; I != E; ++I)
1235  SequenceSize += getInstSizeInBytes(*I);
1236 
1237  // call t0, function = 8 bytes.
1238  unsigned CallOverhead = 8;
1239  for (auto &C : RepeatedSequenceLocs)
1240  C.setCallInfo(MachineOutlinerDefault, CallOverhead);
1241 
1242  // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
1243  unsigned FrameOverhead = 4;
1244  if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
1245  .getFeatureBits()[RISCV::FeatureStdExtC])
1246  FrameOverhead = 2;
1247 
1248  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
1249  FrameOverhead, MachineOutlinerDefault);
1250 }
1251 
1254  unsigned Flags) const {
1255  MachineInstr &MI = *MBBI;
1256  MachineBasicBlock *MBB = MI.getParent();
1257  const TargetRegisterInfo *TRI =
1259 
1260  // Positions generally can't safely be outlined.
1261  if (MI.isPosition()) {
1262  // We can manually strip out CFI instructions later.
1263  if (MI.isCFIInstruction())
1264  // If current function has exception handling code, we can't outline &
1265  // strip these CFI instructions since it may break .eh_frame section
1266  // needed in unwinding.
1267  return MI.getMF()->getFunction().needsUnwindTableEntry()
1270 
1272  }
1273 
1274  // Don't trust the user to write safe inline assembly.
1275  if (MI.isInlineAsm())
1277 
1278  // We can't outline branches to other basic blocks.
1279  if (MI.isTerminator() && !MBB->succ_empty())
1281 
1282  // We need support for tail calls to outlined functions before return
1283  // statements can be allowed.
1284  if (MI.isReturn())
1286 
1287  // Don't allow modifying the X5 register which we use for return addresses for
1288  // these outlined functions.
1289  if (MI.modifiesRegister(RISCV::X5, TRI) ||
1290  MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
1292 
1293  // Make sure the operands don't reference something unsafe.
1294  for (const auto &MO : MI.operands())
1295  if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI() || MO.isJTI())
1297 
1298  // Don't allow instructions which won't be materialized to impact outlining
1299  // analysis.
1300  if (MI.isMetaInstruction())
1302 
1304 }
1305 
1308  const outliner::OutlinedFunction &OF) const {
1309 
1310  // Strip out any CFI instructions
1311  bool Changed = true;
1312  while (Changed) {
1313  Changed = false;
1314  auto I = MBB.begin();
1315  auto E = MBB.end();
1316  for (; I != E; ++I) {
1317  if (I->isCFIInstruction()) {
1318  I->removeFromParent();
1319  Changed = true;
1320  break;
1321  }
1322  }
1323  }
1324 
1325  MBB.addLiveIn(RISCV::X5);
1326 
1327  // Add in a return instruction to the end of the outlined frame.
1328  MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
1329  .addReg(RISCV::X0, RegState::Define)
1330  .addReg(RISCV::X5)
1331  .addImm(0));
1332 }
1333 
1336  MachineFunction &MF, outliner::Candidate &C) const {
1337 
1338  // Add in a call instruction to the outlined function at the given location.
1339  It = MBB.insert(It,
1340  BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
1341  .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
1342  RISCVII::MO_CALL));
1343  return It;
1344 }
1345 
1346 // MIR printer helper function to annotate Operands with a comment.
1348  const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
1349  const TargetRegisterInfo *TRI) const {
1350  // Print a generic comment for this operand if there is one.
1351  std::string GenericComment =
1353  if (!GenericComment.empty())
1354  return GenericComment;
1355 
1356  // If not, we must have an immediate operand.
1357  if (Op.getType() != MachineOperand::MO_Immediate)
1358  return std::string();
1359 
1360  std::string Comment;
1361  raw_string_ostream OS(Comment);
1362 
1363  uint64_t TSFlags = MI.getDesc().TSFlags;
1364 
1365  // Print the full VType operand of vsetvli/vsetivli and PseudoReadVL
1366  // instructions, and the SEW operand of vector codegen pseudos.
1367  if (((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
1368  MI.getOpcode() == RISCV::PseudoVSETVLI ||
1369  MI.getOpcode() == RISCV::PseudoVSETIVLI ||
1370  MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
1371  OpIdx == 2) ||
1372  (MI.getOpcode() == RISCV::PseudoReadVL && OpIdx == 1)) {
1373  unsigned Imm = MI.getOperand(OpIdx).getImm();
1374  RISCVVType::printVType(Imm, OS);
1375  } else if (RISCVII::hasSEWOp(TSFlags)) {
1376  unsigned NumOperands = MI.getNumExplicitOperands();
1377  bool HasPolicy = RISCVII::hasVecPolicyOp(TSFlags);
1378 
1379  // The SEW operand is before any policy operand.
1380  if (OpIdx != NumOperands - HasPolicy - 1)
1381  return std::string();
1382 
1383  unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
1384  unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
1385  assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
1386 
1387  OS << "e" << SEW;
1388  }
1389 
1390  OS.flush();
1391  return Comment;
1392 }
1393 
1394 // clang-format off
// clang-format off
// Helper macros that expand to chains of case labels covering a VFMA pseudo
// across all LMUL variants (and, for CASE_VFMA_SPLATS, across the scalar
// splat element widths VF16/VF32/VF64).
#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL)                                \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)                                    \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1):                                       \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)                                   \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2):                                      \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)                                   \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4):                                      \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)

#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)                                       \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8):                                      \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)

#define CASE_VFMA_SPLATS(OP)                                                   \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VF16):                                        \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VF32):                                   \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VF64)
// clang-format on
1421 
1423  unsigned &SrcOpIdx1,
1424  unsigned &SrcOpIdx2) const {
1425  const MCInstrDesc &Desc = MI.getDesc();
1426  if (!Desc.isCommutable())
1427  return false;
1428 
1429  switch (MI.getOpcode()) {
1430  case CASE_VFMA_SPLATS(FMADD):
1431  case CASE_VFMA_SPLATS(FMSUB):
1432  case CASE_VFMA_SPLATS(FMACC):
1433  case CASE_VFMA_SPLATS(FMSAC):
1434  case CASE_VFMA_SPLATS(FNMADD):
1435  case CASE_VFMA_SPLATS(FNMSUB):
1436  case CASE_VFMA_SPLATS(FNMACC):
1437  case CASE_VFMA_SPLATS(FNMSAC):
1438  case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
1439  case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
1440  case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
1441  case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
1442  case CASE_VFMA_OPCODE_LMULS(MADD, VX):
1443  case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
1444  case CASE_VFMA_OPCODE_LMULS(MACC, VX):
1445  case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
1446  case CASE_VFMA_OPCODE_LMULS(MACC, VV):
1447  case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
1448  // If the tail policy is undisturbed we can't commute.
1449  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
1450  if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
1451  return false;
1452 
1453  // For these instructions we can only swap operand 1 and operand 3 by
1454  // changing the opcode.
1455  unsigned CommutableOpIdx1 = 1;
1456  unsigned CommutableOpIdx2 = 3;
1457  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
1458  CommutableOpIdx2))
1459  return false;
1460  return true;
1461  }
1462  case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
1466  case CASE_VFMA_OPCODE_LMULS(MADD, VV):
1467  case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
1468  // If the tail policy is undisturbed we can't commute.
1469  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
1470  if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
1471  return false;
1472 
1473  // For these instructions we have more freedom. We can commute with the
1474  // other multiplicand or with the addend/subtrahend/minuend.
1475 
1476  // Any fixed operand must be from source 1, 2 or 3.
1477  if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
1478  return false;
1479  if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
1480  return false;
1481 
1482  // It both ops are fixed one must be the tied source.
1483  if (SrcOpIdx1 != CommuteAnyOperandIndex &&
1484  SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
1485  return false;
1486 
1487  // Look for two different register operands assumed to be commutable
1488  // regardless of the FMA opcode. The FMA opcode is adjusted later if
1489  // needed.
1490  if (SrcOpIdx1 == CommuteAnyOperandIndex ||
1491  SrcOpIdx2 == CommuteAnyOperandIndex) {
1492  // At least one of operands to be commuted is not specified and
1493  // this method is free to choose appropriate commutable operands.
1494  unsigned CommutableOpIdx1 = SrcOpIdx1;
1495  if (SrcOpIdx1 == SrcOpIdx2) {
1496  // Both of operands are not fixed. Set one of commutable
1497  // operands to the tied source.
1498  CommutableOpIdx1 = 1;
1499  } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
1500  // Only one of the operands is not fixed.
1501  CommutableOpIdx1 = SrcOpIdx2;
1502  }
1503 
1504  // CommutableOpIdx1 is well defined now. Let's choose another commutable
1505  // operand and assign its index to CommutableOpIdx2.
1506  unsigned CommutableOpIdx2;
1507  if (CommutableOpIdx1 != 1) {
1508  // If we haven't already used the tied source, we must use it now.
1509  CommutableOpIdx2 = 1;
1510  } else {
1511  Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
1512 
1513  // The commuted operands should have different registers.
1514  // Otherwise, the commute transformation does not change anything and
1515  // is useless. We use this as a hint to make our decision.
1516  if (Op1Reg != MI.getOperand(2).getReg())
1517  CommutableOpIdx2 = 2;
1518  else
1519  CommutableOpIdx2 = 3;
1520  }
1521 
1522  // Assign the found pair of commutable indices to SrcOpIdx1 and
1523  // SrcOpIdx2 to return those values.
1524  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
1525  CommutableOpIdx2))
1526  return false;
1527  }
1528 
1529  return true;
1530  }
1531  }
1532 
1533  return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
1534 }
1535 
// Helper macros that expand to case labels rewriting Opc from one VFMA
// pseudo family to its commuted counterpart, across all LMUL (and splat
// element width) variants.
#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)               \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL:                                \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL;                             \
    break;

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)                   \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                      \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VF16)                        \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VF32)                        \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VF64)
1563 
1565  bool NewMI,
1566  unsigned OpIdx1,
1567  unsigned OpIdx2) const {
1568  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
1569  if (NewMI)
1570  return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
1571  return MI;
1572  };
1573 
1574  switch (MI.getOpcode()) {
1575  case CASE_VFMA_SPLATS(FMACC):
1576  case CASE_VFMA_SPLATS(FMADD):
1577  case CASE_VFMA_SPLATS(FMSAC):
1578  case CASE_VFMA_SPLATS(FMSUB):
1579  case CASE_VFMA_SPLATS(FNMACC):
1580  case CASE_VFMA_SPLATS(FNMADD):
1581  case CASE_VFMA_SPLATS(FNMSAC):
1582  case CASE_VFMA_SPLATS(FNMSUB):
1583  case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
1584  case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
1585  case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
1586  case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
1587  case CASE_VFMA_OPCODE_LMULS(MADD, VX):
1588  case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
1589  case CASE_VFMA_OPCODE_LMULS(MACC, VX):
1590  case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
1591  case CASE_VFMA_OPCODE_LMULS(MACC, VV):
1592  case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
1593  // It only make sense to toggle these between clobbering the
1594  // addend/subtrahend/minuend one of the multiplicands.
1595  assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
1596  assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
1597  unsigned Opc;
1598  switch (MI.getOpcode()) {
1599  default:
1600  llvm_unreachable("Unexpected opcode");
1601  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
1602  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
1609  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMACC, FMADD, VV)
1613  CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VX)
1614  CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VX)
1615  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VX)
1616  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VX)
1617  CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VV)
1618  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VV)
1619  }
1620 
1621  auto &WorkingMI = cloneIfNew(MI);
1622  WorkingMI.setDesc(get(Opc));
1623  return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1624  OpIdx1, OpIdx2);
1625  }
1626  case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
1630  case CASE_VFMA_OPCODE_LMULS(MADD, VV):
1631  case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
1632  assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
1633  // If one of the operands, is the addend we need to change opcode.
1634  // Otherwise we're just swapping 2 of the multiplicands.
1635  if (OpIdx1 == 3 || OpIdx2 == 3) {
1636  unsigned Opc;
1637  switch (MI.getOpcode()) {
1638  default:
1639  llvm_unreachable("Unexpected opcode");
1640  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMADD, FMACC, VV)
1644  CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VV)
1645  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VV)
1646  }
1647 
1648  auto &WorkingMI = cloneIfNew(MI);
1649  WorkingMI.setDesc(get(Opc));
1650  return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1651  OpIdx1, OpIdx2);
1652  }
1653  // Let the default code handle it.
1654  break;
1655  }
1656  }
1657 
1658  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
1659 }
1660 
1661 #undef CASE_VFMA_CHANGE_OPCODE_SPLATS
1662 #undef CASE_VFMA_CHANGE_OPCODE_LMULS
1663 #undef CASE_VFMA_CHANGE_OPCODE_COMMON
1664 #undef CASE_VFMA_SPLATS
1665 #undef CASE_VFMA_OPCODE_LMULS
1666 #undef CASE_VFMA_OPCODE_COMMON
1667 
1668 // clang-format off
1669 #define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
1670  RISCV::PseudoV##OP##_##LMUL##_TIED
1671 
1672 #define CASE_WIDEOP_OPCODE_LMULS_MF4(OP) \
1673  CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
1674  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
1675  case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
1676  case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
1677  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
1678 
1679 #define CASE_WIDEOP_OPCODE_LMULS(OP) \
1680  CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
1681  case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
1682 // clang-format on
1683 
1684 #define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
1685  case RISCV::PseudoV##OP##_##LMUL##_TIED: \
1686  NewOpc = RISCV::PseudoV##OP##_##LMUL; \
1687  break;
1688 
1689 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
1690  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
1691  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
1692  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
1693  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
1694  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
1695 
1696 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
1697  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
1698  CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
1699 
1701  LiveVariables *LV,
1702  LiveIntervals *LIS) const {
1703  switch (MI.getOpcode()) {
1704  default:
1705  break;
1706  case CASE_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
1707  case CASE_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV):
1708  case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
1709  case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
1710  case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
1711  case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
1712  // clang-format off
1713  unsigned NewOpc;
1714  switch (MI.getOpcode()) {
1715  default:
1716  llvm_unreachable("Unexpected opcode");
1723  }
1724  // clang-format on
1725 
1726  MachineBasicBlock &MBB = *MI.getParent();
1727  MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
1728  .add(MI.getOperand(0))
1729  .add(MI.getOperand(1))
1730  .add(MI.getOperand(2))
1731  .add(MI.getOperand(3))
1732  .add(MI.getOperand(4));
1733  MIB.copyImplicitOps(MI);
1734 
1735  if (LV) {
1736  unsigned NumOps = MI.getNumOperands();
1737  for (unsigned I = 1; I < NumOps; ++I) {
1738  MachineOperand &Op = MI.getOperand(I);
1739  if (Op.isReg() && Op.isKill())
1740  LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
1741  }
1742  }
1743 
1744  if (LIS) {
1745  SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);
1746 
1747  if (MI.getOperand(0).isEarlyClobber()) {
1748  // Use operand 1 was tied to early-clobber def operand 0, so its live
1749  // interval could have ended at an early-clobber slot. Now they are not
1750  // tied we need to update it to the normal register slot.
1751  LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
1753  if (S->end == Idx.getRegSlot(true))
1754  S->end = Idx.getRegSlot();
1755  }
1756  }
1757 
1758  return MIB;
1759  }
1760  }
1761 
1762  return nullptr;
1763 }
1764 
1765 #undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
1766 #undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
1767 #undef CASE_WIDEOP_OPCODE_LMULS
1768 #undef CASE_WIDEOP_OPCODE_COMMON
1769 
1773  const DebugLoc &DL,
1774  int64_t Amount,
1775  MachineInstr::MIFlag Flag) const {
1776  assert(Amount > 0 && "There is no need to get VLEN scaled value.");
1777  assert(Amount % 8 == 0 &&
1778  "Reserve the stack by the multiple of one vector size.");
1779 
1781  int64_t NumOfVReg = Amount / 8;
1782 
1783  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1784  BuildMI(MBB, II, DL, get(RISCV::PseudoReadVLENB), VL)
1785  .setMIFlag(Flag);
1786  assert(isInt<32>(NumOfVReg) &&
1787  "Expect the number of vector registers within 32-bits.");
1788  if (isPowerOf2_32(NumOfVReg)) {
1789  uint32_t ShiftAmount = Log2_32(NumOfVReg);
1790  if (ShiftAmount == 0)
1791  return VL;
1792  BuildMI(MBB, II, DL, get(RISCV::SLLI), VL)
1793  .addReg(VL, RegState::Kill)
1794  .addImm(ShiftAmount)
1795  .setMIFlag(Flag);
1796  } else if ((NumOfVReg == 3 || NumOfVReg == 5 || NumOfVReg == 9) &&
1797  STI.hasStdExtZba()) {
1798  // We can use Zba SHXADD instructions for multiply in some cases.
1799  // TODO: Generalize to SHXADD+SLLI.
1800  unsigned Opc;
1801  switch (NumOfVReg) {
1802  default: llvm_unreachable("Unexpected number of vregs");
1803  case 3: Opc = RISCV::SH1ADD; break;
1804  case 5: Opc = RISCV::SH2ADD; break;
1805  case 9: Opc = RISCV::SH3ADD; break;
1806  }
1807  BuildMI(MBB, II, DL, get(Opc), VL)
1808  .addReg(VL, RegState::Kill)
1809  .addReg(VL)
1810  .setMIFlag(Flag);
1811  } else if (isPowerOf2_32(NumOfVReg - 1)) {
1812  Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1813  uint32_t ShiftAmount = Log2_32(NumOfVReg - 1);
1814  BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
1815  .addReg(VL)
1816  .addImm(ShiftAmount)
1817  .setMIFlag(Flag);
1818  BuildMI(MBB, II, DL, get(RISCV::ADD), VL)
1819  .addReg(ScaledRegister, RegState::Kill)
1820  .addReg(VL, RegState::Kill)
1821  .setMIFlag(Flag);
1822  } else if (isPowerOf2_32(NumOfVReg + 1)) {
1823  Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1824  uint32_t ShiftAmount = Log2_32(NumOfVReg + 1);
1825  BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
1826  .addReg(VL)
1827  .addImm(ShiftAmount)
1828  .setMIFlag(Flag);
1829  BuildMI(MBB, II, DL, get(RISCV::SUB), VL)
1830  .addReg(ScaledRegister, RegState::Kill)
1831  .addReg(VL, RegState::Kill)
1832  .setMIFlag(Flag);
1833  } else {
1834  Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1835  movImm(MBB, II, DL, N, NumOfVReg, Flag);
1836  if (!STI.hasStdExtM())
1838  MF.getFunction(),
1839  "M-extension must be enabled to calculate the vscaled size/offset."});
1840  BuildMI(MBB, II, DL, get(RISCV::MUL), VL)
1841  .addReg(VL, RegState::Kill)
1843  .setMIFlag(Flag);
1844  }
1845 
1846  return VL;
1847 }
1848 
1849 static bool isRVVWholeLoadStore(unsigned Opcode) {
1850  switch (Opcode) {
1851  default:
1852  return false;
1853  case RISCV::VS1R_V:
1854  case RISCV::VS2R_V:
1855  case RISCV::VS4R_V:
1856  case RISCV::VS8R_V:
1857  case RISCV::VL1RE8_V:
1858  case RISCV::VL2RE8_V:
1859  case RISCV::VL4RE8_V:
1860  case RISCV::VL8RE8_V:
1861  case RISCV::VL1RE16_V:
1862  case RISCV::VL2RE16_V:
1863  case RISCV::VL4RE16_V:
1864  case RISCV::VL8RE16_V:
1865  case RISCV::VL1RE32_V:
1866  case RISCV::VL2RE32_V:
1867  case RISCV::VL4RE32_V:
1868  case RISCV::VL8RE32_V:
1869  case RISCV::VL1RE64_V:
1870  case RISCV::VL2RE64_V:
1871  case RISCV::VL4RE64_V:
1872  case RISCV::VL8RE64_V:
1873  return true;
1874  }
1875 }
1876 
1877 bool RISCVInstrInfo::isRVVSpill(const MachineInstr &MI, bool CheckFIs) const {
1878  // RVV lacks any support for immediate addressing for stack addresses, so be
1879  // conservative.
1880  unsigned Opcode = MI.getOpcode();
1881  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
1882  !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
1883  return false;
1884  return !CheckFIs || any_of(MI.operands(), [](const MachineOperand &MO) {
1885  return MO.isFI();
1886  });
1887 }
1888 
1891  switch (Opcode) {
1892  default:
1893  return None;
1894  case RISCV::PseudoVSPILL2_M1:
1895  case RISCV::PseudoVRELOAD2_M1:
1896  return std::make_pair(2u, 1u);
1897  case RISCV::PseudoVSPILL2_M2:
1898  case RISCV::PseudoVRELOAD2_M2:
1899  return std::make_pair(2u, 2u);
1900  case RISCV::PseudoVSPILL2_M4:
1901  case RISCV::PseudoVRELOAD2_M4:
1902  return std::make_pair(2u, 4u);
1903  case RISCV::PseudoVSPILL3_M1:
1904  case RISCV::PseudoVRELOAD3_M1:
1905  return std::make_pair(3u, 1u);
1906  case RISCV::PseudoVSPILL3_M2:
1907  case RISCV::PseudoVRELOAD3_M2:
1908  return std::make_pair(3u, 2u);
1909  case RISCV::PseudoVSPILL4_M1:
1910  case RISCV::PseudoVRELOAD4_M1:
1911  return std::make_pair(4u, 1u);
1912  case RISCV::PseudoVSPILL4_M2:
1913  case RISCV::PseudoVRELOAD4_M2:
1914  return std::make_pair(4u, 2u);
1915  case RISCV::PseudoVSPILL5_M1:
1916  case RISCV::PseudoVRELOAD5_M1:
1917  return std::make_pair(5u, 1u);
1918  case RISCV::PseudoVSPILL6_M1:
1919  case RISCV::PseudoVRELOAD6_M1:
1920  return std::make_pair(6u, 1u);
1921  case RISCV::PseudoVSPILL7_M1:
1922  case RISCV::PseudoVRELOAD7_M1:
1923  return std::make_pair(7u, 1u);
1924  case RISCV::PseudoVSPILL8_M1:
1925  case RISCV::PseudoVRELOAD8_M1:
1926  return std::make_pair(8u, 1u);
1927  }
1928 }
llvm::ISD::SUB
@ SUB
Definition: ISDOpcodes.h:240
llvm::RISCVII::LMUL_1
@ LMUL_1
Definition: RISCVBaseInfo.h:109
llvm::RISCVMatInt::Inst
Definition: RISCVMatInt.h:20
llvm::RISCVII::MO_TLS_GD_HI
@ MO_TLS_GD_HI
Definition: RISCVBaseInfo.h:205
llvm::RISCVII::isRVVWideningReduction
static bool isRVVWideningReduction(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:163
llvm::RISCVInstrInfo::reverseBranchCondition
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Definition: RISCVInstrInfo.cpp:928
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:104
llvm::MachineOperand::MO_Immediate
@ MO_Immediate
Immediate operand.
Definition: MachineOperand.h:52
llvm::RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Definition: RISCVInstrInfo.cpp:1164
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm::RISCVInstrInfo::shouldOutlineFromFunctionByDefault
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
Definition: RISCVInstrInfo.cpp:1209
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::MachineInstrBuilder::copyImplicitOps
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
Definition: MachineInstrBuilder.h:315
llvm::HexagonMCInstrInfo::getDesc
const MCInstrDesc & getDesc(MCInstrInfo const &MCII, MCInst const &MCI)
Definition: HexagonMCInstrInfo.cpp:255
llvm::RISCVInstrInfo::RISCVInstrInfo
RISCVInstrInfo(RISCVSubtarget &STI)
Definition: RISCVInstrInfo.cpp:56
llvm::MCRegisterInfo::getName
const char * getName(MCRegister RegNo) const
Return the human-readable symbolic target-specific name for the specified physical register.
Definition: MCRegisterInfo.h:485
llvm::RISCVInstrInfo::getBrCond
const MCInstrDesc & getBrCond(RISCVCC::CondCode CC) const
Definition: RISCVInstrInfo.cpp:714
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
PreferWholeRegisterMove
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
llvm::RISCVCC::COND_GEU
@ COND_GEU
Definition: RISCVInstrInfo.h:36
llvm::RISCVOp::OPERAND_SIMM12
@ OPERAND_SIMM12
Definition: RISCVBaseInfo.h:223
llvm::RegState::Define
@ Define
Register definition.
Definition: MachineInstrBuilder.h:44
llvm::RISCVCC::getOppositeBranchCondition
CondCode getOppositeBranchCondition(CondCode)
Definition: RISCVInstrInfo.cpp:733
llvm::MachineRegisterInfo::createVirtualRegister
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Definition: MachineRegisterInfo.cpp:156
llvm::DiagnosticInfoUnsupported
Diagnostic information for unsupported feature in backend.
Definition: DiagnosticInfo.h:1009
llvm::RISCVCC::COND_INVALID
@ COND_INVALID
Definition: RISCVInstrInfo.h:37
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:50
llvm::MachineInstr::mayLoadOrStore
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
Definition: MachineInstr.h:1036
llvm::MachineInstr::getNumExplicitOperands
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
Definition: MachineInstr.cpp:679
llvm::MachineInstrBuilder::add
const MachineInstrBuilder & add(const MachineOperand &MO) const
Definition: MachineInstrBuilder.h:224
llvm::Function
Definition: Function.h:60
llvm::RISCVInstrInfo::getOutliningType
virtual outliner::InstrType getOutliningType(MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
Definition: RISCVInstrInfo.cpp:1253
llvm::MachineInstr::memoperands_begin
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:710
llvm::RISCVOp::OPERAND_LAST_RISCV_IMM
@ OPERAND_LAST_RISCV_IMM
Definition: RISCVBaseInfo.h:227
llvm::RegScavenger::scavengeRegisterBackwards
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the p...
Definition: RegisterScavenging.cpp:585
llvm::raw_string_ostream
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:632
llvm::ARM_MB::LD
@ LD
Definition: ARMBaseInfo.h:72
llvm::AArch64SysReg::lookupSysRegByName
const SysReg * lookupSysRegByName(StringRef)
contains
return AArch64::GPR64RegClass contains(Reg)
llvm::Target
Target - Wrapper for Target specific information.
Definition: TargetRegistry.h:140
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1185
llvm::RISCVOp::OPERAND_UIMMLOG2XLEN
@ OPERAND_UIMMLOG2XLEN
Definition: RISCVBaseInfo.h:225
llvm::MachineFunction::getMachineMemOperand
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Definition: MachineFunction.cpp:456
llvm::enumerate
detail::enumerator< R > enumerate(R &&TheRange)
Given an input range, returns a new range whose values are are pair (A,B) such that A is the 0-based ...
Definition: STLExtras.h:2045
ErrorHandling.h
llvm::erase_if
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:1795
llvm::LiveRange::Segment
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
llvm::RISCVMatInt::generateInstSeq
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures)
Definition: RISCVMatInt.cpp:177
MCInstBuilder.h
llvm::IRSimilarity::Invisible
@ Invisible
Definition: IRSimilarityIdentifier.h:76
llvm::RISCVTargetMachine
Definition: RISCVTargetMachine.h:23
llvm::TargetSubtargetInfo::getRegisterInfo
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
Definition: TargetSubtargetInfo.h:125
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:139
llvm::RISCVII::LMUL_8
@ LMUL_8
Definition: RISCVBaseInfo.h:112
llvm::RISCVVType::isValidSEW
static bool isValidSEW(unsigned SEW)
Definition: RISCVBaseInfo.h:386
llvm::TargetRegisterInfo
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Definition: TargetRegisterInfo.h:232
llvm::MCRegisterInfo::getEncodingValue
uint16_t getEncodingValue(MCRegister RegNo) const
Returns the encoding for RegNo.
Definition: MCRegisterInfo.h:553
llvm::RISCVII::hasSEWOp
static bool hasSEWOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:151
llvm::MipsII::MO_TPREL_HI
@ MO_TPREL_HI
MO_TPREL_HI/LO - Represents the hi and low part of the offset from.
Definition: MipsBaseInfo.h:73
llvm::Function::getContext
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:319
llvm::X86ISD::FNMADD
@ FNMADD
Definition: X86ISelLowering.h:552
llvm::MachineInstr::getDesc
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:488
llvm::outliner::InstrType
InstrType
Represents how an instruction should be mapped by the outliner.
Definition: MachineOutliner.h:33
llvm::RISCVVType::isTailAgnostic
static bool isTailAgnostic(unsigned VType)
Definition: RISCVBaseInfo.h:427
llvm::MachineMemOperand
A description of a memory reference used in the backend.
Definition: MachineMemOperand.h:126
llvm::M68kII::MO_PLT
@ MO_PLT
On a symbol operand this indicates that the immediate is offset to the PLT entry of symbol name from ...
Definition: M68kBaseInfo.h:114
llvm::PPCISD::FNMSUB
@ FNMSUB
FNMSUB - Negated multiply-subtract instruction.
Definition: PPCISelLowering.h:170
llvm::RISCVInstrInfo::insertIndirectBranch
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
Definition: RISCVInstrInfo.cpp:891
llvm::RISCVInstrInfo::insertOutlinedCall
virtual MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
Definition: RISCVInstrInfo.cpp:1334
llvm::RISCVInstrInfo::STI
const RISCVSubtarget & STI
Definition: RISCVInstrInfo.h:189
llvm::Optional
Definition: APInt.h:33
STLExtras.h
llvm::MCInst
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
llvm::RISCVInstrInfo::getBranchDestBlock
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:937
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:491
llvm::outliner::OutlinedFunction
The information necessary to create an outlined function for some class of candidate.
Definition: MachineOutliner.h:214
llvm::RISCVII::hasVecPolicyOp
static bool hasVecPolicyOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:159
RISCVMatInt.h
llvm::RISCVInstrInfo::isLoadFromStackSlot
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition: RISCVInstrInfo.cpp:69
llvm::RISCVVType::getSEW
static unsigned getSEW(unsigned VType)
Definition: RISCVBaseInfo.h:422
llvm::RISCVInstrInfo::isBranchOffsetInRange
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
Definition: RISCVInstrInfo.cpp:944
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1618
RISCVGenInstrInfo
llvm::RISCVInstrInfo::convertToThreeAddress
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Definition: RISCVInstrInfo.cpp:1700
llvm::MachineInstr::hasOneMemOperand
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
Definition: MachineInstr.h:725
llvm::RISCVOp::OPERAND_UIMM7
@ OPERAND_UIMM7
Definition: RISCVBaseInfo.h:221
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::MachineInstr::hasOrderedMemoryRef
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
Definition: MachineInstr.cpp:1329
MachineRegisterInfo.h
llvm::ISD::INLINEASM
@ INLINEASM
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:1018
llvm::RISCVII::MO_TLS_GOT_HI
@ MO_TLS_GOT_HI
Definition: RISCVBaseInfo.h:204
llvm::RISCVInstrInfo::isRVVSpillForZvlsseg
Optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode) const
Definition: RISCVInstrInfo.cpp:1890
llvm::RISCVSubtarget::is64Bit
bool is64Bit() const
Definition: RISCVSubtarget.h:178
CASE_VFMA_OPCODE_LMULS_MF4
#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
Definition: RISCVInstrInfo.cpp:1408
llvm::BitmaskEnumDetail::Mask
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::RISCVII::LMUL_4
@ LMUL_4
Definition: RISCVBaseInfo.h:111
llvm::MachineBasicBlock::pred_size
unsigned pred_size() const
Definition: MachineBasicBlock.h:337
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:650
llvm::TargetInstrInfo::commuteInstructionImpl
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
Definition: TargetInstrInfo.cpp:165
llvm::MachineInstrBuilder::addMBB
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:146
llvm::RISCVCC::COND_LT
@ COND_LT
Definition: RISCVInstrInfo.h:33
llvm::MachineOperand::CreateImm
static MachineOperand CreateImm(int64_t Val)
Definition: MachineOperand.h:782
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::RISCVOp::OPERAND_RVKRNUM
@ OPERAND_RVKRNUM
Definition: RISCVBaseInfo.h:226
llvm::MachineOperand::getImm
int64_t getImm() const
Definition: MachineOperand.h:546
parseCondBranch
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
Definition: RISCVInstrInfo.cpp:702
llvm::RISCVInstrInfo::findCommutedOpIndices
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
Definition: RISCVInstrInfo.cpp:1422
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:501
llvm::IRSimilarity::Illegal
@ Illegal
Definition: IRSimilarityIdentifier.h:76
llvm::RISCVInstrInfo::analyzeBranch
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Definition: RISCVInstrInfo.cpp:752
llvm::TargetRegisterClass
Definition: TargetRegisterInfo.h:45
LiveVariables.h
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:623
llvm::LiveVariables::replaceKillInstruction
void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
Definition: LiveVariables.cpp:752
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:197
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:48
llvm::RISCVCC::COND_LTU
@ COND_LTU
Definition: RISCVInstrInfo.h:35
llvm::RISCVOp::OPERAND_UIMM5
@ OPERAND_UIMM5
Definition: RISCVBaseInfo.h:220
llvm::MCInstrDesc::isCommutable
bool isCommutable() const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
Definition: MCInstrDesc.h:478
llvm::MCID::Flag
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:147
llvm::RISCVInstrInfo::decomposeMachineOperandsTargetFlags
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
Definition: RISCVInstrInfo.cpp:1158
llvm::MachineBasicBlock::rend
reverse_iterator rend()
Definition: MachineBasicBlock.h:287
getOppositeBranchCondition
static ARCCC::CondCode getOppositeBranchCondition(ARCCC::CondCode CC)
Return the inverse of passed condition, i.e. turning COND_E to COND_NE.
Definition: ARCInstrInfo.cpp:102
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:143
llvm::RISCVInstrInfo::isRVVSpill
bool isRVVSpill(const MachineInstr &MI, bool CheckFIs) const
Definition: RISCVInstrInfo.cpp:1877
llvm::RegScavenger::enterBasicBlockEnd
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
Definition: RegisterScavenging.cpp:87
llvm::RISCVOp::OPERAND_UIMM12
@ OPERAND_UIMM12
Definition: RISCVBaseInfo.h:222
llvm::raw_ostream::flush
void flush()
Definition: raw_ostream.h:187
llvm::RISCVInstrInfo::removeBranch
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
Definition: RISCVInstrInfo.cpp:820
llvm::RISCVOp::OPERAND_UIMM2
@ OPERAND_UIMM2
Definition: RISCVBaseInfo.h:217
llvm::RISCVInstrInfo::storeRegToStackSlot
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:454
llvm::MCRegisterInfo::isSubRegisterEq
bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
Definition: MCRegisterInfo.h:568
llvm::RISCVSubtarget::getInstrInfo
const RISCVInstrInfo * getInstrInfo() const override
Definition: RISCVSubtarget.h:126
llvm::LiveInterval
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:686
llvm::RISCVInstrInfo::getNop
MCInst getNop() const override
Definition: RISCVInstrInfo.cpp:60
llvm::TargetInstrInfo::isMBBSafeToOutlineFrom
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
Definition: TargetInstrInfo.cpp:1422
llvm::SlotIndex
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:82
llvm::isIntN
bool isIntN(unsigned N, int64_t x)
Checks if an signed integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:460
llvm::None
const NoneType None
Definition: None.h:24
llvm::RISCVInstrInfo::areMemAccessesTriviallyDisjoint
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
Definition: RISCVInstrInfo.cpp:1126
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:94
CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
Definition: RISCVInstrInfo.cpp:1551
MachineOutlinerConstructionID
MachineOutlinerConstructionID
Definition: RISCVInstrInfo.cpp:1205
llvm::RegState::Dead
@ Dead
Unused definition.
Definition: MachineInstrBuilder.h:50
llvm::TargetInstrInfo::createMIROperandComment
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
Definition: TargetInstrInfo.cpp:1347
llvm::RegState::Implicit
@ Implicit
Not emitted register (e.g. carry, or temporary result).
Definition: MachineInstrBuilder.h:46
llvm::MachineFunction::getSubtarget
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Definition: MachineFunction.h:640
llvm::MachineInstrBuilder::addFrameIndex
const MachineInstrBuilder & addFrameIndex(int Idx) const
Definition: MachineInstrBuilder.h:152
llvm::MachineInstrBuilder::setMIFlag
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
Definition: MachineInstrBuilder.h:278
llvm::cl::opt< bool >
forwardCopyWillClobberTuple
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg, unsigned NumRegs)
Definition: RISCVInstrInfo.cpp:120
llvm::RISCVOp::OPERAND_UIMM3
@ OPERAND_UIMM3
Definition: RISCVBaseInfo.h:218
llvm::MachineInstrBundleIterator::getReverse
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Definition: MachineInstrBundleIterator.h:283
llvm::RISCVVType::decodeVLMUL
std::pair< unsigned, bool > decodeVLMUL(RISCVII::VLMUL VLMUL)
Definition: RISCVBaseInfo.cpp:144
llvm::isInt< 32 >
constexpr bool isInt< 32 >(int64_t x)
Definition: MathExtras.h:373
llvm::LiveIntervals::ReplaceMachineInstrInMaps
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
Definition: LiveIntervals.h:280
llvm::IRSimilarity::Legal
@ Legal
Definition: IRSimilarityIdentifier.h:76
llvm::MachineOperand::isReg
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Definition: MachineOperand.h:320
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:66
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
uint64_t
llvm::MachineFrameInfo::getObjectSize
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
Definition: MachineFrameInfo.h:469
LiveIntervals.h
llvm::ARM_MB::ST
@ ST
Definition: ARMBaseInfo.h:73
llvm::RISCVInstrInfo::isFunctionSafeToOutlineFrom
virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
Definition: RISCVInstrInfo.cpp:1181
llvm::outliner::Candidate
An individual sequence of instructions to be replaced with a call to an outlined function.
Definition: MachineOutliner.h:37
llvm::RISCVOp::OPERAND_UIMM20
@ OPERAND_UIMM20
Definition: RISCVBaseInfo.h:224
llvm::RISCVCC::COND_EQ
@ COND_EQ
Definition: RISCVInstrInfo.h:31
MemoryLocation.h
llvm::RISCVInstrInfo::getMemOperandWithOffsetWidth
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, unsigned &Width, const TargetRegisterInfo *TRI) const
Definition: RISCVInstrInfo.cpp:1103
llvm::RISCVInstrInfo::isMBBSafeToOutlineFrom
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
Definition: RISCVInstrInfo.cpp:1198
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::RegScavenger
Definition: RegisterScavenging.h:34
llvm::MachineFrameInfo::getObjectAlign
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
Definition: MachineFrameInfo.h:483
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
llvm::TargetStackID::ScalableVector
@ ScalableVector
Definition: TargetFrameLowering.h:30
llvm::MCInstBuilder
Definition: MCInstBuilder.h:21
llvm::MachineBasicBlock::getLastNonDebugInstr
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
Definition: MachineBasicBlock.cpp:263
llvm::RISCVII::MO_PCREL_LO
@ MO_PCREL_LO
Definition: RISCVBaseInfo.h:198
MachineFunctionPass.h
isConvertibleToVMV_V_V
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVII::VLMUL LMul)
Definition: RISCVInstrInfo.cpp:125
llvm::RISCVSubtarget
Definition: RISCVSubtarget.h:35
llvm::X86ISD::FMSUB
@ FMSUB
Definition: X86ISelLowering.h:553
llvm::MachineFunction::getName
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
Definition: MachineFunction.cpp:567
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::MachineFunction::getFrameInfo
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Definition: MachineFunction.h:656
llvm::AArch64SysReg::SysReg::Encoding
unsigned Encoding
Definition: AArch64BaseInfo.h:631
llvm::MachineBasicBlock::getParent
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
Definition: MachineBasicBlock.h:234
llvm::MachineInstrBuilder::addMemOperand
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Definition: MachineInstrBuilder.h:202
llvm::RISCVInstrInfo::createMIROperandComment
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:1347
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:97
llvm::RISCVInstrInfo::isCopyInstrImpl
Optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:1016
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
CASE_VFMA_SPLATS
#define CASE_VFMA_SPLATS(OP)
Definition: RISCVInstrInfo.cpp:1416
RISCV.h
llvm::MachineInstr::MIFlag
MIFlag
Definition: MachineInstr.h:82
llvm::RISCVSubtarget::hasStdExtZba
bool hasStdExtZba() const
Definition: RISCVSubtarget.h:151
llvm::SlotIndex::getRegSlot
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:253
llvm::LiveIntervals::getInterval
LiveInterval & getInterval(Register Reg)
Definition: LiveIntervals.h:114
llvm::MachineFunction
Definition: MachineFunction.h:241
CASE_VFMA_OPCODE_LMULS
#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)
Definition: RISCVInstrInfo.cpp:1412
llvm::MipsII::MO_TPREL_LO
@ MO_TPREL_LO
Definition: MipsBaseInfo.h:74
llvm::MachineBasicBlock::succ_empty
bool succ_empty() const
Definition: MachineBasicBlock.h:356
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::MachineFrameInfo::setStackID
void setStackID(int ObjectIdx, uint8_t ID)
Definition: MachineFrameInfo.h:728
llvm::MachineOperand::getMBB
MachineBasicBlock * getMBB() const
Definition: MachineOperand.h:561
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
llvm::any_of
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1612
CASE_WIDEOP_OPCODE_LMULS
#define CASE_WIDEOP_OPCODE_LMULS(OP)
Definition: RISCVInstrInfo.cpp:1679
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:178
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:58
MBBI
MachineBasicBlock MachineBasicBlock::iterator MBBI
Definition: AArch64SLSHardening.cpp:75
llvm::MachineInstr::getOpcode
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:491
llvm::RISCVSubtarget::getRegisterInfo
const RISCVRegisterInfo * getRegisterInfo() const override
Definition: RISCVSubtarget.h:127
llvm::MCInstBuilder::addImm
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Definition: MCInstBuilder.h:37
llvm::RISCVInstrInfo::movImm
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Definition: RISCVInstrInfo.cpp:640
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::RISCVII::LMUL_2
@ LMUL_2
Definition: RISCVBaseInfo.h:110
uint32_t
llvm::X86ISD::FLD
@ FLD
This instruction implements an extending load to FP stack slots.
Definition: X86ISelLowering.h:836
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::RISCVII::MO_TPREL_ADD
@ MO_TPREL_ADD
Definition: RISCVBaseInfo.h:203
S
add sub stmia L5 ldr r0 bl L_printf $stub Instead of a and a wouldn t it be better to do three moves *Return an aggregate type is even return S
Definition: README.txt:210
llvm::RISCVInstrInfo::buildOutlinedFrame
virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Definition: RISCVInstrInfo.cpp:1306
llvm::RISCVInstrInfo::getVLENFactoredAmount
Register getVLENFactoredAmount(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, int64_t Amount, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Definition: RISCVInstrInfo.cpp:1770
llvm::RISCVInstrInfo::insertBranch
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
Definition: RISCVInstrInfo.cpp:854
llvm::MCRegisterInfo
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Definition: MCRegisterInfo.h:135
getCondFromBranchOpc
static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc)
Definition: RISCVInstrInfo.cpp:680
llvm::RISCVOp::OPERAND_UIMM4
@ OPERAND_UIMM4
Definition: RISCVBaseInfo.h:219
llvm::SignExtend64
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition: MathExtras.h:804
llvm::RISCVII::MO_DIRECT_FLAG_MASK
@ MO_DIRECT_FLAG_MASK
Definition: RISCVBaseInfo.h:210
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:133
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::MachineBasicBlock::addLiveIn
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
Definition: MachineBasicBlock.h:376
llvm::ISD::FrameIndex
@ FrameIndex
Definition: ISDOpcodes.h:80
llvm::MachineRegisterInfo::replaceRegWith
void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
Definition: MachineRegisterInfo.cpp:378
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
CASE_WIDEOP_CHANGE_OPCODE_LMULS
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
Definition: RISCVInstrInfo.cpp:1696
llvm::LLVMContext::diagnose
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Definition: LLVMContext.cpp:243
llvm::RISCVVType::printVType
void printVType(unsigned VType, raw_ostream &OS)
Definition: RISCVBaseInfo.cpp:160
llvm::RISCVVType::getVLMUL
static RISCVII::VLMUL getVLMUL(unsigned VType)
Definition: RISCVBaseInfo.h:398
llvm::MCInstrInfo
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
llvm::MachineFunction::getFunction
Function & getFunction()
Return the LLVM function that this machine code represents.
Definition: MachineFunction.h:606
llvm::RISCVInstrInfo::getOutliningCandidateInfo
outliner::OutlinedFunction getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const override
Definition: RISCVInstrInfo.cpp:1214
llvm::TargetRegisterInfo::getRegSizeInBits
unsigned getRegSizeInBits(const TargetRegisterClass &RC) const
Return the size in bits of a register from class RC.
Definition: TargetRegisterInfo.h:275
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Definition: MachineFunction.h:636
llvm::DestSourcePair
Definition: TargetInstrInfo.h:68
get
Should compile to something r4 addze r3 instead we get
Definition: README.txt:24
CASE_WIDEOP_OPCODE_LMULS_MF4
#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
Definition: RISCVInstrInfo.cpp:1672
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:326
llvm::MachineBasicBlock::insert
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
Definition: MachineBasicBlock.cpp:1308
llvm::MachineInstr::hasUnmodeledSideEffects
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
Definition: MachineInstr.cpp:1410
llvm::ISD::INLINEASM_BR
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
Definition: ISDOpcodes.h:1021
llvm::RegScavenger::setRegUsed
void setRegUsed(Register Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
Definition: RegisterScavenging.cpp:51
llvm::RISCVSubtarget::getXLen
unsigned getXLen() const
Definition: RISCVSubtarget.h:184
RISCVInstrInfo.h
llvm::LiveIntervals
Definition: LiveIntervals.h:54
llvm::RISCVInstrInfo::isStoreToStackSlot
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition: RISCVInstrInfo.cpp:96
llvm::RISCVCC::COND_GE
@ COND_GE
Definition: RISCVInstrInfo.h:34
llvm::RISCVII::MO_PCREL_HI
@ MO_PCREL_HI
Definition: RISCVBaseInfo.h:199
llvm::MachineRegisterInfo::clearVirtRegs
void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
Definition: MachineRegisterInfo.cpp:200
llvm::MachineOperand::isImm
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Definition: MachineOperand.h:322
llvm::MachineMemOperand::MOStore
@ MOStore
The memory access writes data.
Definition: MachineMemOperand.h:135
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:417
llvm::ISD::ADD
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
llvm::makeArrayRef
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:475
llvm::RISCVInstrInfo::isAsCheapAsAMove
bool isAsCheapAsAMove(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:994
llvm::RISCVInstrInfo::commuteInstructionImpl
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Definition: RISCVInstrInfo.cpp:1564
RISCVSubtarget.h
llvm::RISCVInstrInfo::loadRegFromStackSlot
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:548
llvm::getKillRegState
unsigned getKillRegState(bool B)
Definition: MachineInstrBuilder.h:508
llvm::RISCVInstrInfo::copyPhysReg
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc) const override
Definition: RISCVInstrInfo.cpp:256
llvm::MachineFrameInfo
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Definition: MachineFrameInfo.h:105
MachineOutlinerDefault
@ MachineOutlinerDefault
Definition: RISCVInstrInfo.cpp:1206
llvm::RISCVCC::CondCode
CondCode
Definition: RISCVInstrInfo.h:30
llvm::MemoryLocation::UnknownSize
@ UnknownSize
Definition: MemoryLocation.h:215
CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
Definition: RISCVInstrInfo.cpp:1689
SmallVector.h
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:1006
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:277
MachineInstrBuilder.h
llvm::RISCVII::MO_GOT_HI
@ MO_GOT_HI
Definition: RISCVBaseInfo.h:200
llvm::ISD::MUL
@ MUL
Definition: ISDOpcodes.h:241
llvm::TargetInstrInfo::findCommutedOpIndices
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
Definition: TargetInstrInfo.cpp:294
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:328
N
#define N
llvm::RISCVInstrInfo::verifyInstruction
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
Definition: RISCVInstrInfo.cpp:1040
RISCVMachineFunctionInfo.h
llvm::LiveRange::getSegmentContaining
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
Definition: LiveInterval.h:408
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
llvm::MachineBasicBlock::empty
bool empty() const
Definition: MachineBasicBlock.h:249
llvm::MCInstBuilder::addReg
MCInstBuilder & addReg(unsigned Reg)
Add a new register operand.
Definition: MCInstBuilder.h:31
CASE_VFMA_CHANGE_OPCODE_LMULS
#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
Definition: RISCVInstrInfo.cpp:1555
llvm::RISCVII::VLMUL
VLMUL
Definition: RISCVBaseInfo.h:108
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
llvm::Function::hasMinSize
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:660
llvm::TargetRegisterInfo::getSubReg
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
Definition: TargetRegisterInfo.h:1103
isRVVWholeLoadStore
static bool isRVVWholeLoadStore(unsigned Opcode)
Definition: RISCVInstrInfo.cpp:1849
TM
const char LLVMTargetMachineRef TM
Definition: PassBuilderBindings.cpp:47
llvm::LiveVariables
Definition: LiveVariables.h:47
llvm::MCInstrInfo::get
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::cl::desc
Definition: CommandLine.h:405
RegisterScavenging.h
llvm::RegState::Kill
@ Kill
The last use of a register.
Definition: MachineInstrBuilder.h:48
llvm::RISCVSubtarget::hasStdExtM
bool hasStdExtM() const
Definition: RISCVSubtarget.h:144
CASE_VFMA_CHANGE_OPCODE_SPLATS
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
Definition: RISCVInstrInfo.cpp:1559
llvm::MachineInstrBundleIterator< const MachineInstr >
TargetRegistry.h
llvm::MCSubtargetInfo
Generic base class for all target subtargets.
Definition: MCSubtargetInfo.h:76
llvm::AVRII::MO_LO
@ MO_LO
On a symbol operand, this represents the lo part.
Definition: AVRInstrInfo.h:52
llvm::RISCVOp::OPERAND_FIRST_RISCV_IMM
@ OPERAND_FIRST_RISCV_IMM
Definition: RISCVBaseInfo.h:216
llvm::MCInstrDesc::operands
iterator_range< const_opInfo_iterator > operands() const
Definition: MCInstrDesc.h:237
llvm::RISCVII::MO_CALL
@ MO_CALL
Definition: RISCVBaseInfo.h:194
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:279
llvm::MCInstrDesc::isConditionalBranch
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Definition: MCInstrDesc.h:314
llvm::RISCVInstrInfo::getInstSizeInBytes
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:968
llvm::MachineOperand::isIdenticalTo
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
Definition: MachineOperand.cpp:285
llvm::AVRII::MO_HI
@ MO_HI
On a symbol operand, this represents the hi part.
Definition: AVRInstrInfo.h:55
llvm::MCRegister
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:24
llvm::RISCVCC::COND_NE
@ COND_NE
Definition: RISCVInstrInfo.h:32
RISCVTargetMachine.h