LLVM  16.0.0git
RISCVInstrInfo.cpp
Go to the documentation of this file.
1 //===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the RISCV implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVInstrInfo.h"
15 #include "RISCV.h"
17 #include "RISCVSubtarget.h"
18 #include "RISCVTargetMachine.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/MC/MCInstBuilder.h"
29 #include "llvm/MC/TargetRegistry.h"
31 
32 using namespace llvm;
33 
34 #define GEN_CHECK_COMPRESS_INSTR
35 #include "RISCVGenCompressInstEmitter.inc"
36 
37 #define GET_INSTRINFO_CTOR_DTOR
38 #define GET_INSTRINFO_NAMED_OPS
39 #include "RISCVGenInstrInfo.inc"
40 
42  "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
43  cl::desc("Prefer whole register move for vector registers."));
44 
46 
47 using namespace RISCV;
48 
49 #define GET_RISCVVPseudosTable_IMPL
50 #include "RISCVGenSearchableTables.inc"
51 
52 } // namespace llvm::RISCVVPseudosTable
53 
55  : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
56  STI(STI) {}
57 
59  if (STI.getFeatureBits()[RISCV::FeatureStdExtC])
60  return MCInstBuilder(RISCV::C_NOP);
61  return MCInstBuilder(RISCV::ADDI)
62  .addReg(RISCV::X0)
63  .addReg(RISCV::X0)
64  .addImm(0);
65 }
66 
68  int &FrameIndex) const {
69  switch (MI.getOpcode()) {
70  default:
71  return 0;
72  case RISCV::LB:
73  case RISCV::LBU:
74  case RISCV::LH:
75  case RISCV::LHU:
76  case RISCV::FLH:
77  case RISCV::LW:
78  case RISCV::FLW:
79  case RISCV::LWU:
80  case RISCV::LD:
81  case RISCV::FLD:
82  break;
83  }
84 
85  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
86  MI.getOperand(2).getImm() == 0) {
87  FrameIndex = MI.getOperand(1).getIndex();
88  return MI.getOperand(0).getReg();
89  }
90 
91  return 0;
92 }
93 
95  int &FrameIndex) const {
96  switch (MI.getOpcode()) {
97  default:
98  return 0;
99  case RISCV::SB:
100  case RISCV::SH:
101  case RISCV::SW:
102  case RISCV::FSH:
103  case RISCV::FSW:
104  case RISCV::SD:
105  case RISCV::FSD:
106  break;
107  }
108 
109  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
110  MI.getOperand(2).getImm() == 0) {
111  FrameIndex = MI.getOperand(1).getIndex();
112  return MI.getOperand(0).getReg();
113  }
114 
115  return 0;
116 }
117 
/// Return true if a forward (low-register-first) copy of an NF-register
/// tuple would overwrite source registers that have not been copied yet.
/// This happens exactly when the destination encoding lies strictly above
/// the source encoding but within the NumRegs-wide source window; the
/// caller then copies the tuple in reverse order instead.
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // A destination at or below the source can never clobber a not-yet-copied
  // source register during a forward copy.
  if (DstReg <= SrcReg)
    return false;
  // Overlap occurs only while the destination falls inside the source span.
  return DstReg - SrcReg < NumRegs;
}
122 
123 static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
124  const MachineBasicBlock &MBB,
127  RISCVII::VLMUL LMul) {
129  return false;
130 
131  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
132  "Unexpected COPY instruction.");
133  Register SrcReg = MBBI->getOperand(1).getReg();
134  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
135 
136  bool FoundDef = false;
137  bool FirstVSetVLI = false;
138  unsigned FirstSEW = 0;
139  while (MBBI != MBB.begin()) {
140  --MBBI;
141  if (MBBI->isMetaInstruction())
142  continue;
143 
144  if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
145  MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
146  MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
147  // There is a vsetvli between COPY and source define instruction.
148  // vy = def_vop ... (producing instruction)
149  // ...
150  // vsetvli
151  // ...
152  // vx = COPY vy
153  if (!FoundDef) {
154  if (!FirstVSetVLI) {
155  FirstVSetVLI = true;
156  unsigned FirstVType = MBBI->getOperand(2).getImm();
157  RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
158  FirstSEW = RISCVVType::getSEW(FirstVType);
159  // The first encountered vsetvli must have the same lmul as the
160  // register class of COPY.
161  if (FirstLMul != LMul)
162  return false;
163  }
164  // Only permit `vsetvli x0, x0, vtype` between COPY and the source
165  // define instruction.
166  if (MBBI->getOperand(0).getReg() != RISCV::X0)
167  return false;
168  if (MBBI->getOperand(1).isImm())
169  return false;
170  if (MBBI->getOperand(1).getReg() != RISCV::X0)
171  return false;
172  continue;
173  }
174 
175  // MBBI is the first vsetvli before the producing instruction.
176  unsigned VType = MBBI->getOperand(2).getImm();
177  // If there is a vsetvli between COPY and the producing instruction.
178  if (FirstVSetVLI) {
179  // If SEW is different, return false.
180  if (RISCVVType::getSEW(VType) != FirstSEW)
181  return false;
182  }
183 
184  // If the vsetvli is tail undisturbed, keep the whole register move.
185  if (!RISCVVType::isTailAgnostic(VType))
186  return false;
187 
188  // The checking is conservative. We only have register classes for
189  // LMUL = 1/2/4/8. We should be able to convert vmv1r.v to vmv.v.v
190  // for fractional LMUL operations. However, we could not use the vsetvli
191  // lmul for widening operations. The result of widening operation is
192  // 2 x LMUL.
193  return LMul == RISCVVType::getVLMUL(VType);
194  } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
195  return false;
196  } else if (MBBI->getNumDefs()) {
197  // Check all the instructions which will change VL.
198  // For example, vleff has implicit def VL.
199  if (MBBI->modifiesRegister(RISCV::VL))
200  return false;
201 
202  // Only converting whole register copies to vmv.v.v when the defining
203  // value appears in the explicit operands.
204  for (const MachineOperand &MO : MBBI->explicit_operands()) {
205  if (!MO.isReg() || !MO.isDef())
206  continue;
207  if (!FoundDef && TRI->isSubRegisterEq(MO.getReg(), SrcReg)) {
208  // We only permit the source of COPY has the same LMUL as the defined
209  // operand.
210  // There are cases we need to keep the whole register copy if the LMUL
211  // is different.
212  // For example,
213  // $x0 = PseudoVSETIVLI 4, 73 // vsetivli zero, 4, e16,m2,ta,m
214  // $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2
215  // # The COPY may be created by vlmul_trunc intrinsic.
216  // $v26m2 = COPY renamable $v28m2, implicit killed $v28m4
217  //
218  // After widening, the valid value will be 4 x e32 elements. If we
219  // convert the COPY to vmv.v.v, it will only copy 4 x e16 elements.
220  // FIXME: The COPY of subregister of Zvlsseg register will not be able
221  // to convert to vmv.v.[v|i] under the constraint.
222  if (MO.getReg() != SrcReg)
223  return false;
224 
225  // In widening reduction instructions with LMUL_1 input vector case,
226  // only checking the LMUL is insufficient due to reduction result is
227  // always LMUL_1.
228  // For example,
229  // $x11 = PseudoVSETIVLI 1, 64 // vsetivli a1, 1, e8, m1, ta, mu
230  // $v8m1 = PseudoVWREDSUM_VS_M1 $v26, $v27
231  // $v26 = COPY killed renamable $v8
232  // After widening, The valid value will be 1 x e16 elements. If we
233  // convert the COPY to vmv.v.v, it will only copy 1 x e8 elements.
234  uint64_t TSFlags = MBBI->getDesc().TSFlags;
236  return false;
237 
238  // If the producing instruction does not depend on vsetvli, do not
239  // convert COPY to vmv.v.v. For example, VL1R_V or PseudoVRELOAD.
241  return false;
242 
243  // Found the definition.
244  FoundDef = true;
245  DefMBBI = MBBI;
246  break;
247  }
248  }
249  }
250  }
251 
252  return false;
253 }
254 
257  const DebugLoc &DL, MCRegister DstReg,
258  MCRegister SrcReg, bool KillSrc) const {
259  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
260  BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
261  .addReg(SrcReg, getKillRegState(KillSrc))
262  .addImm(0);
263  return;
264  }
265 
266  // Handle copy from csr
267  if (RISCV::VCSRRegClass.contains(SrcReg) &&
268  RISCV::GPRRegClass.contains(DstReg)) {
270  BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
272  .addReg(RISCV::X0);
273  return;
274  }
275 
276  // FPR->FPR copies and VR->VR copies.
277  unsigned Opc;
278  bool IsScalableVector = true;
279  unsigned NF = 1;
281  unsigned SubRegIdx = RISCV::sub_vrm1_0;
282  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
283  Opc = RISCV::FSGNJ_H;
284  IsScalableVector = false;
285  } else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
286  Opc = RISCV::FSGNJ_S;
287  IsScalableVector = false;
288  } else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
289  Opc = RISCV::FSGNJ_D;
290  IsScalableVector = false;
291  } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
292  Opc = RISCV::PseudoVMV1R_V;
293  LMul = RISCVII::LMUL_1;
294  } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
295  Opc = RISCV::PseudoVMV2R_V;
296  LMul = RISCVII::LMUL_2;
297  } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
298  Opc = RISCV::PseudoVMV4R_V;
299  LMul = RISCVII::LMUL_4;
300  } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
301  Opc = RISCV::PseudoVMV8R_V;
302  LMul = RISCVII::LMUL_8;
303  } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
304  Opc = RISCV::PseudoVMV1R_V;
305  SubRegIdx = RISCV::sub_vrm1_0;
306  NF = 2;
307  LMul = RISCVII::LMUL_1;
308  } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
309  Opc = RISCV::PseudoVMV2R_V;
310  SubRegIdx = RISCV::sub_vrm2_0;
311  NF = 2;
312  LMul = RISCVII::LMUL_2;
313  } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
314  Opc = RISCV::PseudoVMV4R_V;
315  SubRegIdx = RISCV::sub_vrm4_0;
316  NF = 2;
317  LMul = RISCVII::LMUL_4;
318  } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
319  Opc = RISCV::PseudoVMV1R_V;
320  SubRegIdx = RISCV::sub_vrm1_0;
321  NF = 3;
322  LMul = RISCVII::LMUL_1;
323  } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
324  Opc = RISCV::PseudoVMV2R_V;
325  SubRegIdx = RISCV::sub_vrm2_0;
326  NF = 3;
327  LMul = RISCVII::LMUL_2;
328  } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
329  Opc = RISCV::PseudoVMV1R_V;
330  SubRegIdx = RISCV::sub_vrm1_0;
331  NF = 4;
332  LMul = RISCVII::LMUL_1;
333  } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
334  Opc = RISCV::PseudoVMV2R_V;
335  SubRegIdx = RISCV::sub_vrm2_0;
336  NF = 4;
337  LMul = RISCVII::LMUL_2;
338  } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
339  Opc = RISCV::PseudoVMV1R_V;
340  SubRegIdx = RISCV::sub_vrm1_0;
341  NF = 5;
342  LMul = RISCVII::LMUL_1;
343  } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
344  Opc = RISCV::PseudoVMV1R_V;
345  SubRegIdx = RISCV::sub_vrm1_0;
346  NF = 6;
347  LMul = RISCVII::LMUL_1;
348  } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
349  Opc = RISCV::PseudoVMV1R_V;
350  SubRegIdx = RISCV::sub_vrm1_0;
351  NF = 7;
352  LMul = RISCVII::LMUL_1;
353  } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
354  Opc = RISCV::PseudoVMV1R_V;
355  SubRegIdx = RISCV::sub_vrm1_0;
356  NF = 8;
357  LMul = RISCVII::LMUL_1;
358  } else {
359  llvm_unreachable("Impossible reg-to-reg copy");
360  }
361 
362  if (IsScalableVector) {
363  bool UseVMV_V_V = false;
365  unsigned VIOpc;
366  if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
367  UseVMV_V_V = true;
368  // We only need to handle LMUL = 1/2/4/8 here because we only define
369  // vector register classes for LMUL = 1/2/4/8.
370  switch (LMul) {
371  default:
372  llvm_unreachable("Impossible LMUL for vector register copy.");
373  case RISCVII::LMUL_1:
374  Opc = RISCV::PseudoVMV_V_V_M1;
375  VIOpc = RISCV::PseudoVMV_V_I_M1;
376  break;
377  case RISCVII::LMUL_2:
378  Opc = RISCV::PseudoVMV_V_V_M2;
379  VIOpc = RISCV::PseudoVMV_V_I_M2;
380  break;
381  case RISCVII::LMUL_4:
382  Opc = RISCV::PseudoVMV_V_V_M4;
383  VIOpc = RISCV::PseudoVMV_V_I_M4;
384  break;
385  case RISCVII::LMUL_8:
386  Opc = RISCV::PseudoVMV_V_V_M8;
387  VIOpc = RISCV::PseudoVMV_V_I_M8;
388  break;
389  }
390  }
391 
392  bool UseVMV_V_I = false;
393  if (UseVMV_V_V && (DefMBBI->getOpcode() == VIOpc)) {
394  UseVMV_V_I = true;
395  Opc = VIOpc;
396  }
397 
398  if (NF == 1) {
399  auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), DstReg);
400  if (UseVMV_V_I)
401  MIB = MIB.add(DefMBBI->getOperand(1));
402  else
403  MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc));
404  if (UseVMV_V_V) {
405  const MCInstrDesc &Desc = DefMBBI->getDesc();
406  MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
407  MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
408  MIB.addReg(RISCV::VL, RegState::Implicit);
409  MIB.addReg(RISCV::VTYPE, RegState::Implicit);
410  }
411  } else {
413 
414  int I = 0, End = NF, Incr = 1;
415  unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
416  unsigned DstEncoding = TRI->getEncodingValue(DstReg);
417  unsigned LMulVal;
418  bool Fractional;
419  std::tie(LMulVal, Fractional) = RISCVVType::decodeVLMUL(LMul);
420  assert(!Fractional && "It is impossible be fractional lmul here.");
421  if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
422  I = NF - 1;
423  End = -1;
424  Incr = -1;
425  }
426 
427  for (; I != End; I += Incr) {
428  auto MIB = BuildMI(MBB, MBBI, DL, get(Opc),
429  TRI->getSubReg(DstReg, SubRegIdx + I));
430  if (UseVMV_V_I)
431  MIB = MIB.add(DefMBBI->getOperand(1));
432  else
433  MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
434  getKillRegState(KillSrc));
435  if (UseVMV_V_V) {
436  const MCInstrDesc &Desc = DefMBBI->getDesc();
437  MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
438  MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
439  MIB.addReg(RISCV::VL, RegState::Implicit);
440  MIB.addReg(RISCV::VTYPE, RegState::Implicit);
441  }
442  }
443  }
444  } else {
445  BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
446  .addReg(SrcReg, getKillRegState(KillSrc))
447  .addReg(SrcReg, getKillRegState(KillSrc));
448  }
449 }
450 
453  Register SrcReg, bool IsKill, int FI,
454  const TargetRegisterClass *RC,
455  const TargetRegisterInfo *TRI) const {
456  DebugLoc DL;
457  if (I != MBB.end())
458  DL = I->getDebugLoc();
459 
460  MachineFunction *MF = MBB.getParent();
461  MachineFrameInfo &MFI = MF->getFrameInfo();
462 
463  unsigned Opcode;
464  bool IsScalableVector = true;
465  bool IsZvlsseg = true;
466  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
467  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
468  RISCV::SW : RISCV::SD;
469  IsScalableVector = false;
470  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
471  Opcode = RISCV::FSH;
472  IsScalableVector = false;
473  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
474  Opcode = RISCV::FSW;
475  IsScalableVector = false;
476  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
477  Opcode = RISCV::FSD;
478  IsScalableVector = false;
479  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
480  Opcode = RISCV::PseudoVSPILL_M1;
481  IsZvlsseg = false;
482  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
483  Opcode = RISCV::PseudoVSPILL_M2;
484  IsZvlsseg = false;
485  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
486  Opcode = RISCV::PseudoVSPILL_M4;
487  IsZvlsseg = false;
488  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
489  Opcode = RISCV::PseudoVSPILL_M8;
490  IsZvlsseg = false;
491  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
492  Opcode = RISCV::PseudoVSPILL2_M1;
493  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
494  Opcode = RISCV::PseudoVSPILL2_M2;
495  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
496  Opcode = RISCV::PseudoVSPILL2_M4;
497  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
498  Opcode = RISCV::PseudoVSPILL3_M1;
499  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
500  Opcode = RISCV::PseudoVSPILL3_M2;
501  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
502  Opcode = RISCV::PseudoVSPILL4_M1;
503  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
504  Opcode = RISCV::PseudoVSPILL4_M2;
505  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
506  Opcode = RISCV::PseudoVSPILL5_M1;
507  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
508  Opcode = RISCV::PseudoVSPILL6_M1;
509  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
510  Opcode = RISCV::PseudoVSPILL7_M1;
511  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
512  Opcode = RISCV::PseudoVSPILL8_M1;
513  else
514  llvm_unreachable("Can't store this register to stack slot");
515 
516  if (IsScalableVector) {
520 
522  auto MIB = BuildMI(MBB, I, DL, get(Opcode))
523  .addReg(SrcReg, getKillRegState(IsKill))
524  .addFrameIndex(FI)
525  .addMemOperand(MMO);
526  if (IsZvlsseg) {
527  // For spilling/reloading Zvlsseg registers, append the dummy field for
528  // the scaled vector length. The argument will be used when expanding
529  // these pseudo instructions.
530  MIB.addReg(RISCV::X0);
531  }
532  } else {
535  MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
536 
537  BuildMI(MBB, I, DL, get(Opcode))
538  .addReg(SrcReg, getKillRegState(IsKill))
539  .addFrameIndex(FI)
540  .addImm(0)
541  .addMemOperand(MMO);
542  }
543 }
544 
547  Register DstReg, int FI,
548  const TargetRegisterClass *RC,
549  const TargetRegisterInfo *TRI) const {
550  DebugLoc DL;
551  if (I != MBB.end())
552  DL = I->getDebugLoc();
553 
554  MachineFunction *MF = MBB.getParent();
555  MachineFrameInfo &MFI = MF->getFrameInfo();
556 
557  unsigned Opcode;
558  bool IsScalableVector = true;
559  bool IsZvlsseg = true;
560  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
561  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
562  RISCV::LW : RISCV::LD;
563  IsScalableVector = false;
564  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
565  Opcode = RISCV::FLH;
566  IsScalableVector = false;
567  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
568  Opcode = RISCV::FLW;
569  IsScalableVector = false;
570  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
571  Opcode = RISCV::FLD;
572  IsScalableVector = false;
573  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
574  Opcode = RISCV::PseudoVRELOAD_M1;
575  IsZvlsseg = false;
576  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
577  Opcode = RISCV::PseudoVRELOAD_M2;
578  IsZvlsseg = false;
579  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
580  Opcode = RISCV::PseudoVRELOAD_M4;
581  IsZvlsseg = false;
582  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
583  Opcode = RISCV::PseudoVRELOAD_M8;
584  IsZvlsseg = false;
585  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
586  Opcode = RISCV::PseudoVRELOAD2_M1;
587  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
588  Opcode = RISCV::PseudoVRELOAD2_M2;
589  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
590  Opcode = RISCV::PseudoVRELOAD2_M4;
591  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
592  Opcode = RISCV::PseudoVRELOAD3_M1;
593  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
594  Opcode = RISCV::PseudoVRELOAD3_M2;
595  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
596  Opcode = RISCV::PseudoVRELOAD4_M1;
597  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
598  Opcode = RISCV::PseudoVRELOAD4_M2;
599  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
600  Opcode = RISCV::PseudoVRELOAD5_M1;
601  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
602  Opcode = RISCV::PseudoVRELOAD6_M1;
603  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
604  Opcode = RISCV::PseudoVRELOAD7_M1;
605  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
606  Opcode = RISCV::PseudoVRELOAD8_M1;
607  else
608  llvm_unreachable("Can't load this register from stack slot");
609 
610  if (IsScalableVector) {
614 
616  auto MIB = BuildMI(MBB, I, DL, get(Opcode), DstReg)
617  .addFrameIndex(FI)
618  .addMemOperand(MMO);
619  if (IsZvlsseg) {
620  // For spilling/reloading Zvlsseg registers, append the dummy field for
621  // the scaled vector length. The argument will be used when expanding
622  // these pseudo instructions.
623  MIB.addReg(RISCV::X0);
624  }
625  } else {
628  MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
629 
630  BuildMI(MBB, I, DL, get(Opcode), DstReg)
631  .addFrameIndex(FI)
632  .addImm(0)
633  .addMemOperand(MMO);
634  }
635 }
636 
640  VirtRegMap *VRM) const {
641  const MachineFrameInfo &MFI = MF.getFrameInfo();
642 
643  // The below optimizations narrow the load so they are only valid for little
644  // endian.
645  // TODO: Support big endian by adding an offset into the frame object?
646  if (MF.getDataLayout().isBigEndian())
647  return nullptr;
648 
649  // Fold load from stack followed by sext.w into lw.
650  // TODO: Fold with sext.b, sext.h, zext.b, zext.h, zext.w?
651  if (Ops.size() != 1 || Ops[0] != 1)
652  return nullptr;
653 
654  unsigned LoadOpc;
655  switch (MI.getOpcode()) {
656  default:
657  if (RISCV::isSEXT_W(MI)) {
658  LoadOpc = RISCV::LW;
659  break;
660  }
661  if (RISCV::isZEXT_W(MI)) {
662  LoadOpc = RISCV::LWU;
663  break;
664  }
665  if (RISCV::isZEXT_B(MI)) {
666  LoadOpc = RISCV::LBU;
667  break;
668  }
669  return nullptr;
670  case RISCV::SEXT_H:
671  LoadOpc = RISCV::LH;
672  break;
673  case RISCV::SEXT_B:
674  LoadOpc = RISCV::LB;
675  break;
676  case RISCV::ZEXT_H_RV32:
677  case RISCV::ZEXT_H_RV64:
678  LoadOpc = RISCV::LHU;
679  break;
680  }
681 
686 
687  Register DstReg = MI.getOperand(0).getReg();
688  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(LoadOpc),
689  DstReg)
691  .addImm(0)
692  .addMemOperand(MMO);
693 }
694 
697  const DebugLoc &DL, Register DstReg, uint64_t Val,
698  MachineInstr::MIFlag Flag) const {
699  Register SrcReg = RISCV::X0;
700 
701  if (!STI.is64Bit() && !isInt<32>(Val))
702  report_fatal_error("Should only materialize 32-bit constants for RV32");
703 
705  RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());
706  assert(!Seq.empty());
707 
708  for (RISCVMatInt::Inst &Inst : Seq) {
709  switch (Inst.getOpndKind()) {
710  case RISCVMatInt::Imm:
711  BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
712  .addImm(Inst.Imm)
713  .setMIFlag(Flag);
714  break;
715  case RISCVMatInt::RegX0:
716  BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
717  .addReg(SrcReg, RegState::Kill)
718  .addReg(RISCV::X0)
719  .setMIFlag(Flag);
720  break;
721  case RISCVMatInt::RegReg:
722  BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
723  .addReg(SrcReg, RegState::Kill)
724  .addReg(SrcReg, RegState::Kill)
725  .setMIFlag(Flag);
726  break;
727  case RISCVMatInt::RegImm:
728  BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
729  .addReg(SrcReg, RegState::Kill)
730  .addImm(Inst.Imm)
731  .setMIFlag(Flag);
732  break;
733  }
734 
735  // Only the first instruction has X0 as its source.
736  SrcReg = DstReg;
737  }
738 }
739 
741  switch (Opc) {
742  default:
743  return RISCVCC::COND_INVALID;
744  case RISCV::BEQ:
745  return RISCVCC::COND_EQ;
746  case RISCV::BNE:
747  return RISCVCC::COND_NE;
748  case RISCV::BLT:
749  return RISCVCC::COND_LT;
750  case RISCV::BGE:
751  return RISCVCC::COND_GE;
752  case RISCV::BLTU:
753  return RISCVCC::COND_LTU;
754  case RISCV::BGEU:
755  return RISCVCC::COND_GEU;
756  }
757 }
758 
759 // The contents of values added to Cond are not examined outside of
760 // RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
761 // push BranchOpcode, Reg1, Reg2.
764  // Block ends with fall-through condbranch.
765  assert(LastInst.getDesc().isConditionalBranch() &&
766  "Unknown conditional branch");
767  Target = LastInst.getOperand(2).getMBB();
768  unsigned CC = getCondFromBranchOpc(LastInst.getOpcode());
769  Cond.push_back(MachineOperand::CreateImm(CC));
770  Cond.push_back(LastInst.getOperand(0));
771  Cond.push_back(LastInst.getOperand(1));
772 }
773 
775  switch (CC) {
776  default:
777  llvm_unreachable("Unknown condition code!");
778  case RISCVCC::COND_EQ:
779  return get(RISCV::BEQ);
780  case RISCVCC::COND_NE:
781  return get(RISCV::BNE);
782  case RISCVCC::COND_LT:
783  return get(RISCV::BLT);
784  case RISCVCC::COND_GE:
785  return get(RISCV::BGE);
786  case RISCVCC::COND_LTU:
787  return get(RISCV::BLTU);
788  case RISCVCC::COND_GEU:
789  return get(RISCV::BGEU);
790  }
791 }
792 
794  switch (CC) {
795  default:
796  llvm_unreachable("Unrecognized conditional branch");
797  case RISCVCC::COND_EQ:
798  return RISCVCC::COND_NE;
799  case RISCVCC::COND_NE:
800  return RISCVCC::COND_EQ;
801  case RISCVCC::COND_LT:
802  return RISCVCC::COND_GE;
803  case RISCVCC::COND_GE:
804  return RISCVCC::COND_LT;
805  case RISCVCC::COND_LTU:
806  return RISCVCC::COND_GEU;
807  case RISCVCC::COND_GEU:
808  return RISCVCC::COND_LTU;
809  }
810 }
811 
814  MachineBasicBlock *&FBB,
816  bool AllowModify) const {
817  TBB = FBB = nullptr;
818  Cond.clear();
819 
820  // If the block has no terminators, it just falls into the block after it.
822  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
823  return false;
824 
825  // Count the number of terminators and find the first unconditional or
826  // indirect branch.
827  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
828  int NumTerminators = 0;
829  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
830  J++) {
831  NumTerminators++;
832  if (J->getDesc().isUnconditionalBranch() ||
833  J->getDesc().isIndirectBranch()) {
834  FirstUncondOrIndirectBr = J.getReverse();
835  }
836  }
837 
838  // If AllowModify is true, we can erase any terminators after
839  // FirstUncondOrIndirectBR.
840  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
841  while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
842  std::next(FirstUncondOrIndirectBr)->eraseFromParent();
843  NumTerminators--;
844  }
845  I = FirstUncondOrIndirectBr;
846  }
847 
848  // We can't handle blocks that end in an indirect branch.
849  if (I->getDesc().isIndirectBranch())
850  return true;
851 
852  // We can't handle blocks with more than 2 terminators.
853  if (NumTerminators > 2)
854  return true;
855 
856  // Handle a single unconditional branch.
857  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
859  return false;
860  }
861 
862  // Handle a single conditional branch.
863  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
865  return false;
866  }
867 
868  // Handle a conditional branch followed by an unconditional branch.
869  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
870  I->getDesc().isUnconditionalBranch()) {
871  parseCondBranch(*std::prev(I), TBB, Cond);
872  FBB = getBranchDestBlock(*I);
873  return false;
874  }
875 
876  // Otherwise, we can't handle this.
877  return true;
878 }
879 
881  int *BytesRemoved) const {
882  if (BytesRemoved)
883  *BytesRemoved = 0;
885  if (I == MBB.end())
886  return 0;
887 
888  if (!I->getDesc().isUnconditionalBranch() &&
889  !I->getDesc().isConditionalBranch())
890  return 0;
891 
892  // Remove the branch.
893  if (BytesRemoved)
894  *BytesRemoved += getInstSizeInBytes(*I);
895  I->eraseFromParent();
896 
897  I = MBB.end();
898 
899  if (I == MBB.begin())
900  return 1;
901  --I;
902  if (!I->getDesc().isConditionalBranch())
903  return 1;
904 
905  // Remove the branch.
906  if (BytesRemoved)
907  *BytesRemoved += getInstSizeInBytes(*I);
908  I->eraseFromParent();
909  return 2;
910 }
911 
912 // Inserts a branch into the end of the specific MachineBasicBlock, returning
913 // the number of instructions inserted.
916  ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
917  if (BytesAdded)
918  *BytesAdded = 0;
919 
920  // Shouldn't be a fall through.
921  assert(TBB && "insertBranch must not be told to insert a fallthrough");
922  assert((Cond.size() == 3 || Cond.size() == 0) &&
923  "RISCV branch conditions have two components!");
924 
925  // Unconditional branch.
926  if (Cond.empty()) {
927  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
928  if (BytesAdded)
929  *BytesAdded += getInstSizeInBytes(MI);
930  return 1;
931  }
932 
933  // Either a one or two-way conditional branch.
934  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
935  MachineInstr &CondMI =
936  *BuildMI(&MBB, DL, getBrCond(CC)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
937  if (BytesAdded)
938  *BytesAdded += getInstSizeInBytes(CondMI);
939 
940  // One-way conditional branch.
941  if (!FBB)
942  return 1;
943 
944  // Two-way conditional branch.
945  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
946  if (BytesAdded)
947  *BytesAdded += getInstSizeInBytes(MI);
948  return 2;
949 }
950 
952  MachineBasicBlock &DestBB,
953  MachineBasicBlock &RestoreBB,
954  const DebugLoc &DL, int64_t BrOffset,
955  RegScavenger *RS) const {
956  assert(RS && "RegScavenger required for long branching");
957  assert(MBB.empty() &&
958  "new block should be inserted for expanding unconditional branch");
959  assert(MBB.pred_size() == 1);
960  assert(RestoreBB.empty() &&
961  "restore block should be inserted for restoring clobbered registers");
962 
963  MachineFunction *MF = MBB.getParent();
967 
968  if (!isInt<32>(BrOffset))
970  "Branch offsets outside of the signed 32-bit range not supported");
971 
972  // FIXME: A virtual register must be used initially, as the register
973  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
974  // uses the same workaround).
975  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
976  auto II = MBB.end();
977  // We may also update the jump target to RestoreBB later.
978  MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
979  .addReg(ScratchReg, RegState::Define | RegState::Dead)
980  .addMBB(&DestBB, RISCVII::MO_CALL);
981 
982  RS->enterBasicBlockEnd(MBB);
983  Register TmpGPR =
984  RS->scavengeRegisterBackwards(RISCV::GPRRegClass, MI.getIterator(),
985  /*RestoreAfter=*/false, /*SpAdj=*/0,
986  /*AllowSpill=*/false);
987  if (TmpGPR != RISCV::NoRegister)
988  RS->setRegUsed(TmpGPR);
989  else {
990  // The case when there is no scavenged register needs special handling.
991 
992  // Pick s11 because it doesn't make a difference.
993  TmpGPR = RISCV::X27;
994 
996  if (FrameIndex == -1)
997  report_fatal_error("underestimated function size");
998 
999  storeRegToStackSlot(MBB, MI, TmpGPR, /*IsKill=*/true, FrameIndex,
1000  &RISCV::GPRRegClass, TRI);
1001  TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
1002  /*SpAdj=*/0, /*FIOperandNum=*/1);
1003 
1004  MI.getOperand(1).setMBB(&RestoreBB);
1005 
1006  loadRegFromStackSlot(RestoreBB, RestoreBB.end(), TmpGPR, FrameIndex,
1007  &RISCV::GPRRegClass, TRI);
1008  TRI->eliminateFrameIndex(RestoreBB.back(),
1009  /*SpAdj=*/0, /*FIOperandNum=*/1);
1010  }
1011 
1012  MRI.replaceRegWith(ScratchReg, TmpGPR);
1013  MRI.clearVirtRegs();
1014 }
1015 
1018  assert((Cond.size() == 3) && "Invalid branch condition!");
1019  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
1020  Cond[0].setImm(getOppositeBranchCondition(CC));
1021  return false;
1022 }
1023 
1026  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
1027  // The branch target is always the last operand.
1028  int NumOp = MI.getNumExplicitOperands();
1029  return MI.getOperand(NumOp - 1).getMBB();
1030 }
1031 
1033  int64_t BrOffset) const {
1034  unsigned XLen = STI.getXLen();
1035  // Ideally we could determine the supported branch offset from the
1036  // RISCVII::FormMask, but this can't be used for Pseudo instructions like
1037  // PseudoBR.
1038  switch (BranchOp) {
1039  default:
1040  llvm_unreachable("Unexpected opcode!");
1041  case RISCV::BEQ:
1042  case RISCV::BNE:
1043  case RISCV::BLT:
1044  case RISCV::BGE:
1045  case RISCV::BLTU:
1046  case RISCV::BGEU:
1047  return isIntN(13, BrOffset);
1048  case RISCV::JAL:
1049  case RISCV::PseudoBR:
1050  return isIntN(21, BrOffset);
1051  case RISCV::PseudoJump:
1052  return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
1053  }
1054 }
1055 
1057  if (MI.isMetaInstruction())
1058  return 0;
1059 
1060  unsigned Opcode = MI.getOpcode();
1061 
1062  if (Opcode == TargetOpcode::INLINEASM ||
1063  Opcode == TargetOpcode::INLINEASM_BR) {
1064  const MachineFunction &MF = *MI.getParent()->getParent();
1065  const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
1066  return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
1067  *TM.getMCAsmInfo());
1068  }
1069 
1070  if (MI.getParent() && MI.getParent()->getParent()) {
1071  const auto MF = MI.getMF();
1072  const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
1073  const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
1074  const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
1075  const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
1076  if (isCompressibleInst(MI, &ST, MRI, STI))
1077  return 2;
1078  }
1079  return get(Opcode).getSize();
1080 }
1081 
1083  const unsigned Opcode = MI.getOpcode();
1084  switch (Opcode) {
1085  default:
1086  break;
1087  case RISCV::FSGNJ_D:
1088  case RISCV::FSGNJ_S:
1089  case RISCV::FSGNJ_H:
1090  // The canonical floating-point move is fsgnj rd, rs, rs.
1091  return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
1092  MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
1093  case RISCV::ADDI:
1094  case RISCV::ORI:
1095  case RISCV::XORI:
1096  return (MI.getOperand(1).isReg() &&
1097  MI.getOperand(1).getReg() == RISCV::X0) ||
1098  (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
1099  }
1100  return MI.isAsCheapAsAMove();
1101 }
1102 
1105  if (MI.isMoveReg())
1106  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1107  switch (MI.getOpcode()) {
1108  default:
1109  break;
1110  case RISCV::ADDI:
1111  // Operand 1 can be a frameindex but callers expect registers
1112  if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
1113  MI.getOperand(2).getImm() == 0)
1114  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1115  break;
1116  case RISCV::FSGNJ_D:
1117  case RISCV::FSGNJ_S:
1118  case RISCV::FSGNJ_H:
1119  // The canonical floating-point move is fsgnj rd, rs, rs.
1120  if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
1121  MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
1122  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1123  break;
1124  }
1125  return None;
1126 }
1127 
1129  StringRef &ErrInfo) const {
1130  MCInstrDesc const &Desc = MI.getDesc();
1131 
1132  for (auto &OI : enumerate(Desc.operands())) {
1133  unsigned OpType = OI.value().OperandType;
1134  if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
1135  OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
1136  const MachineOperand &MO = MI.getOperand(OI.index());
1137  if (MO.isImm()) {
1138  int64_t Imm = MO.getImm();
1139  bool Ok;
1140  switch (OpType) {
1141  default:
1142  llvm_unreachable("Unexpected operand type");
1143 
1144  // clang-format off
1145 #define CASE_OPERAND_UIMM(NUM) \
1146  case RISCVOp::OPERAND_UIMM##NUM: \
1147  Ok = isUInt<NUM>(Imm); \
1148  break;
1155  Ok = isShiftedUInt<5, 2>(Imm);
1156  break;
1158  Ok = isShiftedUInt<6, 2>(Imm);
1159  break;
1161  Ok = isShiftedUInt<5, 3>(Imm);
1162  break;
1163  CASE_OPERAND_UIMM(12)
1164  CASE_OPERAND_UIMM(20)
1165  // clang-format on
1167  Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
1168  break;
1169  case RISCVOp::OPERAND_ZERO:
1170  Ok = Imm == 0;
1171  break;
1173  Ok = isInt<5>(Imm);
1174  break;
1176  Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
1177  break;
1179  Ok = isInt<6>(Imm);
1180  break;
1182  Ok = Imm != 0 && isInt<6>(Imm);
1183  break;
1185  Ok = isUInt<10>(Imm);
1186  break;
1188  Ok = isUInt<11>(Imm);
1189  break;
1191  Ok = isInt<12>(Imm);
1192  break;
1194  Ok = isShiftedInt<7, 5>(Imm);
1195  break;
1197  Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
1198  break;
1200  Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
1201  Ok = Ok && Imm != 0;
1202  break;
1204  Ok = STI.is64Bit() ? isUInt<5>(Imm) : isUInt<4>(Imm);
1205  break;
1207  Ok = Imm >= 0 && Imm <= 10;
1208  break;
1209  }
1210  if (!Ok) {
1211  ErrInfo = "Invalid immediate";
1212  return false;
1213  }
1214  }
1215  }
1216  }
1217 
1218  const uint64_t TSFlags = Desc.TSFlags;
1220  unsigned OpIdx = RISCVII::getMergeOpNum(Desc);
1221  if (MI.findTiedOperandIdx(0) != OpIdx) {
1222  ErrInfo = "Merge op improperly tied";
1223  return false;
1224  }
1225  }
1226  if (RISCVII::hasVLOp(TSFlags)) {
1227  const MachineOperand &Op = MI.getOperand(RISCVII::getVLOpNum(Desc));
1228  if (!Op.isImm() && !Op.isReg()) {
1229  ErrInfo = "Invalid operand type for VL operand";
1230  return false;
1231  }
1232  if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
1233  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
1234  auto *RC = MRI.getRegClass(Op.getReg());
1235  if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
1236  ErrInfo = "Invalid register class for VL operand";
1237  return false;
1238  }
1239  }
1240  if (!RISCVII::hasSEWOp(TSFlags)) {
1241  ErrInfo = "VL operand w/o SEW operand?";
1242  return false;
1243  }
1244  }
1245  if (RISCVII::hasSEWOp(TSFlags)) {
1246  unsigned OpIdx = RISCVII::getSEWOpNum(Desc);
1247  uint64_t Log2SEW = MI.getOperand(OpIdx).getImm();
1248  if (Log2SEW > 31) {
1249  ErrInfo = "Unexpected SEW value";
1250  return false;
1251  }
1252  unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
1253  if (!RISCVVType::isValidSEW(SEW)) {
1254  ErrInfo = "Unexpected SEW value";
1255  return false;
1256  }
1257  }
1259  unsigned OpIdx = RISCVII::getVecPolicyOpNum(Desc);
1260  uint64_t Policy = MI.getOperand(OpIdx).getImm();
1262  ErrInfo = "Invalid Policy Value";
1263  return false;
1264  }
1265  if (!RISCVII::hasVLOp(TSFlags)) {
1266  ErrInfo = "policy operand w/o VL operand?";
1267  return false;
1268  }
1269  }
1270 
1271  return true;
1272 }
1273 
1274 // Return true if get the base operand, byte offset of an instruction and the
1275 // memory width. Width is the size of memory that is being loaded/stored.
1277  const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
1278  unsigned &Width, const TargetRegisterInfo *TRI) const {
1279  if (!LdSt.mayLoadOrStore())
1280  return false;
1281 
1282  // Here we assume the standard RISC-V ISA, which uses a base+offset
1283  // addressing mode. You'll need to relax these conditions to support custom
1284  // load/stores instructions.
1285  if (LdSt.getNumExplicitOperands() != 3)
1286  return false;
1287  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
1288  return false;
1289 
1290  if (!LdSt.hasOneMemOperand())
1291  return false;
1292 
1293  Width = (*LdSt.memoperands_begin())->getSize();
1294  BaseReg = &LdSt.getOperand(1);
1295  Offset = LdSt.getOperand(2).getImm();
1296  return true;
1297 }
1298 
1300  const MachineInstr &MIa, const MachineInstr &MIb) const {
1301  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
1302  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
1303 
1304  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
1306  return false;
1307 
1308  // Retrieve the base register, offset from the base register and width. Width
1309  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
1310  // base registers are identical, and the offset of a lower memory access +
1311  // the width doesn't overlap the offset of a higher memory access,
1312  // then the memory accesses are different.
1314  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
1315  int64_t OffsetA = 0, OffsetB = 0;
1316  unsigned int WidthA = 0, WidthB = 0;
1317  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
1318  getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
1319  if (BaseOpA->isIdenticalTo(*BaseOpB)) {
1320  int LowOffset = std::min(OffsetA, OffsetB);
1321  int HighOffset = std::max(OffsetA, OffsetB);
1322  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
1323  if (LowOffset + LowWidth <= HighOffset)
1324  return true;
1325  }
1326  }
1327  return false;
1328 }
1329 
1330 std::pair<unsigned, unsigned>
1332  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
1333  return std::make_pair(TF & Mask, TF & ~Mask);
1334 }
1335 
1338  using namespace RISCVII;
1339  static const std::pair<unsigned, const char *> TargetFlags[] = {
1340  {MO_CALL, "riscv-call"},
1341  {MO_PLT, "riscv-plt"},
1342  {MO_LO, "riscv-lo"},
1343  {MO_HI, "riscv-hi"},
1344  {MO_PCREL_LO, "riscv-pcrel-lo"},
1345  {MO_PCREL_HI, "riscv-pcrel-hi"},
1346  {MO_GOT_HI, "riscv-got-hi"},
1347  {MO_TPREL_LO, "riscv-tprel-lo"},
1348  {MO_TPREL_HI, "riscv-tprel-hi"},
1349  {MO_TPREL_ADD, "riscv-tprel-add"},
1350  {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
1351  {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
1352  return makeArrayRef(TargetFlags);
1353 }
1355  MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
1356  const Function &F = MF.getFunction();
1357 
1358  // Can F be deduplicated by the linker? If it can, don't outline from it.
1359  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
1360  return false;
1361 
1362  // Don't outline from functions with section markings; the program could
1363  // expect that all the code is in the named section.
1364  if (F.hasSection())
1365  return false;
1366 
1367  // It's safe to outline from MF.
1368  return true;
1369 }
1370 
1372  unsigned &Flags) const {
1373  // More accurate safety checking is done in getOutliningCandidateInfo.
1375 }
1376 
1377 // Enum values indicating how an outlined call should be constructed.
1380 };
1381 
1383  MachineFunction &MF) const {
1384  return MF.getFunction().hasMinSize();
1385 }
1386 
1388  std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
1389 
1390  // First we need to filter out candidates where the X5 register (IE t0) can't
1391  // be used to setup the function call.
1392  auto CannotInsertCall = [](outliner::Candidate &C) {
1393  const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
1394  return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
1395  };
1396 
1397  llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
1398 
1399  // If the sequence doesn't have enough candidates left, then we're done.
1400  if (RepeatedSequenceLocs.size() < 2)
1401  return outliner::OutlinedFunction();
1402 
1403  unsigned SequenceSize = 0;
1404 
1405  auto I = RepeatedSequenceLocs[0].front();
1406  auto E = std::next(RepeatedSequenceLocs[0].back());
1407  for (; I != E; ++I)
1408  SequenceSize += getInstSizeInBytes(*I);
1409 
1410  // call t0, function = 8 bytes.
1411  unsigned CallOverhead = 8;
1412  for (auto &C : RepeatedSequenceLocs)
1413  C.setCallInfo(MachineOutlinerDefault, CallOverhead);
1414 
1415  // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
1416  unsigned FrameOverhead = 4;
1417  if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
1418  .getFeatureBits()[RISCV::FeatureStdExtC])
1419  FrameOverhead = 2;
1420 
1421  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
1422  FrameOverhead, MachineOutlinerDefault);
1423 }
1424 
1427  unsigned Flags) const {
1428  MachineInstr &MI = *MBBI;
1429  MachineBasicBlock *MBB = MI.getParent();
1430  const TargetRegisterInfo *TRI =
1432  const auto &F = MI.getMF()->getFunction();
1433 
1434  // Positions generally can't safely be outlined.
1435  if (MI.isPosition()) {
1436  // We can manually strip out CFI instructions later.
1437  if (MI.isCFIInstruction())
1438  // If current function has exception handling code, we can't outline &
1439  // strip these CFI instructions since it may break .eh_frame section
1440  // needed in unwinding.
1441  return F.needsUnwindTableEntry() ? outliner::InstrType::Illegal
1443 
1445  }
1446 
1447  // Don't trust the user to write safe inline assembly.
1448  if (MI.isInlineAsm())
1450 
1451  // We can't outline branches to other basic blocks.
1452  if (MI.isTerminator() && !MBB->succ_empty())
1454 
1455  // We need support for tail calls to outlined functions before return
1456  // statements can be allowed.
1457  if (MI.isReturn())
1459 
1460  // Don't allow modifying the X5 register which we use for return addresses for
1461  // these outlined functions.
1462  if (MI.modifiesRegister(RISCV::X5, TRI) ||
1463  MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
1465 
1466  // Make sure the operands don't reference something unsafe.
1467  for (const auto &MO : MI.operands()) {
1468  if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI() || MO.isJTI())
1470 
1471  // pcrel-hi and pcrel-lo can't put in separate sections, filter that out
1472  // if any possible.
1473  if (MO.getTargetFlags() == RISCVII::MO_PCREL_LO &&
1474  (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
1475  F.hasSection()))
1477  }
1478 
1479  // Don't allow instructions which won't be materialized to impact outlining
1480  // analysis.
1481  if (MI.isMetaInstruction())
1483 
1485 }
1486 
1489  const outliner::OutlinedFunction &OF) const {
1490 
1491  // Strip out any CFI instructions
1492  bool Changed = true;
1493  while (Changed) {
1494  Changed = false;
1495  auto I = MBB.begin();
1496  auto E = MBB.end();
1497  for (; I != E; ++I) {
1498  if (I->isCFIInstruction()) {
1499  I->removeFromParent();
1500  Changed = true;
1501  break;
1502  }
1503  }
1504  }
1505 
1506  MBB.addLiveIn(RISCV::X5);
1507 
1508  // Add in a return instruction to the end of the outlined frame.
1509  MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
1510  .addReg(RISCV::X0, RegState::Define)
1511  .addReg(RISCV::X5)
1512  .addImm(0));
1513 }
1514 
1517  MachineFunction &MF, outliner::Candidate &C) const {
1518 
1519  // Add in a call instruction to the outlined function at the given location.
1520  It = MBB.insert(It,
1521  BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
1522  .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
1523  RISCVII::MO_CALL));
1524  return It;
1525 }
1526 
1527 // MIR printer helper function to annotate Operands with a comment.
1529  const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
1530  const TargetRegisterInfo *TRI) const {
1531  // Print a generic comment for this operand if there is one.
1532  std::string GenericComment =
1534  if (!GenericComment.empty())
1535  return GenericComment;
1536 
1537  // If not, we must have an immediate operand.
1538  if (!Op.isImm())
1539  return std::string();
1540 
1541  std::string Comment;
1542  raw_string_ostream OS(Comment);
1543 
1544  uint64_t TSFlags = MI.getDesc().TSFlags;
1545 
1546  // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW
1547  // operand of vector codegen pseudos.
1548  if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
1549  MI.getOpcode() == RISCV::PseudoVSETVLI ||
1550  MI.getOpcode() == RISCV::PseudoVSETIVLI ||
1551  MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
1552  OpIdx == 2) {
1553  unsigned Imm = MI.getOperand(OpIdx).getImm();
1555  } else if (RISCVII::hasSEWOp(TSFlags) &&
1556  OpIdx == RISCVII::getSEWOpNum(MI.getDesc())) {
1557  unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
1558  unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
1559  assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
1560  OS << "e" << SEW;
1561  } else if (RISCVII::hasVecPolicyOp(TSFlags) &&
1562  OpIdx == RISCVII::getVecPolicyOpNum(MI.getDesc())) {
1563  unsigned Policy = MI.getOperand(OpIdx).getImm();
1565  "Invalid Policy Value");
1566  OS << (Policy & RISCVII::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
1567  << (Policy & RISCVII::MASK_AGNOSTIC ? "ma" : "mu");
1568  }
1569 
1570  OS.flush();
1571  return Comment;
1572 }
1573 
1574 // clang-format off
1575 #define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL) \
1576  RISCV::PseudoV##OP##_##TYPE##_##LMUL
1577 
1578 #define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE) \
1579  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1): \
1580  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2): \
1581  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4): \
1582  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)
1583 
1584 #define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE) \
1585  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2): \
1586  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)
1587 
1588 #define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE) \
1589  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4): \
1590  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)
1591 
1592 #define CASE_VFMA_OPCODE_LMULS(OP, TYPE) \
1593  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8): \
1594  case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
1595 
1596 #define CASE_VFMA_SPLATS(OP) \
1597  CASE_VFMA_OPCODE_LMULS_MF4(OP, VF16): \
1598  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VF32): \
1599  case CASE_VFMA_OPCODE_LMULS_M1(OP, VF64)
1600 // clang-format on
1601 
1603  unsigned &SrcOpIdx1,
1604  unsigned &SrcOpIdx2) const {
1605  const MCInstrDesc &Desc = MI.getDesc();
1606  if (!Desc.isCommutable())
1607  return false;
1608 
1609  switch (MI.getOpcode()) {
1610  case CASE_VFMA_SPLATS(FMADD):
1611  case CASE_VFMA_SPLATS(FMSUB):
1612  case CASE_VFMA_SPLATS(FMACC):
1613  case CASE_VFMA_SPLATS(FMSAC):
1614  case CASE_VFMA_SPLATS(FNMADD):
1615  case CASE_VFMA_SPLATS(FNMSUB):
1616  case CASE_VFMA_SPLATS(FNMACC):
1617  case CASE_VFMA_SPLATS(FNMSAC):
1618  case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
1619  case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
1620  case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
1621  case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
1622  case CASE_VFMA_OPCODE_LMULS(MADD, VX):
1623  case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
1624  case CASE_VFMA_OPCODE_LMULS(MACC, VX):
1625  case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
1626  case CASE_VFMA_OPCODE_LMULS(MACC, VV):
1627  case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
1628  // If the tail policy is undisturbed we can't commute.
1629  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
1630  if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
1631  return false;
1632 
1633  // For these instructions we can only swap operand 1 and operand 3 by
1634  // changing the opcode.
1635  unsigned CommutableOpIdx1 = 1;
1636  unsigned CommutableOpIdx2 = 3;
1637  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
1638  CommutableOpIdx2))
1639  return false;
1640  return true;
1641  }
1642  case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
1646  case CASE_VFMA_OPCODE_LMULS(MADD, VV):
1647  case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
1648  // If the tail policy is undisturbed we can't commute.
1649  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
1650  if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
1651  return false;
1652 
1653  // For these instructions we have more freedom. We can commute with the
1654  // other multiplicand or with the addend/subtrahend/minuend.
1655 
1656  // Any fixed operand must be from source 1, 2 or 3.
1657  if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
1658  return false;
1659  if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
1660  return false;
1661 
1662  // It both ops are fixed one must be the tied source.
1663  if (SrcOpIdx1 != CommuteAnyOperandIndex &&
1664  SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
1665  return false;
1666 
1667  // Look for two different register operands assumed to be commutable
1668  // regardless of the FMA opcode. The FMA opcode is adjusted later if
1669  // needed.
1670  if (SrcOpIdx1 == CommuteAnyOperandIndex ||
1671  SrcOpIdx2 == CommuteAnyOperandIndex) {
1672  // At least one of operands to be commuted is not specified and
1673  // this method is free to choose appropriate commutable operands.
1674  unsigned CommutableOpIdx1 = SrcOpIdx1;
1675  if (SrcOpIdx1 == SrcOpIdx2) {
1676  // Both of operands are not fixed. Set one of commutable
1677  // operands to the tied source.
1678  CommutableOpIdx1 = 1;
1679  } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
1680  // Only one of the operands is not fixed.
1681  CommutableOpIdx1 = SrcOpIdx2;
1682  }
1683 
1684  // CommutableOpIdx1 is well defined now. Let's choose another commutable
1685  // operand and assign its index to CommutableOpIdx2.
1686  unsigned CommutableOpIdx2;
1687  if (CommutableOpIdx1 != 1) {
1688  // If we haven't already used the tied source, we must use it now.
1689  CommutableOpIdx2 = 1;
1690  } else {
1691  Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
1692 
1693  // The commuted operands should have different registers.
1694  // Otherwise, the commute transformation does not change anything and
1695  // is useless. We use this as a hint to make our decision.
1696  if (Op1Reg != MI.getOperand(2).getReg())
1697  CommutableOpIdx2 = 2;
1698  else
1699  CommutableOpIdx2 = 3;
1700  }
1701 
1702  // Assign the found pair of commutable indices to SrcOpIdx1 and
1703  // SrcOpIdx2 to return those values.
1704  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
1705  CommutableOpIdx2))
1706  return false;
1707  }
1708 
1709  return true;
1710  }
1711  }
1712 
1713  return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
1714 }
1715 
1716 #define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
1717  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
1718  Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
1719  break;
1720 
1721 #define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE) \
1722  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
1723  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
1724  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
1725  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
1726 
1727 #define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE) \
1728  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
1729  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)
1730 
1731 #define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE) \
1732  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
1733  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)
1734 
1735 #define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
1736  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
1737  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
1738 
1739 #define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
1740  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VF16) \
1741  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VF32) \
1742  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VF64)
1743 
1745  bool NewMI,
1746  unsigned OpIdx1,
1747  unsigned OpIdx2) const {
1748  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
1749  if (NewMI)
1750  return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
1751  return MI;
1752  };
1753 
1754  switch (MI.getOpcode()) {
1755  case CASE_VFMA_SPLATS(FMACC):
1756  case CASE_VFMA_SPLATS(FMADD):
1757  case CASE_VFMA_SPLATS(FMSAC):
1758  case CASE_VFMA_SPLATS(FMSUB):
1759  case CASE_VFMA_SPLATS(FNMACC):
1760  case CASE_VFMA_SPLATS(FNMADD):
1761  case CASE_VFMA_SPLATS(FNMSAC):
1762  case CASE_VFMA_SPLATS(FNMSUB):
1763  case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
1764  case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
1765  case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
1766  case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
1767  case CASE_VFMA_OPCODE_LMULS(MADD, VX):
1768  case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
1769  case CASE_VFMA_OPCODE_LMULS(MACC, VX):
1770  case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
1771  case CASE_VFMA_OPCODE_LMULS(MACC, VV):
1772  case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
1773  // It only make sense to toggle these between clobbering the
1774  // addend/subtrahend/minuend one of the multiplicands.
1775  assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
1776  assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
1777  unsigned Opc;
1778  switch (MI.getOpcode()) {
1779  default:
1780  llvm_unreachable("Unexpected opcode");
1781  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
1782  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
1789  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMACC, FMADD, VV)
1793  CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VX)
1794  CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VX)
1795  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VX)
1796  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VX)
1797  CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VV)
1798  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VV)
1799  }
1800 
1801  auto &WorkingMI = cloneIfNew(MI);
1802  WorkingMI.setDesc(get(Opc));
1803  return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1804  OpIdx1, OpIdx2);
1805  }
1806  case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
1810  case CASE_VFMA_OPCODE_LMULS(MADD, VV):
1811  case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
1812  assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
1813  // If one of the operands, is the addend we need to change opcode.
1814  // Otherwise we're just swapping 2 of the multiplicands.
1815  if (OpIdx1 == 3 || OpIdx2 == 3) {
1816  unsigned Opc;
1817  switch (MI.getOpcode()) {
1818  default:
1819  llvm_unreachable("Unexpected opcode");
1820  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMADD, FMACC, VV)
1824  CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VV)
1825  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VV)
1826  }
1827 
1828  auto &WorkingMI = cloneIfNew(MI);
1829  WorkingMI.setDesc(get(Opc));
1830  return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1831  OpIdx1, OpIdx2);
1832  }
1833  // Let the default code handle it.
1834  break;
1835  }
1836  }
1837 
1838  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
1839 }
1840 
1841 #undef CASE_VFMA_CHANGE_OPCODE_SPLATS
1842 #undef CASE_VFMA_CHANGE_OPCODE_LMULS
1843 #undef CASE_VFMA_CHANGE_OPCODE_COMMON
1844 #undef CASE_VFMA_SPLATS
1845 #undef CASE_VFMA_OPCODE_LMULS
1846 #undef CASE_VFMA_OPCODE_COMMON
1847 
1848 // clang-format off
1849 #define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
1850  RISCV::PseudoV##OP##_##LMUL##_TIED
1851 
1852 #define CASE_WIDEOP_OPCODE_LMULS_MF4(OP) \
1853  CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
1854  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
1855  case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
1856  case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
1857  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
1858 
1859 #define CASE_WIDEOP_OPCODE_LMULS(OP) \
1860  CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
1861  case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
1862 // clang-format on
1863 
1864 #define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
1865  case RISCV::PseudoV##OP##_##LMUL##_TIED: \
1866  NewOpc = RISCV::PseudoV##OP##_##LMUL; \
1867  break;
1868 
1869 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
1870  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
1871  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
1872  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
1873  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
1874  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
1875 
1876 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
1877  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
1878  CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
1879 
1881  LiveVariables *LV,
1882  LiveIntervals *LIS) const {
1883  switch (MI.getOpcode()) {
1884  default:
1885  break;
1886  case CASE_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
1887  case CASE_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV):
1888  case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
1889  case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
1890  case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
1891  case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
1892  // If the tail policy is undisturbed we can't convert.
1893  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
1894  MI.getNumExplicitOperands() == 6);
1895  if ((MI.getOperand(5).getImm() & 1) == 0)
1896  return nullptr;
1897 
1898  // clang-format off
1899  unsigned NewOpc;
1900  switch (MI.getOpcode()) {
1901  default:
1902  llvm_unreachable("Unexpected opcode");
1909  }
1910  // clang-format on
1911 
1912  MachineBasicBlock &MBB = *MI.getParent();
1913  MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
1914  .add(MI.getOperand(0))
1915  .add(MI.getOperand(1))
1916  .add(MI.getOperand(2))
1917  .add(MI.getOperand(3))
1918  .add(MI.getOperand(4));
1919  MIB.copyImplicitOps(MI);
1920 
1921  if (LV) {
1922  unsigned NumOps = MI.getNumOperands();
1923  for (unsigned I = 1; I < NumOps; ++I) {
1924  MachineOperand &Op = MI.getOperand(I);
1925  if (Op.isReg() && Op.isKill())
1926  LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
1927  }
1928  }
1929 
1930  if (LIS) {
1931  SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);
1932 
1933  if (MI.getOperand(0).isEarlyClobber()) {
1934  // Use operand 1 was tied to early-clobber def operand 0, so its live
1935  // interval could have ended at an early-clobber slot. Now they are not
1936  // tied we need to update it to the normal register slot.
1937  LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
1939  if (S->end == Idx.getRegSlot(true))
1940  S->end = Idx.getRegSlot();
1941  }
1942  }
1943 
1944  return MIB;
1945  }
1946  }
1947 
1948  return nullptr;
1949 }
1950 
1951 #undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
1952 #undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
1953 #undef CASE_WIDEOP_OPCODE_LMULS
1954 #undef CASE_WIDEOP_OPCODE_COMMON
1955 
1959  const DebugLoc &DL, Register DestReg,
1960  int64_t Amount,
1961  MachineInstr::MIFlag Flag) const {
1962  assert(Amount > 0 && "There is no need to get VLEN scaled value.");
1963  assert(Amount % 8 == 0 &&
1964  "Reserve the stack by the multiple of one vector size.");
1965 
1967  int64_t NumOfVReg = Amount / 8;
1968 
1969  BuildMI(MBB, II, DL, get(RISCV::PseudoReadVLENB), DestReg).setMIFlag(Flag);
1970  assert(isInt<32>(NumOfVReg) &&
1971  "Expect the number of vector registers within 32-bits.");
1972  if (isPowerOf2_32(NumOfVReg)) {
1973  uint32_t ShiftAmount = Log2_32(NumOfVReg);
1974  if (ShiftAmount == 0)
1975  return;
1976  BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
1977  .addReg(DestReg, RegState::Kill)
1978  .addImm(ShiftAmount)
1979  .setMIFlag(Flag);
1980  } else if (STI.hasStdExtZba() &&
1981  ((NumOfVReg % 3 == 0 && isPowerOf2_64(NumOfVReg / 3)) ||
1982  (NumOfVReg % 5 == 0 && isPowerOf2_64(NumOfVReg / 5)) ||
1983  (NumOfVReg % 9 == 0 && isPowerOf2_64(NumOfVReg / 9)))) {
1984  // We can use Zba SHXADD+SLLI instructions for multiply in some cases.
1985  unsigned Opc;
1986  uint32_t ShiftAmount;
1987  if (NumOfVReg % 9 == 0) {
1988  Opc = RISCV::SH3ADD;
1989  ShiftAmount = Log2_64(NumOfVReg / 9);
1990  } else if (NumOfVReg % 5 == 0) {
1991  Opc = RISCV::SH2ADD;
1992  ShiftAmount = Log2_64(NumOfVReg / 5);
1993  } else if (NumOfVReg % 3 == 0) {
1994  Opc = RISCV::SH1ADD;
1995  ShiftAmount = Log2_64(NumOfVReg / 3);
1996  } else {
1997  llvm_unreachable("Unexpected number of vregs");
1998  }
1999  if (ShiftAmount)
2000  BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
2001  .addReg(DestReg, RegState::Kill)
2002  .addImm(ShiftAmount)
2003  .setMIFlag(Flag);
2004  BuildMI(MBB, II, DL, get(Opc), DestReg)
2005  .addReg(DestReg, RegState::Kill)
2006  .addReg(DestReg)
2007  .setMIFlag(Flag);
2008  } else if (isPowerOf2_32(NumOfVReg - 1)) {
2009  Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2010  uint32_t ShiftAmount = Log2_32(NumOfVReg - 1);
2011  BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
2012  .addReg(DestReg)
2013  .addImm(ShiftAmount)
2014  .setMIFlag(Flag);
2015  BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
2016  .addReg(ScaledRegister, RegState::Kill)
2017  .addReg(DestReg, RegState::Kill)
2018  .setMIFlag(Flag);
2019  } else if (isPowerOf2_32(NumOfVReg + 1)) {
2020  Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2021  uint32_t ShiftAmount = Log2_32(NumOfVReg + 1);
2022  BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
2023  .addReg(DestReg)
2024  .addImm(ShiftAmount)
2025  .setMIFlag(Flag);
2026  BuildMI(MBB, II, DL, get(RISCV::SUB), DestReg)
2027  .addReg(ScaledRegister, RegState::Kill)
2028  .addReg(DestReg, RegState::Kill)
2029  .setMIFlag(Flag);
2030  } else {
2031  Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2032  movImm(MBB, II, DL, N, NumOfVReg, Flag);
2033  if (!STI.hasStdExtM() && !STI.hasStdExtZmmul())
2035  MF.getFunction(),
2036  "M- or Zmmul-extension must be enabled to calculate the vscaled size/"
2037  "offset."});
2038  BuildMI(MBB, II, DL, get(RISCV::MUL), DestReg)
2039  .addReg(DestReg, RegState::Kill)
2041  .setMIFlag(Flag);
2042  }
2043 }
2044 
2045 // Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
2047  return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
2048  MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
2049 }
2050 
2051 // Returns true if this is the zext.w pattern, adduw rd, rs1, x0.
2053  return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
2054  MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
2055 }
2056 
2057 // Returns true if this is the zext.b pattern, andi rd, rs1, 255.
2059  return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
2060  MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
2061 }
2062 
2063 static bool isRVVWholeLoadStore(unsigned Opcode) {
2064  switch (Opcode) {
2065  default:
2066  return false;
2067  case RISCV::VS1R_V:
2068  case RISCV::VS2R_V:
2069  case RISCV::VS4R_V:
2070  case RISCV::VS8R_V:
2071  case RISCV::VL1RE8_V:
2072  case RISCV::VL2RE8_V:
2073  case RISCV::VL4RE8_V:
2074  case RISCV::VL8RE8_V:
2075  case RISCV::VL1RE16_V:
2076  case RISCV::VL2RE16_V:
2077  case RISCV::VL4RE16_V:
2078  case RISCV::VL8RE16_V:
2079  case RISCV::VL1RE32_V:
2080  case RISCV::VL2RE32_V:
2081  case RISCV::VL4RE32_V:
2082  case RISCV::VL8RE32_V:
2083  case RISCV::VL1RE64_V:
2084  case RISCV::VL2RE64_V:
2085  case RISCV::VL4RE64_V:
2086  case RISCV::VL8RE64_V:
2087  return true;
2088  }
2089 }
2090 
2092  // RVV lacks any support for immediate addressing for stack addresses, so be
2093  // conservative.
2094  unsigned Opcode = MI.getOpcode();
2095  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
2096  !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
2097  return false;
2098  return true;
2099 }
2100 
2103  switch (Opcode) {
2104  default:
2105  return None;
2106  case RISCV::PseudoVSPILL2_M1:
2107  case RISCV::PseudoVRELOAD2_M1:
2108  return std::make_pair(2u, 1u);
2109  case RISCV::PseudoVSPILL2_M2:
2110  case RISCV::PseudoVRELOAD2_M2:
2111  return std::make_pair(2u, 2u);
2112  case RISCV::PseudoVSPILL2_M4:
2113  case RISCV::PseudoVRELOAD2_M4:
2114  return std::make_pair(2u, 4u);
2115  case RISCV::PseudoVSPILL3_M1:
2116  case RISCV::PseudoVRELOAD3_M1:
2117  return std::make_pair(3u, 1u);
2118  case RISCV::PseudoVSPILL3_M2:
2119  case RISCV::PseudoVRELOAD3_M2:
2120  return std::make_pair(3u, 2u);
2121  case RISCV::PseudoVSPILL4_M1:
2122  case RISCV::PseudoVRELOAD4_M1:
2123  return std::make_pair(4u, 1u);
2124  case RISCV::PseudoVSPILL4_M2:
2125  case RISCV::PseudoVRELOAD4_M2:
2126  return std::make_pair(4u, 2u);
2127  case RISCV::PseudoVSPILL5_M1:
2128  case RISCV::PseudoVRELOAD5_M1:
2129  return std::make_pair(5u, 1u);
2130  case RISCV::PseudoVSPILL6_M1:
2131  case RISCV::PseudoVRELOAD6_M1:
2132  return std::make_pair(6u, 1u);
2133  case RISCV::PseudoVSPILL7_M1:
2134  case RISCV::PseudoVRELOAD7_M1:
2135  return std::make_pair(7u, 1u);
2136  case RISCV::PseudoVSPILL8_M1:
2137  case RISCV::PseudoVRELOAD8_M1:
2138  return std::make_pair(8u, 1u);
2139  }
2140 }
2141 
2143  return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
2144  !MI.isInlineAsm();
2145 }
llvm::ISD::SUB
@ SUB
Definition: ISDOpcodes.h:240
llvm::RISCVII::LMUL_1
@ LMUL_1
Definition: RISCVBaseInfo.h:109
llvm::RISCVMatInt::Inst
Definition: RISCVMatInt.h:28
llvm::RISCVII::getSEWOpNum
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
Definition: RISCVBaseInfo.h:188
llvm::LoongArchII::MO_PCREL_HI
@ MO_PCREL_HI
Definition: LoongArchBaseInfo.h:32
llvm::RISCVII::isRVVWideningReduction
static bool isRVVWideningReduction(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:163
llvm::RISCVInstrInfo::reverseBranchCondition
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Definition: RISCVInstrInfo.cpp:1016
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:108
llvm::RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Definition: RISCVInstrInfo.cpp:1337
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm::RISCVInstrInfo::shouldOutlineFromFunctionByDefault
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
Definition: RISCVInstrInfo.cpp:1382
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
llvm::RISCVII::MO_TPREL_ADD
@ MO_TPREL_ADD
Definition: RISCVBaseInfo.h:214
llvm::MachineInstrBuilder::copyImplicitOps
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
Definition: MachineInstrBuilder.h:321
llvm::HexagonMCInstrInfo::getDesc
const MCInstrDesc & getDesc(MCInstrInfo const &MCII, MCInst const &MCI)
Definition: HexagonMCInstrInfo.cpp:255
llvm::RISCVInstrInfo::RISCVInstrInfo
RISCVInstrInfo(RISCVSubtarget &STI)
Definition: RISCVInstrInfo.cpp:54
llvm::MCRegisterInfo::getName
const char * getName(MCRegister RegNo) const
Return the human-readable symbolic target-specific name for the specified physical register.
Definition: MCRegisterInfo.h:485
llvm::RISCVInstrInfo::getBrCond
const MCInstrDesc & getBrCond(RISCVCC::CondCode CC) const
Definition: RISCVInstrInfo.cpp:774
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
PreferWholeRegisterMove
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
llvm::RISCVCC::COND_GEU
@ COND_GEU
Definition: RISCVInstrInfo.h:36
llvm::RISCVOp::OPERAND_SIMM6
@ OPERAND_SIMM6
Definition: RISCVBaseInfo.h:240
llvm::RISCVOp::OPERAND_SIMM12
@ OPERAND_SIMM12
Definition: RISCVBaseInfo.h:243
llvm::RISCVCC::getOppositeBranchCondition
CondCode getOppositeBranchCondition(CondCode)
Definition: RISCVInstrInfo.cpp:793
llvm::MachineRegisterInfo::createVirtualRegister
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Definition: MachineRegisterInfo.cpp:156
llvm::DiagnosticInfoUnsupported
Diagnostic information for unsupported feature in backend.
Definition: DiagnosticInfo.h:1009
llvm::RISCVCC::COND_INVALID
@ COND_INVALID
Definition: RISCVInstrInfo.h:37
llvm::RISCVOp::OPERAND_SIMM5_PLUS1
@ OPERAND_SIMM5_PLUS1
Definition: RISCVBaseInfo.h:239
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:50
llvm::MachineInstr::mayLoadOrStore
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
Definition: MachineInstr.h:1079
llvm::MachineInstr::getNumExplicitOperands
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
Definition: MachineInstr.cpp:713
llvm::MachineInstrBuilder::add
const MachineInstrBuilder & add(const MachineOperand &MO) const
Definition: MachineInstrBuilder.h:224
llvm::Function
Definition: Function.h:60
llvm::RISCVInstrInfo::getOutliningType
outliner::InstrType getOutliningType(MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
Definition: RISCVInstrInfo.cpp:1426
llvm::MachineInstr::memoperands_begin
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:731
llvm::RISCVOp::OPERAND_LAST_RISCV_IMM
@ OPERAND_LAST_RISCV_IMM
Definition: RISCVBaseInfo.h:252
llvm::RegScavenger::scavengeRegisterBackwards
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the p...
Definition: RegisterScavenging.cpp:585
llvm::raw_string_ostream
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:628
llvm::RISCVII::MO_GOT_HI
@ MO_GOT_HI
Definition: RISCVBaseInfo.h:211
llvm::ARM_MB::LD
@ LD
Definition: ARMBaseInfo.h:72
llvm::AArch64SysReg::lookupSysRegByName
const SysReg * lookupSysRegByName(StringRef)
contains
return AArch64::GPR64RegClass contains(Reg)
llvm::Target
Target - Wrapper for Target specific information.
Definition: TargetRegistry.h:145
llvm::RISCVII::MO_DIRECT_FLAG_MASK
@ MO_DIRECT_FLAG_MASK
Definition: RISCVBaseInfo.h:221
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1181
llvm::RISCVOp::OPERAND_UIMMLOG2XLEN
@ OPERAND_UIMMLOG2XLEN
Definition: RISCVBaseInfo.h:246
llvm::MachineFunction::getMachineMemOperand
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Definition: MachineFunction.cpp:454
llvm::RISCVII::TAIL_AGNOSTIC
@ TAIL_AGNOSTIC
Definition: RISCVBaseInfo.h:120
llvm::enumerate
detail::enumerator< R > enumerate(R &&TheRange)
Given an input range, returns a new range whose values are are pair (A,B) such that A is the 0-based ...
Definition: STLExtras.h:2068
ErrorHandling.h
llvm::VirtRegMap
Definition: VirtRegMap.h:33
llvm::erase_if
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:1802
llvm::LiveRange::Segment
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
llvm::RegState::Define
@ Define
Register definition.
Definition: MachineInstrBuilder.h:44
llvm::RISCVMatInt::generateInstSeq
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures)
Definition: RISCVMatInt.cpp:176
MCInstBuilder.h
llvm::IRSimilarity::Invisible
@ Invisible
Definition: IRSimilarityIdentifier.h:76
llvm::RISCVTargetMachine
Definition: RISCVTargetMachine.h:23
llvm::TargetSubtargetInfo::getRegisterInfo
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
Definition: TargetSubtargetInfo.h:125
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:140
llvm::RISCVII::getVecPolicyOpNum
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
Definition: RISCVBaseInfo.h:197
llvm::RISCVII::LMUL_8
@ LMUL_8
Definition: RISCVBaseInfo.h:112
llvm::RISCVVType::isValidSEW
static bool isValidSEW(unsigned SEW)
Definition: RISCVBaseInfo.h:411
llvm::RISCVII::MASK_AGNOSTIC
@ MASK_AGNOSTIC
Definition: RISCVBaseInfo.h:121
llvm::TargetRegisterInfo
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Definition: TargetRegisterInfo.h:237
llvm::MCRegisterInfo::getEncodingValue
uint16_t getEncodingValue(MCRegister RegNo) const
Returns the encoding for RegNo.
Definition: MCRegisterInfo.h:553
llvm::RISCVII::hasSEWOp
static bool hasSEWOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:151
llvm::MipsII::MO_TPREL_HI
@ MO_TPREL_HI
MO_TPREL_HI/LO - Represents the hi and low part of the offset from.
Definition: MipsBaseInfo.h:73
llvm::Function::getContext
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:320
CASE_OPERAND_UIMM
#define CASE_OPERAND_UIMM(NUM)
llvm::X86ISD::FNMADD
@ FNMADD
Definition: X86ISelLowering.h:552
llvm::MachineInstr::getDesc
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:513
llvm::outliner::InstrType
InstrType
Represents how an instruction should be mapped by the outliner.
Definition: MachineOutliner.h:33
llvm::RISCVVType::isTailAgnostic
static bool isTailAgnostic(unsigned VType)
Definition: RISCVBaseInfo.h:452
llvm::MachineMemOperand
A description of a memory reference used in the backend.
Definition: MachineMemOperand.h:127
llvm::RISCVII::MO_TLS_GOT_HI
@ MO_TLS_GOT_HI
Definition: RISCVBaseInfo.h:215
llvm::M68kII::MO_PLT
@ MO_PLT
On a symbol operand this indicates that the immediate is offset to the PLT entry of symbol name from ...
Definition: M68kBaseInfo.h:114
llvm::PPCISD::FNMSUB
@ FNMSUB
FNMSUB - Negated multiply-subtract instruction.
Definition: PPCISelLowering.h:170
llvm::RISCVInstrInfo::insertIndirectBranch
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
Definition: RISCVInstrInfo.cpp:951
llvm::RISCVInstrInfo::insertOutlinedCall
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
Definition: RISCVInstrInfo.cpp:1515
llvm::RISCVOp::OPERAND_UIMM8_LSB000
@ OPERAND_UIMM8_LSB000
Definition: RISCVBaseInfo.h:235
llvm::RISCVInstrInfo::STI
const RISCVSubtarget & STI
Definition: RISCVInstrInfo.h:186
llvm::Optional
Definition: APInt.h:33
llvm::RISCVII::MO_CALL
@ MO_CALL
Definition: RISCVBaseInfo.h:205
llvm::RISCVInstrInfo::foldMemoryOperandImpl
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
Definition: RISCVInstrInfo.cpp:637
llvm::max
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:337
STLExtras.h
llvm::MachineBasicBlock::back
MachineInstr & back()
Definition: MachineBasicBlock.h:285
llvm::MCInst
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
llvm::RISCVInstrInfo::getBranchDestBlock
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:1025
Log2SEW
unsigned Log2SEW
Definition: RISCVInsertVSETVLI.cpp:823
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:458
llvm::outliner::OutlinedFunction
The information necessary to create an outlined function for some class of candidate.
Definition: MachineOutliner.h:214
llvm::RISCVII::hasVecPolicyOp
static bool hasVecPolicyOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:159
RISCVMatInt.h
llvm::RISCVOp::OPERAND_SIMM6_NONZERO
@ OPERAND_SIMM6_NONZERO
Definition: RISCVBaseInfo.h:241
llvm::RISCVInstrInfo::isLoadFromStackSlot
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition: RISCVInstrInfo.cpp:67
llvm::RISCVVType::getSEW
static unsigned getSEW(unsigned VType)
Definition: RISCVBaseInfo.h:447
llvm::RISCVInstrInfo::isBranchOffsetInRange
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
Definition: RISCVInstrInfo.cpp:1032
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1628
llvm::RISCV::isZEXT_W
bool isZEXT_W(const MachineInstr &MI)
Definition: RISCVInstrInfo.cpp:2052
RISCVGenInstrInfo
llvm::RISCVInstrInfo::convertToThreeAddress
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Definition: RISCVInstrInfo.cpp:1880
llvm::MachineInstr::hasOneMemOperand
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
Definition: MachineInstr.h:746
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::MachineInstr::hasOrderedMemoryRef
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
Definition: MachineInstr.cpp:1363
MachineRegisterInfo.h
llvm::RISCV::isFaultFirstLoad
bool isFaultFirstLoad(const MachineInstr &MI)
Definition: RISCVInstrInfo.cpp:2142
llvm::ISD::INLINEASM
@ INLINEASM
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:1025
llvm::RISCVSubtarget::is64Bit
bool is64Bit() const
Definition: RISCVSubtarget.h:187
CASE_VFMA_OPCODE_LMULS_MF4
#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
Definition: RISCVInstrInfo.cpp:1588
llvm::BitmaskEnumDetail::Mask
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::RISCVII::LMUL_4
@ LMUL_4
Definition: RISCVBaseInfo.h:111
llvm::MachineBasicBlock::pred_size
unsigned pred_size() const
Definition: MachineBasicBlock.h:365
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:666
llvm::MCInstrDesc::TSFlags
uint64_t TSFlags
Definition: MCInstrDesc.h:205
llvm::Log2_64
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:553
llvm::TargetInstrInfo::commuteInstructionImpl
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
Definition: TargetInstrInfo.cpp:165
llvm::MachineInstrBuilder::addMBB
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:146
llvm::RISCVII::MO_TLS_GD_HI
@ MO_TLS_GD_HI
Definition: RISCVBaseInfo.h:216
llvm::RISCVCC::COND_LT
@ COND_LT
Definition: RISCVInstrInfo.h:33
llvm::RISCVVPseudosTable
Definition: RISCVInstrInfo.cpp:45
llvm::MachineOperand::CreateImm
static MachineOperand CreateImm(int64_t Val)
Definition: MachineOperand.h:782
llvm::RISCVSubtarget::hasStdExtZmmul
bool hasStdExtZmmul() const
Definition: RISCVSubtarget.h:185
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::RISCVOp::OPERAND_RVKRNUM
@ OPERAND_RVKRNUM
Definition: RISCVBaseInfo.h:251
llvm::MachineOperand::getImm
int64_t getImm() const
Definition: MachineOperand.h:546
llvm::MachineFunction::getInfo
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Definition: MachineFunction.h:754
parseCondBranch
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
Definition: RISCVInstrInfo.cpp:762
llvm::RISCVInstrInfo::findCommutedOpIndices
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
Definition: RISCVInstrInfo.cpp:1602
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:526
llvm::IRSimilarity::Illegal
@ Illegal
Definition: IRSimilarityIdentifier.h:76
llvm::RISCVInstrInfo::analyzeBranch
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Definition: RISCVInstrInfo.cpp:812
llvm::TargetRegisterClass
Definition: TargetRegisterInfo.h:46
TBB
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
Definition: RISCVRedundantCopyElimination.cpp:76
LiveVariables.h
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:547
llvm::LiveVariables::replaceKillInstruction
void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
Definition: LiveVariables.cpp:752
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:197
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:48
llvm::RISCVII::hasVLOp
static bool hasVLOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:155
llvm::RISCVCC::COND_LTU
@ COND_LTU
Definition: RISCVInstrInfo.h:35
llvm::MCInstrDesc::isCommutable
bool isCommutable() const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
Definition: MCInstrDesc.h:478
llvm::MCID::Flag
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:147
llvm::RISCVInstrInfo::decomposeMachineOperandsTargetFlags
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
Definition: RISCVInstrInfo.cpp:1331
llvm::MachineBasicBlock::rend
reverse_iterator rend()
Definition: MachineBasicBlock.h:315
getOppositeBranchCondition
static ARCCC::CondCode getOppositeBranchCondition(ARCCC::CondCode CC)
Return the inverse of passed condition, i.e. turning COND_E to COND_NE.
Definition: ARCInstrInfo.cpp:102
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:145
llvm::RegScavenger::enterBasicBlockEnd
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
Definition: RegisterScavenging.cpp:87
llvm::raw_ostream::flush
void flush()
Definition: raw_ostream.h:185
llvm::RISCVInstrInfo::removeBranch
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
Definition: RISCVInstrInfo.cpp:880
llvm::RISCVInstrInfo::storeRegToStackSlot
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:451
llvm::MCRegisterInfo::isSubRegisterEq
bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
Definition: MCRegisterInfo.h:568
llvm::LiveInterval
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:686
llvm::RISCV::isZEXT_B
bool isZEXT_B(const MachineInstr &MI)
Definition: RISCVInstrInfo.cpp:2058
llvm::RISCVInstrInfo::getNop
MCInst getNop() const override
Definition: RISCVInstrInfo.cpp:58
llvm::TargetInstrInfo::isMBBSafeToOutlineFrom
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
Definition: TargetInstrInfo.cpp:1424
llvm::SlotIndex
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:82
llvm::RISCVOp::OPERAND_VTYPEI11
@ OPERAND_VTYPEI11
Definition: RISCVBaseInfo.h:250
llvm::isIntN
bool isIntN(unsigned N, int64_t x)
Checks if an signed integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:427
llvm::None
const NoneType None
Definition: None.h:24
llvm::RISCVOp::OPERAND_SIMM10_LSB0000_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
Definition: RISCVBaseInfo.h:242
llvm::RISCVInstrInfo::areMemAccessesTriviallyDisjoint
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
Definition: RISCVInstrInfo.cpp:1299
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:94
CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
Definition: RISCVInstrInfo.cpp:1731
llvm::MachineRegisterInfo::getRegClass
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
Definition: MachineRegisterInfo.h:647
MachineOutlinerConstructionID
MachineOutlinerConstructionID
Definition: RISCVInstrInfo.cpp:1378
llvm::RISCVOp::OPERAND_UIMM_SHFL
@ OPERAND_UIMM_SHFL
Definition: RISCVBaseInfo.h:248
llvm::TargetInstrInfo::createMIROperandComment
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
Definition: TargetInstrInfo.cpp:1347
TSFlags
uint64_t TSFlags
Definition: RISCVInsertVSETVLI.cpp:775
llvm::MachineFunction::getSubtarget
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Definition: MachineFunction.h:656
llvm::MachineInstrBuilder::addFrameIndex
const MachineInstrBuilder & addFrameIndex(int Idx) const
Definition: MachineInstrBuilder.h:152
llvm::DataLayout::isBigEndian
bool isBigEndian() const
Definition: DataLayout.h:245
llvm::MachineInstrBuilder::setMIFlag
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
Definition: MachineInstrBuilder.h:278
llvm::cl::opt< bool >
forwardCopyWillClobberTuple
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg, unsigned NumRegs)
Definition: RISCVInstrInfo.cpp:118
llvm::MachineInstrBundleIterator::getReverse
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Definition: MachineInstrBundleIterator.h:283
llvm::RISCVVType::decodeVLMUL
std::pair< unsigned, bool > decodeVLMUL(RISCVII::VLMUL VLMUL)
Definition: RISCVBaseInfo.cpp:147
llvm::LiveIntervals::ReplaceMachineInstrInMaps
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
Definition: LiveIntervals.h:274
llvm::IRSimilarity::Legal
@ Legal
Definition: IRSimilarityIdentifier.h:76
llvm::MachineOperand::isReg
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Definition: MachineOperand.h:320
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:66
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
uint64_t
llvm::MachineFrameInfo::getObjectSize
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
Definition: MachineFrameInfo.h:469
LiveIntervals.h
llvm::ARM_MB::ST
@ ST
Definition: ARMBaseInfo.h:73
llvm::RISCVII::getVLOpNum
static unsigned getVLOpNum(const MCInstrDesc &Desc)
Definition: RISCVBaseInfo.h:177
llvm::RISCVII::getMergeOpNum
static unsigned getMergeOpNum(const MCInstrDesc &Desc)
Definition: RISCVBaseInfo.h:171
llvm::RISCVII::MO_PCREL_LO
@ MO_PCREL_LO
Definition: RISCVBaseInfo.h:209
llvm::RISCVInstrInfo::isFunctionSafeToOutlineFrom
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
Definition: RISCVInstrInfo.cpp:1354
llvm::outliner::Candidate
An individual sequence of instructions to be replaced with a call to an outlined function.
Definition: MachineOutliner.h:37
llvm::RISCVCC::COND_EQ
@ COND_EQ
Definition: RISCVInstrInfo.h:31
MemoryLocation.h
llvm::RISCVInstrInfo::getMemOperandWithOffsetWidth
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, unsigned &Width, const TargetRegisterInfo *TRI) const
Definition: RISCVInstrInfo.cpp:1276
llvm::RISCVInstrInfo::isMBBSafeToOutlineFrom
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
Definition: RISCVInstrInfo.cpp:1371
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::RegScavenger
Definition: RegisterScavenging.h:34
llvm::MachineFrameInfo::getObjectAlign
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
Definition: MachineFrameInfo.h:483
llvm::TargetRegisterInfo::eliminateFrameIndex
virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj, unsigned FIOperandNum, RegScavenger *RS=nullptr) const =0
This method must be overriden to eliminate abstract frame indices from instructions which may use the...
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:440
llvm::TargetStackID::ScalableVector
@ ScalableVector
Definition: TargetFrameLowering.h:30
llvm::MCInstBuilder
Definition: MCInstBuilder.h:21
llvm::RISCVMachineFunctionInfo
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
Definition: RISCVMachineFunctionInfo.h:47
llvm::MachineBasicBlock::getLastNonDebugInstr
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
Definition: MachineBasicBlock.cpp:264
MachineFunctionPass.h
isConvertibleToVMV_V_V
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVII::VLMUL LMul)
Definition: RISCVInstrInfo.cpp:123
llvm::RISCVSubtarget
Definition: RISCVSubtarget.h:35
llvm::X86ISD::FMSUB
@ FMSUB
Definition: X86ISelLowering.h:553
llvm::MachineFunction::getName
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
Definition: MachineFunction.cpp:567
llvm::LoongArchII::MO_CALL
@ MO_CALL
Definition: LoongArchBaseInfo.h:30
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::MachineFunction::getFrameInfo
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Definition: MachineFunction.h:672
llvm::AArch64SysReg::SysReg::Encoding
unsigned Encoding
Definition: AArch64BaseInfo.h:667
llvm::MachineBasicBlock::getParent
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
Definition: MachineBasicBlock.h:261
llvm::MachineInstrBuilder::addMemOperand
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Definition: MachineInstrBuilder.h:202
llvm::RISCVMatInt::RegX0
@ RegX0
Definition: RISCVMatInt.h:25
llvm::RISCVInstrInfo::createMIROperandComment
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:1528
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:97
llvm::RISCVInstrInfo::isCopyInstrImpl
Optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:1104
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
CASE_VFMA_SPLATS
#define CASE_VFMA_SPLATS(OP)
Definition: RISCVInstrInfo.cpp:1596
RISCV.h
llvm::MachineInstr::MIFlag
MIFlag
Definition: MachineInstr.h:82
llvm::RISCVSubtarget::hasStdExtZba
bool hasStdExtZba() const
Definition: RISCVSubtarget.h:159
llvm::SlotIndex::getRegSlot
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:259
llvm::LiveIntervals::getInterval
LiveInterval & getInterval(Register Reg)
Definition: LiveIntervals.h:112
llvm::RISCVInstrInfo::getVLENFactoredAmount
void getVLENFactoredAmount(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, int64_t Amount, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Definition: RISCVInstrInfo.cpp:1956
llvm::RISCVII::hasMergeOp
static bool hasMergeOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:147
llvm::MachineFunction
Definition: MachineFunction.h:257
CASE_VFMA_OPCODE_LMULS
#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)
Definition: RISCVInstrInfo.cpp:1592
llvm::RISCVOp::OPERAND_SIMM5
@ OPERAND_SIMM5
Definition: RISCVBaseInfo.h:238
llvm::MipsII::MO_TPREL_LO
@ MO_TPREL_LO
Definition: MipsBaseInfo.h:74
llvm::MachineBasicBlock::succ_empty
bool succ_empty() const
Definition: MachineBasicBlock.h:384
llvm::ArrayRef< unsigned >
llvm::MachineFrameInfo::setStackID
void setStackID(int ObjectIdx, uint8_t ID)
Definition: MachineFrameInfo.h:728
llvm::MachineOperand::getMBB
MachineBasicBlock * getMBB() const
Definition: MachineOperand.h:561
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
CASE_WIDEOP_OPCODE_LMULS
#define CASE_WIDEOP_OPCODE_LMULS(OP)
Definition: RISCVInstrInfo.cpp:1859
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:137
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
MBBI
MachineBasicBlock MachineBasicBlock::iterator MBBI
Definition: AArch64SLSHardening.cpp:75
llvm::MachineInstr::getOpcode
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:516
llvm::RISCVSubtarget::getRegisterInfo
const RISCVRegisterInfo * getRegisterInfo() const override
Definition: RISCVSubtarget.h:134
llvm::MCInstBuilder::addImm
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Definition: MCInstBuilder.h:37
llvm::RISCVInstrInfo::movImm
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Definition: RISCVInstrInfo.cpp:695
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::RISCVII::LMUL_2
@ LMUL_2
Definition: RISCVBaseInfo.h:110
llvm::RegState::Implicit
@ Implicit
Not emitted register (e.g. carry, or temporary result).
Definition: MachineInstrBuilder.h:46
uint32_t
llvm::X86ISD::FLD
@ FLD
This instruction implements an extending load to FP stack slots.
Definition: X86ISelLowering.h:838
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
S
add sub stmia L5 ldr r0 bl L_printf $stub Instead of a and a wouldn t it be better to do three moves *Return an aggregate type is even return S
Definition: README.txt:210
CC
auto CC
Definition: RISCVRedundantCopyElimination.cpp:79
llvm::RISCVInstrInfo::buildOutlinedFrame
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Definition: RISCVInstrInfo.cpp:1487
llvm::RISCVInstrInfo::insertBranch
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
Definition: RISCVInstrInfo.cpp:914
llvm::MCRegisterInfo
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Definition: MCRegisterInfo.h:135
getCondFromBranchOpc
static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc)
Definition: RISCVInstrInfo.cpp:740
llvm::SignExtend64
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition: MathExtras.h:718
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:134
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::MachineBasicBlock::addLiveIn
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
Definition: MachineBasicBlock.h:404
llvm::ISD::FrameIndex
@ FrameIndex
Definition: ISDOpcodes.h:80
llvm::MachineRegisterInfo::replaceRegWith
void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
Definition: MachineRegisterInfo.cpp:378
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
CASE_WIDEOP_CHANGE_OPCODE_LMULS
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
Definition: RISCVInstrInfo.cpp:1876
llvm::LLVMContext::diagnose
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Definition: LLVMContext.cpp:248
llvm::RISCVVType::printVType
void printVType(unsigned VType, raw_ostream &OS)
Definition: RISCVBaseInfo.cpp:163
llvm::RISCVVType::getVLMUL
static RISCVII::VLMUL getVLMUL(unsigned VType)
Definition: RISCVBaseInfo.h:423
llvm::RISCVOp::OPERAND_UIMM8_LSB00
@ OPERAND_UIMM8_LSB00
Definition: RISCVBaseInfo.h:234
llvm::RISCVMachineFunctionInfo::getBranchRelaxationScratchFrameIndex
int getBranchRelaxationScratchFrameIndex() const
Definition: RISCVMachineFunctionInfo.h:93
llvm::MachineFunction::getFunction
Function & getFunction()
Return the LLVM function that this machine code represents.
Definition: MachineFunction.h:622
llvm::RISCVInstrInfo::getOutliningCandidateInfo
outliner::OutlinedFunction getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const override
Definition: RISCVInstrInfo.cpp:1387
llvm::TargetRegisterInfo::getRegSizeInBits
unsigned getRegSizeInBits(const TargetRegisterClass &RC) const
Return the size in bits of a register from class RC.
Definition: TargetRegisterInfo.h:280
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Definition: MachineFunction.h:652
llvm::DestSourcePair
Definition: TargetInstrInfo.h:69
get
Should compile to something r4 addze r3 instead we get
Definition: README.txt:24
CASE_WIDEOP_OPCODE_LMULS_MF4
#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
Definition: RISCVInstrInfo.cpp:1852
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:348
llvm::MachineBasicBlock::insert
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
Definition: MachineBasicBlock.cpp:1327
llvm::RISCVOp::OPERAND_VTYPEI10
@ OPERAND_VTYPEI10
Definition: RISCVBaseInfo.h:249
llvm::RISCVOp::OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_UIMMLOG2XLEN_NONZERO
Definition: RISCVBaseInfo.h:247
llvm::MachineInstr::hasUnmodeledSideEffects
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
Definition: MachineInstr.cpp:1437
llvm::ISD::INLINEASM_BR
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
Definition: ISDOpcodes.h:1028
llvm::RegScavenger::setRegUsed
void setRegUsed(Register Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
Definition: RegisterScavenging.cpp:51
llvm::RegState::Kill
@ Kill
The last use of a register.
Definition: MachineInstrBuilder.h:48
llvm::RISCV::isRVVSpill
bool isRVVSpill(const MachineInstr &MI)
Definition: RISCVInstrInfo.cpp:2091
llvm::RISCVSubtarget::getXLen
unsigned getXLen() const
Definition: RISCVSubtarget.h:197
RISCVInstrInfo.h
llvm::LiveIntervals
Definition: LiveIntervals.h:53
llvm::RISCVInstrInfo::isStoreToStackSlot
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition: RISCVInstrInfo.cpp:94
llvm::RISCVCC::COND_GE
@ COND_GE
Definition: RISCVInstrInfo.h:34
llvm::MachineRegisterInfo::clearVirtRegs
void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
Definition: MachineRegisterInfo.cpp:200
llvm::RISCVMatInt::RegReg
@ RegReg
Definition: RISCVMatInt.h:24
llvm::MachineOperand::isImm
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Definition: MachineOperand.h:322
llvm::MachineMemOperand::MOStore
@ MOStore
The memory access writes data.
Definition: MachineMemOperand.h:136
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:439
llvm::ISD::ADD
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
llvm::makeArrayRef
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:475
llvm::RISCVInstrInfo::isAsCheapAsAMove
bool isAsCheapAsAMove(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:1082
llvm::RISCVInstrInfo::commuteInstructionImpl
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Definition: RISCVInstrInfo.cpp:1744
llvm::RISCVMatInt::Imm
@ Imm
Definition: RISCVMatInt.h:23
RISCVSubtarget.h
llvm::LoongArchII::MO_PCREL_LO
@ MO_PCREL_LO
Definition: LoongArchBaseInfo.h:33
llvm::RISCVInstrInfo::loadRegFromStackSlot
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:545
llvm::getKillRegState
unsigned getKillRegState(bool B)
Definition: MachineInstrBuilder.h:546
llvm::RISCVInstrInfo::copyPhysReg
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc) const override
Definition: RISCVInstrInfo.cpp:255
llvm::MachineFrameInfo
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Definition: MachineFrameInfo.h:105
MachineOutlinerDefault
@ MachineOutlinerDefault
Definition: RISCVInstrInfo.cpp:1379
SEW
unsigned SEW
Definition: RISCVInsertVSETVLI.cpp:825
llvm::RISCVCC::CondCode
CondCode
Definition: RISCVInstrInfo.h:30
llvm::MemoryLocation::UnknownSize
@ UnknownSize
Definition: MemoryLocation.h:215
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:357
CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
Definition: RISCVInstrInfo.cpp:1869
SmallVector.h
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:1018
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:305
MachineInstrBuilder.h
llvm::RISCVMatInt::RegImm
@ RegImm
Definition: RISCVMatInt.h:22
llvm::ISD::MUL
@ MUL
Definition: ISDOpcodes.h:241
llvm::TargetInstrInfo::findCommutedOpIndices
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
Definition: TargetInstrInfo.cpp:294
llvm::RISCVOp::OPERAND_ZERO
@ OPERAND_ZERO
Definition: RISCVBaseInfo.h:237
N
#define N
llvm::RISCVInstrInfo::verifyInstruction
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
Definition: RISCVInstrInfo.cpp:1128
RISCVMachineFunctionInfo.h
llvm::LiveRange::getSegmentContaining
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
Definition: LiveInterval.h:408
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:164
llvm::MachineFunction::getDataLayout
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Definition: MachineFunction.cpp:285
llvm::MachineBasicBlock::empty
bool empty() const
Definition: MachineBasicBlock.h:277
llvm::MCInstBuilder::addReg
MCInstBuilder & addReg(unsigned Reg)
Add a new register operand.
Definition: MCInstBuilder.h:31
CASE_VFMA_CHANGE_OPCODE_LMULS
#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
Definition: RISCVInstrInfo.cpp:1735
llvm::RISCVII::VLMUL
VLMUL
Definition: RISCVBaseInfo.h:108
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
llvm::RISCV::isRVVSpillForZvlsseg
Optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
Definition: RISCVInstrInfo.cpp:2102
llvm::Function::hasMinSize
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:661
llvm::TargetRegisterInfo::getSubReg
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
Definition: TargetRegisterInfo.h:1134
isRVVWholeLoadStore
static bool isRVVWholeLoadStore(unsigned Opcode)
Definition: RISCVInstrInfo.cpp:2063
TM
const char LLVMTargetMachineRef TM
Definition: PassBuilderBindings.cpp:47
llvm::RegState::Dead
@ Dead
Unused definition.
Definition: MachineInstrBuilder.h:50
llvm::RISCVOp::OPERAND_SIMM12_LSB00000
@ OPERAND_SIMM12_LSB00000
Definition: RISCVBaseInfo.h:244
llvm::RISCVOp::OPERAND_UIMM7_LSB00
@ OPERAND_UIMM7_LSB00
Definition: RISCVBaseInfo.h:233
llvm::LiveVariables
Definition: LiveVariables.h:47
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::cl::desc
Definition: CommandLine.h:413
RegisterScavenging.h
llvm::RISCVSubtarget::hasStdExtM
bool hasStdExtM() const
Definition: RISCVSubtarget.h:151
CASE_VFMA_CHANGE_OPCODE_SPLATS
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
Definition: RISCVInstrInfo.cpp:1739
llvm::MachineInstrBundleIterator< const MachineInstr >
llvm::isPowerOf2_64
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:463
llvm::RISCV::isSEXT_W
bool isSEXT_W(const MachineInstr &MI)
Definition: RISCVInstrInfo.cpp:2046
TargetRegistry.h
llvm::MCSubtargetInfo
Generic base class for all target subtargets.
Definition: MCSubtargetInfo.h:76
llvm::AVRII::MO_LO
@ MO_LO
On a symbol operand, this represents the lo part.
Definition: AVRInstrInfo.h:52
llvm::RISCVOp::OPERAND_FIRST_RISCV_IMM
@ OPERAND_FIRST_RISCV_IMM
Definition: RISCVBaseInfo.h:227
llvm::MCInstrDesc::operands
iterator_range< const_opInfo_iterator > operands() const
Definition: MCInstrDesc.h:237
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:307
llvm::MCInstrDesc::isConditionalBranch
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Definition: MCInstrDesc.h:314
llvm::RISCVInstrInfo::getInstSizeInBytes
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:1056
llvm::MachineOperand::isIdenticalTo
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
Definition: MachineOperand.cpp:287
llvm::AVRII::MO_HI
@ MO_HI
On a symbol operand, this represents the hi part.
Definition: AVRInstrInfo.h:55
llvm::MCRegister
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:24
llvm::RISCVCC::COND_NE
@ COND_NE
Definition: RISCVInstrInfo.h:32
RISCVTargetMachine.h