// Extracted from LLVM 14.0.0git: llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
//===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISCV implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
12 
#include "RISCVInstrInfo.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/Support/ErrorHandling.h"
30 
31 using namespace llvm;
32 
33 #define GEN_CHECK_COMPRESS_INSTR
34 #include "RISCVGenCompressInstEmitter.inc"
35 
36 #define GET_INSTRINFO_CTOR_DTOR
37 #include "RISCVGenInstrInfo.inc"
38 
39 namespace llvm {
40 namespace RISCVVPseudosTable {
41 
42 using namespace RISCV;
43 
44 #define GET_RISCVVPseudosTable_IMPL
45 #include "RISCVGenSearchableTables.inc"
46 
47 } // namespace RISCVVPseudosTable
48 } // namespace llvm
49 
51  : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
52  STI(STI) {}
53 
55  if (STI.getFeatureBits()[RISCV::FeatureStdExtC])
56  return MCInstBuilder(RISCV::C_NOP);
57  return MCInstBuilder(RISCV::ADDI)
58  .addReg(RISCV::X0)
59  .addReg(RISCV::X0)
60  .addImm(0);
61 }
62 
64  int &FrameIndex) const {
65  switch (MI.getOpcode()) {
66  default:
67  return 0;
68  case RISCV::LB:
69  case RISCV::LBU:
70  case RISCV::LH:
71  case RISCV::LHU:
72  case RISCV::FLH:
73  case RISCV::LW:
74  case RISCV::FLW:
75  case RISCV::LWU:
76  case RISCV::LD:
77  case RISCV::FLD:
78  break;
79  }
80 
81  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
82  MI.getOperand(2).getImm() == 0) {
83  FrameIndex = MI.getOperand(1).getIndex();
84  return MI.getOperand(0).getReg();
85  }
86 
87  return 0;
88 }
89 
91  int &FrameIndex) const {
92  switch (MI.getOpcode()) {
93  default:
94  return 0;
95  case RISCV::SB:
96  case RISCV::SH:
97  case RISCV::SW:
98  case RISCV::FSH:
99  case RISCV::FSW:
100  case RISCV::SD:
101  case RISCV::FSD:
102  break;
103  }
104 
105  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
106  MI.getOperand(2).getImm() == 0) {
107  FrameIndex = MI.getOperand(1).getIndex();
108  return MI.getOperand(0).getReg();
109  }
110 
111  return 0;
112 }
113 
// Return true if copying a tuple of NumRegs consecutive registers from
// SrcReg to DstReg in the forward direction would overwrite source registers
// that have not been copied yet, i.e. DstReg lies within
// [SrcReg, SrcReg + NumRegs) modulo 32 (the vector register file size).
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, that happens to be
  // easily obtainable with a mask.
  return ((DstReg - SrcReg) & 0x1f) < NumRegs;
}
120 
123  const DebugLoc &DL, MCRegister DstReg,
124  MCRegister SrcReg, bool KillSrc) const {
125  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
126  BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
127  .addReg(SrcReg, getKillRegState(KillSrc))
128  .addImm(0);
129  return;
130  }
131 
132  // FPR->FPR copies and VR->VR copies.
133  unsigned Opc;
134  bool IsScalableVector = true;
135  unsigned NF = 1;
136  unsigned LMul = 1;
137  unsigned SubRegIdx = RISCV::sub_vrm1_0;
138  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
139  Opc = RISCV::FSGNJ_H;
140  IsScalableVector = false;
141  } else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
142  Opc = RISCV::FSGNJ_S;
143  IsScalableVector = false;
144  } else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
145  Opc = RISCV::FSGNJ_D;
146  IsScalableVector = false;
147  } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
148  Opc = RISCV::PseudoVMV1R_V;
149  } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
150  Opc = RISCV::PseudoVMV2R_V;
151  } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
152  Opc = RISCV::PseudoVMV4R_V;
153  } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
154  Opc = RISCV::PseudoVMV8R_V;
155  } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
156  Opc = RISCV::PseudoVMV1R_V;
157  SubRegIdx = RISCV::sub_vrm1_0;
158  NF = 2;
159  LMul = 1;
160  } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
161  Opc = RISCV::PseudoVMV2R_V;
162  SubRegIdx = RISCV::sub_vrm2_0;
163  NF = 2;
164  LMul = 2;
165  } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
166  Opc = RISCV::PseudoVMV4R_V;
167  SubRegIdx = RISCV::sub_vrm4_0;
168  NF = 2;
169  LMul = 4;
170  } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
171  Opc = RISCV::PseudoVMV1R_V;
172  SubRegIdx = RISCV::sub_vrm1_0;
173  NF = 3;
174  LMul = 1;
175  } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
176  Opc = RISCV::PseudoVMV2R_V;
177  SubRegIdx = RISCV::sub_vrm2_0;
178  NF = 3;
179  LMul = 2;
180  } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
181  Opc = RISCV::PseudoVMV1R_V;
182  SubRegIdx = RISCV::sub_vrm1_0;
183  NF = 4;
184  LMul = 1;
185  } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
186  Opc = RISCV::PseudoVMV2R_V;
187  SubRegIdx = RISCV::sub_vrm2_0;
188  NF = 4;
189  LMul = 2;
190  } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
191  Opc = RISCV::PseudoVMV1R_V;
192  SubRegIdx = RISCV::sub_vrm1_0;
193  NF = 5;
194  LMul = 1;
195  } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
196  Opc = RISCV::PseudoVMV1R_V;
197  SubRegIdx = RISCV::sub_vrm1_0;
198  NF = 6;
199  LMul = 1;
200  } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
201  Opc = RISCV::PseudoVMV1R_V;
202  SubRegIdx = RISCV::sub_vrm1_0;
203  NF = 7;
204  LMul = 1;
205  } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
206  Opc = RISCV::PseudoVMV1R_V;
207  SubRegIdx = RISCV::sub_vrm1_0;
208  NF = 8;
209  LMul = 1;
210  } else {
211  llvm_unreachable("Impossible reg-to-reg copy");
212  }
213 
214  if (IsScalableVector) {
215  if (NF == 1) {
216  BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
217  .addReg(SrcReg, getKillRegState(KillSrc));
218  } else {
220 
221  int I = 0, End = NF, Incr = 1;
222  unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
223  unsigned DstEncoding = TRI->getEncodingValue(DstReg);
224  if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMul)) {
225  I = NF - 1;
226  End = -1;
227  Incr = -1;
228  }
229 
230  for (; I != End; I += Incr) {
231  BuildMI(MBB, MBBI, DL, get(Opc), TRI->getSubReg(DstReg, SubRegIdx + I))
232  .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
233  getKillRegState(KillSrc));
234  }
235  }
236  } else {
237  BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
238  .addReg(SrcReg, getKillRegState(KillSrc))
239  .addReg(SrcReg, getKillRegState(KillSrc));
240  }
241 }
242 
245  Register SrcReg, bool IsKill, int FI,
246  const TargetRegisterClass *RC,
247  const TargetRegisterInfo *TRI) const {
248  DebugLoc DL;
249  if (I != MBB.end())
250  DL = I->getDebugLoc();
251 
252  MachineFunction *MF = MBB.getParent();
253  MachineFrameInfo &MFI = MF->getFrameInfo();
254 
255  unsigned Opcode;
256  bool IsScalableVector = true;
257  bool IsZvlsseg = true;
258  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
259  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
260  RISCV::SW : RISCV::SD;
261  IsScalableVector = false;
262  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
263  Opcode = RISCV::FSH;
264  IsScalableVector = false;
265  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
266  Opcode = RISCV::FSW;
267  IsScalableVector = false;
268  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
269  Opcode = RISCV::FSD;
270  IsScalableVector = false;
271  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
272  Opcode = RISCV::PseudoVSPILL_M1;
273  IsZvlsseg = false;
274  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
275  Opcode = RISCV::PseudoVSPILL_M2;
276  IsZvlsseg = false;
277  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
278  Opcode = RISCV::PseudoVSPILL_M4;
279  IsZvlsseg = false;
280  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
281  Opcode = RISCV::PseudoVSPILL_M8;
282  IsZvlsseg = false;
283  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
284  Opcode = RISCV::PseudoVSPILL2_M1;
285  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
286  Opcode = RISCV::PseudoVSPILL2_M2;
287  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
288  Opcode = RISCV::PseudoVSPILL2_M4;
289  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
290  Opcode = RISCV::PseudoVSPILL3_M1;
291  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
292  Opcode = RISCV::PseudoVSPILL3_M2;
293  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
294  Opcode = RISCV::PseudoVSPILL4_M1;
295  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
296  Opcode = RISCV::PseudoVSPILL4_M2;
297  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
298  Opcode = RISCV::PseudoVSPILL5_M1;
299  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
300  Opcode = RISCV::PseudoVSPILL6_M1;
301  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
302  Opcode = RISCV::PseudoVSPILL7_M1;
303  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
304  Opcode = RISCV::PseudoVSPILL8_M1;
305  else
306  llvm_unreachable("Can't store this register to stack slot");
307 
308  if (IsScalableVector) {
312 
314  auto MIB = BuildMI(MBB, I, DL, get(Opcode));
315  if (IsZvlsseg) {
316  // We need a GPR register to hold the incremented address for each subreg
317  // after expansion.
318  Register AddrInc =
319  MF->getRegInfo().createVirtualRegister(&RISCV::GPRRegClass);
320  MIB.addReg(AddrInc, RegState::Define);
321  }
322  MIB.addReg(SrcReg, getKillRegState(IsKill))
323  .addFrameIndex(FI)
324  .addMemOperand(MMO);
325  if (IsZvlsseg) {
326  // For spilling/reloading Zvlsseg registers, append the dummy field for
327  // the scaled vector length. The argument will be used when expanding
328  // these pseudo instructions.
329  MIB.addReg(RISCV::X0);
330  }
331  } else {
334  MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
335 
336  BuildMI(MBB, I, DL, get(Opcode))
337  .addReg(SrcReg, getKillRegState(IsKill))
338  .addFrameIndex(FI)
339  .addImm(0)
340  .addMemOperand(MMO);
341  }
342 }
343 
346  Register DstReg, int FI,
347  const TargetRegisterClass *RC,
348  const TargetRegisterInfo *TRI) const {
349  DebugLoc DL;
350  if (I != MBB.end())
351  DL = I->getDebugLoc();
352 
353  MachineFunction *MF = MBB.getParent();
354  MachineFrameInfo &MFI = MF->getFrameInfo();
355 
356  unsigned Opcode;
357  bool IsScalableVector = true;
358  bool IsZvlsseg = true;
359  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
360  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
361  RISCV::LW : RISCV::LD;
362  IsScalableVector = false;
363  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
364  Opcode = RISCV::FLH;
365  IsScalableVector = false;
366  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
367  Opcode = RISCV::FLW;
368  IsScalableVector = false;
369  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
370  Opcode = RISCV::FLD;
371  IsScalableVector = false;
372  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
373  Opcode = RISCV::PseudoVRELOAD_M1;
374  IsZvlsseg = false;
375  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
376  Opcode = RISCV::PseudoVRELOAD_M2;
377  IsZvlsseg = false;
378  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
379  Opcode = RISCV::PseudoVRELOAD_M4;
380  IsZvlsseg = false;
381  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
382  Opcode = RISCV::PseudoVRELOAD_M8;
383  IsZvlsseg = false;
384  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
385  Opcode = RISCV::PseudoVRELOAD2_M1;
386  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
387  Opcode = RISCV::PseudoVRELOAD2_M2;
388  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
389  Opcode = RISCV::PseudoVRELOAD2_M4;
390  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
391  Opcode = RISCV::PseudoVRELOAD3_M1;
392  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
393  Opcode = RISCV::PseudoVRELOAD3_M2;
394  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
395  Opcode = RISCV::PseudoVRELOAD4_M1;
396  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
397  Opcode = RISCV::PseudoVRELOAD4_M2;
398  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
399  Opcode = RISCV::PseudoVRELOAD5_M1;
400  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
401  Opcode = RISCV::PseudoVRELOAD6_M1;
402  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
403  Opcode = RISCV::PseudoVRELOAD7_M1;
404  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
405  Opcode = RISCV::PseudoVRELOAD8_M1;
406  else
407  llvm_unreachable("Can't load this register from stack slot");
408 
409  if (IsScalableVector) {
413 
415  auto MIB = BuildMI(MBB, I, DL, get(Opcode), DstReg);
416  if (IsZvlsseg) {
417  // We need a GPR register to hold the incremented address for each subreg
418  // after expansion.
419  Register AddrInc =
420  MF->getRegInfo().createVirtualRegister(&RISCV::GPRRegClass);
421  MIB.addReg(AddrInc, RegState::Define);
422  }
423  MIB.addFrameIndex(FI).addMemOperand(MMO);
424  if (IsZvlsseg) {
425  // For spilling/reloading Zvlsseg registers, append the dummy field for
426  // the scaled vector length. The argument will be used when expanding
427  // these pseudo instructions.
428  MIB.addReg(RISCV::X0);
429  }
430  } else {
433  MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
434 
435  BuildMI(MBB, I, DL, get(Opcode), DstReg)
436  .addFrameIndex(FI)
437  .addImm(0)
438  .addMemOperand(MMO);
439  }
440 }
441 
444  const DebugLoc &DL, Register DstReg, uint64_t Val,
445  MachineInstr::MIFlag Flag) const {
446  MachineFunction *MF = MBB.getParent();
448  Register SrcReg = RISCV::X0;
449  Register Result = MRI.createVirtualRegister(&RISCV::GPRRegClass);
450  unsigned Num = 0;
451 
452  if (!STI.is64Bit() && !isInt<32>(Val))
453  report_fatal_error("Should only materialize 32-bit constants for RV32");
454 
456  RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());
457  assert(!Seq.empty());
458 
459  for (RISCVMatInt::Inst &Inst : Seq) {
460  // Write the final result to DstReg if it's the last instruction in the Seq.
461  // Otherwise, write the result to the temp register.
462  if (++Num == Seq.size())
463  Result = DstReg;
464 
465  if (Inst.Opc == RISCV::LUI) {
466  BuildMI(MBB, MBBI, DL, get(RISCV::LUI), Result)
467  .addImm(Inst.Imm)
468  .setMIFlag(Flag);
469  } else if (Inst.Opc == RISCV::ADDUW) {
470  BuildMI(MBB, MBBI, DL, get(RISCV::ADDUW), Result)
471  .addReg(SrcReg, RegState::Kill)
472  .addReg(RISCV::X0)
473  .setMIFlag(Flag);
474  } else {
475  BuildMI(MBB, MBBI, DL, get(Inst.Opc), Result)
476  .addReg(SrcReg, RegState::Kill)
477  .addImm(Inst.Imm)
478  .setMIFlag(Flag);
479  }
480  // Only the first instruction has X0 as its source.
481  SrcReg = Result;
482  }
483 }
484 
486  switch (Opc) {
487  default:
488  return RISCVCC::COND_INVALID;
489  case RISCV::BEQ:
490  return RISCVCC::COND_EQ;
491  case RISCV::BNE:
492  return RISCVCC::COND_NE;
493  case RISCV::BLT:
494  return RISCVCC::COND_LT;
495  case RISCV::BGE:
496  return RISCVCC::COND_GE;
497  case RISCV::BLTU:
498  return RISCVCC::COND_LTU;
499  case RISCV::BGEU:
500  return RISCVCC::COND_GEU;
501  }
502 }
503 
504 // The contents of values added to Cond are not examined outside of
505 // RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
506 // push BranchOpcode, Reg1, Reg2.
509  // Block ends with fall-through condbranch.
510  assert(LastInst.getDesc().isConditionalBranch() &&
511  "Unknown conditional branch");
512  Target = LastInst.getOperand(2).getMBB();
513  unsigned CC = getCondFromBranchOpc(LastInst.getOpcode());
514  Cond.push_back(MachineOperand::CreateImm(CC));
515  Cond.push_back(LastInst.getOperand(0));
516  Cond.push_back(LastInst.getOperand(1));
517 }
518 
520  switch (CC) {
521  default:
522  llvm_unreachable("Unknown condition code!");
523  case RISCVCC::COND_EQ:
524  return get(RISCV::BEQ);
525  case RISCVCC::COND_NE:
526  return get(RISCV::BNE);
527  case RISCVCC::COND_LT:
528  return get(RISCV::BLT);
529  case RISCVCC::COND_GE:
530  return get(RISCV::BGE);
531  case RISCVCC::COND_LTU:
532  return get(RISCV::BLTU);
533  case RISCVCC::COND_GEU:
534  return get(RISCV::BGEU);
535  }
536 }
537 
539  switch (CC) {
540  default:
541  llvm_unreachable("Unrecognized conditional branch");
542  case RISCVCC::COND_EQ:
543  return RISCVCC::COND_NE;
544  case RISCVCC::COND_NE:
545  return RISCVCC::COND_EQ;
546  case RISCVCC::COND_LT:
547  return RISCVCC::COND_GE;
548  case RISCVCC::COND_GE:
549  return RISCVCC::COND_LT;
550  case RISCVCC::COND_LTU:
551  return RISCVCC::COND_GEU;
552  case RISCVCC::COND_GEU:
553  return RISCVCC::COND_LTU;
554  }
555 }
556 
558  MachineBasicBlock *&TBB,
559  MachineBasicBlock *&FBB,
561  bool AllowModify) const {
562  TBB = FBB = nullptr;
563  Cond.clear();
564 
565  // If the block has no terminators, it just falls into the block after it.
567  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
568  return false;
569 
570  // Count the number of terminators and find the first unconditional or
571  // indirect branch.
572  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
573  int NumTerminators = 0;
574  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
575  J++) {
576  NumTerminators++;
577  if (J->getDesc().isUnconditionalBranch() ||
578  J->getDesc().isIndirectBranch()) {
579  FirstUncondOrIndirectBr = J.getReverse();
580  }
581  }
582 
583  // If AllowModify is true, we can erase any terminators after
584  // FirstUncondOrIndirectBR.
585  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
586  while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
587  std::next(FirstUncondOrIndirectBr)->eraseFromParent();
588  NumTerminators--;
589  }
590  I = FirstUncondOrIndirectBr;
591  }
592 
593  // We can't handle blocks that end in an indirect branch.
594  if (I->getDesc().isIndirectBranch())
595  return true;
596 
597  // We can't handle blocks with more than 2 terminators.
598  if (NumTerminators > 2)
599  return true;
600 
601  // Handle a single unconditional branch.
602  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
603  TBB = getBranchDestBlock(*I);
604  return false;
605  }
606 
607  // Handle a single conditional branch.
608  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
609  parseCondBranch(*I, TBB, Cond);
610  return false;
611  }
612 
613  // Handle a conditional branch followed by an unconditional branch.
614  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
615  I->getDesc().isUnconditionalBranch()) {
616  parseCondBranch(*std::prev(I), TBB, Cond);
617  FBB = getBranchDestBlock(*I);
618  return false;
619  }
620 
621  // Otherwise, we can't handle this.
622  return true;
623 }
624 
626  int *BytesRemoved) const {
627  if (BytesRemoved)
628  *BytesRemoved = 0;
630  if (I == MBB.end())
631  return 0;
632 
633  if (!I->getDesc().isUnconditionalBranch() &&
634  !I->getDesc().isConditionalBranch())
635  return 0;
636 
637  // Remove the branch.
638  if (BytesRemoved)
639  *BytesRemoved += getInstSizeInBytes(*I);
640  I->eraseFromParent();
641 
642  I = MBB.end();
643 
644  if (I == MBB.begin())
645  return 1;
646  --I;
647  if (!I->getDesc().isConditionalBranch())
648  return 1;
649 
650  // Remove the branch.
651  if (BytesRemoved)
652  *BytesRemoved += getInstSizeInBytes(*I);
653  I->eraseFromParent();
654  return 2;
655 }
656 
657 // Inserts a branch into the end of the specific MachineBasicBlock, returning
658 // the number of instructions inserted.
661  ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
662  if (BytesAdded)
663  *BytesAdded = 0;
664 
665  // Shouldn't be a fall through.
666  assert(TBB && "insertBranch must not be told to insert a fallthrough");
667  assert((Cond.size() == 3 || Cond.size() == 0) &&
668  "RISCV branch conditions have two components!");
669 
670  // Unconditional branch.
671  if (Cond.empty()) {
672  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
673  if (BytesAdded)
674  *BytesAdded += getInstSizeInBytes(MI);
675  return 1;
676  }
677 
678  // Either a one or two-way conditional branch.
679  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
680  MachineInstr &CondMI =
681  *BuildMI(&MBB, DL, getBrCond(CC)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
682  if (BytesAdded)
683  *BytesAdded += getInstSizeInBytes(CondMI);
684 
685  // One-way conditional branch.
686  if (!FBB)
687  return 1;
688 
689  // Two-way conditional branch.
690  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
691  if (BytesAdded)
692  *BytesAdded += getInstSizeInBytes(MI);
693  return 2;
694 }
695 
697  MachineBasicBlock &DestBB,
698  const DebugLoc &DL,
699  int64_t BrOffset,
700  RegScavenger *RS) const {
701  assert(RS && "RegScavenger required for long branching");
702  assert(MBB.empty() &&
703  "new block should be inserted for expanding unconditional branch");
704  assert(MBB.pred_size() == 1);
705 
706  MachineFunction *MF = MBB.getParent();
708 
709  if (!isInt<32>(BrOffset))
711  "Branch offsets outside of the signed 32-bit range not supported");
712 
713  // FIXME: A virtual register must be used initially, as the register
714  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
715  // uses the same workaround).
716  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
717  auto II = MBB.end();
718 
719  MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
720  .addReg(ScratchReg, RegState::Define | RegState::Dead)
721  .addMBB(&DestBB, RISCVII::MO_CALL);
722 
723  RS->enterBasicBlockEnd(MBB);
724  unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
725  MI.getIterator(), false, 0);
726  MRI.replaceRegWith(ScratchReg, Scav);
727  MRI.clearVirtRegs();
728  RS->setRegUsed(Scav);
729  return 8;
730 }
731 
734  assert((Cond.size() == 3) && "Invalid branch condition!");
735  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
736  Cond[0].setImm(getOppositeBranchCondition(CC));
737  return false;
738 }
739 
742  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
743  // The branch target is always the last operand.
744  int NumOp = MI.getNumExplicitOperands();
745  return MI.getOperand(NumOp - 1).getMBB();
746 }
747 
749  int64_t BrOffset) const {
750  unsigned XLen = STI.getXLen();
751  // Ideally we could determine the supported branch offset from the
752  // RISCVII::FormMask, but this can't be used for Pseudo instructions like
753  // PseudoBR.
754  switch (BranchOp) {
755  default:
756  llvm_unreachable("Unexpected opcode!");
757  case RISCV::BEQ:
758  case RISCV::BNE:
759  case RISCV::BLT:
760  case RISCV::BGE:
761  case RISCV::BLTU:
762  case RISCV::BGEU:
763  return isIntN(13, BrOffset);
764  case RISCV::JAL:
765  case RISCV::PseudoBR:
766  return isIntN(21, BrOffset);
767  case RISCV::PseudoJump:
768  return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
769  }
770 }
771 
773  unsigned Opcode = MI.getOpcode();
774 
775  switch (Opcode) {
776  default: {
777  if (MI.getParent() && MI.getParent()->getParent()) {
778  const auto MF = MI.getMF();
779  const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
780  const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
781  const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
782  const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
783  if (isCompressibleInst(MI, &ST, MRI, STI))
784  return 2;
785  }
786  return get(Opcode).getSize();
787  }
789  case TargetOpcode::IMPLICIT_DEF:
790  case TargetOpcode::KILL:
791  case TargetOpcode::DBG_VALUE:
792  return 0;
793  // These values are determined based on RISCVExpandAtomicPseudoInsts,
794  // RISCVExpandPseudoInsts and RISCVMCCodeEmitter, depending on where the
795  // pseudos are expanded.
796  case RISCV::PseudoCALLReg:
797  case RISCV::PseudoCALL:
798  case RISCV::PseudoJump:
799  case RISCV::PseudoTAIL:
800  case RISCV::PseudoLLA:
801  case RISCV::PseudoLA:
802  case RISCV::PseudoLA_TLS_IE:
803  case RISCV::PseudoLA_TLS_GD:
804  return 8;
805  case RISCV::PseudoAtomicLoadNand32:
806  case RISCV::PseudoAtomicLoadNand64:
807  return 20;
808  case RISCV::PseudoMaskedAtomicSwap32:
809  case RISCV::PseudoMaskedAtomicLoadAdd32:
810  case RISCV::PseudoMaskedAtomicLoadSub32:
811  return 28;
812  case RISCV::PseudoMaskedAtomicLoadNand32:
813  return 32;
814  case RISCV::PseudoMaskedAtomicLoadMax32:
815  case RISCV::PseudoMaskedAtomicLoadMin32:
816  return 44;
817  case RISCV::PseudoMaskedAtomicLoadUMax32:
818  case RISCV::PseudoMaskedAtomicLoadUMin32:
819  return 36;
820  case RISCV::PseudoCmpXchg32:
821  case RISCV::PseudoCmpXchg64:
822  return 16;
823  case RISCV::PseudoMaskedCmpXchg32:
824  return 32;
827  const MachineFunction &MF = *MI.getParent()->getParent();
828  const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
829  return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
830  *TM.getMCAsmInfo());
831  }
832  case RISCV::PseudoVSPILL2_M1:
833  case RISCV::PseudoVSPILL2_M2:
834  case RISCV::PseudoVSPILL2_M4:
835  case RISCV::PseudoVSPILL3_M1:
836  case RISCV::PseudoVSPILL3_M2:
837  case RISCV::PseudoVSPILL4_M1:
838  case RISCV::PseudoVSPILL4_M2:
839  case RISCV::PseudoVSPILL5_M1:
840  case RISCV::PseudoVSPILL6_M1:
841  case RISCV::PseudoVSPILL7_M1:
842  case RISCV::PseudoVSPILL8_M1:
843  case RISCV::PseudoVRELOAD2_M1:
844  case RISCV::PseudoVRELOAD2_M2:
845  case RISCV::PseudoVRELOAD2_M4:
846  case RISCV::PseudoVRELOAD3_M1:
847  case RISCV::PseudoVRELOAD3_M2:
848  case RISCV::PseudoVRELOAD4_M1:
849  case RISCV::PseudoVRELOAD4_M2:
850  case RISCV::PseudoVRELOAD5_M1:
851  case RISCV::PseudoVRELOAD6_M1:
852  case RISCV::PseudoVRELOAD7_M1:
853  case RISCV::PseudoVRELOAD8_M1: {
854  // The values are determined based on expandVSPILL and expandVRELOAD that
855  // expand the pseudos depending on NF.
856  unsigned NF = isRVVSpillForZvlsseg(Opcode)->first;
857  return 4 * (2 * NF - 1);
858  }
859  }
860 }
861 
863  const unsigned Opcode = MI.getOpcode();
864  switch (Opcode) {
865  default:
866  break;
867  case RISCV::FSGNJ_D:
868  case RISCV::FSGNJ_S:
869  // The canonical floating-point move is fsgnj rd, rs, rs.
870  return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
871  MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
872  case RISCV::ADDI:
873  case RISCV::ORI:
874  case RISCV::XORI:
875  return (MI.getOperand(1).isReg() &&
876  MI.getOperand(1).getReg() == RISCV::X0) ||
877  (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
878  }
879  return MI.isAsCheapAsAMove();
880 }
881 
884  if (MI.isMoveReg())
885  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
886  switch (MI.getOpcode()) {
887  default:
888  break;
889  case RISCV::ADDI:
890  // Operand 1 can be a frameindex but callers expect registers
891  if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
892  MI.getOperand(2).getImm() == 0)
893  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
894  break;
895  case RISCV::FSGNJ_D:
896  case RISCV::FSGNJ_S:
897  // The canonical floating-point move is fsgnj rd, rs, rs.
898  if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
899  MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
900  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
901  break;
902  }
903  return None;
904 }
905 
907  StringRef &ErrInfo) const {
908  const MCInstrInfo *MCII = STI.getInstrInfo();
909  MCInstrDesc const &Desc = MCII->get(MI.getOpcode());
910 
911  for (auto &OI : enumerate(Desc.operands())) {
912  unsigned OpType = OI.value().OperandType;
913  if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
915  const MachineOperand &MO = MI.getOperand(OI.index());
916  if (MO.isImm()) {
917  int64_t Imm = MO.getImm();
918  bool Ok;
919  switch (OpType) {
920  default:
921  llvm_unreachable("Unexpected operand type");
923  Ok = isUInt<4>(Imm);
924  break;
926  Ok = isUInt<5>(Imm);
927  break;
929  Ok = isUInt<12>(Imm);
930  break;
932  Ok = isInt<12>(Imm);
933  break;
935  Ok = isUInt<20>(Imm);
936  break;
938  if (STI.getTargetTriple().isArch64Bit())
939  Ok = isUInt<6>(Imm);
940  else
941  Ok = isUInt<5>(Imm);
942  break;
943  }
944  if (!Ok) {
945  ErrInfo = "Invalid immediate";
946  return false;
947  }
948  }
949  }
950  }
951 
952  return true;
953 }
954 
955 // Return true if get the base operand, byte offset of an instruction and the
956 // memory width. Width is the size of memory that is being loaded/stored.
958  const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
959  unsigned &Width, const TargetRegisterInfo *TRI) const {
960  if (!LdSt.mayLoadOrStore())
961  return false;
962 
963  // Here we assume the standard RISC-V ISA, which uses a base+offset
964  // addressing mode. You'll need to relax these conditions to support custom
965  // load/stores instructions.
966  if (LdSt.getNumExplicitOperands() != 3)
967  return false;
968  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
969  return false;
970 
971  if (!LdSt.hasOneMemOperand())
972  return false;
973 
974  Width = (*LdSt.memoperands_begin())->getSize();
975  BaseReg = &LdSt.getOperand(1);
976  Offset = LdSt.getOperand(2).getImm();
977  return true;
978 }
979 
981  const MachineInstr &MIa, const MachineInstr &MIb) const {
982  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
983  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
984 
987  return false;
988 
989  // Retrieve the base register, offset from the base register and width. Width
990  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
991  // base registers are identical, and the offset of a lower memory access +
992  // the width doesn't overlap the offset of a higher memory access,
993  // then the memory accesses are different.
995  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
996  int64_t OffsetA = 0, OffsetB = 0;
997  unsigned int WidthA = 0, WidthB = 0;
998  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
999  getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
1000  if (BaseOpA->isIdenticalTo(*BaseOpB)) {
1001  int LowOffset = std::min(OffsetA, OffsetB);
1002  int HighOffset = std::max(OffsetA, OffsetB);
1003  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
1004  if (LowOffset + LowWidth <= HighOffset)
1005  return true;
1006  }
1007  }
1008  return false;
1009 }
1010 
1011 std::pair<unsigned, unsigned>
1013  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
1014  return std::make_pair(TF & Mask, TF & ~Mask);
1015 }
1016 
1019  using namespace RISCVII;
1020  static const std::pair<unsigned, const char *> TargetFlags[] = {
1021  {MO_CALL, "riscv-call"},
1022  {MO_PLT, "riscv-plt"},
1023  {MO_LO, "riscv-lo"},
1024  {MO_HI, "riscv-hi"},
1025  {MO_PCREL_LO, "riscv-pcrel-lo"},
1026  {MO_PCREL_HI, "riscv-pcrel-hi"},
1027  {MO_GOT_HI, "riscv-got-hi"},
1028  {MO_TPREL_LO, "riscv-tprel-lo"},
1029  {MO_TPREL_HI, "riscv-tprel-hi"},
1030  {MO_TPREL_ADD, "riscv-tprel-add"},
1031  {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
1032  {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
1033  return makeArrayRef(TargetFlags);
1034 }
1036  MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
1037  const Function &F = MF.getFunction();
1038 
1039  // Can F be deduplicated by the linker? If it can, don't outline from it.
1040  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
1041  return false;
1042 
1043  // Don't outline from functions with section markings; the program could
1044  // expect that all the code is in the named section.
1045  if (F.hasSection())
1046  return false;
1047 
1048  // It's safe to outline from MF.
1049  return true;
1050 }
1051 
1053  unsigned &Flags) const {
1054  // More accurate safety checking is done in getOutliningCandidateInfo.
1055  return true;
1056 }
1057 
// Enum values indicating how an outlined call should be constructed.
enum MachineOutlinerConstructionID {
  MachineOutlinerDefault // call t0, <fn>; return via jr t0.
};
1062 
1064  std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
1065 
1066  // First we need to filter out candidates where the X5 register (IE t0) can't
1067  // be used to setup the function call.
1068  auto CannotInsertCall = [](outliner::Candidate &C) {
1069  const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
1070 
1071  C.initLRU(*TRI);
1072  LiveRegUnits LRU = C.LRU;
1073  return !LRU.available(RISCV::X5);
1074  };
1075 
1076  llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
1077 
1078  // If the sequence doesn't have enough candidates left, then we're done.
1079  if (RepeatedSequenceLocs.size() < 2)
1080  return outliner::OutlinedFunction();
1081 
1082  unsigned SequenceSize = 0;
1083 
1084  auto I = RepeatedSequenceLocs[0].front();
1085  auto E = std::next(RepeatedSequenceLocs[0].back());
1086  for (; I != E; ++I)
1087  SequenceSize += getInstSizeInBytes(*I);
1088 
1089  // call t0, function = 8 bytes.
1090  unsigned CallOverhead = 8;
1091  for (auto &C : RepeatedSequenceLocs)
1092  C.setCallInfo(MachineOutlinerDefault, CallOverhead);
1093 
1094  // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
1095  unsigned FrameOverhead = 4;
1096  if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
1097  .getFeatureBits()[RISCV::FeatureStdExtC])
1098  FrameOverhead = 2;
1099 
1100  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
1101  FrameOverhead, MachineOutlinerDefault);
1102 }
1103 
1106  unsigned Flags) const {
1107  MachineInstr &MI = *MBBI;
1108  MachineBasicBlock *MBB = MI.getParent();
1109  const TargetRegisterInfo *TRI =
1111 
1112  // Positions generally can't safely be outlined.
1113  if (MI.isPosition()) {
1114  // We can manually strip out CFI instructions later.
1115  if (MI.isCFIInstruction())
1117 
1119  }
1120 
1121  // Don't trust the user to write safe inline assembly.
1122  if (MI.isInlineAsm())
1124 
1125  // We can't outline branches to other basic blocks.
1126  if (MI.isTerminator() && !MBB->succ_empty())
1128 
1129  // We need support for tail calls to outlined functions before return
1130  // statements can be allowed.
1131  if (MI.isReturn())
1133 
1134  // Don't allow modifying the X5 register which we use for return addresses for
1135  // these outlined functions.
1136  if (MI.modifiesRegister(RISCV::X5, TRI) ||
1137  MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
1139 
1140  // Make sure the operands don't reference something unsafe.
1141  for (const auto &MO : MI.operands())
1142  if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI() || MO.isJTI())
1144 
1145  // Don't allow instructions which won't be materialized to impact outlining
1146  // analysis.
1147  if (MI.isMetaInstruction())
1149 
1151 }
1152 
1155  const outliner::OutlinedFunction &OF) const {
1156 
1157  // Strip out any CFI instructions
1158  bool Changed = true;
1159  while (Changed) {
1160  Changed = false;
1161  auto I = MBB.begin();
1162  auto E = MBB.end();
1163  for (; I != E; ++I) {
1164  if (I->isCFIInstruction()) {
1165  I->removeFromParent();
1166  Changed = true;
1167  break;
1168  }
1169  }
1170  }
1171 
1172  MBB.addLiveIn(RISCV::X5);
1173 
1174  // Add in a return instruction to the end of the outlined frame.
1175  MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
1176  .addReg(RISCV::X0, RegState::Define)
1177  .addReg(RISCV::X5)
1178  .addImm(0));
1179 }
1180 
1183  MachineFunction &MF, const outliner::Candidate &C) const {
1184 
1185  // Add in a call instruction to the outlined function at the given location.
1186  It = MBB.insert(It,
1187  BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
1188  .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
1189  RISCVII::MO_CALL));
1190  return It;
1191 }
1192 
// Helper macros to expand one logical VFMA case into a `case` label per
// LMUL (and, for SPLATS, per scalar element width).
// clang-format off
#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL)                                \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)                                       \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8):                                      \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4):                                 \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2):                                 \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VFMA_SPLATS(OP)                                                   \
  CASE_VFMA_OPCODE_LMULS(OP, VF16):                                            \
  case CASE_VFMA_OPCODE_LMULS(OP, VF32):                                       \
  case CASE_VFMA_OPCODE_LMULS(OP, VF64)
// clang-format on
1211 
1213  unsigned &SrcOpIdx1,
1214  unsigned &SrcOpIdx2) const {
1215  const MCInstrDesc &Desc = MI.getDesc();
1216  if (!Desc.isCommutable())
1217  return false;
1218 
1219  switch (MI.getOpcode()) {
1220  case CASE_VFMA_SPLATS(FMADD):
1221  case CASE_VFMA_SPLATS(FMSUB):
1222  case CASE_VFMA_SPLATS(FMACC):
1223  case CASE_VFMA_SPLATS(FMSAC):
1224  case CASE_VFMA_SPLATS(FNMADD):
1225  case CASE_VFMA_SPLATS(FNMSUB):
1226  case CASE_VFMA_SPLATS(FNMACC):
1227  case CASE_VFMA_SPLATS(FNMSAC):
1228  case CASE_VFMA_OPCODE_LMULS(FMACC, VV):
1229  case CASE_VFMA_OPCODE_LMULS(FMSAC, VV):
1230  case CASE_VFMA_OPCODE_LMULS(FNMACC, VV):
1231  case CASE_VFMA_OPCODE_LMULS(FNMSAC, VV):
1232  case CASE_VFMA_OPCODE_LMULS(MADD, VX):
1233  case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
1234  case CASE_VFMA_OPCODE_LMULS(MACC, VX):
1235  case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
1236  case CASE_VFMA_OPCODE_LMULS(MACC, VV):
1237  case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
1238  // If the tail policy is undisturbed we can't commute.
1239  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
1240  if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
1241  return false;
1242 
1243  // For these instructions we can only swap operand 1 and operand 3 by
1244  // changing the opcode.
1245  unsigned CommutableOpIdx1 = 1;
1246  unsigned CommutableOpIdx2 = 3;
1247  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
1248  CommutableOpIdx2))
1249  return false;
1250  return true;
1251  }
1252  case CASE_VFMA_OPCODE_LMULS(FMADD, VV):
1253  case CASE_VFMA_OPCODE_LMULS(FMSUB, VV):
1254  case CASE_VFMA_OPCODE_LMULS(FNMADD, VV):
1255  case CASE_VFMA_OPCODE_LMULS(FNMSUB, VV):
1256  case CASE_VFMA_OPCODE_LMULS(MADD, VV):
1257  case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
1258  // If the tail policy is undisturbed we can't commute.
1259  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
1260  if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
1261  return false;
1262 
1263  // For these instructions we have more freedom. We can commute with the
1264  // other multiplicand or with the addend/subtrahend/minuend.
1265 
1266  // Any fixed operand must be from source 1, 2 or 3.
1267  if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
1268  return false;
1269  if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
1270  return false;
1271 
1272  // It both ops are fixed one must be the tied source.
1273  if (SrcOpIdx1 != CommuteAnyOperandIndex &&
1274  SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
1275  return false;
1276 
1277  // Look for two different register operands assumed to be commutable
1278  // regardless of the FMA opcode. The FMA opcode is adjusted later if
1279  // needed.
1280  if (SrcOpIdx1 == CommuteAnyOperandIndex ||
1281  SrcOpIdx2 == CommuteAnyOperandIndex) {
1282  // At least one of operands to be commuted is not specified and
1283  // this method is free to choose appropriate commutable operands.
1284  unsigned CommutableOpIdx1 = SrcOpIdx1;
1285  if (SrcOpIdx1 == SrcOpIdx2) {
1286  // Both of operands are not fixed. Set one of commutable
1287  // operands to the tied source.
1288  CommutableOpIdx1 = 1;
1289  } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
1290  // Only one of the operands is not fixed.
1291  CommutableOpIdx1 = SrcOpIdx2;
1292  }
1293 
1294  // CommutableOpIdx1 is well defined now. Let's choose another commutable
1295  // operand and assign its index to CommutableOpIdx2.
1296  unsigned CommutableOpIdx2;
1297  if (CommutableOpIdx1 != 1) {
1298  // If we haven't already used the tied source, we must use it now.
1299  CommutableOpIdx2 = 1;
1300  } else {
1301  Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
1302 
1303  // The commuted operands should have different registers.
1304  // Otherwise, the commute transformation does not change anything and
1305  // is useless. We use this as a hint to make our decision.
1306  if (Op1Reg != MI.getOperand(2).getReg())
1307  CommutableOpIdx2 = 2;
1308  else
1309  CommutableOpIdx2 = 3;
1310  }
1311 
1312  // Assign the found pair of commutable indices to SrcOpIdx1 and
1313  // SrcOpIdx2 to return those values.
1314  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
1315  CommutableOpIdx2))
1316  return false;
1317  }
1318 
1319  return true;
1320  }
1321  }
1322 
1323  return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
1324 }
1325 
// Helper macros that rewrite `Opc` from one VFMA pseudo family to its
// commuted counterpart, expanded per LMUL (and per element width for SPLATS).
#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)               \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL:                                \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL;                             \
    break;

#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                      \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                      \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                      \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                      \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
  CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF16)                            \
  CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF32)                            \
  CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF64)
1344 
1346  bool NewMI,
1347  unsigned OpIdx1,
1348  unsigned OpIdx2) const {
1349  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
1350  if (NewMI)
1351  return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
1352  return MI;
1353  };
1354 
1355  switch (MI.getOpcode()) {
1356  case CASE_VFMA_SPLATS(FMACC):
1357  case CASE_VFMA_SPLATS(FMADD):
1358  case CASE_VFMA_SPLATS(FMSAC):
1359  case CASE_VFMA_SPLATS(FMSUB):
1360  case CASE_VFMA_SPLATS(FNMACC):
1361  case CASE_VFMA_SPLATS(FNMADD):
1362  case CASE_VFMA_SPLATS(FNMSAC):
1363  case CASE_VFMA_SPLATS(FNMSUB):
1364  case CASE_VFMA_OPCODE_LMULS(FMACC, VV):
1365  case CASE_VFMA_OPCODE_LMULS(FMSAC, VV):
1366  case CASE_VFMA_OPCODE_LMULS(FNMACC, VV):
1367  case CASE_VFMA_OPCODE_LMULS(FNMSAC, VV):
1368  case CASE_VFMA_OPCODE_LMULS(MADD, VX):
1369  case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
1370  case CASE_VFMA_OPCODE_LMULS(MACC, VX):
1371  case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
1372  case CASE_VFMA_OPCODE_LMULS(MACC, VV):
1373  case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
1374  // It only make sense to toggle these between clobbering the
1375  // addend/subtrahend/minuend one of the multiplicands.
1376  assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
1377  assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
1378  unsigned Opc;
1379  switch (MI.getOpcode()) {
1380  default:
1381  llvm_unreachable("Unexpected opcode");
1382  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
1383  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
1390  CASE_VFMA_CHANGE_OPCODE_LMULS(FMACC, FMADD, VV)
1394  CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VX)
1395  CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VX)
1396  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VX)
1397  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VX)
1398  CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VV)
1399  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VV)
1400  }
1401 
1402  auto &WorkingMI = cloneIfNew(MI);
1403  WorkingMI.setDesc(get(Opc));
1404  return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1405  OpIdx1, OpIdx2);
1406  }
1407  case CASE_VFMA_OPCODE_LMULS(FMADD, VV):
1408  case CASE_VFMA_OPCODE_LMULS(FMSUB, VV):
1409  case CASE_VFMA_OPCODE_LMULS(FNMADD, VV):
1410  case CASE_VFMA_OPCODE_LMULS(FNMSUB, VV):
1411  case CASE_VFMA_OPCODE_LMULS(MADD, VV):
1412  case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
1413  assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
1414  // If one of the operands, is the addend we need to change opcode.
1415  // Otherwise we're just swapping 2 of the multiplicands.
1416  if (OpIdx1 == 3 || OpIdx2 == 3) {
1417  unsigned Opc;
1418  switch (MI.getOpcode()) {
1419  default:
1420  llvm_unreachable("Unexpected opcode");
1421  CASE_VFMA_CHANGE_OPCODE_LMULS(FMADD, FMACC, VV)
1425  CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VV)
1426  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VV)
1427  }
1428 
1429  auto &WorkingMI = cloneIfNew(MI);
1430  WorkingMI.setDesc(get(Opc));
1431  return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1432  OpIdx1, OpIdx2);
1433  }
1434  // Let the default code handle it.
1435  break;
1436  }
1437  }
1438 
1439  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
1440 }
1441 
1442 #undef CASE_VFMA_CHANGE_OPCODE_SPLATS
1443 #undef CASE_VFMA_CHANGE_OPCODE_LMULS
1444 #undef CASE_VFMA_CHANGE_OPCODE_COMMON
1445 #undef CASE_VFMA_SPLATS
1446 #undef CASE_VFMA_OPCODE_LMULS
1447 #undef CASE_VFMA_OPCODE_COMMON
1448 
// Helper macros for tied widening ops (vwadd.wv etc.): one set expands the
// `_TIED` pseudo per LMUL into case labels, the other rewrites `NewOpc` to
// the untied form of the same pseudo.
// clang-format off
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL)                                    \
  RISCV::PseudoV##OP##_##LMUL##_TIED

#define CASE_WIDEOP_OPCODE_LMULS(OP)                                           \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF8):                                          \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF4):                                     \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2):                                     \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M1):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M2):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
// clang-format on

#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL)                             \
  case RISCV::PseudoV##OP##_##LMUL##_TIED:                                     \
    NewOpc = RISCV::PseudoV##OP##_##LMUL;                                      \
    break;

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
1474 
1477  switch (MI.getOpcode()) {
1478  default:
1479  break;
1480  case CASE_WIDEOP_OPCODE_LMULS(FWADD_WV):
1481  case CASE_WIDEOP_OPCODE_LMULS(FWSUB_WV):
1482  case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
1483  case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
1484  case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
1485  case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
1486  // clang-format off
1487  unsigned NewOpc;
1488  switch (MI.getOpcode()) {
1489  default:
1490  llvm_unreachable("Unexpected opcode");
1497  }
1498  //clang-format on
1499 
1500  MachineInstrBuilder MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
1501  .add(MI.getOperand(0))
1502  .add(MI.getOperand(1))
1503  .add(MI.getOperand(2))
1504  .add(MI.getOperand(3))
1505  .add(MI.getOperand(4));
1506  MIB.copyImplicitOps(MI);
1507 
1508  if (LV) {
1509  unsigned NumOps = MI.getNumOperands();
1510  for (unsigned I = 1; I < NumOps; ++I) {
1511  MachineOperand &Op = MI.getOperand(I);
1512  if (Op.isReg() && Op.isKill())
1513  LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
1514  }
1515  }
1516 
1517  return MIB;
1518  }
1519  }
1520 
1521  return nullptr;
1522 }
1523 
1524 #undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
1525 #undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
1526 #undef CASE_WIDEOP_OPCODE_LMULS
1527 #undef CASE_WIDEOP_OPCODE_COMMON
1528 
1532  const DebugLoc &DL,
1533  int64_t Amount,
1534  MachineInstr::MIFlag Flag) const {
1535  assert(Amount > 0 && "There is no need to get VLEN scaled value.");
1536  assert(Amount % 8 == 0 &&
1537  "Reserve the stack by the multiple of one vector size.");
1538 
1540  const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
1541  int64_t NumOfVReg = Amount / 8;
1542 
1543  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1544  BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL)
1545  .setMIFlag(Flag);
1546  assert(isInt<32>(NumOfVReg) &&
1547  "Expect the number of vector registers within 32-bits.");
1548  if (isPowerOf2_32(NumOfVReg)) {
1549  uint32_t ShiftAmount = Log2_32(NumOfVReg);
1550  if (ShiftAmount == 0)
1551  return VL;
1552  BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
1553  .addReg(VL, RegState::Kill)
1554  .addImm(ShiftAmount)
1555  .setMIFlag(Flag);
1556  } else if (isPowerOf2_32(NumOfVReg - 1)) {
1557  Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1558  uint32_t ShiftAmount = Log2_32(NumOfVReg - 1);
1559  BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), ScaledRegister)
1560  .addReg(VL)
1561  .addImm(ShiftAmount)
1562  .setMIFlag(Flag);
1563  BuildMI(MBB, II, DL, TII->get(RISCV::ADD), VL)
1564  .addReg(ScaledRegister, RegState::Kill)
1565  .addReg(VL, RegState::Kill)
1566  .setMIFlag(Flag);
1567  } else if (isPowerOf2_32(NumOfVReg + 1)) {
1568  Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1569  uint32_t ShiftAmount = Log2_32(NumOfVReg + 1);
1570  BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), ScaledRegister)
1571  .addReg(VL)
1572  .addImm(ShiftAmount)
1573  .setMIFlag(Flag);
1574  BuildMI(MBB, II, DL, TII->get(RISCV::SUB), VL)
1575  .addReg(ScaledRegister, RegState::Kill)
1576  .addReg(VL, RegState::Kill)
1577  .setMIFlag(Flag);
1578  } else {
1579  Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1580  if (!isInt<12>(NumOfVReg))
1581  movImm(MBB, II, DL, N, NumOfVReg);
1582  else {
1583  BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), N)
1584  .addReg(RISCV::X0)
1585  .addImm(NumOfVReg)
1586  .setMIFlag(Flag);
1587  }
1588  if (!MF.getSubtarget<RISCVSubtarget>().hasStdExtM())
1590  MF.getFunction(),
1591  "M-extension must be enabled to calculate the vscaled size/offset."});
1592  BuildMI(MBB, II, DL, TII->get(RISCV::MUL), VL)
1593  .addReg(VL, RegState::Kill)
1595  .setMIFlag(Flag);
1596  }
1597 
1598  return VL;
1599 }
1600 
1601 static bool isRVVWholeLoadStore(unsigned Opcode) {
1602  switch (Opcode) {
1603  default:
1604  return false;
1605  case RISCV::VS1R_V:
1606  case RISCV::VS2R_V:
1607  case RISCV::VS4R_V:
1608  case RISCV::VS8R_V:
1609  case RISCV::VL1RE8_V:
1610  case RISCV::VL2RE8_V:
1611  case RISCV::VL4RE8_V:
1612  case RISCV::VL8RE8_V:
1613  case RISCV::VL1RE16_V:
1614  case RISCV::VL2RE16_V:
1615  case RISCV::VL4RE16_V:
1616  case RISCV::VL8RE16_V:
1617  case RISCV::VL1RE32_V:
1618  case RISCV::VL2RE32_V:
1619  case RISCV::VL4RE32_V:
1620  case RISCV::VL8RE32_V:
1621  case RISCV::VL1RE64_V:
1622  case RISCV::VL2RE64_V:
1623  case RISCV::VL4RE64_V:
1624  case RISCV::VL8RE64_V:
1625  return true;
1626  }
1627 }
1628 
1629 bool RISCVInstrInfo::isRVVSpill(const MachineInstr &MI, bool CheckFIs) const {
1630  // RVV lacks any support for immediate addressing for stack addresses, so be
1631  // conservative.
1632  unsigned Opcode = MI.getOpcode();
1633  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
1634  !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
1635  return false;
1636  return !CheckFIs || any_of(MI.operands(), [](const MachineOperand &MO) {
1637  return MO.isFI();
1638  });
1639 }
1640 
1643  switch (Opcode) {
1644  default:
1645  return None;
1646  case RISCV::PseudoVSPILL2_M1:
1647  case RISCV::PseudoVRELOAD2_M1:
1648  return std::make_pair(2u, 1u);
1649  case RISCV::PseudoVSPILL2_M2:
1650  case RISCV::PseudoVRELOAD2_M2:
1651  return std::make_pair(2u, 2u);
1652  case RISCV::PseudoVSPILL2_M4:
1653  case RISCV::PseudoVRELOAD2_M4:
1654  return std::make_pair(2u, 4u);
1655  case RISCV::PseudoVSPILL3_M1:
1656  case RISCV::PseudoVRELOAD3_M1:
1657  return std::make_pair(3u, 1u);
1658  case RISCV::PseudoVSPILL3_M2:
1659  case RISCV::PseudoVRELOAD3_M2:
1660  return std::make_pair(3u, 2u);
1661  case RISCV::PseudoVSPILL4_M1:
1662  case RISCV::PseudoVRELOAD4_M1:
1663  return std::make_pair(4u, 1u);
1664  case RISCV::PseudoVSPILL4_M2:
1665  case RISCV::PseudoVRELOAD4_M2:
1666  return std::make_pair(4u, 2u);
1667  case RISCV::PseudoVSPILL5_M1:
1668  case RISCV::PseudoVRELOAD5_M1:
1669  return std::make_pair(5u, 1u);
1670  case RISCV::PseudoVSPILL6_M1:
1671  case RISCV::PseudoVRELOAD6_M1:
1672  return std::make_pair(6u, 1u);
1673  case RISCV::PseudoVSPILL7_M1:
1674  case RISCV::PseudoVRELOAD7_M1:
1675  return std::make_pair(7u, 1u);
1676  case RISCV::PseudoVSPILL8_M1:
1677  case RISCV::PseudoVRELOAD8_M1:
1678  return std::make_pair(8u, 1u);
1679  }
1680 }
llvm::ISD::SUB
@ SUB
Definition: ISDOpcodes.h:240
llvm::RISCVMatInt::Inst
Definition: RISCVMatInt.h:21
llvm::RISCVInstrInfo::reverseBranchCondition
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Definition: RISCVInstrInfo.cpp:732
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:103
llvm::RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Definition: RISCVInstrInfo.cpp:1018
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm
---------------------— PointerInfo ------------------------------------—
Definition: AllocatorList.h:23
llvm::MachineInstrBuilder::copyImplicitOps
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
Definition: MachineInstrBuilder.h:315
llvm::HexagonMCInstrInfo::getDesc
const MCInstrDesc & getDesc(MCInstrInfo const &MCII, MCInst const &MCI)
Definition: HexagonMCInstrInfo.cpp:248
llvm::RISCVInstrInfo::RISCVInstrInfo
RISCVInstrInfo(RISCVSubtarget &STI)
Definition: RISCVInstrInfo.cpp:50
llvm::RISCVInstrInfo::getBrCond
const MCInstrDesc & getBrCond(RISCVCC::CondCode CC) const
Definition: RISCVInstrInfo.cpp:519
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::RISCVCC::COND_GEU
@ COND_GEU
Definition: RISCVInstrInfo.h:35
llvm::RISCVOp::OPERAND_SIMM12
@ OPERAND_SIMM12
Definition: RISCVBaseInfo.h:177
llvm::RISCVCC::getOppositeBranchCondition
CondCode getOppositeBranchCondition(CondCode)
Definition: RISCVInstrInfo.cpp:538
llvm::MachineRegisterInfo::createVirtualRegister
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Definition: MachineRegisterInfo.cpp:158
llvm::DiagnosticInfoUnsupported
Diagnostic information for unsupported feature in backend.
Definition: DiagnosticInfo.h:1004
llvm::RISCVCC::COND_INVALID
@ COND_INVALID
Definition: RISCVInstrInfo.h:36
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:52
llvm::MachineInstr::mayLoadOrStore
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
Definition: MachineInstr.h:1028
llvm::MachineInstr::getNumExplicitOperands
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
Definition: MachineInstr.cpp:726
llvm::MachineInstrBuilder::add
const MachineInstrBuilder & add(const MachineOperand &MO) const
Definition: MachineInstrBuilder.h:224
llvm::Function
Definition: Function.h:61
llvm::RISCVInstrInfo::getOutliningType
virtual outliner::InstrType getOutliningType(MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
Definition: RISCVInstrInfo.cpp:1105
llvm::MachineInstr::memoperands_begin
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:708
llvm::RISCVOp::OPERAND_LAST_RISCV_IMM
@ OPERAND_LAST_RISCV_IMM
Definition: RISCVBaseInfo.h:180
llvm::RegScavenger::scavengeRegisterBackwards
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the p...
Definition: RegisterScavenging.cpp:564
llvm::ARM_MB::LD
@ LD
Definition: ARMBaseInfo.h:72
contains
return AArch64::GPR64RegClass contains(Reg)
llvm::Target
Target - Wrapper for Target specific information.
Definition: TargetRegistry.h:137
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::RISCVOp::OPERAND_UIMMLOG2XLEN
@ OPERAND_UIMMLOG2XLEN
Definition: RISCVBaseInfo.h:179
llvm::MachineFunction::getMachineMemOperand
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Definition: MachineFunction.cpp:430
llvm::RISCVII::MO_PCREL_LO
@ MO_PCREL_LO
Definition: RISCVBaseInfo.h:152
llvm::enumerate
detail::enumerator< R > enumerate(R &&TheRange)
Given an input range, returns a new range whose values are are pair (A,B) such that A is the 0-based ...
Definition: STLExtras.h:1977
ErrorHandling.h
llvm::erase_if
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:1728
llvm::ISD::EH_LABEL
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:988
llvm::RISCVMatInt::generateInstSeq
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures)
Definition: RISCVMatInt.cpp:124
MCInstBuilder.h
llvm::IRSimilarity::Invisible
@ Invisible
Definition: IRSimilarityIdentifier.h:75
llvm::RISCVTargetMachine
Definition: RISCVTargetMachine.h:23
llvm::LiveRegUnits::available
bool available(MCPhysReg Reg) const
Returns true if no part of physical register Reg is live.
Definition: LiveRegUnits.h:116
llvm::TargetSubtargetInfo::getRegisterInfo
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
Definition: TargetSubtargetInfo.h:124
llvm::TargetRegisterInfo
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Definition: TargetRegisterInfo.h:233
llvm::MCRegisterInfo::getEncodingValue
uint16_t getEncodingValue(MCRegister RegNo) const
Returns the encoding for RegNo.
Definition: MCRegisterInfo.h:553
llvm::MipsII::MO_TPREL_HI
@ MO_TPREL_HI
MO_TPREL_HI/LO - Represents the hi and low part of the offset from.
Definition: MipsBaseInfo.h:73
llvm::Function::getContext
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:321
llvm::X86ISD::FNMADD
@ FNMADD
Definition: X86ISelLowering.h:555
llvm::MachineInstr::getDesc
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:486
llvm::outliner::InstrType
InstrType
Represents how an instruction should be mapped by the outliner.
Definition: MachineOutliner.h:34
llvm::MachineMemOperand
A description of a memory reference used in the backend.
Definition: MachineMemOperand.h:128
llvm::M68kII::MO_PLT
@ MO_PLT
On a symbol operand this indicates that the immediate is offset to the PLT entry of symbol name from ...
Definition: M68kBaseInfo.h:114
llvm::PPCISD::FNMSUB
@ FNMSUB
FNMSUB - Negated multiply-subtract instruction.
Definition: PPCISelLowering.h:170
llvm::RISCVInstrInfo::insertIndirectBranch
unsigned insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS=nullptr) const override
Definition: RISCVInstrInfo.cpp:696
llvm::RISCVInstrInfo::STI
const RISCVSubtarget & STI
Definition: RISCVInstrInfo.h:181
llvm::Optional
Definition: APInt.h:33
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
STLExtras.h
llvm::MCInst
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
llvm::RISCVInstrInfo::getBranchDestBlock
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:741
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:491
llvm::outliner::OutlinedFunction
The information necessary to create an outlined function for some class of candidate.
Definition: MachineOutliner.h:164
llvm::RISCVII::hasVecPolicyOp
static bool hasVecPolicyOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:141
RISCVMatInt.h
llvm::RISCVInstrInfo::isLoadFromStackSlot
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition: RISCVInstrInfo.cpp:63
llvm::BitmaskEnumDetail::Mask
std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::RISCVInstrInfo::isBranchOffsetInRange
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
Definition: RISCVInstrInfo.cpp:748
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1567
RISCVGenInstrInfo
llvm::MachineInstr::hasOneMemOperand
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
Definition: MachineInstr.h:723
F
#define F(x, y, z)
Definition: MD5.cpp:56
llvm::MachineFunction::iterator
BasicBlockListType::iterator iterator
Definition: MachineFunction.h:790
llvm::MachineInstr::hasOrderedMemoryRef
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
Definition: MachineInstr.cpp:1376
MachineRegisterInfo.h
llvm::ISD::INLINEASM
@ INLINEASM
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:980
llvm::RISCVInstrInfo::isRVVSpillForZvlsseg
Optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode) const
Definition: RISCVInstrInfo.cpp:1642
llvm::RISCVSubtarget::is64Bit
bool is64Bit() const
Definition: RISCVSubtarget.h:126
llvm::MachineBasicBlock::pred_size
unsigned pred_size() const
Definition: MachineBasicBlock.h:328
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:636
llvm::TargetInstrInfo::commuteInstructionImpl
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
Definition: TargetInstrInfo.cpp:167
llvm::MachineInstrBuilder::addMBB
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:146
llvm::RISCVCC::COND_LT
@ COND_LT
Definition: RISCVInstrInfo.h:32
llvm::MachineOperand::CreateImm
static MachineOperand CreateImm(int64_t Val)
Definition: MachineOperand.h:773
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::MachineOperand::getImm
int64_t getImm() const
Definition: MachineOperand.h:537
parseCondBranch
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
Definition: RISCVInstrInfo.cpp:507
llvm::RISCVInstrInfo::findCommutedOpIndices
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
Definition: RISCVInstrInfo.cpp:1212
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:499
llvm::IRSimilarity::Illegal
@ Illegal
Definition: IRSimilarityIdentifier.h:75
llvm::RISCVInstrInfo::analyzeBranch
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Definition: RISCVInstrInfo.cpp:557
llvm::RISCVII::MO_TPREL_ADD
@ MO_TPREL_ADD
Definition: RISCVBaseInfo.h:157
llvm::TargetRegisterClass
Definition: TargetRegisterInfo.h:46
LiveVariables.h
TII
const HexagonInstrInfo * TII
Definition: HexagonCopyToCombine.cpp:129
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:596
llvm::LiveVariables::replaceKillInstruction
void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
Definition: LiveVariables.cpp:674
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:195
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:49
llvm::RISCVCC::COND_LTU
@ COND_LTU
Definition: RISCVInstrInfo.h:34
llvm::RISCVOp::OPERAND_UIMM5
@ OPERAND_UIMM5
Definition: RISCVBaseInfo.h:174
llvm::MCInstrDesc::isCommutable
bool isCommutable() const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
Definition: MCInstrDesc.h:472
llvm::MCID::Flag
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:146
llvm::RISCVInstrInfo::decomposeMachineOperandsTargetFlags
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
Definition: RISCVInstrInfo.cpp:1012
llvm::MachineBasicBlock::rend
reverse_iterator rend()
Definition: MachineBasicBlock.h:278
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:140
llvm::RISCVInstrInfo::isRVVSpill
bool isRVVSpill(const MachineInstr &MI, bool CheckFIs) const
Definition: RISCVInstrInfo.cpp:1629
llvm::RegScavenger::enterBasicBlockEnd
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
Definition: RegisterScavenging.cpp:89
llvm::RISCVOp::OPERAND_UIMM12
@ OPERAND_UIMM12
Definition: RISCVBaseInfo.h:176
llvm::RISCVInstrInfo::insertOutlinedCall
virtual MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, const outliner::Candidate &C) const override
Definition: RISCVInstrInfo.cpp:1181
llvm::RISCVInstrInfo::removeBranch
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
Definition: RISCVInstrInfo.cpp:625
llvm::MemoryLocation::UnknownSize
@ UnknownSize
Definition: MemoryLocation.h:214
llvm::RISCVInstrInfo::storeRegToStackSlot
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:243
llvm::RegState::Define
@ Define
Register definition.
Definition: MachineInstrBuilder.h:44
llvm::RISCVSubtarget::getInstrInfo
const RISCVInstrInfo * getInstrInfo() const override
Definition: RISCVSubtarget.h:94
llvm::LiveRegUnits
A set of register units used to track register liveness.
Definition: LiveRegUnits.h:30
llvm::RISCVInstrInfo::getNop
MCInst getNop() const override
Definition: RISCVInstrInfo.cpp:54
llvm::isIntN
bool isIntN(unsigned N, int64_t x)
Checks if an signed integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:460
llvm::None
const NoneType None
Definition: None.h:23
llvm::RISCVInstrInfo::areMemAccessesTriviallyDisjoint
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
Definition: RISCVInstrInfo.cpp:980
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:95
MachineOutlinerConstructionID
MachineOutlinerConstructionID
Definition: RISCVInstrInfo.cpp:1059
llvm::MachineFunction::getSubtarget
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Definition: MachineFunction.h:626
llvm::MachineInstrBuilder::addFrameIndex
const MachineInstrBuilder & addFrameIndex(int Idx) const
Definition: MachineInstrBuilder.h:152
llvm::MachineInstrBuilder::setMIFlag
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
Definition: MachineInstrBuilder.h:278
forwardCopyWillClobberTuple
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg, unsigned NumRegs)
Definition: RISCVInstrInfo.cpp:114
llvm::MachineInstrBundleIterator::getReverse
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Definition: MachineInstrBundleIterator.h:283
llvm::isInt< 32 >
constexpr bool isInt< 32 >(int64_t x)
Definition: MathExtras.h:373
llvm::IRSimilarity::Legal
@ Legal
Definition: IRSimilarityIdentifier.h:75
llvm::MachineOperand::isReg
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Definition: MachineOperand.h:321
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:64
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
uint64_t
llvm::MachineFrameInfo::getObjectSize
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
Definition: MachineFrameInfo.h:453
llvm::ARM_MB::ST
@ ST
Definition: ARMBaseInfo.h:73
llvm::RISCVInstrInfo::isFunctionSafeToOutlineFrom
virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
Definition: RISCVInstrInfo.cpp:1035
llvm::outliner::Candidate
An individual sequence of instructions to be replaced with a call to an outlined function.
Definition: MachineOutliner.h:38
llvm::RISCVOp::OPERAND_UIMM20
@ OPERAND_UIMM20
Definition: RISCVBaseInfo.h:178
llvm::RISCVCC::COND_EQ
@ COND_EQ
Definition: RISCVInstrInfo.h:30
MemoryLocation.h
llvm::RISCVInstrInfo::getMemOperandWithOffsetWidth
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, unsigned &Width, const TargetRegisterInfo *TRI) const
Definition: RISCVInstrInfo.cpp:957
llvm::RISCVInstrInfo::isMBBSafeToOutlineFrom
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
Definition: RISCVInstrInfo.cpp:1052
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::RegScavenger
Definition: RegisterScavenging.h:34
llvm::MachineFrameInfo::getObjectAlign
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
Definition: MachineFrameInfo.h:467
llvm::TargetStackID::ScalableVector
@ ScalableVector
Definition: TargetFrameLowering.h:30
llvm::MCInstBuilder
Definition: MCInstBuilder.h:21
llvm::MachineBasicBlock::getLastNonDebugInstr
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
Definition: MachineBasicBlock.cpp:267
MachineFunctionPass.h
llvm::RISCVSubtarget
Definition: RISCVSubtarget.h:35
llvm::X86ISD::FMSUB
@ FMSUB
Definition: X86ISelLowering.h:556
llvm::MachineFunction::getName
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
Definition: MachineFunction.cpp:541
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::MachineFunction::getFrameInfo
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Definition: MachineFunction.h:642
llvm::MachineBasicBlock::getParent
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
Definition: MachineBasicBlock.h:225
llvm::MachineInstrBuilder::addMemOperand
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Definition: MachineInstrBuilder.h:202
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:97
llvm::RISCVInstrInfo::isCopyInstrImpl
Optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:883
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
CASE_VFMA_SPLATS
#define CASE_VFMA_SPLATS(OP)
Definition: RISCVInstrInfo.cpp:1206
RISCV.h
llvm::MachineInstr::MIFlag
MIFlag
Definition: MachineInstr.h:80
llvm::MachineFunction
Definition: MachineFunction.h:230
CASE_VFMA_OPCODE_LMULS
#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)
Definition: RISCVInstrInfo.cpp:1197
llvm::RISCVII::MO_PCREL_HI
@ MO_PCREL_HI
Definition: RISCVBaseInfo.h:153
llvm::MipsII::MO_TPREL_LO
@ MO_TPREL_LO
Definition: MipsBaseInfo.h:74
llvm::MachineBasicBlock::succ_empty
bool succ_empty() const
Definition: MachineBasicBlock.h:347
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::MachineFrameInfo::setStackID
void setStackID(int ObjectIdx, uint8_t ID)
Definition: MachineFrameInfo.h:704
llvm::RISCVInstrInfo
Definition: RISCVInstrInfo.h:43
llvm::MachineOperand::getMBB
MachineBasicBlock * getMBB() const
Definition: MachineOperand.h:552
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
llvm::any_of
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1554
CASE_WIDEOP_OPCODE_LMULS
#define CASE_WIDEOP_OPCODE_LMULS(OP)
Definition: RISCVInstrInfo.cpp:1453
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:179
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:58
MBBI
MachineBasicBlock MachineBasicBlock::iterator MBBI
Definition: AArch64SLSHardening.cpp:75
llvm::MachineInstr::getOpcode
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:489
llvm::RISCVSubtarget::getRegisterInfo
const RISCVRegisterInfo * getRegisterInfo() const override
Definition: RISCVSubtarget.h:95
llvm::MCInstBuilder::addImm
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Definition: MCInstBuilder.h:37
llvm::RISCVInstrInfo::movImm
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Definition: RISCVInstrInfo.cpp:442
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
uint32_t
llvm::X86ISD::FLD
@ FLD
This instruction implements an extending load to FP stack slots.
Definition: X86ISelLowering.h:840
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::RISCVInstrInfo::buildOutlinedFrame
virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Definition: RISCVInstrInfo.cpp:1153
llvm::RISCVInstrInfo::getVLENFactoredAmount
Register getVLENFactoredAmount(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, int64_t Amount, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Definition: RISCVInstrInfo.cpp:1529
llvm::RISCVInstrInfo::insertBranch
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
Definition: RISCVInstrInfo.cpp:659
llvm::RISCVII::MO_TLS_GOT_HI
@ MO_TLS_GOT_HI
Definition: RISCVBaseInfo.h:158
llvm::MCRegisterInfo
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Definition: MCRegisterInfo.h:135
getCondFromBranchOpc
static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc)
Definition: RISCVInstrInfo.cpp:485
llvm::RISCVOp::OPERAND_UIMM4
@ OPERAND_UIMM4
Definition: RISCVBaseInfo.h:173
llvm::SignExtend64
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition: MathExtras.h:777
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:135
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::MachineBasicBlock::addLiveIn
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
Definition: MachineBasicBlock.h:367
llvm::ISD::FrameIndex
@ FrameIndex
Definition: ISDOpcodes.h:80
llvm::MachineRegisterInfo::replaceRegWith
void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
Definition: MachineRegisterInfo.cpp:380
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
CASE_WIDEOP_CHANGE_OPCODE_LMULS
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
Definition: RISCVInstrInfo.cpp:1467
llvm::LLVMContext::diagnose
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Definition: LLVMContext.cpp:228
llvm::MCInstrInfo
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:25
llvm::MachineFunction::getFunction
Function & getFunction()
Return the LLVM function that this machine code represents.
Definition: MachineFunction.h:592
llvm::RISCVInstrInfo::getOutliningCandidateInfo
outliner::OutlinedFunction getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const override
Definition: RISCVInstrInfo.cpp:1063
llvm::TargetRegisterInfo::getRegSizeInBits
unsigned getRegSizeInBits(const TargetRegisterClass &RC) const
Return the size in bits of a register from class RC.
Definition: TargetRegisterInfo.h:276
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Definition: MachineFunction.h:622
llvm::DestSourcePair
Definition: TargetInstrInfo.h:68
get
Should compile to something r4 addze r3 instead we get
Definition: README.txt:24
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:321
llvm::MachineBasicBlock::insert
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
Definition: MachineBasicBlock.cpp:1312
llvm::MachineInstr::hasUnmodeledSideEffects
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
Definition: MachineInstr.cpp:1457
llvm::ISD::INLINEASM_BR
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
Definition: ISDOpcodes.h:983
llvm::RegScavenger::setRegUsed
void setRegUsed(Register Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
Definition: RegisterScavenging.cpp:53
llvm::RISCVII::MO_DIRECT_FLAG_MASK
@ MO_DIRECT_FLAG_MASK
Definition: RISCVBaseInfo.h:164
llvm::RISCVSubtarget::getXLen
unsigned getXLen() const
Definition: RISCVSubtarget.h:132
RISCVInstrInfo.h
llvm::RISCVInstrInfo::isStoreToStackSlot
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition: RISCVInstrInfo.cpp:90
llvm::RISCVCC::COND_GE
@ COND_GE
Definition: RISCVInstrInfo.h:33
llvm::MachineRegisterInfo::clearVirtRegs
void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
Definition: MachineRegisterInfo.cpp:202
llvm::MachineOperand::isImm
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Definition: MachineOperand.h:323
llvm::MachineMemOperand::MOStore
@ MOStore
The memory access writes data.
Definition: MachineMemOperand.h:137
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:410
llvm::ISD::ADD
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
llvm::makeArrayRef
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:476
llvm::RISCVInstrInfo::isAsCheapAsAMove
bool isAsCheapAsAMove(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:862
llvm::RISCVII::MO_GOT_HI
@ MO_GOT_HI
Definition: RISCVBaseInfo.h:154
llvm::RISCVInstrInfo::commuteInstructionImpl
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Definition: RISCVInstrInfo.cpp:1345
RISCVSubtarget.h
llvm::RegState::Dead
@ Dead
Unused definition.
Definition: MachineInstrBuilder.h:50
llvm::RISCVII::MO_CALL
@ MO_CALL
Definition: RISCVBaseInfo.h:148
llvm::RISCVInstrInfo::loadRegFromStackSlot
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:344
llvm::getKillRegState
unsigned getKillRegState(bool B)
Definition: MachineInstrBuilder.h:508
llvm::RISCVInstrInfo::copyPhysReg
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc) const override
Definition: RISCVInstrInfo.cpp:121
llvm::MachineFrameInfo
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Definition: MachineFrameInfo.h:107
MachineOutlinerDefault
@ MachineOutlinerDefault
Definition: RISCVInstrInfo.cpp:1060
llvm::RISCVCC::CondCode
CondCode
Definition: RISCVInstrInfo.h:29
SmallVector.h
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:1003
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:268
MachineInstrBuilder.h
llvm::ISD::MUL
@ MUL
Definition: ISDOpcodes.h:241
llvm::TargetInstrInfo::findCommutedOpIndices
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
Definition: TargetInstrInfo.cpp:296
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:328
N
#define N
llvm::RISCVInstrInfo::verifyInstruction
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
Definition: RISCVInstrInfo.cpp:906
RISCVMachineFunctionInfo.h
llvm::RegState::Kill
@ Kill
The last use of a register.
Definition: MachineInstrBuilder.h:48
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
llvm::MachineBasicBlock::empty
bool empty() const
Definition: MachineBasicBlock.h:240
llvm::MCInstBuilder::addReg
MCInstBuilder & addReg(unsigned Reg)
Add a new register operand.
Definition: MCInstBuilder.h:31
CASE_VFMA_CHANGE_OPCODE_LMULS
#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
Definition: RISCVInstrInfo.cpp:1331
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::TargetRegisterInfo::getSubReg
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
Definition: TargetRegisterInfo.h:1094
isRVVWholeLoadStore
static bool isRVVWholeLoadStore(unsigned Opcode)
Definition: RISCVInstrInfo.cpp:1601
TM
const char LLVMTargetMachineRef TM
Definition: PassBuilderBindings.cpp:47
llvm::RISCVInstrInfo::convertToThreeAddress
MachineInstr * convertToThreeAddress(MachineFunction::iterator &MBB, MachineInstr &MI, LiveVariables *LV) const override
Definition: RISCVInstrInfo.cpp:1475
llvm::LiveVariables
Definition: LiveVariables.h:46
llvm::MCInstrInfo::get
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:62
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
RegisterScavenging.h
llvm::RISCVSubtarget::hasStdExtM
bool hasStdExtM() const
Definition: RISCVSubtarget.h:105
CASE_VFMA_CHANGE_OPCODE_SPLATS
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
Definition: RISCVInstrInfo.cpp:1340
llvm::MachineInstrBundleIterator< MachineInstr >
TargetRegistry.h
llvm::MCSubtargetInfo
Generic base class for all target subtargets.
Definition: MCSubtargetInfo.h:75
llvm::AVRII::MO_LO
@ MO_LO
On a symbol operand, this represents the lo part.
Definition: AVRInstrInfo.h:52
llvm::RISCVOp::OPERAND_FIRST_RISCV_IMM
@ OPERAND_FIRST_RISCV_IMM
Definition: RISCVBaseInfo.h:170
llvm::MCInstrDesc::operands
iterator_range< const_opInfo_iterator > operands() const
Definition: MCInstrDesc.h:235
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:270
llvm::MCInstrDesc::isConditionalBranch
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Definition: MCInstrDesc.h:308
llvm::RISCVInstrInfo::getInstSizeInBytes
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:772
llvm::RISCVII::MO_TLS_GD_HI
@ MO_TLS_GD_HI
Definition: RISCVBaseInfo.h:159
llvm::MachineOperand::isIdenticalTo
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
Definition: MachineOperand.cpp:282
llvm::AVRII::MO_HI
@ MO_HI
On a symbol operand, this represents the hi part.
Definition: AVRInstrInfo.h:55
llvm::MCRegister
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:23
llvm::RISCVCC::COND_NE
@ COND_NE
Definition: RISCVInstrInfo.h:31
RISCVTargetMachine.h