RISCVInstrInfo.cpp
1 //===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the RISCV implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVInstrInfo.h"
14 #include "MCTargetDesc/RISCVMatInt.h"
15 #include "RISCV.h"
16 #include "RISCVMachineFunctionInfo.h"
17 #include "RISCVSubtarget.h"
18 #include "RISCVTargetMachine.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/Analysis/MemoryLocation.h"
22 #include "llvm/CodeGen/MachineFunctionPass.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/RegisterScavenging.h"
26 #include "llvm/MC/MCInstBuilder.h"
27 #include "llvm/Support/ErrorHandling.h"
28 #include "llvm/Support/TargetRegistry.h"
29 
30 using namespace llvm;
31 
32 #define GEN_CHECK_COMPRESS_INSTR
33 #include "RISCVGenCompressInstEmitter.inc"
34 
35 #define GET_INSTRINFO_CTOR_DTOR
36 #include "RISCVGenInstrInfo.inc"
37 
38 namespace llvm {
39 namespace RISCVVPseudosTable {
40 
41 using namespace RISCV;
42 
43 #define GET_RISCVVPseudosTable_IMPL
44 #include "RISCVGenSearchableTables.inc"
45 
46 } // namespace RISCVVPseudosTable
47 } // namespace llvm
48 
49 RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
50  : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
51  STI(STI) {}
52 
53 MCInst RISCVInstrInfo::getNop() const {
54  if (STI.getFeatureBits()[RISCV::FeatureStdExtC])
55  return MCInstBuilder(RISCV::C_NOP);
56  return MCInstBuilder(RISCV::ADDI)
57  .addReg(RISCV::X0)
58  .addReg(RISCV::X0)
59  .addImm(0);
60 }
61 
62 unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
63  int &FrameIndex) const {
64  switch (MI.getOpcode()) {
65  default:
66  return 0;
67  case RISCV::LB:
68  case RISCV::LBU:
69  case RISCV::LH:
70  case RISCV::LHU:
71  case RISCV::FLH:
72  case RISCV::LW:
73  case RISCV::FLW:
74  case RISCV::LWU:
75  case RISCV::LD:
76  case RISCV::FLD:
77  break;
78  }
79 
80  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
81  MI.getOperand(2).getImm() == 0) {
82  FrameIndex = MI.getOperand(1).getIndex();
83  return MI.getOperand(0).getReg();
84  }
85 
86  return 0;
87 }
88 
89 unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
90  int &FrameIndex) const {
91  switch (MI.getOpcode()) {
92  default:
93  return 0;
94  case RISCV::SB:
95  case RISCV::SH:
96  case RISCV::SW:
97  case RISCV::FSH:
98  case RISCV::FSW:
99  case RISCV::SD:
100  case RISCV::FSD:
101  break;
102  }
103 
104  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
105  MI.getOperand(2).getImm() == 0) {
106  FrameIndex = MI.getOperand(1).getIndex();
107  return MI.getOperand(0).getReg();
108  }
109 
110  return 0;
111 }
112 
113 static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
114  unsigned NumRegs) {
115  // We really want the positive remainder mod 32 here; that happens to be
116  // easily obtainable with a mask.
117  return ((DstReg - SrcReg) & 0x1f) < NumRegs;
118 }
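// For example, with DstReg encoding 2, SrcReg encoding 0 and NumRegs 4,
// (2 - 0) & 0x1f == 2 is less than 4, so a forward (increasing-index) copy
// would overwrite source registers that have not yet been read, and the
// caller must copy the tuple in reverse order instead.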
119 
120 void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
121  MachineBasicBlock::iterator MBBI,
122  const DebugLoc &DL, MCRegister DstReg,
123  MCRegister SrcReg, bool KillSrc) const {
124  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
125  BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
126  .addReg(SrcReg, getKillRegState(KillSrc))
127  .addImm(0);
128  return;
129  }
130 
131  // FPR->FPR copies and VR->VR copies.
132  unsigned Opc;
133  bool IsScalableVector = true;
134  unsigned NF = 1;
135  unsigned LMul = 1;
136  unsigned SubRegIdx = RISCV::sub_vrm1_0;
137  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
138  Opc = RISCV::FSGNJ_H;
139  IsScalableVector = false;
140  } else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
141  Opc = RISCV::FSGNJ_S;
142  IsScalableVector = false;
143  } else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
144  Opc = RISCV::FSGNJ_D;
145  IsScalableVector = false;
146  } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
147  Opc = RISCV::PseudoVMV1R_V;
148  } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
149  Opc = RISCV::PseudoVMV2R_V;
150  } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
151  Opc = RISCV::PseudoVMV4R_V;
152  } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
153  Opc = RISCV::PseudoVMV8R_V;
154  } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
155  Opc = RISCV::PseudoVMV1R_V;
156  SubRegIdx = RISCV::sub_vrm1_0;
157  NF = 2;
158  LMul = 1;
159  } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
160  Opc = RISCV::PseudoVMV2R_V;
161  SubRegIdx = RISCV::sub_vrm2_0;
162  NF = 2;
163  LMul = 2;
164  } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
165  Opc = RISCV::PseudoVMV4R_V;
166  SubRegIdx = RISCV::sub_vrm4_0;
167  NF = 2;
168  LMul = 4;
169  } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
170  Opc = RISCV::PseudoVMV1R_V;
171  SubRegIdx = RISCV::sub_vrm1_0;
172  NF = 3;
173  LMul = 1;
174  } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
175  Opc = RISCV::PseudoVMV2R_V;
176  SubRegIdx = RISCV::sub_vrm2_0;
177  NF = 3;
178  LMul = 2;
179  } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
180  Opc = RISCV::PseudoVMV1R_V;
181  SubRegIdx = RISCV::sub_vrm1_0;
182  NF = 4;
183  LMul = 1;
184  } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
185  Opc = RISCV::PseudoVMV2R_V;
186  SubRegIdx = RISCV::sub_vrm2_0;
187  NF = 4;
188  LMul = 2;
189  } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
190  Opc = RISCV::PseudoVMV1R_V;
191  SubRegIdx = RISCV::sub_vrm1_0;
192  NF = 5;
193  LMul = 1;
194  } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
195  Opc = RISCV::PseudoVMV1R_V;
196  SubRegIdx = RISCV::sub_vrm1_0;
197  NF = 6;
198  LMul = 1;
199  } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
200  Opc = RISCV::PseudoVMV1R_V;
201  SubRegIdx = RISCV::sub_vrm1_0;
202  NF = 7;
203  LMul = 1;
204  } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
205  Opc = RISCV::PseudoVMV1R_V;
206  SubRegIdx = RISCV::sub_vrm1_0;
207  NF = 8;
208  LMul = 1;
209  } else {
210  llvm_unreachable("Impossible reg-to-reg copy");
211  }
212 
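// For the grouped vector classes (VRN<NF>M<LMul>) selected above, the copy is
// expanded below into NF whole-register moves of LMul registers each, with the
// iteration direction chosen so an overlapping destination tuple never
// clobbers source registers that have not been copied yet.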
213  if (IsScalableVector) {
214  if (NF == 1) {
215  BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
216  .addReg(SrcReg, getKillRegState(KillSrc));
217  } else {
218  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
219 
220  int I = 0, End = NF, Incr = 1;
221  unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
222  unsigned DstEncoding = TRI->getEncodingValue(DstReg);
223  if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMul)) {
224  I = NF - 1;
225  End = -1;
226  Incr = -1;
227  }
228 
229  for (; I != End; I += Incr) {
230  BuildMI(MBB, MBBI, DL, get(Opc), TRI->getSubReg(DstReg, SubRegIdx + I))
231  .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
232  getKillRegState(KillSrc));
233  }
234  }
235  } else {
236  BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
237  .addReg(SrcReg, getKillRegState(KillSrc))
238  .addReg(SrcReg, getKillRegState(KillSrc));
239  }
240 }
241 
242 void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
243  MachineBasicBlock::iterator I,
244  Register SrcReg, bool IsKill, int FI,
245  const TargetRegisterClass *RC,
246  const TargetRegisterInfo *TRI) const {
247  DebugLoc DL;
248  if (I != MBB.end())
249  DL = I->getDebugLoc();
250 
251  MachineFunction *MF = MBB.getParent();
252  MachineFrameInfo &MFI = MF->getFrameInfo();
253 
254  unsigned Opcode;
255  bool IsScalableVector = true;
256  bool IsZvlsseg = true;
257  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
258  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
259  RISCV::SW : RISCV::SD;
260  IsScalableVector = false;
261  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
262  Opcode = RISCV::FSH;
263  IsScalableVector = false;
264  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
265  Opcode = RISCV::FSW;
266  IsScalableVector = false;
267  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
268  Opcode = RISCV::FSD;
269  IsScalableVector = false;
270  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
271  Opcode = RISCV::PseudoVSPILL_M1;
272  IsZvlsseg = false;
273  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
274  Opcode = RISCV::PseudoVSPILL_M2;
275  IsZvlsseg = false;
276  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
277  Opcode = RISCV::PseudoVSPILL_M4;
278  IsZvlsseg = false;
279  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
280  Opcode = RISCV::PseudoVSPILL_M8;
281  IsZvlsseg = false;
282  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
283  Opcode = RISCV::PseudoVSPILL2_M1;
284  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
285  Opcode = RISCV::PseudoVSPILL2_M2;
286  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
287  Opcode = RISCV::PseudoVSPILL2_M4;
288  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
289  Opcode = RISCV::PseudoVSPILL3_M1;
290  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
291  Opcode = RISCV::PseudoVSPILL3_M2;
292  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
293  Opcode = RISCV::PseudoVSPILL4_M1;
294  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
295  Opcode = RISCV::PseudoVSPILL4_M2;
296  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
297  Opcode = RISCV::PseudoVSPILL5_M1;
298  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
299  Opcode = RISCV::PseudoVSPILL6_M1;
300  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
301  Opcode = RISCV::PseudoVSPILL7_M1;
302  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
303  Opcode = RISCV::PseudoVSPILL8_M1;
304  else
305  llvm_unreachable("Can't store this register to stack slot");
306 
307  if (IsScalableVector) {
308  MachineMemOperand *MMO = MF->getMachineMemOperand(
309  MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
310  MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
311 
312  MFI.setStackID(FI, TargetStackID::ScalableVector);
313  auto MIB = BuildMI(MBB, I, DL, get(Opcode))
314  .addReg(SrcReg, getKillRegState(IsKill))
315  .addFrameIndex(FI)
316  .addMemOperand(MMO);
317  if (IsZvlsseg) {
318  // For spilling/reloading Zvlsseg registers, append the dummy field for
319  // the scaled vector length. The argument will be used when expanding
320  // these pseudo instructions.
321  MIB.addReg(RISCV::X0);
322  }
323  } else {
324  MachineMemOperand *MMO = MF->getMachineMemOperand(
325  MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
326  MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
327 
328  BuildMI(MBB, I, DL, get(Opcode))
329  .addReg(SrcReg, getKillRegState(IsKill))
330  .addFrameIndex(FI)
331  .addImm(0)
332  .addMemOperand(MMO);
333  }
334 }
335 
336 void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
337  MachineBasicBlock::iterator I,
338  Register DstReg, int FI,
339  const TargetRegisterClass *RC,
340  const TargetRegisterInfo *TRI) const {
341  DebugLoc DL;
342  if (I != MBB.end())
343  DL = I->getDebugLoc();
344 
345  MachineFunction *MF = MBB.getParent();
346  MachineFrameInfo &MFI = MF->getFrameInfo();
347 
348  unsigned Opcode;
349  bool IsScalableVector = true;
350  bool IsZvlsseg = true;
351  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
352  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
353  RISCV::LW : RISCV::LD;
354  IsScalableVector = false;
355  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
356  Opcode = RISCV::FLH;
357  IsScalableVector = false;
358  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
359  Opcode = RISCV::FLW;
360  IsScalableVector = false;
361  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
362  Opcode = RISCV::FLD;
363  IsScalableVector = false;
364  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
365  Opcode = RISCV::PseudoVRELOAD_M1;
366  IsZvlsseg = false;
367  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
368  Opcode = RISCV::PseudoVRELOAD_M2;
369  IsZvlsseg = false;
370  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
371  Opcode = RISCV::PseudoVRELOAD_M4;
372  IsZvlsseg = false;
373  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
374  Opcode = RISCV::PseudoVRELOAD_M8;
375  IsZvlsseg = false;
376  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
377  Opcode = RISCV::PseudoVRELOAD2_M1;
378  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
379  Opcode = RISCV::PseudoVRELOAD2_M2;
380  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
381  Opcode = RISCV::PseudoVRELOAD2_M4;
382  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
383  Opcode = RISCV::PseudoVRELOAD3_M1;
384  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
385  Opcode = RISCV::PseudoVRELOAD3_M2;
386  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
387  Opcode = RISCV::PseudoVRELOAD4_M1;
388  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
389  Opcode = RISCV::PseudoVRELOAD4_M2;
390  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
391  Opcode = RISCV::PseudoVRELOAD5_M1;
392  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
393  Opcode = RISCV::PseudoVRELOAD6_M1;
394  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
395  Opcode = RISCV::PseudoVRELOAD7_M1;
396  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
397  Opcode = RISCV::PseudoVRELOAD8_M1;
398  else
399  llvm_unreachable("Can't load this register from stack slot");
400 
401  if (IsScalableVector) {
402  MachineMemOperand *MMO = MF->getMachineMemOperand(
403  MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
404  MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
405 
406  MFI.setStackID(FI, TargetStackID::ScalableVector);
407  auto MIB = BuildMI(MBB, I, DL, get(Opcode), DstReg)
408  .addFrameIndex(FI)
409  .addMemOperand(MMO);
410  if (IsZvlsseg) {
411  // For spilling/reloading Zvlsseg registers, append the dummy field for
412  // the scaled vector length. The argument will be used when expanding
413  // these pseudo instructions.
414  MIB.addReg(RISCV::X0);
415  }
416  } else {
417  MachineMemOperand *MMO = MF->getMachineMemOperand(
418  MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
419  MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
420 
421  BuildMI(MBB, I, DL, get(Opcode), DstReg)
422  .addFrameIndex(FI)
423  .addImm(0)
424  .addMemOperand(MMO);
425  }
426 }
427 
428 void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
429  MachineBasicBlock::iterator MBBI,
430  const DebugLoc &DL, Register DstReg, uint64_t Val,
431  MachineInstr::MIFlag Flag) const {
432  MachineFunction *MF = MBB.getParent();
433  MachineRegisterInfo &MRI = MF->getRegInfo();
434  bool IsRV64 = MF->getSubtarget<RISCVSubtarget>().is64Bit();
435  Register SrcReg = RISCV::X0;
436  Register Result = MRI.createVirtualRegister(&RISCV::GPRRegClass);
437  unsigned Num = 0;
438 
439  if (!IsRV64 && !isInt<32>(Val))
440  report_fatal_error("Should only materialize 32-bit constants for RV32");
441 
442  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, IsRV64);
443  assert(Seq.size() > 0);
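// For example, Val = 0x12345678 yields LUI followed by ADDI; larger RV64
// constants may additionally require ADDIW/SLLI steps in the generated
// sequence.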
444 
445  for (RISCVMatInt::Inst &Inst : Seq) {
446  // Write the final result to DstReg if it's the last instruction in the Seq.
447  // Otherwise, write the result to the temp register.
448  if (++Num == Seq.size())
449  Result = DstReg;
450 
451  if (Inst.Opc == RISCV::LUI) {
452  BuildMI(MBB, MBBI, DL, get(RISCV::LUI), Result)
453  .addImm(Inst.Imm)
454  .setMIFlag(Flag);
455  } else {
456  BuildMI(MBB, MBBI, DL, get(Inst.Opc), Result)
457  .addReg(SrcReg, RegState::Kill)
458  .addImm(Inst.Imm)
459  .setMIFlag(Flag);
460  }
461  // Only the first instruction has X0 as its source.
462  SrcReg = Result;
463  }
464 }
465 
466 // The contents of values added to Cond are not examined outside of
467 // RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
468 // push BranchOpcode, Reg1, Reg2.
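// For example, a terminator "BEQ $x10, $x11, %bb.4" is parsed into
// Target = %bb.4 and Cond = {BEQ, $x10, $x11}.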
469 static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
470  SmallVectorImpl<MachineOperand> &Cond) {
471  // Block ends with fall-through condbranch.
472  assert(LastInst.getDesc().isConditionalBranch() &&
473  "Unknown conditional branch");
474  Target = LastInst.getOperand(2).getMBB();
475  Cond.push_back(MachineOperand::CreateImm(LastInst.getOpcode()));
476  Cond.push_back(LastInst.getOperand(0));
477  Cond.push_back(LastInst.getOperand(1));
478 }
479 
480 static unsigned getOppositeBranchOpcode(int Opc) {
481  switch (Opc) {
482  default:
483  llvm_unreachable("Unrecognized conditional branch");
484  case RISCV::BEQ:
485  return RISCV::BNE;
486  case RISCV::BNE:
487  return RISCV::BEQ;
488  case RISCV::BLT:
489  return RISCV::BGE;
490  case RISCV::BGE:
491  return RISCV::BLT;
492  case RISCV::BLTU:
493  return RISCV::BGEU;
494  case RISCV::BGEU:
495  return RISCV::BLTU;
496  }
497 }
498 
499 bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
500  MachineBasicBlock *&TBB,
501  MachineBasicBlock *&FBB,
502  SmallVectorImpl<MachineOperand> &Cond,
503  bool AllowModify) const {
504  TBB = FBB = nullptr;
505  Cond.clear();
506 
507  // If the block has no terminators, it just falls into the block after it.
508  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
509  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
510  return false;
511 
512  // Count the number of terminators and find the first unconditional or
513  // indirect branch.
514  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
515  int NumTerminators = 0;
516  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
517  J++) {
518  NumTerminators++;
519  if (J->getDesc().isUnconditionalBranch() ||
520  J->getDesc().isIndirectBranch()) {
521  FirstUncondOrIndirectBr = J.getReverse();
522  }
523  }
524 
525  // If AllowModify is true, we can erase any terminators after
526  // FirstUncondOrIndirectBR.
527  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
528  while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
529  std::next(FirstUncondOrIndirectBr)->eraseFromParent();
530  NumTerminators--;
531  }
532  I = FirstUncondOrIndirectBr;
533  }
534 
535  // We can't handle blocks that end in an indirect branch.
536  if (I->getDesc().isIndirectBranch())
537  return true;
538 
539  // We can't handle blocks with more than 2 terminators.
540  if (NumTerminators > 2)
541  return true;
542 
543  // Handle a single unconditional branch.
544  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
545  TBB = getBranchDestBlock(*I);
546  return false;
547  }
548 
549  // Handle a single conditional branch.
550  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
551  parseCondBranch(*I, TBB, Cond);
552  return false;
553  }
554 
555  // Handle a conditional branch followed by an unconditional branch.
556  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
557  I->getDesc().isUnconditionalBranch()) {
558  parseCondBranch(*std::prev(I), TBB, Cond);
559  FBB = getBranchDestBlock(*I);
560  return false;
561  }
562 
563  // Otherwise, we can't handle this.
564  return true;
565 }
566 
567 unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
568  int *BytesRemoved) const {
569  if (BytesRemoved)
570  *BytesRemoved = 0;
571  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
572  if (I == MBB.end())
573  return 0;
574 
575  if (!I->getDesc().isUnconditionalBranch() &&
576  !I->getDesc().isConditionalBranch())
577  return 0;
578 
579  // Remove the branch.
580  if (BytesRemoved)
581  *BytesRemoved += getInstSizeInBytes(*I);
582  I->eraseFromParent();
583 
584  I = MBB.end();
585 
586  if (I == MBB.begin())
587  return 1;
588  --I;
589  if (!I->getDesc().isConditionalBranch())
590  return 1;
591 
592  // Remove the branch.
593  if (BytesRemoved)
594  *BytesRemoved += getInstSizeInBytes(*I);
595  I->eraseFromParent();
596  return 2;
597 }
598 
599 // Inserts a branch into the end of the specific MachineBasicBlock, returning
600 // the number of instructions inserted.
601 unsigned RISCVInstrInfo::insertBranch(
602  MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
603  ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
604  if (BytesAdded)
605  *BytesAdded = 0;
606 
607  // Shouldn't be a fall through.
608  assert(TBB && "insertBranch must not be told to insert a fallthrough");
609  assert((Cond.size() == 3 || Cond.size() == 0) &&
610  "RISCV branch conditions have two components!");
611 
612  // Unconditional branch.
613  if (Cond.empty()) {
614  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
615  if (BytesAdded)
616  *BytesAdded += getInstSizeInBytes(MI);
617  return 1;
618  }
619 
620  // Either a one or two-way conditional branch.
621  unsigned Opc = Cond[0].getImm();
622  MachineInstr &CondMI =
623  *BuildMI(&MBB, DL, get(Opc)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
624  if (BytesAdded)
625  *BytesAdded += getInstSizeInBytes(CondMI);
626 
627  // One-way conditional branch.
628  if (!FBB)
629  return 1;
630 
631  // Two-way conditional branch.
632  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
633  if (BytesAdded)
634  *BytesAdded += getInstSizeInBytes(MI);
635  return 2;
636 }
637 
638 unsigned RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
639  MachineBasicBlock &DestBB,
640  const DebugLoc &DL,
641  int64_t BrOffset,
642  RegScavenger *RS) const {
643  assert(RS && "RegScavenger required for long branching");
644  assert(MBB.empty() &&
645  "new block should be inserted for expanding unconditional branch");
646  assert(MBB.pred_size() == 1);
647 
648  MachineFunction *MF = MBB.getParent();
649  MachineRegisterInfo &MRI = MF->getRegInfo();
650 
651  if (!isInt<32>(BrOffset))
652  report_fatal_error(
653  "Branch offsets outside of the signed 32-bit range not supported");
654 
655  // FIXME: A virtual register must be used initially, as the register
656  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
657  // uses the same workaround).
658  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
659  auto II = MBB.end();
660 
661  MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
662  .addReg(ScratchReg, RegState::Define | RegState::Dead)
663  .addMBB(&DestBB, RISCVII::MO_CALL);
664 
665  RS->enterBasicBlockEnd(MBB);
666  unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
667  MI.getIterator(), false, 0);
668  MRI.replaceRegWith(ScratchReg, Scav);
669  MRI.clearVirtRegs();
670  RS->setRegUsed(Scav);
671  return 8;
672 }
673 
674 bool RISCVInstrInfo::reverseBranchCondition(
675  SmallVectorImpl<MachineOperand> &Cond) const {
676  assert((Cond.size() == 3) && "Invalid branch condition!");
677  Cond[0].setImm(getOppositeBranchOpcode(Cond[0].getImm()));
678  return false;
679 }
680 
681 MachineBasicBlock *
682 RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
683  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
684  // The branch target is always the last operand.
685  int NumOp = MI.getNumExplicitOperands();
686  return MI.getOperand(NumOp - 1).getMBB();
687 }
688 
689 bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
690  int64_t BrOffset) const {
691  unsigned XLen = STI.getXLen();
692  // Ideally we could determine the supported branch offset from the
693  // RISCVII::FormMask, but this can't be used for Pseudo instructions like
694  // PseudoBR.
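// Conditional branches use a 13-bit signed, 2-byte-aligned offset (B-type),
// JAL/PseudoBR a 21-bit signed offset (J-type), and PseudoJump expands to
// AUIPC+JALR and therefore reaches a signed 32-bit range.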
695  switch (BranchOp) {
696  default:
697  llvm_unreachable("Unexpected opcode!");
698  case RISCV::BEQ:
699  case RISCV::BNE:
700  case RISCV::BLT:
701  case RISCV::BGE:
702  case RISCV::BLTU:
703  case RISCV::BGEU:
704  return isIntN(13, BrOffset);
705  case RISCV::JAL:
706  case RISCV::PseudoBR:
707  return isIntN(21, BrOffset);
708  case RISCV::PseudoJump:
709  return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
710  }
711 }
712 
713 unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
714  unsigned Opcode = MI.getOpcode();
715 
716  switch (Opcode) {
717  default: {
718  if (MI.getParent() && MI.getParent()->getParent()) {
719  const auto MF = MI.getMF();
720  const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
721  const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
722  const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
723  const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
724  if (isCompressibleInst(MI, &ST, MRI, STI))
725  return 2;
726  }
727  return get(Opcode).getSize();
728  }
729  case TargetOpcode::EH_LABEL:
730  case TargetOpcode::IMPLICIT_DEF:
731  case TargetOpcode::KILL:
732  case TargetOpcode::DBG_VALUE:
733  return 0;
734  // These values are determined based on RISCVExpandAtomicPseudoInsts,
735  // RISCVExpandPseudoInsts and RISCVMCCodeEmitter, depending on where the
736  // pseudos are expanded.
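// For instance, PseudoCALL expands to AUIPC+JALR (8 bytes), and
// PseudoAtomicLoadNand32 roughly expands to an LR/AND/XORI/SC/BNEZ loop
// (20 bytes); the figures below mirror those expansions.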
737  case RISCV::PseudoCALLReg:
738  case RISCV::PseudoCALL:
739  case RISCV::PseudoJump:
740  case RISCV::PseudoTAIL:
741  case RISCV::PseudoLLA:
742  case RISCV::PseudoLA:
743  case RISCV::PseudoLA_TLS_IE:
744  case RISCV::PseudoLA_TLS_GD:
745  return 8;
746  case RISCV::PseudoAtomicLoadNand32:
747  case RISCV::PseudoAtomicLoadNand64:
748  return 20;
749  case RISCV::PseudoMaskedAtomicSwap32:
750  case RISCV::PseudoMaskedAtomicLoadAdd32:
751  case RISCV::PseudoMaskedAtomicLoadSub32:
752  return 28;
753  case RISCV::PseudoMaskedAtomicLoadNand32:
754  return 32;
755  case RISCV::PseudoMaskedAtomicLoadMax32:
756  case RISCV::PseudoMaskedAtomicLoadMin32:
757  return 44;
758  case RISCV::PseudoMaskedAtomicLoadUMax32:
759  case RISCV::PseudoMaskedAtomicLoadUMin32:
760  return 36;
761  case RISCV::PseudoCmpXchg32:
762  case RISCV::PseudoCmpXchg64:
763  return 16;
764  case RISCV::PseudoMaskedCmpXchg32:
765  return 32;
766  case TargetOpcode::INLINEASM:
767  case TargetOpcode::INLINEASM_BR: {
768  const MachineFunction &MF = *MI.getParent()->getParent();
769  const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
770  return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
771  *TM.getMCAsmInfo());
772  }
773  case RISCV::PseudoVSPILL2_M1:
774  case RISCV::PseudoVSPILL2_M2:
775  case RISCV::PseudoVSPILL2_M4:
776  case RISCV::PseudoVSPILL3_M1:
777  case RISCV::PseudoVSPILL3_M2:
778  case RISCV::PseudoVSPILL4_M1:
779  case RISCV::PseudoVSPILL4_M2:
780  case RISCV::PseudoVSPILL5_M1:
781  case RISCV::PseudoVSPILL6_M1:
782  case RISCV::PseudoVSPILL7_M1:
783  case RISCV::PseudoVSPILL8_M1:
784  case RISCV::PseudoVRELOAD2_M1:
785  case RISCV::PseudoVRELOAD2_M2:
786  case RISCV::PseudoVRELOAD2_M4:
787  case RISCV::PseudoVRELOAD3_M1:
788  case RISCV::PseudoVRELOAD3_M2:
789  case RISCV::PseudoVRELOAD4_M1:
790  case RISCV::PseudoVRELOAD4_M2:
791  case RISCV::PseudoVRELOAD5_M1:
792  case RISCV::PseudoVRELOAD6_M1:
793  case RISCV::PseudoVRELOAD7_M1:
794  case RISCV::PseudoVRELOAD8_M1: {
795  // The values are determined based on expandVSPILL and expandVRELOAD that
796  // expand the pseudos depending on NF.
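// Roughly, each Zvlsseg spill/reload becomes NF whole-register memory ops
// interleaved with NF - 1 pointer adjustments, each 4 bytes, giving
// 4 * (2 * NF - 1) bytes (e.g. 12 bytes for NF == 2).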
797  unsigned NF = isRVVSpillForZvlsseg(Opcode)->first;
798  return 4 * (2 * NF - 1);
799  }
800  }
801 }
802 
803 bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
804  const unsigned Opcode = MI.getOpcode();
805  switch (Opcode) {
806  default:
807  break;
808  case RISCV::FSGNJ_D:
809  case RISCV::FSGNJ_S:
810  // The canonical floating-point move is fsgnj rd, rs, rs.
811  return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
812  MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
813  case RISCV::ADDI:
814  case RISCV::ORI:
815  case RISCV::XORI:
816  return (MI.getOperand(1).isReg() &&
817  MI.getOperand(1).getReg() == RISCV::X0) ||
818  (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
819  }
820  return MI.isAsCheapAsAMove();
821 }
822 
823 Optional<DestSourcePair>
824 RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
825  if (MI.isMoveReg())
826  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
827  switch (MI.getOpcode()) {
828  default:
829  break;
830  case RISCV::ADDI:
831  // Operand 1 can be a frameindex but callers expect registers
832  if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
833  MI.getOperand(2).getImm() == 0)
834  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
835  break;
836  case RISCV::FSGNJ_D:
837  case RISCV::FSGNJ_S:
838  // The canonical floating-point move is fsgnj rd, rs, rs.
839  if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
840  MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
841  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
842  break;
843  }
844  return None;
845 }
846 
847 bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
848  StringRef &ErrInfo) const {
849  const MCInstrInfo *MCII = STI.getInstrInfo();
850  MCInstrDesc const &Desc = MCII->get(MI.getOpcode());
851 
852  for (auto &OI : enumerate(Desc.operands())) {
853  unsigned OpType = OI.value().OperandType;
854  if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
855  OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
856  const MachineOperand &MO = MI.getOperand(OI.index());
857  if (MO.isImm()) {
858  int64_t Imm = MO.getImm();
859  bool Ok;
860  switch (OpType) {
861  default:
862  llvm_unreachable("Unexpected operand type");
863  case RISCVOp::OPERAND_UIMM4:
864  Ok = isUInt<4>(Imm);
865  break;
866  case RISCVOp::OPERAND_UIMM5:
867  Ok = isUInt<5>(Imm);
868  break;
869  case RISCVOp::OPERAND_UIMM12:
870  Ok = isUInt<12>(Imm);
871  break;
872  case RISCVOp::OPERAND_SIMM12:
873  Ok = isInt<12>(Imm);
874  break;
875  case RISCVOp::OPERAND_UIMM20:
876  Ok = isUInt<20>(Imm);
877  break;
878  case RISCVOp::OPERAND_UIMMLOG2XLEN:
879  if (STI.getTargetTriple().isArch64Bit())
880  Ok = isUInt<6>(Imm);
881  else
882  Ok = isUInt<5>(Imm);
883  break;
884  }
885  if (!Ok) {
886  ErrInfo = "Invalid immediate";
887  return false;
888  }
889  }
890  }
891  }
892 
893  return true;
894 }
895 
896 // Return true if we can get the base operand and byte offset of an instruction,
897 // plus the memory width. Width is the size of memory that is being loaded/stored.
898 bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
899  const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
900  unsigned &Width, const TargetRegisterInfo *TRI) const {
901  if (!LdSt.mayLoadOrStore())
902  return false;
903 
904  // Here we assume the standard RISC-V ISA, which uses a base+offset
905  // addressing mode. You'll need to relax these conditions to support custom
906  // load/store instructions.
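// For example, for "LW rd, 8(rs1)" this returns BaseReg = rs1 and Offset = 8,
// with Width taken from the attached memory operand.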
907  if (LdSt.getNumExplicitOperands() != 3)
908  return false;
909  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
910  return false;
911 
912  if (!LdSt.hasOneMemOperand())
913  return false;
914 
915  Width = (*LdSt.memoperands_begin())->getSize();
916  BaseReg = &LdSt.getOperand(1);
917  Offset = LdSt.getOperand(2).getImm();
918  return true;
919 }
920 
921 bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
922  const MachineInstr &MIa, const MachineInstr &MIb) const {
923  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
924  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
925 
926  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
927  MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
928  return false;
929 
930  // Retrieve the base register, offset from the base register and width. Width
931  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
932  // base registers are identical, and the offset of a lower memory access +
933  // the width doesn't overlap the offset of a higher memory access,
934  // then the memory accesses are different.
935  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
936  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
937  int64_t OffsetA = 0, OffsetB = 0;
938  unsigned int WidthA = 0, WidthB = 0;
939  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
940  getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
941  if (BaseOpA->isIdenticalTo(*BaseOpB)) {
942  int LowOffset = std::min(OffsetA, OffsetB);
943  int HighOffset = std::max(OffsetA, OffsetB);
944  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
945  if (LowOffset + LowWidth <= HighOffset)
946  return true;
947  }
948  }
949  return false;
950 }
951 
952 std::pair<unsigned, unsigned>
953 RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
954  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
955  return std::make_pair(TF & Mask, TF & ~Mask);
956 }
957 
958 ArrayRef<std::pair<unsigned, const char *>>
959 RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
960  using namespace RISCVII;
961  static const std::pair<unsigned, const char *> TargetFlags[] = {
962  {MO_CALL, "riscv-call"},
963  {MO_PLT, "riscv-plt"},
964  {MO_LO, "riscv-lo"},
965  {MO_HI, "riscv-hi"},
966  {MO_PCREL_LO, "riscv-pcrel-lo"},
967  {MO_PCREL_HI, "riscv-pcrel-hi"},
968  {MO_GOT_HI, "riscv-got-hi"},
969  {MO_TPREL_LO, "riscv-tprel-lo"},
970  {MO_TPREL_HI, "riscv-tprel-hi"},
971  {MO_TPREL_ADD, "riscv-tprel-add"},
972  {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
973  {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
974  return makeArrayRef(TargetFlags);
975 }
976 bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
977  MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
978  const Function &F = MF.getFunction();
979 
980  // Can F be deduplicated by the linker? If it can, don't outline from it.
981  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
982  return false;
983 
984  // Don't outline from functions with section markings; the program could
985  // expect that all the code is in the named section.
986  if (F.hasSection())
987  return false;
988 
989  // It's safe to outline from MF.
990  return true;
991 }
992 
993 bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
994  unsigned &Flags) const {
995  // More accurate safety checking is done in getOutliningCandidateInfo.
996  return true;
997 }
998 
999 // Enum values indicating how an outlined call should be constructed.
1000 enum MachineOutlinerConstructionID {
1001  MachineOutlinerDefault
1002 };
1003 
1004 outliner::OutlinedFunction RISCVInstrInfo::getOutliningCandidateInfo(
1005  std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
1006 
1007  // First we need to filter out candidates where the X5 register (i.e. t0)
1008  // can't be used to set up the function call.
1009  auto CannotInsertCall = [](outliner::Candidate &C) {
1010  const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
1011 
1012  C.initLRU(*TRI);
1013  LiveRegUnits LRU = C.LRU;
1014  return !LRU.available(RISCV::X5);
1015  };
1016 
1017  llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
1018 
1019  // If the sequence doesn't have enough candidates left, then we're done.
1020  if (RepeatedSequenceLocs.size() < 2)
1021  return outliner::OutlinedFunction();
1022 
1023  unsigned SequenceSize = 0;
1024 
1025  auto I = RepeatedSequenceLocs[0].front();
1026  auto E = std::next(RepeatedSequenceLocs[0].back());
1027  for (; I != E; ++I)
1028  SequenceSize += getInstSizeInBytes(*I);
1029 
1030  // call t0, function = 8 bytes.
1031  unsigned CallOverhead = 8;
1032  for (auto &C : RepeatedSequenceLocs)
1033  C.setCallInfo(MachineOutlinerDefault, CallOverhead);
1034 
1035  // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
1036  unsigned FrameOverhead = 4;
1037  if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
1038  .getFeatureBits()[RISCV::FeatureStdExtC])
1039  FrameOverhead = 2;
1040 
1041  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
1042  FrameOverhead, MachineOutlinerDefault);
1043 }
1044 
1045 outliner::InstrType
1046 RISCVInstrInfo::getOutliningType(MachineBasicBlock::iterator &MBBI,
1047  unsigned Flags) const {
1048  MachineInstr &MI = *MBBI;
1049  MachineBasicBlock *MBB = MI.getParent();
1050  const TargetRegisterInfo *TRI =
1051  MBB->getParent()->getSubtarget().getRegisterInfo();
1052 
1053  // Positions generally can't safely be outlined.
1054  if (MI.isPosition()) {
1055  // We can manually strip out CFI instructions later.
1056  if (MI.isCFIInstruction())
1057  return outliner::InstrType::Invisible;
1058 
1059  return outliner::InstrType::Illegal;
1060  }
1061 
1062  // Don't trust the user to write safe inline assembly.
1063  if (MI.isInlineAsm())
1064  return outliner::InstrType::Illegal;
1065 
1066  // We can't outline branches to other basic blocks.
1067  if (MI.isTerminator() && !MBB->succ_empty())
1068  return outliner::InstrType::Illegal;
1069 
1070  // We need support for tail calls to outlined functions before return
1071  // statements can be allowed.
1072  if (MI.isReturn())
1073  return outliner::InstrType::Illegal;
1074 
1075  // Don't allow modifying the X5 register which we use for return addresses for
1076  // these outlined functions.
1077  if (MI.modifiesRegister(RISCV::X5, TRI) ||
1078  MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
1079  return outliner::InstrType::Illegal;
1080 
1081  // Make sure the operands don't reference something unsafe.
1082  for (const auto &MO : MI.operands())
1083  if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI())
1084  return outliner::InstrType::Illegal;
1085 
1086  // Don't allow instructions which won't be materialized to impact outlining
1087  // analysis.
1088  if (MI.isMetaInstruction())
1089  return outliner::InstrType::Invisible;
1090 
1091  return outliner::InstrType::Legal;
1092 }
1093 
1094 void RISCVInstrInfo::buildOutlinedFrame(
1095  MachineBasicBlock &MBB, MachineFunction &MF,
1096  const outliner::OutlinedFunction &OF) const {
1097 
1098  // Strip out any CFI instructions
1099  bool Changed = true;
1100  while (Changed) {
1101  Changed = false;
1102  auto I = MBB.begin();
1103  auto E = MBB.end();
1104  for (; I != E; ++I) {
1105  if (I->isCFIInstruction()) {
1106  I->removeFromParent();
1107  Changed = true;
1108  break;
1109  }
1110  }
1111  }
1112 
1113  MBB.addLiveIn(RISCV::X5);
1114 
1115  // Add in a return instruction to the end of the outlined frame.
1116  MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
1117  .addReg(RISCV::X0, RegState::Define)
1118  .addReg(RISCV::X5)
1119  .addImm(0));
1120 }
1121 
1122 MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
1123  Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
1124  MachineFunction &MF, const outliner::Candidate &C) const {
1125 
1126  // Add in a call instruction to the outlined function at the given location.
1127  It = MBB.insert(It,
1128  BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
1129  .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
1130  RISCVII::MO_CALL));
1131  return It;
1132 }
1133 
1134 // clang-format off
1135 #define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL) \
1136  RISCV::PseudoV##OP##_##TYPE##_##LMUL##_COMMUTABLE
1137 
1138 #define CASE_VFMA_OPCODE_LMULS(OP, TYPE) \
1139  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8): \
1140  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4): \
1141  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2): \
1142  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1): \
1143  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2): \
1144  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4): \
1145  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)
1146 
1147 #define CASE_VFMA_SPLATS(OP) \
1148  CASE_VFMA_OPCODE_LMULS(OP, VF16): \
1149  case CASE_VFMA_OPCODE_LMULS(OP, VF32): \
1150  case CASE_VFMA_OPCODE_LMULS(OP, VF64)
1151 // clang-format on
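// As an illustration, CASE_VFMA_SPLATS(FMADD) expands to the case labels for
// the seven LMUL variants of each of PseudoVFMADD_VF16/VF32/VF64_*_COMMUTABLE,
// so a single case below covers every scalar-splat form of that FMA pseudo.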
1152 
1153 bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
1154  unsigned &SrcOpIdx1,
1155  unsigned &SrcOpIdx2) const {
1156  const MCInstrDesc &Desc = MI.getDesc();
1157  if (!Desc.isCommutable())
1158  return false;
1159 
1160  switch (MI.getOpcode()) {
1161  case CASE_VFMA_SPLATS(FMADD):
1162  case CASE_VFMA_SPLATS(FMSUB):
1163  case CASE_VFMA_SPLATS(FMACC):
1164  case CASE_VFMA_SPLATS(FMSAC):
1165  case CASE_VFMA_SPLATS(FNMADD):
1166  case CASE_VFMA_SPLATS(FNMSUB):
1167  case CASE_VFMA_SPLATS(FNMACC):
1168  case CASE_VFMA_SPLATS(FNMSAC):
1169  case CASE_VFMA_OPCODE_LMULS(FMACC, VV):
1170  case CASE_VFMA_OPCODE_LMULS(FMSAC, VV):
1171  case CASE_VFMA_OPCODE_LMULS(FNMACC, VV):
1172  case CASE_VFMA_OPCODE_LMULS(FNMSAC, VV): {
1173  // For these instructions we can only swap operand 1 and operand 3 by
1174  // changing the opcode.
1175  unsigned CommutableOpIdx1 = 1;
1176  unsigned CommutableOpIdx2 = 3;
1177  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
1178  CommutableOpIdx2))
1179  return false;
1180  return true;
1181  }
1182  case CASE_VFMA_OPCODE_LMULS(FMADD, VV):
1183  case CASE_VFMA_OPCODE_LMULS(FMSUB, VV):
1184  case CASE_VFMA_OPCODE_LMULS(FNMADD, VV):
1185  case CASE_VFMA_OPCODE_LMULS(FNMSUB, VV): {
1186  // For these instructions we have more freedom. We can commute with the
1187  // other multiplicand or with the addend/subtrahend/minuend.
1188 
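// Roughly, for the vfmadd/vfmsub-style VV pseudos handled here, operand 1 is
// the tied destination (itself a multiplicand), operand 2 the other
// multiplicand, and operand 3 the addend/subtrahend, so indices 1-3 are the
// candidates for commutation.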
1189  // Any fixed operand must be from source 1, 2 or 3.
1190  if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
1191  return false;
1192  if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
1193  return false;
1194 
1195  // If both ops are fixed, one must be the tied source.
1196  if (SrcOpIdx1 != CommuteAnyOperandIndex &&
1197  SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
1198  return false;
1199 
1200  // Look for two different register operands assumed to be commutable
1201  // regardless of the FMA opcode. The FMA opcode is adjusted later if
1202  // needed.
1203  if (SrcOpIdx1 == CommuteAnyOperandIndex ||
1204  SrcOpIdx2 == CommuteAnyOperandIndex) {
1205  // At least one of operands to be commuted is not specified and
1206  // this method is free to choose appropriate commutable operands.
1207  unsigned CommutableOpIdx1 = SrcOpIdx1;
1208  if (SrcOpIdx1 == SrcOpIdx2) {
1209  // Both of operands are not fixed. Set one of commutable
1210  // operands to the tied source.
1211  CommutableOpIdx1 = 1;
1212  } else if (SrcOpIdx1 == CommutableOpIdx1) {
1213  // Only one of the operands is not fixed.
1214  CommutableOpIdx1 = SrcOpIdx2;
1215  }
1216 
1217  // CommutableOpIdx1 is well defined now. Let's choose another commutable
1218  // operand and assign its index to CommutableOpIdx2.
1219  unsigned CommutableOpIdx2;
1220  if (CommutableOpIdx1 != 1) {
1221  // If we haven't already used the tied source, we must use it now.
1222  CommutableOpIdx2 = 1;
1223  } else {
1224  Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
1225 
1226  // The commuted operands should have different registers.
1227  // Otherwise, the commute transformation does not change anything and
1228  // is useless. We use this as a hint to make our decision.
1229  if (Op1Reg != MI.getOperand(2).getReg())
1230  CommutableOpIdx2 = 2;
1231  else
1232  CommutableOpIdx2 = 3;
1233  }
1234 
1235  // Assign the found pair of commutable indices to SrcOpIdx1 and
1236  // SrcOpIdx2 to return those values.
1237  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
1238  CommutableOpIdx2))
1239  return false;
1240  }
1241 
1242  return true;
1243  }
1244  }
1245 
1246  return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
1247 }
1248 
1249 #define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
1250  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_COMMUTABLE: \
1251  Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_COMMUTABLE; \
1252  break;
1253 
1254 #define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
1255  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
1256  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
1257  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
1258  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
1259  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
1260  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
1261  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
1262 
1263 #define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
1264  CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF16) \
1265  CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF32) \
1266  CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF64)
1267 
1268 MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
1269  bool NewMI,
1270  unsigned OpIdx1,
1271  unsigned OpIdx2) const {
1272  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
1273  if (NewMI)
1274  return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
1275  return MI;
1276  };
1277 
1278  switch (MI.getOpcode()) {
1279  case CASE_VFMA_SPLATS(FMACC):
1280  case CASE_VFMA_SPLATS(FMADD):
1281  case CASE_VFMA_SPLATS(FMSAC):
1282  case CASE_VFMA_SPLATS(FMSUB):
1283  case CASE_VFMA_SPLATS(FNMACC):
1284  case CASE_VFMA_SPLATS(FNMADD):
1285  case CASE_VFMA_SPLATS(FNMSAC):
1286  case CASE_VFMA_SPLATS(FNMSUB):
1287  case CASE_VFMA_OPCODE_LMULS(FMACC, VV):
1288  case CASE_VFMA_OPCODE_LMULS(FMSAC, VV):
1289  case CASE_VFMA_OPCODE_LMULS(FNMACC, VV):
1290  case CASE_VFMA_OPCODE_LMULS(FNMSAC, VV): {
1291  // It only makes sense to toggle these between clobbering the
1292  // addend/subtrahend/minuend and one of the multiplicands.
1293  assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
1294  assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
1295  unsigned Opc;
1296  switch (MI.getOpcode()) {
1297  default:
1298  llvm_unreachable("Unexpected opcode");
1299  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
1300  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
1301  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSAC, FMSUB)
1302  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSUB, FMSAC)
1303  CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMACC, FNMADD)
1304  CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMADD, FNMACC)
1305  CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSAC, FNMSUB)
1306  CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSUB, FNMSAC)
1307  CASE_VFMA_CHANGE_OPCODE_LMULS(FMACC, FMADD, VV)
1308  CASE_VFMA_CHANGE_OPCODE_LMULS(FMSAC, FMSUB, VV)
1309  CASE_VFMA_CHANGE_OPCODE_LMULS(FNMACC, FNMADD, VV)
1310  CASE_VFMA_CHANGE_OPCODE_LMULS(FNMSAC, FNMSUB, VV)
1311  }
1312 
1313  auto &WorkingMI = cloneIfNew(MI);
1314  WorkingMI.setDesc(get(Opc));
1315  return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1316  OpIdx1, OpIdx2);
1317  }
1318  case CASE_VFMA_OPCODE_LMULS(FMADD, VV):
1319  case CASE_VFMA_OPCODE_LMULS(FMSUB, VV):
1320  case CASE_VFMA_OPCODE_LMULS(FNMADD, VV):
1321  case CASE_VFMA_OPCODE_LMULS(FNMSUB, VV): {
1322  assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
1323  // If one of the operands is the addend, we need to change the opcode.
1324  // Otherwise we're just swapping 2 of the multiplicands.
1325  if (OpIdx1 == 3 || OpIdx2 == 3) {
1326  unsigned Opc;
1327  switch (MI.getOpcode()) {
1328  default:
1329  llvm_unreachable("Unexpected opcode");
1330  CASE_VFMA_CHANGE_OPCODE_LMULS(FMADD, FMACC, VV)
1331  CASE_VFMA_CHANGE_OPCODE_LMULS(FMSUB, FMSAC, VV)
1332  CASE_VFMA_CHANGE_OPCODE_LMULS(FNMADD, FNMACC, VV)
1333  CASE_VFMA_CHANGE_OPCODE_LMULS(FNMSUB, FNMSAC, VV)
1334  }
1335 
1336  auto &WorkingMI = cloneIfNew(MI);
1337  WorkingMI.setDesc(get(Opc));
1338  return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1339  OpIdx1, OpIdx2);
1340  }
1341  // Let the default code handle it.
1342  break;
1343  }
1344  }
1345 
1346  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
1347 }
1348 
1349 #undef CASE_VFMA_CHANGE_OPCODE_SPLATS
1350 #undef CASE_VFMA_CHANGE_OPCODE_LMULS
1351 #undef CASE_VFMA_CHANGE_OPCODE_COMMON
1352 #undef CASE_VFMA_SPLATS
1353 #undef CASE_VFMA_OPCODE_LMULS
1354 #undef CASE_VFMA_OPCODE_COMMON
1355 
1356 Register RISCVInstrInfo::getVLENFactoredAmount(MachineFunction &MF,
1357  MachineBasicBlock &MBB,
1358  MachineBasicBlock::iterator II,
1359  int64_t Amount) const {
1360  assert(Amount > 0 && "There is no need to get VLEN scaled value.");
1361  assert(Amount % 8 == 0 &&
1362  "Reserve the stack by the multiple of one vector size.");
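// PseudoReadVLENB below materializes vlenb (VLEN / 8 bytes) into VL, which is
// then scaled by NumOfVReg = Amount / 8, using a shift when that count is a
// power of two and a MUL (requiring the M extension) otherwise.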
1363 
1364  MachineRegisterInfo &MRI = MF.getRegInfo();
1365  const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
1366  DebugLoc DL = II->getDebugLoc();
1367  int64_t NumOfVReg = Amount / 8;
1368 
1369  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1370  BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
1371  assert(isInt<12>(NumOfVReg) &&
1372  "Expect the number of vector registers within 12-bits.");
1373  if (isPowerOf2_32(NumOfVReg)) {
1374  uint32_t ShiftAmount = Log2_32(NumOfVReg);
1375  if (ShiftAmount == 0)
1376  return VL;
1377  BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
1378  .addReg(VL, RegState::Kill)
1379  .addImm(ShiftAmount);
1380  } else {
1381  Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1382  BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), N)
1383  .addReg(RISCV::X0)
1384  .addImm(NumOfVReg);
1385  if (!MF.getSubtarget<RISCVSubtarget>().hasStdExtM())
1386  MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
1387  MF.getFunction(),
1388  "M-extension must be enabled to calculate the vscaled size/offset."});
1389  BuildMI(MBB, II, DL, TII->get(RISCV::MUL), VL)
1390  .addReg(VL, RegState::Kill)
1391  .addReg(N, RegState::Kill);
1392  }
1393 
1394  return VL;
1395 }
1396 
1397 Optional<std::pair<unsigned, unsigned>>
1398 RISCVInstrInfo::isRVVSpillForZvlsseg(unsigned Opcode) const {
1399  switch (Opcode) {
1400  default:
1401  return None;
1402  case RISCV::PseudoVSPILL2_M1:
1403  case RISCV::PseudoVRELOAD2_M1:
1404  return std::make_pair(2u, 1u);
1405  case RISCV::PseudoVSPILL2_M2:
1406  case RISCV::PseudoVRELOAD2_M2:
1407  return std::make_pair(2u, 2u);
1408  case RISCV::PseudoVSPILL2_M4:
1409  case RISCV::PseudoVRELOAD2_M4:
1410  return std::make_pair(2u, 4u);
1411  case RISCV::PseudoVSPILL3_M1:
1412  case RISCV::PseudoVRELOAD3_M1:
1413  return std::make_pair(3u, 1u);
1414  case RISCV::PseudoVSPILL3_M2:
1415  case RISCV::PseudoVRELOAD3_M2:
1416  return std::make_pair(3u, 2u);
1417  case RISCV::PseudoVSPILL4_M1:
1418  case RISCV::PseudoVRELOAD4_M1:
1419  return std::make_pair(4u, 1u);
1420  case RISCV::PseudoVSPILL4_M2:
1421  case RISCV::PseudoVRELOAD4_M2:
1422  return std::make_pair(4u, 2u);
1423  case RISCV::PseudoVSPILL5_M1:
1424  case RISCV::PseudoVRELOAD5_M1:
1425  return std::make_pair(5u, 1u);
1426  case RISCV::PseudoVSPILL6_M1:
1427  case RISCV::PseudoVRELOAD6_M1:
1428  return std::make_pair(6u, 1u);
1429  case RISCV::PseudoVSPILL7_M1:
1430  case RISCV::PseudoVRELOAD7_M1:
1431  return std::make_pair(7u, 1u);
1432  case RISCV::PseudoVSPILL8_M1:
1433  case RISCV::PseudoVRELOAD8_M1:
1434  return std::make_pair(8u, 1u);
1435  }
1436 }
llvm::RISCVMatInt::Inst
Definition: RISCVMatInt.h:19
llvm::RISCVInstrInfo::reverseBranchCondition
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Definition: RISCVInstrInfo.cpp:674
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:100
llvm::RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Definition: RISCVInstrInfo.cpp:959
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:132
llvm
Definition: AllocatorList.h:23
llvm::HexagonMCInstrInfo::getDesc
const MCInstrDesc & getDesc(MCInstrInfo const &MCII, MCInst const &MCI)
Definition: HexagonMCInstrInfo.cpp:248
llvm::RISCVInstrInfo::RISCVInstrInfo
RISCVInstrInfo(RISCVSubtarget &STI)
Definition: RISCVInstrInfo.cpp:49
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::SystemZISD::TM
@ TM
Definition: SystemZISelLowering.h:65
llvm::RISCVOp::OPERAND_SIMM12
@ OPERAND_SIMM12
Definition: RISCVBaseInfo.h:117
llvm::MachineRegisterInfo::createVirtualRegister
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Definition: MachineRegisterInfo.cpp:158
llvm::DiagnosticInfoUnsupported
Diagnostic information for unsupported feature in backend.
Definition: DiagnosticInfo.h:993
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:52
llvm::MachineInstr::mayLoadOrStore
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
Definition: MachineInstr.h:1017
llvm::MachineInstr::getNumExplicitOperands
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
Definition: MachineInstr.cpp:726
llvm::MachineInstrBuilder::add
const MachineInstrBuilder & add(const MachineOperand &MO) const
Definition: MachineInstrBuilder.h:225
llvm::Function
Definition: Function.h:61
llvm::RISCVInstrInfo::getOutliningType
virtual outliner::InstrType getOutliningType(MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
Definition: RISCVInstrInfo.cpp:1046
llvm::MachineInstr::memoperands_begin
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:697
is64Bit
static bool is64Bit(const char *name)
Definition: X86Disassembler.cpp:1005
llvm::RISCVOp::OPERAND_LAST_RISCV_IMM
@ OPERAND_LAST_RISCV_IMM
Definition: RISCVBaseInfo.h:120
llvm::RegScavenger::scavengeRegisterBackwards
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the p...
Definition: RegisterScavenging.cpp:566
llvm::RegState::Dead
@ Dead
Unused definition.
Definition: MachineInstrBuilder.h:51
llvm::ARM_MB::LD
@ LD
Definition: ARMBaseInfo.h:72
contains
return AArch64::GPR64RegClass contains(Reg)
llvm::Target
Target - Wrapper for Target specific information.
Definition: TargetRegistry.h:124
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::RISCVOp::OPERAND_UIMMLOG2XLEN
@ OPERAND_UIMMLOG2XLEN
Definition: RISCVBaseInfo.h:119
llvm::MachineFunction::getMachineMemOperand
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Definition: MachineFunction.cpp:430
llvm::enumerate
detail::enumerator< R > enumerate(R &&TheRange)
Given an input range, returns a new range whose values are are pair (A,B) such that A is the 0-based ...
Definition: STLExtras.h:1923
ErrorHandling.h
llvm::erase_if
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:1674
llvm::ISD::EH_LABEL
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:978
llvm::RISCVII::MO_TLS_GOT_HI
@ MO_TLS_GOT_HI
Definition: RISCVBaseInfo.h:101
llvm::RISCVII::MO_PCREL_HI
@ MO_PCREL_HI
Definition: RISCVBaseInfo.h:96
MCInstBuilder.h
llvm::IRSimilarity::Invisible
@ Invisible
Definition: IRSimilarityIdentifier.h:75
llvm::RISCVTargetMachine
Definition: RISCVTargetMachine.h:23
llvm::LiveRegUnits::available
bool available(MCPhysReg Reg) const
Returns true if no part of physical register Reg is live.
Definition: LiveRegUnits.h:116
llvm::TargetSubtargetInfo::getRegisterInfo
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
Definition: TargetSubtargetInfo.h:124
llvm::TargetRegisterInfo
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Definition: TargetRegisterInfo.h:231
llvm::MCRegisterInfo::getEncodingValue
uint16_t getEncodingValue(MCRegister RegNo) const
Returns the encoding for RegNo.
Definition: MCRegisterInfo.h:553
llvm::RISCVII::MO_TLS_GD_HI
@ MO_TLS_GD_HI
Definition: RISCVBaseInfo.h:102
llvm::MipsII::MO_TPREL_HI
@ MO_TPREL_HI
MO_TPREL_HI/LO - Represents the hi and low part of the offset from.
Definition: MipsBaseInfo.h:73
llvm::Function::getContext
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:316
llvm::X86ISD::FNMADD
@ FNMADD
Definition: X86ISelLowering.h:549
llvm::MachineInstr::getDesc
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:475
llvm::outliner::InstrType
InstrType
Represents how an instruction should be mapped by the outliner.
Definition: MachineOutliner.h:34
llvm::MachineMemOperand
A description of a memory reference used in the backend.
Definition: MachineMemOperand.h:127
llvm::M68kII::MO_PLT
@ MO_PLT
On a symbol operand this indicates that the immediate is offset to the PLT entry of symbol name from ...
Definition: M68kBaseInfo.h:114
llvm::PPCISD::FNMSUB
@ FNMSUB
FNMSUB - Negated multiply-subtract instruction.
Definition: PPCISelLowering.h:170
llvm::RISCVInstrInfo::insertIndirectBranch
unsigned insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS=nullptr) const override
Definition: RISCVInstrInfo.cpp:638
llvm::RISCVInstrInfo::STI
const RISCVSubtarget & STI
Definition: RISCVInstrInfo.h:154
llvm::Optional
Definition: APInt.h:33
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
STLExtras.h
llvm::MCInst
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:183
llvm::RISCVInstrInfo::getBranchDestBlock
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:682
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:491
llvm::outliner::OutlinedFunction
The information necessary to create an outlined function for some class of candidate.
Definition: MachineOutliner.h:164
RISCVMatInt.h
llvm::RISCVInstrInfo::isLoadFromStackSlot
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition: RISCVInstrInfo.cpp:62
llvm::BitmaskEnumDetail::Mask
std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::RISCVInstrInfo::isBranchOffsetInRange
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
Definition: RISCVInstrInfo.cpp:689
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1567
RISCVGenInstrInfo
llvm::MachineInstr::hasOneMemOperand
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
Definition: MachineInstr.h:712
F
#define F(x, y, z)
Definition: MD5.cpp:56
llvm::MachineInstr::hasOrderedMemoryRef
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
Definition: MachineInstr.cpp:1376
MachineRegisterInfo.h
llvm::ISD::INLINEASM
@ INLINEASM
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:970
llvm::RISCVII::MO_CALL
@ MO_CALL
Definition: RISCVBaseInfo.h:91
llvm::RISCVInstrInfo::isRVVSpillForZvlsseg
Optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode) const
Definition: RISCVInstrInfo.cpp:1398
llvm::MachineBasicBlock::pred_size
unsigned pred_size() const
Definition: MachineBasicBlock.h:328
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:565
llvm::TargetInstrInfo::commuteInstructionImpl
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
Definition: TargetInstrInfo.cpp:167
llvm::MachineInstrBuilder::addMBB
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:147
llvm::MachineOperand::CreateImm
static MachineOperand CreateImm(int64_t Val)
Definition: MachineOperand.h:770
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::MachineOperand::getImm
int64_t getImm() const
Definition: MachineOperand.h:534
parseCondBranch
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
Definition: RISCVInstrInfo.cpp:469
llvm::RISCVInstrInfo::findCommutedOpIndices
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
Definition: RISCVInstrInfo.cpp:1153
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:488
llvm::IRSimilarity::Illegal
@ Illegal
Definition: IRSimilarityIdentifier.h:75
llvm::RISCVInstrInfo::analyzeBranch
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Definition: RISCVInstrInfo.cpp:499
llvm::RISCVMatInt::generateInstSeq
InstSeq generateInstSeq(int64_t Val, bool IsRV64)
Definition: RISCVMatInt.cpp:78
llvm::TargetRegisterClass
Definition: TargetRegisterInfo.h:46
TII
const HexagonInstrInfo * TII
Definition: HexagonCopyToCombine.cpp:129
llvm::RISCVII::MO_GOT_HI
@ MO_GOT_HI
Definition: RISCVBaseInfo.h:97
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:596
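A minimal worked example of the documented behavior (values chosen purely for illustration; only llvm/Support/MathExtras.h is needed):
  unsigned A = llvm::Log2_32(32); // 5
  unsigned B = llvm::Log2_32(1);  // 0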
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:196
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:49
llvm::RISCVOp::OPERAND_UIMM5
@ OPERAND_UIMM5
Definition: RISCVBaseInfo.h:115
llvm::MCInstrDesc::isCommutable
bool isCommutable() const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
Definition: MCInstrDesc.h:473
llvm::MCID::Flag
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:147
llvm::RISCVInstrInfo::decomposeMachineOperandsTargetFlags
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
Definition: RISCVInstrInfo.cpp:953
llvm::MachineBasicBlock::rend
reverse_iterator rend()
Definition: MachineBasicBlock.h:278
llvm::report_fatal_error
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:140
llvm::RegScavenger::enterBasicBlockEnd
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
Definition: RegisterScavenging.cpp:89
llvm::RISCVOp::OPERAND_UIMM12
@ OPERAND_UIMM12
Definition: RISCVBaseInfo.h:116
llvm::RISCVInstrInfo::insertOutlinedCall
virtual MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, const outliner::Candidate &C) const override
Definition: RISCVInstrInfo.cpp:1122
llvm::RISCVInstrInfo::removeBranch
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
Definition: RISCVInstrInfo.cpp:567
llvm::RegState::Define
@ Define
Register definition.
Definition: MachineInstrBuilder.h:45
llvm::RISCVInstrInfo::storeRegToStackSlot
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:242
llvm::RISCVSubtarget::getInstrInfo
const RISCVInstrInfo * getInstrInfo() const override
Definition: RISCVSubtarget.h:93
llvm::LiveRegUnits
A set of register units used to track register liveness.
Definition: LiveRegUnits.h:30
llvm::RISCVInstrInfo::getNop
MCInst getNop() const override
Definition: RISCVInstrInfo.cpp:53
llvm::isIntN
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:460
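A hedged example using the signed 12-bit range occupied by RISCV I-type immediates (the constants below are illustrative, not values from this file):
  bool Fits    = llvm::isIntN(12, 2047); // true: 2047 is the largest signed 12-bit value
  bool TooWide = llvm::isIntN(12, 2048); // false: overflows into the sign bit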
llvm::None
const NoneType None
Definition: None.h:23
llvm::RegState::Kill
@ Kill
The last use of a register.
Definition: MachineInstrBuilder.h:49
llvm::RISCVInstrInfo::areMemAccessesTriviallyDisjoint
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
Definition: RISCVInstrInfo.cpp:921
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:95
llvm::RISCVII::MO_PCREL_LO
@ MO_PCREL_LO
Definition: RISCVBaseInfo.h:95
MachineOutlinerConstructionID
MachineOutlinerConstructionID
Definition: RISCVInstrInfo.cpp:1000
llvm::MachineFunction::getSubtarget
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Definition: MachineFunction.h:555
llvm::MachineInstrBuilder::addFrameIndex
const MachineInstrBuilder & addFrameIndex(int Idx) const
Definition: MachineInstrBuilder.h:153
llvm::MachineInstrBuilder::setMIFlag
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
Definition: MachineInstrBuilder.h:279
forwardCopyWillClobberTuple
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg, unsigned NumRegs)
Definition: RISCVInstrInfo.cpp:113
llvm::MachineInstrBundleIterator::getReverse
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Definition: MachineInstrBundleIterator.h:283
llvm::isInt< 32 >
constexpr bool isInt< 32 >(int64_t x)
Definition: MathExtras.h:373
llvm::IRSimilarity::Legal
@ Legal
Definition: IRSimilarityIdentifier.h:75
llvm::MachineOperand::isReg
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Definition: MachineOperand.h:318
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:64
llvm::MachineFrameInfo::getObjectSize
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
Definition: MachineFrameInfo.h:451
llvm::ARM_MB::ST
@ ST
Definition: ARMBaseInfo.h:73
llvm::RISCVInstrInfo::isFunctionSafeToOutlineFrom
virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
Definition: RISCVInstrInfo.cpp:976
llvm::outliner::Candidate
An individual sequence of instructions to be replaced with a call to an outlined function.
Definition: MachineOutliner.h:38
llvm::RISCVOp::OPERAND_UIMM20
@ OPERAND_UIMM20
Definition: RISCVBaseInfo.h:118
MemoryLocation.h
llvm::RISCVInstrInfo::getMemOperandWithOffsetWidth
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, unsigned &Width, const TargetRegisterInfo *TRI) const
Definition: RISCVInstrInfo.cpp:898
llvm::RISCVInstrInfo::isMBBSafeToOutlineFrom
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
Definition: RISCVInstrInfo.cpp:993
llvm::RegScavenger
Definition: RegisterScavenging.h:34
llvm::MachineFrameInfo::getObjectAlign
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
Definition: MachineFrameInfo.h:471
llvm::TargetStackID::ScalableVector
@ ScalableVector
Definition: TargetFrameLowering.h:30
llvm::MCInstBuilder
Definition: MCInstBuilder.h:21
llvm::RISCVII::MO_TPREL_ADD
@ MO_TPREL_ADD
Definition: RISCVBaseInfo.h:100
llvm::MachineBasicBlock::getLastNonDebugInstr
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
Definition: MachineBasicBlock.cpp:266
MachineFunctionPass.h
llvm::RISCVSubtarget
Definition: RISCVSubtarget.h:35
llvm::X86ISD::FMSUB
@ FMSUB
Definition: X86ISelLowering.h:550
llvm::MachineFunction::getName
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
Definition: MachineFunction.cpp:522
llvm::MachineFunction::getFrameInfo
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Definition: MachineFunction.h:571
llvm::MachineBasicBlock::getParent
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
Definition: MachineBasicBlock.h:225
llvm::MachineInstrBuilder::addMemOperand
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Definition: MachineInstrBuilder.h:203
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:98
llvm::RISCVInstrInfo::isCopyInstrImpl
Optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:824
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
CASE_VFMA_SPLATS
#define CASE_VFMA_SPLATS(OP)
Definition: RISCVInstrInfo.cpp:1147
RISCV.h
llvm::MachineInstr::MIFlag
MIFlag
Definition: MachineInstr.h:80
llvm::MachineFunction
Definition: MachineFunction.h:227
CASE_VFMA_OPCODE_LMULS
#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)
Definition: RISCVInstrInfo.cpp:1138
llvm::MipsII::MO_TPREL_LO
@ MO_TPREL_LO
Definition: MipsBaseInfo.h:74
llvm::MachineBasicBlock::succ_empty
bool succ_empty() const
Definition: MachineBasicBlock.h:347
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::MachineFrameInfo::setStackID
void setStackID(int ObjectIdx, uint8_t ID)
Definition: MachineFrameInfo.h:725
llvm::RISCVInstrInfo
Definition: RISCVInstrInfo.h:27
llvm::MachineOperand::getMBB
MachineBasicBlock * getMBB() const
Definition: MachineOperand.h:549
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:167
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:57
MBBI
MachineBasicBlock MachineBasicBlock::iterator MBBI
Definition: AArch64SLSHardening.cpp:75
llvm::MachineInstr::getOpcode
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:478
llvm::RISCVSubtarget::getRegisterInfo
const RISCVRegisterInfo * getRegisterInfo() const override
Definition: RISCVSubtarget.h:94
llvm::MCInstBuilder::addImm
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Definition: MCInstBuilder.h:37
llvm::RISCVInstrInfo::movImm
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Definition: RISCVInstrInfo.cpp:428
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
uint32_t
llvm::X86ISD::FLD
@ FLD
This instruction implements an extending load to FP stack slots.
Definition: X86ISelLowering.h:810
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::RISCVInstrInfo::buildOutlinedFrame
virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Definition: RISCVInstrInfo.cpp:1094
llvm::RISCVInstrInfo::insertBranch
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
Definition: RISCVInstrInfo.cpp:601
llvm::MCRegisterInfo
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Definition: MCRegisterInfo.h:135
llvm::RISCVOp::OPERAND_UIMM4
@ OPERAND_UIMM4
Definition: RISCVBaseInfo.h:114
llvm::SignExtend64
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition: MathExtras.h:777
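A small hedged example of the template form shown above (inputs are illustrative only):
  int64_t AllOnes = llvm::SignExtend64<12>(0xFFF); // -1
  int64_t MinImm  = llvm::SignExtend64<12>(0x800); // -2048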
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:134
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::MachineBasicBlock::addLiveIn
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
Definition: MachineBasicBlock.h:367
llvm::ISD::FrameIndex
@ FrameIndex
Definition: ISDOpcodes.h:73
llvm::MachineRegisterInfo::replaceRegWith
void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
Definition: MachineRegisterInfo.cpp:380
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
llvm::LLVMContext::diagnose
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Definition: LLVMContext.cpp:228
llvm::MCInstrInfo
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:25
llvm::RISCVInstrInfo::getVLENFactoredAmount
Register getVLENFactoredAmount(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, int64_t Amount) const
Definition: RISCVInstrInfo.cpp:1356
llvm::MachineFunction::getFunction
Function & getFunction()
Return the LLVM function that this machine code represents.
Definition: MachineFunction.h:521
llvm::RISCVInstrInfo::getOutliningCandidateInfo
outliner::OutlinedFunction getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const override
Definition: RISCVInstrInfo.cpp:1004
llvm::TargetRegisterInfo::getRegSizeInBits
unsigned getRegSizeInBits(const TargetRegisterClass &RC) const
Return the size in bits of a register from class RC.
Definition: TargetRegisterInfo.h:274
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
Definition: MachineFunction.h:551
llvm::DestSourcePair
Definition: TargetInstrInfo.h:68
llvm::MachineBasicBlock::insert
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
Definition: MachineBasicBlock.cpp:1337
llvm::MachineInstr::hasUnmodeledSideEffects
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
Definition: MachineInstr.cpp:1457
llvm::ISD::INLINEASM_BR
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
Definition: ISDOpcodes.h:973
llvm::RegScavenger::setRegUsed
void setRegUsed(Register Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
Definition: RegisterScavenging.cpp:53
llvm::RISCVSubtarget::getXLen
unsigned getXLen() const
Definition: RISCVSubtarget.h:131
RISCVInstrInfo.h
llvm::MemoryLocation::UnknownSize
@ UnknownSize
Definition: MemoryLocation.h:214
llvm::RISCVInstrInfo::isStoreToStackSlot
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition: RISCVInstrInfo.cpp:89
llvm::MachineRegisterInfo::clearVirtRegs
void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
Definition: MachineRegisterInfo.cpp:202
llvm::MachineOperand::isImm
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Definition: MachineOperand.h:320
llvm::MachineMemOperand::MOStore
@ MOStore
The memory access writes data.
Definition: MachineMemOperand.h:136
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:403
llvm::makeArrayRef
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:474
llvm::RISCVInstrInfo::isAsCheapAsAMove
bool isAsCheapAsAMove(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:803
llvm::RISCVInstrInfo::commuteInstructionImpl
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Definition: RISCVInstrInfo.cpp:1268
RISCVSubtarget.h
llvm::RISCVInstrInfo::loadRegFromStackSlot
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:336
llvm::getKillRegState
unsigned getKillRegState(bool B)
Definition: MachineInstrBuilder.h:509
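In practice the returned flag is spliced straight into an addReg() call; a hedged one-liner in which MIB, SrcReg, and IsKill are placeholders:
  MIB.addReg(SrcReg, llvm::getKillRegState(IsKill)); // RegState::Kill when IsKill is true, 0 otherwise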
llvm::RISCVInstrInfo::copyPhysReg
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc) const override
Definition: RISCVInstrInfo.cpp:120
llvm::MachineFrameInfo
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Definition: MachineFrameInfo.h:107
MachineOutlinerDefault
@ MachineOutlinerDefault
Definition: RISCVInstrInfo.cpp:1001
SmallVector.h
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:995
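A hedged sketch of the usual pairing with MachineFunction::getMachineMemOperand when describing a store to frame index FI (MF and FI are assumed to be in scope):
  llvm::MachineMemOperand *MMO = MF.getMachineMemOperand(
      llvm::MachinePointerInfo::getFixedStack(MF, FI),
      llvm::MachineMemOperand::MOStore,
      MF.getFrameInfo().getObjectSize(FI),
      MF.getFrameInfo().getObjectAlign(FI));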
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:268
MachineInstrBuilder.h
llvm::ISD::MUL
@ MUL
Definition: ISDOpcodes.h:234
llvm::TargetInstrInfo::findCommutedOpIndices
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
Definition: TargetInstrInfo.cpp:296
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:329
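A hedged, generic sketch of the builder chain (MBB, MBBI, DL, TBB, and TII are assumed to be in scope; the BEQ comparing X10 against X0 is an arbitrary example, not code from this file):
  llvm::BuildMI(MBB, MBBI, DL, TII->get(RISCV::BEQ))
      .addReg(RISCV::X10)
      .addReg(RISCV::X0)
      .addMBB(TBB);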
llvm::RISCVInstrInfo::verifyInstruction
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
Definition: RISCVInstrInfo.cpp:847
RISCVMachineFunctionInfo.h
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:350
llvm::MachineBasicBlock::empty
bool empty() const
Definition: MachineBasicBlock.h:240
llvm::MCInstBuilder::addReg
MCInstBuilder & addReg(unsigned Reg)
Add a new register operand.
Definition: MCInstBuilder.h:31
CASE_VFMA_CHANGE_OPCODE_LMULS
#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
Definition: RISCVInstrInfo.cpp:1254
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::TargetRegisterInfo::getSubReg
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
Definition: TargetRegisterInfo.h:1078
getOppositeBranchOpcode
static unsigned getOppositeBranchOpcode(int Opc)
Definition: RISCVInstrInfo.cpp:480
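A hedged illustration of what this file-local helper computes when a branch condition is inverted (the call site is omitted):
  unsigned Inverted = getOppositeBranchOpcode(RISCV::BEQ); // RISCV::BNE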
llvm::MCInstrInfo::get
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:62
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
RegisterScavenging.h
llvm::RISCVSubtarget::hasStdExtM
bool hasStdExtM() const
Definition: RISCVSubtarget.h:104
CASE_VFMA_CHANGE_OPCODE_SPLATS
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
Definition: RISCVInstrInfo.cpp:1263
llvm::RISCVII::MO_DIRECT_FLAG_MASK
@ MO_DIRECT_FLAG_MASK
Definition: RISCVBaseInfo.h:107
llvm::MachineInstrBundleIterator< MachineInstr >
TargetRegistry.h
llvm::MCSubtargetInfo
Generic base class for all target subtargets.
Definition: MCSubtargetInfo.h:75
llvm::AVRII::MO_LO
@ MO_LO
On a symbol operand, this represents the lo part.
Definition: AVRInstrInfo.h:52
llvm::RISCVOp::OPERAND_FIRST_RISCV_IMM
@ OPERAND_FIRST_RISCV_IMM
Definition: RISCVBaseInfo.h:113
llvm::MCInstrDesc::operands
iterator_range< const_opInfo_iterator > operands() const
Definition: MCInstrDesc.h:236
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:270
llvm::MCInstrDesc::isConditionalBranch
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Definition: MCInstrDesc.h:309
llvm::RISCVInstrInfo::getInstSizeInBytes
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:713
llvm::MachineOperand::isIdenticalTo
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
Definition: MachineOperand.cpp:282
llvm::AVRII::MO_HI
@ MO_HI
On a symbol operand, this represents the hi part.
Definition: AVRInstrInfo.h:55
llvm::MCRegister
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:22
RISCVTargetMachine.h