LLVM 17.0.0git
RISCVInstrInfo.cpp
1//===-- RISCVInstrInfo.cpp - RISC-V Instruction Information -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the RISC-V implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "RISCVInstrInfo.h"
15#include "RISCV.h"
17#include "RISCVSubtarget.h"
18#include "RISCVTargetMachine.h"
19#include "llvm/ADT/STLExtras.h"
34
35using namespace llvm;
36
37#define GEN_CHECK_COMPRESS_INSTR
38#include "RISCVGenCompressInstEmitter.inc"
39
40#define GET_INSTRINFO_CTOR_DTOR
41#define GET_INSTRINFO_NAMED_OPS
42#include "RISCVGenInstrInfo.inc"
43
45 "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
46 cl::desc("Prefer whole register move for vector registers."));
47
49 "riscv-force-machine-combiner-strategy", cl::Hidden,
50 cl::desc("Force machine combiner to use a specific strategy for machine "
51 "trace metrics evaluation."),
52 cl::init(MachineTraceStrategy::TS_NumStrategies),
53 cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local",
54 "Local strategy."),
55 clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr",
56 "MinInstrCount strategy.")));
57
58namespace llvm::RISCVVPseudosTable {
59
60using namespace RISCV;
61
62#define GET_RISCVVPseudosTable_IMPL
63#include "RISCVGenSearchableTables.inc"
64
65} // namespace llvm::RISCVVPseudosTable
66
67RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
68 : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
69 STI(STI) {}
70
71MCInst RISCVInstrInfo::getNop() const {
72 if (STI.hasStdExtCOrZca())
73 return MCInstBuilder(RISCV::C_NOP);
74 return MCInstBuilder(RISCV::ADDI)
75 .addReg(RISCV::X0)
76 .addReg(RISCV::X0)
77 .addImm(0);
78}
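For reference (not part of this file), a tiny standalone check of the two NOP forms chosen above: the base-ISA nop is `addi x0, x0, 0`, and with the C/Zca extension the 16-bit `c.nop` (fixed encoding 0x0001) is preferred.

#include <cassert>
#include <cstdint>

// Assemble ADDI rd, rs1, imm (I-type) and confirm the canonical 32-bit NOP
// encoding 0x00000013; the compressed c.nop is the fixed 16-bit word 0x0001.
int main() {
  auto encodeADDI = [](uint32_t Rd, uint32_t Rs1, uint32_t Imm12) -> uint32_t {
    return (Imm12 << 20) | (Rs1 << 15) | (0u << 12) | (Rd << 7) | 0x13u;
  };
  assert(encodeADDI(0, 0, 0) == 0x00000013u); // addi x0, x0, 0
  return 0;
}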
79
80unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
81 int &FrameIndex) const {
82 unsigned Dummy;
83 return isLoadFromStackSlot(MI, FrameIndex, Dummy);
84}
85
86unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
87 int &FrameIndex,
88 unsigned &MemBytes) const {
89 switch (MI.getOpcode()) {
90 default:
91 return 0;
92 case RISCV::LB:
93 case RISCV::LBU:
94 MemBytes = 1;
95 break;
96 case RISCV::LH:
97 case RISCV::LHU:
98 case RISCV::FLH:
99 MemBytes = 2;
100 break;
101 case RISCV::LW:
102 case RISCV::FLW:
103 case RISCV::LWU:
104 MemBytes = 4;
105 break;
106 case RISCV::LD:
107 case RISCV::FLD:
108 MemBytes = 8;
109 break;
110 }
111
112 if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
113 MI.getOperand(2).getImm() == 0) {
114 FrameIndex = MI.getOperand(1).getIndex();
115 return MI.getOperand(0).getReg();
116 }
117
118 return 0;
119}
120
121unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
122 int &FrameIndex) const {
123 unsigned Dummy;
124 return isStoreToStackSlot(MI, FrameIndex, Dummy);
125}
126
127unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
128 int &FrameIndex,
129 unsigned &MemBytes) const {
130 switch (MI.getOpcode()) {
131 default:
132 return 0;
133 case RISCV::SB:
134 MemBytes = 1;
135 break;
136 case RISCV::SH:
137 case RISCV::FSH:
138 MemBytes = 2;
139 break;
140 case RISCV::SW:
141 case RISCV::FSW:
142 MemBytes = 4;
143 break;
144 case RISCV::SD:
145 case RISCV::FSD:
146 MemBytes = 8;
147 break;
148 }
149
150 if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
151 MI.getOperand(2).getImm() == 0) {
152 FrameIndex = MI.getOperand(1).getIndex();
153 return MI.getOperand(0).getReg();
154 }
155
156 return 0;
157}
158
159static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
160 unsigned NumRegs) {
161 return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
162}
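As an illustration (not from the file), the same overlap predicate in isolation: a forward, low-to-high copy of a register tuple clobbers sources that have not been copied yet whenever the destination encoding falls strictly inside the source range, which is why the NF > 1 copy loop in copyPhysReg below switches to reverse order in that case.

#include <cassert>

// Mirrors forwardCopyWillClobberTuple above.
static bool clobbers(unsigned DstReg, unsigned SrcReg, unsigned NumRegs) {
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
}

int main() {
  // Copying a 4-register tuple v8..v11 to v10..v13 forward would overwrite
  // v10/v11 before they are read, so the copy must go backwards.
  assert(clobbers(10, 8, 4));
  // A destination below (or entirely above) the source is safe in forward order.
  assert(!clobbers(4, 8, 4) && !clobbers(16, 8, 4));
  return 0;
}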
163
164static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
165 const MachineBasicBlock &MBB,
166 MachineBasicBlock::const_iterator MBBI,
167 MachineBasicBlock::const_iterator &DefMBBI,
168 RISCVII::VLMUL LMul) {
169 if (PreferWholeRegisterMove)
170 return false;
171
172 assert(MBBI->getOpcode() == TargetOpcode::COPY &&
173 "Unexpected COPY instruction.");
174 Register SrcReg = MBBI->getOperand(1).getReg();
175 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
176
177 bool FoundDef = false;
178 bool FirstVSetVLI = false;
179 unsigned FirstSEW = 0;
180 while (MBBI != MBB.begin()) {
181 --MBBI;
182 if (MBBI->isMetaInstruction())
183 continue;
184
185 if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
186 MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
187 MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
188 // There is a vsetvli between COPY and source define instruction.
189 // vy = def_vop ... (producing instruction)
190 // ...
191 // vsetvli
192 // ...
193 // vx = COPY vy
194 if (!FoundDef) {
195 if (!FirstVSetVLI) {
196 FirstVSetVLI = true;
197 unsigned FirstVType = MBBI->getOperand(2).getImm();
198 RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
199 FirstSEW = RISCVVType::getSEW(FirstVType);
200 // The first encountered vsetvli must have the same lmul as the
201 // register class of COPY.
202 if (FirstLMul != LMul)
203 return false;
204 }
205 // Only permit `vsetvli x0, x0, vtype` between COPY and the source
206 // define instruction.
207 if (MBBI->getOperand(0).getReg() != RISCV::X0)
208 return false;
209 if (MBBI->getOperand(1).isImm())
210 return false;
211 if (MBBI->getOperand(1).getReg() != RISCV::X0)
212 return false;
213 continue;
214 }
215
216 // MBBI is the first vsetvli before the producing instruction.
217 unsigned VType = MBBI->getOperand(2).getImm();
218 // If there is a vsetvli between COPY and the producing instruction.
219 if (FirstVSetVLI) {
220 // If SEW is different, return false.
221 if (RISCVVType::getSEW(VType) != FirstSEW)
222 return false;
223 }
224
225 // If the vsetvli is tail undisturbed, keep the whole register move.
226 if (!RISCVVType::isTailAgnostic(VType))
227 return false;
228
229 // The checking is conservative. We only have register classes for
230 // LMUL = 1/2/4/8. We should be able to convert vmv1r.v to vmv.v.v
231 // for fractional LMUL operations. However, we could not use the vsetvli
232 // lmul for widening operations. The result of widening operation is
233 // 2 x LMUL.
234 return LMul == RISCVVType::getVLMUL(VType);
235 } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
236 return false;
237 } else if (MBBI->getNumDefs()) {
238 // Check all the instructions which will change VL.
239 // For example, vleff has implicit def VL.
240 if (MBBI->modifiesRegister(RISCV::VL))
241 return false;
242
243 // Only convert whole-register copies to vmv.v.v when the defining
244 // value appears in the explicit operands.
245 for (const MachineOperand &MO : MBBI->explicit_operands()) {
246 if (!MO.isReg() || !MO.isDef())
247 continue;
248 if (!FoundDef && TRI->isSubRegisterEq(MO.getReg(), SrcReg)) {
249 // We only permit the source of the COPY to have the same LMUL as the
250 // defined operand.
251 // There are cases we need to keep the whole register copy if the LMUL
252 // is different.
253 // For example,
254 // $x0 = PseudoVSETIVLI 4, 73 // vsetivli zero, 4, e16,m2,ta,m
255 // $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2
256 // # The COPY may be created by vlmul_trunc intrinsic.
257 // $v26m2 = COPY renamable $v28m2, implicit killed $v28m4
258 //
259 // After widening, the valid value will be 4 x e32 elements. If we
260 // convert the COPY to vmv.v.v, it will only copy 4 x e16 elements.
261 // FIXME: The COPY of subregister of Zvlsseg register will not be able
262 // to convert to vmv.v.[v|i] under the constraint.
263 if (MO.getReg() != SrcReg)
264 return false;
265
266 // For widening reduction instructions with an LMUL_1 input vector,
267 // checking only the LMUL is insufficient because the reduction result
268 // is always LMUL_1.
269 // For example,
270 // $x11 = PseudoVSETIVLI 1, 64 // vsetivli a1, 1, e8, m1, ta, mu
271 // $v8m1 = PseudoVWREDSUM_VS_M1 $v26, $v27
272 // $v26 = COPY killed renamable $v8
273 // After widening, the valid value will be 1 x e16 elements. If we
274 // convert the COPY to vmv.v.v, it will only copy 1 x e8 elements.
275 uint64_t TSFlags = MBBI->getDesc().TSFlags;
276 if (RISCVII::isRVVWideningReduction(TSFlags))
277 return false;
278
279 // If the producing instruction does not depend on vsetvli, do not
280 // convert COPY to vmv.v.v. For example, VL1R_V or PseudoVRELOAD.
281 if (!RISCVII::hasSEWOp(TSFlags) || !RISCVII::hasVLOp(TSFlags))
282 return false;
283
284 // Found the definition.
285 FoundDef = true;
286 DefMBBI = MBBI;
287 break;
288 }
289 }
290 }
291 }
292
293 return false;
294}
295
296void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
297 MachineBasicBlock::iterator MBBI,
298 const DebugLoc &DL, MCRegister DstReg,
299 MCRegister SrcReg, bool KillSrc) const {
300 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
301
302 if (RISCV::GPRPF64RegClass.contains(DstReg))
303 DstReg = TRI->getSubReg(DstReg, RISCV::sub_32);
304 if (RISCV::GPRPF64RegClass.contains(SrcReg))
305 SrcReg = TRI->getSubReg(SrcReg, RISCV::sub_32);
306
307 if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
308 BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
309 .addReg(SrcReg, getKillRegState(KillSrc))
310 .addImm(0);
311 return;
312 }
313
314 // Handle copy from csr
315 if (RISCV::VCSRRegClass.contains(SrcReg) &&
316 RISCV::GPRRegClass.contains(DstReg)) {
317 BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
318 .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
319 .addReg(RISCV::X0);
320 return;
321 }
322
323 // FPR->FPR copies and VR->VR copies.
324 unsigned Opc;
325 bool IsScalableVector = true;
326 unsigned NF = 1;
327 RISCVII::VLMUL LMul = RISCVII::LMUL_1;
328 unsigned SubRegIdx = RISCV::sub_vrm1_0;
329 if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
330 if (!STI.hasStdExtZfh() && STI.hasStdExtZfhmin()) {
331 // Zfhmin doesn't have FSGNJ_H, so replace FSGNJ_H with FSGNJ_S.
332 DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
333 &RISCV::FPR32RegClass);
334 SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
335 &RISCV::FPR32RegClass);
336 Opc = RISCV::FSGNJ_S;
337 } else {
338 Opc = RISCV::FSGNJ_H;
339 }
340 IsScalableVector = false;
341 } else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
342 Opc = RISCV::FSGNJ_S;
343 IsScalableVector = false;
344 } else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
345 Opc = RISCV::FSGNJ_D;
346 IsScalableVector = false;
347 } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
348 Opc = RISCV::VMV1R_V;
349 LMul = RISCVII::LMUL_1;
350 } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
351 Opc = RISCV::VMV2R_V;
352 LMul = RISCVII::LMUL_2;
353 } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
354 Opc = RISCV::VMV4R_V;
355 LMul = RISCVII::LMUL_4;
356 } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
357 Opc = RISCV::VMV8R_V;
358 LMul = RISCVII::LMUL_8;
359 } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
360 Opc = RISCV::VMV1R_V;
361 SubRegIdx = RISCV::sub_vrm1_0;
362 NF = 2;
363 LMul = RISCVII::LMUL_1;
364 } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
365 Opc = RISCV::VMV2R_V;
366 SubRegIdx = RISCV::sub_vrm2_0;
367 NF = 2;
368 LMul = RISCVII::LMUL_2;
369 } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
370 Opc = RISCV::VMV4R_V;
371 SubRegIdx = RISCV::sub_vrm4_0;
372 NF = 2;
373 LMul = RISCVII::LMUL_4;
374 } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
375 Opc = RISCV::VMV1R_V;
376 SubRegIdx = RISCV::sub_vrm1_0;
377 NF = 3;
378 LMul = RISCVII::LMUL_1;
379 } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
380 Opc = RISCV::VMV2R_V;
381 SubRegIdx = RISCV::sub_vrm2_0;
382 NF = 3;
383 LMul = RISCVII::LMUL_2;
384 } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
385 Opc = RISCV::VMV1R_V;
386 SubRegIdx = RISCV::sub_vrm1_0;
387 NF = 4;
388 LMul = RISCVII::LMUL_1;
389 } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
390 Opc = RISCV::VMV2R_V;
391 SubRegIdx = RISCV::sub_vrm2_0;
392 NF = 4;
393 LMul = RISCVII::LMUL_2;
394 } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
395 Opc = RISCV::VMV1R_V;
396 SubRegIdx = RISCV::sub_vrm1_0;
397 NF = 5;
398 LMul = RISCVII::LMUL_1;
399 } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
400 Opc = RISCV::VMV1R_V;
401 SubRegIdx = RISCV::sub_vrm1_0;
402 NF = 6;
403 LMul = RISCVII::LMUL_1;
404 } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
405 Opc = RISCV::VMV1R_V;
406 SubRegIdx = RISCV::sub_vrm1_0;
407 NF = 7;
408 LMul = RISCVII::LMUL_1;
409 } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
410 Opc = RISCV::VMV1R_V;
411 SubRegIdx = RISCV::sub_vrm1_0;
412 NF = 8;
413 LMul = RISCVII::LMUL_1;
414 } else {
415 llvm_unreachable("Impossible reg-to-reg copy");
416 }
417
418 if (IsScalableVector) {
419 bool UseVMV_V_V = false;
420 MachineBasicBlock::const_iterator DefMBBI;
421 unsigned VIOpc;
422 if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
423 UseVMV_V_V = true;
424 // We only need to handle LMUL = 1/2/4/8 here because we only define
425 // vector register classes for LMUL = 1/2/4/8.
426 switch (LMul) {
427 default:
428 llvm_unreachable("Impossible LMUL for vector register copy.");
429 case RISCVII::LMUL_1:
430 Opc = RISCV::PseudoVMV_V_V_M1;
431 VIOpc = RISCV::PseudoVMV_V_I_M1;
432 break;
433 case RISCVII::LMUL_2:
434 Opc = RISCV::PseudoVMV_V_V_M2;
435 VIOpc = RISCV::PseudoVMV_V_I_M2;
436 break;
437 case RISCVII::LMUL_4:
438 Opc = RISCV::PseudoVMV_V_V_M4;
439 VIOpc = RISCV::PseudoVMV_V_I_M4;
440 break;
441 case RISCVII::LMUL_8:
442 Opc = RISCV::PseudoVMV_V_V_M8;
443 VIOpc = RISCV::PseudoVMV_V_I_M8;
444 break;
445 }
446 }
447
448 bool UseVMV_V_I = false;
449 if (UseVMV_V_V && (DefMBBI->getOpcode() == VIOpc)) {
450 UseVMV_V_I = true;
451 Opc = VIOpc;
452 }
453
454 if (NF == 1) {
455 auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), DstReg);
456 if (UseVMV_V_I)
457 MIB = MIB.add(DefMBBI->getOperand(1));
458 else
459 MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc));
460 if (UseVMV_V_V) {
461 const MCInstrDesc &Desc = DefMBBI->getDesc();
462 MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
463 MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
464 MIB.addReg(RISCV::VL, RegState::Implicit);
465 MIB.addReg(RISCV::VTYPE, RegState::Implicit);
466 }
467 } else {
468 int I = 0, End = NF, Incr = 1;
469 unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
470 unsigned DstEncoding = TRI->getEncodingValue(DstReg);
471 unsigned LMulVal;
472 bool Fractional;
473 std::tie(LMulVal, Fractional) = RISCVVType::decodeVLMUL(LMul);
474 assert(!Fractional && "It is impossible to be fractional lmul here.");
475 if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
476 I = NF - 1;
477 End = -1;
478 Incr = -1;
479 }
480
481 for (; I != End; I += Incr) {
482 auto MIB = BuildMI(MBB, MBBI, DL, get(Opc),
483 TRI->getSubReg(DstReg, SubRegIdx + I));
484 if (UseVMV_V_I)
485 MIB = MIB.add(DefMBBI->getOperand(1));
486 else
487 MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
488 getKillRegState(KillSrc));
489 if (UseVMV_V_V) {
490 const MCInstrDesc &Desc = DefMBBI->getDesc();
491 MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
492 MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
493 MIB.addReg(RISCV::VL, RegState::Implicit);
494 MIB.addReg(RISCV::VTYPE, RegState::Implicit);
495 }
496 }
497 }
498 } else {
499 BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
500 .addReg(SrcReg, getKillRegState(KillSrc))
501 .addReg(SrcReg, getKillRegState(KillSrc));
502 }
503}
504
505void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
506 MachineBasicBlock::iterator I,
507 Register SrcReg, bool IsKill, int FI,
508 const TargetRegisterClass *RC,
509 const TargetRegisterInfo *TRI,
510 Register VReg) const {
511 DebugLoc DL;
512 if (I != MBB.end())
513 DL = I->getDebugLoc();
514
515 MachineFunction *MF = MBB.getParent();
516 MachineFrameInfo &MFI = MF->getFrameInfo();
517
518 unsigned Opcode;
519 bool IsScalableVector = true;
520 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
521 Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
522 RISCV::SW : RISCV::SD;
523 IsScalableVector = false;
524 } else if (RISCV::GPRPF64RegClass.hasSubClassEq(RC)) {
525 Opcode = RISCV::PseudoRV32ZdinxSD;
526 IsScalableVector = false;
527 } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
528 Opcode = RISCV::FSH;
529 IsScalableVector = false;
530 } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
531 Opcode = RISCV::FSW;
532 IsScalableVector = false;
533 } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
534 Opcode = RISCV::FSD;
535 IsScalableVector = false;
536 } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
537 Opcode = RISCV::VS1R_V;
538 } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
539 Opcode = RISCV::VS2R_V;
540 } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
541 Opcode = RISCV::VS4R_V;
542 } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
543 Opcode = RISCV::VS8R_V;
544 } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
545 Opcode = RISCV::PseudoVSPILL2_M1;
546 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
547 Opcode = RISCV::PseudoVSPILL2_M2;
548 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
549 Opcode = RISCV::PseudoVSPILL2_M4;
550 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
551 Opcode = RISCV::PseudoVSPILL3_M1;
552 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
553 Opcode = RISCV::PseudoVSPILL3_M2;
554 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
555 Opcode = RISCV::PseudoVSPILL4_M1;
556 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
557 Opcode = RISCV::PseudoVSPILL4_M2;
558 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
559 Opcode = RISCV::PseudoVSPILL5_M1;
560 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
561 Opcode = RISCV::PseudoVSPILL6_M1;
562 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
563 Opcode = RISCV::PseudoVSPILL7_M1;
564 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
565 Opcode = RISCV::PseudoVSPILL8_M1;
566 else
567 llvm_unreachable("Can't store this register to stack slot");
568
569 if (IsScalableVector) {
570 MachineMemOperand *MMO = MF->getMachineMemOperand(
571 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
572 MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
573
574 MFI.setStackID(FI, TargetStackID::ScalableVector);
575 BuildMI(MBB, I, DL, get(Opcode))
576 .addReg(SrcReg, getKillRegState(IsKill))
577 .addFrameIndex(FI)
578 .addMemOperand(MMO);
579 } else {
580 MachineMemOperand *MMO = MF->getMachineMemOperand(
581 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
582 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
583
584 BuildMI(MBB, I, DL, get(Opcode))
585 .addReg(SrcReg, getKillRegState(IsKill))
586 .addFrameIndex(FI)
587 .addImm(0)
588 .addMemOperand(MMO);
589 }
590}
591
592void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
593 MachineBasicBlock::iterator I,
594 Register DstReg, int FI,
595 const TargetRegisterClass *RC,
596 const TargetRegisterInfo *TRI,
597 Register VReg) const {
598 DebugLoc DL;
599 if (I != MBB.end())
600 DL = I->getDebugLoc();
601
602 MachineFunction *MF = MBB.getParent();
603 MachineFrameInfo &MFI = MF->getFrameInfo();
604
605 unsigned Opcode;
606 bool IsScalableVector = true;
607 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
608 Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
609 RISCV::LW : RISCV::LD;
610 IsScalableVector = false;
611 } else if (RISCV::GPRPF64RegClass.hasSubClassEq(RC)) {
612 Opcode = RISCV::PseudoRV32ZdinxLD;
613 IsScalableVector = false;
614 } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
615 Opcode = RISCV::FLH;
616 IsScalableVector = false;
617 } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
618 Opcode = RISCV::FLW;
619 IsScalableVector = false;
620 } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
621 Opcode = RISCV::FLD;
622 IsScalableVector = false;
623 } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
624 Opcode = RISCV::VL1RE8_V;
625 } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
626 Opcode = RISCV::VL2RE8_V;
627 } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
628 Opcode = RISCV::VL4RE8_V;
629 } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
630 Opcode = RISCV::VL8RE8_V;
631 } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
632 Opcode = RISCV::PseudoVRELOAD2_M1;
633 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
634 Opcode = RISCV::PseudoVRELOAD2_M2;
635 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
636 Opcode = RISCV::PseudoVRELOAD2_M4;
637 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
638 Opcode = RISCV::PseudoVRELOAD3_M1;
639 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
640 Opcode = RISCV::PseudoVRELOAD3_M2;
641 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
642 Opcode = RISCV::PseudoVRELOAD4_M1;
643 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
644 Opcode = RISCV::PseudoVRELOAD4_M2;
645 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
646 Opcode = RISCV::PseudoVRELOAD5_M1;
647 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
648 Opcode = RISCV::PseudoVRELOAD6_M1;
649 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
650 Opcode = RISCV::PseudoVRELOAD7_M1;
651 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
652 Opcode = RISCV::PseudoVRELOAD8_M1;
653 else
654 llvm_unreachable("Can't load this register from stack slot");
655
656 if (IsScalableVector) {
657 MachineMemOperand *MMO = MF->getMachineMemOperand(
658 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
659 MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
660
661 MFI.setStackID(FI, TargetStackID::ScalableVector);
662 BuildMI(MBB, I, DL, get(Opcode), DstReg)
663 .addFrameIndex(FI)
664 .addMemOperand(MMO);
665 } else {
666 MachineMemOperand *MMO = MF->getMachineMemOperand(
667 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
668 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
669
670 BuildMI(MBB, I, DL, get(Opcode), DstReg)
671 .addFrameIndex(FI)
672 .addImm(0)
673 .addMemOperand(MMO);
674 }
675}
676
677MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
678 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
679 MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
680 VirtRegMap *VRM) const {
681 const MachineFrameInfo &MFI = MF.getFrameInfo();
682
683 // The below optimizations narrow the load so they are only valid for little
684 // endian.
685 // TODO: Support big endian by adding an offset into the frame object?
686 if (MF.getDataLayout().isBigEndian())
687 return nullptr;
688
689 // Fold load from stack followed by sext.w into lw.
690 // TODO: Fold with sext.b, sext.h, zext.b, zext.h, zext.w?
691 if (Ops.size() != 1 || Ops[0] != 1)
692 return nullptr;
693
694 unsigned LoadOpc;
695 switch (MI.getOpcode()) {
696 default:
697 if (RISCV::isSEXT_W(MI)) {
698 LoadOpc = RISCV::LW;
699 break;
700 }
701 if (RISCV::isZEXT_W(MI)) {
702 LoadOpc = RISCV::LWU;
703 break;
704 }
705 if (RISCV::isZEXT_B(MI)) {
706 LoadOpc = RISCV::LBU;
707 break;
708 }
709 return nullptr;
710 case RISCV::SEXT_H:
711 LoadOpc = RISCV::LH;
712 break;
713 case RISCV::SEXT_B:
714 LoadOpc = RISCV::LB;
715 break;
716 case RISCV::ZEXT_H_RV32:
717 case RISCV::ZEXT_H_RV64:
718 LoadOpc = RISCV::LHU;
719 break;
720 }
721
722 MachineMemOperand *MMO = MF.getMachineMemOperand(
723 MachinePointerInfo::getFixedStack(MF, FrameIndex),
724 MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex),
725 MFI.getObjectAlign(FrameIndex));
726
727 Register DstReg = MI.getOperand(0).getReg();
728 return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(LoadOpc),
729 DstReg)
730 .addFrameIndex(FrameIndex)
731 .addImm(0)
732 .addMemOperand(MMO);
733}
734
735void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
736 MachineBasicBlock::iterator MBBI,
737 const DebugLoc &DL, Register DstReg, uint64_t Val,
738 MachineInstr::MIFlag Flag) const {
739 Register SrcReg = RISCV::X0;
740
741 if (!STI.is64Bit() && !isInt<32>(Val))
742 report_fatal_error("Should only materialize 32-bit constants for RV32");
743
744 RISCVMatInt::InstSeq Seq =
745 RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());
746 assert(!Seq.empty());
747
748 for (RISCVMatInt::Inst &Inst : Seq) {
749 switch (Inst.getOpndKind()) {
750 case RISCVMatInt::Imm:
751 BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
752 .addImm(Inst.getImm())
753 .setMIFlag(Flag);
754 break;
755 case RISCVMatInt::RegX0:
756 BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
757 .addReg(SrcReg, RegState::Kill)
758 .addReg(RISCV::X0)
759 .setMIFlag(Flag);
760 break;
761 case RISCVMatInt::RegReg:
762 BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
763 .addReg(SrcReg, RegState::Kill)
764 .addReg(SrcReg, RegState::Kill)
765 .setMIFlag(Flag);
766 break;
767 case RISCVMatInt::RegImm:
768 BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
769 .addReg(SrcReg, RegState::Kill)
770 .addImm(Inst.getImm())
771 .setMIFlag(Flag);
772 break;
773 }
774
775 // Only the first instruction has X0 as its source.
776 SrcReg = DstReg;
777 }
778}
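A minimal standalone sketch (illustrative values, not part of the file) of the LUI+ADDI split that RISCVMatInt::generateInstSeq emits for simple 32-bit constants: because the 12-bit ADDI immediate is sign-extended, the upper 20 bits are rounded with +0x800 before forming the LUI immediate.

#include <cassert>
#include <cstdint>

// Split Val into a LUI immediate (hi20) and an ADDI immediate (lo12) so that
//   lui rd, hi20 ; addi rd, rd, lo12
// reproduces Val. lo12 is sign-extended, hence the +0x800 rounding of hi20.
int main() {
  auto materialize = [](int32_t Val) -> int32_t {
    uint32_t U = static_cast<uint32_t>(Val);
    uint32_t Hi20 = ((U + 0x800u) >> 12) & 0xFFFFFu;
    int32_t Lo12 = static_cast<int32_t>(U << 20) >> 20; // sign-extend low 12 bits
    return static_cast<int32_t>(Hi20 << 12) + Lo12;
  };
  assert(materialize(0x12345678) == 0x12345678);
  assert(materialize(-1234) == -1234);
  return 0;
}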
779
780static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc) {
781 switch (Opc) {
782 default:
783 return RISCVCC::COND_INVALID;
784 case RISCV::BEQ:
785 return RISCVCC::COND_EQ;
786 case RISCV::BNE:
787 return RISCVCC::COND_NE;
788 case RISCV::BLT:
789 return RISCVCC::COND_LT;
790 case RISCV::BGE:
791 return RISCVCC::COND_GE;
792 case RISCV::BLTU:
793 return RISCVCC::COND_LTU;
794 case RISCV::BGEU:
795 return RISCVCC::COND_GEU;
796 }
797}
798
799// The contents of values added to Cond are not examined outside of
800// RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
801// push BranchOpcode, Reg1, Reg2.
802static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
803 SmallVectorImpl<MachineOperand> &Cond) {
804 // Block ends with fall-through condbranch.
805 assert(LastInst.getDesc().isConditionalBranch() &&
806 "Unknown conditional branch");
807 Target = LastInst.getOperand(2).getMBB();
808 unsigned CC = getCondFromBranchOpc(LastInst.getOpcode());
809 Cond.push_back(MachineOperand::CreateImm(CC));
810 Cond.push_back(LastInst.getOperand(0));
811 Cond.push_back(LastInst.getOperand(1));
812}
813
814const MCInstrDesc &RISCVInstrInfo::getBrCond(RISCVCC::CondCode CC) const {
815 switch (CC) {
816 default:
817 llvm_unreachable("Unknown condition code!");
818 case RISCVCC::COND_EQ:
819 return get(RISCV::BEQ);
820 case RISCVCC::COND_NE:
821 return get(RISCV::BNE);
822 case RISCVCC::COND_LT:
823 return get(RISCV::BLT);
824 case RISCVCC::COND_GE:
825 return get(RISCV::BGE);
826 case RISCVCC::COND_LTU:
827 return get(RISCV::BLTU);
828 case RISCVCC::COND_GEU:
829 return get(RISCV::BGEU);
830 }
831}
832
833RISCVCC::CondCode RISCVCC::getOppositeBranchCondition(RISCVCC::CondCode CC) {
834 switch (CC) {
835 default:
836 llvm_unreachable("Unrecognized conditional branch");
837 case RISCVCC::COND_EQ:
838 return RISCVCC::COND_NE;
839 case RISCVCC::COND_NE:
840 return RISCVCC::COND_EQ;
841 case RISCVCC::COND_LT:
842 return RISCVCC::COND_GE;
843 case RISCVCC::COND_GE:
844 return RISCVCC::COND_LT;
845 case RISCVCC::COND_LTU:
846 return RISCVCC::COND_GEU;
847 case RISCVCC::COND_GEU:
848 return RISCVCC::COND_LTU;
849 }
850}
851
852bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
853 MachineBasicBlock *&TBB,
854 MachineBasicBlock *&FBB,
855 SmallVectorImpl<MachineOperand> &Cond,
856 bool AllowModify) const {
857 TBB = FBB = nullptr;
858 Cond.clear();
859
860 // If the block has no terminators, it just falls into the block after it.
861 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
862 if (I == MBB.end() || !isUnpredicatedTerminator(*I))
863 return false;
864
865 // Count the number of terminators and find the first unconditional or
866 // indirect branch.
867 MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
868 int NumTerminators = 0;
869 for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
870 J++) {
871 NumTerminators++;
872 if (J->getDesc().isUnconditionalBranch() ||
873 J->getDesc().isIndirectBranch()) {
874 FirstUncondOrIndirectBr = J.getReverse();
875 }
876 }
877
878 // If AllowModify is true, we can erase any terminators after
879 // FirstUncondOrIndirectBR.
880 if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
881 while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
882 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
883 NumTerminators--;
884 }
885 I = FirstUncondOrIndirectBr;
886 }
887
888 // We can't handle blocks that end in an indirect branch.
889 if (I->getDesc().isIndirectBranch())
890 return true;
891
892 // We can't handle blocks with more than 2 terminators.
893 if (NumTerminators > 2)
894 return true;
895
896 // Handle a single unconditional branch.
897 if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
898 TBB = getBranchDestBlock(*I);
899 return false;
900 }
901
902 // Handle a single conditional branch.
903 if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
904 parseCondBranch(*I, TBB, Cond);
905 return false;
906 }
907
908 // Handle a conditional branch followed by an unconditional branch.
909 if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
910 I->getDesc().isUnconditionalBranch()) {
911 parseCondBranch(*std::prev(I), TBB, Cond);
912 FBB = getBranchDestBlock(*I);
913 return false;
914 }
915
916 // Otherwise, we can't handle this.
917 return true;
918}
919
920unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
921 int *BytesRemoved) const {
922 if (BytesRemoved)
923 *BytesRemoved = 0;
924 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
925 if (I == MBB.end())
926 return 0;
927
928 if (!I->getDesc().isUnconditionalBranch() &&
929 !I->getDesc().isConditionalBranch())
930 return 0;
931
932 // Remove the branch.
933 if (BytesRemoved)
934 *BytesRemoved += getInstSizeInBytes(*I);
935 I->eraseFromParent();
936
937 I = MBB.end();
938
939 if (I == MBB.begin())
940 return 1;
941 --I;
942 if (!I->getDesc().isConditionalBranch())
943 return 1;
944
945 // Remove the branch.
946 if (BytesRemoved)
947 *BytesRemoved += getInstSizeInBytes(*I);
948 I->eraseFromParent();
949 return 2;
950}
951
952// Inserts a branch into the end of the specific MachineBasicBlock, returning
953// the number of instructions inserted.
954unsigned RISCVInstrInfo::insertBranch(
955 MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
956 ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
957 if (BytesAdded)
958 *BytesAdded = 0;
959
960 // Shouldn't be a fall through.
961 assert(TBB && "insertBranch must not be told to insert a fallthrough");
962 assert((Cond.size() == 3 || Cond.size() == 0) &&
963 "RISC-V branch conditions have two components!");
964
965 // Unconditional branch.
966 if (Cond.empty()) {
967 MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
968 if (BytesAdded)
969 *BytesAdded += getInstSizeInBytes(MI);
970 return 1;
971 }
972
973 // Either a one or two-way conditional branch.
974 auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
975 MachineInstr &CondMI =
976 *BuildMI(&MBB, DL, getBrCond(CC)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
977 if (BytesAdded)
978 *BytesAdded += getInstSizeInBytes(CondMI);
979
980 // One-way conditional branch.
981 if (!FBB)
982 return 1;
983
984 // Two-way conditional branch.
985 MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
986 if (BytesAdded)
987 *BytesAdded += getInstSizeInBytes(MI);
988 return 2;
989}
990
991void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
992 MachineBasicBlock &DestBB,
993 MachineBasicBlock &RestoreBB,
994 const DebugLoc &DL, int64_t BrOffset,
995 RegScavenger *RS) const {
996 assert(RS && "RegScavenger required for long branching");
997 assert(MBB.empty() &&
998 "new block should be inserted for expanding unconditional branch");
999 assert(MBB.pred_size() == 1);
1000 assert(RestoreBB.empty() &&
1001 "restore block should be inserted for restoring clobbered registers");
1002
1002
1003 MachineFunction *MF = MBB.getParent();
1004 MachineRegisterInfo &MRI = MF->getRegInfo();
1005 RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
1006 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
1007
1008 if (!isInt<32>(BrOffset))
1010 "Branch offsets outside of the signed 32-bit range not supported");
1011
1012 // FIXME: A virtual register must be used initially, as the register
1013 // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
1014 // uses the same workaround).
1015 Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1016 auto II = MBB.end();
1017 // We may also update the jump target to RestoreBB later.
1018 MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
1019 .addReg(ScratchReg, RegState::Define | RegState::Dead)
1020 .addMBB(&DestBB, RISCVII::MO_CALL);
1021
1022 RS->enterBasicBlockEnd(MBB);
1023 Register TmpGPR =
1024 RS->scavengeRegisterBackwards(RISCV::GPRRegClass, MI.getIterator(),
1025 /*RestoreAfter=*/false, /*SpAdj=*/0,
1026 /*AllowSpill=*/false);
1027 if (TmpGPR != RISCV::NoRegister)
1028 RS->setRegUsed(TmpGPR);
1029 else {
1030 // The case when there is no scavenged register needs special handling.
1031
1032 // Pick s11 because it doesn't make a difference.
1033 TmpGPR = RISCV::X27;
1034
1035 int FrameIndex = RVFI->getBranchRelaxationScratchFrameIndex();
1036 if (FrameIndex == -1)
1037 report_fatal_error("underestimated function size");
1038
1039 storeRegToStackSlot(MBB, MI, TmpGPR, /*IsKill=*/true, FrameIndex,
1040 &RISCV::GPRRegClass, TRI, Register());
1041 TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
1042 /*SpAdj=*/0, /*FIOperandNum=*/1);
1043
1044 MI.getOperand(1).setMBB(&RestoreBB);
1045
1046 loadRegFromStackSlot(RestoreBB, RestoreBB.end(), TmpGPR, FrameIndex,
1047 &RISCV::GPRRegClass, TRI, Register());
1048 TRI->eliminateFrameIndex(RestoreBB.back(),
1049 /*SpAdj=*/0, /*FIOperandNum=*/1);
1050 }
1051
1052 MRI.replaceRegWith(ScratchReg, TmpGPR);
1053 MRI.clearVirtRegs();
1054}
1055
1056bool RISCVInstrInfo::reverseBranchCondition(
1057 SmallVectorImpl<MachineOperand> &Cond) const {
1058 assert((Cond.size() == 3) && "Invalid branch condition!");
1059 auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
1060 Cond[0].setImm(getOppositeBranchCondition(CC));
1061 return false;
1062}
1063
1064MachineBasicBlock *
1065RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
1066 assert(MI.getDesc().isBranch() && "Unexpected opcode!");
1067 // The branch target is always the last operand.
1068 int NumOp = MI.getNumExplicitOperands();
1069 return MI.getOperand(NumOp - 1).getMBB();
1070}
1071
1072bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
1073 int64_t BrOffset) const {
1074 unsigned XLen = STI.getXLen();
1075 // Ideally we could determine the supported branch offset from the
1076 // RISCVII::FormMask, but this can't be used for Pseudo instructions like
1077 // PseudoBR.
1078 switch (BranchOp) {
1079 default:
1080 llvm_unreachable("Unexpected opcode!");
1081 case RISCV::BEQ:
1082 case RISCV::BNE:
1083 case RISCV::BLT:
1084 case RISCV::BGE:
1085 case RISCV::BLTU:
1086 case RISCV::BGEU:
1087 return isIntN(13, BrOffset);
1088 case RISCV::JAL:
1089 case RISCV::PseudoBR:
1090 return isIntN(21, BrOffset);
1091 case RISCV::PseudoJump:
1092 return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
1093 }
1094}
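For illustration (not part of the file), the reachability arithmetic behind the isIntN checks above: conditional branches encode a 13-bit signed byte offset (about ±4 KiB), while JAL/PseudoBR encode a 21-bit one (about ±1 MiB).

#include <cassert>
#include <cstdint>

// Signed N-bit range test, equivalent in spirit to llvm::isIntN.
static bool fitsSignedBits(int64_t Offset, unsigned N) {
  int64_t Lo = -(int64_t(1) << (N - 1));
  int64_t Hi = (int64_t(1) << (N - 1)) - 1;
  return Offset >= Lo && Offset <= Hi;
}

int main() {
  assert(fitsSignedBits(4094, 13) && !fitsSignedBits(4096, 13));            // BEQ/BNE/...
  assert(fitsSignedBits((1 << 20) - 2, 21) && !fitsSignedBits(1 << 20, 21)); // JAL/PseudoBR
  return 0;
}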
1095
1096// If the operation has a predicated pseudo instruction, return the pseudo
1097// instruction opcode. Otherwise, return RISCV::INSTRUCTION_LIST_END.
1098// TODO: Support more operations.
1099unsigned getPredicatedOpcode(unsigned Opcode) {
1100 switch (Opcode) {
1101 case RISCV::ADD: return RISCV::PseudoCCADD; break;
1102 case RISCV::SUB: return RISCV::PseudoCCSUB; break;
1103 case RISCV::AND: return RISCV::PseudoCCAND; break;
1104 case RISCV::OR: return RISCV::PseudoCCOR; break;
1105 case RISCV::XOR: return RISCV::PseudoCCXOR; break;
1106
1107 case RISCV::ADDW: return RISCV::PseudoCCADDW; break;
1108 case RISCV::SUBW: return RISCV::PseudoCCSUBW; break;
1109 }
1110
1111 return RISCV::INSTRUCTION_LIST_END;
1112}
1113
1114/// Identify instructions that can be folded into a CCMOV instruction, and
1115/// return the defining instruction.
1116static MachineInstr *canFoldAsPredicatedOp(Register Reg,
1117 const MachineRegisterInfo &MRI,
1118 const TargetInstrInfo *TII) {
1119 if (!Reg.isVirtual())
1120 return nullptr;
1121 if (!MRI.hasOneNonDBGUse(Reg))
1122 return nullptr;
1123 MachineInstr *MI = MRI.getVRegDef(Reg);
1124 if (!MI)
1125 return nullptr;
1126 // Check if MI can be predicated and folded into the CCMOV.
1127 if (getPredicatedOpcode(MI->getOpcode()) == RISCV::INSTRUCTION_LIST_END)
1128 return nullptr;
1129 // Check if MI has any other defs or physreg uses.
1130 for (const MachineOperand &MO : llvm::drop_begin(MI->operands())) {
1131 // Reject frame index operands, PEI can't handle the predicated pseudos.
1132 if (MO.isFI() || MO.isCPI() || MO.isJTI())
1133 return nullptr;
1134 if (!MO.isReg())
1135 continue;
1136 // MI can't have any tied operands, that would conflict with predication.
1137 if (MO.isTied())
1138 return nullptr;
1139 if (MO.isDef())
1140 return nullptr;
1141 // Allow constant physregs.
1142 if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))
1143 return nullptr;
1144 }
1145 bool DontMoveAcrossStores = true;
1146 if (!MI->isSafeToMove(/* AliasAnalysis = */ nullptr, DontMoveAcrossStores))
1147 return nullptr;
1148 return MI;
1149}
1150
1151bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
1152 SmallVectorImpl<MachineOperand> &Cond,
1153 unsigned &TrueOp, unsigned &FalseOp,
1154 bool &Optimizable) const {
1155 assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1156 "Unknown select instruction");
1157 // CCMOV operands:
1158 // 0: Def.
1159 // 1: LHS of compare.
1160 // 2: RHS of compare.
1161 // 3: Condition code.
1162 // 4: False use.
1163 // 5: True use.
1164 TrueOp = 5;
1165 FalseOp = 4;
1166 Cond.push_back(MI.getOperand(1));
1167 Cond.push_back(MI.getOperand(2));
1168 Cond.push_back(MI.getOperand(3));
1169 // We can only fold when we support short forward branch opt.
1170 Optimizable = STI.hasShortForwardBranchOpt();
1171 return false;
1172}
1173
1174MachineInstr *
1175RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
1176 SmallPtrSetImpl<MachineInstr *> &SeenMIs,
1177 bool PreferFalse) const {
1178 assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1179 "Unknown select instruction");
1180 if (!STI.hasShortForwardBranchOpt())
1181 return nullptr;
1182
1183 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
1184 MachineInstr *DefMI =
1185 canFoldAsPredicatedOp(MI.getOperand(5).getReg(), MRI, this);
1186 bool Invert = !DefMI;
1187 if (!DefMI)
1188 DefMI = canFoldAsPredicatedOp(MI.getOperand(4).getReg(), MRI, this);
1189 if (!DefMI)
1190 return nullptr;
1191
1192 // Find new register class to use.
1193 MachineOperand FalseReg = MI.getOperand(Invert ? 5 : 4);
1194 Register DestReg = MI.getOperand(0).getReg();
1195 const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
1196 if (!MRI.constrainRegClass(DestReg, PreviousClass))
1197 return nullptr;
1198
1199 unsigned PredOpc = getPredicatedOpcode(DefMI->getOpcode());
1200 assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");
1201
1202 // Create a new predicated version of DefMI.
1203 MachineInstrBuilder NewMI =
1204 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(PredOpc), DestReg);
1205
1206 // Copy the condition portion.
1207 NewMI.add(MI.getOperand(1));
1208 NewMI.add(MI.getOperand(2));
1209
1210 // Add condition code, inverting if necessary.
1211 auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
1212 if (Invert)
1213 CC = RISCVCC::getOppositeBranchCondition(CC);
1214 NewMI.addImm(CC);
1215
1216 // Copy the false register.
1217 NewMI.add(FalseReg);
1218
1219 // Copy all the DefMI operands.
1220 const MCInstrDesc &DefDesc = DefMI->getDesc();
1221 for (unsigned i = 1, e = DefDesc.getNumOperands(); i != e; ++i)
1222 NewMI.add(DefMI->getOperand(i));
1223
1224 // Update SeenMIs set: register newly created MI and erase removed DefMI.
1225 SeenMIs.insert(NewMI);
1226 SeenMIs.erase(DefMI);
1227
1228 // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
1229 // DefMI would be invalid when transferred inside the loop. Checking for a
1230 // loop is expensive, but at least remove kill flags if they are in different
1231 // BBs.
1232 if (DefMI->getParent() != MI.getParent())
1233 NewMI->clearKillInfo();
1234
1235 // The caller will erase MI, but not DefMI.
1236 DefMI->eraseFromParent();
1237 return NewMI;
1238}
1239
1240unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
1241 if (MI.isMetaInstruction())
1242 return 0;
1243
1244 unsigned Opcode = MI.getOpcode();
1245
1246 if (Opcode == TargetOpcode::INLINEASM ||
1247 Opcode == TargetOpcode::INLINEASM_BR) {
1248 const MachineFunction &MF = *MI.getParent()->getParent();
1249 const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
1250 return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
1251 *TM.getMCAsmInfo());
1252 }
1253
1254 if (!MI.memoperands_empty()) {
1255 MachineMemOperand *MMO = *(MI.memoperands_begin());
1256 const MachineFunction &MF = *MI.getParent()->getParent();
1257 const auto &ST = MF.getSubtarget<RISCVSubtarget>();
1258 if (ST.hasStdExtZihintntl() && MMO->isNonTemporal()) {
1259 if (ST.hasStdExtCOrZca() && ST.enableRVCHintInstrs()) {
1260 if (isCompressibleInst(MI, STI))
1261 return 4; // c.ntl.all + c.load/c.store
1262 return 6; // c.ntl.all + load/store
1263 }
1264 return 8; // ntl.all + load/store
1265 }
1266 }
1267
1268 if (MI.getParent() && MI.getParent()->getParent()) {
1269 if (isCompressibleInst(MI, STI))
1270 return 2;
1271 }
1272 return get(Opcode).getSize();
1273}
1274
1275bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
1276 const unsigned Opcode = MI.getOpcode();
1277 switch (Opcode) {
1278 default:
1279 break;
1280 case RISCV::FSGNJ_D:
1281 case RISCV::FSGNJ_S:
1282 case RISCV::FSGNJ_H:
1283 case RISCV::FSGNJ_D_INX:
1284 case RISCV::FSGNJ_D_IN32X:
1285 case RISCV::FSGNJ_S_INX:
1286 case RISCV::FSGNJ_H_INX:
1287 // The canonical floating-point move is fsgnj rd, rs, rs.
1288 return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
1289 MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
1290 case RISCV::ADDI:
1291 case RISCV::ORI:
1292 case RISCV::XORI:
1293 return (MI.getOperand(1).isReg() &&
1294 MI.getOperand(1).getReg() == RISCV::X0) ||
1295 (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
1296 }
1297 return MI.isAsCheapAsAMove();
1298}
1299
1300std::optional<DestSourcePair>
1301RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
1302 if (MI.isMoveReg())
1303 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1304 switch (MI.getOpcode()) {
1305 default:
1306 break;
1307 case RISCV::ADDI:
1308 // Operand 1 can be a frameindex but callers expect registers
1309 if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
1310 MI.getOperand(2).getImm() == 0)
1311 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1312 break;
1313 case RISCV::FSGNJ_D:
1314 case RISCV::FSGNJ_S:
1315 case RISCV::FSGNJ_H:
1316 case RISCV::FSGNJ_D_INX:
1317 case RISCV::FSGNJ_D_IN32X:
1318 case RISCV::FSGNJ_S_INX:
1319 case RISCV::FSGNJ_H_INX:
1320 // The canonical floating-point move is fsgnj rd, rs, rs.
1321 if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
1322 MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
1323 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1324 break;
1325 }
1326 return std::nullopt;
1327}
1328
1329MachineTraceStrategy RISCVInstrInfo::getMachineCombinerTraceStrategy() const {
1330 if (ForceMachineCombinerStrategy.getNumOccurrences() == 0) {
1331 // The option is unused. Choose Local strategy only for in-order cores. When
1332 // scheduling model is unspecified, use MinInstrCount strategy as more
1333 // generic one.
1334 const auto &SchedModel = STI.getSchedModel();
1335 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
1336 ? MachineTraceStrategy::TS_MinInstrCount
1337 : MachineTraceStrategy::TS_Local;
1338 }
1339 // The strategy was forced by the option.
1340 return ForceMachineCombinerStrategy;
1341}
1342
1343void RISCVInstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
1344 MachineInstr &OldMI2,
1345 MachineInstr &NewMI1,
1346 MachineInstr &NewMI2) const {
1347 uint16_t IntersectedFlags = OldMI1.getFlags() & OldMI2.getFlags();
1348 NewMI1.setFlags(IntersectedFlags);
1349 NewMI2.setFlags(IntersectedFlags);
1350}
1351
1352void RISCVInstrInfo::finalizeInsInstrs(
1353 MachineInstr &Root, MachineCombinerPattern &P,
1354 SmallVectorImpl<MachineInstr *> &InsInstrs) const {
1355 int16_t FrmOpIdx =
1356 RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
1357 if (FrmOpIdx < 0) {
1358 assert(all_of(InsInstrs,
1359 [](MachineInstr *MI) {
1360 return RISCV::getNamedOperandIdx(MI->getOpcode(),
1361 RISCV::OpName::frm) < 0;
1362 }) &&
1363 "New instructions require FRM whereas the old one does not have it");
1364 return;
1365 }
1366
1367 const MachineOperand &FRM = Root.getOperand(FrmOpIdx);
1368 MachineFunction &MF = *Root.getMF();
1369
1370 for (auto *NewMI : InsInstrs) {
1371 assert(static_cast<unsigned>(RISCV::getNamedOperandIdx(
1372 NewMI->getOpcode(), RISCV::OpName::frm)) ==
1373 NewMI->getNumOperands() &&
1374 "Instruction has unexpected number of operands");
1375 MachineInstrBuilder MIB(MF, NewMI);
1376 MIB.add(FRM);
1377 if (FRM.getImm() == RISCVFPRndMode::DYN)
1378 MIB.addUse(RISCV::FRM, RegState::Implicit);
1379 }
1380}
1381
1382static bool isFADD(unsigned Opc) {
1383 switch (Opc) {
1384 default:
1385 return false;
1386 case RISCV::FADD_H:
1387 case RISCV::FADD_S:
1388 case RISCV::FADD_D:
1389 return true;
1390 }
1391}
1392
1393static bool isFSUB(unsigned Opc) {
1394 switch (Opc) {
1395 default:
1396 return false;
1397 case RISCV::FSUB_H:
1398 case RISCV::FSUB_S:
1399 case RISCV::FSUB_D:
1400 return true;
1401 }
1402}
1403
1404static bool isFMUL(unsigned Opc) {
1405 switch (Opc) {
1406 default:
1407 return false;
1408 case RISCV::FMUL_H:
1409 case RISCV::FMUL_S:
1410 case RISCV::FMUL_D:
1411 return true;
1412 }
1413}
1414
1415bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
1416 bool &Commuted) const {
1417 if (!TargetInstrInfo::hasReassociableSibling(Inst, Commuted))
1418 return false;
1419
1420 const MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
1421 unsigned OperandIdx = Commuted ? 2 : 1;
1422 const MachineInstr &Sibling =
1423 *MRI.getVRegDef(Inst.getOperand(OperandIdx).getReg());
1424
1425 int16_t InstFrmOpIdx =
1426 RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
1427 int16_t SiblingFrmOpIdx =
1428 RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);
1429
1430 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
1431 RISCV::hasEqualFRM(Inst, Sibling);
1432}
1433
1434bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
1435 bool Invert) const {
1436 unsigned Opc = Inst.getOpcode();
1437 if (Invert) {
1438 auto InverseOpcode = getInverseOpcode(Opc);
1439 if (!InverseOpcode)
1440 return false;
1441 Opc = *InverseOpcode;
1442 }
1443
1444 if (isFADD(Opc) || isFMUL(Opc))
1445 return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
1446 Inst.getFlag(MachineInstr::MIFlag::FmNsz);
1447
1448 switch (Opc) {
1449 default:
1450 return false;
1451 case RISCV::ADD:
1452 case RISCV::ADDW:
1453 case RISCV::AND:
1454 case RISCV::OR:
1455 case RISCV::XOR:
1456 // From RISC-V ISA spec, if both the high and low bits of the same product
1457 // are required, then the recommended code sequence is:
1458 //
1459 // MULH[[S]U] rdh, rs1, rs2
1460 // MUL rdl, rs1, rs2
1461 // (source register specifiers must be in same order and rdh cannot be the
1462 // same as rs1 or rs2)
1463 //
1464 // Microarchitectures can then fuse these into a single multiply operation
1465 // instead of performing two separate multiplies.
1466 // MachineCombiner may reassociate MUL operands and lose the fusion
1467 // opportunity.
1468 case RISCV::MUL:
1469 case RISCV::MULW:
1470 case RISCV::MIN:
1471 case RISCV::MINU:
1472 case RISCV::MAX:
1473 case RISCV::MAXU:
1474 case RISCV::FMIN_H:
1475 case RISCV::FMIN_S:
1476 case RISCV::FMIN_D:
1477 case RISCV::FMAX_H:
1478 case RISCV::FMAX_S:
1479 case RISCV::FMAX_D:
1480 return true;
1481 }
1482
1483 return false;
1484}
1485
1486std::optional<unsigned>
1487RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
1488 switch (Opcode) {
1489 default:
1490 return std::nullopt;
1491 case RISCV::FADD_H:
1492 return RISCV::FSUB_H;
1493 case RISCV::FADD_S:
1494 return RISCV::FSUB_S;
1495 case RISCV::FADD_D:
1496 return RISCV::FSUB_D;
1497 case RISCV::FSUB_H:
1498 return RISCV::FADD_H;
1499 case RISCV::FSUB_S:
1500 return RISCV::FADD_S;
1501 case RISCV::FSUB_D:
1502 return RISCV::FADD_D;
1503 case RISCV::ADD:
1504 return RISCV::SUB;
1505 case RISCV::SUB:
1506 return RISCV::ADD;
1507 case RISCV::ADDW:
1508 return RISCV::SUBW;
1509 case RISCV::SUBW:
1510 return RISCV::ADDW;
1511 }
1512}
1513
1514static bool canCombineFPFusedMultiply(const MachineInstr &Root,
1515 const MachineOperand &MO,
1516 bool DoRegPressureReduce) {
1517 if (!MO.isReg() || !MO.getReg().isVirtual())
1518 return false;
1519 const MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
1520 MachineInstr *MI = MRI.getVRegDef(MO.getReg());
1521 if (!MI || !isFMUL(MI->getOpcode()))
1522 return false;
1523
1524 if (!Root.getFlag(MachineInstr::MIFlag::FmContract) ||
1525 !MI->getFlag(MachineInstr::MIFlag::FmContract))
1526 return false;
1527
1528 // Try combining even if fmul has more than one use as it eliminates
1529 // dependency between fadd(fsub) and fmul. However, it can extend liveranges
1530 // for fmul operands, so reject the transformation in register pressure
1531 // reduction mode.
1532 if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
1533 return false;
1534
1535 // Do not combine instructions from different basic blocks.
1536 if (Root.getParent() != MI->getParent())
1537 return false;
1538 return RISCV::hasEqualFRM(Root, *MI);
1539}
1540
1541static bool
1542getFPFusedMultiplyPatterns(MachineInstr &Root,
1543 SmallVectorImpl<MachineCombinerPattern> &Patterns,
1544 bool DoRegPressureReduce) {
1545 unsigned Opc = Root.getOpcode();
1546 bool IsFAdd = isFADD(Opc);
1547 if (!IsFAdd && !isFSUB(Opc))
1548 return false;
1549 bool Added = false;
1550 if (canCombineFPFusedMultiply(Root, Root.getOperand(1),
1551 DoRegPressureReduce)) {
1552 Patterns.push_back(MachineCombinerPattern::FMADD_AX);
1553 Patterns.push_back(MachineCombinerPattern::FMSUB);
1554 Added = true;
1555 }
1556 if (canCombineFPFusedMultiply(Root, Root.getOperand(2),
1557 DoRegPressureReduce)) {
1558 Patterns.push_back(MachineCombinerPattern::FMADD_XA);
1559 Patterns.push_back(MachineCombinerPattern::FNMSUB);
1560 Added = true;
1561 }
1562 return Added;
1563}
1564
1565static bool getFPPatterns(MachineInstr &Root,
1566 SmallVectorImpl<MachineCombinerPattern> &Patterns,
1567 bool DoRegPressureReduce) {
1568 return getFPFusedMultiplyPatterns(Root, Patterns, DoRegPressureReduce);
1569}
1570
1571bool RISCVInstrInfo::getMachineCombinerPatterns(
1572 MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
1573 bool DoRegPressureReduce) const {
1574
1575 if (getFPPatterns(Root, Patterns, DoRegPressureReduce))
1576 return true;
1577
1578 return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns,
1579 DoRegPressureReduce);
1580}
1581
1582static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc,
1583 MachineCombinerPattern Pattern) {
1584 switch (RootOpc) {
1585 default:
1586 llvm_unreachable("Unexpected opcode");
1587 case RISCV::FADD_H:
1588 return RISCV::FMADD_H;
1589 case RISCV::FADD_S:
1590 return RISCV::FMADD_S;
1591 case RISCV::FADD_D:
1592 return RISCV::FMADD_D;
1593 case RISCV::FSUB_H:
1594 return Pattern == MachineCombinerPattern::FMSUB ? RISCV::FMSUB_H
1595 : RISCV::FNMSUB_H;
1596 case RISCV::FSUB_S:
1597 return Pattern == MachineCombinerPattern::FMSUB ? RISCV::FMSUB_S
1598 : RISCV::FNMSUB_S;
1599 case RISCV::FSUB_D:
1600 return Pattern == MachineCombinerPattern::FMSUB ? RISCV::FMSUB_D
1601 : RISCV::FNMSUB_D;
1602 }
1603}
1604
1605static unsigned getAddendOperandIdx(MachineCombinerPattern Pattern) {
1606 switch (Pattern) {
1607 default:
1608 llvm_unreachable("Unexpected pattern");
1609 case MachineCombinerPattern::FMADD_AX:
1610 case MachineCombinerPattern::FMSUB:
1611 return 2;
1612 case MachineCombinerPattern::FMADD_XA:
1613 case MachineCombinerPattern::FNMSUB:
1614 return 1;
1615 }
1616}
1617
1618static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev,
1619 MachineCombinerPattern Pattern,
1620 SmallVectorImpl<MachineInstr *> &InsInstrs,
1621 SmallVectorImpl<MachineInstr *> &DelInstrs) {
1622 MachineFunction *MF = Root.getMF();
1623 MachineRegisterInfo &MRI = MF->getRegInfo();
1624 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1625
1626 MachineOperand &Mul1 = Prev.getOperand(1);
1627 MachineOperand &Mul2 = Prev.getOperand(2);
1628 MachineOperand &Dst = Root.getOperand(0);
1629 MachineOperand &Addend = Root.getOperand(getAddendOperandIdx(Pattern));
1630
1631 Register DstReg = Dst.getReg();
1632 unsigned FusedOpc = getFPFusedMultiplyOpcode(Root.getOpcode(), Pattern);
1633 auto IntersectedFlags = Root.getFlags() & Prev.getFlags();
1634 DebugLoc MergedLoc =
1635 DILocation::getMergedLocation(Root.getDebugLoc(), Prev.getDebugLoc());
1636
1637 MachineInstrBuilder MIB =
1638 BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
1639 .addReg(Mul1.getReg(), getKillRegState(Mul1.isKill()))
1640 .addReg(Mul2.getReg(), getKillRegState(Mul2.isKill()))
1641 .addReg(Addend.getReg(), getKillRegState(Addend.isKill()))
1642 .setMIFlags(IntersectedFlags);
1643
1644 // Mul operands are not killed anymore.
1645 Mul1.setIsKill(false);
1646 Mul2.setIsKill(false);
1647
1648 InsInstrs.push_back(MIB);
1649 if (MRI.hasOneNonDBGUse(Prev.getOperand(0).getReg()))
1650 DelInstrs.push_back(&Prev);
1651 DelInstrs.push_back(&Root);
1652}
1653
1654void RISCVInstrInfo::genAlternativeCodeSequence(
1655 MachineInstr &Root, MachineCombinerPattern Pattern,
1656 SmallVectorImpl<MachineInstr *> &InsInstrs,
1657 SmallVectorImpl<MachineInstr *> &DelInstrs,
1658 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
1659 MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
1660 switch (Pattern) {
1661 default:
1662 TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
1663 DelInstrs, InstrIdxForVirtReg);
1664 return;
1665 case MachineCombinerPattern::FMADD_AX:
1666 case MachineCombinerPattern::FMSUB: {
1667 MachineInstr &Prev = *MRI.getVRegDef(Root.getOperand(1).getReg());
1668 combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
1669 return;
1670 }
1671 case MachineCombinerPattern::FMADD_XA:
1672 case MachineCombinerPattern::FNMSUB: {
1673 MachineInstr &Prev = *MRI.getVRegDef(Root.getOperand(2).getReg());
1674 combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
1675 return;
1676 }
1677 }
1678}
1679
1680bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
1681 StringRef &ErrInfo) const {
1682 MCInstrDesc const &Desc = MI.getDesc();
1683
1684 for (const auto &[Index, Operand] : enumerate(Desc.operands())) {
1685 unsigned OpType = Operand.OperandType;
1686 if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
1687 OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
1688 const MachineOperand &MO = MI.getOperand(Index);
1689 if (MO.isImm()) {
1690 int64_t Imm = MO.getImm();
1691 bool Ok;
1692 switch (OpType) {
1693 default:
1694 llvm_unreachable("Unexpected operand type");
1695
1696 // clang-format off
1697#define CASE_OPERAND_UIMM(NUM) \
1698 case RISCVOp::OPERAND_UIMM##NUM: \
1699 Ok = isUInt<NUM>(Imm); \
1700 break;
1701 CASE_OPERAND_UIMM(1)
1702 CASE_OPERAND_UIMM(2)
1703 CASE_OPERAND_UIMM(3)
1704 CASE_OPERAND_UIMM(4)
1705 CASE_OPERAND_UIMM(5)
1706 CASE_OPERAND_UIMM(6)
1707 CASE_OPERAND_UIMM(7)
1708 CASE_OPERAND_UIMM(8)
1709 CASE_OPERAND_UIMM(12)
1710 CASE_OPERAND_UIMM(20)
1711 // clang-format on
1712 case RISCVOp::OPERAND_UIMM2_LSB0:
1713 Ok = isShiftedUInt<1, 1>(Imm);
1714 break;
1715 case RISCVOp::OPERAND_UIMM7_LSB00:
1716 Ok = isShiftedUInt<5, 2>(Imm);
1717 break;
1718 case RISCVOp::OPERAND_UIMM8_LSB00:
1719 Ok = isShiftedUInt<6, 2>(Imm);
1720 break;
1721 case RISCVOp::OPERAND_UIMM8_LSB000:
1722 Ok = isShiftedUInt<5, 3>(Imm);
1723 break;
1724 case RISCVOp::OPERAND_UIMM8_GE32:
1725 Ok = isUInt<8>(Imm) && Imm >= 32;
1726 break;
1727 case RISCVOp::OPERAND_UIMM9_LSB000:
1728 Ok = isShiftedUInt<6, 3>(Imm);
1729 break;
1730 case RISCVOp::OPERAND_SIMM10_LSB0000_NONZERO:
1731 Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
1732 break;
1733 case RISCVOp::OPERAND_UIMM10_LSB00_NONZERO:
1734 Ok = isShiftedUInt<8, 2>(Imm) && (Imm != 0);
1735 break;
1736 case RISCVOp::OPERAND_ZERO:
1737 Ok = Imm == 0;
1738 break;
1739 case RISCVOp::OPERAND_SIMM5:
1740 Ok = isInt<5>(Imm);
1741 break;
1742 case RISCVOp::OPERAND_SIMM5_PLUS1:
1743 Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
1744 break;
1745 case RISCVOp::OPERAND_SIMM6:
1746 Ok = isInt<6>(Imm);
1747 break;
1748 case RISCVOp::OPERAND_SIMM6_NONZERO:
1749 Ok = Imm != 0 && isInt<6>(Imm);
1750 break;
1751 case RISCVOp::OPERAND_VTYPEI10:
1752 Ok = isUInt<10>(Imm);
1753 break;
1754 case RISCVOp::OPERAND_VTYPEI11:
1755 Ok = isUInt<11>(Imm);
1756 break;
1757 case RISCVOp::OPERAND_SIMM12:
1758 Ok = isInt<12>(Imm);
1759 break;
1760 case RISCVOp::OPERAND_SIMM12_LSB00000:
1761 Ok = isShiftedInt<7, 5>(Imm);
1762 break;
1763 case RISCVOp::OPERAND_UIMMLOG2XLEN:
1764 Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
1765 break;
1766 case RISCVOp::OPERAND_UIMMLOG2XLEN_NONZERO:
1767 Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
1768 Ok = Ok && Imm != 0;
1769 break;
1770 case RISCVOp::OPERAND_CLUI_IMM:
1771 Ok = (isUInt<5>(Imm) && Imm != 0) ||
1772 (Imm >= 0xfffe0 && Imm <= 0xfffff);
1773 break;
1774 case RISCVOp::OPERAND_RVKRNUM:
1775 Ok = Imm >= 0 && Imm <= 10;
1776 break;
1777 case RISCVOp::OPERAND_RVKRNUM_0_7:
1778 Ok = Imm >= 0 && Imm <= 7;
1779 break;
1780 case RISCVOp::OPERAND_RVKRNUM_1_10:
1781 Ok = Imm >= 1 && Imm <= 10;
1782 break;
1783 case RISCVOp::OPERAND_RVKRNUM_2_14:
1784 Ok = Imm >= 2 && Imm <= 14;
1785 break;
1786 }
1787 if (!Ok) {
1788 ErrInfo = "Invalid immediate";
1789 return false;
1790 }
1791 }
1792 }
1793 }
1794
1795 const uint64_t TSFlags = Desc.TSFlags;
1796 if (RISCVII::hasMergeOp(TSFlags)) {
1797 unsigned OpIdx = RISCVII::getMergeOpNum(Desc);
1798 if (MI.findTiedOperandIdx(0) != OpIdx) {
1799 ErrInfo = "Merge op improperly tied";
1800 return false;
1801 }
1802 }
1803 if (RISCVII::hasVLOp(TSFlags)) {
1804 const MachineOperand &Op = MI.getOperand(RISCVII::getVLOpNum(Desc));
1805 if (!Op.isImm() && !Op.isReg()) {
1806 ErrInfo = "Invalid operand type for VL operand";
1807 return false;
1808 }
1809 if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
1810 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
1811 auto *RC = MRI.getRegClass(Op.getReg());
1812 if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
1813 ErrInfo = "Invalid register class for VL operand";
1814 return false;
1815 }
1816 }
1817 if (!RISCVII::hasSEWOp(TSFlags)) {
1818 ErrInfo = "VL operand w/o SEW operand?";
1819 return false;
1820 }
1821 }
1822 if (RISCVII::hasSEWOp(TSFlags)) {
1823 unsigned OpIdx = RISCVII::getSEWOpNum(Desc);
1824 uint64_t Log2SEW = MI.getOperand(OpIdx).getImm();
1825 if (Log2SEW > 31) {
1826 ErrInfo = "Unexpected SEW value";
1827 return false;
1828 }
1829 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
1830 if (!RISCVVType::isValidSEW(SEW)) {
1831 ErrInfo = "Unexpected SEW value";
1832 return false;
1833 }
1834 }
1835 if (RISCVII::hasVecPolicyOp(TSFlags)) {
1836 unsigned OpIdx = RISCVII::getVecPolicyOpNum(Desc);
1837 uint64_t Policy = MI.getOperand(OpIdx).getImm();
1838 if (Policy > (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) {
1839 ErrInfo = "Invalid Policy Value";
1840 return false;
1841 }
1842 if (!RISCVII::hasVLOp(TSFlags)) {
1843 ErrInfo = "policy operand w/o VL operand?";
1844 return false;
1845 }
1846
1847 // VecPolicy operands can only exist on instructions with passthru/merge
1848 // arguments. Note that not all arguments with passthru have vec policy
1849 // operands- some instructions have implicit policies.
1850 unsigned UseOpIdx;
1851 if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
1852 ErrInfo = "policy operand w/o tied operand?";
1853 return false;
1854 }
1855 }
1856
1857 return true;
1858}
1859
1860// Return true if we can get the base operand, byte offset, and memory width
1861// of an instruction. Width is the size of memory that is being loaded/stored.
1862bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
1863 const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
1864 unsigned &Width, const TargetRegisterInfo *TRI) const {
1865 if (!LdSt.mayLoadOrStore())
1866 return false;
1867
1868 // Here we assume the standard RISC-V ISA, which uses a base+offset
1869 // addressing mode. You'll need to relax these conditions to support custom
1870 // load/store instructions.
1871 if (LdSt.getNumExplicitOperands() != 3)
1872 return false;
1873 if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
1874 return false;
1875
1876 if (!LdSt.hasOneMemOperand())
1877 return false;
1878
1879 Width = (*LdSt.memoperands_begin())->getSize();
1880 BaseReg = &LdSt.getOperand(1);
1881 Offset = LdSt.getOperand(2).getImm();
1882 return true;
1883}
1884
1885bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
1886 const MachineInstr &MIa, const MachineInstr &MIb) const {
1887 assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
1888 assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
1889
1890 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
1891 MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
1892 return false;
1893
1894 // Retrieve the base register, offset from the base register and width. Width
1895 // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
1896 // base registers are identical, and the offset of a lower memory access +
1897 // the width doesn't overlap the offset of a higher memory access,
1898 // then the memory accesses are different.
1899 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
1900 const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
1901 int64_t OffsetA = 0, OffsetB = 0;
1902 unsigned int WidthA = 0, WidthB = 0;
1903 if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
1904 getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
1905 if (BaseOpA->isIdenticalTo(*BaseOpB)) {
1906 int LowOffset = std::min(OffsetA, OffsetB);
1907 int HighOffset = std::max(OffsetA, OffsetB);
1908 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
1909 if (LowOffset + LowWidth <= HighOffset)
1910 return true;
1911 }
1912 }
1913 return false;
1914}
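A standalone version (not from the file) of the interval test used above: with identical base registers, two fixed-width accesses are disjoint when the lower one ends at or before the higher one starts.

#include <algorithm>
#include <cassert>
#include <cstdint>

// Mirrors the LowOffset + LowWidth <= HighOffset check above.
static bool disjoint(int64_t OffA, unsigned WidthA, int64_t OffB,
                     unsigned WidthB) {
  int64_t LowOffset = std::min(OffA, OffB);
  int64_t HighOffset = std::max(OffA, OffB);
  unsigned LowWidth = (LowOffset == OffA) ? WidthA : WidthB;
  return LowOffset + LowWidth <= HighOffset;
}

int main() {
  assert(disjoint(0, 4, 4, 4));  // two adjacent words do not alias
  assert(!disjoint(0, 8, 4, 4)); // a double word overlaps the following word
  return 0;
}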
1915
1916std::pair<unsigned, unsigned>
1917RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
1918 const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
1919 return std::make_pair(TF & Mask, TF & ~Mask);
1920}
1921
1922ArrayRef<std::pair<unsigned, const char *>>
1923RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
1924 using namespace RISCVII;
1925 static const std::pair<unsigned, const char *> TargetFlags[] = {
1926 {MO_CALL, "riscv-call"},
1927 {MO_PLT, "riscv-plt"},
1928 {MO_LO, "riscv-lo"},
1929 {MO_HI, "riscv-hi"},
1930 {MO_PCREL_LO, "riscv-pcrel-lo"},
1931 {MO_PCREL_HI, "riscv-pcrel-hi"},
1932 {MO_GOT_HI, "riscv-got-hi"},
1933 {MO_TPREL_LO, "riscv-tprel-lo"},
1934 {MO_TPREL_HI, "riscv-tprel-hi"},
1935 {MO_TPREL_ADD, "riscv-tprel-add"},
1936 {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
1937 {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
1938 return ArrayRef(TargetFlags);
1939}
1940bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
1941 MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
1942 const Function &F = MF.getFunction();
1943
1944 // Can F be deduplicated by the linker? If it can, don't outline from it.
1945 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
1946 return false;
1947
1948 // Don't outline from functions with section markings; the program could
1949 // expect that all the code is in the named section.
1950 if (F.hasSection())
1951 return false;
1952
1953 // It's safe to outline from MF.
1954 return true;
1955}
1956
1957bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
1958 unsigned &Flags) const {
1959 // More accurate safety checking is done in getOutliningCandidateInfo.
1960 return TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags);
1961}
1962
1963// Enum values indicating how an outlined call should be constructed.
1964enum MachineOutlinerConstructionID {
1965 MachineOutlinerDefault
1966};
1967
1968bool RISCVInstrInfo::shouldOutlineFromFunctionByDefault(
1969 MachineFunction &MF) const {
1970 return MF.getFunction().hasMinSize();
1971}
1972
1973std::optional<outliner::OutlinedFunction>
1974RISCVInstrInfo::getOutliningCandidateInfo(
1975 std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
1976
1977 // First we need to filter out candidates where the X5 register (i.e. t0)
1978 // can't be used to set up the function call.
1979 auto CannotInsertCall = [](outliner::Candidate &C) {
1980 const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
1981 return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
1982 };
1983
1984 llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
1985
1986 // If the sequence doesn't have enough candidates left, then we're done.
1987 if (RepeatedSequenceLocs.size() < 2)
1988 return std::nullopt;
1989
1990 unsigned SequenceSize = 0;
1991
1992 auto I = RepeatedSequenceLocs[0].front();
1993 auto E = std::next(RepeatedSequenceLocs[0].back());
1994 for (; I != E; ++I)
1995 SequenceSize += getInstSizeInBytes(*I);
1996
1997 // call t0, function = 8 bytes.
1998 unsigned CallOverhead = 8;
1999 for (auto &C : RepeatedSequenceLocs)
2000 C.setCallInfo(MachineOutlinerDefault, CallOverhead);
2001
2002 // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
2003 unsigned FrameOverhead = 4;
2004 if (RepeatedSequenceLocs[0]
2005 .getMF()
2006 ->getSubtarget<RISCVSubtarget>()
2007 .hasStdExtCOrZca())
2008 FrameOverhead = 2;
2009
2010 return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
2011 FrameOverhead, MachineOutlinerDefault);
2012}
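As a rough illustration of these overheads (the candidate count and sequence size below are hypothetical), a 12-byte sequence that appears four times costs 4 * 12 = 48 bytes inline; outlined it costs 4 * 8 bytes of calls plus 12 + 4 bytes for the body and return, also 48 bytes, so it only breaks even and longer or more frequent sequences are where the saving appears:

#include <cstdio>

int main() {
  unsigned SequenceSize = 12, NumCandidates = 4; // hypothetical candidate
  unsigned CallOverhead = 8, FrameOverhead = 4;  // values used above
  unsigned InlineBytes = NumCandidates * SequenceSize;
  unsigned OutlinedBytes =
      NumCandidates * CallOverhead + SequenceSize + FrameOverhead;
  std::printf("inline: %u bytes, outlined: %u bytes\n", InlineBytes,
              OutlinedBytes); // prints "inline: 48 bytes, outlined: 48 bytes"
}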
2013
2014outliner::InstrType
2015RISCVInstrInfo::getOutliningTypeImpl(MachineBasicBlock::iterator &MBBI,
2016 unsigned Flags) const {
2017 MachineInstr &MI = *MBBI;
2018 MachineBasicBlock *MBB = MI.getParent();
2019  const TargetRegisterInfo *TRI =
2020      MBB->getParent()->getSubtarget().getRegisterInfo();
2021 const auto &F = MI.getMF()->getFunction();
2022
2023 // We can manually strip out CFI instructions later.
2024 if (MI.isCFIInstruction())
2025 // If current function has exception handling code, we can't outline &
2026 // strip these CFI instructions since it may break .eh_frame section
2027 // needed in unwinding.
2028    return F.needsUnwindTableEntry() ? outliner::InstrType::Illegal
2029                                     : outliner::InstrType::Invisible;
2030
2031 // We need support for tail calls to outlined functions before return
2032 // statements can be allowed.
2033  if (MI.isReturn())
2034    return outliner::InstrType::Illegal;
2035
2036 // Don't allow modifying the X5 register which we use for return addresses for
2037 // these outlined functions.
2038 if (MI.modifiesRegister(RISCV::X5, TRI) ||
2039      MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
2040    return outliner::InstrType::Illegal;
2041
2042 // Make sure the operands don't reference something unsafe.
2043 for (const auto &MO : MI.operands()) {
2044
2045    // pcrel-hi and pcrel-lo can't be put in separate sections, so filter
2046    // that out if at all possible.
2047 if (MO.getTargetFlags() == RISCVII::MO_PCREL_LO &&
2048 (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
2049         F.hasSection()))
2050      return outliner::InstrType::Illegal;
2051 }
2052
2053  return outliner::InstrType::Legal;
2054}
2055
2056void RISCVInstrInfo::buildOutlinedFrame(
2057    MachineBasicBlock &MBB, MachineFunction &MF,
2058 const outliner::OutlinedFunction &OF) const {
2059
2060 // Strip out any CFI instructions
2061 bool Changed = true;
2062 while (Changed) {
2063 Changed = false;
2064 auto I = MBB.begin();
2065 auto E = MBB.end();
2066 for (; I != E; ++I) {
2067 if (I->isCFIInstruction()) {
2068 I->removeFromParent();
2069 Changed = true;
2070 break;
2071 }
2072 }
2073 }
2074
2075 MBB.addLiveIn(RISCV::X5);
2076
2077 // Add in a return instruction to the end of the outlined frame.
2078 MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
2079 .addReg(RISCV::X0, RegState::Define)
2080 .addReg(RISCV::X5)
2081 .addImm(0));
2082}
2083
2084MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
2085    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
2086    MachineFunction &MF, outliner::Candidate &C) const {
2087
2088 // Add in a call instruction to the outlined function at the given location.
2089 It = MBB.insert(It,
2090 BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
2091                      .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
2092                                        RISCVII::MO_CALL));
2093 return It;
2094}
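Taken together with buildOutlinedFrame above, the emitted code has roughly the following shape (the outlined function name is illustrative):

//   caller:                              OUTLINED_FUNCTION_0:
//     call t0, OUTLINED_FUNCTION_0         <outlined instructions>
//     ...                                  jr t0      ; jalr x0, 0(t0)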
2095
2096// MIR printer helper function to annotate Operands with a comment.
2097std::string RISCVInstrInfo::createMIROperandComment(
2098 const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
2099 const TargetRegisterInfo *TRI) const {
2100 // Print a generic comment for this operand if there is one.
2101  std::string GenericComment =
2102      TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
2103 if (!GenericComment.empty())
2104 return GenericComment;
2105
2106 // If not, we must have an immediate operand.
2107 if (!Op.isImm())
2108 return std::string();
2109
2110 std::string Comment;
2111 raw_string_ostream OS(Comment);
2112
2113 uint64_t TSFlags = MI.getDesc().TSFlags;
2114
2115 // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW
2116 // operand of vector codegen pseudos.
2117 if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
2118 MI.getOpcode() == RISCV::PseudoVSETVLI ||
2119 MI.getOpcode() == RISCV::PseudoVSETIVLI ||
2120 MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
2121 OpIdx == 2) {
2122    unsigned Imm = MI.getOperand(OpIdx).getImm();
2123    RISCVVType::printVType(Imm, OS);
2124 } else if (RISCVII::hasSEWOp(TSFlags) &&
2125 OpIdx == RISCVII::getSEWOpNum(MI.getDesc())) {
2126 unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
2127 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
2128 assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
2129 OS << "e" << SEW;
2130 } else if (RISCVII::hasVecPolicyOp(TSFlags) &&
2131 OpIdx == RISCVII::getVecPolicyOpNum(MI.getDesc())) {
2132    unsigned Policy = MI.getOperand(OpIdx).getImm();
2133    assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
2134 "Invalid Policy Value");
2135 OS << (Policy & RISCVII::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
2136 << (Policy & RISCVII::MASK_AGNOSTIC ? "ma" : "mu");
2137 }
2138
2139 OS.flush();
2140 return Comment;
2141}
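For the policy operand, the printed comment is one of "ta"/"tu" followed by "ma"/"mu". A minimal standalone decoder of that immediate, assuming the conventional encoding where bit 0 is the tail policy and bit 1 the mask policy, looks like:

#include <cstdio>

// Illustration only; the in-tree code uses RISCVII::TAIL_AGNOSTIC and
// RISCVII::MASK_AGNOSTIC rather than raw bit masks.
static void printPolicyComment(unsigned Policy) {
  std::printf("%s, %s\n", (Policy & 1) ? "ta" : "tu",
              (Policy & 2) ? "ma" : "mu");
}

int main() { printPolicyComment(3); } // prints "ta, ma"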
2142
2143// clang-format off
2144#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL) \
2145 RISCV::PseudoV##OP##_##TYPE##_##LMUL
2146
2147#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE) \
2148 CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1): \
2149 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2): \
2150 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4): \
2151 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)
2152
2153#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE) \
2154 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2): \
2155 case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)
2156
2157#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE) \
2158 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4): \
2159 case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)
2160
2161#define CASE_VFMA_OPCODE_LMULS(OP, TYPE) \
2162 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8): \
2163 case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
2164
2165#define CASE_VFMA_SPLATS(OP) \
2166 CASE_VFMA_OPCODE_LMULS_MF4(OP, VF16): \
2167 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VF32): \
2168 case CASE_VFMA_OPCODE_LMULS_M1(OP, VF64)
2169// clang-format on
2170
2171bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
2172 unsigned &SrcOpIdx1,
2173 unsigned &SrcOpIdx2) const {
2174 const MCInstrDesc &Desc = MI.getDesc();
2175 if (!Desc.isCommutable())
2176 return false;
2177
2178 switch (MI.getOpcode()) {
2179 case RISCV::TH_MVEQZ:
2180 case RISCV::TH_MVNEZ:
2181 // We can't commute operands if operand 2 (i.e., rs1 in
2182 // mveqz/mvnez rd,rs1,rs2) is the zero-register (as it is
2183 // not valid as the in/out-operand 1).
2184 if (MI.getOperand(2).getReg() == RISCV::X0)
2185 return false;
2186 // Operands 1 and 2 are commutable, if we switch the opcode.
2187 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
2188 case RISCV::TH_MULA:
2189 case RISCV::TH_MULAW:
2190 case RISCV::TH_MULAH:
2191 case RISCV::TH_MULS:
2192 case RISCV::TH_MULSW:
2193 case RISCV::TH_MULSH:
2194 // Operands 2 and 3 are commutable.
2195 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
2196 case RISCV::PseudoCCMOVGPR:
2197 // Operands 4 and 5 are commutable.
2198 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
2199 case CASE_VFMA_SPLATS(FMADD):
2200 case CASE_VFMA_SPLATS(FMSUB):
2201 case CASE_VFMA_SPLATS(FMACC):
2202  case CASE_VFMA_SPLATS(FMSAC):
2203  case CASE_VFMA_SPLATS(FNMADD):
2204  case CASE_VFMA_SPLATS(FNMSUB):
2205 case CASE_VFMA_SPLATS(FNMACC):
2206 case CASE_VFMA_SPLATS(FNMSAC):
2207 case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
2208 case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
2209 case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
2210 case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
2211 case CASE_VFMA_OPCODE_LMULS(MADD, VX):
2212 case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
2213 case CASE_VFMA_OPCODE_LMULS(MACC, VX):
2214 case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
2215 case CASE_VFMA_OPCODE_LMULS(MACC, VV):
2216 case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
2217 // If the tail policy is undisturbed we can't commute.
2218 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
2219 if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
2220 return false;
2221
2222 // For these instructions we can only swap operand 1 and operand 3 by
2223 // changing the opcode.
2224 unsigned CommutableOpIdx1 = 1;
2225 unsigned CommutableOpIdx2 = 3;
2226 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
2227 CommutableOpIdx2))
2228 return false;
2229 return true;
2230 }
2231  case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
2232  case CASE_VFMA_OPCODE_LMULS_MF4(FNMADD, VV):
2233  case CASE_VFMA_OPCODE_LMULS_MF4(FMSUB, VV):
2234  case CASE_VFMA_OPCODE_LMULS_MF4(FNMSUB, VV):
2235 case CASE_VFMA_OPCODE_LMULS(MADD, VV):
2236 case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
2237 // If the tail policy is undisturbed we can't commute.
2238 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
2239 if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
2240 return false;
2241
2242 // For these instructions we have more freedom. We can commute with the
2243 // other multiplicand or with the addend/subtrahend/minuend.
2244
2245 // Any fixed operand must be from source 1, 2 or 3.
2246 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
2247 return false;
2248 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
2249 return false;
2250
2251    // If both ops are fixed, one must be the tied source.
2252 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
2253 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
2254 return false;
2255
2256 // Look for two different register operands assumed to be commutable
2257 // regardless of the FMA opcode. The FMA opcode is adjusted later if
2258 // needed.
2259 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
2260 SrcOpIdx2 == CommuteAnyOperandIndex) {
2261      // At least one of the operands to be commuted is not specified, so
2262      // this method is free to choose appropriate commutable operands.
2263 unsigned CommutableOpIdx1 = SrcOpIdx1;
2264 if (SrcOpIdx1 == SrcOpIdx2) {
2265        // Neither operand is fixed. Set one of the commutable
2266        // operands to the tied source.
2267 CommutableOpIdx1 = 1;
2268 } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
2269 // Only one of the operands is not fixed.
2270 CommutableOpIdx1 = SrcOpIdx2;
2271 }
2272
2273 // CommutableOpIdx1 is well defined now. Let's choose another commutable
2274 // operand and assign its index to CommutableOpIdx2.
2275 unsigned CommutableOpIdx2;
2276 if (CommutableOpIdx1 != 1) {
2277 // If we haven't already used the tied source, we must use it now.
2278 CommutableOpIdx2 = 1;
2279 } else {
2280 Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
2281
2282 // The commuted operands should have different registers.
2283 // Otherwise, the commute transformation does not change anything and
2284 // is useless. We use this as a hint to make our decision.
2285 if (Op1Reg != MI.getOperand(2).getReg())
2286 CommutableOpIdx2 = 2;
2287 else
2288 CommutableOpIdx2 = 3;
2289 }
2290
2291 // Assign the found pair of commutable indices to SrcOpIdx1 and
2292 // SrcOpIdx2 to return those values.
2293 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
2294 CommutableOpIdx2))
2295 return false;
2296 }
2297
2298 return true;
2299 }
2300 }
2301
2302 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2303}
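The opcode change that accompanies swapping operands 1 and 3 can be seen in a scalar analogue: vfmacc computes vd = vs1 * vs2 + vd while vfmadd computes vd = vd * vs1 + vs2, so exchanging the roles of the accumulator and the addend maps one form onto the other (a sketch, not the vector semantics themselves):

// fmacc(d, a, b) = a * b + d   (accumulator d is the addend)
// fmadd(d, a, b) = d * a + b   (accumulator d is a multiplicand)
static double fmacc(double d, double a, double b) { return a * b + d; }
static double fmadd(double d, double a, double b) { return d * a + b; }
// For any inputs, fmacc(d, a, b) == fmadd(b, a, d): swapping the "d" and "b"
// roles while switching the opcode preserves the computed value.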
2304
2305#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
2306 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
2307 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
2308 break;
2309
2310#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE) \
2311 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
2312 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
2313 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
2314 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
2315
2316#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE) \
2317 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
2318 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)
2319
2320#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE) \
2321 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
2322 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)
2323
2324#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
2325 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
2326 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
2327
2328#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
2329 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VF16) \
2330 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VF32) \
2331 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VF64)
2332
2333MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
2334 bool NewMI,
2335 unsigned OpIdx1,
2336 unsigned OpIdx2) const {
2337 auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
2338 if (NewMI)
2339 return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
2340 return MI;
2341 };
2342
2343 switch (MI.getOpcode()) {
2344 case RISCV::TH_MVEQZ:
2345 case RISCV::TH_MVNEZ: {
2346 auto &WorkingMI = cloneIfNew(MI);
2347 WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
2348 : RISCV::TH_MVEQZ));
2349 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, OpIdx1,
2350 OpIdx2);
2351 }
2352 case RISCV::PseudoCCMOVGPR: {
2353 // CCMOV can be commuted by inverting the condition.
2354    auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
2355    CC = RISCVCC::getOppositeBranchCondition(CC);
2356 auto &WorkingMI = cloneIfNew(MI);
2357 WorkingMI.getOperand(3).setImm(CC);
2358 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI*/ false,
2359 OpIdx1, OpIdx2);
2360 }
2361 case CASE_VFMA_SPLATS(FMACC):
2362 case CASE_VFMA_SPLATS(FMADD):
2363 case CASE_VFMA_SPLATS(FMSAC):
2364 case CASE_VFMA_SPLATS(FMSUB):
2365  case CASE_VFMA_SPLATS(FNMACC):
2366  case CASE_VFMA_SPLATS(FNMADD):
2367  case CASE_VFMA_SPLATS(FNMSAC):
2368  case CASE_VFMA_SPLATS(FNMSUB):
2369 case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
2370 case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
2371 case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
2372 case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
2373 case CASE_VFMA_OPCODE_LMULS(MADD, VX):
2374 case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
2375 case CASE_VFMA_OPCODE_LMULS(MACC, VX):
2376 case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
2377 case CASE_VFMA_OPCODE_LMULS(MACC, VV):
2378 case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
2379    // It only makes sense to toggle these between clobbering the
2380    // addend/subtrahend/minuend and one of the multiplicands.
2381 assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
2382 assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
2383 unsigned Opc;
2384 switch (MI.getOpcode()) {
2385 default:
2386 llvm_unreachable("Unexpected opcode");
2387 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
2388      CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
2389      CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSAC, FMSUB)
2390      CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSUB, FMSAC)
2391      CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMACC, FNMADD)
2392      CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMADD, FNMACC)
2393      CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSAC, FNMSUB)
2394      CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSUB, FNMSAC)
2395      CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMACC, FMADD, VV)
2396      CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMSAC, FMSUB, VV)
2397      CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMACC, FNMADD, VV)
2398      CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMSAC, FNMSUB, VV)
2399 CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VX)
2400 CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VX)
2401 CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VX)
2402 CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VX)
2403 CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VV)
2404 CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VV)
2405 }
2406
2407 auto &WorkingMI = cloneIfNew(MI);
2408 WorkingMI.setDesc(get(Opc));
2409 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2410 OpIdx1, OpIdx2);
2411 }
2412  case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
2413  case CASE_VFMA_OPCODE_LMULS_MF4(FNMADD, VV):
2414  case CASE_VFMA_OPCODE_LMULS_MF4(FMSUB, VV):
2415  case CASE_VFMA_OPCODE_LMULS_MF4(FNMSUB, VV):
2416 case CASE_VFMA_OPCODE_LMULS(MADD, VV):
2417 case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
2418 assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
2419    // If one of the operands is the addend we need to change the opcode.
2420 // Otherwise we're just swapping 2 of the multiplicands.
2421 if (OpIdx1 == 3 || OpIdx2 == 3) {
2422 unsigned Opc;
2423 switch (MI.getOpcode()) {
2424 default:
2425 llvm_unreachable("Unexpected opcode");
2426        CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMADD, FMACC, VV)
2427        CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMADD, FNMACC, VV)
2428        CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMSUB, FMSAC, VV)
2429        CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMSUB, FNMSAC, VV)
2430 CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VV)
2431 CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VV)
2432 }
2433
2434 auto &WorkingMI = cloneIfNew(MI);
2435 WorkingMI.setDesc(get(Opc));
2436 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2437 OpIdx1, OpIdx2);
2438 }
2439 // Let the default code handle it.
2440 break;
2441 }
2442 }
2443
2444 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2445}
2446
2447#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
2448#undef CASE_VFMA_CHANGE_OPCODE_LMULS
2449#undef CASE_VFMA_CHANGE_OPCODE_COMMON
2450#undef CASE_VFMA_SPLATS
2451#undef CASE_VFMA_OPCODE_LMULS
2452#undef CASE_VFMA_OPCODE_COMMON
2453
2454// clang-format off
2455#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
2456 RISCV::PseudoV##OP##_##LMUL##_TIED
2457
2458#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP) \
2459 CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
2460 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
2461 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
2462 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
2463 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
2464
2465#define CASE_WIDEOP_OPCODE_LMULS(OP) \
2466 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
2467 case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
2468// clang-format on
2469
2470#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
2471 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
2472 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
2473 break;
2474
2475#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
2476 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
2477 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
2478 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
2479 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
2480 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
2481
2482#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
2483 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
2484 CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
2485
2486MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
2487 LiveVariables *LV,
2488 LiveIntervals *LIS) const {
2489 switch (MI.getOpcode()) {
2490 default:
2491 break;
2492 case CASE_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
2493 case CASE_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV):
2494 case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
2495 case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
2496 case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
2497 case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
2498 // If the tail policy is undisturbed we can't convert.
2499 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
2500 MI.getNumExplicitOperands() == 6);
2501 if ((MI.getOperand(5).getImm() & 1) == 0)
2502 return nullptr;
2503
2504 // clang-format off
2505 unsigned NewOpc;
2506 switch (MI.getOpcode()) {
2507 default:
2508      llvm_unreachable("Unexpected opcode");
2509    CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWADD_WV)
2510    CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWSUB_WV)
2511    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADD_WV)
2512    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADDU_WV)
2513    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUB_WV)
2514    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUBU_WV)
2515 }
2516 // clang-format on
2517
2518 MachineBasicBlock &MBB = *MI.getParent();
2519 MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
2520 .add(MI.getOperand(0))
2521 .add(MI.getOperand(1))
2522 .add(MI.getOperand(2))
2523 .add(MI.getOperand(3))
2524 .add(MI.getOperand(4));
2525 MIB.copyImplicitOps(MI);
2526
2527 if (LV) {
2528 unsigned NumOps = MI.getNumOperands();
2529 for (unsigned I = 1; I < NumOps; ++I) {
2530 MachineOperand &Op = MI.getOperand(I);
2531 if (Op.isReg() && Op.isKill())
2532 LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
2533 }
2534 }
2535
2536    if (LIS) {
2537      SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);
2538
2539 if (MI.getOperand(0).isEarlyClobber()) {
2540        // Use operand 1 was tied to the early-clobber def operand 0, so its live
2541        // interval could have ended at an early-clobber slot. Now that they are
2542        // no longer tied, we need to update it to the normal register slot.
2543        LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
2544        LiveRange::Segment *S = LI.getSegmentContaining(Idx);
2545 if (S->end == Idx.getRegSlot(true))
2546 S->end = Idx.getRegSlot();
2547 }
2548 }
2549
2550 return MIB;
2551 }
2552 }
2553
2554 return nullptr;
2555}
2556
2557#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
2558#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
2559#undef CASE_WIDEOP_OPCODE_LMULS
2560#undef CASE_WIDEOP_OPCODE_COMMON
2561
2562void RISCVInstrInfo::getVLENFactoredAmount(MachineFunction &MF,
2563                                           MachineBasicBlock &MBB,
2564                                           MachineBasicBlock::iterator II,
2565 const DebugLoc &DL, Register DestReg,
2566 int64_t Amount,
2567 MachineInstr::MIFlag Flag) const {
2568 assert(Amount > 0 && "There is no need to get VLEN scaled value.");
2569 assert(Amount % 8 == 0 &&
2570 "Reserve the stack by the multiple of one vector size.");
2571
2572  MachineRegisterInfo &MRI = MF.getRegInfo();
2573 int64_t NumOfVReg = Amount / 8;
2574
2575 BuildMI(MBB, II, DL, get(RISCV::PseudoReadVLENB), DestReg).setMIFlag(Flag);
2576 assert(isInt<32>(NumOfVReg) &&
2577 "Expect the number of vector registers within 32-bits.");
2578 if (llvm::has_single_bit<uint32_t>(NumOfVReg)) {
2579 uint32_t ShiftAmount = Log2_32(NumOfVReg);
2580 if (ShiftAmount == 0)
2581 return;
2582 BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
2583 .addReg(DestReg, RegState::Kill)
2584 .addImm(ShiftAmount)
2585 .setMIFlag(Flag);
2586 } else if (STI.hasStdExtZba() &&
2587 ((NumOfVReg % 3 == 0 && isPowerOf2_64(NumOfVReg / 3)) ||
2588 (NumOfVReg % 5 == 0 && isPowerOf2_64(NumOfVReg / 5)) ||
2589 (NumOfVReg % 9 == 0 && isPowerOf2_64(NumOfVReg / 9)))) {
2590 // We can use Zba SHXADD+SLLI instructions for multiply in some cases.
2591 unsigned Opc;
2592 uint32_t ShiftAmount;
2593 if (NumOfVReg % 9 == 0) {
2594 Opc = RISCV::SH3ADD;
2595 ShiftAmount = Log2_64(NumOfVReg / 9);
2596 } else if (NumOfVReg % 5 == 0) {
2597 Opc = RISCV::SH2ADD;
2598 ShiftAmount = Log2_64(NumOfVReg / 5);
2599 } else if (NumOfVReg % 3 == 0) {
2600 Opc = RISCV::SH1ADD;
2601 ShiftAmount = Log2_64(NumOfVReg / 3);
2602 } else {
2603 llvm_unreachable("Unexpected number of vregs");
2604 }
2605 if (ShiftAmount)
2606 BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
2607 .addReg(DestReg, RegState::Kill)
2608 .addImm(ShiftAmount)
2609 .setMIFlag(Flag);
2610 BuildMI(MBB, II, DL, get(Opc), DestReg)
2611 .addReg(DestReg, RegState::Kill)
2612 .addReg(DestReg)
2613 .setMIFlag(Flag);
2614 } else if (llvm::has_single_bit<uint32_t>(NumOfVReg - 1)) {
2615 Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2616 uint32_t ShiftAmount = Log2_32(NumOfVReg - 1);
2617 BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
2618 .addReg(DestReg)
2619 .addImm(ShiftAmount)
2620 .setMIFlag(Flag);
2621 BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
2622 .addReg(ScaledRegister, RegState::Kill)
2623 .addReg(DestReg, RegState::Kill)
2624 .setMIFlag(Flag);
2625 } else if (llvm::has_single_bit<uint32_t>(NumOfVReg + 1)) {
2626 Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2627 uint32_t ShiftAmount = Log2_32(NumOfVReg + 1);
2628 BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
2629 .addReg(DestReg)
2630 .addImm(ShiftAmount)
2631 .setMIFlag(Flag);
2632 BuildMI(MBB, II, DL, get(RISCV::SUB), DestReg)
2633 .addReg(ScaledRegister, RegState::Kill)
2634 .addReg(DestReg, RegState::Kill)
2635 .setMIFlag(Flag);
2636 } else {
2637 Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2638 movImm(MBB, II, DL, N, NumOfVReg, Flag);
2639    if (!STI.hasStdExtM() && !STI.hasStdExtZmmul())
2640      MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2641 MF.getFunction(),
2642 "M- or Zmmul-extension must be enabled to calculate the vscaled size/"
2643 "offset."});
2644 BuildMI(MBB, II, DL, get(RISCV::MUL), DestReg)
2645        .addReg(DestReg, RegState::Kill)
2646        .addReg(N, RegState::Kill)
2647 .setMIFlag(Flag);
2648 }
2649}
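Worked examples of the strategies above, assuming DestReg already holds VLENB after the PseudoReadVLENB and that Zba is available for the 24-byte case:

//   Amount = 16  (NumOfVReg = 2)        : slli   dest, dest, 1
//   Amount = 24  (NumOfVReg = 3)        : sh1add dest, dest, dest
//   Amount = 56  (NumOfVReg = 7 = 8 - 1): slli   tmp, dest, 3
//                                         sub    dest, tmp, dest
//   Amount = 88  (NumOfVReg = 11)       : li     tmp, 11       ; via movImm
//                                         mul    dest, dest, tmp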
2650
2651ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
2652RISCVInstrInfo::getSerializableMachineMemOperandTargetFlags() const {
2653 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
2654 {{MONontemporalBit0, "riscv-nontemporal-domain-bit-0"},
2655 {MONontemporalBit1, "riscv-nontemporal-domain-bit-1"}};
2656 return ArrayRef(TargetFlags);
2657}
2658
2659// Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
2660bool RISCV::isSEXT_W(const MachineInstr &MI) {
2661 return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
2662 MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
2663}
2664
2665// Returns true if this is the zext.w pattern, adduw rd, rs1, x0.
2666bool RISCV::isZEXT_W(const MachineInstr &MI) {
2667 return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
2668 MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
2669}
2670
2671// Returns true if this is the zext.b pattern, andi rd, rs1, 255.
2672bool RISCV::isZEXT_B(const MachineInstr &MI) {
2673 return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
2674 MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
2675}
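The canonical instruction forms these three helpers recognize (add.uw is provided by the Zba extension):

//   sext.w rd, rs1   ==   addiw  rd, rs1, 0
//   zext.w rd, rs1   ==   add.uw rd, rs1, x0
//   zext.b rd, rs1   ==   andi   rd, rs1, 255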
2676
2677static bool isRVVWholeLoadStore(unsigned Opcode) {
2678 switch (Opcode) {
2679 default:
2680 return false;
2681 case RISCV::VS1R_V:
2682 case RISCV::VS2R_V:
2683 case RISCV::VS4R_V:
2684 case RISCV::VS8R_V:
2685 case RISCV::VL1RE8_V:
2686 case RISCV::VL2RE8_V:
2687 case RISCV::VL4RE8_V:
2688 case RISCV::VL8RE8_V:
2689 case RISCV::VL1RE16_V:
2690 case RISCV::VL2RE16_V:
2691 case RISCV::VL4RE16_V:
2692 case RISCV::VL8RE16_V:
2693 case RISCV::VL1RE32_V:
2694 case RISCV::VL2RE32_V:
2695 case RISCV::VL4RE32_V:
2696 case RISCV::VL8RE32_V:
2697 case RISCV::VL1RE64_V:
2698 case RISCV::VL2RE64_V:
2699 case RISCV::VL4RE64_V:
2700 case RISCV::VL8RE64_V:
2701 return true;
2702 }
2703}
2704
2705bool RISCV::isRVVSpill(const MachineInstr &MI) {
2706 // RVV lacks any support for immediate addressing for stack addresses, so be
2707 // conservative.
2708 unsigned Opcode = MI.getOpcode();
2709 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
2710 !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
2711 return false;
2712 return true;
2713}
2714
2715std::optional<std::pair<unsigned, unsigned>>
2716RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
2717 switch (Opcode) {
2718 default:
2719 return std::nullopt;
2720 case RISCV::PseudoVSPILL2_M1:
2721 case RISCV::PseudoVRELOAD2_M1:
2722 return std::make_pair(2u, 1u);
2723 case RISCV::PseudoVSPILL2_M2:
2724 case RISCV::PseudoVRELOAD2_M2:
2725 return std::make_pair(2u, 2u);
2726 case RISCV::PseudoVSPILL2_M4:
2727 case RISCV::PseudoVRELOAD2_M4:
2728 return std::make_pair(2u, 4u);
2729 case RISCV::PseudoVSPILL3_M1:
2730 case RISCV::PseudoVRELOAD3_M1:
2731 return std::make_pair(3u, 1u);
2732 case RISCV::PseudoVSPILL3_M2:
2733 case RISCV::PseudoVRELOAD3_M2:
2734 return std::make_pair(3u, 2u);
2735 case RISCV::PseudoVSPILL4_M1:
2736 case RISCV::PseudoVRELOAD4_M1:
2737 return std::make_pair(4u, 1u);
2738 case RISCV::PseudoVSPILL4_M2:
2739 case RISCV::PseudoVRELOAD4_M2:
2740 return std::make_pair(4u, 2u);
2741 case RISCV::PseudoVSPILL5_M1:
2742 case RISCV::PseudoVRELOAD5_M1:
2743 return std::make_pair(5u, 1u);
2744 case RISCV::PseudoVSPILL6_M1:
2745 case RISCV::PseudoVRELOAD6_M1:
2746 return std::make_pair(6u, 1u);
2747 case RISCV::PseudoVSPILL7_M1:
2748 case RISCV::PseudoVRELOAD7_M1:
2749 return std::make_pair(7u, 1u);
2750 case RISCV::PseudoVSPILL8_M1:
2751 case RISCV::PseudoVRELOAD8_M1:
2752 return std::make_pair(8u, 1u);
2753 }
2754}
2755
2756bool RISCV::isFaultFirstLoad(const MachineInstr &MI) {
2757 return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
2758 !MI.isInlineAsm();
2759}
2760
2761bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
2762 int16_t MI1FrmOpIdx =
2763 RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
2764 int16_t MI2FrmOpIdx =
2765 RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
2766 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
2767 return false;
2768 MachineOperand FrmOp1 = MI1.getOperand(MI1FrmOpIdx);
2769 MachineOperand FrmOp2 = MI2.getOperand(MI2FrmOpIdx);
2770 return FrmOp1.getImm() == FrmOp2.getImm();
2771}