1//===-- RISCVInstrInfo.cpp - RISC-V Instruction Information -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the RISC-V implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "RISCVInstrInfo.h"
14#include "MCTargetDesc/RISCVMatInt.h"
15#include "RISCV.h"
16#include "RISCVMachineFunctionInfo.h"
17#include "RISCVSubtarget.h"
18#include "RISCVTargetMachine.h"
19#include "llvm/ADT/STLExtras.h"
36
37using namespace llvm;
38
39#define GEN_CHECK_COMPRESS_INSTR
40#include "RISCVGenCompressInstEmitter.inc"
41
42#define GET_INSTRINFO_CTOR_DTOR
43#define GET_INSTRINFO_NAMED_OPS
44#include "RISCVGenInstrInfo.inc"
45
46static cl::opt<bool> PreferWholeRegisterMove(
47 "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
48 cl::desc("Prefer whole register move for vector registers."));
49
50static cl::opt<MachineTraceStrategy> ForceMachineCombinerStrategy(
51 "riscv-force-machine-combiner-strategy", cl::Hidden,
52 cl::desc("Force machine combiner to use a specific strategy for machine "
53 "trace metrics evaluation."),
54 cl::init(MachineTraceStrategy::TS_NumStrategies),
55 cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local",
56 "Local strategy."),
57 clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr",
58 "MinInstrCount strategy.")));
59
60namespace llvm::RISCVVPseudosTable {
61
62using namespace RISCV;
63
64#define GET_RISCVVPseudosTable_IMPL
65#include "RISCVGenSearchableTables.inc"
66
67} // namespace llvm::RISCVVPseudosTable
68
69namespace llvm::RISCV {
70
71#define GET_RISCVMaskedPseudosTable_IMPL
72#include "RISCVGenSearchableTables.inc"
73
74} // end namespace llvm::RISCV
75
76RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
77 : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
78 STI(STI) {}
79
80MCInst RISCVInstrInfo::getNop() const {
81 if (STI.hasStdExtCOrZca())
82 return MCInstBuilder(RISCV::C_NOP);
83 return MCInstBuilder(RISCV::ADDI)
84 .addReg(RISCV::X0)
85 .addReg(RISCV::X0)
86 .addImm(0);
87}
88
89Register RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
90 int &FrameIndex) const {
91 unsigned Dummy;
92 return isLoadFromStackSlot(MI, FrameIndex, Dummy);
93}
94
95Register RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
96 int &FrameIndex,
97 unsigned &MemBytes) const {
98 switch (MI.getOpcode()) {
99 default:
100 return 0;
101 case RISCV::LB:
102 case RISCV::LBU:
103 MemBytes = 1;
104 break;
105 case RISCV::LH:
106 case RISCV::LHU:
107 case RISCV::FLH:
108 MemBytes = 2;
109 break;
110 case RISCV::LW:
111 case RISCV::FLW:
112 case RISCV::LWU:
113 MemBytes = 4;
114 break;
115 case RISCV::LD:
116 case RISCV::FLD:
117 MemBytes = 8;
118 break;
119 }
120
121 if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
122 MI.getOperand(2).getImm() == 0) {
123 FrameIndex = MI.getOperand(1).getIndex();
124 return MI.getOperand(0).getReg();
125 }
126
127 return 0;
128}
129
130Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
131 int &FrameIndex) const {
132 unsigned Dummy;
133 return isStoreToStackSlot(MI, FrameIndex, Dummy);
134}
135
136Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
137 int &FrameIndex,
138 unsigned &MemBytes) const {
139 switch (MI.getOpcode()) {
140 default:
141 return 0;
142 case RISCV::SB:
143 MemBytes = 1;
144 break;
145 case RISCV::SH:
146 case RISCV::FSH:
147 MemBytes = 2;
148 break;
149 case RISCV::SW:
150 case RISCV::FSW:
151 MemBytes = 4;
152 break;
153 case RISCV::SD:
154 case RISCV::FSD:
155 MemBytes = 8;
156 break;
157 }
158
159 if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
160 MI.getOperand(2).getImm() == 0) {
161 FrameIndex = MI.getOperand(1).getIndex();
162 return MI.getOperand(0).getReg();
163 }
164
165 return 0;
166}
167
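// forwardCopyWillClobberTuple: returns true when a forward (low-to-high)
// register-by-register copy of a NumRegs-wide tuple would overwrite source
// registers before they are read. For example, copying v8-v11 to v10-v13
// overlaps in v10/v11, so the copy has to be emitted in reverse order.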
168static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
169 unsigned NumRegs) {
170 return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
171}
172
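// Decide whether the whole-register COPY at MBBI can instead be lowered to
// vmv.v.v (or vmv.v.i): scan backwards for the instruction that defines the
// copied source and for the vsetvli that governs it, and require a matching
// LMUL, a matching SEW across intervening vsetvlis, and a tail-agnostic
// policy. On success, DefMBBI is set to the producing instruction.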
173static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
174 const MachineBasicBlock &MBB,
175 MachineBasicBlock::const_iterator MBBI,
176 MachineBasicBlock::const_iterator &DefMBBI,
177 RISCVII::VLMUL LMul) {
178 if (PreferWholeRegisterMove)
179 return false;
180
181 assert(MBBI->getOpcode() == TargetOpcode::COPY &&
182 "Unexpected COPY instruction.");
183 Register SrcReg = MBBI->getOperand(1).getReg();
184 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
185
186 bool FoundDef = false;
187 bool FirstVSetVLI = false;
188 unsigned FirstSEW = 0;
189 while (MBBI != MBB.begin()) {
190 --MBBI;
191 if (MBBI->isMetaInstruction())
192 continue;
193
194 if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
195 MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
196 MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
197 // There is a vsetvli between COPY and source define instruction.
198 // vy = def_vop ... (producing instruction)
199 // ...
200 // vsetvli
201 // ...
202 // vx = COPY vy
203 if (!FoundDef) {
204 if (!FirstVSetVLI) {
205 FirstVSetVLI = true;
206 unsigned FirstVType = MBBI->getOperand(2).getImm();
207 RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
208 FirstSEW = RISCVVType::getSEW(FirstVType);
209 // The first encountered vsetvli must have the same lmul as the
210 // register class of COPY.
211 if (FirstLMul != LMul)
212 return false;
213 }
214 // Only permit `vsetvli x0, x0, vtype` between COPY and the source
215 // define instruction.
216 if (MBBI->getOperand(0).getReg() != RISCV::X0)
217 return false;
218 if (MBBI->getOperand(1).isImm())
219 return false;
220 if (MBBI->getOperand(1).getReg() != RISCV::X0)
221 return false;
222 continue;
223 }
224
225 // MBBI is the first vsetvli before the producing instruction.
226 unsigned VType = MBBI->getOperand(2).getImm();
227 // If there is a vsetvli between COPY and the producing instruction.
228 if (FirstVSetVLI) {
229 // If SEW is different, return false.
230 if (RISCVVType::getSEW(VType) != FirstSEW)
231 return false;
232 }
233
234 // If the vsetvli is tail undisturbed, keep the whole register move.
235 if (!RISCVVType::isTailAgnostic(VType))
236 return false;
237
238 // The check is conservative. We only have register classes for
239 // LMUL = 1/2/4/8. We should be able to convert vmv1r.v to vmv.v.v
240 // for fractional LMUL operations. However, we cannot use the vsetvli
241 // lmul for widening operations, because the result of a widening
242 // operation is 2 x LMUL.
243 return LMul == RISCVVType::getVLMUL(VType);
244 } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
245 return false;
246 } else if (MBBI->getNumDefs()) {
247 // Check all the instructions which will change VL.
248 // For example, vleff has implicit def VL.
249 if (MBBI->modifiesRegister(RISCV::VL, /*TRI=*/nullptr))
250 return false;
251
252 // Only convert whole register copies to vmv.v.v when the defining
253 // value appears in the explicit operands.
254 for (const MachineOperand &MO : MBBI->explicit_operands()) {
255 if (!MO.isReg() || !MO.isDef())
256 continue;
257 if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {
258 // We only permit the source of the COPY to have the same LMUL as the
259 // defined operand.
260 // There are cases where we need to keep the whole register copy if the
261 // LMUL is different.
262 // For example,
263 // $x0 = PseudoVSETIVLI 4, 73 // vsetivli zero, 4, e16,m2,ta,m
264 // $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2
265 // # The COPY may be created by vlmul_trunc intrinsic.
266 // $v26m2 = COPY renamable $v28m2, implicit killed $v28m4
267 //
268 // After widening, the valid value will be 4 x e32 elements. If we
269 // convert the COPY to vmv.v.v, it will only copy 4 x e16 elements.
270 // FIXME: The COPY of subregister of Zvlsseg register will not be able
271 // to convert to vmv.v.[v|i] under the constraint.
272 if (MO.getReg() != SrcReg)
273 return false;
274
275 // For widening reduction instructions with an LMUL_1 input vector,
276 // checking only the LMUL is insufficient because the reduction result
277 // is always LMUL_1.
278 // For example,
279 // $x11 = PseudoVSETIVLI 1, 64 // vsetivli a1, 1, e8, m1, ta, mu
280 // $v8m1 = PseudoVWREDSUM_VS_M1 $v26, $v27
281 // $v26 = COPY killed renamable $v8
282 // After widening, The valid value will be 1 x e16 elements. If we
283 // convert the COPY to vmv.v.v, it will only copy 1 x e8 elements.
284 uint64_t TSFlags = MBBI->getDesc().TSFlags;
285 if (RISCVII::isRVVWideningReduction(TSFlags))
286 return false;
287
288 // If the producing instruction does not depend on vsetvli, do not
289 // convert COPY to vmv.v.v. For example, VL1R_V or PseudoVRELOAD.
290 if (!RISCVII::hasSEWOp(TSFlags) || !RISCVII::hasVLOp(TSFlags))
291 return false;
292
293 // Found the definition.
294 FoundDef = true;
295 DefMBBI = MBBI;
296 break;
297 }
298 }
299 }
300 }
301
302 return false;
303}
304
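// Copy a (possibly segmented) vector register group. The group is split into
// the largest aligned pieces that whole-register moves support; for example,
// copying v8m4 to v4m4 is a single vmv4r.v, while an overlapping copy such as
// v4m4 to v6m4 is emitted highest-register-first as two vmv2r.v copies. When
// the producing instruction allows it, whole-register moves are further
// relaxed to vmv.v.v / vmv.v.i (see isConvertibleToVMV_V_V above).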
305void RISCVInstrInfo::copyPhysRegVector(
306 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
307 const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc,
308 const TargetRegisterClass *RegClass) const {
309 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
310 RISCVII::VLMUL LMul = RISCVRI::getLMul(RegClass->TSFlags);
311 unsigned NF = RISCVRI::getNF(RegClass->TSFlags);
312
313 uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
314 uint16_t DstEncoding = TRI->getEncodingValue(DstReg);
315 auto [LMulVal, Fractional] = RISCVVType::decodeVLMUL(LMul);
316 assert(!Fractional && "It is impossible to have a fractional LMUL here.");
317 unsigned NumRegs = NF * LMulVal;
318 bool ReversedCopy =
319 forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NumRegs);
320 if (ReversedCopy) {
321 // If the src and dest overlap when copying a tuple, we need to copy the
322 // registers in reverse.
323 SrcEncoding += NumRegs - 1;
324 DstEncoding += NumRegs - 1;
325 }
326
327 unsigned I = 0;
328 auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)
329 -> std::tuple<RISCVII::VLMUL, const TargetRegisterClass &, unsigned,
330 unsigned, unsigned> {
331 if (ReversedCopy) {
332 // For reversed copying, if there are enough aligned registers (8/4/2), we
333 // can do a larger copy (LMUL 8/4/2).
334 // Besides, we already know that DstEncoding is larger than SrcEncoding
335 // (from forwardCopyWillClobberTuple), so the difference between DstEncoding
336 // and SrcEncoding must be at least the LMUL value we try to use, to avoid
337 // clobbering.
338 uint16_t Diff = DstEncoding - SrcEncoding;
339 if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
340 DstEncoding % 8 == 7)
341 return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
342 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
343 if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
344 DstEncoding % 4 == 3)
345 return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
346 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
347 if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
348 DstEncoding % 2 == 1)
349 return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
350 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
351 // Or we should do LMUL1 copying.
352 return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
353 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
354 }
355
356 // For forward copying, if source register encoding and destination register
357 // encoding are aligned to 8/4/2, we can do a LMUL8/4/2 copying.
358 if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
359 return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
360 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
361 if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
362 return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
363 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
364 if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
365 return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
366 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
367 // Or we should do LMUL1 copying.
368 return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
369 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
370 };
371 auto FindRegWithEncoding = [TRI](const TargetRegisterClass &RegClass,
372 uint16_t Encoding) {
373 MCRegister Reg = RISCV::V0 + Encoding;
374 if (&RegClass == &RISCV::VRRegClass)
375 return Reg;
376 return TRI->getMatchingSuperReg(Reg, RISCV::sub_vrm1_0, &RegClass);
377 };
378 while (I != NumRegs) {
379 // For non-segment copying, we only do this once as the registers are always
380 // aligned.
381 // For segment copying, we may do this several times. If the registers are
382 // aligned to a larger LMUL, we can eliminate some copies.
383 auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
384 GetCopyInfo(SrcEncoding, DstEncoding);
385 auto [NumCopied, _] = RISCVVType::decodeVLMUL(LMulCopied);
386
387 MachineBasicBlock::const_iterator DefMBBI;
388 if (LMul == LMulCopied &&
389 isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
390 Opc = VVOpc;
391 if (DefMBBI->getOpcode() == VIOpc)
392 Opc = VIOpc;
393 }
394
395 // Emit actual copying.
396 // For reversed copying, the encoding should be decreased.
397 MCRegister ActualSrcReg = FindRegWithEncoding(
398 RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
399 MCRegister ActualDstReg = FindRegWithEncoding(
400 RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
401
402 auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), ActualDstReg);
403 bool UseVMV_V_I = RISCV::getRVVMCOpcode(Opc) == RISCV::VMV_V_I;
404 bool UseVMV = UseVMV_V_I || RISCV::getRVVMCOpcode(Opc) == RISCV::VMV_V_V;
405 if (UseVMV)
406 MIB.addReg(ActualDstReg, RegState::Undef);
407 if (UseVMV_V_I)
408 MIB = MIB.add(DefMBBI->getOperand(2));
409 else
410 MIB = MIB.addReg(ActualSrcReg, getKillRegState(KillSrc));
411 if (UseVMV) {
412 const MCInstrDesc &Desc = DefMBBI->getDesc();
413 MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
414 MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
415 MIB.addImm(0); // tu, mu
416 MIB.addReg(RISCV::VL, RegState::Implicit);
417 MIB.addReg(RISCV::VTYPE, RegState::Implicit);
418 }
419
420 // If we are copying reversely, we should decrease the encoding.
421 SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
422 DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
423 I += NumCopied;
424 }
425}
426
427void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
428 MachineBasicBlock::iterator MBBI,
429 const DebugLoc &DL, MCRegister DstReg,
430 MCRegister SrcReg, bool KillSrc) const {
431 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
432
433 if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
434 BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
435 .addReg(SrcReg, getKillRegState(KillSrc))
436 .addImm(0);
437 return;
438 }
439
440 if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
441 // Emit an ADDI for both parts of GPRPair.
442 BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
443 TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
444 .addReg(TRI->getSubReg(SrcReg, RISCV::sub_gpr_even),
445 getKillRegState(KillSrc))
446 .addImm(0);
447 BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
448 TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
449 .addReg(TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd),
450 getKillRegState(KillSrc))
451 .addImm(0);
452 return;
453 }
454
455 // Handle copy from csr
456 if (RISCV::VCSRRegClass.contains(SrcReg) &&
457 RISCV::GPRRegClass.contains(DstReg)) {
458 BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
459 .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
460 .addReg(RISCV::X0);
461 return;
462 }
463
464 if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
465 unsigned Opc;
466 if (STI.hasStdExtZfh()) {
467 Opc = RISCV::FSGNJ_H;
468 } else {
469 assert(STI.hasStdExtF() &&
470 (STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
471 "Unexpected extensions");
472 // Zfhmin/Zfbfmin doesn't have FSGNJ_H, replace FSGNJ_H with FSGNJ_S.
473 DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
474 &RISCV::FPR32RegClass);
475 SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
476 &RISCV::FPR32RegClass);
477 Opc = RISCV::FSGNJ_S;
478 }
479 BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
480 .addReg(SrcReg, getKillRegState(KillSrc))
481 .addReg(SrcReg, getKillRegState(KillSrc));
482 return;
483 }
484
485 if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
486 BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_S), DstReg)
487 .addReg(SrcReg, getKillRegState(KillSrc))
488 .addReg(SrcReg, getKillRegState(KillSrc));
489 return;
490 }
491
492 if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
493 BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_D), DstReg)
494 .addReg(SrcReg, getKillRegState(KillSrc))
495 .addReg(SrcReg, getKillRegState(KillSrc));
496 return;
497 }
498
499 if (RISCV::FPR32RegClass.contains(DstReg) &&
500 RISCV::GPRRegClass.contains(SrcReg)) {
501 BuildMI(MBB, MBBI, DL, get(RISCV::FMV_W_X), DstReg)
502 .addReg(SrcReg, getKillRegState(KillSrc));
503 return;
504 }
505
506 if (RISCV::GPRRegClass.contains(DstReg) &&
507 RISCV::FPR32RegClass.contains(SrcReg)) {
508 BuildMI(MBB, MBBI, DL, get(RISCV::FMV_X_W), DstReg)
509 .addReg(SrcReg, getKillRegState(KillSrc));
510 return;
511 }
512
513 if (RISCV::FPR64RegClass.contains(DstReg) &&
514 RISCV::GPRRegClass.contains(SrcReg)) {
515 assert(STI.getXLen() == 64 && "Unexpected GPR size");
516 BuildMI(MBB, MBBI, DL, get(RISCV::FMV_D_X), DstReg)
517 .addReg(SrcReg, getKillRegState(KillSrc));
518 return;
519 }
520
521 if (RISCV::GPRRegClass.contains(DstReg) &&
522 RISCV::FPR64RegClass.contains(SrcReg)) {
523 assert(STI.getXLen() == 64 && "Unexpected GPR size");
524 BuildMI(MBB, MBBI, DL, get(RISCV::FMV_X_D), DstReg)
525 .addReg(SrcReg, getKillRegState(KillSrc));
526 return;
527 }
528
529 // VR->VR copies.
530 static const TargetRegisterClass *RVVRegClasses[] = {
531 &RISCV::VRRegClass, &RISCV::VRM2RegClass, &RISCV::VRM4RegClass,
532 &RISCV::VRM8RegClass, &RISCV::VRN2M1RegClass, &RISCV::VRN2M2RegClass,
533 &RISCV::VRN2M4RegClass, &RISCV::VRN3M1RegClass, &RISCV::VRN3M2RegClass,
534 &RISCV::VRN4M1RegClass, &RISCV::VRN4M2RegClass, &RISCV::VRN5M1RegClass,
535 &RISCV::VRN6M1RegClass, &RISCV::VRN7M1RegClass, &RISCV::VRN8M1RegClass};
536 for (const auto &RegClass : RVVRegClasses) {
537 if (RegClass->contains(DstReg, SrcReg)) {
538 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RegClass);
539 return;
540 }
541 }
542
543 llvm_unreachable("Impossible reg-to-reg copy");
544}
545
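// Spill SrcReg to stack slot FI. Scalar classes use a plain store (sw/sd,
// fsh/fsw/fsd, or the Zdinx pair-store pseudo); single vector registers use
// whole-register stores (vs1r.v-vs8r.v), and segment register tuples use the
// PseudoVSPILL<N> pseudos. Scalable (vector) spills take no immediate offset.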
546void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
547 MachineBasicBlock::iterator I,
548 Register SrcReg, bool IsKill, int FI,
549 const TargetRegisterClass *RC,
550 const TargetRegisterInfo *TRI,
551 Register VReg) const {
552 MachineFunction *MF = MBB.getParent();
553 MachineFrameInfo &MFI = MF->getFrameInfo();
554
555 unsigned Opcode;
556 bool IsScalableVector = true;
557 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
558 Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
559 RISCV::SW : RISCV::SD;
560 IsScalableVector = false;
561 } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
562 Opcode = RISCV::PseudoRV32ZdinxSD;
563 IsScalableVector = false;
564 } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
565 Opcode = RISCV::FSH;
566 IsScalableVector = false;
567 } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
568 Opcode = RISCV::FSW;
569 IsScalableVector = false;
570 } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
571 Opcode = RISCV::FSD;
572 IsScalableVector = false;
573 } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
574 Opcode = RISCV::VS1R_V;
575 } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
576 Opcode = RISCV::VS2R_V;
577 } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
578 Opcode = RISCV::VS4R_V;
579 } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
580 Opcode = RISCV::VS8R_V;
581 } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
582 Opcode = RISCV::PseudoVSPILL2_M1;
583 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
584 Opcode = RISCV::PseudoVSPILL2_M2;
585 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
586 Opcode = RISCV::PseudoVSPILL2_M4;
587 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
588 Opcode = RISCV::PseudoVSPILL3_M1;
589 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
590 Opcode = RISCV::PseudoVSPILL3_M2;
591 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
592 Opcode = RISCV::PseudoVSPILL4_M1;
593 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
594 Opcode = RISCV::PseudoVSPILL4_M2;
595 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
596 Opcode = RISCV::PseudoVSPILL5_M1;
597 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
598 Opcode = RISCV::PseudoVSPILL6_M1;
599 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
600 Opcode = RISCV::PseudoVSPILL7_M1;
601 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
602 Opcode = RISCV::PseudoVSPILL8_M1;
603 else
604 llvm_unreachable("Can't store this register to stack slot");
605
606 if (IsScalableVector) {
607 MachineMemOperand *MMO = MF->getMachineMemOperand(
608 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
609 LocationSize::beforeOrAfterPointer(), MFI.getObjectAlign(FI));
610
611 MFI.setStackID(FI, TargetStackID::ScalableVector);
612 BuildMI(MBB, I, DebugLoc(), get(Opcode))
613 .addReg(SrcReg, getKillRegState(IsKill))
614 .addFrameIndex(FI)
615 .addMemOperand(MMO);
616 } else {
617 MachineMemOperand *MMO = MF->getMachineMemOperand(
618 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
619 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
620
621 BuildMI(MBB, I, DebugLoc(), get(Opcode))
622 .addReg(SrcReg, getKillRegState(IsKill))
623 .addFrameIndex(FI)
624 .addImm(0)
625 .addMemOperand(MMO);
626 }
627}
628
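// Reload DstReg from stack slot FI, mirroring storeRegToStackSlot: lw/ld and
// flh/flw/fld for scalars, vl<N>re8.v for whole vector registers, and the
// PseudoVRELOAD<N> pseudos for segment register tuples.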
629void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
630 MachineBasicBlock::iterator I,
631 Register DstReg, int FI,
632 const TargetRegisterClass *RC,
633 const TargetRegisterInfo *TRI,
634 Register VReg) const {
635 MachineFunction *MF = MBB.getParent();
636 MachineFrameInfo &MFI = MF->getFrameInfo();
637
638 unsigned Opcode;
639 bool IsScalableVector = true;
640 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
641 Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
642 RISCV::LW : RISCV::LD;
643 IsScalableVector = false;
644 } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
645 Opcode = RISCV::PseudoRV32ZdinxLD;
646 IsScalableVector = false;
647 } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
648 Opcode = RISCV::FLH;
649 IsScalableVector = false;
650 } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
651 Opcode = RISCV::FLW;
652 IsScalableVector = false;
653 } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
654 Opcode = RISCV::FLD;
655 IsScalableVector = false;
656 } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
657 Opcode = RISCV::VL1RE8_V;
658 } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
659 Opcode = RISCV::VL2RE8_V;
660 } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
661 Opcode = RISCV::VL4RE8_V;
662 } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
663 Opcode = RISCV::VL8RE8_V;
664 } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
665 Opcode = RISCV::PseudoVRELOAD2_M1;
666 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
667 Opcode = RISCV::PseudoVRELOAD2_M2;
668 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
669 Opcode = RISCV::PseudoVRELOAD2_M4;
670 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
671 Opcode = RISCV::PseudoVRELOAD3_M1;
672 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
673 Opcode = RISCV::PseudoVRELOAD3_M2;
674 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
675 Opcode = RISCV::PseudoVRELOAD4_M1;
676 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
677 Opcode = RISCV::PseudoVRELOAD4_M2;
678 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
679 Opcode = RISCV::PseudoVRELOAD5_M1;
680 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
681 Opcode = RISCV::PseudoVRELOAD6_M1;
682 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
683 Opcode = RISCV::PseudoVRELOAD7_M1;
684 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
685 Opcode = RISCV::PseudoVRELOAD8_M1;
686 else
687 llvm_unreachable("Can't load this register from stack slot");
688
689 if (IsScalableVector) {
690 MachineMemOperand *MMO = MF->getMachineMemOperand(
691 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
692 LocationSize::beforeOrAfterPointer(), MFI.getObjectAlign(FI));
693
694 MFI.setStackID(FI, TargetStackID::ScalableVector);
695 BuildMI(MBB, I, DebugLoc(), get(Opcode), DstReg)
696 .addFrameIndex(FI)
697 .addMemOperand(MMO);
698 } else {
699 MachineMemOperand *MMO = MF->getMachineMemOperand(
700 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
701 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
702
703 BuildMI(MBB, I, DebugLoc(), get(Opcode), DstReg)
704 .addFrameIndex(FI)
705 .addImm(0)
706 .addMemOperand(MMO);
707 }
708}
709
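// Fold a reload from a stack slot into a following sign/zero extension: for
// example, a reload that only feeds sext.b, zext.h, or sext.w can be replaced
// with a single lb, lhu, or lw directly from the stack slot. This is only
// valid for little-endian layouts, and only when the extension's single
// source operand is the folded value.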
710MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
711 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
712 MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
713 VirtRegMap *VRM) const {
714 const MachineFrameInfo &MFI = MF.getFrameInfo();
715
716 // The below optimizations narrow the load so they are only valid for little
717 // endian.
718 // TODO: Support big endian by adding an offset into the frame object?
719 if (MF.getDataLayout().isBigEndian())
720 return nullptr;
721
722 // Fold load from stack followed by sext.b/sext.h/sext.w/zext.b/zext.h/zext.w.
723 if (Ops.size() != 1 || Ops[0] != 1)
724 return nullptr;
725
726 unsigned LoadOpc;
727 switch (MI.getOpcode()) {
728 default:
729 if (RISCV::isSEXT_W(MI)) {
730 LoadOpc = RISCV::LW;
731 break;
732 }
733 if (RISCV::isZEXT_W(MI)) {
734 LoadOpc = RISCV::LWU;
735 break;
736 }
737 if (RISCV::isZEXT_B(MI)) {
738 LoadOpc = RISCV::LBU;
739 break;
740 }
741 return nullptr;
742 case RISCV::SEXT_H:
743 LoadOpc = RISCV::LH;
744 break;
745 case RISCV::SEXT_B:
746 LoadOpc = RISCV::LB;
747 break;
748 case RISCV::ZEXT_H_RV32:
749 case RISCV::ZEXT_H_RV64:
750 LoadOpc = RISCV::LHU;
751 break;
752 }
753
754 MachineMemOperand *MMO = MF.getMachineMemOperand(
755 MachinePointerInfo::getFixedStack(MF, FrameIndex),
756 MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex),
757 MFI.getObjectAlign(FrameIndex));
758
759 Register DstReg = MI.getOperand(0).getReg();
760 return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(LoadOpc),
761 DstReg)
762 .addFrameIndex(FrameIndex)
763 .addImm(0)
764 .addMemOperand(MMO);
765}
766
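// Materialize the constant Val into DstReg via the RISCVMatInt expansion.
// For example, 0x12345 becomes `lui rd, 0x12` followed by `addi rd, rd, 0x345`;
// every instruction after the first uses DstReg as its source register.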
767void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
768 MachineBasicBlock::iterator MBBI,
769 const DebugLoc &DL, Register DstReg, uint64_t Val,
770 MachineInstr::MIFlag Flag, bool DstRenamable,
771 bool DstIsDead) const {
772 Register SrcReg = RISCV::X0;
773
774 // For RV32, allow a sign or unsigned 32 bit value.
775 if (!STI.is64Bit() && !isInt<32>(Val)) {
776 // If we have a uimm32 it will still fit in a register, so we can allow it.
777 if (!isUInt<32>(Val))
778 report_fatal_error("Should only materialize 32-bit constants for RV32");
779
780 // Sign extend for generateInstSeq.
781 Val = SignExtend64<32>(Val);
782 }
783
784 RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, STI);
785 assert(!Seq.empty());
786
787 bool SrcRenamable = false;
788 unsigned Num = 0;
789
790 for (const RISCVMatInt::Inst &Inst : Seq) {
791 bool LastItem = ++Num == Seq.size();
792 unsigned DstRegState = getDeadRegState(DstIsDead && LastItem) |
793 getRenamableRegState(DstRenamable);
794 unsigned SrcRegState = getKillRegState(SrcReg != RISCV::X0) |
795 getRenamableRegState(SrcRenamable);
796 switch (Inst.getOpndKind()) {
797 case RISCVMatInt::Imm:
798 BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()))
799 .addReg(DstReg, RegState::Define | DstRegState)
800 .addImm(Inst.getImm())
801 .setMIFlag(Flag);
802 break;
803 case RISCVMatInt::RegX0:
804 BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()))
805 .addReg(DstReg, RegState::Define | DstRegState)
806 .addReg(SrcReg, SrcRegState)
807 .addReg(RISCV::X0)
808 .setMIFlag(Flag);
809 break;
810 case RISCVMatInt::RegReg:
811 BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()))
812 .addReg(DstReg, RegState::Define | DstRegState)
813 .addReg(SrcReg, SrcRegState)
814 .addReg(SrcReg, SrcRegState)
815 .setMIFlag(Flag);
816 break;
817 case RISCVMatInt::RegImm:
818 BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()))
819 .addReg(DstReg, RegState::Define | DstRegState)
820 .addReg(SrcReg, SrcRegState)
821 .addImm(Inst.getImm())
822 .setMIFlag(Flag);
823 break;
824 }
825
826 // Only the first instruction has X0 as its source.
827 SrcReg = DstReg;
828 SrcRenamable = DstRenamable;
829 }
830}
831
832static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc) {
833 switch (Opc) {
834 default:
835 return RISCVCC::COND_INVALID;
836 case RISCV::BEQ:
837 return RISCVCC::COND_EQ;
838 case RISCV::BNE:
839 return RISCVCC::COND_NE;
840 case RISCV::BLT:
841 return RISCVCC::COND_LT;
842 case RISCV::BGE:
843 return RISCVCC::COND_GE;
844 case RISCV::BLTU:
845 return RISCVCC::COND_LTU;
846 case RISCV::BGEU:
847 return RISCVCC::COND_GEU;
848 }
849}
850
851// The contents of values added to Cond are not examined outside of
852// RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
853// push BranchOpcode, Reg1, Reg2.
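// For example, `bne a0, a1, .LBB0_2` is parsed as Target = .LBB0_2 and
// Cond = {COND_NE, a0, a1}.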
854static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
855 SmallVectorImpl<MachineOperand> &Cond) {
856 // Block ends with fall-through condbranch.
857 assert(LastInst.getDesc().isConditionalBranch() &&
858 "Unknown conditional branch");
859 Target = LastInst.getOperand(2).getMBB();
860 unsigned CC = getCondFromBranchOpc(LastInst.getOpcode());
861 Cond.push_back(MachineOperand::CreateImm(CC));
862 Cond.push_back(LastInst.getOperand(0));
863 Cond.push_back(LastInst.getOperand(1));
864}
865
866unsigned RISCVCC::getBrCond(RISCVCC::CondCode CC) {
867 switch (CC) {
868 default:
869 llvm_unreachable("Unknown condition code!");
870 case RISCVCC::COND_EQ:
871 return RISCV::BEQ;
872 case RISCVCC::COND_NE:
873 return RISCV::BNE;
874 case RISCVCC::COND_LT:
875 return RISCV::BLT;
876 case RISCVCC::COND_GE:
877 return RISCV::BGE;
878 case RISCVCC::COND_LTU:
879 return RISCV::BLTU;
880 case RISCVCC::COND_GEU:
881 return RISCV::BGEU;
882 }
883}
884
885const MCInstrDesc &RISCVInstrInfo::getBrCond(RISCVCC::CondCode CC) const {
886 return get(RISCVCC::getBrCond(CC));
887}
888
889RISCVCC::CondCode RISCVCC::getOppositeBranchCondition(RISCVCC::CondCode CC) {
890 switch (CC) {
891 default:
892 llvm_unreachable("Unrecognized conditional branch");
893 case RISCVCC::COND_EQ:
894 return RISCVCC::COND_NE;
895 case RISCVCC::COND_NE:
896 return RISCVCC::COND_EQ;
897 case RISCVCC::COND_LT:
898 return RISCVCC::COND_GE;
899 case RISCVCC::COND_GE:
900 return RISCVCC::COND_LT;
901 case RISCVCC::COND_LTU:
902 return RISCVCC::COND_GEU;
903 case RISCVCC::COND_GEU:
904 return RISCVCC::COND_LTU;
905 }
906}
907
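// Analyze the terminators of MBB. A lone `j .LBB1` yields TBB only; a block
// ending in `beq a0, a1, .LBB1; j .LBB2` yields TBB = .LBB1, FBB = .LBB2 and
// Cond = {COND_EQ, a0, a1}. Indirect branches, pre-ISel generic branches, and
// blocks with more than two terminators are rejected.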
908bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
909 MachineBasicBlock *&TBB,
910 MachineBasicBlock *&FBB,
911 SmallVectorImpl<MachineOperand> &Cond,
912 bool AllowModify) const {
913 TBB = FBB = nullptr;
914 Cond.clear();
915
916 // If the block has no terminators, it just falls into the block after it.
917 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
918 if (I == MBB.end() || !isUnpredicatedTerminator(*I))
919 return false;
920
921 // Count the number of terminators and find the first unconditional or
922 // indirect branch.
923 MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
924 int NumTerminators = 0;
925 for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
926 J++) {
927 NumTerminators++;
928 if (J->getDesc().isUnconditionalBranch() ||
929 J->getDesc().isIndirectBranch()) {
930 FirstUncondOrIndirectBr = J.getReverse();
931 }
932 }
933
934 // If AllowModify is true, we can erase any terminators after
935 // FirstUncondOrIndirectBR.
936 if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
937 while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
938 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
939 NumTerminators--;
940 }
941 I = FirstUncondOrIndirectBr;
942 }
943
944 // We can't handle blocks that end in an indirect branch.
945 if (I->getDesc().isIndirectBranch())
946 return true;
947
948 // We can't handle Generic branch opcodes from Global ISel.
949 if (I->isPreISelOpcode())
950 return true;
951
952 // We can't handle blocks with more than 2 terminators.
953 if (NumTerminators > 2)
954 return true;
955
956 // Handle a single unconditional branch.
957 if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
958 TBB = getBranchDestBlock(*I);
959 return false;
960 }
961
962 // Handle a single conditional branch.
963 if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
964 parseCondBranch(*I, TBB, Cond);
965 return false;
966 }
967
968 // Handle a conditional branch followed by an unconditional branch.
969 if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
970 I->getDesc().isUnconditionalBranch()) {
971 parseCondBranch(*std::prev(I), TBB, Cond);
972 FBB = getBranchDestBlock(*I);
973 return false;
974 }
975
976 // Otherwise, we can't handle this.
977 return true;
978}
979
980unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
981 int *BytesRemoved) const {
982 if (BytesRemoved)
983 *BytesRemoved = 0;
984 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
985 if (I == MBB.end())
986 return 0;
987
988 if (!I->getDesc().isUnconditionalBranch() &&
989 !I->getDesc().isConditionalBranch())
990 return 0;
991
992 // Remove the branch.
993 if (BytesRemoved)
994 *BytesRemoved += getInstSizeInBytes(*I);
995 I->eraseFromParent();
996
997 I = MBB.end();
998
999 if (I == MBB.begin())
1000 return 1;
1001 --I;
1002 if (!I->getDesc().isConditionalBranch())
1003 return 1;
1004
1005 // Remove the branch.
1006 if (BytesRemoved)
1007 *BytesRemoved += getInstSizeInBytes(*I);
1008 I->eraseFromParent();
1009 return 2;
1010}
1011
1012// Inserts a branch into the end of the specific MachineBasicBlock, returning
1013// the number of instructions inserted.
1014unsigned RISCVInstrInfo::insertBranch(
1015 MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
1016 ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
1017 if (BytesAdded)
1018 *BytesAdded = 0;
1019
1020 // Shouldn't be a fall through.
1021 assert(TBB && "insertBranch must not be told to insert a fallthrough");
1022 assert((Cond.size() == 3 || Cond.size() == 0) &&
1023 "RISC-V branch conditions have two components!");
1024
1025 // Unconditional branch.
1026 if (Cond.empty()) {
1027 MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
1028 if (BytesAdded)
1029 *BytesAdded += getInstSizeInBytes(MI);
1030 return 1;
1031 }
1032
1033 // Either a one or two-way conditional branch.
1034 auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
1035 MachineInstr &CondMI =
1036 *BuildMI(&MBB, DL, getBrCond(CC)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
1037 if (BytesAdded)
1038 *BytesAdded += getInstSizeInBytes(CondMI);
1039
1040 // One-way conditional branch.
1041 if (!FBB)
1042 return 1;
1043
1044 // Two-way conditional branch.
1045 MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
1046 if (BytesAdded)
1047 *BytesAdded += getInstSizeInBytes(MI);
1048 return 2;
1049}
1050
1051void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
1052 MachineBasicBlock &DestBB,
1053 MachineBasicBlock &RestoreBB,
1054 const DebugLoc &DL, int64_t BrOffset,
1055 RegScavenger *RS) const {
1056 assert(RS && "RegScavenger required for long branching");
1057 assert(MBB.empty() &&
1058 "new block should be inserted for expanding unconditional branch");
1059 assert(MBB.pred_size() == 1);
1060 assert(RestoreBB.empty() &&
1061 "restore block should be inserted for restoring clobbered registers");
1062
1063 MachineFunction *MF = MBB.getParent();
1064 MachineRegisterInfo &MRI = MF->getRegInfo();
1065 RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
1066 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1067
1068 if (!isInt<32>(BrOffset))
1069 report_fatal_error(
1070 "Branch offsets outside of the signed 32-bit range not supported");
1071
1072 // FIXME: A virtual register must be used initially, as the register
1073 // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
1074 // uses the same workaround).
1075 Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
1076 auto II = MBB.end();
1077 // We may also update the jump target to RestoreBB later.
1078 MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
1079 .addReg(ScratchReg, RegState::Define | RegState::Dead)
1080 .addMBB(&DestBB, RISCVII::MO_CALL);
1081
1082 RS->enterBasicBlockEnd(MBB);
1083 Register TmpGPR =
1084 RS->scavengeRegisterBackwards(RISCV::GPRRegClass, MI.getIterator(),
1085 /*RestoreAfter=*/false, /*SpAdj=*/0,
1086 /*AllowSpill=*/false);
1087 if (TmpGPR != RISCV::NoRegister)
1088 RS->setRegUsed(TmpGPR);
1089 else {
1090 // The case when there is no scavenged register needs special handling.
1091
1092 // Pick s11 because it doesn't make a difference.
1093 TmpGPR = RISCV::X27;
1094
1095 int FrameIndex = RVFI->getBranchRelaxationScratchFrameIndex();
1096 if (FrameIndex == -1)
1097 report_fatal_error("underestimated function size");
1098
1099 storeRegToStackSlot(MBB, MI, TmpGPR, /*IsKill=*/true, FrameIndex,
1100 &RISCV::GPRRegClass, TRI, Register());
1101 TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
1102 /*SpAdj=*/0, /*FIOperandNum=*/1);
1103
1104 MI.getOperand(1).setMBB(&RestoreBB);
1105
1106 loadRegFromStackSlot(RestoreBB, RestoreBB.end(), TmpGPR, FrameIndex,
1107 &RISCV::GPRRegClass, TRI, Register());
1108 TRI->eliminateFrameIndex(RestoreBB.back(),
1109 /*SpAdj=*/0, /*FIOperandNum=*/1);
1110 }
1111
1112 MRI.replaceRegWith(ScratchReg, TmpGPR);
1113 MRI.clearVirtRegs();
1114}
1115
1116bool RISCVInstrInfo::reverseBranchCondition(
1117 SmallVectorImpl<MachineOperand> &Cond) const {
1118 assert((Cond.size() == 3) && "Invalid branch condition!");
1119 auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
1120 Cond[0].setImm(getOppositeBranchCondition(CC));
1121 return false;
1122}
1123
1124bool RISCVInstrInfo::optimizeCondBranch(MachineInstr &MI) const {
1125 MachineBasicBlock *MBB = MI.getParent();
1126 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1127
1128 MachineBasicBlock *TBB, *FBB;
1129 SmallVector<MachineOperand, 3> Cond;
1130 if (analyzeBranch(*MBB, TBB, FBB, Cond, /*AllowModify=*/false))
1131 return false;
1132
1133 RISCVCC::CondCode CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
1134 assert(CC != RISCVCC::COND_INVALID);
1135
1136 if (CC == RISCVCC::COND_EQ || CC == RISCVCC::COND_NE)
1137 return false;
1138
1139 // For two constants C0 and C1 from
1140 // ```
1141 // li Y, C0
1142 // li Z, C1
1143 // ```
1144 // 1. if C1 = C0 + 1
1145 // we can turn:
1146 // (a) blt Y, X -> bge X, Z
1147 // (b) bge Y, X -> blt X, Z
1148 //
1149 // 2. if C1 = C0 - 1
1150 // we can turn:
1151 // (a) blt X, Y -> bge Z, X
1152 // (b) bge X, Y -> blt Z, X
1153 //
1154 // To make sure this optimization is really beneficial, we only
1155 // optimize for cases where Y had only one use (i.e. only used by the branch).
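// For instance, with `li t0, 32` feeding only `blt t0, a0, .L`, and another
// `li t1, 33` available in the block, the branch can be rewritten as
// `bge a0, t1, .L` (case 1(a) with C0 = 32 and C1 = 33).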
1156
1157 // Right now we only care about LI (i.e. ADDI x0, imm)
1158 auto isLoadImm = [](const MachineInstr *MI, int64_t &Imm) -> bool {
1159 if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
1160 MI->getOperand(1).getReg() == RISCV::X0) {
1161 Imm = MI->getOperand(2).getImm();
1162 return true;
1163 }
1164 return false;
1165 };
1166 // Either a load from immediate instruction or X0.
1167 auto isFromLoadImm = [&](const MachineOperand &Op, int64_t &Imm) -> bool {
1168 if (!Op.isReg())
1169 return false;
1170 Register Reg = Op.getReg();
1171 return Reg.isVirtual() && isLoadImm(MRI.getVRegDef(Reg), Imm);
1172 };
1173
1174 MachineOperand &LHS = MI.getOperand(0);
1175 MachineOperand &RHS = MI.getOperand(1);
1176 // Try to find the register for constant Z; return
1177 // invalid register otherwise.
1178 auto searchConst = [&](int64_t C1) -> Register {
1179 MachineBasicBlock::reverse_iterator II(&MI), E = MBB->rend();
1180 auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {
1181 int64_t Imm;
1182 return isLoadImm(&I, Imm) && Imm == C1 &&
1183 I.getOperand(0).getReg().isVirtual();
1184 });
1185 if (DefC1 != E)
1186 return DefC1->getOperand(0).getReg();
1187
1188 return Register();
1189 };
1190
1191 bool Modify = false;
1192 int64_t C0;
1193 if (isFromLoadImm(LHS, C0) && MRI.hasOneUse(LHS.getReg())) {
1194 // Might be case 1.
1195 // Signed integer overflow is UB. (UINT64_MAX is bigger so we don't need
1196 // to worry about unsigned overflow here)
1197 if (C0 < INT64_MAX)
1198 if (Register RegZ = searchConst(C0 + 1)) {
1199 reverseBranchCondition(Cond);
1200 Cond[1] = MachineOperand::CreateReg(RHS.getReg(), /*isDef=*/false);
1201 Cond[2] = MachineOperand::CreateReg(RegZ, /*isDef=*/false);
1202 // We might extend the live range of Z, clear its kill flag to
1203 // account for this.
1204 MRI.clearKillFlags(RegZ);
1205 Modify = true;
1206 }
1207 } else if (isFromLoadImm(RHS, C0) && MRI.hasOneUse(RHS.getReg())) {
1208 // Might be case 2.
1209 // For unsigned cases, we don't want C1 to wrap back to UINT64_MAX
1210 // when C0 is zero.
1211 if ((CC == RISCVCC::COND_GE || CC == RISCVCC::COND_LT) || C0)
1212 if (Register RegZ = searchConst(C0 - 1)) {
1213 reverseBranchCondition(Cond);
1214 Cond[1] = MachineOperand::CreateReg(RegZ, /*isDef=*/false);
1215 Cond[2] = MachineOperand::CreateReg(LHS.getReg(), /*isDef=*/false);
1216 // We might extend the live range of Z, clear its kill flag to
1217 // account for this.
1218 MRI.clearKillFlags(RegZ);
1219 Modify = true;
1220 }
1221 }
1222
1223 if (!Modify)
1224 return false;
1225
1226 // Build the new branch and remove the old one.
1227 BuildMI(*MBB, MI, MI.getDebugLoc(),
1228 getBrCond(static_cast<RISCVCC::CondCode>(Cond[0].getImm())))
1229 .add(Cond[1])
1230 .add(Cond[2])
1231 .addMBB(TBB);
1232 MI.eraseFromParent();
1233
1234 return true;
1235}
1236
1237MachineBasicBlock *
1238RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
1239 assert(MI.getDesc().isBranch() && "Unexpected opcode!");
1240 // The branch target is always the last operand.
1241 int NumOp = MI.getNumExplicitOperands();
1242 return MI.getOperand(NumOp - 1).getMBB();
1243}
1244
1245bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
1246 int64_t BrOffset) const {
1247 unsigned XLen = STI.getXLen();
1248 // Ideally we could determine the supported branch offset from the
1249 // RISCVII::FormMask, but this can't be used for Pseudo instructions like
1250 // PseudoBR.
1251 switch (BranchOp) {
1252 default:
1253 llvm_unreachable("Unexpected opcode!");
1254 case RISCV::BEQ:
1255 case RISCV::BNE:
1256 case RISCV::BLT:
1257 case RISCV::BGE:
1258 case RISCV::BLTU:
1259 case RISCV::BGEU:
1260 return isIntN(13, BrOffset);
1261 case RISCV::JAL:
1262 case RISCV::PseudoBR:
1263 return isIntN(21, BrOffset);
1264 case RISCV::PseudoJump:
1265 return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
1266 }
1267}
1268
1269// If the operation has a predicated pseudo instruction, return the pseudo
1270// instruction opcode. Otherwise, return RISCV::INSTRUCTION_LIST_END.
1271// TODO: Support more operations.
1272unsigned getPredicatedOpcode(unsigned Opcode) {
1273 switch (Opcode) {
1274 case RISCV::ADD: return RISCV::PseudoCCADD; break;
1275 case RISCV::SUB: return RISCV::PseudoCCSUB; break;
1276 case RISCV::SLL: return RISCV::PseudoCCSLL; break;
1277 case RISCV::SRL: return RISCV::PseudoCCSRL; break;
1278 case RISCV::SRA: return RISCV::PseudoCCSRA; break;
1279 case RISCV::AND: return RISCV::PseudoCCAND; break;
1280 case RISCV::OR: return RISCV::PseudoCCOR; break;
1281 case RISCV::XOR: return RISCV::PseudoCCXOR; break;
1282
1283 case RISCV::ADDI: return RISCV::PseudoCCADDI; break;
1284 case RISCV::SLLI: return RISCV::PseudoCCSLLI; break;
1285 case RISCV::SRLI: return RISCV::PseudoCCSRLI; break;
1286 case RISCV::SRAI: return RISCV::PseudoCCSRAI; break;
1287 case RISCV::ANDI: return RISCV::PseudoCCANDI; break;
1288 case RISCV::ORI: return RISCV::PseudoCCORI; break;
1289 case RISCV::XORI: return RISCV::PseudoCCXORI; break;
1290
1291 case RISCV::ADDW: return RISCV::PseudoCCADDW; break;
1292 case RISCV::SUBW: return RISCV::PseudoCCSUBW; break;
1293 case RISCV::SLLW: return RISCV::PseudoCCSLLW; break;
1294 case RISCV::SRLW: return RISCV::PseudoCCSRLW; break;
1295 case RISCV::SRAW: return RISCV::PseudoCCSRAW; break;
1296
1297 case RISCV::ADDIW: return RISCV::PseudoCCADDIW; break;
1298 case RISCV::SLLIW: return RISCV::PseudoCCSLLIW; break;
1299 case RISCV::SRLIW: return RISCV::PseudoCCSRLIW; break;
1300 case RISCV::SRAIW: return RISCV::PseudoCCSRAIW; break;
1301
1302 case RISCV::ANDN: return RISCV::PseudoCCANDN; break;
1303 case RISCV::ORN: return RISCV::PseudoCCORN; break;
1304 case RISCV::XNOR: return RISCV::PseudoCCXNOR; break;
1305 }
1306
1307 return RISCV::INSTRUCTION_LIST_END;
1308}
1309
1310/// Identify instructions that can be folded into a CCMOV instruction, and
1311/// return the defining instruction.
1312static MachineInstr *canFoldAsPredicatedOp(Register Reg,
1313 const MachineRegisterInfo &MRI,
1314 const TargetInstrInfo *TII) {
1315 if (!Reg.isVirtual())
1316 return nullptr;
1317 if (!MRI.hasOneNonDBGUse(Reg))
1318 return nullptr;
1319 MachineInstr *MI = MRI.getVRegDef(Reg);
1320 if (!MI)
1321 return nullptr;
1322 // Check if MI can be predicated and folded into the CCMOV.
1323 if (getPredicatedOpcode(MI->getOpcode()) == RISCV::INSTRUCTION_LIST_END)
1324 return nullptr;
1325 // Don't predicate li idiom.
1326 if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
1327 MI->getOperand(1).getReg() == RISCV::X0)
1328 return nullptr;
1329 // Check if MI has any other defs or physreg uses.
1330 for (const MachineOperand &MO : llvm::drop_begin(MI->operands())) {
1331 // Reject frame index operands, PEI can't handle the predicated pseudos.
1332 if (MO.isFI() || MO.isCPI() || MO.isJTI())
1333 return nullptr;
1334 if (!MO.isReg())
1335 continue;
1336 // MI can't have any tied operands, that would conflict with predication.
1337 if (MO.isTied())
1338 return nullptr;
1339 if (MO.isDef())
1340 return nullptr;
1341 // Allow constant physregs.
1342 if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))
1343 return nullptr;
1344 }
1345 bool DontMoveAcrossStores = true;
1346 if (!MI->isSafeToMove(/* AliasAnalysis = */ nullptr, DontMoveAcrossStores))
1347 return nullptr;
1348 return MI;
1349}
1350
1351bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
1352 SmallVectorImpl<MachineOperand> &Cond,
1353 unsigned &TrueOp, unsigned &FalseOp,
1354 bool &Optimizable) const {
1355 assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1356 "Unknown select instruction");
1357 // CCMOV operands:
1358 // 0: Def.
1359 // 1: LHS of compare.
1360 // 2: RHS of compare.
1361 // 3: Condition code.
1362 // 4: False use.
1363 // 5: True use.
1364 TrueOp = 5;
1365 FalseOp = 4;
1366 Cond.push_back(MI.getOperand(1));
1367 Cond.push_back(MI.getOperand(2));
1368 Cond.push_back(MI.getOperand(3));
1369 // We can only fold when we support short forward branch opt.
1370 Optimizable = STI.hasShortForwardBranchOpt();
1371 return false;
1372}
1373
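// Fold the single-use instruction that defines the true (or false) operand of
// a PseudoCCMOVGPR into a predicated pseudo. For example, an ADD that only
// feeds the true operand becomes a PseudoCCADD that computes the sum when the
// condition holds and passes the false operand through otherwise; the
// condition is inverted when folding the false-side definition instead.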
1374MachineInstr *
1375RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
1376 SmallPtrSetImpl<MachineInstr *> &SeenMIs,
1377 bool PreferFalse) const {
1378 assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1379 "Unknown select instruction");
1380 if (!STI.hasShortForwardBranchOpt())
1381 return nullptr;
1382
1383 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
1384 MachineInstr *DefMI =
1385 canFoldAsPredicatedOp(MI.getOperand(5).getReg(), MRI, this);
1386 bool Invert = !DefMI;
1387 if (!DefMI)
1388 DefMI = canFoldAsPredicatedOp(MI.getOperand(4).getReg(), MRI, this);
1389 if (!DefMI)
1390 return nullptr;
1391
1392 // Find new register class to use.
1393 MachineOperand FalseReg = MI.getOperand(Invert ? 5 : 4);
1394 Register DestReg = MI.getOperand(0).getReg();
1395 const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
1396 if (!MRI.constrainRegClass(DestReg, PreviousClass))
1397 return nullptr;
1398
1399 unsigned PredOpc = getPredicatedOpcode(DefMI->getOpcode());
1400 assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");
1401
1402 // Create a new predicated version of DefMI.
1403 MachineInstrBuilder NewMI =
1404 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(PredOpc), DestReg);
1405
1406 // Copy the condition portion.
1407 NewMI.add(MI.getOperand(1));
1408 NewMI.add(MI.getOperand(2));
1409
1410 // Add condition code, inverting if necessary.
1411 auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
1412 if (Invert)
1413 CC = RISCVCC::getOppositeBranchCondition(CC);
1414 NewMI.addImm(CC);
1415
1416 // Copy the false register.
1417 NewMI.add(FalseReg);
1418
1419 // Copy all the DefMI operands.
1420 const MCInstrDesc &DefDesc = DefMI->getDesc();
1421 for (unsigned i = 1, e = DefDesc.getNumOperands(); i != e; ++i)
1422 NewMI.add(DefMI->getOperand(i));
1423
1424 // Update SeenMIs set: register newly created MI and erase removed DefMI.
1425 SeenMIs.insert(NewMI);
1426 SeenMIs.erase(DefMI);
1427
1428 // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
1429 // DefMI would be invalid when transferred inside the loop. Checking for a
1430 // loop is expensive, but at least remove kill flags if they are in different
1431 // BBs.
1432 if (DefMI->getParent() != MI.getParent())
1433 NewMI->clearKillInfo();
1434
1435 // The caller will erase MI, but not DefMI.
1436 DefMI->eraseFromParent();
1437 return NewMI;
1438}
1439
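// Instruction size in bytes. Non-temporal loads/stores (Zihintntl) are counted
// together with their ntl.all / c.ntl.all prefix hint, compressible
// instructions count as 2 bytes, and patchable intrinsics (STACKMAP,
// PATCHPOINT, STATEPOINT) report the number of patch bytes they request.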
1440unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
1441 if (MI.isMetaInstruction())
1442 return 0;
1443
1444 unsigned Opcode = MI.getOpcode();
1445
1446 if (Opcode == TargetOpcode::INLINEASM ||
1447 Opcode == TargetOpcode::INLINEASM_BR) {
1448 const MachineFunction &MF = *MI.getParent()->getParent();
1449 const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
1450 return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
1451 *TM.getMCAsmInfo());
1452 }
1453
1454 if (!MI.memoperands_empty()) {
1455 MachineMemOperand *MMO = *(MI.memoperands_begin());
1456 const MachineFunction &MF = *MI.getParent()->getParent();
1457 const auto &ST = MF.getSubtarget<RISCVSubtarget>();
1458 if (ST.hasStdExtZihintntl() && MMO->isNonTemporal()) {
1459 if (ST.hasStdExtCOrZca() && ST.enableRVCHintInstrs()) {
1460 if (isCompressibleInst(MI, STI))
1461 return 4; // c.ntl.all + c.load/c.store
1462 return 6; // c.ntl.all + load/store
1463 }
1464 return 8; // ntl.all + load/store
1465 }
1466 }
1467
1468 if (Opcode == TargetOpcode::BUNDLE)
1469 return getInstBundleLength(MI);
1470
1471 if (MI.getParent() && MI.getParent()->getParent()) {
1472 if (isCompressibleInst(MI, STI))
1473 return 2;
1474 }
1475
1476 switch (Opcode) {
1477 case TargetOpcode::STACKMAP:
1478 // The upper bound for a stackmap intrinsic is the full length of its shadow
1479 return StackMapOpers(&MI).getNumPatchBytes();
1480 case TargetOpcode::PATCHPOINT:
1481 // The size of the patchpoint intrinsic is the number of bytes requested
1482 return PatchPointOpers(&MI).getNumPatchBytes();
1483 case TargetOpcode::STATEPOINT: {
1484 // The size of the statepoint intrinsic is the number of bytes requested
1485 unsigned NumBytes = StatepointOpers(&MI).getNumPatchBytes();
1486 // No patch bytes means at most a PseudoCall is emitted
1487 return std::max(NumBytes, 8U);
1488 }
1489 default:
1490 return get(Opcode).getSize();
1491 }
1492}
1493
1494unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
1495 unsigned Size = 0;
1496 MachineBasicBlock::const_instr_iterator I = MI.getIterator();
1497 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
1498 while (++I != E && I->isInsideBundle()) {
1499 assert(!I->isBundle() && "No nested bundle!");
1500 Size += getInstSizeInBytes(*I);
1501 }
1502 return Size;
1503}
1504
1505bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
1506 const unsigned Opcode = MI.getOpcode();
1507 switch (Opcode) {
1508 default:
1509 break;
1510 case RISCV::FSGNJ_D:
1511 case RISCV::FSGNJ_S:
1512 case RISCV::FSGNJ_H:
1513 case RISCV::FSGNJ_D_INX:
1514 case RISCV::FSGNJ_D_IN32X:
1515 case RISCV::FSGNJ_S_INX:
1516 case RISCV::FSGNJ_H_INX:
1517 // The canonical floating-point move is fsgnj rd, rs, rs.
1518 return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
1519 MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
1520 case RISCV::ADDI:
1521 case RISCV::ORI:
1522 case RISCV::XORI:
1523 return (MI.getOperand(1).isReg() &&
1524 MI.getOperand(1).getReg() == RISCV::X0) ||
1525 (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
1526 }
1527 return MI.isAsCheapAsAMove();
1528}
1529
1530std::optional<DestSourcePair>
1531RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
1532 if (MI.isMoveReg())
1533 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1534 switch (MI.getOpcode()) {
1535 default:
1536 break;
1537 case RISCV::ADDI:
1538 // Operand 1 can be a frameindex but callers expect registers
1539 if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
1540 MI.getOperand(2).getImm() == 0)
1541 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1542 break;
1543 case RISCV::FSGNJ_D:
1544 case RISCV::FSGNJ_S:
1545 case RISCV::FSGNJ_H:
1546 case RISCV::FSGNJ_D_INX:
1547 case RISCV::FSGNJ_D_IN32X:
1548 case RISCV::FSGNJ_S_INX:
1549 case RISCV::FSGNJ_H_INX:
1550 // The canonical floating-point move is fsgnj rd, rs, rs.
1551 if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
1552 MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
1553 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1554 break;
1555 }
1556 return std::nullopt;
1557}
1558
1559MachineTraceStrategy RISCVInstrInfo::getMachineCombinerTraceStrategy() const {
1560 if (ForceMachineCombinerStrategy.getNumOccurrences() == 0) {
1561 // The option is unused. Choose Local strategy only for in-order cores. When
1562 // the scheduling model is unspecified, use the MinInstrCount strategy as
1563 // the more generic one.
1564 const auto &SchedModel = STI.getSchedModel();
1565 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
1566 ? MachineTraceStrategy::TS_MinInstrCount
1567 : MachineTraceStrategy::TS_Local;
1568 }
1569 // The strategy was forced by the option.
1570 return ForceMachineCombinerStrategy;
1571}
1572
1573void RISCVInstrInfo::finalizeInsInstrs(
1574 MachineInstr &Root, unsigned &Pattern,
1575 SmallVectorImpl<MachineInstr *> &InsInstrs) const {
1576 int16_t FrmOpIdx =
1577 RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
1578 if (FrmOpIdx < 0) {
1579 assert(all_of(InsInstrs,
1580 [](MachineInstr *MI) {
1581 return RISCV::getNamedOperandIdx(MI->getOpcode(),
1582 RISCV::OpName::frm) < 0;
1583 }) &&
1584 "New instructions require FRM whereas the old one does not have it");
1585 return;
1586 }
1587
1588 const MachineOperand &FRM = Root.getOperand(FrmOpIdx);
1589 MachineFunction &MF = *Root.getMF();
1590
1591 for (auto *NewMI : InsInstrs) {
1592 // We'd already added the FRM operand.
1593 if (static_cast<unsigned>(RISCV::getNamedOperandIdx(
1594 NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
1595 continue;
1596 MachineInstrBuilder MIB(MF, NewMI);
1597 MIB.add(FRM);
1598 if (FRM.getImm() == RISCVFPRndMode::DYN)
1599 MIB.addUse(RISCV::FRM, RegState::Implicit);
1600 }
1601}
1602
1603static bool isFADD(unsigned Opc) {
1604 switch (Opc) {
1605 default:
1606 return false;
1607 case RISCV::FADD_H:
1608 case RISCV::FADD_S:
1609 case RISCV::FADD_D:
1610 return true;
1611 }
1612}
1613
1614static bool isFSUB(unsigned Opc) {
1615 switch (Opc) {
1616 default:
1617 return false;
1618 case RISCV::FSUB_H:
1619 case RISCV::FSUB_S:
1620 case RISCV::FSUB_D:
1621 return true;
1622 }
1623}
1624
1625static bool isFMUL(unsigned Opc) {
1626 switch (Opc) {
1627 default:
1628 return false;
1629 case RISCV::FMUL_H:
1630 case RISCV::FMUL_S:
1631 case RISCV::FMUL_D:
1632 return true;
1633 }
1634}
1635
1636bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
1637 bool Invert) const {
1638#define OPCODE_LMUL_CASE(OPC) \
1639 case RISCV::OPC##_M1: \
1640 case RISCV::OPC##_M2: \
1641 case RISCV::OPC##_M4: \
1642 case RISCV::OPC##_M8: \
1643 case RISCV::OPC##_MF2: \
1644 case RISCV::OPC##_MF4: \
1645 case RISCV::OPC##_MF8
1646
1647#define OPCODE_LMUL_MASK_CASE(OPC) \
1648 case RISCV::OPC##_M1_MASK: \
1649 case RISCV::OPC##_M2_MASK: \
1650 case RISCV::OPC##_M4_MASK: \
1651 case RISCV::OPC##_M8_MASK: \
1652 case RISCV::OPC##_MF2_MASK: \
1653 case RISCV::OPC##_MF4_MASK: \
1654 case RISCV::OPC##_MF8_MASK
1655
1656 unsigned Opcode = Inst.getOpcode();
1657 if (Invert) {
1658 if (auto InvOpcode = getInverseOpcode(Opcode))
1659 Opcode = *InvOpcode;
1660 else
1661 return false;
1662 }
1663
1664 // clang-format off
1665 switch (Opcode) {
1666 default:
1667 return false;
1668 OPCODE_LMUL_CASE(PseudoVADD_VV):
1669 OPCODE_LMUL_MASK_CASE(PseudoVADD_VV):
1670 OPCODE_LMUL_CASE(PseudoVMUL_VV):
1671 OPCODE_LMUL_MASK_CASE(PseudoVMUL_VV):
1672 return true;
1673 }
1674 // clang-format on
1675
1676#undef OPCODE_LMUL_MASK_CASE
1677#undef OPCODE_LMUL_CASE
1678}
1679
1680bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,
1681 const MachineInstr &Prev) const {
1682 if (!areOpcodesEqualOrInverse(Root.getOpcode(), Prev.getOpcode()))
1683 return false;
1684
1685 assert(Root.getMF() == Prev.getMF());
1686 const MachineRegisterInfo *MRI = &Root.getMF()->getRegInfo();
1687 const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
1688
1689 // Make sure vtype operands are also the same.
1690 const MCInstrDesc &Desc = get(Root.getOpcode());
1691 const uint64_t TSFlags = Desc.TSFlags;
1692
1693 auto checkImmOperand = [&](unsigned OpIdx) {
1694 return Root.getOperand(OpIdx).getImm() == Prev.getOperand(OpIdx).getImm();
1695 };
1696
1697 auto checkRegOperand = [&](unsigned OpIdx) {
1698 return Root.getOperand(OpIdx).getReg() == Prev.getOperand(OpIdx).getReg();
1699 };
1700
1701 // PassThru
1702 // TODO: Potentially we can loosen the condition to consider Root to be
1703 // associable with Prev if Root has NoReg as passthru. In which case we
1704 // also need to loosen the condition on vector policies between these.
1705 if (!checkRegOperand(1))
1706 return false;
1707
1708 // SEW
1709 if (RISCVII::hasSEWOp(TSFlags) &&
1710 !checkImmOperand(RISCVII::getSEWOpNum(Desc)))
1711 return false;
1712
1713 // Mask
1714 if (RISCVII::usesMaskPolicy(TSFlags)) {
1715 const MachineBasicBlock *MBB = Root.getParent();
1716 const MachineBasicBlock::const_reverse_iterator It1(&Root);
1717 const MachineBasicBlock::const_reverse_iterator It2(&Prev);
1718 Register MI1VReg;
1719
1720 bool SeenMI2 = false;
1721 for (auto End = MBB->rend(), It = It1; It != End; ++It) {
1722 if (It == It2) {
1723 SeenMI2 = true;
1724 if (!MI1VReg.isValid())
1725 // There is no V0 def between Root and Prev; they're sharing the
1726 // same V0.
1727 break;
1728 }
1729
1730 if (It->modifiesRegister(RISCV::V0, TRI)) {
1731 Register SrcReg = It->getOperand(1).getReg();
1732 // If it's not VReg it'll be more difficult to track its defs, so
1733 // bailing out here just to be safe.
1734 if (!SrcReg.isVirtual())
1735 return false;
1736
1737 if (!MI1VReg.isValid()) {
1738 // This is the V0 def for Root.
1739 MI1VReg = SrcReg;
1740 continue;
1741 }
1742
1743 // Some random mask updates.
1744 if (!SeenMI2)
1745 continue;
1746
1747 // This is the V0 def for Prev; check if it's the same as that of
1748 // Root.
1749 if (MI1VReg != SrcReg)
1750 return false;
1751 else
1752 break;
1753 }
1754 }
1755
1756 // If we haven't encountered Prev, it's likely that this function was
1757 // called in a wrong way (e.g. Root is before Prev).
1758 assert(SeenMI2 && "Prev is expected to appear before Root");
1759 }
1760
1761 // Tail / Mask policies
1762 if (RISCVII::hasVecPolicyOp(TSFlags) &&
1763 !checkImmOperand(RISCVII::getVecPolicyOpNum(Desc)))
1764 return false;
1765
1766 // VL
1767 if (RISCVII::hasVLOp(TSFlags)) {
1768 unsigned OpIdx = RISCVII::getVLOpNum(Desc);
1769 const MachineOperand &Op1 = Root.getOperand(OpIdx);
1770 const MachineOperand &Op2 = Prev.getOperand(OpIdx);
1771 if (Op1.getType() != Op2.getType())
1772 return false;
1773 switch (Op1.getType()) {
1774 case MachineOperand::MO_Register:
1775 if (Op1.getReg() != Op2.getReg())
1776 return false;
1777 break;
1778 case MachineOperand::MO_Immediate:
1779 if (Op1.getImm() != Op2.getImm())
1780 return false;
1781 break;
1782 default:
1783 llvm_unreachable("Unrecognized VL operand type");
1784 }
1785 }
1786
1787 // Rounding modes
1788 if (RISCVII::hasRoundModeOp(TSFlags) &&
1789 !checkImmOperand(RISCVII::getVLOpNum(Desc) - 1))
1790 return false;
1791
1792 return true;
1793}
1794
1795// Most of our RVV pseudos have passthru operand, so the real operands
1796// start from index = 2.
1797bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
1798 bool &Commuted) const {
1799 const MachineBasicBlock *MBB = Inst.getParent();
1800 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1801 assert(RISCVII::isFirstDefTiedToFirstUse(get(Inst.getOpcode())) &&
1802 "Expect the presence of passthrough operand.");
1803 MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
1804 MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(3).getReg());
1805
1806 // If only one operand has the same or inverse opcode and it's the second
1807 // source operand, the operands must be commuted.
1808 Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
1809 areRVVInstsReassociable(Inst, *MI2);
1810 if (Commuted)
1811 std::swap(MI1, MI2);
1812
1813 return areRVVInstsReassociable(Inst, *MI1) &&
1814 (isVectorAssociativeAndCommutative(*MI1) ||
1815 isVectorAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
1816 hasReassociableOperands(*MI1, MBB) &&
1817 MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
1818}
1819
1820bool RISCVInstrInfo::hasReassociableOperands(
1821 const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
1822 if (!isVectorAssociativeAndCommutative(Inst) &&
1823 !isVectorAssociativeAndCommutative(Inst, /*Invert=*/true))
1824 return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
1825
1826 const MachineOperand &Op1 = Inst.getOperand(2);
1827 const MachineOperand &Op2 = Inst.getOperand(3);
1828 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1829
1830 // We need virtual register definitions for the operands that we will
1831 // reassociate.
1832 MachineInstr *MI1 = nullptr;
1833 MachineInstr *MI2 = nullptr;
1834 if (Op1.isReg() && Op1.getReg().isVirtual())
1835 MI1 = MRI.getUniqueVRegDef(Op1.getReg());
1836 if (Op2.isReg() && Op2.getReg().isVirtual())
1837 MI2 = MRI.getUniqueVRegDef(Op2.getReg());
1838
1839 // And at least one operand must be defined in MBB.
1840 return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
1841}
1842
1843 void RISCVInstrInfo::getReassociateOperandIndices(
1844 const MachineInstr &Root, unsigned Pattern,
1845 std::array<unsigned, 5> &OperandIndices) const {
1846 TargetInstrInfo::getReassociateOperandIndices(Root, Pattern, OperandIndices);
1847 if (RISCV::getRVVMCOpcode(Root.getOpcode())) {
1848 // Skip the passthrough operand, so increment all indices by one.
1849 for (unsigned I = 0; I < 5; ++I)
1850 ++OperandIndices[I];
1851 }
1852}
1853
1854 bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
1855 bool &Commuted) const {
1856 if (isVectorAssociativeAndCommutative(Inst) ||
1857 isVectorAssociativeAndCommutative(Inst, /*Invert=*/true))
1858 return hasReassociableVectorSibling(Inst, Commuted);
1859
1860 if (!TargetInstrInfo::hasReassociableSibling(Inst, Commuted))
1861 return false;
1862
1863 const MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
1864 unsigned OperandIdx = Commuted ? 2 : 1;
1865 const MachineInstr &Sibling =
1866 *MRI.getVRegDef(Inst.getOperand(OperandIdx).getReg());
1867
1868 int16_t InstFrmOpIdx =
1869 RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
1870 int16_t SiblingFrmOpIdx =
1871 RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);
1872
1873 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
1874 RISCV::hasEqualFRM(Inst, Sibling);
1875}
1876
1877 bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
1878 bool Invert) const {
1879 if (isVectorAssociativeAndCommutative(Inst, Invert))
1880 return true;
1881
1882 unsigned Opc = Inst.getOpcode();
1883 if (Invert) {
1884 auto InverseOpcode = getInverseOpcode(Opc);
1885 if (!InverseOpcode)
1886 return false;
1887 Opc = *InverseOpcode;
1888 }
1889
1890 if (isFADD(Opc) || isFMUL(Opc))
1891 return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
1892 Inst.getFlag(MachineInstr::MIFlag::FmNsz);
1893
1894 switch (Opc) {
1895 default:
1896 return false;
1897 case RISCV::ADD:
1898 case RISCV::ADDW:
1899 case RISCV::AND:
1900 case RISCV::OR:
1901 case RISCV::XOR:
1902 // From RISC-V ISA spec, if both the high and low bits of the same product
1903 // are required, then the recommended code sequence is:
1904 //
1905 // MULH[[S]U] rdh, rs1, rs2
1906 // MUL rdl, rs1, rs2
1907 // (source register specifiers must be in same order and rdh cannot be the
1908 // same as rs1 or rs2)
1909 //
1910 // Microarchitectures can then fuse these into a single multiply operation
1911 // instead of performing two separate multiplies.
1912 // MachineCombiner may reassociate MUL operands and lose the fusion
1913 // opportunity.
1914 case RISCV::MUL:
1915 case RISCV::MULW:
1916 case RISCV::MIN:
1917 case RISCV::MINU:
1918 case RISCV::MAX:
1919 case RISCV::MAXU:
1920 case RISCV::FMIN_H:
1921 case RISCV::FMIN_S:
1922 case RISCV::FMIN_D:
1923 case RISCV::FMAX_H:
1924 case RISCV::FMAX_S:
1925 case RISCV::FMAX_D:
1926 return true;
1927 }
1928
1929 return false;
1930}
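// Illustrative sketch (not from the original source; register names are
// arbitrary): because ADD is associative and commutative, the machine
// combiner can rebalance a serial chain such as
//   add t0, a0, a1
//   add t1, t0, a2
//   add t2, t1, a3      // depth 3
// into the shallower tree
//   add t0, a0, a1
//   add t1, a2, a3
//   add t2, t0, t1      // depth 2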
1931
1932std::optional<unsigned>
1933RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
1934#define RVV_OPC_LMUL_CASE(OPC, INV) \
1935 case RISCV::OPC##_M1: \
1936 return RISCV::INV##_M1; \
1937 case RISCV::OPC##_M2: \
1938 return RISCV::INV##_M2; \
1939 case RISCV::OPC##_M4: \
1940 return RISCV::INV##_M4; \
1941 case RISCV::OPC##_M8: \
1942 return RISCV::INV##_M8; \
1943 case RISCV::OPC##_MF2: \
1944 return RISCV::INV##_MF2; \
1945 case RISCV::OPC##_MF4: \
1946 return RISCV::INV##_MF4; \
1947 case RISCV::OPC##_MF8: \
1948 return RISCV::INV##_MF8
1949
1950#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
1951 case RISCV::OPC##_M1_MASK: \
1952 return RISCV::INV##_M1_MASK; \
1953 case RISCV::OPC##_M2_MASK: \
1954 return RISCV::INV##_M2_MASK; \
1955 case RISCV::OPC##_M4_MASK: \
1956 return RISCV::INV##_M4_MASK; \
1957 case RISCV::OPC##_M8_MASK: \
1958 return RISCV::INV##_M8_MASK; \
1959 case RISCV::OPC##_MF2_MASK: \
1960 return RISCV::INV##_MF2_MASK; \
1961 case RISCV::OPC##_MF4_MASK: \
1962 return RISCV::INV##_MF4_MASK; \
1963 case RISCV::OPC##_MF8_MASK: \
1964 return RISCV::INV##_MF8_MASK
1965
1966 switch (Opcode) {
1967 default:
1968 return std::nullopt;
1969 case RISCV::FADD_H:
1970 return RISCV::FSUB_H;
1971 case RISCV::FADD_S:
1972 return RISCV::FSUB_S;
1973 case RISCV::FADD_D:
1974 return RISCV::FSUB_D;
1975 case RISCV::FSUB_H:
1976 return RISCV::FADD_H;
1977 case RISCV::FSUB_S:
1978 return RISCV::FADD_S;
1979 case RISCV::FSUB_D:
1980 return RISCV::FADD_D;
1981 case RISCV::ADD:
1982 return RISCV::SUB;
1983 case RISCV::SUB:
1984 return RISCV::ADD;
1985 case RISCV::ADDW:
1986 return RISCV::SUBW;
1987 case RISCV::SUBW:
1988 return RISCV::ADDW;
1989 // clang-format off
1990 RVV_OPC_LMUL_CASE(PseudoVADD_VV, PseudoVSUB_VV);
1991 RVV_OPC_LMUL_MASK_CASE(PseudoVADD_VV, PseudoVSUB_VV);
1992 RVV_OPC_LMUL_CASE(PseudoVSUB_VV, PseudoVADD_VV);
1993 RVV_OPC_LMUL_MASK_CASE(PseudoVSUB_VV, PseudoVADD_VV);
1994 // clang-format on
1995 }
1996
1997#undef RVV_OPC_LMUL_MASK_CASE
1998#undef RVV_OPC_LMUL_CASE
1999}
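// Illustrative sketch (not from the original source): pairing each opcode
// with its inverse lets the combiner reassociate mixed chains, e.g.
//   sub t0, a0, a1
//   add t1, t0, a2
// can be rewritten as
//   add t0, a0, a2
//   sub t1, t0, a1
// which computes the same value under two's-complement wrap-around.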
2000
2001 static bool canCombineFPFusedMultiply(const MachineInstr &Root,
2002 const MachineOperand &MO,
2003 bool DoRegPressureReduce) {
2004 if (!MO.isReg() || !MO.getReg().isVirtual())
2005 return false;
2006 const MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
2007 MachineInstr *MI = MRI.getVRegDef(MO.getReg());
2008 if (!MI || !isFMUL(MI->getOpcode()))
2009 return false;
2010
2011 if (!Root.getFlag(MachineInstr::MIFlag::FmContract) ||
2012 !Root.getFlag(MachineInstr::MIFlag::FmReassoc))
2013 return false;
2014
2015 // Try combining even if fmul has more than one use as it eliminates
2016 // dependency between fadd(fsub) and fmul. However, it can extend liveranges
2017 // for fmul operands, so reject the transformation in register pressure
2018 // reduction mode.
2019 if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
2020 return false;
2021
2022 // Do not combine instructions from different basic blocks.
2023 if (Root.getParent() != MI->getParent())
2024 return false;
2025 return RISCV::hasEqualFRM(Root, *MI);
2026}
2027
2028 static bool getFPFusedMultiplyPatterns(MachineInstr &Root,
2029 SmallVectorImpl<unsigned> &Patterns,
2030 bool DoRegPressureReduce) {
2031 unsigned Opc = Root.getOpcode();
2032 bool IsFAdd = isFADD(Opc);
2033 if (!IsFAdd && !isFSUB(Opc))
2034 return false;
2035 bool Added = false;
2036 if (canCombineFPFusedMultiply(Root, Root.getOperand(1),
2037 DoRegPressureReduce)) {
2038 Patterns.push_back(IsFAdd ? RISCVMachineCombinerPattern::FMADD_AX
2039 : RISCVMachineCombinerPattern::FMSUB);
2040 Added = true;
2041 }
2042 if (canCombineFPFusedMultiply(Root, Root.getOperand(2),
2043 DoRegPressureReduce)) {
2044 Patterns.push_back(IsFAdd ? RISCVMachineCombinerPattern::FMADD_XA
2045 : RISCVMachineCombinerPattern::FNMSUB);
2046 Added = true;
2047 }
2048 return Added;
2049}
2050
2051static bool getFPPatterns(MachineInstr &Root,
2052 SmallVectorImpl<unsigned> &Patterns,
2053 bool DoRegPressureReduce) {
2054 return getFPFusedMultiplyPatterns(Root, Patterns, DoRegPressureReduce);
2055}
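// Illustrative sketch (not from the original source; register names are
// arbitrary): with contraction-friendly fast-math flags, the pattern
//   fmul.s ft0, fa0, fa1
//   fadd.s fa2, ft0, fa3
// collected here can later be rewritten as the single fused instruction
//   fmadd.s fa2, fa0, fa1, fa3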
2056
2057/// Utility routine that checks if \param MO is defined by an
2058/// \param CombineOpc instruction in the basic block \param MBB
2059 static const MachineInstr *canCombine(const MachineBasicBlock &MBB,
2060 const MachineOperand &MO,
2061 unsigned CombineOpc) {
2062 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2063 const MachineInstr *MI = nullptr;
2064
2065 if (MO.isReg() && MO.getReg().isVirtual())
2066 MI = MRI.getUniqueVRegDef(MO.getReg());
2067 // And it needs to be in the trace (otherwise, it won't have a depth).
2068 if (!MI || MI->getParent() != &MBB || MI->getOpcode() != CombineOpc)
2069 return nullptr;
2070 // It must be used only by the user we combine with.
2071 if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
2072 return nullptr;
2073
2074 return MI;
2075}
2076
2077/// Utility routine that checks if \param MO is defined by a SLLI in \param
2078/// MBB that can be combined by splitting across 2 SHXADD instructions. The
2079/// first SHXADD shift amount is given by \param OuterShiftAmt.
2080 static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB,
2081 const MachineOperand &MO,
2082 unsigned OuterShiftAmt) {
2083 const MachineInstr *ShiftMI = canCombine(MBB, MO, RISCV::SLLI);
2084 if (!ShiftMI)
2085 return false;
2086
2087 unsigned InnerShiftAmt = ShiftMI->getOperand(2).getImm();
2088 if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
2089 return false;
2090
2091 return true;
2092}
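// Worked example (illustrative): for a sh3add root the outer shift amount is
// 3, so an inner slli of 3..6 qualifies (difference 0..3); an inner slli of 5
// leaves a difference of 2 and will later be rewritten with a sh2add, while
// an inner slli of 8 (difference 5) is rejected by the check above.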
2093
2094// Returns the shift amount from a SHXADD instruction. Returns 0 if the
2095// instruction is not a SHXADD.
2096static unsigned getSHXADDShiftAmount(unsigned Opc) {
2097 switch (Opc) {
2098 default:
2099 return 0;
2100 case RISCV::SH1ADD:
2101 return 1;
2102 case RISCV::SH2ADD:
2103 return 2;
2104 case RISCV::SH3ADD:
2105 return 3;
2106 }
2107}
2108
2109// Look for opportunities to combine (sh3add Z, (add X, (slli Y, 5))) into
2110// (sh3add (sh2add Y, Z), X).
2111static bool getSHXADDPatterns(const MachineInstr &Root,
2112 SmallVectorImpl<unsigned> &Patterns) {
2113 unsigned ShiftAmt = getSHXADDShiftAmount(Root.getOpcode());
2114 if (!ShiftAmt)
2115 return false;
2116
2117 const MachineBasicBlock &MBB = *Root.getParent();
2118
2119 const MachineInstr *AddMI = canCombine(MBB, Root.getOperand(2), RISCV::ADD);
2120 if (!AddMI)
2121 return false;
2122
2123 bool Found = false;
2124 if (canCombineShiftIntoShXAdd(MBB, AddMI->getOperand(1), ShiftAmt)) {
2125 Patterns.push_back(RISCVMachineCombinerPattern::SHXADD_ADD_SLLI_OP1);
2126 Found = true;
2127 }
2128 if (canCombineShiftIntoShXAdd(MBB, AddMI->getOperand(2), ShiftAmt)) {
2129 Patterns.push_back(RISCVMachineCombinerPattern::SHXADD_ADD_SLLI_OP2);
2130 Found = true;
2131 }
2132
2133 return Found;
2134}
2135
2136 CombinerObjective RISCVInstrInfo::getCombinerObjective(unsigned Pattern) const {
2137 switch (Pattern) {
2138 case RISCVMachineCombinerPattern::FMADD_AX:
2139 case RISCVMachineCombinerPattern::FMADD_XA:
2140 case RISCVMachineCombinerPattern::FMSUB:
2141 case RISCVMachineCombinerPattern::FNMSUB:
2142 return CombinerObjective::MustReduceDepth;
2143 default:
2144 return TargetInstrInfo::getCombinerObjective(Pattern);
2145 }
2146}
2147
2148 bool RISCVInstrInfo::getMachineCombinerPatterns(
2149 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
2150 bool DoRegPressureReduce) const {
2151
2152 if (getFPPatterns(Root, Patterns, DoRegPressureReduce))
2153 return true;
2154
2155 if (getSHXADDPatterns(Root, Patterns))
2156 return true;
2157
2158 return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns,
2159 DoRegPressureReduce);
2160}
2161
2162static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern) {
2163 switch (RootOpc) {
2164 default:
2165 llvm_unreachable("Unexpected opcode");
2166 case RISCV::FADD_H:
2167 return RISCV::FMADD_H;
2168 case RISCV::FADD_S:
2169 return RISCV::FMADD_S;
2170 case RISCV::FADD_D:
2171 return RISCV::FMADD_D;
2172 case RISCV::FSUB_H:
2173 return Pattern == RISCVMachineCombinerPattern::FMSUB ? RISCV::FMSUB_H
2174 : RISCV::FNMSUB_H;
2175 case RISCV::FSUB_S:
2176 return Pattern == RISCVMachineCombinerPattern::FMSUB ? RISCV::FMSUB_S
2177 : RISCV::FNMSUB_S;
2178 case RISCV::FSUB_D:
2179 return Pattern == RISCVMachineCombinerPattern::FMSUB ? RISCV::FMSUB_D
2180 : RISCV::FNMSUB_D;
2181 }
2182}
2183
2184static unsigned getAddendOperandIdx(unsigned Pattern) {
2185 switch (Pattern) {
2186 default:
2187 llvm_unreachable("Unexpected pattern");
2188 case RISCVMachineCombinerPattern::FMADD_AX:
2189 case RISCVMachineCombinerPattern::FMSUB:
2190 return 2;
2191 case RISCVMachineCombinerPattern::FMADD_XA:
2192 case RISCVMachineCombinerPattern::FNMSUB:
2193 return 1;
2194 }
2195}
2196
2197 static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev,
2198 unsigned Pattern,
2199 SmallVectorImpl<MachineInstr *> &InsInstrs,
2200 SmallVectorImpl<MachineInstr *> &DelInstrs) {
2201 MachineFunction *MF = Root.getMF();
2202 MachineRegisterInfo &MRI = MF->getRegInfo();
2203 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2204
2205 MachineOperand &Mul1 = Prev.getOperand(1);
2206 MachineOperand &Mul2 = Prev.getOperand(2);
2207 MachineOperand &Dst = Root.getOperand(0);
2208 MachineOperand &Addend = Root.getOperand(getAddendOperandIdx(Pattern));
2209
2210 Register DstReg = Dst.getReg();
2211 unsigned FusedOpc = getFPFusedMultiplyOpcode(Root.getOpcode(), Pattern);
2212 uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
2213 DebugLoc MergedLoc =
2214 DILocation::getMergedLocation(Root.getDebugLoc(), Prev.getDebugLoc());
2215
2216 bool Mul1IsKill = Mul1.isKill();
2217 bool Mul2IsKill = Mul2.isKill();
2218 bool AddendIsKill = Addend.isKill();
2219
2220 // We need to clear kill flags since we may be extending the live range past
2221 // a kill. If the mul had kill flags, we can preserve those since we know
2222 // where the previous range stopped.
2223 MRI.clearKillFlags(Mul1.getReg());
2224 MRI.clearKillFlags(Mul2.getReg());
2225
2226 MachineInstrBuilder MIB =
2227 BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
2228 .addReg(Mul1.getReg(), getKillRegState(Mul1IsKill))
2229 .addReg(Mul2.getReg(), getKillRegState(Mul2IsKill))
2230 .addReg(Addend.getReg(), getKillRegState(AddendIsKill))
2231 .setMIFlags(IntersectedFlags);
2232
2233 InsInstrs.push_back(MIB);
2234 if (MRI.hasOneNonDBGUse(Prev.getOperand(0).getReg()))
2235 DelInstrs.push_back(&Prev);
2236 DelInstrs.push_back(&Root);
2237}
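// Illustrative sketch (not from the original source; register names are
// arbitrary): for the FMSUB pattern, the fmul feeding the first source of an
// fsub is folded into it, so
//   fmul.s ft0, fa0, fa1
//   fsub.s fa2, ft0, fa3
// becomes
//   fmsub.s fa2, fa0, fa1, fa3    // fa0 * fa1 - fa3
// and the original fsub (plus the fmul, when it has no other users) is
// queued for deletion.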
2238
2239// Combine patterns like (sh3add Z, (add X, (slli Y, 5))) to
2240// (sh3add (sh2add Y, Z), X) if the shift amount can be split across two
2241// shXadd instructions. The outer shXadd keeps its original opcode.
2242static void
2243 genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx,
2244 SmallVectorImpl<MachineInstr *> &InsInstrs,
2245 SmallVectorImpl<MachineInstr *> &DelInstrs,
2246 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) {
2247 MachineFunction *MF = Root.getMF();
2248 MachineRegisterInfo &MRI = MF->getRegInfo();
2249 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2250
2251 unsigned OuterShiftAmt = getSHXADDShiftAmount(Root.getOpcode());
2252 assert(OuterShiftAmt != 0 && "Unexpected opcode");
2253
2254 MachineInstr *AddMI = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
2255 MachineInstr *ShiftMI =
2256 MRI.getUniqueVRegDef(AddMI->getOperand(AddOpIdx).getReg());
2257
2258 unsigned InnerShiftAmt = ShiftMI->getOperand(2).getImm();
2259 assert(InnerShiftAmt >= OuterShiftAmt && "Unexpected shift amount");
2260
2261 unsigned InnerOpc;
2262 switch (InnerShiftAmt - OuterShiftAmt) {
2263 default:
2264 llvm_unreachable("Unexpected shift amount");
2265 case 0:
2266 InnerOpc = RISCV::ADD;
2267 break;
2268 case 1:
2269 InnerOpc = RISCV::SH1ADD;
2270 break;
2271 case 2:
2272 InnerOpc = RISCV::SH2ADD;
2273 break;
2274 case 3:
2275 InnerOpc = RISCV::SH3ADD;
2276 break;
2277 }
2278
2279 const MachineOperand &X = AddMI->getOperand(3 - AddOpIdx);
2280 const MachineOperand &Y = ShiftMI->getOperand(1);
2281 const MachineOperand &Z = Root.getOperand(1);
2282
2283 Register NewVR = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2284
2285 auto MIB1 = BuildMI(*MF, MIMetadata(Root), TII->get(InnerOpc), NewVR)
2286 .addReg(Y.getReg(), getKillRegState(Y.isKill()))
2287 .addReg(Z.getReg(), getKillRegState(Z.isKill()));
2288 auto MIB2 = BuildMI(*MF, MIMetadata(Root), TII->get(Root.getOpcode()),
2289 Root.getOperand(0).getReg())
2290 .addReg(NewVR, RegState::Kill)
2291 .addReg(X.getReg(), getKillRegState(X.isKill()));
2292
2293 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
2294 InsInstrs.push_back(MIB1);
2295 InsInstrs.push_back(MIB2);
2296 DelInstrs.push_back(ShiftMI);
2297 DelInstrs.push_back(AddMI);
2298 DelInstrs.push_back(&Root);
2299}
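// Worked example (illustrative; register names are arbitrary): with an outer
// sh3add and an inner slli by 5, the difference of 2 selects sh2add, so
//   slli   t0, a1, 5
//   add    t1, a0, t0
//   sh3add a2, a3, t1      // a2 = (a3 << 3) + a0 + (a1 << 5)
// is regenerated as
//   sh2add t2, a1, a3      // t2 = (a1 << 2) + a3
//   sh3add a2, t2, a0      // a2 = (t2 << 3) + a0, the same value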
2300
2301 void RISCVInstrInfo::genAlternativeCodeSequence(
2302 MachineInstr &Root, unsigned Pattern,
2303 SmallVectorImpl<MachineInstr *> &InsInstrs,
2304 SmallVectorImpl<MachineInstr *> &DelInstrs,
2305 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
2306 MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
2307 switch (Pattern) {
2308 default:
2309 TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
2310 DelInstrs, InstrIdxForVirtReg);
2311 return;
2312 case RISCVMachineCombinerPattern::FMADD_AX:
2313 case RISCVMachineCombinerPattern::FMSUB: {
2314 MachineInstr &Prev = *MRI.getVRegDef(Root.getOperand(1).getReg());
2315 combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
2316 return;
2317 }
2318 case RISCVMachineCombinerPattern::FMADD_XA:
2319 case RISCVMachineCombinerPattern::FNMSUB: {
2320 MachineInstr &Prev = *MRI.getVRegDef(Root.getOperand(2).getReg());
2321 combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
2322 return;
2323 }
2324 case RISCVMachineCombinerPattern::SHXADD_ADD_SLLI_OP1:
2325 genShXAddAddShift(Root, 1, InsInstrs, DelInstrs, InstrIdxForVirtReg);
2326 return;
2327 case RISCVMachineCombinerPattern::SHXADD_ADD_SLLI_OP2:
2328 genShXAddAddShift(Root, 2, InsInstrs, DelInstrs, InstrIdxForVirtReg);
2329 return;
2330 }
2331}
2332
2333 bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
2334 StringRef &ErrInfo) const {
2335 MCInstrDesc const &Desc = MI.getDesc();
2336
2337 for (const auto &[Index, Operand] : enumerate(Desc.operands())) {
2338 unsigned OpType = Operand.OperandType;
2339 if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
2340 OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
2341 const MachineOperand &MO = MI.getOperand(Index);
2342 if (MO.isImm()) {
2343 int64_t Imm = MO.getImm();
2344 bool Ok;
2345 switch (OpType) {
2346 default:
2347 llvm_unreachable("Unexpected operand type");
2348
2349 // clang-format off
2350#define CASE_OPERAND_UIMM(NUM) \
2351 case RISCVOp::OPERAND_UIMM##NUM: \
2352 Ok = isUInt<NUM>(Imm); \
2353 break;
2364 // clang-format on
2366 Ok = isShiftedUInt<1, 1>(Imm);
2367 break;
2369 Ok = isShiftedUInt<5, 2>(Imm);
2370 break;
2372 Ok = isShiftedUInt<6, 2>(Imm);
2373 break;
2375 Ok = isShiftedUInt<5, 3>(Imm);
2376 break;
2378 Ok = isUInt<8>(Imm) && Imm >= 32;
2379 break;
2381 Ok = isShiftedUInt<6, 3>(Imm);
2382 break;
2384 Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
2385 break;
2387 Ok = isShiftedUInt<8, 2>(Imm) && (Imm != 0);
2388 break;
2390 Ok = Imm == 0;
2391 break;
2393 Ok = isInt<5>(Imm);
2394 break;
2396 Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
2397 break;
2399 Ok = isInt<6>(Imm);
2400 break;
2402 Ok = Imm != 0 && isInt<6>(Imm);
2403 break;
2405 Ok = isUInt<10>(Imm);
2406 break;
2408 Ok = isUInt<11>(Imm);
2409 break;
2411 Ok = isInt<12>(Imm);
2412 break;
2414 Ok = isShiftedInt<7, 5>(Imm);
2415 break;
2417 Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
2418 break;
2420 Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
2421 Ok = Ok && Imm != 0;
2422 break;
2424 Ok = (isUInt<5>(Imm) && Imm != 0) ||
2425 (Imm >= 0xfffe0 && Imm <= 0xfffff);
2426 break;
2428 Ok = Imm >= 0 && Imm <= 10;
2429 break;
2431 Ok = Imm >= 0 && Imm <= 7;
2432 break;
2434 Ok = Imm >= 1 && Imm <= 10;
2435 break;
2437 Ok = Imm >= 2 && Imm <= 14;
2438 break;
2440 Ok = (Imm & 0xf) == 0;
2441 break;
2442 }
2443 if (!Ok) {
2444 ErrInfo = "Invalid immediate";
2445 return false;
2446 }
2447 }
2448 }
2449 }
2450
2451 const uint64_t TSFlags = Desc.TSFlags;
2452 if (RISCVII::hasVLOp(TSFlags)) {
2453 const MachineOperand &Op = MI.getOperand(RISCVII::getVLOpNum(Desc));
2454 if (!Op.isImm() && !Op.isReg()) {
2455 ErrInfo = "Invalid operand type for VL operand";
2456 return false;
2457 }
2458 if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
2459 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2460 auto *RC = MRI.getRegClass(Op.getReg());
2461 if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
2462 ErrInfo = "Invalid register class for VL operand";
2463 return false;
2464 }
2465 }
2466 if (!RISCVII::hasSEWOp(TSFlags)) {
2467 ErrInfo = "VL operand w/o SEW operand?";
2468 return false;
2469 }
2470 }
2471 if (RISCVII::hasSEWOp(TSFlags)) {
2472 unsigned OpIdx = RISCVII::getSEWOpNum(Desc);
2473 if (!MI.getOperand(OpIdx).isImm()) {
2474 ErrInfo = "SEW value expected to be an immediate";
2475 return false;
2476 }
2477 uint64_t Log2SEW = MI.getOperand(OpIdx).getImm();
2478 if (Log2SEW > 31) {
2479 ErrInfo = "Unexpected SEW value";
2480 return false;
2481 }
2482 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
2483 if (!RISCVVType::isValidSEW(SEW)) {
2484 ErrInfo = "Unexpected SEW value";
2485 return false;
2486 }
2487 }
2488 if (RISCVII::hasVecPolicyOp(TSFlags)) {
2489 unsigned OpIdx = RISCVII::getVecPolicyOpNum(Desc);
2490 if (!MI.getOperand(OpIdx).isImm()) {
2491 ErrInfo = "Policy operand expected to be an immediate";
2492 return false;
2493 }
2494 uint64_t Policy = MI.getOperand(OpIdx).getImm();
2496 ErrInfo = "Invalid Policy Value";
2497 return false;
2498 }
2499 if (!RISCVII::hasVLOp(TSFlags)) {
2500 ErrInfo = "policy operand w/o VL operand?";
2501 return false;
2502 }
2503
2504 // VecPolicy operands can only exist on instructions with passthru/merge
2505 // arguments. Note that not all arguments with passthru have vec policy
2506 // operands; some instructions have implicit policies.
2507 unsigned UseOpIdx;
2508 if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
2509 ErrInfo = "policy operand w/o tied operand?";
2510 return false;
2511 }
2512 }
2513
2514 return true;
2515}
2516
2518 const MachineInstr &AddrI,
2519 ExtAddrMode &AM) const {
2520 switch (MemI.getOpcode()) {
2521 default:
2522 return false;
2523 case RISCV::LB:
2524 case RISCV::LBU:
2525 case RISCV::LH:
2526 case RISCV::LHU:
2527 case RISCV::LW:
2528 case RISCV::LWU:
2529 case RISCV::LD:
2530 case RISCV::FLH:
2531 case RISCV::FLW:
2532 case RISCV::FLD:
2533 case RISCV::SB:
2534 case RISCV::SH:
2535 case RISCV::SW:
2536 case RISCV::SD:
2537 case RISCV::FSH:
2538 case RISCV::FSW:
2539 case RISCV::FSD:
2540 break;
2541 }
2542
2543 if (MemI.getOperand(0).getReg() == Reg)
2544 return false;
2545
2546 if (AddrI.getOpcode() != RISCV::ADDI || !AddrI.getOperand(1).isReg() ||
2547 !AddrI.getOperand(2).isImm())
2548 return false;
2549
2550 int64_t OldOffset = MemI.getOperand(2).getImm();
2551 int64_t Disp = AddrI.getOperand(2).getImm();
2552 int64_t NewOffset = OldOffset + Disp;
2553 if (!STI.is64Bit())
2554 NewOffset = SignExtend64<32>(NewOffset);
2555
2556 if (!isInt<12>(NewOffset))
2557 return false;
2558
2559 AM.BaseReg = AddrI.getOperand(1).getReg();
2560 AM.ScaledReg = 0;
2561 AM.Scale = 0;
2562 AM.Displacement = NewOffset;
2564 return true;
2565}
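// Illustrative sketch (not from the original source; register names are
// arbitrary): this hook allows
//   addi a1, a0, 16
//   lw   a2, 4(a1)
// to be folded into a single access off the original base,
//   lw   a2, 20(a0)
// as long as the combined displacement still fits the signed 12-bit
// immediate checked above.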
2566
2568 const ExtAddrMode &AM) const {
2569
2570 const DebugLoc &DL = MemI.getDebugLoc();
2571 MachineBasicBlock &MBB = *MemI.getParent();
2572
2573 assert(AM.ScaledReg == 0 && AM.Scale == 0 &&
2574 "Addressing mode not supported for folding");
2575
2576 return BuildMI(MBB, MemI, DL, get(MemI.getOpcode()))
2577 .addReg(MemI.getOperand(0).getReg(),
2578 MemI.mayLoad() ? RegState::Define : 0)
2579 .addReg(AM.BaseReg)
2580 .addImm(AM.Displacement)
2581 .setMemRefs(MemI.memoperands())
2582 .setMIFlags(MemI.getFlags());
2583}
2584
2585 bool RISCVInstrInfo::getMemOperandsWithOffsetWidth(
2586 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
2587 int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
2588 const TargetRegisterInfo *TRI) const {
2589 if (!LdSt.mayLoadOrStore())
2590 return false;
2591
2592 // Conservatively, only handle scalar loads/stores for now.
2593 switch (LdSt.getOpcode()) {
2594 case RISCV::LB:
2595 case RISCV::LBU:
2596 case RISCV::SB:
2597 case RISCV::LH:
2598 case RISCV::LHU:
2599 case RISCV::FLH:
2600 case RISCV::SH:
2601 case RISCV::FSH:
2602 case RISCV::LW:
2603 case RISCV::LWU:
2604 case RISCV::FLW:
2605 case RISCV::SW:
2606 case RISCV::FSW:
2607 case RISCV::LD:
2608 case RISCV::FLD:
2609 case RISCV::SD:
2610 case RISCV::FSD:
2611 break;
2612 default:
2613 return false;
2614 }
2615 const MachineOperand *BaseOp;
2616 OffsetIsScalable = false;
2617 if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI))
2618 return false;
2619 BaseOps.push_back(BaseOp);
2620 return true;
2621}
2622
2623// TODO: This was copied from SIInstrInfo. Could it be lifted to a common
2624// helper?
2625 static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
2626 ArrayRef<const MachineOperand *> BaseOps1,
2627 const MachineInstr &MI2,
2628 ArrayRef<const MachineOperand *> BaseOps2) {
2629 // Only examine the first "base" operand of each instruction, on the
2630 // assumption that it represents the real base address of the memory access.
2631 // Other operands are typically offsets or indices from this base address.
2632 if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
2633 return true;
2634
2635 if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
2636 return false;
2637
2638 auto MO1 = *MI1.memoperands_begin();
2639 auto MO2 = *MI2.memoperands_begin();
2640 if (MO1->getAddrSpace() != MO2->getAddrSpace())
2641 return false;
2642
2643 auto Base1 = MO1->getValue();
2644 auto Base2 = MO2->getValue();
2645 if (!Base1 || !Base2)
2646 return false;
2647 Base1 = getUnderlyingObject(Base1);
2648 Base2 = getUnderlyingObject(Base2);
2649
2650 if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
2651 return false;
2652
2653 return Base1 == Base2;
2654}
2655
2656 bool RISCVInstrInfo::shouldClusterMemOps(
2657 ArrayRef<const MachineOperand *> BaseOps1, int64_t Offset1,
2658 bool OffsetIsScalable1, ArrayRef<const MachineOperand *> BaseOps2,
2659 int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize,
2660 unsigned NumBytes) const {
2661 // If the mem ops (to be clustered) do not have the same base ptr, then they
2662 // should not be clustered
2663 if (!BaseOps1.empty() && !BaseOps2.empty()) {
2664 const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
2665 const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
2666 if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
2667 return false;
2668 } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
2669 // If only one base op is empty, they do not have the same base ptr
2670 return false;
2671 }
2672
2673 unsigned CacheLineSize =
2674 BaseOps1.front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
2675 // Assume a cache line size of 64 bytes if no size is set in RISCVSubtarget.
2676 CacheLineSize = CacheLineSize ? CacheLineSize : 64;
2677 // Cluster if the memory operations are on the same or a neighbouring cache
2678 // line, but limit the maximum ClusterSize to avoid creating too much
2679 // additional register pressure.
2680 return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < CacheLineSize;
2681}
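// Worked example (illustrative): with the default 64-byte cache line, two
// loads from the same base at offsets 0 and 56 may be clustered
// (|0 - 56| < 64 and the cluster stays within 4 operations), while loads at
// offsets 0 and 128 are kept apart.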
2682
2683// Set BaseReg (the base register operand), Offset (the byte offset being
2684// accessed) and the access Width of the passed instruction that reads/writes
2685// memory. Returns false if the instruction does not read/write memory or the
2686// BaseReg/Offset/Width can't be determined. Is not guaranteed to always
2687// recognise base operands and offsets in all cases.
2688// TODO: Add an IsScalable bool ref argument (like the equivalent AArch64
2689// function) and set it as appropriate.
2690 bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
2691 const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
2692 LocationSize &Width, const TargetRegisterInfo *TRI) const {
2693 if (!LdSt.mayLoadOrStore())
2694 return false;
2695
2696 // Here we assume the standard RISC-V ISA, which uses a base+offset
2697 // addressing mode. You'll need to relax these conditions to support custom
2698 // load/store instructions.
2699 if (LdSt.getNumExplicitOperands() != 3)
2700 return false;
2701 if ((!LdSt.getOperand(1).isReg() && !LdSt.getOperand(1).isFI()) ||
2702 !LdSt.getOperand(2).isImm())
2703 return false;
2704
2705 if (!LdSt.hasOneMemOperand())
2706 return false;
2707
2708 Width = (*LdSt.memoperands_begin())->getSize();
2709 BaseReg = &LdSt.getOperand(1);
2710 Offset = LdSt.getOperand(2).getImm();
2711 return true;
2712}
2713
2714 bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
2715 const MachineInstr &MIa, const MachineInstr &MIb) const {
2716 assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
2717 assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
2718
2719 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
2720 MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
2721 return false;
2722
2723 // Retrieve the base register, offset from the base register and width. Width
2724 // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
2725 // base registers are identical, and the offset of a lower memory access +
2726 // the width doesn't overlap the offset of a higher memory access,
2727 // then the memory accesses are different.
2728 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
2729 const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
2730 int64_t OffsetA = 0, OffsetB = 0;
2731 LocationSize WidthA = 0, WidthB = 0;
2732 if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
2733 getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
2734 if (BaseOpA->isIdenticalTo(*BaseOpB)) {
2735 int LowOffset = std::min(OffsetA, OffsetB);
2736 int HighOffset = std::max(OffsetA, OffsetB);
2737 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
2738 if (LowWidth.hasValue() &&
2739 LowOffset + (int)LowWidth.getValue() <= HighOffset)
2740 return true;
2741 }
2742 }
2743 return false;
2744}
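// Worked example (illustrative): for two accesses off the same base
// register, a 4-byte load at offset 0 and a 4-byte store at offset 8 satisfy
// LowOffset + LowWidth (0 + 4) <= HighOffset (8), so they are reported as
// trivially disjoint; 4-byte accesses at offsets 0 and 2 would not be.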
2745
2746std::pair<unsigned, unsigned>
2747 RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
2748 const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
2749 return std::make_pair(TF & Mask, TF & ~Mask);
2750}
2752 ArrayRef<std::pair<unsigned, const char *>>
2753 RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
2754 using namespace RISCVII;
2755 static const std::pair<unsigned, const char *> TargetFlags[] = {
2756 {MO_CALL, "riscv-call"},
2757 {MO_LO, "riscv-lo"},
2758 {MO_HI, "riscv-hi"},
2759 {MO_PCREL_LO, "riscv-pcrel-lo"},
2760 {MO_PCREL_HI, "riscv-pcrel-hi"},
2761 {MO_GOT_HI, "riscv-got-hi"},
2762 {MO_TPREL_LO, "riscv-tprel-lo"},
2763 {MO_TPREL_HI, "riscv-tprel-hi"},
2764 {MO_TPREL_ADD, "riscv-tprel-add"},
2765 {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
2766 {MO_TLS_GD_HI, "riscv-tls-gd-hi"},
2767 {MO_TLSDESC_HI, "riscv-tlsdesc-hi"},
2768 {MO_TLSDESC_LOAD_LO, "riscv-tlsdesc-load-lo"},
2769 {MO_TLSDESC_ADD_LO, "riscv-tlsdesc-add-lo"},
2770 {MO_TLSDESC_CALL, "riscv-tlsdesc-call"}};
2771 return ArrayRef(TargetFlags);
2772}
2773 bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
2774 MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
2775 const Function &F = MF.getFunction();
2776
2777 // Can F be deduplicated by the linker? If it can, don't outline from it.
2778 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
2779 return false;
2780
2781 // Don't outline from functions with section markings; the program could
2782 // expect that all the code is in the named section.
2783 if (F.hasSection())
2784 return false;
2785
2786 // It's safe to outline from MF.
2787 return true;
2788}
2789
2790 bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
2791 unsigned &Flags) const {
2792 // More accurate safety checking is done in getOutliningCandidateInfo.
2793 return TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags);
2794 }
2795
2796// Enum values indicating how an outlined call should be constructed.
2797 enum MachineOutlinerConstructionID {
2798 MachineOutlinerDefault
2799 };
2800
2801 bool RISCVInstrInfo::shouldOutlineFromFunctionByDefault(
2802 MachineFunction &MF) const {
2803 return MF.getFunction().hasMinSize();
2804}
2805
2806std::optional<outliner::OutlinedFunction>
2808 std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
2809
2810 // First we need to filter out candidates where the X5 register (IE t0) can't
2811 // be used to setup the function call.
2812 auto CannotInsertCall = [](outliner::Candidate &C) {
2813 const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
2814 return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
2815 };
2816
2817 llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
2818
2819 // If the sequence doesn't have enough candidates left, then we're done.
2820 if (RepeatedSequenceLocs.size() < 2)
2821 return std::nullopt;
2822
2823 unsigned SequenceSize = 0;
2824
2825 for (auto &MI : RepeatedSequenceLocs[0])
2826 SequenceSize += getInstSizeInBytes(MI);
2827
2828 // call t0, function = 8 bytes.
2829 unsigned CallOverhead = 8;
2830 for (auto &C : RepeatedSequenceLocs)
2831 C.setCallInfo(MachineOutlinerDefault, CallOverhead);
2832
2833 // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
2834 unsigned FrameOverhead = 4;
2835 if (RepeatedSequenceLocs[0]
2836 .getMF()
2837 ->getSubtarget<RISCVSubtarget>()
2838 .hasStdExtCOrZca())
2839 FrameOverhead = 2;
2840
2841 return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
2842 FrameOverhead, MachineOutlinerDefault);
2843}
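// Worked example (illustrative): a 24-byte sequence repeated in three places
// costs 72 bytes inline. Outlined, it costs roughly 3 * 8 bytes of calls plus
// the 24-byte body plus a 4-byte return (2 bytes with C/Zca), about 52 bytes,
// so the generic outliner would consider it profitable given these
// CallOverhead/FrameOverhead numbers.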
2844
2845 outliner::InstrType
2846 RISCVInstrInfo::getOutliningTypeImpl(MachineBasicBlock::iterator &MBBI,
2847 unsigned Flags) const {
2848 MachineInstr &MI = *MBBI;
2849 MachineBasicBlock *MBB = MI.getParent();
2850 const TargetRegisterInfo *TRI =
2851 MBB->getParent()->getSubtarget().getRegisterInfo();
2852 const auto &F = MI.getMF()->getFunction();
2853
2854 // We can manually strip out CFI instructions later.
2855 if (MI.isCFIInstruction())
2856 // If current function has exception handling code, we can't outline &
2857 // strip these CFI instructions since it may break .eh_frame section
2858 // needed in unwinding.
2859 return F.needsUnwindTableEntry() ? outliner::InstrType::Illegal
2860 : outliner::InstrType::Invisible;
2861
2862 // We need support for tail calls to outlined functions before return
2863 // statements can be allowed.
2864 if (MI.isReturn())
2865 return outliner::InstrType::Illegal;
2866
2867 // Don't allow modifying the X5 register which we use for return addresses for
2868 // these outlined functions.
2869 if (MI.modifiesRegister(RISCV::X5, TRI) ||
2870 MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
2871 return outliner::InstrType::Illegal;
2872
2873 // Make sure the operands don't reference something unsafe.
2874 for (const auto &MO : MI.operands()) {
2875
2876 // pcrel-hi and pcrel-lo can't be put in separate sections; filter that out
2877 // if at all possible.
2878 if (MO.getTargetFlags() == RISCVII::MO_PCREL_LO &&
2879 (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
2880 F.hasSection()))
2881 return outliner::InstrType::Illegal;
2882 }
2883
2884 return outliner::InstrType::Legal;
2885}
2886
2887 void RISCVInstrInfo::buildOutlinedFrame(
2888 MachineBasicBlock &MBB, MachineFunction &MF,
2889 const outliner::OutlinedFunction &OF) const {
2890
2891 // Strip out any CFI instructions
2892 bool Changed = true;
2893 while (Changed) {
2894 Changed = false;
2895 auto I = MBB.begin();
2896 auto E = MBB.end();
2897 for (; I != E; ++I) {
2898 if (I->isCFIInstruction()) {
2899 I->removeFromParent();
2900 Changed = true;
2901 break;
2902 }
2903 }
2904 }
2905
2906 MBB.addLiveIn(RISCV::X5);
2907
2908 // Add in a return instruction to the end of the outlined frame.
2909 MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
2910 .addReg(RISCV::X0, RegState::Define)
2911 .addReg(RISCV::X5)
2912 .addImm(0));
2913}
2914
2915 MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
2916 Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
2917 MachineFunction &MF, outliner::Candidate &C) const {
2918
2919 // Add in a call instruction to the outlined function at the given location.
2920 It = MBB.insert(It,
2921 BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
2922 .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
2923 RISCVII::MO_CALL));
2924 return It;
2925}
2926
2927std::optional<RegImmPair> RISCVInstrInfo::isAddImmediate(const MachineInstr &MI,
2928 Register Reg) const {
2929 // TODO: Handle cases where Reg is a super- or sub-register of the
2930 // destination register.
2931 const MachineOperand &Op0 = MI.getOperand(0);
2932 if (!Op0.isReg() || Reg != Op0.getReg())
2933 return std::nullopt;
2934
2935 // Don't consider ADDIW as a candidate because the caller may not be aware
2936 // of its sign extension behaviour.
2937 if (MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() &&
2938 MI.getOperand(2).isImm())
2939 return RegImmPair{MI.getOperand(1).getReg(), MI.getOperand(2).getImm()};
2940
2941 return std::nullopt;
2942}
2943
2944// MIR printer helper function to annotate Operands with a comment.
2945 std::string RISCVInstrInfo::createMIROperandComment(
2946 const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
2947 const TargetRegisterInfo *TRI) const {
2948 // Print a generic comment for this operand if there is one.
2949 std::string GenericComment =
2950 TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
2951 if (!GenericComment.empty())
2952 return GenericComment;
2953
2954 // If not, we must have an immediate operand.
2955 if (!Op.isImm())
2956 return std::string();
2957
2958 std::string Comment;
2959 raw_string_ostream OS(Comment);
2960
2961 uint64_t TSFlags = MI.getDesc().TSFlags;
2962
2963 // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW
2964 // operand of vector codegen pseudos.
2965 if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
2966 MI.getOpcode() == RISCV::PseudoVSETVLI ||
2967 MI.getOpcode() == RISCV::PseudoVSETIVLI ||
2968 MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
2969 OpIdx == 2) {
2970 unsigned Imm = MI.getOperand(OpIdx).getImm();
2971 RISCVVType::printVType(Imm, OS);
2972 } else if (RISCVII::hasSEWOp(TSFlags) &&
2973 OpIdx == RISCVII::getSEWOpNum(MI.getDesc())) {
2974 unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
2975 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
2976 assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
2977 OS << "e" << SEW;
2978 } else if (RISCVII::hasVecPolicyOp(TSFlags) &&
2979 OpIdx == RISCVII::getVecPolicyOpNum(MI.getDesc())) {
2980 unsigned Policy = MI.getOperand(OpIdx).getImm();
2982 "Invalid Policy Value");
2983 OS << (Policy & RISCVII::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
2984 << (Policy & RISCVII::MASK_AGNOSTIC ? "ma" : "mu");
2985 }
2986
2987 OS.flush();
2988 return Comment;
2989}
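// Illustrative example (not from the original source): for a vsetvli whose
// immediate encodes SEW=32, LMUL=1, tail-agnostic, mask-agnostic, the emitted
// MIR comment would read roughly "e32, m1, ta, ma"; for a vector pseudo's SEW
// operand it would be just "e32", and for a policy operand "ta, ma".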
2990
2991// clang-format off
2992#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
2993 RISCV::Pseudo##OP##_##LMUL
2994
2995#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
2996 RISCV::Pseudo##OP##_##LMUL##_MASK
2997
2998#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
2999 CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
3000 case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)
3001
3002#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
3003 CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
3004 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
3005 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
3006 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
3007 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
3008 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)
3009
3010#define CASE_RVV_OPCODE_UNMASK(OP) \
3011 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3012 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)
3013
3014#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
3015 CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
3016 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
3017 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
3018 case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
3019 case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
3020 case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)
3021
3022#define CASE_RVV_OPCODE_MASK(OP) \
3023 CASE_RVV_OPCODE_MASK_WIDEN(OP): \
3024 case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)
3025
3026#define CASE_RVV_OPCODE_WIDEN(OP) \
3027 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3028 case CASE_RVV_OPCODE_MASK_WIDEN(OP)
3029
3030#define CASE_RVV_OPCODE(OP) \
3031 CASE_RVV_OPCODE_UNMASK(OP): \
3032 case CASE_RVV_OPCODE_MASK(OP)
3033// clang-format on
3034
3035// clang-format off
3036#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
3037 RISCV::PseudoV##OP##_##TYPE##_##LMUL
3038
3039#define CASE_VMA_OPCODE_LMULS_M1(OP, TYPE) \
3040 CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
3041 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
3042 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
3043 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)
3044
3045#define CASE_VMA_OPCODE_LMULS_MF2(OP, TYPE) \
3046 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
3047 case CASE_VMA_OPCODE_LMULS_M1(OP, TYPE)
3048
3049#define CASE_VMA_OPCODE_LMULS_MF4(OP, TYPE) \
3050 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
3051 case CASE_VMA_OPCODE_LMULS_MF2(OP, TYPE)
3052
3053#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
3054 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
3055 case CASE_VMA_OPCODE_LMULS_MF4(OP, TYPE)
3056
3057// VFMA instructions are SEW specific.
3058#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
3059 RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW
3060
3061#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
3062 CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
3063 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
3064 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
3065 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)
3066
3067#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
3068 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
3069 case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)
3070
3071#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
3072 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
3073 case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)
3074
3075#define CASE_VFMA_OPCODE_VV(OP) \
3076 CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
3077 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
3078 case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)
3079
3080#define CASE_VFMA_SPLATS(OP) \
3081 CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
3082 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
3083 case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
3084// clang-format on
3085
3087 unsigned &SrcOpIdx1,
3088 unsigned &SrcOpIdx2) const {
3089 const MCInstrDesc &Desc = MI.getDesc();
3090 if (!Desc.isCommutable())
3091 return false;
3092
3093 switch (MI.getOpcode()) {
3094 case RISCV::TH_MVEQZ:
3095 case RISCV::TH_MVNEZ:
3096 // We can't commute operands if operand 2 (i.e., rs1 in
3097 // mveqz/mvnez rd,rs1,rs2) is the zero-register (as it is
3098 // not valid as the in/out-operand 1).
3099 if (MI.getOperand(2).getReg() == RISCV::X0)
3100 return false;
3101 // Operands 1 and 2 are commutable, if we switch the opcode.
3102 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
3103 case RISCV::TH_MULA:
3104 case RISCV::TH_MULAW:
3105 case RISCV::TH_MULAH:
3106 case RISCV::TH_MULS:
3107 case RISCV::TH_MULSW:
3108 case RISCV::TH_MULSH:
3109 // Operands 2 and 3 are commutable.
3110 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3111 case RISCV::PseudoCCMOVGPRNoX0:
3112 case RISCV::PseudoCCMOVGPR:
3113 // Operands 4 and 5 are commutable.
3114 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
3115 case CASE_RVV_OPCODE(VADD_VV):
3116 case CASE_RVV_OPCODE(VAND_VV):
3117 case CASE_RVV_OPCODE(VOR_VV):
3118 case CASE_RVV_OPCODE(VXOR_VV):
3119 case CASE_RVV_OPCODE_MASK(VMSEQ_VV):
3120 case CASE_RVV_OPCODE_MASK(VMSNE_VV):
3121 case CASE_RVV_OPCODE(VMIN_VV):
3122 case CASE_RVV_OPCODE(VMINU_VV):
3123 case CASE_RVV_OPCODE(VMAX_VV):
3124 case CASE_RVV_OPCODE(VMAXU_VV):
3125 case CASE_RVV_OPCODE(VMUL_VV):
3126 case CASE_RVV_OPCODE(VMULH_VV):
3127 case CASE_RVV_OPCODE(VMULHU_VV):
3128 case CASE_RVV_OPCODE_WIDEN(VWADD_VV):
3129 case CASE_RVV_OPCODE_WIDEN(VWADDU_VV):
3130 case CASE_RVV_OPCODE_WIDEN(VWMUL_VV):
3131 case CASE_RVV_OPCODE_WIDEN(VWMULU_VV):
3132 case CASE_RVV_OPCODE_WIDEN(VWMACC_VV):
3133 case CASE_RVV_OPCODE_WIDEN(VWMACCU_VV):
3134 case CASE_RVV_OPCODE_UNMASK(VADC_VVM):
3135 // Operands 2 and 3 are commutable.
3136 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3137 case CASE_VFMA_SPLATS(FMADD):
3138 case CASE_VFMA_SPLATS(FMSUB):
3139 case CASE_VFMA_SPLATS(FMACC):
3140 case CASE_VFMA_SPLATS(FMSAC):
3141 case CASE_VFMA_SPLATS(FNMADD):
3142 case CASE_VFMA_SPLATS(FNMSUB):
3143 case CASE_VFMA_SPLATS(FNMACC):
3144 case CASE_VFMA_SPLATS(FNMSAC):
3145 case CASE_VFMA_OPCODE_VV(FMACC):
3146 case CASE_VFMA_OPCODE_VV(FMSAC):
3147 case CASE_VFMA_OPCODE_VV(FNMACC):
3148 case CASE_VFMA_OPCODE_VV(FNMSAC):
3149 case CASE_VMA_OPCODE_LMULS(MADD, VX):
3150 case CASE_VMA_OPCODE_LMULS(NMSUB, VX):
3151 case CASE_VMA_OPCODE_LMULS(MACC, VX):
3152 case CASE_VMA_OPCODE_LMULS(NMSAC, VX):
3153 case CASE_VMA_OPCODE_LMULS(MACC, VV):
3154 case CASE_VMA_OPCODE_LMULS(NMSAC, VV): {
3155 // If the tail policy is undisturbed we can't commute.
3156 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
3157 if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
3158 return false;
3159
3160 // For these instructions we can only swap operand 1 and operand 3 by
3161 // changing the opcode.
3162 unsigned CommutableOpIdx1 = 1;
3163 unsigned CommutableOpIdx2 = 3;
3164 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3165 CommutableOpIdx2))
3166 return false;
3167 return true;
3168 }
3169 case CASE_VFMA_OPCODE_VV(FMADD):
3170 case CASE_VFMA_OPCODE_VV(FMSUB):
3171 case CASE_VFMA_OPCODE_VV(FNMADD):
3172 case CASE_VFMA_OPCODE_VV(FNMSUB):
3173 case CASE_VMA_OPCODE_LMULS(MADD, VV):
3174 case CASE_VMA_OPCODE_LMULS(NMSUB, VV): {
3175 // If the tail policy is undisturbed we can't commute.
3176 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
3177 if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
3178 return false;
3179
3180 // For these instructions we have more freedom. We can commute with the
3181 // other multiplicand or with the addend/subtrahend/minuend.
3182
3183 // Any fixed operand must be from source 1, 2 or 3.
3184 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
3185 return false;
3186 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
3187 return false;
3188
3189 // If both ops are fixed, one must be the tied source.
3190 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
3191 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
3192 return false;
3193
3194 // Look for two different register operands assumed to be commutable
3195 // regardless of the FMA opcode. The FMA opcode is adjusted later if
3196 // needed.
3197 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
3198 SrcOpIdx2 == CommuteAnyOperandIndex) {
3199 // At least one of operands to be commuted is not specified and
3200 // this method is free to choose appropriate commutable operands.
3201 unsigned CommutableOpIdx1 = SrcOpIdx1;
3202 if (SrcOpIdx1 == SrcOpIdx2) {
3203 // Both of operands are not fixed. Set one of commutable
3204 // operands to the tied source.
3205 CommutableOpIdx1 = 1;
3206 } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
3207 // Only one of the operands is not fixed.
3208 CommutableOpIdx1 = SrcOpIdx2;
3209 }
3210
3211 // CommutableOpIdx1 is well defined now. Let's choose another commutable
3212 // operand and assign its index to CommutableOpIdx2.
3213 unsigned CommutableOpIdx2;
3214 if (CommutableOpIdx1 != 1) {
3215 // If we haven't already used the tied source, we must use it now.
3216 CommutableOpIdx2 = 1;
3217 } else {
3218 Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
3219
3220 // The commuted operands should have different registers.
3221 // Otherwise, the commute transformation does not change anything and
3222 // is useless. We use this as a hint to make our decision.
3223 if (Op1Reg != MI.getOperand(2).getReg())
3224 CommutableOpIdx2 = 2;
3225 else
3226 CommutableOpIdx2 = 3;
3227 }
3228
3229 // Assign the found pair of commutable indices to SrcOpIdx1 and
3230 // SrcOpIdx2 to return those values.
3231 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3232 CommutableOpIdx2))
3233 return false;
3234 }
3235
3236 return true;
3237 }
3238 }
3239
3240 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
3241}
3242
3243// clang-format off
3244#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
3245 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
3246 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
3247 break;
3248
3249#define CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE) \
3250 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
3251 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
3252 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
3253 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
3254
3255#define CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE) \
3256 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
3257 CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)
3258
3259#define CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE) \
3260 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
3261 CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)
3262
3263#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
3264 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
3265 CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
3266
3267#define CASE_VMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
3268 CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16) \
3269 CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32) \
3270 CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64)
3271
3272// VFMA depends on SEW.
3273#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
3274 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
3275 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
3276 break;
3277
3278#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
3279 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
3280 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
3281 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
3282 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)
3283
3284#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
3285 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
3286 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)
3287
3288#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
3289 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
3290 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
3291 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
3292
3293#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
3294 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
3295 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)
3296
3297#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE, SEW) \
3298 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8, SEW) \
3299 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW)
3300
3301#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
3302 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
3303 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
3304 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
3305
3306 MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
3307 bool NewMI,
3308 unsigned OpIdx1,
3309 unsigned OpIdx2) const {
3310 auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
3311 if (NewMI)
3312 return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
3313 return MI;
3314 };
3315
3316 switch (MI.getOpcode()) {
3317 case RISCV::TH_MVEQZ:
3318 case RISCV::TH_MVNEZ: {
3319 auto &WorkingMI = cloneIfNew(MI);
3320 WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
3321 : RISCV::TH_MVEQZ));
3322 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, OpIdx1,
3323 OpIdx2);
3324 }
3325 case RISCV::PseudoCCMOVGPRNoX0:
3326 case RISCV::PseudoCCMOVGPR: {
3327 // CCMOV can be commuted by inverting the condition.
3328 auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
3329 CC = RISCVCC::getOppositeBranchCondition(CC);
3330 auto &WorkingMI = cloneIfNew(MI);
3331 WorkingMI.getOperand(3).setImm(CC);
3332 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI*/ false,
3333 OpIdx1, OpIdx2);
3334 }
3335 case CASE_VFMA_SPLATS(FMACC):
3336 case CASE_VFMA_SPLATS(FMADD):
3337 case CASE_VFMA_SPLATS(FMSAC):
3338 case CASE_VFMA_SPLATS(FMSUB):
3339 case CASE_VFMA_SPLATS(FNMACC):
3340 case CASE_VFMA_SPLATS(FNMADD):
3341 case CASE_VFMA_SPLATS(FNMSAC):
3342 case CASE_VFMA_SPLATS(FNMSUB):
3343 case CASE_VFMA_OPCODE_VV(FMACC):
3344 case CASE_VFMA_OPCODE_VV(FMSAC):
3345 case CASE_VFMA_OPCODE_VV(FNMACC):
3346 case CASE_VFMA_OPCODE_VV(FNMSAC):
3347 case CASE_VMA_OPCODE_LMULS(MADD, VX):
3348 case CASE_VMA_OPCODE_LMULS(NMSUB, VX):
3349 case CASE_VMA_OPCODE_LMULS(MACC, VX):
3350 case CASE_VMA_OPCODE_LMULS(NMSAC, VX):
3351 case CASE_VMA_OPCODE_LMULS(MACC, VV):
3352 case CASE_VMA_OPCODE_LMULS(NMSAC, VV): {
3353 // It only makes sense to toggle these between clobbering the
3354 // addend/subtrahend/minuend and clobbering one of the multiplicands.
3355 assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
3356 assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
3357 unsigned Opc;
3358 switch (MI.getOpcode()) {
3359 default:
3360 llvm_unreachable("Unexpected opcode");
3361 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
3362 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
3363 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSAC, FMSUB)
3364 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSUB, FMSAC)
3365 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMACC, FNMADD)
3366 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMADD, FNMACC)
3367 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSAC, FNMSUB)
3368 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSUB, FNMSAC)
3369 CASE_VFMA_CHANGE_OPCODE_VV(FMACC, FMADD)
3370 CASE_VFMA_CHANGE_OPCODE_VV(FMSAC, FMSUB)
3371 CASE_VFMA_CHANGE_OPCODE_VV(FNMACC, FNMADD)
3372 CASE_VFMA_CHANGE_OPCODE_VV(FNMSAC, FNMSUB)
3373 CASE_VMA_CHANGE_OPCODE_LMULS(MACC, MADD, VX)
3374 CASE_VMA_CHANGE_OPCODE_LMULS(MADD, MACC, VX)
3375 CASE_VMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VX)
3376 CASE_VMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VX)
3377 CASE_VMA_CHANGE_OPCODE_LMULS(MACC, MADD, VV)
3378 CASE_VMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VV)
3379 }
3380
3381 auto &WorkingMI = cloneIfNew(MI);
3382 WorkingMI.setDesc(get(Opc));
3383 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
3384 OpIdx1, OpIdx2);
3385 }
3386 case CASE_VFMA_OPCODE_VV(FMADD):
3387 case CASE_VFMA_OPCODE_VV(FMSUB):
3388 case CASE_VFMA_OPCODE_VV(FNMADD):
3389 case CASE_VFMA_OPCODE_VV(FNMSUB):
3390 case CASE_VMA_OPCODE_LMULS(MADD, VV):
3391 case CASE_VMA_OPCODE_LMULS(NMSUB, VV): {
3392 assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
3393 // If one of the operands is the addend, we need to change the opcode.
3394 // Otherwise we're just swapping 2 of the multiplicands.
3395 if (OpIdx1 == 3 || OpIdx2 == 3) {
3396 unsigned Opc;
3397 switch (MI.getOpcode()) {
3398 default:
3399 llvm_unreachable("Unexpected opcode");
3400 CASE_VFMA_CHANGE_OPCODE_VV(FMADD, FMACC)
3401 CASE_VFMA_CHANGE_OPCODE_VV(FMSUB, FMSAC)
3402 CASE_VFMA_CHANGE_OPCODE_VV(FNMADD, FNMACC)
3403 CASE_VFMA_CHANGE_OPCODE_VV(FNMSUB, FNMSAC)
3404 CASE_VMA_CHANGE_OPCODE_LMULS(MADD, MACC, VV)
3405 CASE_VMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VV)
3406 }
3407
3408 auto &WorkingMI = cloneIfNew(MI);
3409 WorkingMI.setDesc(get(Opc));
3410 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
3411 OpIdx1, OpIdx2);
3412 }
3413 // Let the default code handle it.
3414 break;
3415 }
3416 }
3417
3418 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
3419}
3420
3421#undef CASE_RVV_OPCODE_UNMASK_LMUL
3422#undef CASE_RVV_OPCODE_MASK_LMUL
3423#undef CASE_RVV_OPCODE_LMUL
3424#undef CASE_RVV_OPCODE_UNMASK_WIDEN
3425#undef CASE_RVV_OPCODE_UNMASK
3426#undef CASE_RVV_OPCODE_MASK_WIDEN
3427#undef CASE_RVV_OPCODE_MASK
3428#undef CASE_RVV_OPCODE_WIDEN
3429#undef CASE_RVV_OPCODE
3430
3431#undef CASE_VMA_OPCODE_COMMON
3432#undef CASE_VMA_OPCODE_LMULS_M1
3433#undef CASE_VMA_OPCODE_LMULS_MF2
3434#undef CASE_VMA_OPCODE_LMULS_MF4
3435#undef CASE_VMA_OPCODE_LMULS
3436#undef CASE_VFMA_OPCODE_COMMON
3437#undef CASE_VFMA_OPCODE_LMULS_M1
3438#undef CASE_VFMA_OPCODE_LMULS_MF2
3439#undef CASE_VFMA_OPCODE_LMULS_MF4
3440#undef CASE_VFMA_OPCODE_VV
3441#undef CASE_VFMA_SPLATS
3442
3443// clang-format off
3444#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
3445 RISCV::PseudoV##OP##_##LMUL##_TIED
3446
3447#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP) \
3448 CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
3449 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
3450 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
3451 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
3452 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
3453
3454#define CASE_WIDEOP_OPCODE_LMULS(OP) \
3455 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
3456 case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
3457
3458#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
3459 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
3460 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
3461 break;
3462
3463#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
3464 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
3465 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
3466 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
3467 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
3468 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
3469
3470#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
3471 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
3472 CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
3473
3474 // FP widening ops may be SEW aware. Create SEW-aware cases for them.
3475#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
3476 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED
3477
3478#define CASE_FP_WIDEOP_OPCODE_LMULS_MF4(OP) \
3479 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
3480 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
3481 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
3482 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
3483 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
3484 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
3485 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
3486 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
3487 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \
3488
3489#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
3490 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
3491 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
3492 break;
3493
3494#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
3495 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
3496 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
3497 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
3498 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
3499 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
3500 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
3501 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
3502 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
3503 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
3504
3505#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
3506 CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
3507// clang-format on
3508
3509 MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
3510 LiveVariables *LV,
3511 LiveIntervals *LIS) const {
3512 MachineInstrBuilder MIB;
3513 switch (MI.getOpcode()) {
3514 default:
3515 return nullptr;
3516 case CASE_FP_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
3517 case CASE_FP_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV): {
3518 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
3519 MI.getNumExplicitOperands() == 7 &&
3520 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
3521 // If the tail policy is undisturbed we can't convert.
3522 if ((MI.getOperand(RISCVII::getVecPolicyOpNum(MI.getDesc())).getImm() &
3523 1) == 0)
3524 return nullptr;
3525 // clang-format off
3526 unsigned NewOpc;
3527 switch (MI.getOpcode()) {
3528 default:
3529 llvm_unreachable("Unexpected opcode");
3530 CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWADD_WV)
3531 CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWSUB_WV)
3532 }
3533 // clang-format on
3534
3535 MachineBasicBlock &MBB = *MI.getParent();
3536 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
3537 .add(MI.getOperand(0))
3538 .addReg(MI.getOperand(0).getReg(), RegState::Undef)
3539 .add(MI.getOperand(1))
3540 .add(MI.getOperand(2))
3541 .add(MI.getOperand(3))
3542 .add(MI.getOperand(4))
3543 .add(MI.getOperand(5))
3544 .add(MI.getOperand(6));
3545 break;
3546 }
3547 case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
3548 case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
3549 case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
3550 case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
3551 // If the tail policy is undisturbed we can't convert.
3552 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
3553 MI.getNumExplicitOperands() == 6);
3554 if ((MI.getOperand(5).getImm() & 1) == 0)
3555 return nullptr;
3556
3557 // clang-format off
3558 unsigned NewOpc;
3559 switch (MI.getOpcode()) {
3560 default:
3561 llvm_unreachable("Unexpected opcode");
3562 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADD_WV)
3563 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADDU_WV)
3564 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUB_WV)
3565 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUBU_WV)
3566 }
3567 // clang-format on
3568
3569 MachineBasicBlock &MBB = *MI.getParent();
3570 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
3571 .add(MI.getOperand(0))
3572 .addReg(MI.getOperand(0).getReg(), RegState::Undef)
3573 .add(MI.getOperand(1))
3574 .add(MI.getOperand(2))
3575 .add(MI.getOperand(3))
3576 .add(MI.getOperand(4))
3577 .add(MI.getOperand(5));
3578 break;
3579 }
3580 }
3581 MIB.copyImplicitOps(MI);
3582
3583 if (LV) {
3584 unsigned NumOps = MI.getNumOperands();
3585 for (unsigned I = 1; I < NumOps; ++I) {
3586 MachineOperand &Op = MI.getOperand(I);
3587 if (Op.isReg() && Op.isKill())
3588 LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
3589 }
3590 }
3591
3592 if (LIS) {
3593 SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);
3594
3595 if (MI.getOperand(0).isEarlyClobber()) {
3596 // Use operand 1 was tied to the early-clobber def operand 0, so its live
3597 // interval could have ended at an early-clobber slot. Now that they are
3598 // no longer tied, we need to update it to the normal register slot.
3599 LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
3600 LiveRange::Segment *S = LI.getSegmentContaining(Idx);
3601 if (S->end == Idx.getRegSlot(true))
3602 S->end = Idx.getRegSlot();
3603 }
3604 }
3605
3606 return MIB;
3607}
3608
3609#undef CASE_WIDEOP_OPCODE_COMMON
3610#undef CASE_WIDEOP_OPCODE_LMULS_MF4
3611#undef CASE_WIDEOP_OPCODE_LMULS
3612#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
3613#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4
3614#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
3615#undef CASE_FP_WIDEOP_OPCODE_COMMON
3616#undef CASE_FP_WIDEOP_OPCODE_LMULS_MF4
3617#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
3618#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4
3619#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
3620
3621 void RISCVInstrInfo::mulImm(MachineFunction &MF, MachineBasicBlock &MBB,
3622 MachineBasicBlock::iterator II, const DebugLoc &DL,
3623 Register DestReg, uint32_t Amount,
3624 MachineInstr::MIFlag Flag) const {
3625 MachineRegisterInfo &MRI = MF.getRegInfo();
3626 if (llvm::has_single_bit<uint32_t>(Amount)) {
3627 uint32_t ShiftAmount = Log2_32(Amount);
3628 if (ShiftAmount == 0)
3629 return;
3630 BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
3631 .addReg(DestReg, RegState::Kill)
3632 .addImm(ShiftAmount)
3633 .setMIFlag(Flag);
3634 } else if (STI.hasStdExtZba() &&
3635 ((Amount % 3 == 0 && isPowerOf2_64(Amount / 3)) ||
3636 (Amount % 5 == 0 && isPowerOf2_64(Amount / 5)) ||
3637 (Amount % 9 == 0 && isPowerOf2_64(Amount / 9)))) {
3638 // We can use Zba SHXADD+SLLI instructions for multiply in some cases.
3639 unsigned Opc;
3640 uint32_t ShiftAmount;
3641 if (Amount % 9 == 0) {
3642 Opc = RISCV::SH3ADD;
3643 ShiftAmount = Log2_64(Amount / 9);
3644 } else if (Amount % 5 == 0) {
3645 Opc = RISCV::SH2ADD;
3646 ShiftAmount = Log2_64(Amount / 5);
3647 } else if (Amount % 3 == 0) {
3648 Opc = RISCV::SH1ADD;
3649 ShiftAmount = Log2_64(Amount / 3);
3650 } else {
3651 llvm_unreachable("implied by if-clause");
3652 }
3653 if (ShiftAmount)
3654 BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
3655 .addReg(DestReg, RegState::Kill)
3656 .addImm(ShiftAmount)
3657 .setMIFlag(Flag);
3658 BuildMI(MBB, II, DL, get(Opc), DestReg)
3659 .addReg(DestReg, RegState::Kill)
3660 .addReg(DestReg)
3661 .setMIFlag(Flag);
3662 } else if (llvm::has_single_bit<uint32_t>(Amount - 1)) {
3663 Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
3664 uint32_t ShiftAmount = Log2_32(Amount - 1);
3665 BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
3666 .addReg(DestReg)
3667 .addImm(ShiftAmount)
3668 .setMIFlag(Flag);
3669 BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
3670 .addReg(ScaledRegister, RegState::Kill)
3671 .addReg(DestReg, RegState::Kill)
3672 .setMIFlag(Flag);
3673 } else if (llvm::has_single_bit<uint32_t>(Amount + 1)) {
3674 Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
3675 uint32_t ShiftAmount = Log2_32(Amount + 1);
3676 BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
3677 .addReg(DestReg)
3678 .addImm(ShiftAmount)
3679 .setMIFlag(Flag);
3680 BuildMI(MBB, II, DL, get(RISCV::SUB), DestReg)
3681 .addReg(ScaledRegister, RegState::Kill)
3682 .addReg(DestReg, RegState::Kill)
3683 .setMIFlag(Flag);
3684 } else if (STI.hasStdExtM() || STI.hasStdExtZmmul()) {
3685 Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
3686 movImm(MBB, II, DL, N, Amount, Flag);
3687 BuildMI(MBB, II, DL, get(RISCV::MUL), DestReg)
3688 .addReg(DestReg, RegState::Kill)
3689 .addReg(N, RegState::Kill)
3690 .setMIFlag(Flag);
3691 } else {
3692 Register Acc;
3693 uint32_t PrevShiftAmount = 0;
3694 for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
3695 if (Amount & (1U << ShiftAmount)) {
3696 if (ShiftAmount)
3697 BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
3698 .addReg(DestReg, RegState::Kill)
3699 .addImm(ShiftAmount - PrevShiftAmount)
3700 .setMIFlag(Flag);
3701 if (Amount >> (ShiftAmount + 1)) {
3702 // If we don't have an accumulator yet, create it and copy DestReg.
3703 if (!Acc) {
3704 Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);
3705 BuildMI(MBB, II, DL, get(TargetOpcode::COPY), Acc)
3706 .addReg(DestReg)
3707 .setMIFlag(Flag);
3708 } else {
3709 BuildMI(MBB, II, DL, get(RISCV::ADD), Acc)
3710 .addReg(Acc, RegState::Kill)
3711 .addReg(DestReg)
3712 .setMIFlag(Flag);
3713 }
3714 }
3715 PrevShiftAmount = ShiftAmount;
3716 }
3717 }
3718 assert(Acc && "Expected valid accumulator");
3719 BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
3720 .addReg(DestReg, RegState::Kill)
3721 .addReg(Acc, RegState::Kill)
3722 .setMIFlag(Flag);
3723 }
3724}
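For illustration, the following standalone sketch (not part of this file; the helper name is invented) models the Zba path chosen above, where Amount = {3, 5, 9} * 2^k is lowered to an SLLI by k followed by an SH1ADD/SH2ADD/SH3ADD of the shifted value with itself:

#include <bit>
#include <cassert>
#include <cstdint>

// Model of the SLLI + SHxADD sequence emitted above:
// dest = dest << k; dest = (dest << n) + dest, where 2^n + 1 is 3, 5 or 9.
static uint32_t mulViaShXAdd(uint32_t X, uint32_t Amount) {
  uint32_t Factor = Amount % 9 == 0 ? 9 : Amount % 5 == 0 ? 5 : 3;
  uint32_t N = Factor == 9 ? 3 : Factor == 5 ? 2 : 1; // SHxADD shift amount
  uint32_t K = std::countr_zero(Amount / Factor);     // SLLI amount
  X <<= K;                                            // SLLI (skipped if K == 0)
  return (X << N) + X;                                // SHxADD
}

int main() {
  assert(mulViaShXAdd(7, 40) == 7u * 40u);   // 40 = 5 * 8: SLLI 3, SH2ADD
  assert(mulViaShXAdd(11, 12) == 11u * 12u); // 12 = 3 * 4: SLLI 2, SH1ADD
  assert(mulViaShXAdd(5, 9) == 5u * 9u);     // 9: SH3ADD only, no SLLI
  return 0;
}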
3725
3726ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
3727RISCVInstrInfo::getSerializableMachineMemOperandTargetFlags() const {
3728 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
3729 {{MONontemporalBit0, "riscv-nontemporal-domain-bit-0"},
3730 {MONontemporalBit1, "riscv-nontemporal-domain-bit-1"}};
3731 return ArrayRef(TargetFlags);
3732}
3733
3734// Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
3735bool RISCV::isSEXT_W(const MachineInstr &MI) {
3736 return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
3737 MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
3738}
3739
3740// Returns true if this is the zext.w pattern, add.uw rd, rs1, x0.
3741bool RISCV::isZEXT_W(const MachineInstr &MI) {
3742 return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
3743 MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
3744}
3745
3746// Returns true if this is the zext.b pattern, andi rd, rs1, 255.
3747bool RISCV::isZEXT_B(const MachineInstr &MI) {
3748 return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
3749 MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
3750}
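For reference, the scalar semantics of the three patterns recognized above can be modelled as follows (an illustrative, standalone sketch, not part of this file; the helper names are invented):

#include <cassert>
#include <cstdint>

// addiw rd, rs1, 0: sign-extend the low 32 bits of rs1 into rd.
// (Narrowing conversion is modular, well-defined since C++20.)
static uint64_t sextW(uint64_t Rs1) {
  return static_cast<uint64_t>(static_cast<int64_t>(static_cast<int32_t>(Rs1)));
}
// add.uw rd, rs1, x0: zero-extend the low 32 bits of rs1 into rd.
static uint64_t zextW(uint64_t Rs1) { return static_cast<uint32_t>(Rs1); }
// andi rd, rs1, 255: zero-extend the low 8 bits of rs1 into rd.
static uint64_t zextB(uint64_t Rs1) { return Rs1 & 255; }

int main() {
  assert(sextW(0x0000000080000000) == 0xFFFFFFFF80000000);
  assert(zextW(0xFFFFFFFF80000000) == 0x0000000080000000);
  assert(zextB(0x1234) == 0x34);
  return 0;
}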
3751
3752static bool isRVVWholeLoadStore(unsigned Opcode) {
3753 switch (Opcode) {
3754 default:
3755 return false;
3756 case RISCV::VS1R_V:
3757 case RISCV::VS2R_V:
3758 case RISCV::VS4R_V:
3759 case RISCV::VS8R_V:
3760 case RISCV::VL1RE8_V:
3761 case RISCV::VL2RE8_V:
3762 case RISCV::VL4RE8_V:
3763 case RISCV::VL8RE8_V:
3764 case RISCV::VL1RE16_V:
3765 case RISCV::VL2RE16_V:
3766 case RISCV::VL4RE16_V:
3767 case RISCV::VL8RE16_V:
3768 case RISCV::VL1RE32_V:
3769 case RISCV::VL2RE32_V:
3770 case RISCV::VL4RE32_V:
3771 case RISCV::VL8RE32_V:
3772 case RISCV::VL1RE64_V:
3773 case RISCV::VL2RE64_V:
3774 case RISCV::VL4RE64_V:
3775 case RISCV::VL8RE64_V:
3776 return true;
3777 }
3778}
3779
3780bool RISCV::isRVVSpill(const MachineInstr &MI) {
3781 // RVV lacks any support for immediate addressing for stack addresses, so be
3782 // conservative.
3783 unsigned Opcode = MI.getOpcode();
3784 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
3785 !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
3786 return false;
3787 return true;
3788}
3789
3790std::optional<std::pair<unsigned, unsigned>>
3791RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
3792 switch (Opcode) {
3793 default:
3794 return std::nullopt;
3795 case RISCV::PseudoVSPILL2_M1:
3796 case RISCV::PseudoVRELOAD2_M1:
3797 return std::make_pair(2u, 1u);
3798 case RISCV::PseudoVSPILL2_M2:
3799 case RISCV::PseudoVRELOAD2_M2:
3800 return std::make_pair(2u, 2u);
3801 case RISCV::PseudoVSPILL2_M4:
3802 case RISCV::PseudoVRELOAD2_M4:
3803 return std::make_pair(2u, 4u);
3804 case RISCV::PseudoVSPILL3_M1:
3805 case RISCV::PseudoVRELOAD3_M1:
3806 return std::make_pair(3u, 1u);
3807 case RISCV::PseudoVSPILL3_M2:
3808 case RISCV::PseudoVRELOAD3_M2:
3809 return std::make_pair(3u, 2u);
3810 case RISCV::PseudoVSPILL4_M1:
3811 case RISCV::PseudoVRELOAD4_M1:
3812 return std::make_pair(4u, 1u);
3813 case RISCV::PseudoVSPILL4_M2:
3814 case RISCV::PseudoVRELOAD4_M2:
3815 return std::make_pair(4u, 2u);
3816 case RISCV::PseudoVSPILL5_M1:
3817 case RISCV::PseudoVRELOAD5_M1:
3818 return std::make_pair(5u, 1u);
3819 case RISCV::PseudoVSPILL6_M1:
3820 case RISCV::PseudoVRELOAD6_M1:
3821 return std::make_pair(6u, 1u);
3822 case RISCV::PseudoVSPILL7_M1:
3823 case RISCV::PseudoVRELOAD7_M1:
3824 return std::make_pair(7u, 1u);
3825 case RISCV::PseudoVSPILL8_M1:
3826 case RISCV::PseudoVRELOAD8_M1:
3827 return std::make_pair(8u, 1u);
3828 }
3829}
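The pair returned above is (NF, LMUL): the number of segment fields in the spill/reload pseudo and the register-group size of each field. A small illustrative sketch of how a caller might interpret it (not part of this file; the helper name is invented):

#include <cassert>
#include <optional>
#include <utility>

// For a segment spill such as PseudoVSPILL4_M2, (NF, LMUL) = (4, 2):
// NF whole-register stores are emitted, each covering an LMUL-sized group,
// so NF * LMUL vector registers are written in total.
static unsigned totalVRegs(std::optional<std::pair<unsigned, unsigned>> Info) {
  return Info ? Info->first * Info->second : 0;
}

int main() {
  auto Spill4M2 = std::make_optional(std::make_pair(4u, 2u)); // PseudoVSPILL4_M2
  assert(totalVRegs(Spill4M2) == 8);
  assert(totalVRegs(std::nullopt) == 0); // not a segment spill/reload pseudo
  return 0;
}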
3830
3831bool RISCV::isFaultFirstLoad(const MachineInstr &MI) {
3832 return MI.getNumExplicitDefs() == 2 &&
3833 MI.modifiesRegister(RISCV::VL, /*TRI=*/nullptr) && !MI.isInlineAsm();
3834}
3835
3836bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
3837 int16_t MI1FrmOpIdx =
3838 RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
3839 int16_t MI2FrmOpIdx =
3840 RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
3841 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
3842 return false;
3843 MachineOperand FrmOp1 = MI1.getOperand(MI1FrmOpIdx);
3844 MachineOperand FrmOp2 = MI2.getOperand(MI2FrmOpIdx);
3845 return FrmOp1.getImm() == FrmOp2.getImm();
3846}
3847
3848std::optional<unsigned>
3849RISCV::getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW) {
3850 // TODO: Handle Zvbb instructions
3851 switch (Opcode) {
3852 default:
3853 return std::nullopt;
3854
3855 // 11.6. Vector Single-Width Shift Instructions
3856 case RISCV::VSLL_VX:
3857 case RISCV::VSRL_VX:
3858 case RISCV::VSRA_VX:
3859 // 12.4. Vector Single-Width Scaling Shift Instructions
3860 case RISCV::VSSRL_VX:
3861 case RISCV::VSSRA_VX:
3862 // Only the low lg2(SEW) bits of the shift-amount value are used.
3863 return Log2SEW;
3864
3865 // 11.7 Vector Narrowing Integer Right Shift Instructions
3866 case RISCV::VNSRL_WX:
3867 case RISCV::VNSRA_WX:
3868 // 12.5. Vector Narrowing Fixed-Point Clip Instructions
3869 case RISCV::VNCLIPU_WX:
3870 case RISCV::VNCLIP_WX:
3871 // Only the low lg2(2*SEW) bits of the shift-amount value are used.
3872 return Log2SEW + 1;
3873
3874 // 11.1. Vector Single-Width Integer Add and Subtract
3875 case RISCV::VADD_VX:
3876 case RISCV::VSUB_VX:
3877 case RISCV::VRSUB_VX:
3878 // 11.2. Vector Widening Integer Add/Subtract
3879 case RISCV::VWADDU_VX:
3880 case RISCV::VWSUBU_VX:
3881 case RISCV::VWADD_VX:
3882 case RISCV::VWSUB_VX:
3883 case RISCV::VWADDU_WX:
3884 case RISCV::VWSUBU_WX:
3885 case RISCV::VWADD_WX:
3886 case RISCV::VWSUB_WX:
3887 // 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
3888 case RISCV::VADC_VXM:
3889 case RISCV::VADC_VIM:
3890 case RISCV::VMADC_VXM:
3891 case RISCV::VMADC_VIM:
3892 case RISCV::VMADC_VX:
3893 case RISCV::VSBC_VXM:
3894 case RISCV::VMSBC_VXM:
3895 case RISCV::VMSBC_VX:
3896 // 11.5 Vector Bitwise Logical Instructions
3897 case RISCV::VAND_VX:
3898 case RISCV::VOR_VX:
3899 case RISCV::VXOR_VX:
3900 // 11.8. Vector Integer Compare Instructions
3901 case RISCV::VMSEQ_VX:
3902 case RISCV::VMSNE_VX:
3903 case RISCV::VMSLTU_VX:
3904 case RISCV::VMSLT_VX:
3905 case RISCV::VMSLEU_VX:
3906 case RISCV::VMSLE_VX:
3907 case RISCV::VMSGTU_VX:
3908 case RISCV::VMSGT_VX:
3909 // 11.9. Vector Integer Min/Max Instructions
3910 case RISCV::VMINU_VX:
3911 case RISCV::VMIN_VX:
3912 case RISCV::VMAXU_VX:
3913 case RISCV::VMAX_VX:
3914 // 11.10. Vector Single-Width Integer Multiply Instructions
3915 case RISCV::VMUL_VX:
3916 case RISCV::VMULH_VX:
3917 case RISCV::VMULHU_VX:
3918 case RISCV::VMULHSU_VX:
3919 // 11.11. Vector Integer Divide Instructions
3920 case RISCV::VDIVU_VX:
3921 case RISCV::VDIV_VX:
3922 case RISCV::VREMU_VX:
3923 case RISCV::VREM_VX:
3924 // 11.12. Vector Widening Integer Multiply Instructions
3925 case RISCV::VWMUL_VX:
3926 case RISCV::VWMULU_VX:
3927 case RISCV::VWMULSU_VX:
3928 // 11.13. Vector Single-Width Integer Multiply-Add Instructions
3929 case RISCV::VMACC_VX:
3930 case RISCV::VNMSAC_VX:
3931 case RISCV::VMADD_VX:
3932 case RISCV::VNMSUB_VX:
3933 // 11.14. Vector Widening Integer Multiply-Add Instructions
3934 case RISCV::VWMACCU_VX:
3935 case RISCV::VWMACC_VX:
3936 case RISCV::VWMACCSU_VX:
3937 case RISCV::VWMACCUS_VX:
3938 // 11.15. Vector Integer Merge Instructions
3939 case RISCV::VMERGE_VXM:
3940 // 11.16. Vector Integer Move Instructions
3941 case RISCV::VMV_V_X:
3942 // 12.1. Vector Single-Width Saturating Add and Subtract
3943 case RISCV::VSADDU_VX:
3944 case RISCV::VSADD_VX:
3945 case RISCV::VSSUBU_VX:
3946 case RISCV::VSSUB_VX:
3947 // 12.2. Vector Single-Width Averaging Add and Subtract
3948 case RISCV::VAADDU_VX:
3949 case RISCV::VAADD_VX:
3950 case RISCV::VASUBU_VX:
3951 case RISCV::VASUB_VX:
3952 // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
3953 case RISCV::VSMUL_VX:
3954 // 16.1. Integer Scalar Move Instructions
3955 case RISCV::VMV_S_X:
3956 return 1U << Log2SEW;
3957 }
3958}
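To make the three buckets above concrete, here is an illustrative standalone sketch (not part of this file; the enum and helper are invented for the example) evaluating them for SEW = 32:

#include <cassert>

// Demanded low bits of the GPR scalar operand, mirroring the cases above.
enum class ScalarUse { Shift, NarrowingShift, FullSEW };

static unsigned demandedBits(ScalarUse Use, unsigned Log2SEW) {
  switch (Use) {
  case ScalarUse::Shift:          return Log2SEW;       // e.g. vsll.vx
  case ScalarUse::NarrowingShift: return Log2SEW + 1;   // e.g. vnsrl.wx
  case ScalarUse::FullSEW:        return 1u << Log2SEW; // e.g. vadd.vx
  }
  return 0;
}

int main() {
  // SEW = 32 (Log2SEW = 5): shifts read 5 bits, narrowing shifts 6, others 32.
  assert(demandedBits(ScalarUse::Shift, 5) == 5);
  assert(demandedBits(ScalarUse::NarrowingShift, 5) == 6);
  assert(demandedBits(ScalarUse::FullSEW, 5) == 32);
  return 0;
}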
3959
3960unsigned RISCV::getRVVMCOpcode(unsigned RVVPseudoOpcode) {
3961 const RISCVVPseudosTable::PseudoInfo *RVV =
3962 RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
3963 if (!RVV)
3964 return 0;
3965 return RVV->BaseInstr;
3966}