LLVM 23.0.0git
RISCVInstructionSelector.cpp
Go to the documentation of this file.
1//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// RISC-V.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
16#include "RISCVSubtarget.h"
17#include "RISCVTargetMachine.h"
25#include "llvm/IR/IntrinsicsRISCV.h"
26#include "llvm/Support/Debug.h"
27
28#define DEBUG_TYPE "riscv-isel"
29
30using namespace llvm;
31using namespace MIPatternMatch;
32
33#define GET_GLOBALISEL_PREDICATE_BITSET
34#include "RISCVGenGlobalISel.inc"
35#undef GET_GLOBALISEL_PREDICATE_BITSET
36
37namespace {
38
39class RISCVInstructionSelector : public InstructionSelector {
40public:
41 RISCVInstructionSelector(const RISCVTargetMachine &TM,
42 const RISCVSubtarget &STI,
43 const RISCVRegisterBankInfo &RBI);
44
45 bool select(MachineInstr &MI) override;
46
47 void setupMF(MachineFunction &MF, GISelValueTracking *VT,
48 CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
49 BlockFrequencyInfo *BFI) override {
50 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
51 MRI = &MF.getRegInfo();
52 }
53
54 static const char *getName() { return DEBUG_TYPE; }
55
56private:
57 static constexpr unsigned MaxRecursionDepth = 6;
58
59 bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
60 const unsigned Depth = 0) const;
61 bool hasAllHUsers(const MachineInstr &MI) const {
62 return hasAllNBitUsers(MI, 16);
63 }
64 bool hasAllWUsers(const MachineInstr &MI) const {
65 return hasAllNBitUsers(MI, 32);
66 }
67
68 bool isRegInGprb(Register Reg) const;
69 bool isRegInFprb(Register Reg) const;
70
71 // tblgen-erated 'select' implementation, used as the initial selector for
72 // the patterns that don't require complex C++.
73 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
74
75 // A lowering phase that runs before any selection attempts.
76 // Returns true if the instruction was modified.
77 void preISelLower(MachineInstr &MI);
78
79 bool replacePtrWithInt(MachineOperand &Op);
80
81 // Custom selection methods
82 bool selectCopy(MachineInstr &MI) const;
83 bool selectImplicitDef(MachineInstr &MI) const;
84 bool materializeImm(Register Reg, int64_t Imm, MachineInstr &MI) const;
85 bool selectAddr(MachineInstr &MI, bool IsLocal = true,
86 bool IsExternWeak = false) const;
87 bool selectSelect(MachineInstr &MI) const;
88 bool selectFPCompare(MachineInstr &MI) const;
89 void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
90 MachineInstr &MI) const;
92 void addVectorLoadStoreOperands(MachineInstr &I,
94 unsigned &CurOp, bool IsMasked,
95 bool IsStridedOrIndexed,
96 LLT *IndexVT = nullptr) const;
97 bool selectIntrinsicWithSideEffects(MachineInstr &I) const;
98 bool selectIntrinsic(MachineInstr &I) const;
99 bool selectExtractSubvector(MachineInstr &MI) const;
100
101 ComplexRendererFns selectShiftMask(MachineOperand &Root,
102 unsigned ShiftWidth) const;
103 ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
104 return selectShiftMask(Root, STI.getXLen());
105 }
106 ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
107 return selectShiftMask(Root, 32);
108 }
109 ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;
110
111 ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
112 template <unsigned Bits>
113 ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
114 return selectSExtBits(Root, Bits);
115 }
116
117 ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
118 template <unsigned Bits>
119 ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
120 return selectZExtBits(Root, Bits);
121 }
122
123 ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
124 template <unsigned ShAmt>
125 ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
126 return selectSHXADDOp(Root, ShAmt);
127 }
128
129 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
130 unsigned ShAmt) const;
131 template <unsigned ShAmt>
132 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
133 return selectSHXADD_UWOp(Root, ShAmt);
134 }
135
136 ComplexRendererFns renderVLOp(MachineOperand &Root) const;
137
138 // Custom renderers for tablegen
139 void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
140 int OpIdx) const;
141 void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
142 int OpIdx) const;
143 void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
144 int OpIdx) const;
145 void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
146 int OpIdx) const;
147 void renderFrameIndex(MachineInstrBuilder &MIB, const MachineInstr &MI,
148 int OpIdx) const;
149
150 void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
151 int OpIdx) const;
152 void renderXLenSubTrailingOnes(MachineInstrBuilder &MIB,
153 const MachineInstr &MI, int OpIdx) const;
154
155 void renderAddiPairImmLarge(MachineInstrBuilder &MIB, const MachineInstr &MI,
156 int OpIdx) const;
157 void renderAddiPairImmSmall(MachineInstrBuilder &MIB, const MachineInstr &MI,
158 int OpIdx) const;
159
160 const RISCVSubtarget &STI;
161 const RISCVInstrInfo &TII;
162 const RISCVRegisterInfo &TRI;
163 const RISCVRegisterBankInfo &RBI;
164 const RISCVTargetMachine &TM;
165
166 MachineRegisterInfo *MRI = nullptr;
167
168 // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
169 // uses "STI." in the code generated by TableGen. We need to unify the name of
170 // Subtarget variable.
171 const RISCVSubtarget *Subtarget = &STI;
172
173#define GET_GLOBALISEL_PREDICATES_DECL
174#include "RISCVGenGlobalISel.inc"
175#undef GET_GLOBALISEL_PREDICATES_DECL
176
177#define GET_GLOBALISEL_TEMPORARIES_DECL
178#include "RISCVGenGlobalISel.inc"
179#undef GET_GLOBALISEL_TEMPORARIES_DECL
180};
181
182} // end anonymous namespace
183
184#define GET_GLOBALISEL_IMPL
185#include "RISCVGenGlobalISel.inc"
186#undef GET_GLOBALISEL_IMPL
187
188RISCVInstructionSelector::RISCVInstructionSelector(
189 const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
190 const RISCVRegisterBankInfo &RBI)
191 : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
192 TM(TM),
193
195#include "RISCVGenGlobalISel.inc"
198#include "RISCVGenGlobalISel.inc"
200{
201}
202
// Mimics optimizations in ISel and RISCVOptWInst Pass
//
// Returns true when every (transitive, non-debug) user of MI's result reads
// only its low `Bits` bits, so a narrower/W-form instruction may be selected
// for MI. Conservatively returns false on any user it cannot reason about.
bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
                                               unsigned Bits,
                                               const unsigned Depth) const {

  // At the top level only the listed generic opcodes are expected; recursive
  // calls (Depth != 0) may land on already-selected RISC-V instructions.
  assert((MI.getOpcode() == TargetOpcode::G_ADD ||
          MI.getOpcode() == TargetOpcode::G_SUB ||
          MI.getOpcode() == TargetOpcode::G_MUL ||
          MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_AND ||
          MI.getOpcode() == TargetOpcode::G_OR ||
          MI.getOpcode() == TargetOpcode::G_XOR ||
          MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
         "Unexpected opcode");

  // Bound the recursion through AND/OR/XOR/ANDI/SRLI chains.
  if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
    return false;

  auto DestReg = MI.getOperand(0).getReg();
  for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
    assert(UserOp.getParent() && "UserOp must have a parent");
    const MachineInstr &UserMI = *UserOp.getParent();
    unsigned OpIdx = UserOp.getOperandNo();

    switch (UserMI.getOpcode()) {
    default:
      // Unknown user: must assume all bits are demanded.
      return false;
    case RISCV::ADDW:
    case RISCV::ADDIW:
    case RISCV::SUBW:
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_S_W:
      // W-form instructions only read the low 32 bits of their inputs.
      if (Bits >= 32)
        break;
      return false;
    case RISCV::SLL:
    case RISCV::SRA:
    case RISCV::SRL:
      // Shift amount operands only use log2(Xlen) bits.
      if (OpIdx == 2 && Bits >= Log2_32(Subtarget->getXLen()))
        break;
      return false;
    case RISCV::SLLI:
      // SLLI only uses the lower (XLen - ShAmt) bits.
      if (Bits >= Subtarget->getXLen() - UserMI.getOperand(2).getImm())
        break;
      return false;
    case RISCV::ANDI:
      // ANDI demands only up to the mask's highest set bit; otherwise fall
      // back to checking the ANDI's own users, like AND below.
      if (Bits >= (unsigned)llvm::bit_width<uint64_t>(
                      (uint64_t)UserMI.getOperand(2).getImm()))
        break;
      goto RecCheck;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    RecCheck:
      // Bitwise ops propagate demanded bits: OK if their users are OK.
      if (hasAllNBitUsers(UserMI, Bits, Depth + 1))
        break;
      return false;
    case RISCV::SRLI: {
      unsigned ShAmt = UserMI.getOperand(2).getImm();
      // If we are shifting right by less than Bits, and users don't demand any
      // bits that were shifted into [Bits-1:0], then we can consider this as an
      // N-Bit user.
      if (Bits > ShAmt && hasAllNBitUsers(UserMI, Bits - ShAmt, Depth + 1))
        break;
      return false;
    }
    }
  }

  return true;
}
277
278InstructionSelector::ComplexRendererFns
279RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
280 unsigned ShiftWidth) const {
281 if (!Root.isReg())
282 return std::nullopt;
283
284 using namespace llvm::MIPatternMatch;
285
286 Register ShAmtReg = Root.getReg();
287 // Peek through zext.
288 Register ZExtSrcReg;
289 if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg))))
290 ShAmtReg = ZExtSrcReg;
291
292 APInt AndMask;
293 Register AndSrcReg;
294 // Try to combine the following pattern (applicable to other shift
295 // instructions as well as 32-bit ones):
296 //
297 // %4:gprb(s64) = G_AND %3, %2
298 // %5:gprb(s64) = G_LSHR %1, %4(s64)
299 //
300 // According to RISC-V's ISA manual, SLL, SRL, and SRA ignore other bits than
301 // the lowest log2(XLEN) bits of register rs2. As for the above pattern, if
302 // the lowest log2(XLEN) bits of register rd and rs2 of G_AND are the same,
303 // then it can be eliminated. Given register rs1 or rs2 holding a constant
304 // (the and mask), there are two cases G_AND can be erased:
305 //
306 // 1. the lowest log2(XLEN) bits of the and mask are all set
307 // 2. the bits of the register being masked are already unset (zero set)
308 if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
309 APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
310 if (ShMask.isSubsetOf(AndMask)) {
311 ShAmtReg = AndSrcReg;
312 } else {
313 // SimplifyDemandedBits may have optimized the mask so try restoring any
314 // bits that are known zero.
315 KnownBits Known = VT->getKnownBits(AndSrcReg);
316 if (ShMask.isSubsetOf(AndMask | Known.Zero))
317 ShAmtReg = AndSrcReg;
318 }
319 }
320
321 APInt Imm;
323 if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
324 if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
325 // If we are shifting by X+N where N == 0 mod Size, then just shift by X
326 // to avoid the ADD.
327 ShAmtReg = Reg;
328 } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
329 if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
330 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
331 // to generate a NEG instead of a SUB of a constant.
332 ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
333 unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
334 return {{[=](MachineInstrBuilder &MIB) {
335 MachineIRBuilder(*MIB.getInstr())
336 .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
337 MIB.addReg(ShAmtReg);
338 }}};
339 }
340 if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
341 // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
342 // to generate a NOT instead of a SUB of a constant.
343 ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
344 return {{[=](MachineInstrBuilder &MIB) {
345 MachineIRBuilder(*MIB.getInstr())
346 .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
347 .addImm(-1);
348 MIB.addReg(ShAmtReg);
349 }}};
350 }
351 }
352
353 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
354}
355
356InstructionSelector::ComplexRendererFns
357RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
358 unsigned Bits) const {
359 if (!Root.isReg())
360 return std::nullopt;
361 Register RootReg = Root.getReg();
362 MachineInstr *RootDef = MRI->getVRegDef(RootReg);
363
364 if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
365 RootDef->getOperand(2).getImm() == Bits) {
366 return {
367 {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};
368 }
369
370 unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
371 if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
372 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
373
374 return std::nullopt;
375}
376
377InstructionSelector::ComplexRendererFns
378RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
379 unsigned Bits) const {
380 if (!Root.isReg())
381 return std::nullopt;
382 Register RootReg = Root.getReg();
383
384 Register RegX;
385 uint64_t Mask = maskTrailingOnes<uint64_t>(Bits);
386 if (mi_match(RootReg, *MRI, m_GAnd(m_Reg(RegX), m_SpecificICst(Mask)))) {
387 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
388 }
389
390 if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
391 MRI->getType(RegX).getScalarSizeInBits() == Bits)
392 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
393
394 unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
395 if (VT->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
396 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
397
398 return std::nullopt;
399}
400
401InstructionSelector::ComplexRendererFns
402RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
403 unsigned ShAmt) const {
404 using namespace llvm::MIPatternMatch;
405
406 if (!Root.isReg())
407 return std::nullopt;
408 Register RootReg = Root.getReg();
409
410 const unsigned XLen = STI.getXLen();
411 APInt Mask, C2;
412 Register RegY;
413 std::optional<bool> LeftShift;
414 // (and (shl y, c2), mask)
415 if (mi_match(RootReg, *MRI,
416 m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
417 LeftShift = true;
418 // (and (lshr y, c2), mask)
419 else if (mi_match(RootReg, *MRI,
420 m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
421 LeftShift = false;
422
423 if (LeftShift.has_value()) {
424 if (*LeftShift)
426 else
428
429 if (Mask.isShiftedMask()) {
430 unsigned Leading = XLen - Mask.getActiveBits();
431 unsigned Trailing = Mask.countr_zero();
432 // Given (and (shl y, c2), mask) in which mask has no leading zeros and
433 // c3 trailing zeros. We can use an SRLI by c3 - c2 followed by a SHXADD.
434 if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
435 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
436 return {{[=](MachineInstrBuilder &MIB) {
437 MachineIRBuilder(*MIB.getInstr())
438 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
439 .addImm(Trailing - C2.getLimitedValue());
440 MIB.addReg(DstReg);
441 }}};
442 }
443
444 // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
445 // c3 trailing zeros. We can use an SRLI by c2 + c3 followed by a SHXADD.
446 if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
447 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
448 return {{[=](MachineInstrBuilder &MIB) {
449 MachineIRBuilder(*MIB.getInstr())
450 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
451 .addImm(Leading + Trailing);
452 MIB.addReg(DstReg);
453 }}};
454 }
455 }
456 }
457
458 LeftShift.reset();
459
460 // (shl (and y, mask), c2)
461 if (mi_match(RootReg, *MRI,
462 m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
463 m_ICst(C2))))
464 LeftShift = true;
465 // (lshr (and y, mask), c2)
466 else if (mi_match(RootReg, *MRI,
468 m_ICst(C2))))
469 LeftShift = false;
470
471 if (LeftShift.has_value() && Mask.isShiftedMask()) {
472 unsigned Leading = XLen - Mask.getActiveBits();
473 unsigned Trailing = Mask.countr_zero();
474
475 // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
476 // c3 trailing zeros. If c1 + c3 == ShAmt, we can emit SRLIW + SHXADD.
477 bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
478 (Trailing + C2.getLimitedValue()) == ShAmt;
479 if (!Cond)
480 // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
481 // c3 trailing zeros. If c3 - c1 == ShAmt, we can emit SRLIW + SHXADD.
482 Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
483 (Trailing - C2.getLimitedValue()) == ShAmt;
484
485 if (Cond) {
486 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
487 return {{[=](MachineInstrBuilder &MIB) {
488 MachineIRBuilder(*MIB.getInstr())
489 .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
490 .addImm(Trailing);
491 MIB.addReg(DstReg);
492 }}};
493 }
494 }
495
496 return std::nullopt;
497}
498
499InstructionSelector::ComplexRendererFns
500RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
501 unsigned ShAmt) const {
502 using namespace llvm::MIPatternMatch;
503
504 if (!Root.isReg())
505 return std::nullopt;
506 Register RootReg = Root.getReg();
507
508 // Given (and (shl x, c2), mask) in which mask is a shifted mask with
509 // 32 - ShAmt leading zeros and c2 trailing zeros. We can use SLLI by
510 // c2 - ShAmt followed by SHXADD_UW with ShAmt for x amount.
511 APInt Mask, C2;
512 Register RegX;
513 if (mi_match(
514 RootReg, *MRI,
516 m_ICst(Mask))))) {
518
519 if (Mask.isShiftedMask()) {
520 unsigned Leading = Mask.countl_zero();
521 unsigned Trailing = Mask.countr_zero();
522 if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
523 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
524 return {{[=](MachineInstrBuilder &MIB) {
525 MachineIRBuilder(*MIB.getInstr())
526 .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
527 .addImm(C2.getLimitedValue() - ShAmt);
528 MIB.addReg(DstReg);
529 }}};
530 }
531 }
532 }
533
534 return std::nullopt;
535}
536
537InstructionSelector::ComplexRendererFns
538RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
539 assert(Root.isReg() && "Expected operand to be a Register");
540 MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
541
542 if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
543 auto C = RootDef->getOperand(1).getCImm();
544 if (C->getValue().isAllOnes())
545 // If the operand is a G_CONSTANT with value of all ones it is larger than
546 // VLMAX. We convert it to an immediate with value VLMaxSentinel. This is
547 // recognized specially by the vsetvli insertion pass.
548 return {{[=](MachineInstrBuilder &MIB) {
549 MIB.addImm(RISCV::VLMaxSentinel);
550 }}};
551
552 if (isUInt<5>(C->getZExtValue())) {
553 uint64_t ZExtC = C->getZExtValue();
554 return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
555 }
556 }
557 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
558}
559
560InstructionSelector::ComplexRendererFns
561RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
562 if (!Root.isReg())
563 return std::nullopt;
564
565 MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
566 if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
567 return {{
568 [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
569 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
570 }};
571 }
572
573 if (isBaseWithConstantOffset(Root, *MRI)) {
574 MachineOperand &LHS = RootDef->getOperand(1);
575 MachineOperand &RHS = RootDef->getOperand(2);
576 MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
577 MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
578
579 int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
580 if (isInt<12>(RHSC)) {
581 if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
582 return {{
583 [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
584 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
585 }};
586
587 return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
588 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
589 }
590 }
591
592 // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
593 // the combiner?
594 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
595 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
596}
597
598/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate CC.
599/// CC Must be an ICMP Predicate.
600static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
601 switch (CC) {
602 default:
603 llvm_unreachable("Expected ICMP CmpInst::Predicate.");
604 case CmpInst::Predicate::ICMP_EQ:
605 return RISCVCC::COND_EQ;
606 case CmpInst::Predicate::ICMP_NE:
607 return RISCVCC::COND_NE;
608 case CmpInst::Predicate::ICMP_ULT:
609 return RISCVCC::COND_LTU;
610 case CmpInst::Predicate::ICMP_SLT:
611 return RISCVCC::COND_LT;
612 case CmpInst::Predicate::ICMP_UGE:
613 return RISCVCC::COND_GEU;
614 case CmpInst::Predicate::ICMP_SGE:
615 return RISCVCC::COND_GE;
616 }
617}
618
621 MachineRegisterInfo &MRI) {
622 // Try to fold an ICmp. If that fails, use a NE compare with X0.
624 if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
625 LHS = CondReg;
626 RHS = RISCV::X0;
627 CC = RISCVCC::COND_NE;
628 return;
629 }
630
631 // We found an ICmp, do some canonicalization.
632
633 // Adjust comparisons to use comparison with 0 if possible.
634 if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
635 switch (Pred) {
637 // Convert X > -1 to X >= 0
638 if (*Constant == -1) {
639 CC = RISCVCC::COND_GE;
640 RHS = RISCV::X0;
641 return;
642 }
643 break;
645 // Convert X < 1 to 0 >= X
646 if (*Constant == 1) {
647 CC = RISCVCC::COND_GE;
648 RHS = LHS;
649 LHS = RISCV::X0;
650 return;
651 }
652 break;
653 default:
654 break;
655 }
656 }
657
658 switch (Pred) {
659 default:
660 llvm_unreachable("Expected ICMP CmpInst::Predicate.");
667 // These CCs are supported directly by RISC-V branches.
668 break;
673 // These CCs are not supported directly by RISC-V branches, but changing the
674 // direction of the CC and swapping LHS and RHS are.
675 Pred = CmpInst::getSwappedPredicate(Pred);
676 std::swap(LHS, RHS);
677 break;
678 }
679
680 CC = getRISCVCCFromICmp(Pred);
681}
682
683/// Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation
684/// \p GenericOpc, appropriate for the GPR register bank and of memory access
685/// size \p OpSize.
686static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
687 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
688 switch (OpSize) {
689 default:
690 llvm_unreachable("Unexpected memory size");
691 case 8:
692 return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
693 case 16:
694 return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
695 case 32:
696 return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
697 case 64:
698 return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
699 }
700}
701
702/// Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation
703/// \p GenericOpc, appropriate for the GPR register bank and of memory access
704/// size \p OpSize. \returns \p GenericOpc if the combination is unsupported.
705static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
706 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
707 switch (OpSize) {
708 case 8:
709 // Prefer unsigned due to no c.lb in Zcb.
710 return IsStore ? RISCV::SB : RISCV::LBU;
711 case 16:
712 return IsStore ? RISCV::SH : RISCV::LH;
713 case 32:
714 return IsStore ? RISCV::SW : RISCV::LW;
715 case 64:
716 return IsStore ? RISCV::SD : RISCV::LD;
717 }
718
719 return GenericOpc;
720}
721
722void RISCVInstructionSelector::addVectorLoadStoreOperands(
723 MachineInstr &I, SmallVectorImpl<Register> &SrcOps, unsigned &CurOp,
724 bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {
725 // Base Pointer
726 auto PtrReg = I.getOperand(CurOp++).getReg();
727 SrcOps.push_back(PtrReg);
728
729 // Stride or Index
730 if (IsStridedOrIndexed) {
731 auto StrideReg = I.getOperand(CurOp++).getReg();
732 SrcOps.push_back(StrideReg);
733 if (IndexVT)
734 *IndexVT = MRI->getType(StrideReg);
735 }
736
737 // Mask
738 if (IsMasked) {
739 auto MaskReg = I.getOperand(CurOp++).getReg();
740 SrcOps.push_back(MaskReg);
741 }
742}
743
744bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
745 MachineInstr &I) const {
746 // Find the intrinsic ID.
747 unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
748 // Select the instruction.
749 switch (IntrinID) {
750 default:
751 return false;
752 case Intrinsic::riscv_vlm:
753 case Intrinsic::riscv_vle:
754 case Intrinsic::riscv_vle_mask:
755 case Intrinsic::riscv_vlse:
756 case Intrinsic::riscv_vlse_mask: {
757 bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
758 IntrinID == Intrinsic::riscv_vlse_mask;
759 bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
760 IntrinID == Intrinsic::riscv_vlse_mask;
761 LLT VT = MRI->getType(I.getOperand(0).getReg());
762 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
763
764 // Result vector
765 const Register DstReg = I.getOperand(0).getReg();
766
767 // Sources
768 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
769 unsigned CurOp = 2;
770 SmallVector<Register, 4> SrcOps; // Source registers.
771
772 // Passthru
773 if (HasPassthruOperand) {
774 auto PassthruReg = I.getOperand(CurOp++).getReg();
775 SrcOps.push_back(PassthruReg);
776 } else {
777 SrcOps.push_back(Register(RISCV::NoRegister));
778 }
779
780 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);
781
783 const RISCV::VLEPseudo *P =
784 RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
785 static_cast<unsigned>(LMUL));
786
787 MachineInstrBuilder PseudoMI =
788 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(P->Pseudo), DstReg);
789 for (Register Reg : SrcOps)
790 PseudoMI.addReg(Reg);
791
792 // Select VL
793 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
794 for (auto &RenderFn : *VLOpFn)
795 RenderFn(PseudoMI);
796
797 // SEW
798 PseudoMI.addImm(Log2SEW);
799
800 // Policy
801 uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
802 if (IsMasked)
803 Policy = I.getOperand(CurOp++).getImm();
804 PseudoMI.addImm(Policy);
805
806 // Memref
807 PseudoMI.cloneMemRefs(I);
808
809 I.eraseFromParent();
810 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
811 return true;
812 }
813 case Intrinsic::riscv_vloxei:
814 case Intrinsic::riscv_vloxei_mask:
815 case Intrinsic::riscv_vluxei:
816 case Intrinsic::riscv_vluxei_mask: {
817 bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
818 IntrinID == Intrinsic::riscv_vluxei_mask;
819 bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
820 IntrinID == Intrinsic::riscv_vloxei_mask;
821 LLT VT = MRI->getType(I.getOperand(0).getReg());
822 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
823
824 // Result vector
825 const Register DstReg = I.getOperand(0).getReg();
826
827 // Sources
828 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
829 unsigned CurOp = 2;
830 SmallVector<Register, 4> SrcOps; // Source registers.
831
832 // Passthru
833 if (HasPassthruOperand) {
834 auto PassthruReg = I.getOperand(CurOp++).getReg();
835 SrcOps.push_back(PassthruReg);
836 } else {
837 // Use NoRegister if there is no specified passthru.
838 SrcOps.push_back(Register());
839 }
840 LLT IndexVT;
841 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);
842
844 RISCVVType::VLMUL IndexLMUL =
846 unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
847 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
848 reportFatalUsageError("The V extension does not support EEW=64 for index "
849 "values when XLEN=32");
850 }
851 const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
852 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
853 static_cast<unsigned>(IndexLMUL));
854
855 MachineInstrBuilder PseudoMI =
856 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(P->Pseudo), DstReg);
857 for (Register Reg : SrcOps)
858 PseudoMI.addReg(Reg);
859
860 // Select VL
861 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
862 for (auto &RenderFn : *VLOpFn)
863 RenderFn(PseudoMI);
864
865 // SEW
866 PseudoMI.addImm(Log2SEW);
867
868 // Policy
869 uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
870 if (IsMasked)
871 Policy = I.getOperand(CurOp++).getImm();
872 PseudoMI.addImm(Policy);
873
874 // Memref
875 PseudoMI.cloneMemRefs(I);
876
877 I.eraseFromParent();
878 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
879 return true;
880 }
881 case Intrinsic::riscv_vsm:
882 case Intrinsic::riscv_vse:
883 case Intrinsic::riscv_vse_mask:
884 case Intrinsic::riscv_vsse:
885 case Intrinsic::riscv_vsse_mask: {
886 bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
887 IntrinID == Intrinsic::riscv_vsse_mask;
888 bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
889 IntrinID == Intrinsic::riscv_vsse_mask;
890 LLT VT = MRI->getType(I.getOperand(1).getReg());
891 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
892
893 // Sources
894 unsigned CurOp = 1;
895 SmallVector<Register, 4> SrcOps; // Source registers.
896
897 // Store value
898 auto PassthruReg = I.getOperand(CurOp++).getReg();
899 SrcOps.push_back(PassthruReg);
900
901 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);
902
904 const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
905 IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
906
907 MachineInstrBuilder PseudoMI =
908 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(P->Pseudo));
909 for (Register Reg : SrcOps)
910 PseudoMI.addReg(Reg);
911
912 // Select VL
913 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
914 for (auto &RenderFn : *VLOpFn)
915 RenderFn(PseudoMI);
916
917 // SEW
918 PseudoMI.addImm(Log2SEW);
919
920 // Memref
921 PseudoMI.cloneMemRefs(I);
922
923 I.eraseFromParent();
924 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
925 return true;
926 }
927 case Intrinsic::riscv_vsoxei:
928 case Intrinsic::riscv_vsoxei_mask:
929 case Intrinsic::riscv_vsuxei:
930 case Intrinsic::riscv_vsuxei_mask: {
931 bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
932 IntrinID == Intrinsic::riscv_vsuxei_mask;
933 bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
934 IntrinID == Intrinsic::riscv_vsoxei_mask;
935 LLT VT = MRI->getType(I.getOperand(1).getReg());
936 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
937
938 // Sources
939 unsigned CurOp = 1;
940 SmallVector<Register, 4> SrcOps; // Source registers.
941
942 // Store value
943 auto PassthruReg = I.getOperand(CurOp++).getReg();
944 SrcOps.push_back(PassthruReg);
945
946 LLT IndexVT;
947 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);
948
950 RISCVVType::VLMUL IndexLMUL =
952 unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
953 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
954 reportFatalUsageError("The V extension does not support EEW=64 for index "
955 "values when XLEN=32");
956 }
957 const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
958 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
959 static_cast<unsigned>(IndexLMUL));
960
961 MachineInstrBuilder PseudoMI =
962 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(P->Pseudo));
963 for (Register Reg : SrcOps)
964 PseudoMI.addReg(Reg);
965
966 // Select VL
967 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
968 for (auto &RenderFn : *VLOpFn)
969 RenderFn(PseudoMI);
970
971 // SEW
972 PseudoMI.addImm(Log2SEW);
973
974 // Memref
975 PseudoMI.cloneMemRefs(I);
976
977 I.eraseFromParent();
978 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
979 return true;
980 }
981 }
982}
983
// Select a side-effect-free G_INTRINSIC. Currently only the vsetvli /
// vsetvlimax intrinsics are handled here; everything else fails selection
// (returns false) so generic fallback can report it.
bool RISCVInstructionSelector::selectIntrinsic(MachineInstr &I) const {
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
  // Select the instruction.
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::riscv_vsetvli:
  case Intrinsic::riscv_vsetvlimax: {

    bool VLMax = IntrinID == Intrinsic::riscv_vsetvlimax;

    // vsetvlimax has no AVL operand, so its SEW/LMUL immediates sit one
    // operand earlier than vsetvli's.
    unsigned Offset = VLMax ? 2 : 3;
    unsigned SEW = RISCVVType::decodeVSEW(I.getOperand(Offset).getImm() & 0x7);
    RISCVVType::VLMUL VLMul =
        static_cast<RISCVVType::VLMUL>(I.getOperand(Offset + 1).getImm() & 0x7);

    // Encode the vtype immediate; these intrinsics always use tail/mask
    // agnostic policies.
    unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
                                              /*MaskAgnostic*/ true);

    Register DstReg = I.getOperand(0).getReg();

    Register VLOperand;
    unsigned Opcode = RISCV::PseudoVSETVLI;

    // Check if AVL is a constant that equals VLMAX.
    if (!VLMax) {
      Register AVLReg = I.getOperand(2).getReg();
      // If the exact VLEN is known, an AVL equal to VLMAX lets us use the
      // X0-operand form below.
      if (auto AVLConst = getIConstantVRegValWithLookThrough(AVLReg, *MRI)) {
        uint64_t AVL = AVLConst->Value.getZExtValue();
        if (auto VLEN = Subtarget->getRealVLen()) {
          if (*VLEN / RISCVVType::getSEWLMULRatio(SEW, VLMul) == AVL)
            VLMax = true;
        }
      }

      // An all-ones AVL constant conventionally requests VLMAX.
      MachineInstr *AVLDef = MRI->getVRegDef(AVLReg);
      if (AVLDef && AVLDef->getOpcode() == TargetOpcode::G_CONSTANT) {
        const auto *C = AVLDef->getOperand(1).getCImm();
        if (C->getValue().isAllOnes())
          VLMax = true;
      }
    }

    if (VLMax) {
      // vsetvli rd, x0, vtypei computes rd = VLMAX.
      VLOperand = Register(RISCV::X0);
      Opcode = RISCV::PseudoVSETVLIX0;
    } else {
      Register AVLReg = I.getOperand(2).getReg();
      VLOperand = AVLReg;

      // Check if AVL is a small constant that can use PseudoVSETIVLI.
      if (auto AVLConst = getIConstantVRegValWithLookThrough(AVLReg, *MRI)) {
        uint64_t AVL = AVLConst->Value.getZExtValue();
        // vsetivli takes a uimm5 AVL directly, saving a register.
        if (isUInt<5>(AVL)) {
          MachineInstr *PseudoMI =
              BuildMI(*I.getParent(), I, I.getDebugLoc(),
                      TII.get(RISCV::PseudoVSETIVLI), DstReg)
                  .addImm(AVL)
                  .addImm(VTypeI);
          I.eraseFromParent();
          constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
          return true;
        }
      }
    }

    // General register-AVL (or X0 for VLMAX) form.
    MachineInstr *PseudoMI =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
            .addReg(VLOperand)
            .addImm(VTypeI);
    I.eraseFromParent();
    constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
    return true;
  }
  }
}
1061
// Select G_EXTRACT_SUBVECTOR as a subregister copy when the extraction
// decomposes to a pure subregister index (residual index 0).
bool RISCVInstructionSelector::selectExtractSubvector(MachineInstr &MI) const {
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();

  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);

  unsigned Idx = static_cast<unsigned>(MI.getOperand(2).getImm());

  MVT DstMVT = getMVTForLLT(DstTy);
  MVT SrcMVT = getMVTForLLT(SrcTy);

  // Decompose the extraction into a subregister index plus a residual
  // element index.
  // NOTE(review): the line naming the decompose helper appears to be missing
  // from this copy (only its argument continuation survives); verify against
  // upstream before building.
  unsigned SubRegIdx;
  std::tie(SubRegIdx, Idx) =
          SrcMVT, DstMVT, Idx, &TRI);

  // Only pure subregister extractions (no in-register slide) are handled.
  if (Idx != 0)
    return false;

  unsigned DstRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(DstMVT);
  const TargetRegisterClass *DstRC = TRI.getRegClass(DstRegClassID);
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  unsigned SrcRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(SrcMVT);
  const TargetRegisterClass *SrcRC = TRI.getRegClass(SrcRegClassID);
  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
    return false;

  // Emit a COPY from the computed subregister of the source.
  BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII.get(TargetOpcode::COPY),
          DstReg)
      .addReg(SrcReg, {}, SubRegIdx);

  MI.eraseFromParent();
  return true;
}
1101
// Main GlobalISel entry point: select a single MachineInstr, either via the
// TableGen-imported patterns (selectImpl) or the manual cases below.
// NOTE(review): this copy of the file appears to have lost several single
// source lines (mostly constrainSelectedInstRegOperands(...) calls emitted
// after in-place opcode rewrites, plus one initializer and one local
// declaration); verify against upstream before relying on the exact
// statement sequence.
bool RISCVInstructionSelector::select(MachineInstr &MI) {
  // Lower pointer-typed ops to integer equivalents before selection.
  preISelLower(MI);
  const unsigned Opc = MI.getOpcode();

  // Non-generic instructions (and PHIs) bypass the imported selector.
  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      // Pick a register class for the PHI def, preferring an already
      // assigned class and falling back to the type+bank mapping.
      // NOTE(review): the initializer line for DefRC is missing here.
      const TargetRegisterClass *DefRC =
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
        DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, STI.is64Bit());
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI);

    return true;
  }

  // Try the TableGen-imported patterns first.
  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    // All of these are register-bank-preserving no-ops: select as COPY.
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    bool IsSigned = Opc != TargetOpcode::G_ZEXT;
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    LLT SrcTy = MRI->getType(SrcReg);
    unsigned SrcSize = SrcTy.getSizeInBits();

    if (SrcTy.isVector())
      return false; // Should be handled by imported patterns.

    assert((*RBI.getRegBank(DstReg, *MRI, TRI)).getID() ==
               RISCV::GPRBRegBankID &&
           "Unexpected ext regbank");

    // Use addiw SrcReg, 0 (sext.w) for i32.
    if (IsSigned && SrcSize == 32) {
      MI.setDesc(TII.get(RISCV::ADDIW));
      MI.addOperand(MachineOperand::CreateImm(0));
      return true;
    }

    // Use add.uw SrcReg, X0 (zext.w) for i32 with Zba.
    if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
      MI.setDesc(TII.get(RISCV::ADD_UW));
      MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
      return true;
    }

    // Use sext.h/zext.h for i16 with Zbb.
    if (SrcSize == 16 &&
        (STI.hasStdExtZbb() || (!IsSigned && STI.hasStdExtZbkb()))) {
      MI.setDesc(TII.get(IsSigned ? RISCV::SEXT_H
                         : STI.isRV64() ? RISCV::ZEXT_H_RV64
                                        : RISCV::ZEXT_H_RV32));
      return true;
    }

    // Fall back to shift pair.
    Register ShiftLeftReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *ShiftLeft = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                      TII.get(RISCV::SLLI), ShiftLeftReg)
                                  .addReg(SrcReg)
                                  .addImm(STI.getXLen() - SrcSize);
    constrainSelectedInstRegOperands(*ShiftLeft, TII, TRI, RBI);
    MachineInstr *ShiftRight =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                TII.get(IsSigned ? RISCV::SRAI : RISCV::SRLI), DstReg)
            .addReg(ShiftLeftReg)
            .addImm(STI.getXLen() - SrcSize);
    constrainSelectedInstRegOperands(*ShiftRight, TII, TRI, RBI);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use constant pool for complex constants.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      // Materialize the bit pattern in a GPR and move it to the FPR.
      Register GPRReg;
      if (FPimm.isPosZero()) {
        GPRReg = RISCV::X0;
      } else {
        GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        APInt Imm = FPimm.bitcastToAPInt();
        if (!materializeImm(GPRReg, Imm.getSExtValue(), MI))
          return false;
      }

      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      // NOTE(review): the constrain call for FMV appears to be missing below.
      MachineInstr *FMV = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                  TII.get(Opcode), DstReg)
                              .addReg(GPRReg);
    } else {
      // s64 on rv32
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");

      if (FPimm.isPosZero()) {
        // Optimize +0.0 to use fcvt.d.w
        // NOTE(review): the trailing operand/constrain lines of this builder
        // appear to be missing from this copy.
        MachineInstr *FCVT = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                     TII.get(RISCV::FCVT_D_W), DstReg)
                                 .addReg(RISCV::X0)

        MI.eraseFromParent();
        return true;
      }

      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      APInt Imm = FPimm.bitcastToAPInt();
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MI))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MI))
        return false;
      MachineInstr *PairF64 =
          BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                  TII.get(RISCV::BuildPairF64Pseudo), DstReg)
              .addReg(GPRRegLow)
              .addReg(GPRRegHigh);
      constrainSelectedInstRegOperands(*PairF64, TII, TRI, RBI);
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI);
  case TargetOpcode::G_BRCOND: {
    // Decompose the condition into a branch-style (LHS CC RHS) compare.
    // NOTE(review): the declaration of CC (RISCVCC::CondCode) appears to be
    // missing from this copy.
    Register LHS, RHS;
    getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);

    MachineInstr *Bcc = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                TII.get(RISCVCC::getBrCond(CC)))
                            .addReg(LHS)
                            .addReg(RHS)
                            .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_BRINDIRECT:
    // jalr x0, rs1, 0
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return true;
  case TargetOpcode::G_SELECT:
    return selectSelect(MI);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MI);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    GLoadStore &LdSt = cast<GLoadStore>(MI);
    const Register ValReg = LdSt.getReg(0);
    const Register PtrReg = LdSt.getPointerReg();
    LLT PtrTy = MRI->getType(PtrReg);

    // Only GPR-bank values are handled here; FPR/vector loads and stores go
    // through the imported patterns.
    const RegisterBank &RB = *RBI.getRegBank(ValReg, *MRI, TRI);
    if (RB.getID() != RISCV::GPRBRegBankID)
      return false;

#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, *MRI, TRI);
    // Check that the pointer register is valid.
    assert(PtrRB.getID() == RISCV::GPRBRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");
#endif

    // Can only handle AddressSpace 0.
    if (PtrTy.getAddressSpace() != 0)
      return false;

    unsigned MemSize = LdSt.getMemSizeInBits().getValue();
    AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();

    // Atomics stronger than monotonic use the Zalasr load/store forms.
    if (isStrongerThanMonotonic(Order)) {
      MI.setDesc(TII.get(selectZalasrLoadStoreOp(Opc, MemSize)));
      return true;
    }

    const unsigned NewOpc = selectRegImmLoadStoreOp(MI.getOpcode(), MemSize);
    if (NewOpc == MI.getOpcode())
      return false;

    // Check if we can fold anything into the addressing mode.
    auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
    if (!AddrModeFns)
      return false;

    // Folded something. Create a new instruction and return it.
    MachineInstrBuilder NewInst =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII.get(NewOpc));
    NewInst.setMIFlags(MI.getFlags());
    if (isa<GStore>(MI))
      NewInst.addUse(ValReg);
    else
      NewInst.addDef(ValReg);
    NewInst.cloneMemRefs(MI);
    for (auto &Fn : *AddrModeFns)
      Fn(NewInst);
    MI.eraseFromParent();

    constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
    return true;
  }
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWithSideEffects(MI);
  case TargetOpcode::G_INTRINSIC:
    return selectIntrinsic(MI);
  case TargetOpcode::G_EXTRACT_SUBVECTOR:
    return selectExtractSubvector(MI);
  default:
    return false;
  }
}
1393
1394bool RISCVInstructionSelector::selectUnmergeValues(MachineInstr &MI) const {
1395 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
1396
1397 if (!Subtarget->hasStdExtZfa())
1398 return false;
1399
1400 // Split F64 Src into two s32 parts
1401 if (MI.getNumOperands() != 3)
1402 return false;
1403 Register Src = MI.getOperand(2).getReg();
1404 Register Lo = MI.getOperand(0).getReg();
1405 Register Hi = MI.getOperand(1).getReg();
1406 if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
1407 return false;
1408
1409 MachineInstr *ExtractLo = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1410 TII.get(RISCV::FMV_X_W_FPR64), Lo)
1411 .addReg(Src);
1412 constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI);
1413
1414 MachineInstr *ExtractHi = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1415 TII.get(RISCV::FMVH_X_D), Hi)
1416 .addReg(Src);
1417 constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI);
1418
1419 MI.eraseFromParent();
1420 return true;
1421}
1422
1423bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op) {
1424 Register PtrReg = Op.getReg();
1425 assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");
1426
1427 const LLT sXLen = LLT::scalar(STI.getXLen());
1428 MachineInstr &ParentMI = *Op.getParent();
1429 Register IntReg = MRI->createGenericVirtualRegister(sXLen);
1430 MRI->setRegBank(IntReg, RBI.getRegBank(RISCV::GPRBRegBankID));
1431 MachineInstr *PtrToInt =
1432 BuildMI(*ParentMI.getParent(), ParentMI, ParentMI.getDebugLoc(),
1433 TII.get(TargetOpcode::G_PTRTOINT), IntReg)
1434 .addReg(PtrReg);
1435 Op.setReg(IntReg);
1436 return select(*PtrToInt);
1437}
1438
1439void RISCVInstructionSelector::preISelLower(MachineInstr &MI) {
1440 switch (MI.getOpcode()) {
1441 case TargetOpcode::G_PTR_ADD: {
1442 Register DstReg = MI.getOperand(0).getReg();
1443 const LLT sXLen = LLT::scalar(STI.getXLen());
1444
1445 replacePtrWithInt(MI.getOperand(1));
1446 MI.setDesc(TII.get(TargetOpcode::G_ADD));
1447 MRI->setType(DstReg, sXLen);
1448 break;
1449 }
1450 case TargetOpcode::G_PTRMASK: {
1451 Register DstReg = MI.getOperand(0).getReg();
1452 const LLT sXLen = LLT::scalar(STI.getXLen());
1453 replacePtrWithInt(MI.getOperand(1));
1454 MI.setDesc(TII.get(TargetOpcode::G_AND));
1455 MRI->setType(DstReg, sXLen);
1456 break;
1457 }
1458 }
1459}
1460
1461void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
1462 const MachineInstr &MI,
1463 int OpIdx) const {
1464 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1465 "Expected G_CONSTANT");
1466 int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
1467 MIB.addImm(-CstVal);
1468}
1469
1470void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
1471 const MachineInstr &MI,
1472 int OpIdx) const {
1473 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1474 "Expected G_CONSTANT");
1475 uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
1476 MIB.addImm(STI.getXLen() - CstVal);
1477}
1478
1479void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
1480 const MachineInstr &MI,
1481 int OpIdx) const {
1482 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1483 "Expected G_CONSTANT");
1484 uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
1485 MIB.addImm(32 - CstVal);
1486}
1487
1488void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
1489 const MachineInstr &MI,
1490 int OpIdx) const {
1491 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1492 "Expected G_CONSTANT");
1493 int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
1494 MIB.addImm(CstVal + 1);
1495}
1496
1497void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
1498 const MachineInstr &MI,
1499 int OpIdx) const {
1500 assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
1501 "Expected G_FRAME_INDEX");
1502 MIB.add(MI.getOperand(1));
1503}
1504
// Render the number of trailing zero bits of a G_CONSTANT's value.
void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  // NOTE(review): the line that renders the immediate (presumably
  // MIB.addImm(llvm::countr_zero(C));) appears to be missing from this copy;
  // verify against upstream.
}
1513
1514void RISCVInstructionSelector::renderXLenSubTrailingOnes(
1515 MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
1516 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1517 "Expected G_CONSTANT");
1518 uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
1519 MIB.addImm(Subtarget->getXLen() - llvm::countr_one(C));
1520}
1521
1522void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
1523 const MachineInstr &MI,
1524 int OpIdx) const {
1525 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1526 "Expected G_CONSTANT");
1527 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
1528 int64_t Adj = Imm < 0 ? -2048 : 2047;
1529 MIB.addImm(Imm - Adj);
1530}
1531
1532void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
1533 const MachineInstr &MI,
1534 int OpIdx) const {
1535 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1536 "Expected G_CONSTANT");
1537 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
1538 MIB.addImm(Imm);
1539}
1540
1541bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
1542 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
1543}
1544
1545bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
1546 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
1547}
1548
1549bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
1550 MachineOperand Dst = MI.getOperand(0);
1551 Register DstReg = MI.getOperand(0).getReg();
1552
1553 if (DstReg.isPhysical())
1554 return true;
1555
1556 const TargetRegisterClass *DstRC =
1557 TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1558
1559 assert(DstRC &&
1560 "Register class not available for LLT, register bank combination");
1561
1562 // No need to constrain SrcReg. It will get constrained when
1563 // we hit another of its uses or its defs.
1564 // Copies do not have constraints.
1565 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1566 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
1567 << " operand\n");
1568 return false;
1569 }
1570
1571 MI.setDesc(TII.get(RISCV::COPY));
1572 return true;
1573}
1574
// Select G_IMPLICIT_DEF: constrain the def to a class derived from its
// type/bank and rewrite to the target-independent IMPLICIT_DEF.
bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI), STI.is64Bit());

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // NOTE(review): unlike selectCopy, a constraint failure here is only
  // logged and selection still reports success — confirm this is intentional.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}
1592
// Materialize the constant Imm into DstReg by emitting the RISCVMatInt
// instruction sequence before MI. Returns true on success.
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  // Zero is simply a copy of X0.
  if (Imm == 0) {
    BuildMI(MBB, MI, DL, TII.get(TargetOpcode::COPY), DstReg).addReg(RISCV::X0);
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
    return true;
  }

  // NOTE(review): the line computing the materialization sequence `Seq`
  // (presumably via RISCVMatInt::generateInstSeq) is missing from this copy;
  // verify against upstream.
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  // Chain the sequence: intermediate results go into fresh temporaries, the
  // final instruction writes DstReg directly.
  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      // clang-format off
      Result = BuildMI(MBB, MI, DL, TII.get(I.getOpcode()), TmpReg)
                   .addImm(I.getImm());
      // clang-format on
      break;
    case RISCVMatInt::RegX0:
      Result = BuildMI(MBB, MI, DL, TII.get(I.getOpcode()), TmpReg)
                   .addReg(SrcReg)
                   .addReg(RISCV::X0);
      break;
    // NOTE(review): two case labels (presumably RISCVMatInt::RegReg and
    // RISCVMatInt::RegImm) appear to be missing before the following two
    // builders in this copy.
      Result = BuildMI(MBB, MI, DL, TII.get(I.getOpcode()), TmpReg)
                   .addReg(SrcReg)
                   .addReg(SrcReg);
      break;
      Result = BuildMI(MBB, MI, DL, TII.get(I.getOpcode()), TmpReg)
                   .addReg(SrcReg)
                   .addImm(I.getImm());
      break;
    }

    // NOTE(review): a constrain call on *Result appears to be missing here.

    SrcReg = TmpReg;
  }

  return true;
}
1646
// Select symbol-address computations (G_GLOBAL_VALUE / G_JUMP_TABLE /
// G_CONSTANT_POOL) according to PIC-ness, tagged-globals mode, extern-weak
// linkage and the code model.
// NOTE(review): several lines appear to be missing from this copy (the
// MachinePointerInfo/flags arguments to getMachineMemOperand, the failure
// report in the default code-model case, and constrain calls after the
// builders); verify against upstream.
bool RISCVInstructionSelector::selectAddr(MachineInstr &MI, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled
  // they should be accessed via the GOT, since the tagged address of a global
  // is incompatible with existing code models. This also applies to non-pic
  // mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return true;
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        DefTy, Align(DefTy.getSizeInBits() / 8));

    MachineInstr *Result = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                   TII.get(RISCV::PseudoLGA), DefReg)
                               .addDisp(DispMO, 0)
                               .addMemOperand(MemOp);

    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default: {
        "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // Must lie within a single 2 GiB address range and must lie between
    // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
    // (lui %hi(sym)) %lo(sym)).
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                   TII.get(RISCV::LUI), AddrHiDest)
                               .addDisp(DispMO, 0, RISCVII::MO_HI);

    MachineInstr *Result = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                   TII.get(RISCV::ADDI), DefReg)
                               .addReg(AddrHiDest)
                               .addDisp(DispMO, 0, RISCVII::MO_LO);

    MI.eraseFromParent();
    return true;
  }
  case CodeModel::Medium:
    // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
    // relocation needs to reference a label that points to the auipc
    // instruction itself, not the global. This cannot be done inside the
    // instruction selector.
    if (IsExternWeak) {
      // An extern weak symbol may be undefined, i.e. have value 0, which may
      // not be within 2GiB of PC, so use GOT-indirect addressing to access the
      // symbol. This generates the pattern (PseudoLGA sym), which expands to
      // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
      MachineFunction &MF = *MI.getParent()->getParent();
      MachineMemOperand *MemOp = MF.getMachineMemOperand(
          DefTy, Align(DefTy.getSizeInBits() / 8));

      MachineInstr *Result = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                     TII.get(RISCV::PseudoLGA), DefReg)
                                 .addDisp(DispMO, 0)
                                 .addMemOperand(MemOp);

      MI.eraseFromParent();
      return true;
    }

    // Generate a sequence for accessing addresses within any 2GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    return true;
  }

  return false;
}
1760
// Select G_SELECT as a conditional-move pseudo keyed on a branch-style
// (LHS CC RHS) comparison derived from the condition register.
bool RISCVInstructionSelector::selectSelect(MachineInstr &MI) const {
  auto &SelectMI = cast<GSelect>(MI);

  // NOTE(review): the declaration of CC (RISCVCC::CondCode) appears to be
  // missing from this copy; verify against upstream.
  Register LHS, RHS;
  getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);

  Register DstReg = SelectMI.getReg(0);

  // Pick the pseudo variant matching the destination's bank and size.
  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result =
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII.get(Opc))
          .addDef(DstReg)
          .addReg(LHS)
          .addReg(RHS)
          .addImm(CC)
          .addReg(SelectMI.getTrueReg())
          .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  // NOTE(review): a constrain call on *Result appears to be missing here.
  return true;
}
1789
1790// Convert an FCMP predicate to one of the supported F or D instructions.
1791static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
1792 assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
1793 switch (Pred) {
1794 default:
1795 llvm_unreachable("Unsupported predicate");
1796 case CmpInst::FCMP_OLT:
1797 return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
1798 case CmpInst::FCMP_OLE:
1799 return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
1800 case CmpInst::FCMP_OEQ:
1801 return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
1802 }
1803}
1804
// Try legalizing an FCMP by swapping or inverting the predicate to one that
// is supported.
// NOTE(review): the first line of this function's signature (declaring the
// LHS/RHS Register& parameters and the function name) appears to be missing
// from this copy; verify against upstream.
                              CmpInst::Predicate &Pred, bool &NeedInvert) {
  // Only olt/ole/oeq have direct hardware compares.
  auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
           Pred == CmpInst::FCMP_OEQ;
  };

  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  // First try the swapped predicate (exchange the operands).
  // NOTE(review): the line initializing InvPred (presumably
  // CmpInst::getSwappedPredicate(Pred)) appears to be missing here.
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  // Then try the inverse predicate; the caller must XOR the result with 1.
  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  // Finally try inverse + swap.
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  return false;
}
1838
// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
// the result in DstReg.
// FIXME: Maybe we should expand this earlier.
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI) const {
  // NOTE(review): the constrainSelectedInstRegOperands(...) calls that
  // normally follow each BuildMI in this function appear to be missing from
  // this copy; verify against upstream.
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI->getType(LHS).getSizeInBits();
  assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");

  // TmpReg holds the non-inverted compare result; it only differs from
  // DstReg when a final XORI inversion is required.
  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try swapping operands or inverting.
  if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *Cmp = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                TII.get(getFCmpOpcode(Pred, Size)), TmpReg)
                            .addReg(LHS)
                            .addReg(RHS);
  } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
    // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
    NeedInvert = Pred == CmpInst::FCMP_UEQ;
    Register Cmp1Reg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *Cmp1 =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                TII.get(getFCmpOpcode(CmpInst::FCMP_OLT, Size)), Cmp1Reg)
            .addReg(LHS)
            .addReg(RHS);
    Register Cmp2Reg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *Cmp2 =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                TII.get(getFCmpOpcode(CmpInst::FCMP_OLT, Size)), Cmp2Reg)
            .addReg(RHS)
            .addReg(LHS);
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *Or = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                               TII.get(RISCV::OR), TmpReg)
                           .addReg(Cmp1Reg)
                           .addReg(Cmp2Reg);
  } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
    // If LHS and RHS are the same, a single FEQ suffices.
    NeedInvert = Pred == CmpInst::FCMP_UNO;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    if (LHS == RHS) {
      MachineInstr *Cmp =
          BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                  TII.get(getFCmpOpcode(CmpInst::FCMP_OEQ, Size)), TmpReg)
              .addReg(LHS)
              .addReg(LHS);
    } else {
      Register Cmp1Reg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      MachineInstr *Cmp1 =
          BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                  TII.get(getFCmpOpcode(CmpInst::FCMP_OEQ, Size)), Cmp1Reg)
              .addReg(LHS)
              .addReg(LHS);
      Register Cmp2Reg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      MachineInstr *Cmp2 =
          BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                  TII.get(getFCmpOpcode(CmpInst::FCMP_OEQ, Size)), Cmp2Reg)
              .addReg(RHS)
              .addReg(RHS);
      MachineInstr *And = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                  TII.get(RISCV::AND), TmpReg)
                              .addReg(Cmp1Reg)
                              .addReg(Cmp2Reg);
    }
  } else
    llvm_unreachable("Unhandled predicate");

  // Emit an XORI to invert the result if needed.
  if (NeedInvert) {
    MachineInstr *Xor = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                TII.get(RISCV::XORI), DstReg)
                            .addReg(TmpReg)
                            .addImm(1);
  }

  MI.eraseFromParent();
  return true;
}
1938
// Lower a G_FENCE into the appropriate RISC-V fence instruction (or a
// compiler-only MEMBARRIER), taking Ztso and the sync scope into account.
// NOTE(review): several lines appear to be missing from this copy — the
// .addImm(...) predecessor/successor operands on the Ztso FENCE builder and
// some Pred/Succ assignments in the switch below; verify against upstream.
void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (STI.hasStdExtZtso()) {
    // The only fence that needs an instruction is a sequentially-consistent
    // cross-thread fence.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      BuildMI(MBB, MI, DL, TII.get(RISCV::FENCE))
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
    BuildMI(MBB, MI, DL, TII.get(TargetOpcode::MEMBARRIER));
    return;
  }

  // singlethread fences only synchronize with signal handlers on the same
  // thread and thus only need to preserve instruction order, not actually
  // enforce memory ordering.
  if (FenceSSID == SyncScope::SingleThread) {
    BuildMI(MBB, MI, DL, TII.get(TargetOpcode::MEMBARRIER));
    return;
  }

  // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
  // Manual: Volume I.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    BuildMI(MBB, MI, DL, TII.get(RISCV::FENCE_TSO));
    return;
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    Pred = RISCVFenceField::R;
    break;
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    Succ = RISCVFenceField::W;
    break;
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    break;
  }
  BuildMI(MBB, MI, DL, TII.get(RISCV::FENCE)).addImm(Pred).addImm(Succ);
}
1998
namespace llvm {
// Factory entry point used by RISCVTargetMachine to construct the GlobalISel
// instruction selector.
// NOTE(review): the first line of this function's signature (naming
// createRISCVInstructionSelector and declaring the TM parameter) appears to
// be missing from this copy; verify against upstream.
InstructionSelector *
    const RISCVSubtarget &Subtarget,
    const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Provides analysis for querying information about KnownBits during GISel passes.
#define DEBUG_TYPE
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
static bool hasAllWUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI)
static bool hasAllNBitUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI, unsigned OrigBits)
#define I(x, y, z)
Definition MD5.cpp:57
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
#define P(N)
static StringRef getName(Value *V)
static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS, CmpInst::Predicate &Pred, bool &NeedInvert)
static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC, Register &LHS, Register &RHS, MachineRegisterInfo &MRI)
const SmallVectorImpl< MachineOperand > & Cond
This file declares the targeting of the RegisterBankInfo class for RISC-V.
#define LLVM_DEBUG(...)
Definition Debug.h:114
Value * RHS
Value * LHS
APInt bitcastToAPInt() const
Definition APFloat.h:1408
bool isPosZero() const
Definition APFloat.h:1527
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1511
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition APInt.h:1118
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition APInt.h:476
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
Definition APInt.h:287
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition Constants.h:174
This is an important base class in LLVM.
Definition Constant.h:43
virtual void setupMF(MachineFunction &mf, GISelValueTracking *vt, CodeGenCoverage *covinfo=nullptr, ProfileSummaryInfo *psi=nullptr, BlockFrequencyInfo *bfi=nullptr)
Setup per-MF executor state.
Register getPointerReg() const
Get the source register of the pointer value.
MachineMemOperand & getMMO() const
Get the MachineMemOperand on this instruction.
LocationSize getMemSizeInBits() const
Returns the size in bits of the memory access.
Register getReg(unsigned Idx) const
Access the Idx'th operand as a register and return it.
constexpr unsigned getScalarSizeInBits() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr unsigned getAddressSpace() const
TypeSize getValue() const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addDisp(const MachineOperand &Disp, int64_t off, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
iterator_range< use_nodbg_iterator > use_nodbg_operands(Register Reg) const
const RegClassOrRegBank & getRegClassOrRegBank(Register Reg) const
Return the register bank or register class of Reg.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
LLVM_ABI void setRegBank(Register Reg, const RegisterBank &RegBank)
Set the register bank to RegBank for Reg.
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
Analysis providing profile information.
This class provides the information for the target register banks.
unsigned getXLen() const
std::optional< unsigned > getRealVLen() const
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
static unsigned getRegClassIDForVecVT(MVT VT)
static RISCVVType::VLMUL getLMUL(MVT VT)
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
bool isPositionIndependent() const
CodeModel::Model getCodeModel() const
Returns the code model.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
operand_type_match m_Reg()
SpecificConstantMatch m_SpecificICst(const APInt &RequestedValue)
Matches a constant equal to RequestedValue.
operand_type_match m_Pred()
UnaryOp_match< SrcTy, TargetOpcode::G_ZEXT > m_GZExt(const SrcTy &Src)
ConstantMatch< APInt > m_ICst(APInt &Cst)
BinaryOp_match< LHS, RHS, TargetOpcode::G_ADD, true > m_GAdd(const LHS &L, const RHS &R)
OneNonDBGUse_match< SubPat > m_OneNonDBGUse(const SubPat &SP)
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_ICMP > m_GICmp(const Pred &P, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SUB > m_GSub(const LHS &L, const RHS &R)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SHL, false > m_GShl(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_AND, true > m_GAnd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_LSHR, false > m_GLShr(const LHS &L, const RHS &R)
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
Definition RISCVMatInt.h:43
static unsigned decodeVSEW(unsigned VSEW)
LLVM_ABI unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul)
LLVM_ABI unsigned encodeVTYPE(VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic, bool AltFmt=false)
static constexpr int64_t VLMaxSentinel
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Definition LLVMContext.h:55
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
PointerUnion< const TargetRegisterClass *, const RegisterBank * > RegClassOrRegBank
Convenient type to represent either a register class or a register bank.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
bool isStrongerThanMonotonic(AtomicOrdering AO)
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
Definition bit.h:315
LLVM_ABI void constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition Utils.cpp:155
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
Definition bit.h:325
LLVM_ABI MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
InstructionSelector * createRISCVInstructionSelector(const RISCVTargetMachine &TM, const RISCVSubtarget &Subtarget, const RISCVRegisterBankInfo &RBI)
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition Utils.cpp:313
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:204
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void reportGISelFailure(MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition Utils.cpp:257
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
AtomicOrdering
Atomic ordering for LLVM's memory model.
constexpr T maskTrailingZeros(unsigned N)
Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.
Definition MathExtras.h:94
@ Or
Bitwise or logical OR of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
Definition Utils.cpp:432
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
Definition MathExtras.h:77
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define MORE()
Definition regcomp.c:246
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.