RISCVInstructionSelector.cpp

//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVRegisterBankInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "riscv-isel"

using namespace llvm;
using namespace MIPatternMatch;

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

namespace {

class RISCVInstructionSelector : public InstructionSelector {
public:
  RISCVInstructionSelector(const RISCVTargetMachine &TM,
                           const RISCVSubtarget &STI,
                           const RISCVRegisterBankInfo &RBI);

  bool select(MachineInstr &MI) override;

  void setupMF(MachineFunction &MF, GISelValueTracking *VT,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override {
    InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
    MRI = &MF.getRegInfo();
  }

  static const char *getName() { return DEBUG_TYPE; }

private:
  const TargetRegisterClass *
  getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;

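  // Cap the recursion depth of hasAllNBitUsers so use-chain walks stay cheap.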
  static constexpr unsigned MaxRecursionDepth = 6;

  bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
                       const unsigned Depth = 0) const;
  bool hasAllHUsers(const MachineInstr &MI) const {
    return hasAllNBitUsers(MI, 16);
  }
  bool hasAllWUsers(const MachineInstr &MI) const {
    return hasAllNBitUsers(MI, 32);
  }

  bool isRegInGprb(Register Reg) const;
  bool isRegInFprb(Register Reg) const;

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // A lowering phase that runs before any selection attempts.
  void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);

  bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);

  // Custom selection methods
  bool selectCopy(MachineInstr &MI) const;
  bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB) const;
  void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
                 MachineIRBuilder &MIB) const;
  bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;
  void addVectorLoadStoreOperands(MachineInstr &I,
                                  SmallVectorImpl<SrcOp> &SrcOps,
                                  unsigned &CurOp, bool IsMasked,
                                  bool IsStridedOrIndexed,
                                  LLT *IndexVT = nullptr) const;
  bool selectIntrinsicWithSideEffects(MachineInstr &I,
                                      MachineIRBuilder &MIB) const;
  bool selectIntrinsic(MachineInstr &I, MachineIRBuilder &MIB) const;
  bool selectExtractSubvector(MachineInstr &MI, MachineIRBuilder &MIB) const;

  ComplexRendererFns selectShiftMask(MachineOperand &Root,
                                     unsigned ShiftWidth) const;
  ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
    return selectShiftMask(Root, STI.getXLen());
  }
  ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
    return selectShiftMask(Root, 32);
  }
  ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;

  ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
    return selectSExtBits(Root, Bits);
  }

  ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
    return selectZExtBits(Root, Bits);
  }

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }

  ComplexRendererFns renderVLOp(MachineOperand &Root) const;

  // Custom renderers for tablegen
  void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                    int OpIdx) const;
  void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
                            int OpIdx) const;
  void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
                          int OpIdx) const;
  void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
                      int OpIdx) const;
  void renderFrameIndex(MachineInstrBuilder &MIB, const MachineInstr &MI,
                        int OpIdx) const;

  void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
                           int OpIdx) const;
  void renderXLenSubTrailingOnes(MachineInstrBuilder &MIB,
                                 const MachineInstr &MI, int OpIdx) const;

  void renderAddiPairImmLarge(MachineInstrBuilder &MIB, const MachineInstr &MI,
                              int OpIdx) const;
  void renderAddiPairImmSmall(MachineInstrBuilder &MIB, const MachineInstr &MI,
                              int OpIdx) const;

  const RISCVSubtarget &STI;
  const RISCVInstrInfo &TII;
  const RISCVRegisterInfo &TRI;
  const RISCVRegisterBankInfo &RBI;
  const RISCVTargetMachine &TM;

  MachineRegisterInfo *MRI = nullptr;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. We need to unify the name of
  // Subtarget variable.
  const RISCVSubtarget *Subtarget = &STI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      TM(TM),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

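// Returns true when every transitive user of MI's result only reads its low
// Bits bits. For example, if a G_ADD's only user is an ADDW, the add needs to
// produce only the low 32 bits, so a W-form instruction can be selected.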
// Mimics optimizations in ISel and the RISCVOptWInstrs pass.
bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
                                               unsigned Bits,
                                               const unsigned Depth) const {

  assert((MI.getOpcode() == TargetOpcode::G_ADD ||
          MI.getOpcode() == TargetOpcode::G_SUB ||
          MI.getOpcode() == TargetOpcode::G_MUL ||
          MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_AND ||
          MI.getOpcode() == TargetOpcode::G_OR ||
          MI.getOpcode() == TargetOpcode::G_XOR ||
          MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
         "Unexpected opcode");

  if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
    return false;

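  // Every non-debug use must be an instruction known to read only the low
  // Bits bits of this operand; any unknown user defeats the optimization.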
  auto DestReg = MI.getOperand(0).getReg();
  for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
    assert(UserOp.getParent() && "UserOp must have a parent");
    const MachineInstr &UserMI = *UserOp.getParent();
    unsigned OpIdx = UserOp.getOperandNo();

    switch (UserMI.getOpcode()) {
    default:
      return false;
    case RISCV::ADDW:
    case RISCV::ADDIW:
    case RISCV::SUBW:
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_S_W:
      if (Bits >= 32)
        break;
      return false;
    case RISCV::SLL:
    case RISCV::SRA:
    case RISCV::SRL:
      // Shift amount operands only use log2(XLen) bits.
      if (OpIdx == 2 && Bits >= Log2_32(Subtarget->getXLen()))
        break;
      return false;
    case RISCV::SLLI:
      // SLLI only uses the lower (XLen - ShAmt) bits.
      if (Bits >= Subtarget->getXLen() - UserMI.getOperand(2).getImm())
        break;
      return false;
    case RISCV::ANDI:
      if (Bits >= (unsigned)llvm::bit_width<uint64_t>(
                      (uint64_t)UserMI.getOperand(2).getImm()))
        break;
      goto RecCheck;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    RecCheck:
      if (hasAllNBitUsers(UserMI, Bits, Depth + 1))
        break;
      return false;
    case RISCV::SRLI: {
      unsigned ShAmt = UserMI.getOperand(2).getImm();
      // If we are shifting right by less than Bits, and users don't demand any
      // bits that were shifted into [Bits-1:0], then we can consider this as an
      // N-Bit user.
      if (Bits > ShAmt && hasAllNBitUsers(UserMI, Bits - ShAmt, Depth + 1))
        break;
      return false;
    }
    }
  }

  return true;
}

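// Renderer for shift-amount operands. SLL/SRL/SRA read only the low
// log2(XLEN) bits of rs2, so redundant masks, zero-extends, and add/sub of
// multiples of the shift width can be folded away; e.g. with XLEN=64,
// (G_SHL %x, (G_AND %y, 63)) can shift directly by %y.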
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
                                          unsigned ShiftWidth) const {
  if (!Root.isReg())
    return std::nullopt;

  using namespace llvm::MIPatternMatch;

  Register ShAmtReg = Root.getReg();
  // Peek through zext.
  Register ZExtSrcReg;
  if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg))))
    ShAmtReg = ZExtSrcReg;

  APInt AndMask;
  Register AndSrcReg;
  // Try to combine the following pattern (applicable to other shift
  // instructions as well as 32-bit ones):
  //
  //   %4:gprb(s64) = G_AND %3, %2
  //   %5:gprb(s64) = G_LSHR %1, %4(s64)
  //
  // According to RISC-V's ISA manual, SLL, SRL, and SRA ignore all but the
  // lowest log2(XLEN) bits of register rs2. For the above pattern, if the
  // lowest log2(XLEN) bits of rd and rs2 of the G_AND are the same, the G_AND
  // can be eliminated. Given a register rs1 or rs2 holding a constant (the
  // AND mask), there are two cases in which the G_AND can be erased:
  //
  // 1. the lowest log2(XLEN) bits of the AND mask are all set
  // 2. the bits cleared by the AND mask are already known to be zero in the
  //    register being masked
  if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    } else {
      // SimplifyDemandedBits may have optimized the mask so try restoring any
      // bits that are known zero.
      KnownBits Known = VT->getKnownBits(AndSrcReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    }
  }

  APInt Imm;
  Register Reg;
  if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // If we are shifting by X+N where N == 0 mod Size, then just shift by X
      // to avoid the ADD.
      ShAmtReg = Reg;
  } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
      // to generate a NEG instead of a SUB of a constant.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
      // to generate a NOT instead of a SUB of a constant.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}

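// Match an operand that is known to be sign-extended from Bits bits, either
// via an explicit G_SEXT_INREG or because enough sign bits can be proven.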
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();
  MachineInstr *RootDef = MRI->getVRegDef(RootReg);

  if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
      RootDef->getOperand(2).getImm() == Bits) {
    return {
        {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};
  }

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  Register RegX;
  uint64_t Mask = maskTrailingOnes<uint64_t>(Bits);
  if (mi_match(RootReg, *MRI, m_GAnd(m_Reg(RegX), m_SpecificICst(Mask)))) {
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
  }

  if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
      MRI->getType(RegX).getScalarSizeInBits() == Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if (VT->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}

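// Zba's SHXADD computes rs2 + (rs1 << ShAmt). These matchers look through
// shift+mask patterns on the shifted addend so that a single shift
// (SRLI/SRLIW) can feed the SHXADD and the AND can be dropped.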
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  const unsigned XLen = STI.getXLen();
  APInt Mask, C2;
  Register RegY;
  std::optional<bool> LeftShift;
  // (and (shl y, c2), mask)
  if (mi_match(RootReg, *MRI,
               m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = true;
  // (and (lshr y, c2), mask)
  else if (mi_match(RootReg, *MRI,
                    m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = false;

  if (LeftShift.has_value()) {
    if (*LeftShift)
      Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
    else
      Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // Given (and (shl y, c2), mask) in which mask has no leading zeros and
      // c3 trailing zeros, we can use an SRLI by c3 - c2 followed by a SHXADD.
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
      // c3 trailing zeros, we can use an SRLI by c2 + c3 followed by a SHXADD.
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  LeftShift.reset();

  // (shl (and y, mask), c2)
  if (mi_match(RootReg, *MRI,
               m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                      m_ICst(C2))))
    LeftShift = true;
  // (lshr (and y, mask), c2)
  else if (mi_match(RootReg, *MRI,
                    m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                            m_ICst(C2))))
    LeftShift = false;

  if (LeftShift.has_value() && Mask.isShiftedMask()) {
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();

    // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
    // c3 trailing zeros. If c2 + c3 == ShAmt, we can emit SRLIW + SHXADD.
    bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
                (Trailing + C2.getLimitedValue()) == ShAmt;
    if (!Cond)
      // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
      // c3 trailing zeros. If c3 - c2 == ShAmt, we can emit SRLIW + SHXADD.
      Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
             (Trailing - C2.getLimitedValue()) == ShAmt;

    if (Cond) {
      Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            .addImm(Trailing);
        MIB.addReg(DstReg);
      }}};
    }
  }

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  // Given (and (shl x, c2), mask) in which mask is a shifted mask with
  // 32 - ShAmt leading zeros and c2 trailing zeros, we can use SLLI by
  // c2 - ShAmt followed by SHXADD_UW with ShAmt as the shift amount for x.
  APInt Mask, C2;
  Register RegX;
  if (mi_match(
          RootReg, *MRI,
          m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
                                m_ICst(Mask))))) {
    Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
              .addImm(C2.getLimitedValue() - ShAmt);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  return std::nullopt;
}

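// Render the VL operand of a vector pseudo: small constants become 5-bit
// immediates, an all-ones constant becomes the VLMAX sentinel, and anything
// else stays a register.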
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
  assert(Root.isReg() && "Expected operand to be a Register");
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());

  if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
    auto C = RootDef->getOperand(1).getCImm();
    if (C->getValue().isAllOnes())
      // If the operand is a G_CONSTANT with an all-ones value, it is larger
      // than VLMAX. We convert it to an immediate with value VLMaxSentinel.
      // This is recognized specially by the vsetvli insertion pass.
      return {{[=](MachineInstrBuilder &MIB) {
        MIB.addImm(RISCV::VLMaxSentinel);
      }}};

    if (isUInt<5>(C->getZExtValue())) {
      uint64_t ZExtC = C->getZExtValue();
      return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
    }
  }
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, *MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }

  // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
  // the combiner?
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}

/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate
/// CC. CC must be an ICMP predicate.
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
  switch (CC) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
                                 Register &LHS, Register &RHS,
                                 MachineRegisterInfo &MRI) {
  // Try to fold an ICmp. If that fails, use a NE compare with X0.
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
    LHS = CondReg;
    RHS = RISCV::X0;
    CC = RISCVCC::COND_NE;
    return;
  }

  // We found an ICmp, do some canonicalization.

  // Adjust comparisons to use comparison with 0 if possible.
  if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
    switch (Pred) {
    case CmpInst::Predicate::ICMP_SGT:
      // Convert X > -1 to X >= 0
      if (*Constant == -1) {
        CC = RISCVCC::COND_GE;
        RHS = RISCV::X0;
        return;
      }
      break;
    case CmpInst::Predicate::ICMP_SLT:
      // Convert X < 1 to 0 >= X
      if (*Constant == 1) {
        CC = RISCVCC::COND_GE;
        RHS = LHS;
        LHS = RISCV::X0;
        return;
      }
      break;
    default:
      break;
    }
  }

  switch (Pred) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
  case CmpInst::Predicate::ICMP_NE:
  case CmpInst::Predicate::ICMP_ULT:
  case CmpInst::Predicate::ICMP_SLT:
  case CmpInst::Predicate::ICMP_UGE:
  case CmpInst::Predicate::ICMP_SGE:
    // These CCs are supported directly by RISC-V branches.
    break;
  case CmpInst::Predicate::ICMP_SGT:
  case CmpInst::Predicate::ICMP_SLE:
  case CmpInst::Predicate::ICMP_UGT:
  case CmpInst::Predicate::ICMP_ULE:
    // These CCs are not supported directly by RISC-V branches, but changing
    // the direction of the CC and swapping LHS and RHS are.
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    break;
  }

  CC = getRISCVCCFromICmp(Pred);
}

/// Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation
/// \p GenericOpc, appropriate for the GPR register bank and of memory access
/// size \p OpSize.
static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  default:
    llvm_unreachable("Unexpected memory size");
  case 8:
    return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
  case 16:
    return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
  case 32:
    return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
  case 64:
    return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
  }
}

/// Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation
/// \p GenericOpc, appropriate for the GPR register bank and of memory access
/// size \p OpSize. \returns \p GenericOpc if the combination is unsupported.
static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  case 8:
    // Prefer unsigned due to no c.lb in Zcb.
    return IsStore ? RISCV::SB : RISCV::LBU;
  case 16:
    return IsStore ? RISCV::SH : RISCV::LH;
  case 32:
    return IsStore ? RISCV::SW : RISCV::LW;
  case 64:
    return IsStore ? RISCV::SD : RISCV::LD;
  }

  return GenericOpc;
}

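// Collect the common trailing operands of a vector load/store intrinsic
// (base pointer, optional stride/index, optional mask) into SrcOps.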
void RISCVInstructionSelector::addVectorLoadStoreOperands(
    MachineInstr &I, SmallVectorImpl<SrcOp> &SrcOps, unsigned &CurOp,
    bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {
  // Base Pointer
  auto PtrReg = I.getOperand(CurOp++).getReg();
  SrcOps.push_back(PtrReg);

  // Stride or Index
  if (IsStridedOrIndexed) {
    auto StrideReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(StrideReg);
    if (IndexVT)
      *IndexVT = MRI->getType(StrideReg);
  }

  // Mask
  if (IsMasked) {
    auto MaskReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(MaskReg);
  }
}

bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
    MachineInstr &I, MachineIRBuilder &MIB) const {
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
  // Select the instruction.
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::riscv_vlm:
  case Intrinsic::riscv_vle:
  case Intrinsic::riscv_vle_mask:
  case Intrinsic::riscv_vlse:
  case Intrinsic::riscv_vlse_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
                    IntrinID == Intrinsic::riscv_vlse_mask;
    bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
                     IntrinID == Intrinsic::riscv_vlse_mask;
    LLT VT = MRI->getType(I.getOperand(0).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Result vector
    const Register DstReg = I.getOperand(0).getReg();

    // Sources
    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
    unsigned CurOp = 2;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    // Passthru
    if (HasPassthruOperand) {
      auto PassthruReg = I.getOperand(CurOp++).getReg();
      SrcOps.push_back(PassthruReg);
    } else {
      SrcOps.push_back(Register(RISCV::NoRegister));
    }

    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    const RISCV::VLEPseudo *P =
        RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

    // Select VL
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW
    PseudoMI.addImm(Log2SEW);

    // Policy
    uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
    if (IsMasked)
      Policy = I.getOperand(CurOp++).getImm();
    PseudoMI.addImm(Policy);

    // Memref
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vloxei:
  case Intrinsic::riscv_vloxei_mask:
  case Intrinsic::riscv_vluxei:
  case Intrinsic::riscv_vluxei_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
                    IntrinID == Intrinsic::riscv_vluxei_mask;
    bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
                     IntrinID == Intrinsic::riscv_vloxei_mask;
    LLT VT = MRI->getType(I.getOperand(0).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Result vector
    const Register DstReg = I.getOperand(0).getReg();

    // Sources
    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
    unsigned CurOp = 2;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    // Passthru
    if (HasPassthruOperand) {
      auto PassthruReg = I.getOperand(CurOp++).getReg();
      SrcOps.push_back(PassthruReg);
    } else {
      // Use NoRegister if there is no specified passthru.
      SrcOps.push_back(Register());
    }
    LLT IndexVT;
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    RISCVVType::VLMUL IndexLMUL =
        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      reportFatalUsageError("The V extension does not support EEW=64 for index "
                            "values when XLEN=32");
    }
    const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

    // Select VL
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW
    PseudoMI.addImm(Log2SEW);

    // Policy
    uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
    if (IsMasked)
      Policy = I.getOperand(CurOp++).getImm();
    PseudoMI.addImm(Policy);

    // Memref
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vsm:
  case Intrinsic::riscv_vse:
  case Intrinsic::riscv_vse_mask:
  case Intrinsic::riscv_vsse:
  case Intrinsic::riscv_vsse_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
                    IntrinID == Intrinsic::riscv_vsse_mask;
    bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
                     IntrinID == Intrinsic::riscv_vsse_mask;
    LLT VT = MRI->getType(I.getOperand(1).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Sources
    unsigned CurOp = 1;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    // Store value
    auto PassthruReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(PassthruReg);

    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
        IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

    // Select VL
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW
    PseudoMI.addImm(Log2SEW);

    // Memref
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vsoxei:
  case Intrinsic::riscv_vsoxei_mask:
  case Intrinsic::riscv_vsuxei:
  case Intrinsic::riscv_vsuxei_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
                    IntrinID == Intrinsic::riscv_vsuxei_mask;
    bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
                     IntrinID == Intrinsic::riscv_vsoxei_mask;
    LLT VT = MRI->getType(I.getOperand(1).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Sources
    unsigned CurOp = 1;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    // Store value
    auto PassthruReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(PassthruReg);

    LLT IndexVT;
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    RISCVVType::VLMUL IndexLMUL =
        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      reportFatalUsageError("The V extension does not support EEW=64 for index "
                            "values when XLEN=32");
    }
    const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

    // Select VL
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW
    PseudoMI.addImm(Log2SEW);

    // Memref
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  }
}

bool RISCVInstructionSelector::selectIntrinsic(MachineInstr &I,
                                               MachineIRBuilder &MIB) const {
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
  // Select the instruction.
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::riscv_vsetvli:
  case Intrinsic::riscv_vsetvlimax: {

    bool VLMax = IntrinID == Intrinsic::riscv_vsetvlimax;

    unsigned Offset = VLMax ? 2 : 3;
    unsigned SEW = RISCVVType::decodeVSEW(I.getOperand(Offset).getImm() & 0x7);
    RISCVVType::VLMUL VLMul =
        static_cast<RISCVVType::VLMUL>(I.getOperand(Offset + 1).getImm() & 0x7);

    unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
                                              /*MaskAgnostic*/ true);

    Register DstReg = I.getOperand(0).getReg();

    Register VLOperand;
    unsigned Opcode = RISCV::PseudoVSETVLI;

    // Check if AVL is a constant that equals VLMAX.
    if (!VLMax) {
      Register AVLReg = I.getOperand(2).getReg();
      if (auto AVLConst = getIConstantVRegValWithLookThrough(AVLReg, *MRI)) {
        uint64_t AVL = AVLConst->Value.getZExtValue();
        if (auto VLEN = Subtarget->getRealVLen()) {
          if (*VLEN / RISCVVType::getSEWLMULRatio(SEW, VLMul) == AVL)
            VLMax = true;
        }
      }

      MachineInstr *AVLDef = MRI->getVRegDef(AVLReg);
      if (AVLDef && AVLDef->getOpcode() == TargetOpcode::G_CONSTANT) {
        const auto *C = AVLDef->getOperand(1).getCImm();
        if (C->getValue().isAllOnes())
          VLMax = true;
      }
    }

    if (VLMax) {
      VLOperand = Register(RISCV::X0);
      Opcode = RISCV::PseudoVSETVLIX0;
    } else {
      Register AVLReg = I.getOperand(2).getReg();
      VLOperand = AVLReg;

      // Check if AVL is a small constant that can use PseudoVSETIVLI.
      if (auto AVLConst = getIConstantVRegValWithLookThrough(AVLReg, *MRI)) {
        uint64_t AVL = AVLConst->Value.getZExtValue();
        if (isUInt<5>(AVL)) {
          auto PseudoMI = MIB.buildInstr(RISCV::PseudoVSETIVLI, {DstReg}, {})
                              .addImm(AVL)
                              .addImm(VTypeI);
          I.eraseFromParent();
          return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
        }
      }
    }

    auto PseudoMI =
        MIB.buildInstr(Opcode, {DstReg}, {VLOperand}).addImm(VTypeI);
    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  }
}

bool RISCVInstructionSelector::selectExtractSubvector(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();

  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);

  unsigned Idx = static_cast<unsigned>(MI.getOperand(2).getImm());

  MVT DstMVT = getMVTForLLT(DstTy);
  MVT SrcMVT = getMVTForLLT(SrcTy);

  unsigned SubRegIdx;
  std::tie(SubRegIdx, Idx) =
      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
          SrcMVT, DstMVT, Idx, &TRI);

  if (Idx != 0)
    return false;

  unsigned DstRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(DstMVT);
  const TargetRegisterClass *DstRC = TRI.getRegClass(DstRegClassID);
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  unsigned SrcRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(SrcMVT);
  const TargetRegisterClass *SrcRC = TRI.getRegClass(SrcRegClassID);
  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
    return false;

  MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {})
      .addReg(SrcReg, {}, SubRegIdx);

  MI.eraseFromParent();
  return true;
}

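// Main entry point: pre-lower pointer ops, hand generic instructions to the
// TableGen-erated matcher, and handle the remaining opcodes manually below.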
bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          dyn_cast_if_present<const TargetRegisterClass *>(RegClassOrBank);
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI);

    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    bool IsSigned = Opc != TargetOpcode::G_ZEXT;
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    LLT SrcTy = MRI->getType(SrcReg);
    unsigned SrcSize = SrcTy.getSizeInBits();

    if (SrcTy.isVector())
      return false; // Should be handled by imported patterns.

    assert((*RBI.getRegBank(DstReg, *MRI, TRI)).getID() ==
               RISCV::GPRBRegBankID &&
           "Unexpected ext regbank");

    // Use addiw SrcReg, 0 (sext.w) for i32.
    if (IsSigned && SrcSize == 32) {
      MI.setDesc(TII.get(RISCV::ADDIW));
      MI.addOperand(MachineOperand::CreateImm(0));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use add.uw SrcReg, X0 (zext.w) for i32 with Zba.
    if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
      MI.setDesc(TII.get(RISCV::ADD_UW));
      MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use sext.h/zext.h for i16 with Zbb.
    if (SrcSize == 16 && STI.hasStdExtZbb()) {
      MI.setDesc(TII.get(IsSigned ? RISCV::SEXT_H
                         : STI.isRV64() ? RISCV::ZEXT_H_RV64
                                        : RISCV::ZEXT_H_RV32));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use pack(w) SrcReg, X0 for i16 zext with Zbkb.
    if (!IsSigned && SrcSize == 16 && STI.hasStdExtZbkb()) {
      MI.setDesc(TII.get(STI.is64Bit() ? RISCV::PACKW : RISCV::PACK));
      MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Fall back to shift pair.
    auto ShiftLeft =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {SrcReg})
            .addImm(STI.getXLen() - SrcSize);
    constrainSelectedInstRegOperands(*ShiftLeft, TII, TRI, RBI);
    auto ShiftRight = MIB.buildInstr(IsSigned ? RISCV::SRAI : RISCV::SRLI,
                                     {DstReg}, {ShiftLeft})
                          .addImm(STI.getXLen() - SrcSize);
    constrainSelectedInstRegOperands(*ShiftRight, TII, TRI, RBI);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use constant pool for complex constants.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      Register GPRReg;
      if (FPimm.isPosZero()) {
        GPRReg = RISCV::X0;
      } else {
        GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        APInt Imm = FPimm.bitcastToAPInt();
        if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
          return false;
      }

      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      // s64 on rv32
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");

      if (FPimm.isPosZero()) {
        // Optimize +0.0 to use fcvt.d.w
        MachineInstrBuilder FCVT =
            MIB.buildInstr(RISCV::FCVT_D_W, {DstReg}, {Register(RISCV::X0)})
                .addImm(RISCVFPRndMode::RNE);
        if (!FCVT.constrainAllUses(TII, TRI, RBI))
          return false;

        MI.eraseFromParent();
        return true;
      }

      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      APInt Imm = FPimm.bitcastToAPInt();
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB, MRI);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    RISCVCC::CondCode CC;
    getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    GLoadStore &LdSt = cast<GLoadStore>(MI);
    const Register ValReg = LdSt.getReg(0);
    const Register PtrReg = LdSt.getPointerReg();
    LLT PtrTy = MRI->getType(PtrReg);

    const RegisterBank &RB = *RBI.getRegBank(ValReg, *MRI, TRI);
    if (RB.getID() != RISCV::GPRBRegBankID)
      return false;

#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, *MRI, TRI);
    // Check that the pointer register is valid.
    assert(PtrRB.getID() == RISCV::GPRBRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");
#endif

    // Can only handle AddressSpace 0.
    if (PtrTy.getAddressSpace() != 0)
      return false;

    unsigned MemSize = LdSt.getMemSizeInBits().getValue();
    AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();

    if (isStrongerThanMonotonic(Order)) {
      MI.setDesc(TII.get(selectZalasrLoadStoreOp(Opc, MemSize)));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    const unsigned NewOpc = selectRegImmLoadStoreOp(MI.getOpcode(), MemSize);
    if (NewOpc == MI.getOpcode())
      return false;

    // Check if we can fold anything into the addressing mode.
    auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
    if (!AddrModeFns)
      return false;

    // Folded something. Create a new instruction and return it.
    auto NewInst = MIB.buildInstr(NewOpc, {}, {}, MI.getFlags());
    if (isa<GStore>(MI))
      NewInst.addUse(ValReg);
    else
      NewInst.addDef(ValReg);
    NewInst.cloneMemRefs(MI);
    for (auto &Fn : *AddrModeFns)
      Fn(NewInst);
    MI.eraseFromParent();

    return constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
  }
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWithSideEffects(MI, MIB);
  case TargetOpcode::G_INTRINSIC:
    return selectIntrinsic(MI, MIB);
  case TargetOpcode::G_EXTRACT_SUBVECTOR:
    return selectExtractSubvector(MI, MIB);
  default:
    return false;
  }
}

bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  if (!Subtarget->hasStdExtZfa())
    return false;

  // Split F64 Src into two s32 parts
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;

  MachineInstr *ExtractLo = MIB.buildInstr(RISCV::FMV_X_W_FPR64, {Lo}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
    return false;

  MachineInstr *ExtractHi = MIB.buildInstr(RISCV::FMVH_X_D, {Hi}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
    return false;

  MI.eraseFromParent();
  return true;
}

bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB) {
  Register PtrReg = Op.getReg();
  assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}

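// Rewrite pointer-based generic ops into their integer equivalents on sXLen
// (G_PTR_ADD -> G_ADD, G_PTRMASK -> G_AND) so the imported patterns apply.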
void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI->setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI->setType(DstReg, sXLen);
    break;
  }
  }
}

void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
         "Expected G_FRAME_INDEX");
  MIB.add(MI.getOperand(1));
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

void RISCVInstructionSelector::renderXLenSubTrailingOnes(
    MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(Subtarget->getXLen() - llvm::countr_one(C));
}

void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
  int64_t Adj = Imm < 0 ? -2048 : 2047;
  MIB.addImm(Imm - Adj);
}

void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
  MIB.addImm(Imm);
}

const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
      return &RISCV::VRRegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 128)
      return &RISCV::VRM2RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 256)
      return &RISCV::VRM4RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 512)
      return &RISCV::VRM8RegClass;
  }

  return nullptr;
}

bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();

  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
    return false;
  }

  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

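// Materialize Imm into DstReg using the RISCVMatInt-generated instruction
// sequence (e.g. LUI+ADDI chains), creating temporaries between the steps.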
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  if (Imm == 0) {
    MIB.buildCopy(DstReg, Register(RISCV::X0));
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
    return true;
  }

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      // clang-format off
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
                  .addImm(I.getImm());
      // clang-format on
      break;
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    case RISCVMatInt::RegReg:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
      break;
    case RISCVMatInt::RegImm:
      Result =
          MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
      break;
    }

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    SrcReg = TmpReg;
  }

  return true;
}

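// Lower G_GLOBAL_VALUE/G_JUMP_TABLE/G_CONSTANT_POOL addresses according to
// code model and PIC mode: PseudoLLA (PC-relative), PseudoLGA (GOT), or
// LUI+ADDI (absolute, small code model).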
bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled,
  // they should be accessed via the GOT, since the tagged address of a global
  // is incompatible with existing code models. This also applies to non-pic
  // mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        DefTy, Align(DefTy.getSizeInBits() / 8));

    auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                      .addDisp(DispMO, 0)
                      .addMemOperand(MemOp);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }

1689 switch (TM.getCodeModel()) {
1690 default: {
1692 "Unsupported code model for lowering", MI);
1693 return false;
1694 }
1695 case CodeModel::Small: {
1696 // Must lie within a single 2 GiB address range and must lie between
1697 // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
1698 // (lui %hi(sym)) %lo(sym)).
1699 Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1700 MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
1701 .addDisp(DispMO, 0, RISCVII::MO_HI);
1702
1703 if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
1704 return false;
1705
1706 auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
1707 .addDisp(DispMO, 0, RISCVII::MO_LO);
1708
1709 if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1710 return false;
1711
1712 MI.eraseFromParent();
1713 return true;
1714 }
1715 case CodeModel::Medium:
1716 // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
1717 // relocation needs to reference a label that points to the auipc
1718 // instruction itself, not the global. This cannot be done inside the
1719 // instruction selector.
1720 if (IsExternWeak) {
1721 // An extern weak symbol may be undefined, i.e. have value 0, which may
1722 // not be within 2GiB of PC, so use GOT-indirect addressing to access the
1723 // symbol. This generates the pattern (PseudoLGA sym), which expands to
1724 // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
1725 MachineFunction &MF = *MI.getParent()->getParent();
1726 MachineMemOperand *MemOp = MF.getMachineMemOperand(
1727 MachinePointerInfo::getGOT(MF),
1728 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
1729 MachineMemOperand::MOInvariant,
1730 DefTy, Align(DefTy.getSizeInBits() / 8));
1731
1732 auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
1733 .addDisp(DispMO, 0)
1734 .addMemOperand(MemOp);
1735
1736 if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1737 return false;
1738
1739 MI.eraseFromParent();
1740 return true;
1741 }
1742
1743 // Generate a sequence for accessing addresses within any 2GiB range
1744 // within the address space. This generates the pattern (PseudoLLA sym),
1745 // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
1746 MI.setDesc(TII.get(RISCV::PseudoLLA));
1747 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
1748 }
1749
1750 return false;
1751}
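// Example (an editorial sketch, not part of the upstream file): under
// CodeModel::Medium, a local symbol selected to (PseudoLLA sym) above is
// later expanded, by the RISC-V pre-RA pseudo expansion pass, into
//
//   .Lpcrel_hi0:
//     auipc a0, %pcrel_hi(sym)
//     addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)
//
// and the GOT-indirect (PseudoLGA sym) form into an auipc/ld pair using
// %got_pcrel_hi and %pcrel_lo, matching the comments in the code above. The
// pcrel_lo label requirement is why the pseudos cannot be expanded here.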
1752
1753bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
1754 MachineIRBuilder &MIB) const {
1755 auto &SelectMI = cast<GSelect>(MI);
1756
1757 Register LHS, RHS;
1758 RISCVCC::CondCode CC;
1759 getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);
1760
1761 Register DstReg = SelectMI.getReg(0);
1762
1763 unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
1764 if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
1765 unsigned Size = MRI->getType(DstReg).getSizeInBits();
1766 Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
1767 : RISCV::Select_FPR64_Using_CC_GPR;
1768 }
1769
1770 MachineInstr *Result = MIB.buildInstr(Opc)
1771 .addDef(DstReg)
1772 .addReg(LHS)
1773 .addReg(RHS)
1774 .addImm(CC)
1775 .addReg(SelectMI.getTrueReg())
1776 .addReg(SelectMI.getFalseReg());
1777 MI.eraseFromParent();
1778 return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
1779}
1780
1781// Convert an FCMP predicate to one of the supported F, D, or Zfh
1782// instructions.
1782static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
1783 assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
1784 switch (Pred) {
1785 default:
1786 llvm_unreachable("Unsupported predicate");
1787 case CmpInst::FCMP_OLT:
1788 return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
1789 case CmpInst::FCMP_OLE:
1790 return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
1791 case CmpInst::FCMP_OEQ:
1792 return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
1793 }
1794}
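// Note (editorial, not part of the upstream file): FLT, FLE, and FEQ are the
// only direct floating-point compares in F/D/Zfh. For example,
// getFCmpOpcode(CmpInst::FCMP_OLT, 32) returns RISCV::FLT_S, which writes 1
// to its GPR destination when the operands are ordered and LHS < RHS, and 0
// otherwise. Every other FCMP predicate is reduced to these three by the
// helpers below.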
1795
1796// Try legalizing an FCMP by swapping or inverting the predicate to one that
1797// is supported.
1798static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
1799 CmpInst::Predicate &Pred, bool &NeedInvert) {
1800 auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
1801 return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
1802 Pred == CmpInst::FCMP_OEQ;
1803 };
1804
1805 assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");
1806
1807 CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
1808 if (isLegalFCmpPredicate(InvPred)) {
1809 Pred = InvPred;
1810 std::swap(LHS, RHS);
1811 return true;
1812 }
1813
1814 InvPred = CmpInst::getInversePredicate(Pred);
1815 NeedInvert = true;
1816 if (isLegalFCmpPredicate(InvPred)) {
1817 Pred = InvPred;
1818 return true;
1819 }
1820 InvPred = CmpInst::getSwappedPredicate(InvPred);
1821 if (isLegalFCmpPredicate(InvPred)) {
1822 Pred = InvPred;
1823 std::swap(LHS, RHS);
1824 return true;
1825 }
1826
1827 return false;
1828}
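// Worked examples (an editorial sketch, not part of the upstream file) of
// the three strategies tried above, in order:
//
//   FCMP_OGT a, b  ->  swap           ->  (FLT b, a)
//   FCMP_UGE a, b  ->  invert         ->  (XORI (FLT a, b), 1)
//   FCMP_ULT a, b  ->  invert + swap  ->  (XORI (FLE b, a), 1)
//
// OGT has a legal swapped form and UGE a legal inverse, while ULT only
// becomes legal after inverting to OGE and then swapping to OLE.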
1829
1830// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
1831// the result in DstReg.
1832// FIXME: Maybe we should expand this earlier.
1833bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
1834 MachineIRBuilder &MIB) const {
1835 auto &CmpMI = cast<GFCmp>(MI);
1836 CmpInst::Predicate Pred = CmpMI.getCond();
1837
1838 Register DstReg = CmpMI.getReg(0);
1839 Register LHS = CmpMI.getLHSReg();
1840 Register RHS = CmpMI.getRHSReg();
1841
1842 unsigned Size = MRI->getType(LHS).getSizeInBits();
1843 assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");
1844
1845 Register TmpReg = DstReg;
1846
1847 bool NeedInvert = false;
1848 // First try swapping operands or inverting.
1849 if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
1850 if (NeedInvert)
1851 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1852 auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
1853 if (!Cmp.constrainAllUses(TII, TRI, RBI))
1854 return false;
1855 } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
1856 // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
1857 NeedInvert = Pred == CmpInst::FCMP_UEQ;
1858 auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
1859 {&RISCV::GPRRegClass}, {LHS, RHS});
1860 if (!Cmp1.constrainAllUses(TII, TRI, RBI))
1861 return false;
1862 auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
1863 {&RISCV::GPRRegClass}, {RHS, LHS});
1864 if (!Cmp2.constrainAllUses(TII, TRI, RBI))
1865 return false;
1866 if (NeedInvert)
1867 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1868 auto Or =
1869 MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1870 if (!Or.constrainAllUses(TII, TRI, RBI))
1871 return false;
1872 } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
1873 // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
1874 // FIXME: If LHS and RHS are the same we can use a single FEQ.
1875 NeedInvert = Pred == CmpInst::FCMP_UNO;
1876 auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
1877 {&RISCV::GPRRegClass}, {LHS, LHS});
1878 if (!Cmp1.constrainAllUses(TII, TRI, RBI))
1879 return false;
1880 auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
1881 {&RISCV::GPRRegClass}, {RHS, RHS});
1882 if (!Cmp2.constrainAllUses(TII, TRI, RBI))
1883 return false;
1884 if (NeedInvert)
1885 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1886 auto And =
1887 MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1888 if (!And.constrainAllUses(TII, TRI, RBI))
1889 return false;
1890 } else
1891 llvm_unreachable("Unhandled predicate");
1892
1893 // Emit an XORI to invert the result if needed.
1894 if (NeedInvert) {
1895 auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
1896 if (!Xor.constrainAllUses(TII, TRI, RBI))
1897 return false;
1898 }
1899
1900 MI.eraseFromParent();
1901 return true;
1902}
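// Example (an editorial sketch, not part of the upstream file): for
//
//   %d:gprb = G_FCMP floatpred(uno), %a:fprb(s32), %b:fprb(s32)
//
// the FCMP_ORD/FCMP_UNO arm above emits
//
//   %t1:gpr = FEQ_S %a, %a   ; 0 iff %a is NaN
//   %t2:gpr = FEQ_S %b, %b   ; 0 iff %b is NaN
//   %t3:gpr = AND %t1, %t2   ; ordered(%a, %b)
//   %d:gpr  = XORI %t3, 1    ; unordered = NOT ordered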
1903
1904void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
1905 SyncScope::ID FenceSSID,
1906 MachineIRBuilder &MIB) const {
1907 if (STI.hasStdExtZtso()) {
1908 // The only fence that needs an instruction is a sequentially-consistent
1909 // cross-thread fence.
1910 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
1911 FenceSSID == SyncScope::System) {
1912 // fence rw, rw
1913 MIB.buildInstr(RISCV::FENCE, {}, {})
1914 .addImm(RISCVFenceField::R | RISCVFenceField::W)
1915 .addImm(RISCVFenceField::R | RISCVFenceField::W);
1916 return;
1917 }
1918
1919 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
1920 MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
1921 return;
1922 }
1923
1924 // singlethread fences only synchronize with signal handlers on the same
1925 // thread and thus only need to preserve instruction order, not actually
1926 // enforce memory ordering.
1927 if (FenceSSID == SyncScope::SingleThread) {
1928 MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
1929 return;
1930 }
1931
1932 // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
1933 // Manual: Volume I.
1934 unsigned Pred, Succ;
1935 switch (FenceOrdering) {
1936 default:
1937 llvm_unreachable("Unexpected ordering");
1938 case AtomicOrdering::AcquireRelease:
1939 // fence acq_rel -> fence.tso
1940 MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
1941 return;
1942 case AtomicOrdering::Acquire:
1943 // fence acquire -> fence r, rw
1944 Pred = RISCVFenceField::R;
1945 Succ = RISCVFenceField::R | RISCVFenceField::W;
1946 break;
1947 case AtomicOrdering::Release:
1948 // fence release -> fence rw, w
1949 Pred = RISCVFenceField::R | RISCVFenceField::W;
1950 Succ = RISCVFenceField::W;
1951 break;
1952 case AtomicOrdering::SequentiallyConsistent:
1953 // fence seq_cst -> fence rw, rw
1954 Pred = RISCVFenceField::R | RISCVFenceField::W;
1955 Succ = RISCVFenceField::R | RISCVFenceField::W;
1956 break;
1957 }
1958 MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
1959}
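// Note (editorial, not part of the upstream file): RISCVFenceField encodes
// the FENCE operand bits as I=8, O=4, R=2, W=1, so the "fence r, rw" emitted
// for acquire above carries Pred = 0b0010 and Succ = 0b0011 in the
// instruction's predecessor/successor fields.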
1960
1961namespace llvm {
1962InstructionSelector *
1963createRISCVInstructionSelector(const RISCVTargetMachine &TM,
1964 const RISCVSubtarget &Subtarget,
1965 const RISCVRegisterBankInfo &RBI) {
1966 return new RISCVInstructionSelector(TM, Subtarget, RBI);
1967}
1968} // end namespace llvm
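// Usage note (editorial, not part of the upstream file): this factory is
// invoked when the RISC-V subtarget builds its GlobalISel support objects,
// and the InstructionSelect machine pass then drives the returned selector,
// calling setupMF() once per function and select() per generic instruction.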