LLVM 22.0.0git
RISCVInstructionSelector.cpp
1//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// RISC-V.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
14#include "MCTargetDesc/RISCVMatInt.h"
15#include "RISCVRegisterBankInfo.h"
16#include "RISCVSubtarget.h"
17#include "RISCVTargetMachine.h"
18#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
19#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
20#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
21#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
22#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
23#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
24#include "llvm/CodeGen/MachineJumpTableInfo.h"
25#include "llvm/IR/IntrinsicsRISCV.h"
26#include "llvm/Support/Debug.h"
27
28#define DEBUG_TYPE "riscv-isel"
29
30using namespace llvm;
31using namespace MIPatternMatch;
32
33#define GET_GLOBALISEL_PREDICATE_BITSET
34#include "RISCVGenGlobalISel.inc"
35#undef GET_GLOBALISEL_PREDICATE_BITSET
36
37namespace {
38
39class RISCVInstructionSelector : public InstructionSelector {
40public:
41 RISCVInstructionSelector(const RISCVTargetMachine &TM,
42 const RISCVSubtarget &STI,
43 const RISCVRegisterBankInfo &RBI);
44
45 bool select(MachineInstr &MI) override;
46
47 void setupMF(MachineFunction &MF, GISelValueTracking *VT,
48 CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
49 BlockFrequencyInfo *BFI) override {
50 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
51 MRI = &MF.getRegInfo();
52 }
53
54 static const char *getName() { return DEBUG_TYPE; }
55
56private:
57 const TargetRegisterClass *
58 getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;
59
60 static constexpr unsigned MaxRecursionDepth = 6;
61
62 bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
63 const unsigned Depth = 0) const;
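// Convenience wrappers: do all users read only the low 16 (H) or 32 (W) bits?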
64 bool hasAllHUsers(const MachineInstr &MI) const {
65 return hasAllNBitUsers(MI, 16);
66 }
67 bool hasAllWUsers(const MachineInstr &MI) const {
68 return hasAllNBitUsers(MI, 32);
69 }
70
71 bool isRegInGprb(Register Reg) const;
72 bool isRegInFprb(Register Reg) const;
73
74 // tblgen-erated 'select' implementation, used as the initial selector for
75 // the patterns that don't require complex C++.
76 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
77
78 // A lowering phase that runs before any selection attempts.
79 // Returns true if the instruction was modified.
80 void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);
81
82 bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);
83
84 // Custom selection methods
85 bool selectCopy(MachineInstr &MI) const;
86 bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB) const;
87 bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
88 bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
89 bool IsExternWeak = false) const;
90 bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB) const;
91 bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB) const;
92 void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
93 MachineIRBuilder &MIB) const;
94 bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;
95 void addVectorLoadStoreOperands(MachineInstr &I,
96 SmallVectorImpl<SrcOp> &SrcOps,
97 unsigned &CurOp, bool IsMasked,
98 bool IsStridedOrIndexed,
99 LLT *IndexVT = nullptr) const;
100 bool selectIntrinsicWithSideEffects(MachineInstr &I,
101 MachineIRBuilder &MIB) const;
102 bool selectExtractSubvector(MachineInstr &MI, MachineIRBuilder &MIB) const;
103
104 ComplexRendererFns selectShiftMask(MachineOperand &Root,
105 unsigned ShiftWidth) const;
106 ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
107 return selectShiftMask(Root, STI.getXLen());
108 }
109 ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
110 return selectShiftMask(Root, 32);
111 }
112 ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;
113
114 ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
115 template <unsigned Bits>
116 ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
117 return selectSExtBits(Root, Bits);
118 }
119
120 ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
121 template <unsigned Bits>
122 ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
123 return selectZExtBits(Root, Bits);
124 }
125
126 ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
127 template <unsigned ShAmt>
128 ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
129 return selectSHXADDOp(Root, ShAmt);
130 }
131
132 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
133 unsigned ShAmt) const;
134 template <unsigned ShAmt>
135 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
136 return selectSHXADD_UWOp(Root, ShAmt);
137 }
138
139 ComplexRendererFns renderVLOp(MachineOperand &Root) const;
140
141 // Custom renderers for tablegen
142 void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
143 int OpIdx) const;
144 void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
145 int OpIdx) const;
146 void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
147 int OpIdx) const;
148 void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
149 int OpIdx) const;
150 void renderFrameIndex(MachineInstrBuilder &MIB, const MachineInstr &MI,
151 int OpIdx) const;
152
153 void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
154 int OpIdx) const;
155 void renderXLenSubTrailingOnes(MachineInstrBuilder &MIB,
156 const MachineInstr &MI, int OpIdx) const;
157
158 void renderAddiPairImmLarge(MachineInstrBuilder &MIB, const MachineInstr &MI,
159 int OpIdx) const;
160 void renderAddiPairImmSmall(MachineInstrBuilder &MIB, const MachineInstr &MI,
161 int OpIdx) const;
162
163 const RISCVSubtarget &STI;
164 const RISCVInstrInfo &TII;
165 const RISCVRegisterInfo &TRI;
166 const RISCVRegisterBankInfo &RBI;
167 const RISCVTargetMachine &TM;
168
169 MachineRegisterInfo *MRI = nullptr;
170
171 // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
172 // uses "STI." in the code generated by TableGen. We need to unify the name of
173 // Subtarget variable.
174 const RISCVSubtarget *Subtarget = &STI;
175
176#define GET_GLOBALISEL_PREDICATES_DECL
177#include "RISCVGenGlobalISel.inc"
178#undef GET_GLOBALISEL_PREDICATES_DECL
179
180#define GET_GLOBALISEL_TEMPORARIES_DECL
181#include "RISCVGenGlobalISel.inc"
182#undef GET_GLOBALISEL_TEMPORARIES_DECL
183};
184
185} // end anonymous namespace
186
187#define GET_GLOBALISEL_IMPL
188#include "RISCVGenGlobalISel.inc"
189#undef GET_GLOBALISEL_IMPL
190
191RISCVInstructionSelector::RISCVInstructionSelector(
192 const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
193 const RISCVRegisterBankInfo &RBI)
194 : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
195 TM(TM),
196
197#define GET_GLOBALISEL_PREDICATES_INIT
198#include "RISCVGenGlobalISel.inc"
199#undef GET_GLOBALISEL_PREDICATES_INIT
200#define GET_GLOBALISEL_TEMPORARIES_INIT
201#include "RISCVGenGlobalISel.inc"
202#undef GET_GLOBALISEL_TEMPORARIES_INIT
203{
204}
205
206// Mimics optimizations in ISel and RISCVOptWInst Pass
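// Returns true when every user of MI only reads the low Bits bits of its
// result, e.g. allowing a 64-bit operation to be selected as its W-form.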
207bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
208 unsigned Bits,
209 const unsigned Depth) const {
210
211 assert((MI.getOpcode() == TargetOpcode::G_ADD ||
212 MI.getOpcode() == TargetOpcode::G_SUB ||
213 MI.getOpcode() == TargetOpcode::G_MUL ||
214 MI.getOpcode() == TargetOpcode::G_SHL ||
215 MI.getOpcode() == TargetOpcode::G_LSHR ||
216 MI.getOpcode() == TargetOpcode::G_AND ||
217 MI.getOpcode() == TargetOpcode::G_OR ||
218 MI.getOpcode() == TargetOpcode::G_XOR ||
219 MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
220 "Unexpected opcode");
221
222 if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
223 return false;
224
225 auto DestReg = MI.getOperand(0).getReg();
226 for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
227 assert(UserOp.getParent() && "UserOp must have a parent");
228 const MachineInstr &UserMI = *UserOp.getParent();
229 unsigned OpIdx = UserOp.getOperandNo();
230
231 switch (UserMI.getOpcode()) {
232 default:
233 return false;
234 case RISCV::ADDW:
235 case RISCV::ADDIW:
236 case RISCV::SUBW:
237 case RISCV::FCVT_D_W:
238 case RISCV::FCVT_S_W:
239 if (Bits >= 32)
240 break;
241 return false;
242 case RISCV::SLL:
243 case RISCV::SRA:
244 case RISCV::SRL:
245 // Shift amount operands only use log2(XLen) bits.
246 if (OpIdx == 2 && Bits >= Log2_32(Subtarget->getXLen()))
247 break;
248 return false;
249 case RISCV::SLLI:
250 // SLLI only uses the lower (XLen - ShAmt) bits.
251 if (Bits >= Subtarget->getXLen() - UserMI.getOperand(2).getImm())
252 break;
253 return false;
254 case RISCV::ANDI:
255 if (Bits >= (unsigned)llvm::bit_width<uint64_t>(
256 (uint64_t)UserMI.getOperand(2).getImm()))
257 break;
258 goto RecCheck;
259 case RISCV::AND:
260 case RISCV::OR:
261 case RISCV::XOR:
262 RecCheck:
263 if (hasAllNBitUsers(UserMI, Bits, Depth + 1))
264 break;
265 return false;
266 case RISCV::SRLI: {
267 unsigned ShAmt = UserMI.getOperand(2).getImm();
268 // If we are shifting right by less than Bits, and users don't demand any
269 // bits that were shifted into [Bits-1:0], then we can consider this as an
270 // N-Bit user.
271 if (Bits > ShAmt && hasAllNBitUsers(UserMI, Bits - ShAmt, Depth + 1))
272 break;
273 return false;
274 }
275 }
276 }
277
278 return true;
279}
280
281InstructionSelector::ComplexRendererFns
282RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
283 unsigned ShiftWidth) const {
284 if (!Root.isReg())
285 return std::nullopt;
286
287 using namespace llvm::MIPatternMatch;
288
289 Register ShAmtReg = Root.getReg();
290 // Peek through zext.
291 Register ZExtSrcReg;
292 if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg))))
293 ShAmtReg = ZExtSrcReg;
294
295 APInt AndMask;
296 Register AndSrcReg;
297 // Try to combine the following pattern (applicable to other shift
298 // instructions as well as 32-bit ones):
299 //
300 // %4:gprb(s64) = G_AND %3, %2
301 // %5:gprb(s64) = G_LSHR %1, %4(s64)
302 //
303 // According to RISC-V's ISA manual, SLL, SRL, and SRA ignore other bits than
304 // the lowest log2(XLEN) bits of register rs2. As for the above pattern, if
305 // the lowest log2(XLEN) bits of register rd and rs2 of G_AND are the same,
306 // then it can be eliminated. Given register rs1 or rs2 holding a constant
307 // (the and mask), there are two cases G_AND can be erased:
308 //
309 // 1. the lowest log2(XLEN) bits of the and mask are all set
310 // 2. the bits of the register being masked are already unset (zero set)
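 // For example, with XLEN=64 a mask of 63 covers all six shift-amount bits, so
 // (G_LSHR x, (G_AND y, 63)) can be selected as (SRL x, y) and the G_AND dropped.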
311 if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
312 APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
313 if (ShMask.isSubsetOf(AndMask)) {
314 ShAmtReg = AndSrcReg;
315 } else {
316 // SimplifyDemandedBits may have optimized the mask so try restoring any
317 // bits that are known zero.
318 KnownBits Known = VT->getKnownBits(AndSrcReg);
319 if (ShMask.isSubsetOf(AndMask | Known.Zero))
320 ShAmtReg = AndSrcReg;
321 }
322 }
323
324 APInt Imm;
325 Register Reg;
326 if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
327 if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
328 // If we are shifting by X+N where N == 0 mod Size, then just shift by X
329 // to avoid the ADD.
330 ShAmtReg = Reg;
331 } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
332 if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
333 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
334 // to generate a NEG instead of a SUB of a constant.
335 ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
336 unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
337 return {{[=](MachineInstrBuilder &MIB) {
338 MachineIRBuilder(*MIB.getInstr())
339 .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
340 MIB.addReg(ShAmtReg);
341 }}};
342 }
343 if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
344 // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
345 // to generate a NOT instead of a SUB of a constant.
346 ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
347 return {{[=](MachineInstrBuilder &MIB) {
348 MachineIRBuilder(*MIB.getInstr())
349 .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
350 .addImm(-1);
351 MIB.addReg(ShAmtReg);
352 }}};
353 }
354 }
355
356 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
357}
358
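// Match an operand that is already sign-extended from Bits bits, either via an
// explicit G_SEXT_INREG or because enough sign bits are known.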
359InstructionSelector::ComplexRendererFns
360RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
361 unsigned Bits) const {
362 if (!Root.isReg())
363 return std::nullopt;
364 Register RootReg = Root.getReg();
365 MachineInstr *RootDef = MRI->getVRegDef(RootReg);
366
367 if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
368 RootDef->getOperand(2).getImm() == Bits) {
369 return {
370 {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};
371 }
372
373 unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
374 if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
375 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
376
377 return std::nullopt;
378}
379
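// Match an operand whose bits above Bits are known to be zero: an AND with the
// corresponding mask, a G_ZEXT from a Bits-wide value, or via known-bits analysis.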
380InstructionSelector::ComplexRendererFns
381RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
382 unsigned Bits) const {
383 if (!Root.isReg())
384 return std::nullopt;
385 Register RootReg = Root.getReg();
386
387 Register RegX;
388 uint64_t Mask = maskTrailingOnes<uint64_t>(Bits);
389 if (mi_match(RootReg, *MRI, m_GAnd(m_Reg(RegX), m_SpecificICst(Mask)))) {
390 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
391 }
392
393 if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
394 MRI->getType(RegX).getScalarSizeInBits() == Bits)
395 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
396
397 unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
398 if (VT->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
399 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
400
401 return std::nullopt;
402}
403
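// Match the addend operand of a Zba sh1add/sh2add/sh3add, folding a surrounding
// shift+mask pair into an explicit SRLI/SRLIW when profitable.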
404InstructionSelector::ComplexRendererFns
405RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
406 unsigned ShAmt) const {
407 using namespace llvm::MIPatternMatch;
408
409 if (!Root.isReg())
410 return std::nullopt;
411 Register RootReg = Root.getReg();
412
413 const unsigned XLen = STI.getXLen();
414 APInt Mask, C2;
415 Register RegY;
416 std::optional<bool> LeftShift;
417 // (and (shl y, c2), mask)
418 if (mi_match(RootReg, *MRI,
419 m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
420 LeftShift = true;
421 // (and (lshr y, c2), mask)
422 else if (mi_match(RootReg, *MRI,
423 m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
424 LeftShift = false;
425
426 if (LeftShift.has_value()) {
427 if (*LeftShift)
428 Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
429 else
430 Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());
431
432 if (Mask.isShiftedMask()) {
433 unsigned Leading = XLen - Mask.getActiveBits();
434 unsigned Trailing = Mask.countr_zero();
435 // Given (and (shl y, c2), mask) in which mask has no leading zeros and
436 // c3 trailing zeros. We can use an SRLI by c3 - c2 followed by a SHXADD.
437 if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
438 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
439 return {{[=](MachineInstrBuilder &MIB) {
440 MachineIRBuilder(*MIB.getInstr())
441 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
442 .addImm(Trailing - C2.getLimitedValue());
443 MIB.addReg(DstReg);
444 }}};
445 }
446
447 // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
448 // c3 trailing zeros. We can use an SRLI by c2 + c3 followed by a SHXADD.
449 if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
450 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
451 return {{[=](MachineInstrBuilder &MIB) {
452 MachineIRBuilder(*MIB.getInstr())
453 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
454 .addImm(Leading + Trailing);
455 MIB.addReg(DstReg);
456 }}};
457 }
458 }
459 }
460
461 LeftShift.reset();
462
463 // (shl (and y, mask), c2)
464 if (mi_match(RootReg, *MRI,
465 m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
466 m_ICst(C2))))
467 LeftShift = true;
468 // (lshr (and y, mask), c2)
469 else if (mi_match(RootReg, *MRI,
470 m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
471 m_ICst(C2))))
472 LeftShift = false;
473
474 if (LeftShift.has_value() && Mask.isShiftedMask()) {
475 unsigned Leading = XLen - Mask.getActiveBits();
476 unsigned Trailing = Mask.countr_zero();
477
478 // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
479 // c3 trailing zeros. If c2 + c3 == ShAmt, we can emit SRLIW + SHXADD.
480 bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
481 (Trailing + C2.getLimitedValue()) == ShAmt;
482 if (!Cond)
483 // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
484 // c3 trailing zeros. If c3 - c2 == ShAmt, we can emit SRLIW + SHXADD.
485 Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
486 (Trailing - C2.getLimitedValue()) == ShAmt;
487
488 if (Cond) {
489 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
490 return {{[=](MachineInstrBuilder &MIB) {
491 MachineIRBuilder(*MIB.getInstr())
492 .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
493 .addImm(Trailing);
494 MIB.addReg(DstReg);
495 }}};
496 }
497 }
498
499 return std::nullopt;
500}
501
502InstructionSelector::ComplexRendererFns
503RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
504 unsigned ShAmt) const {
505 using namespace llvm::MIPatternMatch;
506
507 if (!Root.isReg())
508 return std::nullopt;
509 Register RootReg = Root.getReg();
510
511 // Given (and (shl x, c2), mask) in which mask is a shifted mask with
512 // 32 - ShAmt leading zeros and c2 trailing zeros. We can use SLLI by
513 // c2 - ShAmt followed by SHXADD_UW with ShAmt for x amount.
514 APInt Mask, C2;
515 Register RegX;
516 if (mi_match(
517 RootReg, *MRI,
518 m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
519 m_ICst(Mask))))) {
520 Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
521
522 if (Mask.isShiftedMask()) {
523 unsigned Leading = Mask.countl_zero();
524 unsigned Trailing = Mask.countr_zero();
525 if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
526 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
527 return {{[=](MachineInstrBuilder &MIB) {
528 MachineIRBuilder(*MIB.getInstr())
529 .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
530 .addImm(C2.getLimitedValue() - ShAmt);
531 MIB.addReg(DstReg);
532 }}};
533 }
534 }
535 }
536
537 return std::nullopt;
538}
539
540InstructionSelector::ComplexRendererFns
541RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
542 assert(Root.isReg() && "Expected operand to be a Register");
543 MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
544
545 if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
546 auto C = RootDef->getOperand(1).getCImm();
547 if (C->getValue().isAllOnes())
548 // If the operand is a G_CONSTANT with value of all ones it is larger than
549 // VLMAX. We convert it to an immediate with value VLMaxSentinel. This is
550 // recognized specially by the vsetvli insertion pass.
551 return {{[=](MachineInstrBuilder &MIB) {
552 MIB.addImm(RISCV::VLMaxSentinel);
553 }}};
554
555 if (isUInt<5>(C->getZExtValue())) {
556 uint64_t ZExtC = C->getZExtValue();
557 return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
558 }
559 }
560 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
561}
562
563InstructionSelector::ComplexRendererFns
564RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
565 if (!Root.isReg())
566 return std::nullopt;
567
568 MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
569 if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
570 return {{
571 [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
572 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
573 }};
574 }
575
576 if (isBaseWithConstantOffset(Root, *MRI)) {
577 MachineOperand &LHS = RootDef->getOperand(1);
578 MachineOperand &RHS = RootDef->getOperand(2);
579 MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
580 MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
581
582 int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
583 if (isInt<12>(RHSC)) {
584 if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
585 return {{
586 [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
587 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
588 }};
589
590 return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
591 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
592 }
593 }
594
595 // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
596 // the combiner?
597 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
598 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
599}
600
601/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate CC.
602/// CC Must be an ICMP Predicate.
603static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
604 switch (CC) {
605 default:
606 llvm_unreachable("Expected ICMP CmpInst::Predicate.");
607 case CmpInst::Predicate::ICMP_EQ:
608 return RISCVCC::COND_EQ;
609 case CmpInst::Predicate::ICMP_NE:
610 return RISCVCC::COND_NE;
611 case CmpInst::Predicate::ICMP_ULT:
612 return RISCVCC::COND_LTU;
613 case CmpInst::Predicate::ICMP_SLT:
614 return RISCVCC::COND_LT;
615 case CmpInst::Predicate::ICMP_UGE:
616 return RISCVCC::COND_GEU;
617 case CmpInst::Predicate::ICMP_SGE:
618 return RISCVCC::COND_GE;
619 }
620}
621
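// Fold a G_ICMP feeding a branch or select into a RISC-V condition code plus
// LHS/RHS registers, canonicalizing to comparisons against X0 where possible.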
622 static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
623 Register &LHS, Register &RHS,
624 MachineRegisterInfo &MRI) {
625 // Try to fold an ICmp. If that fails, use a NE compare with X0.
626 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
627 if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
628 LHS = CondReg;
629 RHS = RISCV::X0;
630 CC = RISCVCC::COND_NE;
631 return;
632 }
633
634 // We found an ICmp, do some canonicalization.
635
636 // Adjust comparisons to use comparison with 0 if possible.
637 if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
638 switch (Pred) {
639 case CmpInst::ICMP_SGT:
640 // Convert X > -1 to X >= 0
641 if (*Constant == -1) {
642 CC = RISCVCC::COND_GE;
643 RHS = RISCV::X0;
644 return;
645 }
646 break;
647 case CmpInst::ICMP_SLT:
648 // Convert X < 1 to 0 >= X
649 if (*Constant == 1) {
650 CC = RISCVCC::COND_GE;
651 RHS = LHS;
652 LHS = RISCV::X0;
653 return;
654 }
655 break;
656 default:
657 break;
658 }
659 }
660
661 switch (Pred) {
662 default:
663 llvm_unreachable("Expected ICMP CmpInst::Predicate.");
664 case CmpInst::Predicate::ICMP_EQ:
665 case CmpInst::Predicate::ICMP_NE:
666 case CmpInst::Predicate::ICMP_ULT:
667 case CmpInst::Predicate::ICMP_SLT:
668 case CmpInst::Predicate::ICMP_UGE:
669 case CmpInst::Predicate::ICMP_SGE:
670 // These CCs are supported directly by RISC-V branches.
671 break;
672 case CmpInst::Predicate::ICMP_SGT:
673 case CmpInst::Predicate::ICMP_SLE:
674 case CmpInst::Predicate::ICMP_UGT:
675 case CmpInst::Predicate::ICMP_ULE:
676 // These CCs are not supported directly by RISC-V branches, but changing the
677 // direction of the CC and swapping LHS and RHS are.
678 Pred = CmpInst::getSwappedPredicate(Pred);
679 std::swap(LHS, RHS);
680 break;
681 }
682
683 CC = getRISCVCCFromICmp(Pred);
684}
685
686/// Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation
687/// \p GenericOpc, appropriate for the GPR register bank and of memory access
688/// size \p OpSize.
689static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
690 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
691 switch (OpSize) {
692 default:
693 llvm_unreachable("Unexpected memory size");
694 case 8:
695 return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
696 case 16:
697 return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
698 case 32:
699 return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
700 case 64:
701 return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
702 }
703}
704
705/// Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation
706/// \p GenericOpc, appropriate for the GPR register bank and of memory access
707/// size \p OpSize. \returns \p GenericOpc if the combination is unsupported.
708static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
709 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
710 switch (OpSize) {
711 case 8:
712 // Prefer unsigned due to no c.lb in Zcb.
713 return IsStore ? RISCV::SB : RISCV::LBU;
714 case 16:
715 return IsStore ? RISCV::SH : RISCV::LH;
716 case 32:
717 return IsStore ? RISCV::SW : RISCV::LW;
718 case 64:
719 return IsStore ? RISCV::SD : RISCV::LD;
720 }
721
722 return GenericOpc;
723}
724
725void RISCVInstructionSelector::addVectorLoadStoreOperands(
726 MachineInstr &I, SmallVectorImpl<SrcOp> &SrcOps, unsigned &CurOp,
727 bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {
728 // Base Pointer
729 auto PtrReg = I.getOperand(CurOp++).getReg();
730 SrcOps.push_back(PtrReg);
731
732 // Stride or Index
733 if (IsStridedOrIndexed) {
734 auto StrideReg = I.getOperand(CurOp++).getReg();
735 SrcOps.push_back(StrideReg);
736 if (IndexVT)
737 *IndexVT = MRI->getType(StrideReg);
738 }
739
740 // Mask
741 if (IsMasked) {
742 auto MaskReg = I.getOperand(CurOp++).getReg();
743 SrcOps.push_back(MaskReg);
744 }
745}
746
747bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
748 MachineInstr &I, MachineIRBuilder &MIB) const {
749 // Find the intrinsic ID.
750 unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
751 // Select the instruction.
752 switch (IntrinID) {
753 default:
754 return false;
755 case Intrinsic::riscv_vlm:
756 case Intrinsic::riscv_vle:
757 case Intrinsic::riscv_vle_mask:
758 case Intrinsic::riscv_vlse:
759 case Intrinsic::riscv_vlse_mask: {
760 bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
761 IntrinID == Intrinsic::riscv_vlse_mask;
762 bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
763 IntrinID == Intrinsic::riscv_vlse_mask;
764 LLT VT = MRI->getType(I.getOperand(0).getReg());
765 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
766
767 // Result vector
768 const Register DstReg = I.getOperand(0).getReg();
769
770 // Sources
771 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
772 unsigned CurOp = 2;
773 SmallVector<SrcOp, 4> SrcOps; // Source registers.
774
775 // Passthru
776 if (HasPassthruOperand) {
777 auto PassthruReg = I.getOperand(CurOp++).getReg();
778 SrcOps.push_back(PassthruReg);
779 } else {
780 SrcOps.push_back(Register(RISCV::NoRegister));
781 }
782
783 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);
784
785 RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
786 const RISCV::VLEPseudo *P =
787 RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
788 static_cast<unsigned>(LMUL));
789
790 auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);
791
792 // Select VL
793 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
794 for (auto &RenderFn : *VLOpFn)
795 RenderFn(PseudoMI);
796
797 // SEW
798 PseudoMI.addImm(Log2SEW);
799
800 // Policy
801 uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
802 if (IsMasked)
803 Policy = I.getOperand(CurOp++).getImm();
804 PseudoMI.addImm(Policy);
805
806 // Memref
807 PseudoMI.cloneMemRefs(I);
808
809 I.eraseFromParent();
810 return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
811 }
812 case Intrinsic::riscv_vloxei:
813 case Intrinsic::riscv_vloxei_mask:
814 case Intrinsic::riscv_vluxei:
815 case Intrinsic::riscv_vluxei_mask: {
816 bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
817 IntrinID == Intrinsic::riscv_vluxei_mask;
818 bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
819 IntrinID == Intrinsic::riscv_vloxei_mask;
820 LLT VT = MRI->getType(I.getOperand(0).getReg());
821 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
822
823 // Result vector
824 const Register DstReg = I.getOperand(0).getReg();
825
826 // Sources
827 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
828 unsigned CurOp = 2;
829 SmallVector<SrcOp, 4> SrcOps; // Source registers.
830
831 // Passthru
832 if (HasPassthruOperand) {
833 auto PassthruReg = I.getOperand(CurOp++).getReg();
834 SrcOps.push_back(PassthruReg);
835 } else {
836 // Use NoRegister if there is no specified passthru.
837 SrcOps.push_back(Register());
838 }
839 LLT IndexVT;
840 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);
841
842 RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
843 RISCVVType::VLMUL IndexLMUL =
844 RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
845 unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
846 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
847 reportFatalUsageError("The V extension does not support EEW=64 for index "
848 "values when XLEN=32");
849 }
850 const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
851 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
852 static_cast<unsigned>(IndexLMUL));
853
854 auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);
855
856 // Select VL
857 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
858 for (auto &RenderFn : *VLOpFn)
859 RenderFn(PseudoMI);
860
861 // SEW
862 PseudoMI.addImm(Log2SEW);
863
864 // Policy
865 uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
866 if (IsMasked)
867 Policy = I.getOperand(CurOp++).getImm();
868 PseudoMI.addImm(Policy);
869
870 // Memref
871 PseudoMI.cloneMemRefs(I);
872
873 I.eraseFromParent();
874 return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
875 }
876 case Intrinsic::riscv_vsm:
877 case Intrinsic::riscv_vse:
878 case Intrinsic::riscv_vse_mask:
879 case Intrinsic::riscv_vsse:
880 case Intrinsic::riscv_vsse_mask: {
881 bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
882 IntrinID == Intrinsic::riscv_vsse_mask;
883 bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
884 IntrinID == Intrinsic::riscv_vsse_mask;
885 LLT VT = MRI->getType(I.getOperand(1).getReg());
886 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
887
888 // Sources
889 unsigned CurOp = 1;
890 SmallVector<SrcOp, 4> SrcOps; // Source registers.
891
892 // Store value
893 auto PassthruReg = I.getOperand(CurOp++).getReg();
894 SrcOps.push_back(PassthruReg);
895
896 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);
897
898 RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
899 const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
900 IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
901
902 auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);
903
904 // Select VL
905 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
906 for (auto &RenderFn : *VLOpFn)
907 RenderFn(PseudoMI);
908
909 // SEW
910 PseudoMI.addImm(Log2SEW);
911
912 // Memref
913 PseudoMI.cloneMemRefs(I);
914
915 I.eraseFromParent();
916 return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
917 }
918 case Intrinsic::riscv_vsoxei:
919 case Intrinsic::riscv_vsoxei_mask:
920 case Intrinsic::riscv_vsuxei:
921 case Intrinsic::riscv_vsuxei_mask: {
922 bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
923 IntrinID == Intrinsic::riscv_vsuxei_mask;
924 bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
925 IntrinID == Intrinsic::riscv_vsoxei_mask;
926 LLT VT = MRI->getType(I.getOperand(1).getReg());
927 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
928
929 // Sources
930 unsigned CurOp = 1;
931 SmallVector<SrcOp, 4> SrcOps; // Source registers.
932
933 // Store value
934 auto PassthruReg = I.getOperand(CurOp++).getReg();
935 SrcOps.push_back(PassthruReg);
936
937 LLT IndexVT;
938 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);
939
940 RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
941 RISCVVType::VLMUL IndexLMUL =
942 RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
943 unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
944 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
945 reportFatalUsageError("The V extension does not support EEW=64 for index "
946 "values when XLEN=32");
947 }
948 const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
949 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
950 static_cast<unsigned>(IndexLMUL));
951
952 auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);
953
954 // Select VL
955 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
956 for (auto &RenderFn : *VLOpFn)
957 RenderFn(PseudoMI);
958
959 // SEW
960 PseudoMI.addImm(Log2SEW);
961
962 // Memref
963 PseudoMI.cloneMemRefs(I);
964
965 I.eraseFromParent();
966 return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
967 }
968 }
969}
970
971bool RISCVInstructionSelector::selectExtractSubvector(
972 MachineInstr &MI, MachineIRBuilder &MIB) const {
973 assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);
974
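// A subvector extract at a subregister boundary becomes a plain COPY of the
// corresponding subregister; other indices are rejected by this selector.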
975 Register DstReg = MI.getOperand(0).getReg();
976 Register SrcReg = MI.getOperand(1).getReg();
977
978 LLT DstTy = MRI->getType(DstReg);
979 LLT SrcTy = MRI->getType(SrcReg);
980
981 unsigned Idx = static_cast<unsigned>(MI.getOperand(2).getImm());
982
983 MVT DstMVT = getMVTForLLT(DstTy);
984 MVT SrcMVT = getMVTForLLT(SrcTy);
985
986 unsigned SubRegIdx;
987 std::tie(SubRegIdx, Idx) =
988 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
989 SrcMVT, DstMVT, Idx, &TRI);
990
991 if (Idx != 0)
992 return false;
993
994 unsigned DstRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(DstMVT);
995 const TargetRegisterClass *DstRC = TRI.getRegClass(DstRegClassID);
996 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
997 return false;
998
999 unsigned SrcRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(SrcMVT);
1000 const TargetRegisterClass *SrcRC = TRI.getRegClass(SrcRegClassID);
1001 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
1002 return false;
1003
1004 MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {}).addReg(SrcReg, 0, SubRegIdx);
1005
1006 MI.eraseFromParent();
1007 return true;
1008}
1009
1010bool RISCVInstructionSelector::select(MachineInstr &MI) {
1011 MachineIRBuilder MIB(MI);
1012
1013 preISelLower(MI, MIB);
1014 const unsigned Opc = MI.getOpcode();
1015
1016 if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
1017 if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
1018 const Register DefReg = MI.getOperand(0).getReg();
1019 const LLT DefTy = MRI->getType(DefReg);
1020
1021 const RegClassOrRegBank &RegClassOrBank =
1022 MRI->getRegClassOrRegBank(DefReg);
1023
1024 const TargetRegisterClass *DefRC =
1025 dyn_cast_if_present<const TargetRegisterClass *>(RegClassOrBank);
1026 if (!DefRC) {
1027 if (!DefTy.isValid()) {
1028 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
1029 return false;
1030 }
1031
1032 const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
1033 DefRC = getRegClassForTypeOnBank(DefTy, RB);
1034 if (!DefRC) {
1035 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
1036 return false;
1037 }
1038 }
1039
1040 MI.setDesc(TII.get(TargetOpcode::PHI));
1041 return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
1042 }
1043
1044 // Certain non-generic instructions also need some special handling.
1045 if (MI.isCopy())
1046 return selectCopy(MI);
1047
1048 return true;
1049 }
1050
1051 if (selectImpl(MI, *CoverageInfo))
1052 return true;
1053
1054 switch (Opc) {
1055 case TargetOpcode::G_ANYEXT:
1056 case TargetOpcode::G_PTRTOINT:
1057 case TargetOpcode::G_INTTOPTR:
1058 case TargetOpcode::G_TRUNC:
1059 case TargetOpcode::G_FREEZE:
1060 return selectCopy(MI);
1061 case TargetOpcode::G_CONSTANT: {
1062 Register DstReg = MI.getOperand(0).getReg();
1063 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
1064
1065 if (!materializeImm(DstReg, Imm, MIB))
1066 return false;
1067
1068 MI.eraseFromParent();
1069 return true;
1070 }
1071 case TargetOpcode::G_ZEXT:
1072 case TargetOpcode::G_SEXT: {
1073 bool IsSigned = Opc != TargetOpcode::G_ZEXT;
1074 Register DstReg = MI.getOperand(0).getReg();
1075 Register SrcReg = MI.getOperand(1).getReg();
1076 LLT SrcTy = MRI->getType(SrcReg);
1077 unsigned SrcSize = SrcTy.getSizeInBits();
1078
1079 if (SrcTy.isVector())
1080 return false; // Should be handled by imported patterns.
1081
1082 assert((*RBI.getRegBank(DstReg, *MRI, TRI)).getID() ==
1083 RISCV::GPRBRegBankID &&
1084 "Unexpected ext regbank");
1085
1086 // Use addiw SrcReg, 0 (sext.w) for i32.
1087 if (IsSigned && SrcSize == 32) {
1088 MI.setDesc(TII.get(RISCV::ADDIW));
1089 MI.addOperand(MachineOperand::CreateImm(0));
1090 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
1091 }
1092
1093 // Use add.uw SrcReg, X0 (zext.w) for i32 with Zba.
1094 if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
1095 MI.setDesc(TII.get(RISCV::ADD_UW));
1096 MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
1097 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
1098 }
1099
1100 // Use sext.h/zext.h for i16 with Zbb.
1101 if (SrcSize == 16 && STI.hasStdExtZbb()) {
1102 MI.setDesc(TII.get(IsSigned ? RISCV::SEXT_H
1103 : STI.isRV64() ? RISCV::ZEXT_H_RV64
1104 : RISCV::ZEXT_H_RV32));
1105 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
1106 }
1107
1108 // Use pack(w) SrcReg, X0 for i16 zext with Zbkb.
1109 if (!IsSigned && SrcSize == 16 && STI.hasStdExtZbkb()) {
1110 MI.setDesc(TII.get(STI.is64Bit() ? RISCV::PACKW : RISCV::PACK));
1111 MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
1112 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
1113 }
1114
1115 // Fall back to shift pair.
1116 auto ShiftLeft =
1117 MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {SrcReg})
1118 .addImm(STI.getXLen() - SrcSize);
1119 constrainSelectedInstRegOperands(*ShiftLeft, TII, TRI, RBI);
1120 auto ShiftRight = MIB.buildInstr(IsSigned ? RISCV::SRAI : RISCV::SRLI,
1121 {DstReg}, {ShiftLeft})
1122 .addImm(STI.getXLen() - SrcSize);
1123 constrainSelectedInstRegOperands(*ShiftRight, TII, TRI, RBI);
1124 MI.eraseFromParent();
1125 return true;
1126 }
1127 case TargetOpcode::G_FCONSTANT: {
1128 // TODO: Use constant pool for complex constants.
1129 Register DstReg = MI.getOperand(0).getReg();
1130 const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
1131 unsigned Size = MRI->getType(DstReg).getSizeInBits();
1132 if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
1133 Register GPRReg;
1134 if (FPimm.isPosZero()) {
1135 GPRReg = RISCV::X0;
1136 } else {
1137 GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1138 APInt Imm = FPimm.bitcastToAPInt();
1139 if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
1140 return false;
1141 }
1142
1143 unsigned Opcode = Size == 64 ? RISCV::FMV_D_X
1144 : Size == 32 ? RISCV::FMV_W_X
1145 : RISCV::FMV_H_X;
1146 auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
1147 if (!FMV.constrainAllUses(TII, TRI, RBI))
1148 return false;
1149 } else {
1150 // s64 on rv32
1151 assert(Size == 64 && !Subtarget->is64Bit() &&
1152 "Unexpected size or subtarget");
1153
1154 if (FPimm.isPosZero()) {
1155 // Optimize +0.0 to use fcvt.d.w
1156 MachineInstrBuilder FCVT =
1157 MIB.buildInstr(RISCV::FCVT_D_W, {DstReg}, {Register(RISCV::X0)})
1158 .addImm(RISCVFPRndMode::RNE);
1159 if (!FCVT.constrainAllUses(TII, TRI, RBI))
1160 return false;
1161
1162 MI.eraseFromParent();
1163 return true;
1164 }
1165
1166 // Split into two pieces and build through the stack.
1167 Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1168 Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1169 APInt Imm = FPimm.bitcastToAPInt();
1170 if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
1171 MIB))
1172 return false;
1173 if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
1174 return false;
1175 MachineInstrBuilder PairF64 = MIB.buildInstr(
1176 RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
1177 if (!PairF64.constrainAllUses(TII, TRI, RBI))
1178 return false;
1179 }
1180
1181 MI.eraseFromParent();
1182 return true;
1183 }
1184 case TargetOpcode::G_GLOBAL_VALUE: {
1185 auto *GV = MI.getOperand(1).getGlobal();
1186 if (GV->isThreadLocal()) {
1187 // TODO: implement this case.
1188 return false;
1189 }
1190
1191 return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
1192 }
1193 case TargetOpcode::G_JUMP_TABLE:
1194 case TargetOpcode::G_CONSTANT_POOL:
1195 return selectAddr(MI, MIB, MRI);
1196 case TargetOpcode::G_BRCOND: {
1197 Register LHS, RHS;
1198 RISCVCC::CondCode CC;
1199 getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);
1200
1201 auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
1202 .addMBB(MI.getOperand(1).getMBB());
1203 MI.eraseFromParent();
1204 return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
1205 }
1206 case TargetOpcode::G_BRINDIRECT:
1207 MI.setDesc(TII.get(RISCV::PseudoBRIND));
1208 MI.addOperand(MachineOperand::CreateImm(0));
1209 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
1210 case TargetOpcode::G_SELECT:
1211 return selectSelect(MI, MIB);
1212 case TargetOpcode::G_FCMP:
1213 return selectFPCompare(MI, MIB);
1214 case TargetOpcode::G_FENCE: {
1215 AtomicOrdering FenceOrdering =
1216 static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
1217 SyncScope::ID FenceSSID =
1218 static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
1219 emitFence(FenceOrdering, FenceSSID, MIB);
1220 MI.eraseFromParent();
1221 return true;
1222 }
1223 case TargetOpcode::G_IMPLICIT_DEF:
1224 return selectImplicitDef(MI, MIB);
1225 case TargetOpcode::G_UNMERGE_VALUES:
1226 return selectUnmergeValues(MI, MIB);
1227 case TargetOpcode::G_LOAD:
1228 case TargetOpcode::G_STORE: {
1229 GLoadStore &LdSt = cast<GLoadStore>(MI);
1230 const Register ValReg = LdSt.getReg(0);
1231 const Register PtrReg = LdSt.getPointerReg();
1232 LLT PtrTy = MRI->getType(PtrReg);
1233
1234 const RegisterBank &RB = *RBI.getRegBank(ValReg, *MRI, TRI);
1235 if (RB.getID() != RISCV::GPRBRegBankID)
1236 return false;
1237
1238#ifndef NDEBUG
1239 const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, *MRI, TRI);
1240 // Check that the pointer register is valid.
1241 assert(PtrRB.getID() == RISCV::GPRBRegBankID &&
1242 "Load/Store pointer operand isn't a GPR");
1243 assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");
1244#endif
1245
1246 // Can only handle AddressSpace 0.
1247 if (PtrTy.getAddressSpace() != 0)
1248 return false;
1249
1250 unsigned MemSize = LdSt.getMemSizeInBits().getValue();
1251 AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();
1252
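// Acquire/release (or stronger) accesses use the Zalasr load-acquire and
// store-release forms; monotonic and weaker accesses use the plain reg+imm forms below.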
1253 if (isStrongerThanMonotonic(Order)) {
1254 MI.setDesc(TII.get(selectZalasrLoadStoreOp(Opc, MemSize)));
1255 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
1256 }
1257
1258 const unsigned NewOpc = selectRegImmLoadStoreOp(MI.getOpcode(), MemSize);
1259 if (NewOpc == MI.getOpcode())
1260 return false;
1261
1262 // Check if we can fold anything into the addressing mode.
1263 auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
1264 if (!AddrModeFns)
1265 return false;
1266
1267 // Folded something. Create a new instruction and return it.
1268 auto NewInst = MIB.buildInstr(NewOpc, {}, {}, MI.getFlags());
1269 if (isa<GStore>(MI))
1270 NewInst.addUse(ValReg);
1271 else
1272 NewInst.addDef(ValReg);
1273 NewInst.cloneMemRefs(MI);
1274 for (auto &Fn : *AddrModeFns)
1275 Fn(NewInst);
1276 MI.eraseFromParent();
1277
1278 return constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
1279 }
1280 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1281 return selectIntrinsicWithSideEffects(MI, MIB);
1282 case TargetOpcode::G_EXTRACT_SUBVECTOR:
1283 return selectExtractSubvector(MI, MIB);
1284 default:
1285 return false;
1286 }
1287}
1288
1289bool RISCVInstructionSelector::selectUnmergeValues(
1290 MachineInstr &MI, MachineIRBuilder &MIB) const {
1291 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
1292
1293 if (!Subtarget->hasStdExtZfa())
1294 return false;
1295
1296 // Split F64 Src into two s32 parts
1297 if (MI.getNumOperands() != 3)
1298 return false;
1299 Register Src = MI.getOperand(2).getReg();
1300 Register Lo = MI.getOperand(0).getReg();
1301 Register Hi = MI.getOperand(1).getReg();
1302 if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
1303 return false;
1304
1305 MachineInstr *ExtractLo = MIB.buildInstr(RISCV::FMV_X_W_FPR64, {Lo}, {Src});
1306 if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
1307 return false;
1308
1309 MachineInstr *ExtractHi = MIB.buildInstr(RISCV::FMVH_X_D, {Hi}, {Src});
1310 if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
1311 return false;
1312
1313 MI.eraseFromParent();
1314 return true;
1315}
1316
1317bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
1318 MachineIRBuilder &MIB) {
1319 Register PtrReg = Op.getReg();
1320 assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");
1321
1322 const LLT sXLen = LLT::scalar(STI.getXLen());
1323 auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
1324 MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
1325 Op.setReg(PtrToInt.getReg(0));
1326 return select(*PtrToInt);
1327}
1328
1329void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
1330 MachineIRBuilder &MIB) {
1331 switch (MI.getOpcode()) {
1332 case TargetOpcode::G_PTR_ADD: {
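// Pointers are selected as plain XLen-wide integers: convert the pointer
// operand with G_PTRTOINT and rewrite the G_PTR_ADD into an integer G_ADD.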
1333 Register DstReg = MI.getOperand(0).getReg();
1334 const LLT sXLen = LLT::scalar(STI.getXLen());
1335
1336 replacePtrWithInt(MI.getOperand(1), MIB);
1337 MI.setDesc(TII.get(TargetOpcode::G_ADD));
1338 MRI->setType(DstReg, sXLen);
1339 break;
1340 }
1341 case TargetOpcode::G_PTRMASK: {
1342 Register DstReg = MI.getOperand(0).getReg();
1343 const LLT sXLen = LLT::scalar(STI.getXLen());
1344 replacePtrWithInt(MI.getOperand(1), MIB);
1345 MI.setDesc(TII.get(TargetOpcode::G_AND));
1346 MRI->setType(DstReg, sXLen);
1347 break;
1348 }
1349 }
1350}
1351
1352void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
1353 const MachineInstr &MI,
1354 int OpIdx) const {
1355 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1356 "Expected G_CONSTANT");
1357 int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
1358 MIB.addImm(-CstVal);
1359}
1360
1361void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
1362 const MachineInstr &MI,
1363 int OpIdx) const {
1364 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1365 "Expected G_CONSTANT");
1366 uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
1367 MIB.addImm(STI.getXLen() - CstVal);
1368}
1369
1370void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
1371 const MachineInstr &MI,
1372 int OpIdx) const {
1373 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1374 "Expected G_CONSTANT");
1375 uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
1376 MIB.addImm(32 - CstVal);
1377}
1378
1379void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
1380 const MachineInstr &MI,
1381 int OpIdx) const {
1382 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1383 "Expected G_CONSTANT");
1384 int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
1385 MIB.addImm(CstVal + 1);
1386}
1387
1388void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
1389 const MachineInstr &MI,
1390 int OpIdx) const {
1391 assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
1392 "Expected G_FRAME_INDEX");
1393 MIB.add(MI.getOperand(1));
1394}
1395
1396void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
1397 const MachineInstr &MI,
1398 int OpIdx) const {
1399 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1400 "Expected G_CONSTANT");
1401 uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
1402 MIB.addImm(llvm::countr_zero(C));
1403}
1404
1405void RISCVInstructionSelector::renderXLenSubTrailingOnes(
1406 MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
1407 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1408 "Expected G_CONSTANT");
1409 uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
1410 MIB.addImm(Subtarget->getXLen() - llvm::countr_one(C));
1411}
1412
1413void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
1414 const MachineInstr &MI,
1415 int OpIdx) const {
1416 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1417 "Expected G_CONSTANT");
1418 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
1419 int64_t Adj = Imm < 0 ? -2048 : 2047;
1420 MIB.addImm(Imm - Adj);
1421}
1422
1423void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
1424 const MachineInstr &MI,
1425 int OpIdx) const {
1426 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1427 "Expected G_CONSTANT");
1428 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
1429 MIB.addImm(Imm);
1430}
1431
1432const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
1433 LLT Ty, const RegisterBank &RB) const {
1434 if (RB.getID() == RISCV::GPRBRegBankID) {
1435 if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
1436 return &RISCV::GPRRegClass;
1437 }
1438
1439 if (RB.getID() == RISCV::FPRBRegBankID) {
1440 if (Ty.getSizeInBits() == 16)
1441 return &RISCV::FPR16RegClass;
1442 if (Ty.getSizeInBits() == 32)
1443 return &RISCV::FPR32RegClass;
1444 if (Ty.getSizeInBits() == 64)
1445 return &RISCV::FPR64RegClass;
1446 }
1447
1448 if (RB.getID() == RISCV::VRBRegBankID) {
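// Scalable vector values map to VR/VRM2/VRM4/VRM8 by their minimum known
// size in bits, matching the register grouping (LMUL) implied by the type.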
1449 if (Ty.getSizeInBits().getKnownMinValue() <= 64)
1450 return &RISCV::VRRegClass;
1451
1452 if (Ty.getSizeInBits().getKnownMinValue() == 128)
1453 return &RISCV::VRM2RegClass;
1454
1455 if (Ty.getSizeInBits().getKnownMinValue() == 256)
1456 return &RISCV::VRM4RegClass;
1457
1458 if (Ty.getSizeInBits().getKnownMinValue() == 512)
1459 return &RISCV::VRM8RegClass;
1460 }
1461
1462 return nullptr;
1463}
1464
1465bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
1466 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
1467}
1468
1469bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
1470 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
1471}
1472
1473bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
1474 Register DstReg = MI.getOperand(0).getReg();
1475
1476 if (DstReg.isPhysical())
1477 return true;
1478
1479 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
1480 MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
1481 assert(DstRC &&
1482 "Register class not available for LLT, register bank combination");
1483
1484 // No need to constrain SrcReg. It will get constrained when
1485 // we hit another of its uses or its defs.
1486 // Copies do not have constraints.
1487 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1488 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
1489 << " operand\n");
1490 return false;
1491 }
1492
1493 MI.setDesc(TII.get(RISCV::COPY));
1494 return true;
1495}
1496
1497bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
1498 MachineIRBuilder &MIB) const {
1499 assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
1500
1501 const Register DstReg = MI.getOperand(0).getReg();
1502 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
1503 MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
1504
1505 assert(DstRC &&
1506 "Register class not available for LLT, register bank combination");
1507
1508 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1509 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
1510 << " operand\n");
1511 }
1512 MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
1513 return true;
1514}
1515
1516bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
1517 MachineIRBuilder &MIB) const {
1518 if (Imm == 0) {
1519 MIB.buildCopy(DstReg, Register(RISCV::X0));
1520 RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
1521 return true;
1522 }
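// Otherwise materialize the constant with the sequence computed by RISCVMatInt,
// threading the partial result through fresh virtual registers.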
1523
1524 RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
1525 unsigned NumInsts = Seq.size();
1526 Register SrcReg = RISCV::X0;
1527
1528 for (unsigned i = 0; i < NumInsts; i++) {
1529 Register TmpReg = i < NumInsts - 1
1530 ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
1531 : DstReg;
1532 const RISCVMatInt::Inst &I = Seq[i];
1533 MachineInstr *Result;
1534
1535 switch (I.getOpndKind()) {
1536 case RISCVMatInt::Imm:
1537 // clang-format off
1538 Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
1539 .addImm(I.getImm());
1540 // clang-format on
1541 break;
1542 case RISCVMatInt::RegX0:
1543 Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
1544 {SrcReg, Register(RISCV::X0)});
1545 break;
1546 case RISCVMatInt::RegReg:
1547 Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
1548 break;
1549 case RISCVMatInt::RegImm:
1550 Result =
1551 MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
1552 break;
1553 }
1554
1555 if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1556 return false;
1557
1558 SrcReg = TmpReg;
1559 }
1560
1561 return true;
1562}
1563
1564bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
1565 MachineIRBuilder &MIB, bool IsLocal,
1566 bool IsExternWeak) const {
1567 assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1568 MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
1569 MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
1570 "Unexpected opcode");
1571
1572 const MachineOperand &DispMO = MI.getOperand(1);
1573
1574 Register DefReg = MI.getOperand(0).getReg();
1575 const LLT DefTy = MRI->getType(DefReg);
1576
1577 // When HWASAN is used and tagging of global variables is enabled
1578 // they should be accessed via the GOT, since the tagged address of a global
1579 // is incompatible with existing code models. This also applies to non-pic
1580 // mode.
1581 if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
1582 if (IsLocal && !Subtarget->allowTaggedGlobals()) {
1583 // Use PC-relative addressing to access the symbol. This generates the
1584 // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
1585 // %pcrel_lo(auipc)).
1586 MI.setDesc(TII.get(RISCV::PseudoLLA));
1587 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
1588 }
1589
1590 // Use PC-relative addressing to access the GOT for this symbol, then
1591 // load the address from the GOT. This generates the pattern (PseudoLGA
1592 // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
1593 // %pcrel_lo(auipc))).
1594 MachineFunction &MF = *MI.getParent()->getParent();
1595 MachineMemOperand *MemOp = MF.getMachineMemOperand(
1596 MachinePointerInfo::getGOT(MF),
1597 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
1598 MachineMemOperand::MOInvariant,
1599 DefTy, Align(DefTy.getSizeInBits() / 8));
1600
1601 auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
1602 .addDisp(DispMO, 0)
1603 .addMemOperand(MemOp);
1604
1605 if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1606 return false;
1607
1608 MI.eraseFromParent();
1609 return true;
1610 }
1611
1612 switch (TM.getCodeModel()) {
1613 default: {
1615 "Unsupported code model for lowering", MI);
1616 return false;
1617 }
1618 case CodeModel::Small: {
1619 // Must lie within a single 2 GiB address range and must lie between
1620 // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
1621 // (lui %hi(sym)) %lo(sym)).
1622 Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1623 MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
1624 .addDisp(DispMO, 0, RISCVII::MO_HI);
1625
1626 if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
1627 return false;
1628
1629 auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
1630 .addDisp(DispMO, 0, RISCVII::MO_LO);
1631
1632 if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1633 return false;
1634
1635 MI.eraseFromParent();
1636 return true;
1637 }
1638 case CodeModel::Medium:
1639 // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
1640 // relocation needs to reference a label that points to the auipc
1641 // instruction itself, not the global. This cannot be done inside the
1642 // instruction selector.
1643 if (IsExternWeak) {
1644 // An extern weak symbol may be undefined, i.e. have value 0, which may
1645 // not be within 2GiB of PC, so use GOT-indirect addressing to access the
1646 // symbol. This generates the pattern (PseudoLGA sym), which expands to
1647 // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
1648 MachineFunction &MF = *MI.getParent()->getParent();
1649 MachineMemOperand *MemOp = MF.getMachineMemOperand(
1650 MachinePointerInfo::getGOT(MF),
1651 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
1652 MachineMemOperand::MOInvariant,
1653 DefTy, Align(DefTy.getSizeInBits() / 8));
1654
1655 auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
1656 .addDisp(DispMO, 0)
1657 .addMemOperand(MemOp);
1658
1659 if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1660 return false;
1661
1662 MI.eraseFromParent();
1663 return true;
1664 }
1665
1666 // Generate a sequence for accessing addresses within any 2GiB range
1667 // within the address space. This generates the pattern (PseudoLLA sym),
1668 // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
1669 MI.setDesc(TII.get(RISCV::PseudoLLA));
1670 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
1671 }
1672
1673 return false;
1674}
1675
1676bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
1677 MachineIRBuilder &MIB) const {
1678 auto &SelectMI = cast<GSelect>(MI);
1679
1680 Register LHS, RHS;
1681 RISCVCC::CondCode CC;
1682 getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);
1683
1684 Register DstReg = SelectMI.getReg(0);
1685
1686 unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
1687 if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
1688 unsigned Size = MRI->getType(DstReg).getSizeInBits();
1689 Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
1690 : RISCV::Select_FPR64_Using_CC_GPR;
1691 }
1692
1693 MachineInstr *Result = MIB.buildInstr(Opc)
1694 .addDef(DstReg)
1695 .addReg(LHS)
1696 .addReg(RHS)
1697 .addImm(CC)
1698 .addReg(SelectMI.getTrueReg())
1699 .addReg(SelectMI.getFalseReg());
1700 MI.eraseFromParent();
1701 return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
1702}
1703
1704// Convert an FCMP predicate to one of the supported F or D instructions.
1705static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
1706 assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
1707 switch (Pred) {
1708 default:
1709 llvm_unreachable("Unsupported predicate");
1710 case CmpInst::FCMP_OLT:
1711 return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
1712 case CmpInst::FCMP_OLE:
1713 return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
1714 case CmpInst::FCMP_OEQ:
1715 return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
1716 }
1717}
1718
1719// Try legalizing an FCMP by swapping or inverting the predicate to one that
1720// is supported.
1721static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
 1722 CmpInst::Predicate &Pred, bool &NeedInvert) {
1723 auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
1724 return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
1725 Pred == CmpInst::FCMP_OEQ;
1726 };
1727
1728 assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");
1729
 1730 CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
 1731 if (isLegalFCmpPredicate(InvPred)) {
1732 Pred = InvPred;
1733 std::swap(LHS, RHS);
1734 return true;
1735 }
1736
1737 InvPred = CmpInst::getInversePredicate(Pred);
1738 NeedInvert = true;
1739 if (isLegalFCmpPredicate(InvPred)) {
1740 Pred = InvPred;
1741 return true;
1742 }
1743 InvPred = CmpInst::getSwappedPredicate(InvPred);
1744 if (isLegalFCmpPredicate(InvPred)) {
1745 Pred = InvPred;
1746 std::swap(LHS, RHS);
1747 return true;
1748 }
1749
1750 return false;
1751}
1752
1753// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
1754// the result in DstReg.
1755// FIXME: Maybe we should expand this earlier.
1756bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
1757 MachineIRBuilder &MIB) const {
1758 auto &CmpMI = cast<GFCmp>(MI);
1759 CmpInst::Predicate Pred = CmpMI.getCond();
1760
1761 Register DstReg = CmpMI.getReg(0);
1762 Register LHS = CmpMI.getLHSReg();
1763 Register RHS = CmpMI.getRHSReg();
1764
1765 unsigned Size = MRI->getType(LHS).getSizeInBits();
1766 assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");
1767
1768 Register TmpReg = DstReg;
1769
1770 bool NeedInvert = false;
1771 // First try swapping operands or inverting.
1772 if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
1773 if (NeedInvert)
1774 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1775 auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
1776 if (!Cmp.constrainAllUses(TII, TRI, RBI))
1777 return false;
1778 } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
1779 // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
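 // For f32 operands this emits, schematically (t0/t1/a0 stand in for the
 // virtual registers created below):
 //   flt.s t0, LHS, RHS
 //   flt.s t1, RHS, LHS
 //   or    a0, t0, t1
 //   xori  a0, a0, 1   # only when the original predicate was FCMP_UEQ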
1780 NeedInvert = Pred == CmpInst::FCMP_UEQ;
 1781 auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
 1782 {&RISCV::GPRRegClass}, {LHS, RHS});
1783 if (!Cmp1.constrainAllUses(TII, TRI, RBI))
1784 return false;
 1785 auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
 1786 {&RISCV::GPRRegClass}, {RHS, LHS});
1787 if (!Cmp2.constrainAllUses(TII, TRI, RBI))
1788 return false;
1789 if (NeedInvert)
1790 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1791 auto Or =
1792 MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1793 if (!Or.constrainAllUses(TII, TRI, RBI))
1794 return false;
1795 } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
1796 // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
1797 // FIXME: If LHS and RHS are the same we can use a single FEQ.
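 // For f32 operands this emits, schematically (t0/t1/a0 stand in for the
 // virtual registers created below):
 //   feq.s t0, LHS, LHS
 //   feq.s t1, RHS, RHS
 //   and   a0, t0, t1
 //   xori  a0, a0, 1   # only when the original predicate was FCMP_UNO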
1798 NeedInvert = Pred == CmpInst::FCMP_UNO;
 1799 auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
 1800 {&RISCV::GPRRegClass}, {LHS, LHS});
1801 if (!Cmp1.constrainAllUses(TII, TRI, RBI))
1802 return false;
 1803 auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
 1804 {&RISCV::GPRRegClass}, {RHS, RHS});
1805 if (!Cmp2.constrainAllUses(TII, TRI, RBI))
1806 return false;
1807 if (NeedInvert)
1808 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1809 auto And =
1810 MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1811 if (!And.constrainAllUses(TII, TRI, RBI))
1812 return false;
1813 } else
1814 llvm_unreachable("Unhandled predicate");
1815
1816 // Emit an XORI to invert the result if needed.
1817 if (NeedInvert) {
1818 auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
1819 if (!Xor.constrainAllUses(TII, TRI, RBI))
1820 return false;
1821 }
1822
1823 MI.eraseFromParent();
1824 return true;
1825}
1826
1827void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
1828 SyncScope::ID FenceSSID,
1829 MachineIRBuilder &MIB) const {
1830 if (STI.hasStdExtZtso()) {
1831 // The only fence that needs an instruction is a sequentially-consistent
1832 // cross-thread fence.
1833 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
1834 FenceSSID == SyncScope::System) {
1835 // fence rw, rw
1836 MIB.buildInstr(RISCV::FENCE, {}, {})
 1837 .addImm(RISCVFenceField::R | RISCVFenceField::W)
 1838 .addImm(RISCVFenceField::R | RISCVFenceField::W);
 1839 return;
1840 }
1841
1842 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
1843 MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
1844 return;
1845 }
1846
1847 // singlethread fences only synchronize with signal handlers on the same
1848 // thread and thus only need to preserve instruction order, not actually
1849 // enforce memory ordering.
1850 if (FenceSSID == SyncScope::SingleThread) {
1851 MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
1852 return;
1853 }
1854
1855 // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
1856 // Manual: Volume I.
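 // Summary of the mapping implemented by the switch below:
 //   fence acquire -> fence r, rw
 //   fence release -> fence rw, w
 //   fence acq_rel -> fence.tso
 //   fence seq_cst -> fence rw, rw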
1857 unsigned Pred, Succ;
1858 switch (FenceOrdering) {
1859 default:
1860 llvm_unreachable("Unexpected ordering");
1861 case AtomicOrdering::AcquireRelease:
1862 // fence acq_rel -> fence.tso
1863 MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
1864 return;
1865 case AtomicOrdering::Acquire:
1866 // fence acquire -> fence r, rw
1867 Pred = RISCVFenceField::R;
 1868 Succ = RISCVFenceField::R | RISCVFenceField::W;
 1869 break;
1870 case AtomicOrdering::Release:
1871 // fence release -> fence rw, w
 1872 Pred = RISCVFenceField::R | RISCVFenceField::W;
 1873 Succ = RISCVFenceField::W;
1874 break;
1875 case AtomicOrdering::SequentiallyConsistent:
1876 // fence seq_cst -> fence rw, rw
 1877 Pred = RISCVFenceField::R | RISCVFenceField::W;
 1878 Succ = RISCVFenceField::R | RISCVFenceField::W;
 1879 break;
1880 }
1881 MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
1882}
1883
1884namespace llvm {
1885InstructionSelector *
1886createRISCVInstructionSelector(const RISCVTargetMachine &TM,
 1887 const RISCVSubtarget &Subtarget,
1888 const RISCVRegisterBankInfo &RBI) {
1889 return new RISCVInstructionSelector(TM, Subtarget, RBI);
1890}
1891} // end namespace llvm