//===-- RISCVMCCodeEmitter.cpp - Convert RISC-V code to machine code ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the RISCVMCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVBaseInfo.h"
#include "MCTargetDesc/RISCVFixupKinds.h"
#include "MCTargetDesc/RISCVMCAsmInfo.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/EndianStream.h"

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
STATISTIC(MCNumFixups, "Number of MC fixups created");

namespace {
class RISCVMCCodeEmitter : public MCCodeEmitter {
  RISCVMCCodeEmitter(const RISCVMCCodeEmitter &) = delete;
  void operator=(const RISCVMCCodeEmitter &) = delete;
  MCContext &Ctx;
  MCInstrInfo const &MCII;

public:
  RISCVMCCodeEmitter(MCContext &ctx, MCInstrInfo const &MCII)
      : Ctx(ctx), MCII(MCII) {}

  ~RISCVMCCodeEmitter() override = default;

  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void expandFunctionCall(const MCInst &MI, SmallVectorImpl<char> &CB,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const;

  void expandTLSDESCCall(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  void expandAddTPRel(const MCInst &MI, SmallVectorImpl<char> &CB,
                      SmallVectorImpl<MCFixup> &Fixups,
                      const MCSubtargetInfo &STI) const;

  void expandLongCondBr(const MCInst &MI, SmallVectorImpl<char> &CB,
                        SmallVectorImpl<MCFixup> &Fixups,
                        const MCSubtargetInfo &STI) const;

  void expandQCLongCondBrImm(const MCInst &MI, SmallVectorImpl<char> &CB,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI, unsigned Size) const;

  /// TableGen'erated function for getting the binary encoding for an
  /// instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// Return binary encoding of operand. If the machine operand requires
  /// relocation, record the relocation and return zero.
  uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  uint64_t getImmOpValueMinus1(const MCInst &MI, unsigned OpNo,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  uint64_t getImmOpValueSlist(const MCInst &MI, unsigned OpNo,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  template <unsigned N>
  unsigned getImmOpValueAsrN(const MCInst &MI, unsigned OpNo,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  uint64_t getImmOpValueZibi(const MCInst &MI, unsigned OpNo,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  uint64_t getImmOpValue(const MCInst &MI, unsigned OpNo,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  unsigned getVMaskReg(const MCInst &MI, unsigned OpNo,
                       SmallVectorImpl<MCFixup> &Fixups,
                       const MCSubtargetInfo &STI) const;

  unsigned getRlistOpValue(const MCInst &MI, unsigned OpNo,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const;

  unsigned getRlistS0OpValue(const MCInst &MI, unsigned OpNo,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;
};
} // end anonymous namespace

MCCodeEmitter *llvm::createRISCVMCCodeEmitter(const MCInstrInfo &MCII,
                                              MCContext &Ctx) {
  return new RISCVMCCodeEmitter(Ctx, MCII);
}

static void addFixup(SmallVectorImpl<MCFixup> &Fixups, uint32_t Offset,
                     const MCExpr *Value, uint16_t Kind) {
  bool PCRel = false;
  switch (Kind) {
  case ELF::R_RISCV_CALL_PLT:
    // Further PC-relative fixup kinds, elided in this listing, share this
    // case and also set PCRel.
    PCRel = true;
  }
  Fixups.push_back(MCFixup::create(Offset, Value, Kind, PCRel));
}

// Expand PseudoCALL(Reg), PseudoTAIL and PseudoJump to AUIPC and JALR with
// relocation types. We expand those pseudo-instructions while encoding them,
// meaning AUIPC and JALR won't go through RISC-V MC to MC compressed
// instruction transformation. This is acceptable because AUIPC has no 16-bit
// form and C_JALR has no immediate operand field. We let linker relaxation
// deal with it. When linker relaxation is enabled, AUIPC and JALR have a
// chance to relax to JAL.
// If the C extension is enabled, JAL has a chance to relax to C_JAL.
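// For example, a `call foo` pseudo is emitted as
//   auipc ra, 0        (carries the call relocation against foo)
//   jalr  ra, ra, 0
// and, with linker relaxation enabled, the linker may later rewrite the pair
// into a single `jal ra, foo` when foo is in range.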
void RISCVMCCodeEmitter::expandFunctionCall(const MCInst &MI,
                                            SmallVectorImpl<char> &CB,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  MCInst TmpInst;
  MCOperand Func;
  MCRegister Ra;
  if (MI.getOpcode() == RISCV::PseudoTAIL) {
    Func = MI.getOperand(0);
    Ra = RISCVII::getTailExpandUseRegNo(STI.getFeatureBits());
  } else if (MI.getOpcode() == RISCV::PseudoCALLReg) {
    Func = MI.getOperand(1);
    Ra = MI.getOperand(0).getReg();
  } else if (MI.getOpcode() == RISCV::PseudoCALL) {
    Func = MI.getOperand(0);
    Ra = RISCV::X1;
  } else if (MI.getOpcode() == RISCV::PseudoJump) {
    Func = MI.getOperand(1);
    Ra = MI.getOperand(0).getReg();
  }
  uint32_t Binary;

  assert(Func.isExpr() && "Expected expression");

  const MCExpr *CallExpr = Func.getExpr();

  if (/* guard condition not preserved in this listing */) {
    MCOperand FuncOp = MCOperand::createExpr(CallExpr);
    if (MI.getOpcode() == RISCV::PseudoTAIL ||
        MI.getOpcode() == RISCV::PseudoJump)
      // Emit JAL X0, Func
      TmpInst = MCInstBuilder(RISCV::JAL).addReg(RISCV::X0).addOperand(FuncOp);
    else
      // Emit JAL Ra, Func
      TmpInst = MCInstBuilder(RISCV::JAL).addReg(Ra).addOperand(FuncOp);
    Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
    support::endian::write(CB, Binary, llvm::endianness::little);
    return;
  }
  // Emit AUIPC Ra, Func with R_RISCV_CALL relocation type.
  TmpInst = MCInstBuilder(RISCV::AUIPC).addReg(Ra).addExpr(CallExpr);
  Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
  support::endian::write(CB, Binary, llvm::endianness::little);

  if (MI.getOpcode() == RISCV::PseudoTAIL ||
      MI.getOpcode() == RISCV::PseudoJump)
    // Emit JALR X0, Ra, 0
    TmpInst = MCInstBuilder(RISCV::JALR).addReg(RISCV::X0).addReg(Ra).addImm(0);
  else
    // Emit JALR Ra, Ra, 0
    TmpInst = MCInstBuilder(RISCV::JALR).addReg(Ra).addReg(Ra).addImm(0);
  Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
  support::endian::write(CB, Binary, llvm::endianness::little);
}

void RISCVMCCodeEmitter::expandTLSDESCCall(const MCInst &MI,
                                           SmallVectorImpl<char> &CB,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  MCOperand SrcSymbol = MI.getOperand(3);
  assert(SrcSymbol.isExpr() &&
         "Expected expression as first input to TLSDESCCALL");
  const auto *Expr = dyn_cast<MCSpecifierExpr>(SrcSymbol.getExpr());
  MCRegister Link = MI.getOperand(0).getReg();
  MCRegister Dest = MI.getOperand(1).getReg();
  int64_t Imm = MI.getOperand(2).getImm();
  addFixup(Fixups, 0, Expr, ELF::R_RISCV_TLSDESC_CALL);
  MCInst Call =
      MCInstBuilder(RISCV::JALR).addReg(Link).addReg(Dest).addImm(Imm);

  uint32_t Binary = getBinaryCodeForInstr(Call, Fixups, STI);
  support::endian::write(CB, Binary, llvm::endianness::little);
}

// Expand PseudoAddTPRel to a simple ADD with the correct relocation.
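// For example, `add a0, a1, tp, %tprel_add(sym)` is emitted as a plain
// `add a0, a1, tp` carrying an R_RISCV_TPREL_ADD relocation (plus
// R_RISCV_RELAX when linker relaxation is enabled).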
void RISCVMCCodeEmitter::expandAddTPRel(const MCInst &MI,
                                        SmallVectorImpl<char> &CB,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  MCOperand DestReg = MI.getOperand(0);
  MCOperand SrcReg = MI.getOperand(1);
  MCOperand TPReg = MI.getOperand(2);
  assert(TPReg.isReg() && TPReg.getReg() == RISCV::X4 &&
         "Expected thread pointer as second input to TP-relative add");

  MCOperand SrcSymbol = MI.getOperand(3);
  assert(SrcSymbol.isExpr() &&
         "Expected expression as third input to TP-relative add");

  const auto *Expr = dyn_cast<MCSpecifierExpr>(SrcSymbol.getExpr());
  assert(Expr && Expr->getSpecifier() == ELF::R_RISCV_TPREL_ADD &&
         "Expected tprel_add relocation on TP-relative symbol");

  addFixup(Fixups, 0, Expr, ELF::R_RISCV_TPREL_ADD);
  if (STI.hasFeature(RISCV::FeatureRelax))
    Fixups.back().setLinkerRelaxable();

  // Emit a normal ADD instruction with the given operands.
  MCInst TmpInst = MCInstBuilder(RISCV::ADD)
                       .addOperand(DestReg)
                       .addOperand(SrcReg)
                       .addOperand(TPReg);
  uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
  support::endian::write(CB, Binary, llvm::endianness::little);
}

static unsigned getInvertedBranchOp(unsigned BrOp) {
  switch (BrOp) {
  default:
    llvm_unreachable("Unexpected branch opcode!");
  case RISCV::PseudoLongBEQ:
    return RISCV::BNE;
  case RISCV::PseudoLongBNE:
    return RISCV::BEQ;
  case RISCV::PseudoLongBLT:
    return RISCV::BGE;
  case RISCV::PseudoLongBGE:
    return RISCV::BLT;
  case RISCV::PseudoLongBLTU:
    return RISCV::BGEU;
  case RISCV::PseudoLongBGEU:
    return RISCV::BLTU;
  case RISCV::PseudoLongQC_BEQI:
    return RISCV::QC_BNEI;
  case RISCV::PseudoLongQC_BNEI:
    return RISCV::QC_BEQI;
  case RISCV::PseudoLongQC_BLTI:
    return RISCV::QC_BGEI;
  case RISCV::PseudoLongQC_BGEI:
    return RISCV::QC_BLTI;
  case RISCV::PseudoLongQC_BLTUI:
    return RISCV::QC_BGEUI;
  case RISCV::PseudoLongQC_BGEUI:
    return RISCV::QC_BLTUI;
  case RISCV::PseudoLongQC_E_BEQI:
    return RISCV::QC_E_BNEI;
  case RISCV::PseudoLongQC_E_BNEI:
    return RISCV::QC_E_BEQI;
  case RISCV::PseudoLongQC_E_BLTI:
    return RISCV::QC_E_BGEI;
  case RISCV::PseudoLongQC_E_BGEI:
    return RISCV::QC_E_BLTI;
  case RISCV::PseudoLongQC_E_BLTUI:
    return RISCV::QC_E_BGEUI;
  case RISCV::PseudoLongQC_E_BGEUI:
    return RISCV::QC_E_BLTUI;
  }
}

// Expand PseudoLongBxx to an inverted conditional branch and an unconditional
// jump.
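// For example, `PseudoLongBEQ a0, a1, dest` is emitted as
//   bne a0, a1, 8      (skip the following jump)
//   jal x0, dest       (fixup_riscv_jal against dest)
// using the 2-byte `c.bnez`/`c.beqz` form (offset 6) when Zca allows it.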
void RISCVMCCodeEmitter::expandLongCondBr(const MCInst &MI,
                                          SmallVectorImpl<char> &CB,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  MCRegister SrcReg1 = MI.getOperand(0).getReg();
  MCRegister SrcReg2 = MI.getOperand(1).getReg();
  MCOperand SrcSymbol = MI.getOperand(2);
  unsigned Opcode = MI.getOpcode();
  bool IsEqTest =
      Opcode == RISCV::PseudoLongBNE || Opcode == RISCV::PseudoLongBEQ;

  bool UseCompressedBr = false;
  if (IsEqTest && STI.hasFeature(RISCV::FeatureStdExtZca)) {
    if (RISCV::X8 <= SrcReg1.id() && SrcReg1.id() <= RISCV::X15 &&
        SrcReg2.id() == RISCV::X0) {
      UseCompressedBr = true;
    } else if (RISCV::X8 <= SrcReg2.id() && SrcReg2.id() <= RISCV::X15 &&
               SrcReg1.id() == RISCV::X0) {
      std::swap(SrcReg1, SrcReg2);
      UseCompressedBr = true;
    }
  }

  uint32_t Offset;
  if (UseCompressedBr) {
    unsigned InvOpc =
        Opcode == RISCV::PseudoLongBNE ? RISCV::C_BEQZ : RISCV::C_BNEZ;
    MCInst TmpInst = MCInstBuilder(InvOpc).addReg(SrcReg1).addImm(6);
    uint16_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
    support::endian::write(CB, Binary, llvm::endianness::little);
    Offset = 2;
  } else {
    unsigned InvOpc = getInvertedBranchOp(Opcode);
    MCInst TmpInst =
        MCInstBuilder(InvOpc).addReg(SrcReg1).addReg(SrcReg2).addImm(8);
    uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
    support::endian::write(CB, Binary, llvm::endianness::little);
    Offset = 4;
  }

  // Save the number of fixups.
  size_t FixupStartIndex = Fixups.size();

  // Emit an unconditional jump to the destination.
  MCInst TmpInst =
      MCInstBuilder(RISCV::JAL).addReg(RISCV::X0).addOperand(SrcSymbol);
  uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
  support::endian::write(CB, Binary, llvm::endianness::little);

  // Drop any fixup added so we can add the correct one.
  Fixups.resize(FixupStartIndex);

  if (SrcSymbol.isExpr()) {
    addFixup(Fixups, Offset, SrcSymbol.getExpr(), RISCV::fixup_riscv_jal);
    if (STI.hasFeature(RISCV::FeatureRelax))
      Fixups.back().setLinkerRelaxable();
  }
}

// Expand PseudoLongQC_(E_)Bxxx to an inverted conditional branch and an
// unconditional jump.
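// For example, `PseudoLongQC_E_BEQI a0, 11, dest` is emitted as
//   qc.e.bnei a0, 11, 10    (10 = 6-byte branch + 4-byte jump)
//   jal x0, dest            (fixup_riscv_jal against dest)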
void RISCVMCCodeEmitter::expandQCLongCondBrImm(const MCInst &MI,
                                               SmallVectorImpl<char> &CB,
                                               SmallVectorImpl<MCFixup> &Fixups,
                                               const MCSubtargetInfo &STI,
                                               unsigned Size) const {
  MCRegister SrcReg1 = MI.getOperand(0).getReg();
  auto BrImm = MI.getOperand(1).getImm();
  MCOperand SrcSymbol = MI.getOperand(2);
  unsigned Opcode = MI.getOpcode();
  uint32_t Offset;
  unsigned InvOpc = getInvertedBranchOp(Opcode);
  // Emit inverted conditional branch with offset:
  // 8 (QC.BXXX(4) + JAL(4))
  // or
  // 10 (QC.E.BXXX(6) + JAL(4)).
  if (Size == 4) {
    MCInst TmpBr =
        MCInstBuilder(InvOpc).addReg(SrcReg1).addImm(BrImm).addImm(8);
    uint32_t BrBinary = getBinaryCodeForInstr(TmpBr, Fixups, STI);
    support::endian::write(CB, BrBinary, llvm::endianness::little);
  } else {
    MCInst TmpBr =
        MCInstBuilder(InvOpc).addReg(SrcReg1).addImm(BrImm).addImm(10);
    uint64_t BrBinary =
        getBinaryCodeForInstr(TmpBr, Fixups, STI) & 0xffff'ffff'ffffu;
    SmallVector<char, 8> Encoding;
    support::endian::write(Encoding, BrBinary, llvm::endianness::little);
    assert(Encoding[6] == 0 && Encoding[7] == 0 &&
           "Unexpected encoding for 48-bit instruction");
    Encoding.truncate(6);
    CB.append(Encoding);
  }
  Offset = Size;
  // Save the number of fixups.
  size_t FixupStartIndex = Fixups.size();
  // Emit an unconditional jump to the destination.
  MCInst TmpJ =
      MCInstBuilder(RISCV::JAL).addReg(RISCV::X0).addOperand(SrcSymbol);
  uint32_t JBinary = getBinaryCodeForInstr(TmpJ, Fixups, STI);
  support::endian::write(CB, JBinary, llvm::endianness::little);
  // Drop any fixup added so we can add the correct one.
  Fixups.resize(FixupStartIndex);
  if (SrcSymbol.isExpr()) {
    addFixup(Fixups, Offset, SrcSymbol.getExpr(), RISCV::fixup_riscv_jal);
    if (STI.hasFeature(RISCV::FeatureRelax))
      Fixups.back().setLinkerRelaxable();
  }
}

void RISCVMCCodeEmitter::encodeInstruction(const MCInst &MI,
                                           SmallVectorImpl<char> &CB,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  // Get byte count of instruction.
  unsigned Size = Desc.getSize();

  // RISCVInstrInfo::getInstSizeInBytes expects that the total size of the
  // expanded instructions for each pseudo is correct in the Size field of the
  // tablegen definition for the pseudo.
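  // For example, PseudoCALL is defined with Size 8, matching the AUIPC + JALR
  // pair (4 + 4 bytes) produced by expandFunctionCall.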
  switch (MI.getOpcode()) {
  default:
    break;
  case RISCV::PseudoCALLReg:
  case RISCV::PseudoCALL:
  case RISCV::PseudoTAIL:
  case RISCV::PseudoJump:
    expandFunctionCall(MI, CB, Fixups, STI);
    MCNumEmitted += 2;
    return;
  case RISCV::PseudoAddTPRel:
    expandAddTPRel(MI, CB, Fixups, STI);
    MCNumEmitted += 1;
    return;
  case RISCV::PseudoLongBEQ:
  case RISCV::PseudoLongBNE:
  case RISCV::PseudoLongBLT:
  case RISCV::PseudoLongBGE:
  case RISCV::PseudoLongBLTU:
  case RISCV::PseudoLongBGEU:
    expandLongCondBr(MI, CB, Fixups, STI);
    MCNumEmitted += 2;
    return;
  case RISCV::PseudoLongQC_BEQI:
  case RISCV::PseudoLongQC_BNEI:
  case RISCV::PseudoLongQC_BLTI:
  case RISCV::PseudoLongQC_BGEI:
  case RISCV::PseudoLongQC_BLTUI:
  case RISCV::PseudoLongQC_BGEUI:
    expandQCLongCondBrImm(MI, CB, Fixups, STI, 4);
    MCNumEmitted += 2;
    return;
  case RISCV::PseudoLongQC_E_BEQI:
  case RISCV::PseudoLongQC_E_BNEI:
  case RISCV::PseudoLongQC_E_BLTI:
  case RISCV::PseudoLongQC_E_BGEI:
  case RISCV::PseudoLongQC_E_BLTUI:
  case RISCV::PseudoLongQC_E_BGEUI:
    expandQCLongCondBrImm(MI, CB, Fixups, STI, 6);
    MCNumEmitted += 2;
    return;
  case RISCV::PseudoTLSDESCCall:
    expandTLSDESCCall(MI, CB, Fixups, STI);
    MCNumEmitted += 1;
    return;
  }

  switch (Size) {
  default:
    llvm_unreachable("Unhandled encodeInstruction length!");
  case 2: {
    uint16_t Bits = getBinaryCodeForInstr(MI, Fixups, STI);
    support::endian::write(CB, Bits, llvm::endianness::little);
    break;
  }
  case 4: {
    uint32_t Bits = getBinaryCodeForInstr(MI, Fixups, STI);
    support::endian::write(CB, Bits, llvm::endianness::little);
    break;
  }
  case 6: {
    uint64_t Bits = getBinaryCodeForInstr(MI, Fixups, STI) & 0xffff'ffff'ffffu;
    SmallVector<char, 8> Encoding;
    support::endian::write(Encoding, Bits, llvm::endianness::little);
    assert(Encoding[6] == 0 && Encoding[7] == 0 &&
           "Unexpected encoding for 48-bit instruction");
    Encoding.truncate(6);
    CB.append(Encoding);
    break;
  }
  case 8: {
    uint64_t Bits = getBinaryCodeForInstr(MI, Fixups, STI);
    support::endian::write(CB, Bits, llvm::endianness::little);
    break;
  }
  }

  ++MCNumEmitted; // Keep track of the # of mi's emitted.
}

uint64_t
RISCVMCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {

  if (MO.isReg())
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());

  if (MO.isImm())
    return MO.getImm();

  llvm_unreachable("Unhandled expression!");
  return 0;
}

uint64_t
RISCVMCCodeEmitter::getImmOpValueMinus1(const MCInst &MI, unsigned OpNo,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isImm()) {
    uint64_t Res = MO.getImm();
    return (Res - 1);
  }

  llvm_unreachable("Unhandled expression!");
  return 0;
}

uint64_t
RISCVMCCodeEmitter::getImmOpValueSlist(const MCInst &MI, unsigned OpNo,
                                       SmallVectorImpl<MCFixup> &Fixups,
                                       const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  assert(MO.isImm() && "Slist operand must be immediate");

  uint64_t Res = MO.getImm();
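  // The operand accepts only the values below; they are encoded as a 3-bit
  // index rather than as the raw value.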
  switch (Res) {
  case 0:
    return 0;
  case 1:
    return 1;
  case 2:
    return 2;
  case 4:
    return 3;
  case 8:
    return 4;
  case 16:
    return 5;
  case 15:
    return 6;
  case 31:
    return 7;
  default:
    llvm_unreachable("Unhandled Slist value!");
  }
}

template <unsigned N>
unsigned
RISCVMCCodeEmitter::getImmOpValueAsrN(const MCInst &MI, unsigned OpNo,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isImm()) {
    uint64_t Res = MO.getImm();
    assert((Res & ((1 << N) - 1)) == 0 && "LSB is non-zero");
    return Res >> N;
  }

  return getImmOpValue(MI, OpNo, Fixups, STI);
}

uint64_t
RISCVMCCodeEmitter::getImmOpValueZibi(const MCInst &MI, unsigned OpNo,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  assert(MO.isImm() && "Zibi operand must be an immediate");
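  // An immediate of -1 is encoded as 0; every other legal value encodes as
  // itself.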
  int64_t Res = MO.getImm();
  if (Res == -1)
    return 0;

  return Res;
}

uint64_t RISCVMCCodeEmitter::getImmOpValue(const MCInst &MI, unsigned OpNo,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  bool EnableRelax = STI.hasFeature(RISCV::FeatureRelax);
  const MCOperand &MO = MI.getOperand(OpNo);

  MCInstrDesc const &Desc = MCII.get(MI.getOpcode());
  unsigned MIFrm = RISCVII::getFormat(Desc.TSFlags);

  // If the destination is an immediate, there is nothing to do.
  if (MO.isImm())
    return MO.getImm();

  assert(MO.isExpr() &&
         "getImmOpValue expects only expressions or immediates");
  const MCExpr *Expr = MO.getExpr();
  MCExpr::ExprKind Kind = Expr->getKind();

  // `RelaxCandidate` must be set to `true` in two cases:
  // - The fixup's relocation gets an R_RISCV_RELAX relocation
  // - The underlying instruction may be relaxed to an instruction that gets an
  //   `R_RISCV_RELAX` relocation.
  //
  // The actual emission of `R_RISCV_RELAX` will be handled in
  // `RISCVAsmBackend::applyFixup`.
  bool RelaxCandidate = false;
  auto AsmRelaxToLinkerRelaxable = [&]() -> void {
    if (!STI.hasFeature(RISCV::FeatureExactAssembly))
      RelaxCandidate = true;
  };
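  // For example, `addi a0, a0, %lo(sym)` reaches the S_LO case below, receives
  // a fixup_riscv_lo12_i fixup, and is a relax candidate, so it may also get
  // an R_RISCV_RELAX relocation.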

  unsigned FixupKind = RISCV::fixup_riscv_invalid;
  if (Kind == MCExpr::Specifier) {
    const auto *RVExpr = cast<MCSpecifierExpr>(Expr);
    FixupKind = RVExpr->getSpecifier();
    switch (RVExpr->getSpecifier()) {
    default:
      assert(FixupKind && FixupKind < FirstTargetFixupKind &&
             "invalid specifier");
      break;
    case ELF::R_RISCV_TPREL_ADD:
      // tprel_add is only used to indicate that a relocation should be emitted
      // for an add instruction used in TP-relative addressing. It should not be
      // expanded as if representing an actual instruction operand and so to
      // encounter it here is an error.
      llvm_unreachable(
          "ELF::R_RISCV_TPREL_ADD should not represent an instruction operand");
    case RISCV::S_LO:
      if (MIFrm == RISCVII::InstFormatI)
        FixupKind = RISCV::fixup_riscv_lo12_i;
      else if (MIFrm == RISCVII::InstFormatS)
        FixupKind = RISCV::fixup_riscv_lo12_s;
      else
        llvm_unreachable("VK_LO used with unexpected instruction format");
      RelaxCandidate = true;
      break;
    case ELF::R_RISCV_HI20:
      FixupKind = RISCV::fixup_riscv_hi20;
      RelaxCandidate = true;
      break;
    case RISCV::S_PCREL_LO:
      if (MIFrm == RISCVII::InstFormatI)
        FixupKind = RISCV::fixup_riscv_pcrel_lo12_i;
      else if (MIFrm == RISCVII::InstFormatS)
        FixupKind = RISCV::fixup_riscv_pcrel_lo12_s;
      else
        llvm_unreachable("VK_PCREL_LO used with unexpected instruction format");
      RelaxCandidate = true;
      break;
    case ELF::R_RISCV_PCREL_HI20:
      FixupKind = RISCV::fixup_riscv_pcrel_hi20;
      RelaxCandidate = true;
      break;
    case RISCV::S_GOT_HI:
      FixupKind = ELF::R_RISCV_GOT_HI20;
      RelaxCandidate = true;
      break;
    case RISCV::S_TPREL_LO:
      if (MIFrm == RISCVII::InstFormatI)
        FixupKind = ELF::R_RISCV_TPREL_LO12_I;
      else if (MIFrm == RISCVII::InstFormatS)
        FixupKind = ELF::R_RISCV_TPREL_LO12_S;
      else
        llvm_unreachable("VK_TPREL_LO used with unexpected instruction format");
      RelaxCandidate = true;
      break;
    case RISCV::S_QC_ABS20: {
      FixupKind = RISCV::fixup_riscv_qc_abs20_u;
      RelaxCandidate = true;
      break;
    }
    case ELF::R_RISCV_CALL_PLT:
      RelaxCandidate = true;
      break;
    case ELF::R_RISCV_TLS_GOT_HI20:
    case ELF::R_RISCV_TLS_GD_HI20:
      RelaxCandidate = true;
      break;
    case ELF::R_RISCV_GOT_HI20:
    case ELF::R_RISCV_TPREL_HI20:
    case ELF::R_RISCV_TLSDESC_HI20:
      RelaxCandidate = true;
      break;
    }
  } else if (Kind == MCExpr::SymbolRef || Kind == MCExpr::Binary) {
    // FIXME: Sub kind binary exprs have chance of underflow.
    if (MIFrm == RISCVII::InstFormatJ) {
      FixupKind = RISCV::fixup_riscv_jal;
      RelaxCandidate = true;
    } else if (MIFrm == RISCVII::InstFormatB) {
      FixupKind = RISCV::fixup_riscv_branch;
      // Relaxes to B<cc>; JAL, with fixup_riscv_jal
      AsmRelaxToLinkerRelaxable();
    } else if (MIFrm == RISCVII::InstFormatCJ) {
      FixupKind = RISCV::fixup_riscv_rvc_jump;
      // Relaxes to JAL with fixup_riscv_jal
      AsmRelaxToLinkerRelaxable();
    } else if (MIFrm == RISCVII::InstFormatCB) {
      FixupKind = RISCV::fixup_riscv_rvc_branch;
      // Relaxes to B<cc>; JAL, with fixup_riscv_jal
      AsmRelaxToLinkerRelaxable();
    } else if (MIFrm == RISCVII::InstFormatCI) {
      // Relaxes to `QC.E.LI` with fixup_riscv_qc_e_32
      if (STI.hasFeature(RISCV::FeatureVendorXqcili))
        AsmRelaxToLinkerRelaxable();
    } else if (MIFrm == RISCVII::InstFormatI) {
      FixupKind = RISCV::fixup_riscv_12_i;
    } else if (MIFrm == RISCVII::InstFormatQC_EB) {
      FixupKind = RISCV::fixup_riscv_qc_e_branch;
      // Relaxes to QC.E.B<cc>I; JAL, with fixup_riscv_jal
      AsmRelaxToLinkerRelaxable();
    } else if (MIFrm == RISCVII::InstFormatQC_EAI) {
      FixupKind = RISCV::fixup_riscv_qc_e_32;
      RelaxCandidate = true;
    } else if (MIFrm == RISCVII::InstFormatQC_EJ) {
      FixupKind = RISCV::fixup_riscv_qc_e_call_plt;
      RelaxCandidate = true;
    } else if (MIFrm == RISCVII::InstFormatNDS_BRANCH_10) {
      FixupKind = RISCV::fixup_riscv_nds_branch_10;
    }
  }

  assert(FixupKind != RISCV::fixup_riscv_invalid && "Unhandled expression!");

  addFixup(Fixups, 0, Expr, FixupKind);
  // If linker relaxation is enabled and supported by this relocation, set a bit
  // so that the assembler knows the size of the instruction is not fixed/known,
  // and the relocation will need an R_RISCV_RELAX relocation.
  if (EnableRelax && RelaxCandidate)
    Fixups.back().setLinkerRelaxable();
  ++MCNumFixups;

  return 0;
}

unsigned RISCVMCCodeEmitter::getVMaskReg(const MCInst &MI, unsigned OpNo,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  MCOperand MO = MI.getOperand(OpNo);
  assert(MO.isReg() && "Expected a register.");

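  // In the encoding, the vm bit is 1 for an unmasked operation and 0 when the
  // operation is masked by v0, so an absent mask operand encodes as 1.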
  switch (MO.getReg().id()) {
  default:
    llvm_unreachable("Invalid mask register.");
  case RISCV::V0:
    return 0;
  case RISCV::NoRegister:
    return 1;
  }
}

unsigned RISCVMCCodeEmitter::getRlistOpValue(const MCInst &MI, unsigned OpNo,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  assert(MO.isImm() && "Rlist operand must be immediate");
  auto Imm = MO.getImm();
  assert(Imm >= 4 && "EABI is currently not implemented");
  return Imm;
}
unsigned
RISCVMCCodeEmitter::getRlistS0OpValue(const MCInst &MI, unsigned OpNo,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  assert(MO.isImm() && "Rlist operand must be immediate");
  auto Imm = MO.getImm();
  assert(Imm >= 4 && "EABI is currently not implemented");
  assert(Imm != RISCVZC::RA && "Rlist operand must include s0");
  return Imm;
}

#include "RISCVGenMCCodeEmitter.inc"