LLVM 17.0.0git
X86MCCodeEmitter.cpp
1//===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the X86MCCodeEmitter class.
10//
11//===----------------------------------------------------------------------===//
12
18#include "llvm/MC/MCContext.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCFixup.h"
21#include "llvm/MC/MCInst.h"
22#include "llvm/MC/MCInstrDesc.h"
23#include "llvm/MC/MCInstrInfo.h"
26#include "llvm/MC/MCSymbol.h"
30#include <cassert>
31#include <cstdint>
32#include <cstdlib>
33
34using namespace llvm;
35
36#define DEBUG_TYPE "mccodeemitter"
37
38namespace {
39
40enum PrefixKind { None, REX, XOP, VEX2, VEX3, EVEX };
41
42static void emitByte(uint8_t C, raw_ostream &OS) { OS << static_cast<char>(C); }
43
44class X86OpcodePrefixHelper {
45 // REX (1 byte)
46 // +-----+ +------+
47 // | 40H | | WRXB |
48 // +-----+ +------+
49
50 // XOP (3-byte)
51 // +-----+ +--------------+ +-------------------+
52 // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
53 // +-----+ +--------------+ +-------------------+
54
55 // VEX2 (2 bytes)
56 // +-----+ +-------------------+
57 // | C5h | | R | vvvv | L | pp |
58 // +-----+ +-------------------+
59
60 // VEX3 (3 bytes)
61 // +-----+ +--------------+ +-------------------+
62 // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
63 // +-----+ +--------------+ +-------------------+
64
 65 // VEX_R: opcode extension equivalent to REX.R in
66 // 1's complement (inverted) form
67 //
68 // 1: Same as REX_R=0 (must be 1 in 32-bit mode)
69 // 0: Same as REX_R=1 (64 bit mode only)
70
71 // VEX_X: equivalent to REX.X, only used when a
72 // register is used for index in SIB Byte.
73 //
74 // 1: Same as REX.X=0 (must be 1 in 32-bit mode)
75 // 0: Same as REX.X=1 (64-bit mode only)
76
77 // VEX_B:
78 // 1: Same as REX_B=0 (ignored in 32-bit mode)
79 // 0: Same as REX_B=1 (64 bit mode only)
80
81 // VEX_W: opcode specific (use like REX.W, or used for
82 // opcode extension, or ignored, depending on the opcode byte)
83
84 // VEX_5M (VEX m-mmmmm field):
85 //
86 // 0b00000: Reserved for future use
87 // 0b00001: implied 0F leading opcode
88 // 0b00010: implied 0F 38 leading opcode bytes
89 // 0b00011: implied 0F 3A leading opcode bytes
90 // 0b00100: Reserved for future use
91 // 0b00101: VEX MAP5
92 // 0b00110: VEX MAP6
 93 //  0b00111-0b11111: Reserved for future use (VEX)
 94 //  0b01000: XOP map select - 08h instructions with imm byte
 95 //  0b01001: XOP map select - 09h instructions with no imm byte
 96 //  0b01010: XOP map select - 0Ah instructions with imm dword
97
98 // VEX_4V (VEX vvvv field): a register specifier
99 // (in 1's complement form) or 1111 if unused.
100
101 // VEX_PP: opcode extension providing equivalent
102 // functionality of a SIMD prefix
103 // 0b00: None
104 // 0b01: 66
105 // 0b10: F3
106 // 0b11: F2
107
108 // EVEX (4 bytes)
109 // +-----+ +--------------+ +-------------------+ +------------------------+
110 // | 62h | | RXBR' | 0mmm | | W | vvvv | 1 | pp | | z | L'L | b | v' | aaa |
111 // +-----+ +--------------+ +-------------------+ +------------------------+
112
113 // EVEX_L2/VEX_L (Vector Length):
114 // L2 L
115 // 0 0: scalar or 128-bit vector
116 // 0 1: 256-bit vector
117 // 1 0: 512-bit vector
118
119private:
120 unsigned W : 1;
121 unsigned R : 1;
122 unsigned X : 1;
123 unsigned B : 1;
124 unsigned VEX_4V : 4;
125 unsigned VEX_L : 1;
126 unsigned VEX_PP : 2;
127 unsigned VEX_5M : 5;
128 unsigned EVEX_R2 : 1;
129 unsigned EVEX_z : 1;
130 unsigned EVEX_L2 : 1;
131 unsigned EVEX_b : 1;
132 unsigned EVEX_V2 : 1;
133 unsigned EVEX_aaa : 3;
134 PrefixKind Kind = None;
135 const MCRegisterInfo &MRI;
136
137 unsigned getRegEncoding(const MCInst &MI, unsigned OpNum) const {
138 return MRI.getEncodingValue(MI.getOperand(OpNum).getReg());
139 }
140
141 void setR(unsigned Encoding) { R = Encoding >> 3 & 1; }
142 void setR2(unsigned Encoding) { EVEX_R2 = Encoding >> 4 & 1; }
143 void set4V(unsigned Encoding) { VEX_4V = Encoding & 0xf; }
144 void setV2(unsigned Encoding) { EVEX_V2 = Encoding >> 4 & 1; }
145
146public:
147 void setW(bool V) { W = V; }
148 void setR(const MCInst &MI, unsigned OpNum) {
149 setR(getRegEncoding(MI, OpNum));
150 }
151 void setX(const MCInst &MI, unsigned OpNum, unsigned Shift = 3) {
152 X = getRegEncoding(MI, OpNum) >> Shift & 1;
153 }
154 void setB(const MCInst &MI, unsigned OpNum) {
155 B = getRegEncoding(MI, OpNum) >> 3 & 1;
156 }
157 void set4V(const MCInst &MI, unsigned OpNum) {
158 set4V(getRegEncoding(MI, OpNum));
159 }
160 void setL(bool V) { VEX_L = V; }
161 void setPP(unsigned V) { VEX_PP = V; }
162 void set5M(unsigned V) { VEX_5M = V; }
163 void setR2(const MCInst &MI, unsigned OpNum) {
164 setR2(getRegEncoding(MI, OpNum));
165 }
166 void setRR2(const MCInst &MI, unsigned OpNum) {
167 unsigned Encoding = getRegEncoding(MI, OpNum);
168 setR(Encoding);
169 setR2(Encoding);
170 }
171 void setZ(bool V) { EVEX_z = V; }
172 void setL2(bool V) { EVEX_L2 = V; }
173 void setEVEX_b(bool V) { EVEX_b = V; }
174 void setV2(const MCInst &MI, unsigned OpNum) {
175 setV2(getRegEncoding(MI, OpNum));
176 }
177 void set4VV2(const MCInst &MI, unsigned OpNum) {
178 unsigned Encoding = getRegEncoding(MI, OpNum);
179 set4V(Encoding);
180 setV2(Encoding);
181 }
182 void setAAA(const MCInst &MI, unsigned OpNum) {
183 EVEX_aaa = getRegEncoding(MI, OpNum);
184 }
185
186 X86OpcodePrefixHelper(const MCRegisterInfo &MRI)
187 : W(0), R(0), X(0), B(0), VEX_4V(0), VEX_L(0), VEX_PP(0), VEX_5M(0),
188 EVEX_R2(0), EVEX_z(0), EVEX_L2(0), EVEX_b(0), EVEX_V2(0), EVEX_aaa(0),
189 MRI(MRI) {}
190
191 void setLowerBound(PrefixKind K) { Kind = K; }
192
193 PrefixKind determineOptimalKind() {
194 switch (Kind) {
195 case None:
196 Kind = (W | R | X | B) ? REX : None;
197 break;
198 case REX:
199 case XOP:
200 case VEX3:
201 case EVEX:
202 break;
203 case VEX2:
204 Kind = (W | X | B | (VEX_5M != 1)) ? VEX3 : VEX2;
205 break;
206 }
207 return Kind;
208 }
209
210 void emit(raw_ostream &OS) const {
211 uint8_t FirstPayload =
212 ((~R) & 0x1) << 7 | ((~X) & 0x1) << 6 | ((~B) & 0x1) << 5;
213 uint8_t LastPayload = ((~VEX_4V) & 0xf) << 3 | VEX_L << 2 | VEX_PP;
214 switch (Kind) {
215 case None:
216 return;
217 case REX:
218 emitByte(0x40 | W << 3 | R << 2 | X << 1 | B, OS);
219 return;
220 case VEX2:
221 emitByte(0xC5, OS);
222 emitByte(((~R) & 1) << 7 | LastPayload, OS);
223 return;
224 case VEX3:
225 case XOP:
226 emitByte(Kind == VEX3 ? 0xC4 : 0x8F, OS);
227 emitByte(FirstPayload | VEX_5M, OS);
228 emitByte(W << 7 | LastPayload, OS);
229 return;
230 case EVEX:
231 assert(VEX_5M && !(VEX_5M & 0x8) && "invalid mmm fields for EVEX!");
232 emitByte(0x62, OS);
233 emitByte(FirstPayload | ((~EVEX_R2) & 0x1) << 4 | VEX_5M, OS);
234 emitByte(W << 7 | ((~VEX_4V) & 0xf) << 3 | 1 << 2 | VEX_PP, OS);
235 emitByte(EVEX_z << 7 | EVEX_L2 << 6 | VEX_L << 5 | EVEX_b << 4 |
236 ((~EVEX_V2) & 0x1) << 3 | EVEX_aaa,
237 OS);
238 return;
239 }
240 }
241};
242
243class X86MCCodeEmitter : public MCCodeEmitter {
244 const MCInstrInfo &MCII;
245 MCContext &Ctx;
246
247public:
248 X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
249 : MCII(mcii), Ctx(ctx) {}
250 X86MCCodeEmitter(const X86MCCodeEmitter &) = delete;
251 X86MCCodeEmitter &operator=(const X86MCCodeEmitter &) = delete;
252 ~X86MCCodeEmitter() override = default;
253
254 void emitPrefix(const MCInst &MI, raw_ostream &OS,
255 const MCSubtargetInfo &STI) const override;
256
 257 void encodeInstruction(const MCInst &MI, raw_ostream &OS,
 258 SmallVectorImpl<MCFixup> &Fixups,
 259 const MCSubtargetInfo &STI) const override;
260
261private:
262 unsigned getX86RegNum(const MCOperand &MO) const;
263
264 unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const;
265
266 void emitImmediate(const MCOperand &Disp, SMLoc Loc, unsigned ImmSize,
267 MCFixupKind FixupKind, uint64_t StartByte, raw_ostream &OS,
268 SmallVectorImpl<MCFixup> &Fixups, int ImmOffset = 0) const;
269
270 void emitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
271 raw_ostream &OS) const;
272
273 void emitSIBByte(unsigned SS, unsigned Index, unsigned Base,
274 raw_ostream &OS) const;
275
276 void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
277 uint64_t TSFlags, PrefixKind Kind, uint64_t StartByte,
 278 raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups,
 279 const MCSubtargetInfo &STI,
280 bool ForceSIB = false) const;
281
282 PrefixKind emitPrefixImpl(unsigned &CurOp, const MCInst &MI,
283 const MCSubtargetInfo &STI, raw_ostream &OS) const;
284
285 PrefixKind emitVEXOpcodePrefix(int MemOperand, const MCInst &MI,
286 raw_ostream &OS) const;
287
288 void emitSegmentOverridePrefix(unsigned SegOperand, const MCInst &MI,
289 raw_ostream &OS) const;
290
291 PrefixKind emitOpcodePrefix(int MemOperand, const MCInst &MI,
292 const MCSubtargetInfo &STI,
293 raw_ostream &OS) const;
294
295 PrefixKind emitREXPrefix(int MemOperand, const MCInst &MI,
296 const MCSubtargetInfo &STI, raw_ostream &OS) const;
297};
298
299} // end anonymous namespace
300
301static uint8_t modRMByte(unsigned Mod, unsigned RegOpcode, unsigned RM) {
302 assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
303 return RM | (RegOpcode << 3) | (Mod << 6);
304}
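// For example, "addl %eax, %ecx" (opcode 01 /r) encodes its register operands
// as modRMByte(3, 0, 1) == 0xC1: Mod=3 selects the register-direct form,
// RegOpcode holds EAX (0), RM holds ECX (1), giving the bytes 01 C1.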
305
306static void emitConstant(uint64_t Val, unsigned Size, raw_ostream &OS) {
307 // Output the constant in little endian byte order.
308 for (unsigned i = 0; i != Size; ++i) {
309 emitByte(Val & 255, OS);
310 Val >>= 8;
311 }
312}
313
314/// Determine if this immediate can fit in a disp8 or a compressed disp8 for
 315 /// EVEX instructions. \p ImmOffset will be set to the value to pass to the
 316 /// ImmOffset parameter of emitImmediate.
317static bool isDispOrCDisp8(uint64_t TSFlags, int Value, int &ImmOffset) {
318 bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;
319
 320 int CD8_Scale =
 321 (TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift;
 322 if (!HasEVEX || CD8_Scale == 0)
323 return isInt<8>(Value);
324
325 assert(isPowerOf2_32(CD8_Scale) && "Unexpected CD8 scale!");
326 if (Value & (CD8_Scale - 1)) // Unaligned offset
327 return false;
328
329 int CDisp8 = Value / CD8_Scale;
330 if (!isInt<8>(CDisp8))
331 return false;
332
333 // ImmOffset will be added to Value in emitImmediate leaving just CDisp8.
334 ImmOffset = CDisp8 - Value;
335 return true;
336}
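// Example (illustration): for an EVEX instruction whose memory form has
// CD8_Scale == 64, a displacement of 128 is aligned and 128/64 == 2 fits in a
// signed byte, so the caller can emit the single byte 0x02 (ImmOffset is set
// to 2 - 128 == -126 so that emitImmediate produces that value). A
// displacement of 100 (unaligned) or 16384 (scaled value 256 does not fit in
// int8) instead falls back to a full disp32.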
337
338/// \returns the appropriate fixup kind to use for an immediate in an
339/// instruction with the specified TSFlags.
 340 static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
 341 unsigned Size = X86II::getSizeOfImm(TSFlags);
 342 bool isPCRel = X86II::isImmPCRel(TSFlags);
 343
 344 if (X86II::isImmSigned(TSFlags)) {
 345 switch (Size) {
 346 default:
 347 llvm_unreachable("Unsupported signed fixup size!");
 348 case 4:
 349 return MCFixupKind(X86::reloc_signed_4byte);
 350 }
 351 }
 352 return MCFixup::getKindForSize(Size, isPCRel);
 353}
 354
 355 enum GlobalOffsetTableExprKind { GOT_None, GOT_Normal, GOT_SymDiff };
 356
 357 /// Check if this expression starts with _GLOBAL_OFFSET_TABLE_ and if it is
 358 /// of the form _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on
 359 /// ELF i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only the simple
 360 /// cases that are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the
 361 /// start of a binary expression.
 362 static GlobalOffsetTableExprKind
 363 startsWithGlobalOffsetTable(const MCExpr *Expr) {
364 const MCExpr *RHS = nullptr;
365 if (Expr->getKind() == MCExpr::Binary) {
366 const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
367 Expr = BE->getLHS();
368 RHS = BE->getRHS();
369 }
370
371 if (Expr->getKind() != MCExpr::SymbolRef)
372 return GOT_None;
373
374 const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
375 const MCSymbol &S = Ref->getSymbol();
376 if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
377 return GOT_None;
378 if (RHS && RHS->getKind() == MCExpr::SymbolRef)
379 return GOT_SymDiff;
380 return GOT_Normal;
381}
382
383static bool hasSecRelSymbolRef(const MCExpr *Expr) {
384 if (Expr->getKind() == MCExpr::SymbolRef) {
385 const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
386 return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
387 }
388 return false;
389}
390
391static bool isPCRel32Branch(const MCInst &MI, const MCInstrInfo &MCII) {
392 unsigned Opcode = MI.getOpcode();
393 const MCInstrDesc &Desc = MCII.get(Opcode);
394 if ((Opcode != X86::CALL64pcrel32 && Opcode != X86::JMP_4 &&
395 Opcode != X86::JCC_4) ||
 396 getImmFixupKind(Desc.TSFlags) != FK_PCRel_4)
 397 return false;
398
399 unsigned CurOp = X86II::getOperandBias(Desc);
400 const MCOperand &Op = MI.getOperand(CurOp);
401 if (!Op.isExpr())
402 return false;
403
404 const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Op.getExpr());
405 return Ref && Ref->getKind() == MCSymbolRefExpr::VK_None;
406}
407
408unsigned X86MCCodeEmitter::getX86RegNum(const MCOperand &MO) const {
409 return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
410}
411
412unsigned X86MCCodeEmitter::getX86RegEncoding(const MCInst &MI,
413 unsigned OpNum) const {
414 return Ctx.getRegisterInfo()->getEncodingValue(MI.getOperand(OpNum).getReg());
415}
416
417void X86MCCodeEmitter::emitImmediate(const MCOperand &DispOp, SMLoc Loc,
418 unsigned Size, MCFixupKind FixupKind,
419 uint64_t StartByte, raw_ostream &OS,
 420 SmallVectorImpl<MCFixup> &Fixups,
 421 int ImmOffset) const {
422 const MCExpr *Expr = nullptr;
423 if (DispOp.isImm()) {
424 // If this is a simple integer displacement that doesn't require a
425 // relocation, emit it now.
426 if (FixupKind != FK_PCRel_1 && FixupKind != FK_PCRel_2 &&
427 FixupKind != FK_PCRel_4) {
428 emitConstant(DispOp.getImm() + ImmOffset, Size, OS);
429 return;
430 }
431 Expr = MCConstantExpr::create(DispOp.getImm(), Ctx);
432 } else {
433 Expr = DispOp.getExpr();
434 }
435
436 // If we have an immoffset, add it to the expression.
437 if ((FixupKind == FK_Data_4 || FixupKind == FK_Data_8 ||
438 FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
 439 GlobalOffsetTableExprKind Kind = startsWithGlobalOffsetTable(Expr);
 440 if (Kind != GOT_None) {
441 assert(ImmOffset == 0);
442
 443 if (Size == 8) {
 444 FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
 445 } else {
 446 assert(Size == 4);
 447 FixupKind = MCFixupKind(X86::reloc_global_offset_table);
 448 }
449
450 if (Kind == GOT_Normal)
451 ImmOffset = static_cast<int>(OS.tell() - StartByte);
452 } else if (Expr->getKind() == MCExpr::SymbolRef) {
 453 if (hasSecRelSymbolRef(Expr)) {
 454 FixupKind = MCFixupKind(FK_SecRel_4);
 455 }
456 } else if (Expr->getKind() == MCExpr::Binary) {
457 const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr *>(Expr);
458 if (hasSecRelSymbolRef(Bin->getLHS()) ||
 459 hasSecRelSymbolRef(Bin->getRHS())) {
 460 FixupKind = MCFixupKind(FK_SecRel_4);
 461 }
462 }
463 }
464
465 // If the fixup is pc-relative, we need to bias the value to be relative to
466 // the start of the field, not the end of the field.
 467 if (FixupKind == FK_PCRel_4 ||
 468 FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
 469 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
 470 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
 471 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex) ||
 472 FixupKind == MCFixupKind(X86::reloc_branch_4byte_pcrel)) {
 473 ImmOffset -= 4;
 474 // If this is a pc-relative load off _GLOBAL_OFFSET_TABLE_:
 475 // leaq _GLOBAL_OFFSET_TABLE_(%rip), %r15
 476 // this needs to be a GOTPC32 relocation.
 477 if (startsWithGlobalOffsetTable(Expr) != GOT_None)
 478 FixupKind = MCFixupKind(X86::reloc_global_offset_table);
 479 }
480 if (FixupKind == FK_PCRel_2)
481 ImmOffset -= 2;
482 if (FixupKind == FK_PCRel_1)
483 ImmOffset -= 1;
484
485 if (ImmOffset)
486 Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(ImmOffset, Ctx),
487 Ctx);
488
489 // Emit a symbolic constant as a fixup and 4 zeros.
490 Fixups.push_back(MCFixup::create(static_cast<uint32_t>(OS.tell() - StartByte),
491 Expr, FixupKind, Loc));
492 emitConstant(0, Size, OS);
493}
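// Note on the PC-relative bias above (illustration): for a "call rel32" the
// fixup covers the last 4 bytes of the instruction, but the CPU computes the
// target relative to the end of the instruction. Subtracting 4 from the
// expression (ImmOffset -= 4) makes the value resolved at the fixup offset
// equal target minus end-of-instruction, which is exactly what belongs in the
// rel32 field.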
494
495void X86MCCodeEmitter::emitRegModRMByte(const MCOperand &ModRMReg,
496 unsigned RegOpcodeFld,
497 raw_ostream &OS) const {
498 emitByte(modRMByte(3, RegOpcodeFld, getX86RegNum(ModRMReg)), OS);
499}
500
501void X86MCCodeEmitter::emitSIBByte(unsigned SS, unsigned Index, unsigned Base,
502 raw_ostream &OS) const {
503 // SIB byte is in the same format as the modRMByte.
504 emitByte(modRMByte(SS, Index, Base), OS);
505}
506
507void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
508 unsigned RegOpcodeField,
509 uint64_t TSFlags, PrefixKind Kind,
510 uint64_t StartByte, raw_ostream &OS,
 511 SmallVectorImpl<MCFixup> &Fixups,
 512 const MCSubtargetInfo &STI,
513 bool ForceSIB) const {
514 const MCOperand &Disp = MI.getOperand(Op + X86::AddrDisp);
515 const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
516 const MCOperand &Scale = MI.getOperand(Op + X86::AddrScaleAmt);
517 const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);
518 unsigned BaseReg = Base.getReg();
519
520 // Handle %rip relative addressing.
521 if (BaseReg == X86::RIP ||
522 BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
523 assert(STI.hasFeature(X86::Is64Bit) &&
524 "Rip-relative addressing requires 64-bit mode");
525 assert(IndexReg.getReg() == 0 && !ForceSIB &&
526 "Invalid rip-relative address");
527 emitByte(modRMByte(0, RegOpcodeField, 5), OS);
528
529 unsigned Opcode = MI.getOpcode();
530 unsigned FixupKind = [&]() {
531 // Enable relaxed relocation only for a MCSymbolRefExpr. We cannot use a
532 // relaxed relocation if an offset is present (e.g. x@GOTPCREL+4).
 533 if (!(Disp.isExpr() && isa<MCSymbolRefExpr>(Disp.getExpr())))
 534 return X86::reloc_riprel_4byte;
 535
536 // Certain loads for GOT references can be relocated against the symbol
537 // directly if the symbol ends up in the same linkage unit.
538 switch (Opcode) {
 539 default:
 540 return X86::reloc_riprel_4byte_relax;
 541 case X86::MOV64rm:
542 // movq loads is a subset of reloc_riprel_4byte_relax_rex. It is a
543 // special case because COFF and Mach-O don't support ELF's more
544 // flexible R_X86_64_REX_GOTPCRELX relaxation.
 545 assert(Kind == REX);
 546 return X86::reloc_riprel_4byte_movq_load;
 547 case X86::ADC32rm:
548 case X86::ADD32rm:
549 case X86::AND32rm:
550 case X86::CMP32rm:
551 case X86::MOV32rm:
552 case X86::OR32rm:
553 case X86::SBB32rm:
554 case X86::SUB32rm:
555 case X86::TEST32mr:
556 case X86::XOR32rm:
557 case X86::CALL64m:
558 case X86::JMP64m:
559 case X86::TAILJMPm64:
560 case X86::TEST64mr:
561 case X86::ADC64rm:
562 case X86::ADD64rm:
563 case X86::AND64rm:
564 case X86::CMP64rm:
565 case X86::OR64rm:
566 case X86::SBB64rm:
567 case X86::SUB64rm:
 568 case X86::XOR64rm:
 569 return Kind == REX ? X86::reloc_riprel_4byte_relax_rex
 570 : X86::reloc_riprel_4byte_relax;
 571 }
572 }();
573
574 // rip-relative addressing is actually relative to the *next* instruction.
575 // Since an immediate can follow the mod/rm byte for an instruction, this
576 // means that we need to bias the displacement field of the instruction with
577 // the size of the immediate field. If we have this case, add it into the
578 // expression to emit.
579 // Note: rip-relative addressing using immediate displacement values should
580 // not be adjusted, assuming it was the user's intent.
 581 int ImmSize = !Disp.isImm() && X86II::hasImm(TSFlags)
 582 ? X86II::getSizeOfImm(TSFlags)
 583 : 0;
584
585 emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), StartByte, OS,
586 Fixups, -ImmSize);
587 return;
588 }
589
590 unsigned BaseRegNo = BaseReg ? getX86RegNum(Base) : -1U;
591
592 // 16-bit addressing forms of the ModR/M byte have a different encoding for
593 // the R/M field and are far more limited in which registers can be used.
594 if (X86_MC::is16BitMemOperand(MI, Op, STI)) {
595 if (BaseReg) {
596 // For 32-bit addressing, the row and column values in Table 2-2 are
597 // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
598 // some special cases. And getX86RegNum reflects that numbering.
599 // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
600 // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
601 // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
602 // while values 0-3 indicate the allowed combinations (base+index) of
603 // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
604 //
605 // R16Table[] is a lookup from the normal RegNo, to the row values from
606 // Table 2-1 for 16-bit addressing modes. Where zero means disallowed.
607 static const unsigned R16Table[] = {0, 0, 0, 7, 0, 6, 4, 5};
608 unsigned RMfield = R16Table[BaseRegNo];
609
610 assert(RMfield && "invalid 16-bit base register");
611
612 if (IndexReg.getReg()) {
613 unsigned IndexReg16 = R16Table[getX86RegNum(IndexReg)];
614
615 assert(IndexReg16 && "invalid 16-bit index register");
616 // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
617 assert(((IndexReg16 ^ RMfield) & 2) &&
618 "invalid 16-bit base/index register combination");
619 assert(Scale.getImm() == 1 &&
620 "invalid scale for 16-bit memory reference");
621
622 // Allow base/index to appear in either order (although GAS doesn't).
623 if (IndexReg16 & 2)
624 RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
625 else
626 RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
627 }
628
629 if (Disp.isImm() && isInt<8>(Disp.getImm())) {
630 if (Disp.getImm() == 0 && RMfield != 6) {
631 // There is no displacement; just the register.
632 emitByte(modRMByte(0, RegOpcodeField, RMfield), OS);
633 return;
634 }
635 // Use the [REG]+disp8 form, including for [BP] which cannot be encoded.
636 emitByte(modRMByte(1, RegOpcodeField, RMfield), OS);
637 emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, OS, Fixups);
638 return;
639 }
640 // This is the [REG]+disp16 case.
641 emitByte(modRMByte(2, RegOpcodeField, RMfield), OS);
642 } else {
643 assert(IndexReg.getReg() == 0 && "Unexpected index register!");
644 // There is no BaseReg; this is the plain [disp16] case.
645 emitByte(modRMByte(0, RegOpcodeField, 6), OS);
646 }
647
648 // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
649 emitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, StartByte, OS, Fixups);
650 return;
651 }
652
653 // Check for presence of {disp8} or {disp32} pseudo prefixes.
654 bool UseDisp8 = MI.getFlags() & X86::IP_USE_DISP8;
655 bool UseDisp32 = MI.getFlags() & X86::IP_USE_DISP32;
656
657 // We only allow no displacement if no pseudo prefix is present.
658 bool AllowNoDisp = !UseDisp8 && !UseDisp32;
659 // Disp8 is allowed unless the {disp32} prefix is present.
660 bool AllowDisp8 = !UseDisp32;
661
662 // Determine whether a SIB byte is needed.
663 if (// The SIB byte must be used if there is an index register or the
664 // encoding requires a SIB byte.
665 !ForceSIB && IndexReg.getReg() == 0 &&
666 // The SIB byte must be used if the base is ESP/RSP/R12, all of which
667 // encode to an R/M value of 4, which indicates that a SIB byte is
668 // present.
669 BaseRegNo != N86::ESP &&
670 // If there is no base register and we're in 64-bit mode, we need a SIB
671 // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
672 (!STI.hasFeature(X86::Is64Bit) || BaseReg != 0)) {
673
674 if (BaseReg == 0) { // [disp32] in X86-32 mode
675 emitByte(modRMByte(0, RegOpcodeField, 5), OS);
676 emitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, StartByte, OS, Fixups);
677 return;
678 }
679
680 // If the base is not EBP/ESP/R12/R13 and there is no displacement, use
681 // simple indirect register encoding, this handles addresses like [EAX].
 682 // The encoding for [EBP] or [R13] with no displacement means [disp32] so we
683 // handle it by emitting a displacement of 0 later.
684 if (BaseRegNo != N86::EBP) {
685 if (Disp.isImm() && Disp.getImm() == 0 && AllowNoDisp) {
686 emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), OS);
687 return;
688 }
689
690 // If the displacement is @tlscall, treat it as a zero.
691 if (Disp.isExpr()) {
692 auto *Sym = dyn_cast<MCSymbolRefExpr>(Disp.getExpr());
693 if (Sym && Sym->getKind() == MCSymbolRefExpr::VK_TLSCALL) {
694 // This is exclusively used by call *a@tlscall(base). The relocation
695 // (R_386_TLSCALL or R_X86_64_TLSCALL) applies to the beginning.
696 Fixups.push_back(MCFixup::create(0, Sym, FK_NONE, MI.getLoc()));
697 emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), OS);
698 return;
699 }
700 }
701 }
702
703 // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
704 // Including a compressed disp8 for EVEX instructions that support it.
705 // This also handles the 0 displacement for [EBP] or [R13]. We can't use
706 // disp8 if the {disp32} pseudo prefix is present.
707 if (Disp.isImm() && AllowDisp8) {
708 int ImmOffset = 0;
709 if (isDispOrCDisp8(TSFlags, Disp.getImm(), ImmOffset)) {
710 emitByte(modRMByte(1, RegOpcodeField, BaseRegNo), OS);
711 emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, OS, Fixups,
712 ImmOffset);
713 return;
714 }
715 }
716
717 // Otherwise, emit the most general non-SIB encoding: [REG+disp32].
718 // Displacement may be 0 for [EBP] or [R13] case if {disp32} pseudo prefix
719 // prevented using disp8 above.
720 emitByte(modRMByte(2, RegOpcodeField, BaseRegNo), OS);
721 unsigned Opcode = MI.getOpcode();
 722 unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
 723 : X86::reloc_signed_4byte;
724 emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), StartByte, OS,
725 Fixups);
726 return;
727 }
728
729 // We need a SIB byte, so start by outputting the ModR/M byte first
730 assert(IndexReg.getReg() != X86::ESP && IndexReg.getReg() != X86::RSP &&
731 "Cannot use ESP as index reg!");
732
733 bool ForceDisp32 = false;
734 bool ForceDisp8 = false;
735 int ImmOffset = 0;
736 if (BaseReg == 0) {
737 // If there is no base register, we emit the special case SIB byte with
738 // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
739 BaseRegNo = 5;
740 emitByte(modRMByte(0, RegOpcodeField, 4), OS);
741 ForceDisp32 = true;
742 } else if (Disp.isImm() && Disp.getImm() == 0 && AllowNoDisp &&
743 // Base reg can't be EBP/RBP/R13 as that would end up with '5' as
744 // the base field, but that is the magic [*] nomenclature that
745 // indicates no base when mod=0. For these cases we'll emit a 0
746 // displacement instead.
747 BaseRegNo != N86::EBP) {
748 // Emit no displacement ModR/M byte
749 emitByte(modRMByte(0, RegOpcodeField, 4), OS);
750 } else if (Disp.isImm() && AllowDisp8 &&
751 isDispOrCDisp8(TSFlags, Disp.getImm(), ImmOffset)) {
752 // Displacement fits in a byte or matches an EVEX compressed disp8, use
753 // disp8 encoding. This also handles EBP/R13 base with 0 displacement unless
754 // {disp32} pseudo prefix was used.
755 emitByte(modRMByte(1, RegOpcodeField, 4), OS);
756 ForceDisp8 = true;
757 } else {
758 // Otherwise, emit the normal disp32 encoding.
759 emitByte(modRMByte(2, RegOpcodeField, 4), OS);
760 ForceDisp32 = true;
761 }
762
763 // Calculate what the SS field value should be...
764 static const unsigned SSTable[] = {~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3};
765 unsigned SS = SSTable[Scale.getImm()];
766
767 unsigned IndexRegNo = IndexReg.getReg() ? getX86RegNum(IndexReg) : 4;
768
769 emitSIBByte(SS, IndexRegNo, BaseRegNo, OS);
770
771 // Do we need to output a displacement?
772 if (ForceDisp8)
773 emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, OS, Fixups,
774 ImmOffset);
775 else if (ForceDisp32)
776 emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
777 StartByte, OS, Fixups);
778}
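// Worked example (illustration): "movl 8(%rax,%rcx,4), %edx" takes the SIB
// path above and encodes as 8B 54 88 08: ModRM 0x54 (mod=01 for disp8,
// reg=EDX, r/m=100 meaning "SIB follows"), SIB 0x88 (scale=4, index=RCX,
// base=RAX), then the disp8 byte 0x08.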
779
780/// Emit all instruction prefixes.
781///
782/// \returns one of the REX, XOP, VEX2, VEX3, EVEX if any of them is used,
783/// otherwise returns None.
784PrefixKind X86MCCodeEmitter::emitPrefixImpl(unsigned &CurOp, const MCInst &MI,
785 const MCSubtargetInfo &STI,
786 raw_ostream &OS) const {
787 uint64_t TSFlags = MCII.get(MI.getOpcode()).TSFlags;
788 // Determine where the memory operand starts, if present.
789 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
790 // Emit segment override opcode prefix as needed.
791 if (MemoryOperand != -1) {
792 MemoryOperand += CurOp;
793 emitSegmentOverridePrefix(MemoryOperand + X86::AddrSegmentReg, MI, OS);
794 }
795
796 // Emit the repeat opcode prefix as needed.
797 unsigned Flags = MI.getFlags();
798 if (TSFlags & X86II::REP || Flags & X86::IP_HAS_REPEAT)
799 emitByte(0xF3, OS);
800 if (Flags & X86::IP_HAS_REPEAT_NE)
801 emitByte(0xF2, OS);
802
803 // Emit the address size opcode prefix as needed.
804 if (X86_MC::needsAddressSizeOverride(MI, STI, MemoryOperand, TSFlags) ||
805 Flags & X86::IP_HAS_AD_SIZE)
806 emitByte(0x67, OS);
807
 807
 808 uint64_t Form = TSFlags & X86II::FormMask;
 809 switch (Form) {
810 default:
811 break;
812 case X86II::RawFrmDstSrc: {
813 // Emit segment override opcode prefix as needed (not for %ds).
814 if (MI.getOperand(2).getReg() != X86::DS)
815 emitSegmentOverridePrefix(2, MI, OS);
816 CurOp += 3; // Consume operands.
817 break;
818 }
819 case X86II::RawFrmSrc: {
820 // Emit segment override opcode prefix as needed (not for %ds).
821 if (MI.getOperand(1).getReg() != X86::DS)
822 emitSegmentOverridePrefix(1, MI, OS);
823 CurOp += 2; // Consume operands.
824 break;
825 }
826 case X86II::RawFrmDst: {
827 ++CurOp; // Consume operand.
828 break;
829 }
 830 case X86II::RawFrmMemOffs: {
 831 // Emit segment override opcode prefix as needed.
832 emitSegmentOverridePrefix(1, MI, OS);
833 break;
834 }
835 }
836
837 // REX prefix is optional, but if used must be immediately before the opcode
838 // Encoding type for this instruction.
 839 return (TSFlags & X86II::EncodingMask)
 840 ? emitVEXOpcodePrefix(MemoryOperand, MI, OS)
841 : emitOpcodePrefix(MemoryOperand, MI, STI, OS);
842}
843
844// AVX instructions are encoded using an encoding scheme that combines
845// prefix bytes, opcode extension field, operand encoding fields, and vector
846// length encoding capability into a new prefix, referred to as VEX.
847
848// The majority of the AVX-512 family of instructions (operating on
849// 512/256/128-bit vector register operands) are encoded using a new prefix
850// (called EVEX).
851
852// XOP is a revised subset of what was originally intended as SSE5. It was
853// changed to be similar but not overlapping with AVX.
854
855/// Emit XOP, VEX2, VEX3 or EVEX prefix.
856/// \returns the used prefix.
857PrefixKind X86MCCodeEmitter::emitVEXOpcodePrefix(int MemOperand,
858 const MCInst &MI,
859 raw_ostream &OS) const {
860 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
861 uint64_t TSFlags = Desc.TSFlags;
862
863 assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.");
864
865 X86OpcodePrefixHelper Prefix(*Ctx.getRegisterInfo());
866 switch (TSFlags & X86II::EncodingMask) {
867 default:
868 break;
869 case X86II::XOP:
870 Prefix.setLowerBound(XOP);
871 break;
872 case X86II::VEX:
873 // VEX can be 2 byte or 3 byte, not determined yet if not explicit
874 Prefix.setLowerBound(MI.getFlags() & X86::IP_USE_VEX3 ? VEX3 : VEX2);
875 break;
876 case X86II::EVEX:
877 Prefix.setLowerBound(EVEX);
878 break;
879 }
880
 881 Prefix.setW(TSFlags & X86II::VEX_W);
 882
883 bool HasEVEX_K = TSFlags & X86II::EVEX_K;
884 bool HasVEX_4V = TSFlags & X86II::VEX_4V;
885 bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
886
887 switch (TSFlags & X86II::OpMapMask) {
888 default:
889 llvm_unreachable("Invalid prefix!");
890 case X86II::TB:
891 Prefix.set5M(0x1); // 0F
892 break;
893 case X86II::T8:
894 Prefix.set5M(0x2); // 0F 38
895 break;
896 case X86II::TA:
897 Prefix.set5M(0x3); // 0F 3A
898 break;
899 case X86II::XOP8:
900 Prefix.set5M(0x8);
901 break;
902 case X86II::XOP9:
903 Prefix.set5M(0x9);
904 break;
905 case X86II::XOPA:
906 Prefix.set5M(0xA);
907 break;
908 case X86II::T_MAP5:
909 Prefix.set5M(0x5);
910 break;
911 case X86II::T_MAP6:
912 Prefix.set5M(0x6);
913 break;
914 }
915
 916 Prefix.setL(TSFlags & X86II::VEX_L);
 917 Prefix.setL2(TSFlags & X86II::EVEX_L2);
 918 switch (TSFlags & X86II::OpPrefixMask) {
919 case X86II::PD:
920 Prefix.setPP(0x1); // 66
921 break;
922 case X86II::XS:
923 Prefix.setPP(0x2); // F3
924 break;
925 case X86II::XD:
926 Prefix.setPP(0x3); // F2
927 break;
928 }
929
930 Prefix.setZ(HasEVEX_K && (TSFlags & X86II::EVEX_Z));
931 Prefix.setEVEX_b(TSFlags & X86II::EVEX_B);
932
933 bool EncodeRC = false;
934 uint8_t EVEX_rc = 0;
935 unsigned CurOp = X86II::getOperandBias(Desc);
936
937 switch (TSFlags & X86II::FormMask) {
938 default:
939 llvm_unreachable("Unexpected form in emitVEXOpcodePrefix!");
 940 case X86II::MRMDestMem4VOp3CC: {
 941 // MemAddr, src1(ModR/M), src2(VEX_4V)
942 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
943 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
944 CurOp += X86::AddrNumOperands;
945 Prefix.setR(MI, ++CurOp);
946 Prefix.set4V(MI, CurOp++);
947 break;
948 }
949 case X86II::MRM_C0:
950 case X86II::RawFrm:
951 break;
 952 case X86II::MRMDestMemFSIB:
 953 case X86II::MRMDestMem: {
954 // MRMDestMem instructions forms:
955 // MemAddr, src1(ModR/M)
956 // MemAddr, src1(VEX_4V), src2(ModR/M)
957 // MemAddr, src1(ModR/M), imm8
958 //
959 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
960 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
961 if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
962 Prefix.setV2(MI, MemOperand + X86::AddrIndexReg);
963
964 CurOp += X86::AddrNumOperands;
965
966 if (HasEVEX_K)
967 Prefix.setAAA(MI, CurOp++);
968
969 if (HasVEX_4V)
970 Prefix.set4VV2(MI, CurOp++);
971
972 Prefix.setRR2(MI, CurOp++);
973 break;
974 }
 975 case X86II::MRMSrcMemFSIB:
 976 case X86II::MRMSrcMem: {
977 // MRMSrcMem instructions forms:
978 // src1(ModR/M), MemAddr
979 // src1(ModR/M), src2(VEX_4V), MemAddr
980 // src1(ModR/M), MemAddr, imm8
981 // src1(ModR/M), MemAddr, src2(Imm[7:4])
982 //
983 // FMA4:
984 // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
985 Prefix.setRR2(MI, CurOp++);
986
987 if (HasEVEX_K)
988 Prefix.setAAA(MI, CurOp++);
989
990 if (HasVEX_4V)
991 Prefix.set4VV2(MI, CurOp++);
992
993 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
994 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
995 if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
996 Prefix.setV2(MI, MemOperand + X86::AddrIndexReg);
997
998 break;
999 }
1000 case X86II::MRMSrcMem4VOp3: {
1001 // Instruction format for 4VOp3:
1002 // src1(ModR/M), MemAddr, src3(VEX_4V)
1003 Prefix.setR(MI, CurOp++);
1004 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
1005 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
1006 Prefix.set4V(MI, CurOp + X86::AddrNumOperands);
1007 break;
1008 }
1009 case X86II::MRMSrcMemOp4: {
1010 // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
1011 Prefix.setR(MI, CurOp++);
1012 Prefix.set4V(MI, CurOp++);
1013 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
1014 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
1015 break;
1016 }
1017 case X86II::MRM0m:
1018 case X86II::MRM1m:
1019 case X86II::MRM2m:
1020 case X86II::MRM3m:
1021 case X86II::MRM4m:
1022 case X86II::MRM5m:
1023 case X86II::MRM6m:
1024 case X86II::MRM7m: {
1025 // MRM[0-9]m instructions forms:
1026 // MemAddr
1027 // src1(VEX_4V), MemAddr
1028 if (HasVEX_4V)
1029 Prefix.set4VV2(MI, CurOp++);
1030
1031 if (HasEVEX_K)
1032 Prefix.setAAA(MI, CurOp++);
1033
1034 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
1035 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
1036 if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
1037 Prefix.setV2(MI, MemOperand + X86::AddrIndexReg);
1038
1039 break;
1040 }
1041 case X86II::MRMSrcReg: {
1042 // MRMSrcReg instructions forms:
1043 // dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
1044 // dst(ModR/M), src1(ModR/M)
1045 // dst(ModR/M), src1(ModR/M), imm8
1046 //
1047 // FMA4:
1048 // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
1049 Prefix.setRR2(MI, CurOp++);
1050
1051 if (HasEVEX_K)
1052 Prefix.setAAA(MI, CurOp++);
1053
1054 if (HasVEX_4V)
1055 Prefix.set4VV2(MI, CurOp++);
1056
1057 Prefix.setB(MI, CurOp);
1058 Prefix.setX(MI, CurOp, 4);
1059 ++CurOp;
1060
1061 if (TSFlags & X86II::EVEX_B) {
1062 if (HasEVEX_RC) {
1063 unsigned NumOps = Desc.getNumOperands();
1064 unsigned RcOperand = NumOps - 1;
1065 assert(RcOperand >= CurOp);
1066 EVEX_rc = MI.getOperand(RcOperand).getImm();
1067 assert(EVEX_rc <= 3 && "Invalid rounding control!");
1068 }
1069 EncodeRC = true;
1070 }
1071 break;
1072 }
1073 case X86II::MRMSrcReg4VOp3: {
1074 // Instruction format for 4VOp3:
1075 // src1(ModR/M), src2(ModR/M), src3(VEX_4V)
1076 Prefix.setR(MI, CurOp++);
1077 Prefix.setB(MI, CurOp++);
1078 Prefix.set4V(MI, CurOp++);
1079 break;
1080 }
1081 case X86II::MRMSrcRegOp4: {
1082 // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
1083 Prefix.setR(MI, CurOp++);
1084 Prefix.set4V(MI, CurOp++);
1085 // Skip second register source (encoded in Imm[7:4])
1086 ++CurOp;
1087
1088 Prefix.setB(MI, CurOp);
1089 Prefix.setX(MI, CurOp, 4);
1090 ++CurOp;
1091 break;
1092 }
1093 case X86II::MRMDestReg: {
1094 // MRMDestReg instructions forms:
1095 // dst(ModR/M), src(ModR/M)
1096 // dst(ModR/M), src(ModR/M), imm8
1097 // dst(ModR/M), src1(VEX_4V), src2(ModR/M)
1098 Prefix.setB(MI, CurOp);
1099 Prefix.setX(MI, CurOp, 4);
1100 ++CurOp;
1101
1102 if (HasEVEX_K)
1103 Prefix.setAAA(MI, CurOp++);
1104
1105 if (HasVEX_4V)
1106 Prefix.set4VV2(MI, CurOp++);
1107
1108 Prefix.setRR2(MI, CurOp++);
1109 if (TSFlags & X86II::EVEX_B)
1110 EncodeRC = true;
1111 break;
1112 }
1113 case X86II::MRMr0: {
1114 // MRMr0 instructions forms:
1115 // 11:rrr:000
1116 // dst(ModR/M)
1117 Prefix.setRR2(MI, CurOp++);
1118 break;
1119 }
1120 case X86II::MRM0r:
1121 case X86II::MRM1r:
1122 case X86II::MRM2r:
1123 case X86II::MRM3r:
1124 case X86II::MRM4r:
1125 case X86II::MRM5r:
1126 case X86II::MRM6r:
1127 case X86II::MRM7r: {
1128 // MRM0r-MRM7r instructions forms:
1129 // dst(VEX_4V), src(ModR/M), imm8
1130 if (HasVEX_4V)
1131 Prefix.set4VV2(MI, CurOp++);
1132
1133 if (HasEVEX_K)
1134 Prefix.setAAA(MI, CurOp++);
1135
1136 Prefix.setB(MI, CurOp);
1137 Prefix.setX(MI, CurOp, 4);
1138 ++CurOp;
1139 break;
1140 }
1141 }
1142 if (EncodeRC) {
1143 Prefix.setL(EVEX_rc & 0x1);
1144 Prefix.setL2(EVEX_rc & 0x2);
1145 }
1146 PrefixKind Kind = Prefix.determineOptimalKind();
1147 Prefix.emit(OS);
1148 return Kind;
1149}
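// Illustrative result of the logic above: "vaddps %zmm2, %zmm1, %zmm0"
// (EVEX.512.0F.W0 58 /r) gets the 4-byte prefix 62 F1 74 48: P0=F1 (inverted
// R/X/B/R' all set, mmm=001 for the 0F map), P1=74 (W=0, vvvv=~0001, pp=00),
// P2=48 (L'L=10 for 512-bit, aaa=000), followed by opcode 58 and ModRM C2.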
1150
1151/// Emit REX prefix which specifies
1152/// 1) 64-bit instructions,
1153/// 2) non-default operand size, and
1154/// 3) use of X86-64 extended registers.
1155///
1156/// \returns the used prefix (REX or None).
1157PrefixKind X86MCCodeEmitter::emitREXPrefix(int MemOperand, const MCInst &MI,
1158 const MCSubtargetInfo &STI,
1159 raw_ostream &OS) const {
1160 if (!STI.hasFeature(X86::Is64Bit))
1161 return None;
1162 X86OpcodePrefixHelper Prefix(*Ctx.getRegisterInfo());
1163 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
1164 uint64_t TSFlags = Desc.TSFlags;
1165 Prefix.setW(TSFlags & X86II::REX_W);
1166 unsigned NumOps = MI.getNumOperands();
1167 bool UsesHighByteReg = false;
1168#ifndef NDEBUG
1169 bool HasRegOp = false;
1170#endif
1171 unsigned CurOp = NumOps ? X86II::getOperandBias(Desc) : 0;
1172 for (unsigned i = CurOp; i != NumOps; ++i) {
1173 const MCOperand &MO = MI.getOperand(i);
1174 if (MO.isReg()) {
1175#ifndef NDEBUG
1176 HasRegOp = true;
1177#endif
1178 unsigned Reg = MO.getReg();
1179 if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
1180 UsesHighByteReg = true;
1181 // If it accesses SPL, BPL, SIL, or DIL, then it requires a REX prefix.
 1182 if (X86II::isX86_64NonExtLowByteReg(Reg))
 1183 Prefix.setLowerBound(REX);
1184 } else if (MO.isExpr() && STI.getTargetTriple().isX32()) {
1185 // GOTTPOFF and TLSDESC relocations require a REX prefix to allow
1186 // linker optimizations: even if the instructions we see may not require
1187 // any prefix, they may be replaced by instructions that do. This is
1188 // handled as a special case here so that it also works for hand-written
1189 // assembly without the user needing to write REX, as with GNU as.
1190 const auto *Ref = dyn_cast<MCSymbolRefExpr>(MO.getExpr());
1191 if (Ref && (Ref->getKind() == MCSymbolRefExpr::VK_GOTTPOFF ||
1192 Ref->getKind() == MCSymbolRefExpr::VK_TLSDESC)) {
1193 Prefix.setLowerBound(REX);
1194 }
1195 }
1196 }
1197 switch (TSFlags & X86II::FormMask) {
1198 default:
1199 assert(!HasRegOp && "Unexpected form in emitREXPrefix!");
1200 break;
 1201 case X86II::RawFrm:
 1202 case X86II::RawFrmMemOffs:
 1203 case X86II::RawFrmSrc:
 1204 case X86II::RawFrmDst:
 1205 case X86II::RawFrmDstSrc:
 1206 break;
1207 case X86II::AddRegFrm:
1208 Prefix.setB(MI, CurOp++);
1209 break;
1210 case X86II::MRMSrcReg:
1211 case X86II::MRMSrcRegCC:
1212 Prefix.setR(MI, CurOp++);
1213 Prefix.setB(MI, CurOp++);
1214 break;
1215 case X86II::MRMSrcMem:
1216 case X86II::MRMSrcMemCC:
1217 Prefix.setR(MI, CurOp++);
1218 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
1219 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
1220 CurOp += X86::AddrNumOperands;
1221 break;
1222 case X86II::MRMDestReg:
1223 Prefix.setB(MI, CurOp++);
1224 Prefix.setR(MI, CurOp++);
1225 break;
1226 case X86II::MRMDestMem:
1227 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
1228 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
1229 CurOp += X86::AddrNumOperands;
1230 Prefix.setR(MI, CurOp++);
1231 break;
1232 case X86II::MRMXmCC:
1233 case X86II::MRMXm:
1234 case X86II::MRM0m:
1235 case X86II::MRM1m:
1236 case X86II::MRM2m:
1237 case X86II::MRM3m:
1238 case X86II::MRM4m:
1239 case X86II::MRM5m:
1240 case X86II::MRM6m:
1241 case X86II::MRM7m:
1242 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
1243 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
1244 break;
1245 case X86II::MRMXrCC:
1246 case X86II::MRMXr:
1247 case X86II::MRM0r:
1248 case X86II::MRM1r:
1249 case X86II::MRM2r:
1250 case X86II::MRM3r:
1251 case X86II::MRM4r:
1252 case X86II::MRM5r:
1253 case X86II::MRM6r:
1254 case X86II::MRM7r:
1255 Prefix.setB(MI, CurOp++);
1256 break;
1257 }
1258 PrefixKind Kind = Prefix.determineOptimalKind();
1259 if (Kind && UsesHighByteReg)
1261 "Cannot encode high byte register in REX-prefixed instruction");
1262 Prefix.emit(OS);
1263 return Kind;
1264}
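// Worked example (illustration): "addq %r8, %rax" needs REX because the
// source register is R8 and the operation is 64-bit, so the helper above
// produces 0x4C (0x40 | W<<3 | R<<2), and the whole instruction encodes as
// 4C 01 C0.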
1265
1266/// Emit segment override opcode prefix as needed.
1267void X86MCCodeEmitter::emitSegmentOverridePrefix(unsigned SegOperand,
1268 const MCInst &MI,
1269 raw_ostream &OS) const {
1270 // Check for explicit segment override on memory operand.
1271 if (unsigned Reg = MI.getOperand(SegOperand).getReg())
 1272 emitByte(X86::getSegmentOverridePrefixForReg(Reg), OS);
 1273}
1274
1275/// Emit all instruction prefixes prior to the opcode.
1276///
1277/// \param MemOperand the operand # of the start of a memory operand if present.
1278/// If not present, it is -1.
1279///
1280/// \returns the used prefix (REX or None).
1281PrefixKind X86MCCodeEmitter::emitOpcodePrefix(int MemOperand, const MCInst &MI,
1282 const MCSubtargetInfo &STI,
1283 raw_ostream &OS) const {
1284 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
1285 uint64_t TSFlags = Desc.TSFlags;
1286
1287 // Emit the operand size opcode prefix as needed.
1288 if ((TSFlags & X86II::OpSizeMask) ==
1289 (STI.hasFeature(X86::Is16Bit) ? X86II::OpSize32 : X86II::OpSize16))
1290 emitByte(0x66, OS);
1291
1292 // Emit the LOCK opcode prefix.
1293 if (TSFlags & X86II::LOCK || MI.getFlags() & X86::IP_HAS_LOCK)
1294 emitByte(0xF0, OS);
1295
1296 // Emit the NOTRACK opcode prefix.
1297 if (TSFlags & X86II::NOTRACK || MI.getFlags() & X86::IP_HAS_NOTRACK)
1298 emitByte(0x3E, OS);
1299
1300 switch (TSFlags & X86II::OpPrefixMask) {
1301 case X86II::PD: // 66
1302 emitByte(0x66, OS);
1303 break;
1304 case X86II::XS: // F3
1305 emitByte(0xF3, OS);
1306 break;
1307 case X86II::XD: // F2
1308 emitByte(0xF2, OS);
1309 break;
1310 }
1311
1312 // Handle REX prefix.
1313 assert((STI.hasFeature(X86::Is64Bit) || !(TSFlags & X86II::REX_W)) &&
1314 "REX.W requires 64bit mode.");
1315 PrefixKind Kind = emitREXPrefix(MemOperand, MI, STI, OS);
1316
1317 // 0x0F escape code must be emitted just before the opcode.
1318 switch (TSFlags & X86II::OpMapMask) {
1319 case X86II::TB: // Two-byte opcode map
1320 case X86II::T8: // 0F 38
1321 case X86II::TA: // 0F 3A
1322 case X86II::ThreeDNow: // 0F 0F, second 0F emitted by caller.
1323 emitByte(0x0F, OS);
1324 break;
1325 }
1326
1327 switch (TSFlags & X86II::OpMapMask) {
1328 case X86II::T8: // 0F 38
1329 emitByte(0x38, OS);
1330 break;
1331 case X86II::TA: // 0F 3A
1332 emitByte(0x3A, OS);
1333 break;
1334 }
1335
1336 return Kind;
1337}
1338
1339void X86MCCodeEmitter::emitPrefix(const MCInst &MI, raw_ostream &OS,
1340 const MCSubtargetInfo &STI) const {
1341 unsigned Opcode = MI.getOpcode();
1342 const MCInstrDesc &Desc = MCII.get(Opcode);
1343 uint64_t TSFlags = Desc.TSFlags;
1344
1345 // Pseudo instructions don't get encoded.
 1346 if (X86II::isPseudo(TSFlags))
 1347 return;
1348
1349 unsigned CurOp = X86II::getOperandBias(Desc);
1350
1351 emitPrefixImpl(CurOp, MI, STI, OS);
1352}
1353
1354void X86MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
 1355 SmallVectorImpl<MCFixup> &Fixups,
 1356 const MCSubtargetInfo &STI) const {
1357 unsigned Opcode = MI.getOpcode();
1358 const MCInstrDesc &Desc = MCII.get(Opcode);
1359 uint64_t TSFlags = Desc.TSFlags;
1360
1361 // Pseudo instructions don't get encoded.
 1362 if (X86II::isPseudo(TSFlags))
 1363 return;
1364
1365 unsigned NumOps = Desc.getNumOperands();
1366 unsigned CurOp = X86II::getOperandBias(Desc);
1367
1368 uint64_t StartByte = OS.tell();
1369
1370 PrefixKind Kind = emitPrefixImpl(CurOp, MI, STI, OS);
1371
 1372 // Does it use the VEX.VVVV field?
1373 bool HasVEX_4V = TSFlags & X86II::VEX_4V;
1374 bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg;
1375
 1376 // Does it use the EVEX.aaa field?
1377 bool HasEVEX_K = TSFlags & X86II::EVEX_K;
1378 bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
1379
1380 // Used if a register is encoded in 7:4 of immediate.
1381 unsigned I8RegNum = 0;
1382
1383 uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
1384
1386 BaseOpcode = 0x0F; // Weird 3DNow! encoding.
1387
1388 unsigned OpcodeOffset = 0;
1389
 1390 uint64_t Form = TSFlags & X86II::FormMask;
 1391 switch (Form) {
1392 default:
1393 errs() << "FORM: " << Form << "\n";
1394 llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
1395 case X86II::Pseudo:
1396 llvm_unreachable("Pseudo instruction shouldn't be emitted");
 1397 case X86II::RawFrmDstSrc:
 1398 case X86II::RawFrmSrc:
1399 case X86II::RawFrmDst:
1400 case X86II::PrefixByte:
1401 emitByte(BaseOpcode, OS);
1402 break;
1403 case X86II::AddCCFrm: {
1404 // This will be added to the opcode in the fallthrough.
1405 OpcodeOffset = MI.getOperand(NumOps - 1).getImm();
1406 assert(OpcodeOffset < 16 && "Unexpected opcode offset!");
1407 --NumOps; // Drop the operand from the end.
1408 [[fallthrough]];
1409 case X86II::RawFrm:
1410 emitByte(BaseOpcode + OpcodeOffset, OS);
1411
1412 if (!STI.hasFeature(X86::Is64Bit) || !isPCRel32Branch(MI, MCII))
1413 break;
1414
1415 const MCOperand &Op = MI.getOperand(CurOp++);
1416 emitImmediate(Op, MI.getLoc(), X86II::getSizeOfImm(TSFlags),
 1417 MCFixupKind(X86::reloc_branch_4byte_pcrel), StartByte, OS,
 1418 Fixups);
1419 break;
1420 }
 1421 case X86II::RawFrmMemOffs:
 1422 emitByte(BaseOpcode, OS);
1423 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
 1424 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
 1425 StartByte, OS, Fixups);
1426 ++CurOp; // skip segment operand
1427 break;
1428 case X86II::RawFrmImm8:
1429 emitByte(BaseOpcode, OS);
1430 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
 1431 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
 1432 StartByte, OS, Fixups);
1433 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, StartByte,
1434 OS, Fixups);
1435 break;
1436 case X86II::RawFrmImm16:
1437 emitByte(BaseOpcode, OS);
1438 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
 1439 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
 1440 StartByte, OS, Fixups);
1441 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, StartByte,
1442 OS, Fixups);
1443 break;
1444
1445 case X86II::AddRegFrm:
1446 emitByte(BaseOpcode + getX86RegNum(MI.getOperand(CurOp++)), OS);
1447 break;
1448
1449 case X86II::MRMDestReg: {
1450 emitByte(BaseOpcode, OS);
1451 unsigned SrcRegNum = CurOp + 1;
1452
1453 if (HasEVEX_K) // Skip writemask
1454 ++SrcRegNum;
1455
1456 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1457 ++SrcRegNum;
1458
1459 emitRegModRMByte(MI.getOperand(CurOp),
1460 getX86RegNum(MI.getOperand(SrcRegNum)), OS);
1461 CurOp = SrcRegNum + 1;
1462 break;
1463 }
 1464 case X86II::MRMDestMem4VOp3CC: {
 1465 unsigned CC = MI.getOperand(8).getImm();
1466 emitByte(BaseOpcode + CC, OS);
1467 unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
1468 emitMemModRMByte(MI, CurOp + 1, getX86RegNum(MI.getOperand(0)), TSFlags,
1469 Kind, StartByte, OS, Fixups, STI, false);
1470 CurOp = SrcRegNum + 3; // skip reg, VEX_V4 and CC
1471 break;
1472 }
 1473 case X86II::MRMDestMemFSIB:
 1474 case X86II::MRMDestMem: {
1475 emitByte(BaseOpcode, OS);
1476 unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
1477
1478 if (HasEVEX_K) // Skip writemask
1479 ++SrcRegNum;
1480
1481 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1482 ++SrcRegNum;
1483
1484 bool ForceSIB = (Form == X86II::MRMDestMemFSIB);
1485 emitMemModRMByte(MI, CurOp, getX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
1486 Kind, StartByte, OS, Fixups, STI, ForceSIB);
1487 CurOp = SrcRegNum + 1;
1488 break;
1489 }
1490 case X86II::MRMSrcReg: {
1491 emitByte(BaseOpcode, OS);
1492 unsigned SrcRegNum = CurOp + 1;
1493
1494 if (HasEVEX_K) // Skip writemask
1495 ++SrcRegNum;
1496
1497 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1498 ++SrcRegNum;
1499
1500 emitRegModRMByte(MI.getOperand(SrcRegNum),
1501 getX86RegNum(MI.getOperand(CurOp)), OS);
1502 CurOp = SrcRegNum + 1;
1503 if (HasVEX_I8Reg)
1504 I8RegNum = getX86RegEncoding(MI, CurOp++);
1505 // do not count the rounding control operand
1506 if (HasEVEX_RC)
1507 --NumOps;
1508 break;
1509 }
1510 case X86II::MRMSrcReg4VOp3: {
1511 emitByte(BaseOpcode, OS);
1512 unsigned SrcRegNum = CurOp + 1;
1513
1514 emitRegModRMByte(MI.getOperand(SrcRegNum),
1515 getX86RegNum(MI.getOperand(CurOp)), OS);
1516 CurOp = SrcRegNum + 1;
1517 ++CurOp; // Encoded in VEX.VVVV
1518 break;
1519 }
1520 case X86II::MRMSrcRegOp4: {
1521 emitByte(BaseOpcode, OS);
1522 unsigned SrcRegNum = CurOp + 1;
1523
1524 // Skip 1st src (which is encoded in VEX_VVVV)
1525 ++SrcRegNum;
1526
1527 // Capture 2nd src (which is encoded in Imm[7:4])
1528 assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
1529 I8RegNum = getX86RegEncoding(MI, SrcRegNum++);
1530
1531 emitRegModRMByte(MI.getOperand(SrcRegNum),
1532 getX86RegNum(MI.getOperand(CurOp)), OS);
1533 CurOp = SrcRegNum + 1;
1534 break;
1535 }
1536 case X86II::MRMSrcRegCC: {
1537 unsigned FirstOp = CurOp++;
1538 unsigned SecondOp = CurOp++;
1539
1540 unsigned CC = MI.getOperand(CurOp++).getImm();
1541 emitByte(BaseOpcode + CC, OS);
1542
1543 emitRegModRMByte(MI.getOperand(SecondOp),
1544 getX86RegNum(MI.getOperand(FirstOp)), OS);
1545 break;
1546 }
 1547 case X86II::MRMSrcMemFSIB:
 1548 case X86II::MRMSrcMem: {
1549 unsigned FirstMemOp = CurOp + 1;
1550
1551 if (HasEVEX_K) // Skip writemask
1552 ++FirstMemOp;
1553
1554 if (HasVEX_4V)
1555 ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
1556
1557 emitByte(BaseOpcode, OS);
1558
1559 bool ForceSIB = (Form == X86II::MRMSrcMemFSIB);
1560 emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
1561 TSFlags, Kind, StartByte, OS, Fixups, STI, ForceSIB);
1562 CurOp = FirstMemOp + X86::AddrNumOperands;
1563 if (HasVEX_I8Reg)
1564 I8RegNum = getX86RegEncoding(MI, CurOp++);
1565 break;
1566 }
1567 case X86II::MRMSrcMem4VOp3: {
1568 unsigned FirstMemOp = CurOp + 1;
1569
1570 emitByte(BaseOpcode, OS);
1571
1572 emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
1573 TSFlags, Kind, StartByte, OS, Fixups, STI);
1574 CurOp = FirstMemOp + X86::AddrNumOperands;
1575 ++CurOp; // Encoded in VEX.VVVV.
1576 break;
1577 }
1578 case X86II::MRMSrcMemOp4: {
1579 unsigned FirstMemOp = CurOp + 1;
1580
1581 ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
1582
1583 // Capture second register source (encoded in Imm[7:4])
1584 assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
1585 I8RegNum = getX86RegEncoding(MI, FirstMemOp++);
1586
1587 emitByte(BaseOpcode, OS);
1588
1589 emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
1590 TSFlags, Kind, StartByte, OS, Fixups, STI);
1591 CurOp = FirstMemOp + X86::AddrNumOperands;
1592 break;
1593 }
1594 case X86II::MRMSrcMemCC: {
1595 unsigned RegOp = CurOp++;
1596 unsigned FirstMemOp = CurOp;
1597 CurOp = FirstMemOp + X86::AddrNumOperands;
1598
1599 unsigned CC = MI.getOperand(CurOp++).getImm();
1600 emitByte(BaseOpcode + CC, OS);
1601
1602 emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(RegOp)),
1603 TSFlags, Kind, StartByte, OS, Fixups, STI);
1604 break;
1605 }
1606
1607 case X86II::MRMXrCC: {
1608 unsigned RegOp = CurOp++;
1609
1610 unsigned CC = MI.getOperand(CurOp++).getImm();
1611 emitByte(BaseOpcode + CC, OS);
1612 emitRegModRMByte(MI.getOperand(RegOp), 0, OS);
1613 break;
1614 }
1615
1616 case X86II::MRMXr:
1617 case X86II::MRM0r:
1618 case X86II::MRM1r:
1619 case X86II::MRM2r:
1620 case X86II::MRM3r:
1621 case X86II::MRM4r:
1622 case X86II::MRM5r:
1623 case X86II::MRM6r:
1624 case X86II::MRM7r:
1625 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1626 ++CurOp;
1627 if (HasEVEX_K) // Skip writemask
1628 ++CurOp;
1629 emitByte(BaseOpcode, OS);
1630 emitRegModRMByte(MI.getOperand(CurOp++),
1631 (Form == X86II::MRMXr) ? 0 : Form - X86II::MRM0r, OS);
1632 break;
1633 case X86II::MRMr0:
1634 emitByte(BaseOpcode, OS);
 1635 emitByte(modRMByte(3, getX86RegNum(MI.getOperand(CurOp++)), 0), OS);
1636 break;
1637
1638 case X86II::MRMXmCC: {
1639 unsigned FirstMemOp = CurOp;
1640 CurOp = FirstMemOp + X86::AddrNumOperands;
1641
1642 unsigned CC = MI.getOperand(CurOp++).getImm();
1643 emitByte(BaseOpcode + CC, OS);
1644
1645 emitMemModRMByte(MI, FirstMemOp, 0, TSFlags, Kind, StartByte, OS, Fixups,
1646 STI);
1647 break;
1648 }
1649
1650 case X86II::MRMXm:
1651 case X86II::MRM0m:
1652 case X86II::MRM1m:
1653 case X86II::MRM2m:
1654 case X86II::MRM3m:
1655 case X86II::MRM4m:
1656 case X86II::MRM5m:
1657 case X86II::MRM6m:
1658 case X86II::MRM7m:
1659 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1660 ++CurOp;
1661 if (HasEVEX_K) // Skip writemask
1662 ++CurOp;
1663 emitByte(BaseOpcode, OS);
1664 emitMemModRMByte(MI, CurOp,
 1665 (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
 1666 Kind, StartByte, OS, Fixups, STI);
1667 CurOp += X86::AddrNumOperands;
1668 break;
1669
1670 case X86II::MRM0X:
1671 case X86II::MRM1X:
1672 case X86II::MRM2X:
1673 case X86II::MRM3X:
1674 case X86II::MRM4X:
1675 case X86II::MRM5X:
1676 case X86II::MRM6X:
1677 case X86II::MRM7X:
1678 emitByte(BaseOpcode, OS);
1679 emitByte(0xC0 + ((Form - X86II::MRM0X) << 3), OS);
1680 break;
1681
1682 case X86II::MRM_C0:
1683 case X86II::MRM_C1:
1684 case X86II::MRM_C2:
1685 case X86II::MRM_C3:
1686 case X86II::MRM_C4:
1687 case X86II::MRM_C5:
1688 case X86II::MRM_C6:
1689 case X86II::MRM_C7:
1690 case X86II::MRM_C8:
1691 case X86II::MRM_C9:
1692 case X86II::MRM_CA:
1693 case X86II::MRM_CB:
1694 case X86II::MRM_CC:
1695 case X86II::MRM_CD:
1696 case X86II::MRM_CE:
1697 case X86II::MRM_CF:
1698 case X86II::MRM_D0:
1699 case X86II::MRM_D1:
1700 case X86II::MRM_D2:
1701 case X86II::MRM_D3:
1702 case X86II::MRM_D4:
1703 case X86II::MRM_D5:
1704 case X86II::MRM_D6:
1705 case X86II::MRM_D7:
1706 case X86II::MRM_D8:
1707 case X86II::MRM_D9:
1708 case X86II::MRM_DA:
1709 case X86II::MRM_DB:
1710 case X86II::MRM_DC:
1711 case X86II::MRM_DD:
1712 case X86II::MRM_DE:
1713 case X86II::MRM_DF:
1714 case X86II::MRM_E0:
1715 case X86II::MRM_E1:
1716 case X86II::MRM_E2:
1717 case X86II::MRM_E3:
1718 case X86II::MRM_E4:
1719 case X86II::MRM_E5:
1720 case X86II::MRM_E6:
1721 case X86II::MRM_E7:
1722 case X86II::MRM_E8:
1723 case X86II::MRM_E9:
1724 case X86II::MRM_EA:
1725 case X86II::MRM_EB:
1726 case X86II::MRM_EC:
1727 case X86II::MRM_ED:
1728 case X86II::MRM_EE:
1729 case X86II::MRM_EF:
1730 case X86II::MRM_F0:
1731 case X86II::MRM_F1:
1732 case X86II::MRM_F2:
1733 case X86II::MRM_F3:
1734 case X86II::MRM_F4:
1735 case X86II::MRM_F5:
1736 case X86II::MRM_F6:
1737 case X86II::MRM_F7:
1738 case X86II::MRM_F8:
1739 case X86II::MRM_F9:
1740 case X86II::MRM_FA:
1741 case X86II::MRM_FB:
1742 case X86II::MRM_FC:
1743 case X86II::MRM_FD:
1744 case X86II::MRM_FE:
1745 case X86II::MRM_FF:
1746 emitByte(BaseOpcode, OS);
1747 emitByte(0xC0 + Form - X86II::MRM_C0, OS);
1748 break;
1749 }
1750
1751 if (HasVEX_I8Reg) {
 1752 // The last source register of a 4-operand instruction in AVX is encoded
 1753 // in bits[7:4] of an immediate byte.
1754 assert(I8RegNum < 16 && "Register encoding out of range");
1755 I8RegNum <<= 4;
1756 if (CurOp != NumOps) {
1757 unsigned Val = MI.getOperand(CurOp++).getImm();
1758 assert(Val < 16 && "Immediate operand value out of range");
1759 I8RegNum |= Val;
1760 }
1761 emitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
1762 StartByte, OS, Fixups);
1763 } else {
1764 // If there is a remaining operand, it must be a trailing immediate. Emit it
1765 // according to the right size for the instruction. Some instructions
1766 // (SSE4a extrq and insertq) have two trailing immediates.
1767 while (CurOp != NumOps && NumOps - CurOp <= 2) {
1768 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
 1769 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
 1770 StartByte, OS, Fixups);
1771 }
1772 }
1773
 1774 if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
 1775 emitByte(X86II::getBaseOpcodeFor(TSFlags), OS);
 1776
1777 assert(OS.tell() - StartByte <= 15 &&
1778 "The size of instruction must be no longer than 15.");
1779#ifndef NDEBUG
1780 // FIXME: Verify.
1781 if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
1782 errs() << "Cannot encode all operands of: ";
1783 MI.dump();
1784 errs() << '\n';
1785 abort();
1786 }
1787#endif
1788}
1789
 1790 MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
 1791 MCContext &Ctx) {
1792 return new X86MCCodeEmitter(MCII, Ctx);
1793}
bool isX32() const
Tests whether the target is X32.
Definition: Triple.h:965
LLVM Value Representation.
Definition: Value.h:74
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
uint64_t tell() const
tell - Return the current offset with the file.
Definition: raw_ostream.h:134
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ SS
Definition: X86.h:209
Reg
All possible values of the reg field in the ModR/M byte.
@ RawFrm
Raw - This form is for instructions that don't have any operands, so they are just a fixed opcode val...
Definition: X86BaseInfo.h:591
@ RawFrmDstSrc
RawFrmDstSrc - This form is for instructions that use the source index register SI/ESI/RSI with a pos...
Definition: X86BaseInfo.h:612
@ MRMSrcMemCC
MRMSrcMemCC - This form is used for instructions that use the Mod/RM byte to specify the operands and...
Definition: X86BaseInfo.h:675
@ MRM_C0
MRM_XX - A mod/rm byte of exactly 0xXX.
Definition: X86BaseInfo.h:737
@ RawFrmDst
RawFrmDst - This form is for instructions that use the destination index register DI/EDI/RDI.
Definition: X86BaseInfo.h:607
@ MRMDestMem4VOp3CC
MRMDestMem4VOp3CC - This form is used for instructions that use the Mod/RM byte to specify a destinat...
Definition: X86BaseInfo.h:636
@ AddCCFrm
AddCCFrm - This form is used for Jcc that encode the condition code in the lower 4 bits of the opcode...
Definition: X86BaseInfo.h:627
@ PrefixByte
PrefixByte - This form is used for instructions that represent a prefix byte like data16 or rep.
Definition: X86BaseInfo.h:631
@ MRMr0
MRM[0-7][rm] - These forms are used to represent instructions that use a Mod/RM byte,...
Definition: X86BaseInfo.h:644
@ MRMXm
MRMXm - This form is used for instructions that use the Mod/RM byte to specify a memory source,...
Definition: X86BaseInfo.h:686
@ MRMDestMemFSIB
MRMDestMem - But force to use the SIB field.
Definition: X86BaseInfo.h:650
@ AddRegFrm
AddRegFrm - This form is used for instructions like 'push r32' that have their one register operand a...
Definition: X86BaseInfo.h:595
@ RawFrmImm8
RawFrmImm8 - This is used for the ENTER instruction, which has two immediates, the first of which is ...
Definition: X86BaseInfo.h:617
@ XOP
XOP - Opcode prefix used by XOP instructions.
Definition: X86BaseInfo.h:916
@ MRMXr
MRMXr - This form is used for instructions that use the Mod/RM byte to specify a register source,...
Definition: X86BaseInfo.h:726
@ MRMSrcMem4VOp3
MRMSrcMem4VOp3 - This form is used for instructions that encode operand 3 with VEX....
Definition: X86BaseInfo.h:665
@ MRMDestMem
MRMDestMem - This form is used for instructions that use the Mod/RM byte to specify a destination,...
Definition: X86BaseInfo.h:655
@ MRMSrcMemFSIB
MRMSrcMem - But force to use the SIB field.
Definition: X86BaseInfo.h:647
@ MRMSrcRegOp4
MRMSrcRegOp4 - This form is used for instructions that use the Mod/RM byte to specify the fourth sour...
Definition: X86BaseInfo.h:710
@ MRMXrCC
MRMXCCr - This form is used for instructions that use the Mod/RM byte to specify a register source,...
Definition: X86BaseInfo.h:721
@ MRMXmCC
MRMXm - This form is used for instructions that use the Mod/RM byte to specify a memory source,...
Definition: X86BaseInfo.h:681
@ RawFrmImm16
RawFrmImm16 - This is used for CALL FAR instructions, which have two immediates, the first of which i...
Definition: X86BaseInfo.h:623
@ MRMSrcReg
MRMSrcReg - This form is used for instructions that use the Mod/RM byte to specify a source,...
Definition: X86BaseInfo.h:700
@ RawFrmSrc
RawFrmSrc - This form is for instructions that use the source index register SI/ESI/RSI with a possib...
Definition: X86BaseInfo.h:603
@ MRMDestReg
MRMDestReg - This form is used for instructions that use the Mod/RM byte to specify a destination,...
Definition: X86BaseInfo.h:695
@ MRMSrcMem
MRMSrcMem - This form is used for instructions that use the Mod/RM byte to specify a source,...
Definition: X86BaseInfo.h:660
@ MRMSrcMemOp4
MRMSrcMemOp4 - This form is used for instructions that use the Mod/RM byte to specify the fourth sour...
Definition: X86BaseInfo.h:670
@ MRMSrcRegCC
MRMSrcRegCC - This form is used for instructions that use the Mod/RM byte to specify the operands and...
Definition: X86BaseInfo.h:715
@ ThreeDNow
ThreeDNow - This indicates that the instruction uses the wacky 0x0F 0x0F prefix for 3DNow!...
Definition: X86BaseInfo.h:830
@ MRMSrcReg4VOp3
MRMSrcReg4VOp3 - This form is used for instructions that encode operand 3 with VEX....
Definition: X86BaseInfo.h:705
@ RawFrmMemOffs
RawFrmMemOffs - This form is for instructions that store an absolute memory offset as an immediate wi...
Definition: X86BaseInfo.h:599
bool hasImm(uint64_t TSFlags)
Definition: X86BaseInfo.h:994
bool isX86_64NonExtLowByteReg(unsigned reg)
Definition: X86BaseInfo.h:1225
bool isPseudo(uint64_t TSFlags)
Definition: X86BaseInfo.h:984
bool isImmPCRel(uint64_t TSFlags)
Definition: X86BaseInfo.h:1017
unsigned getSizeOfImm(uint64_t TSFlags)
Decode the "size of immediate" field from the TSFlags field of the specified instruction.
Definition: X86BaseInfo.h:1000
uint8_t getBaseOpcodeFor(uint64_t TSFlags)
Definition: X86BaseInfo.h:990
int getMemoryOperandNo(uint64_t TSFlags)
The function returns the MCInst operand # for the first field of the memory operand.
Definition: X86BaseInfo.h:1100
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
Definition: X86BaseInfo.h:1060
bool isImmSigned(uint64_t TSFlags)
Definition: X86BaseInfo.h:1036
bool is16BitMemOperand(const MCInst &MI, unsigned Op, const MCSubtargetInfo &STI)
bool needsAddressSizeOverride(const MCInst &MI, const MCSubtargetInfo &STI, int MemoryOperand, uint64_t TSFlags)
Returns true if this instruction needs an Address-Size override prefix.
@ AddrScaleAmt
Definition: X86BaseInfo.h:33
@ AddrSegmentReg
AddrSegmentReg - The operand # of the segment in the memory operand.
Definition: X86BaseInfo.h:38
@ AddrIndexReg
Definition: X86BaseInfo.h:34
@ AddrNumOperands
AddrNumOperands - Total number of operands in a memory reference.
Definition: X86BaseInfo.h:41
@ IP_HAS_NOTRACK
Definition: X86BaseInfo.h:63
@ IP_USE_DISP8
Definition: X86BaseInfo.h:68
@ IP_HAS_AD_SIZE
Definition: X86BaseInfo.h:59
@ IP_HAS_REPEAT
Definition: X86BaseInfo.h:61
@ IP_USE_DISP32
Definition: X86BaseInfo.h:69
@ IP_HAS_REPEAT_NE
Definition: X86BaseInfo.h:60
EncodingOfSegmentOverridePrefix getSegmentOverridePrefixForReg(unsigned Reg)
Given a segment register, return the encoding of the segment override prefix for it.
Definition: X86BaseInfo.h:380
@ reloc_global_offset_table8
Definition: X86FixupKinds.h:31
@ reloc_signed_4byte_relax
Definition: X86FixupKinds.h:26
@ reloc_branch_4byte_pcrel
Definition: X86FixupKinds.h:32
@ reloc_riprel_4byte_relax
Definition: X86FixupKinds.h:19
@ reloc_signed_4byte
Definition: X86FixupKinds.h:23
@ reloc_riprel_4byte_relax_rex
Definition: X86FixupKinds.h:21
@ reloc_global_offset_table
Definition: X86FixupKinds.h:28
@ reloc_riprel_4byte_movq_load
Definition: X86FixupKinds.h:18
@ reloc_riprel_4byte
Definition: X86FixupKinds.h:17
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
MCCodeEmitter * createX86MCCodeEmitter(const MCInstrInfo &MCII, MCContext &Ctx)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:292
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:145
MCFixupKind
Extensible enumeration to represent the type of a fixup.
Definition: MCFixup.h:21
@ FK_PCRel_4
A four-byte pc relative fixup.
Definition: MCFixup.h:30
@ FK_PCRel_2
A two-byte pc relative fixup.
Definition: MCFixup.h:29
@ FK_Data_8
A eight-byte fixup.
Definition: MCFixup.h:26
@ FK_Data_1
A one-byte fixup.
Definition: MCFixup.h:23
@ FK_Data_4
A four-byte fixup.
Definition: MCFixup.h:25
@ FK_NONE
A no-op fixup.
Definition: MCFixup.h:22
@ FK_SecRel_4
A four-byte section relative fixup.
Definition: MCFixup.h:42
@ FK_PCRel_1
A one-byte pc relative fixup.
Definition: MCFixup.h:28
@ FK_Data_2
A two-byte fixup.
Definition: MCFixup.h:24
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Ref
The access may reference the value stored in memory.
constexpr std::nullopt_t None
Definition: None.h:28