LLVM 18.0.0git
X86MCCodeEmitter.cpp
Go to the documentation of this file.
1//===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the X86MCCodeEmitter class.
10//
11//===----------------------------------------------------------------------===//
12
18#include "llvm/MC/MCContext.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCFixup.h"
21#include "llvm/MC/MCInst.h"
22#include "llvm/MC/MCInstrDesc.h"
23#include "llvm/MC/MCInstrInfo.h"
26#include "llvm/MC/MCSymbol.h"
29#include <cassert>
30#include <cstdint>
31#include <cstdlib>
32
33using namespace llvm;
34
35#define DEBUG_TYPE "mccodeemitter"
36
37namespace {
38
39enum PrefixKind { None, REX, XOP, VEX2, VEX3, EVEX };
40
41static void emitByte(uint8_t C, SmallVectorImpl<char> &CB) { CB.push_back(C); }
42
/// Accumulates the individual fields of an x86 opcode prefix (REX, XOP,
/// VEX2/VEX3 or EVEX), determines the smallest legal encoding for them, and
/// emits the resulting prefix bytes.
class X86OpcodePrefixHelper {
  // REX (1 byte)
  // +-----+ +------+
  // | 40H | | WRXB |
  // +-----+ +------+

  // XOP (3-byte)
  // +-----+ +--------------+ +-------------------+
  // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
  // +-----+ +--------------+ +-------------------+

  // VEX2 (2 bytes)
  // +-----+ +-------------------+
  // | C5h | | R | vvvv | L | pp |
  // +-----+ +-------------------+

  // VEX3 (3 bytes)
  // +-----+ +--------------+ +-------------------+
  // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
  // +-----+ +--------------+ +-------------------+

  // VEX_R: opcode extension equivalent to REX.R in
  // 1's complement (inverted) form
  //
  //  1: Same as REX_R=0 (must be 1 in 32-bit mode)
  //  0: Same as REX_R=1 (64 bit mode only)

  // VEX_X: equivalent to REX.X, only used when a
  // register is used for index in SIB Byte.
  //
  //  1: Same as REX.X=0 (must be 1 in 32-bit mode)
  //  0: Same as REX.X=1 (64-bit mode only)

  // VEX_B:
  //  1: Same as REX_B=0 (ignored in 32-bit mode)
  //  0: Same as REX_B=1 (64 bit mode only)

  // VEX_W: opcode specific (use like REX.W, or used for
  // opcode extension, or ignored, depending on the opcode byte)

  // VEX_5M (VEX m-mmmmm field):
  //
  //  0b00000: Reserved for future use
  //  0b00001: implied 0F leading opcode
  //  0b00010: implied 0F 38 leading opcode bytes
  //  0b00011: implied 0F 3A leading opcode bytes
  //  0b00100: Reserved for future use
  //  0b00101: VEX MAP5
  //  0b00110: VEX MAP6
  //  0b00111-0b11111: Reserved for future use
  //  0b01000: XOP map select - 08h instructions with imm byte
  //  0b01001: XOP map select - 09h instructions with no imm byte
  //  0b01010: XOP map select - 0Ah instructions with imm dword

  // VEX_4V (VEX vvvv field): a register specifier
  // (in 1's complement form) or 1111 if unused.

  // VEX_PP: opcode extension providing equivalent
  // functionality of a SIMD prefix
  //  0b00: None
  //  0b01: 66
  //  0b10: F3
  //  0b11: F2

  // EVEX (4 bytes)
  // +-----+ +--------------+ +-------------------+ +------------------------+
  // | 62h | | RXBR' | 0mmm | | W | vvvv | 1 | pp | | z | L'L | b | v' | aaa |
  // +-----+ +--------------+ +-------------------+ +------------------------+

  // EVEX_L2/VEX_L (Vector Length):
  // L2 L
  //  0 0: scalar or 128-bit vector
  //  0 1: 256-bit vector
  //  1 0: 512-bit vector

private:
  // Raw prefix fields. Each mirrors the identically-named bit(s) in the
  // encodings pictured above; they are stored in positive (non-inverted)
  // form and complemented in emit() where an encoding requires it.
  unsigned W : 1;
  unsigned R : 1;
  unsigned X : 1;
  unsigned B : 1;
  unsigned VEX_4V : 4;
  unsigned VEX_L : 1;
  unsigned VEX_PP : 2;
  unsigned VEX_5M : 5;
  unsigned EVEX_R2 : 1;
  unsigned EVEX_z : 1;
  unsigned EVEX_L2 : 1;
  unsigned EVEX_b : 1;
  unsigned EVEX_V2 : 1;
  unsigned EVEX_aaa : 3;
  PrefixKind Kind = None;
  const MCRegisterInfo &MRI;

  // Hardware encoding value of the register in operand \p OpNum of \p MI.
  unsigned getRegEncoding(const MCInst &MI, unsigned OpNum) const {
    return MRI.getEncodingValue(MI.getOperand(OpNum).getReg());
  }

  // Slice the relevant bit(s) out of a raw 5-bit register encoding:
  // bit 3 feeds R/B/X, bit 4 feeds the EVEX R'/V' extensions, and the low
  // four bits feed vvvv.
  void setR(unsigned Encoding) { R = Encoding >> 3 & 1; }
  void setR2(unsigned Encoding) { EVEX_R2 = Encoding >> 4 & 1; }
  void set4V(unsigned Encoding) { VEX_4V = Encoding & 0xf; }
  void setV2(unsigned Encoding) { EVEX_V2 = Encoding >> 4 & 1; }

public:
  void setW(bool V) { W = V; }
  void setR(const MCInst &MI, unsigned OpNum) {
    setR(getRegEncoding(MI, OpNum));
  }
  void setX(const MCInst &MI, unsigned OpNum, unsigned Shift = 3) {
    X = getRegEncoding(MI, OpNum) >> Shift & 1;
  }
  void setB(const MCInst &MI, unsigned OpNum) {
    B = getRegEncoding(MI, OpNum) >> 3 & 1;
  }
  void set4V(const MCInst &MI, unsigned OpNum) {
    set4V(getRegEncoding(MI, OpNum));
  }
  void setL(bool V) { VEX_L = V; }
  void setPP(unsigned V) { VEX_PP = V; }
  void set5M(unsigned V) { VEX_5M = V; }
  void setR2(const MCInst &MI, unsigned OpNum) {
    setR2(getRegEncoding(MI, OpNum));
  }
  // Set both R and the EVEX R' extension from one register operand.
  void setRR2(const MCInst &MI, unsigned OpNum) {
    unsigned Encoding = getRegEncoding(MI, OpNum);
    setR(Encoding);
    setR2(Encoding);
  }
  void setZ(bool V) { EVEX_z = V; }
  void setL2(bool V) { EVEX_L2 = V; }
  void setEVEX_b(bool V) { EVEX_b = V; }
  void setV2(const MCInst &MI, unsigned OpNum) {
    setV2(getRegEncoding(MI, OpNum));
  }
  // Set both vvvv and the EVEX V' extension from one register operand.
  void set4VV2(const MCInst &MI, unsigned OpNum) {
    unsigned Encoding = getRegEncoding(MI, OpNum);
    set4V(Encoding);
    setV2(Encoding);
  }
  // The EVEX aaa field selects the opmask register (k0-k7).
  void setAAA(const MCInst &MI, unsigned OpNum) {
    EVEX_aaa = getRegEncoding(MI, OpNum);
  }

  X86OpcodePrefixHelper(const MCRegisterInfo &MRI)
      : W(0), R(0), X(0), B(0), VEX_4V(0), VEX_L(0), VEX_PP(0), VEX_5M(0),
        EVEX_R2(0), EVEX_z(0), EVEX_L2(0), EVEX_b(0), EVEX_V2(0), EVEX_aaa(0),
        MRI(MRI) {}

  // Constrain the prefix to at least \p K (e.g. EVEX-only instructions).
  void setLowerBound(PrefixKind K) { Kind = K; }

  // Pick the smallest prefix kind that can represent the accumulated fields.
  PrefixKind determineOptimalKind() {
    switch (Kind) {
    case None:
      // Any of the W/R/X/B extension bits forces a REX prefix.
      Kind = (W | R | X | B) ? REX : None;
      break;
    case REX:
    case XOP:
    case VEX3:
    case EVEX:
      // Already minimal for these kinds.
      break;
    case VEX2:
      // VEX2 has no W/X/B bits and an implied 0F map; anything else needs
      // the 3-byte form.
      Kind = (W | X | B | (VEX_5M != 1)) ? VEX3 : VEX2;
      break;
    }
    return Kind;
  }

  // Emit the chosen prefix bytes. R/X/B, vvvv and the EVEX R'/V' bits are
  // stored in positive form and inverted here, as the encodings require.
  void emit(SmallVectorImpl<char> &CB) const {
    uint8_t FirstPayload =
        ((~R) & 0x1) << 7 | ((~X) & 0x1) << 6 | ((~B) & 0x1) << 5;
    uint8_t LastPayload = ((~VEX_4V) & 0xf) << 3 | VEX_L << 2 | VEX_PP;
    switch (Kind) {
    case None:
      return;
    case REX:
      emitByte(0x40 | W << 3 | R << 2 | X << 1 | B, CB);
      return;
    case VEX2:
      emitByte(0xC5, CB);
      emitByte(((~R) & 1) << 7 | LastPayload, CB);
      return;
    case VEX3:
    case XOP:
      emitByte(Kind == VEX3 ? 0xC4 : 0x8F, CB);
      emitByte(FirstPayload | VEX_5M, CB);
      emitByte(W << 7 | LastPayload, CB);
      return;
    case EVEX:
      assert(VEX_5M && !(VEX_5M & 0x8) && "invalid mmm fields for EVEX!");
      emitByte(0x62, CB);
      emitByte(FirstPayload | ((~EVEX_R2) & 0x1) << 4 | VEX_5M, CB);
      emitByte(W << 7 | ((~VEX_4V) & 0xf) << 3 | 1 << 2 | VEX_PP, CB);
      emitByte(EVEX_z << 7 | EVEX_L2 << 6 | VEX_L << 5 | EVEX_b << 4 |
                   ((~EVEX_V2) & 0x1) << 3 | EVEX_aaa,
               CB);
      return;
    }
  }
};
241
242class X86MCCodeEmitter : public MCCodeEmitter {
243 const MCInstrInfo &MCII;
244 MCContext &Ctx;
245
246public:
247 X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
248 : MCII(mcii), Ctx(ctx) {}
249 X86MCCodeEmitter(const X86MCCodeEmitter &) = delete;
250 X86MCCodeEmitter &operator=(const X86MCCodeEmitter &) = delete;
251 ~X86MCCodeEmitter() override = default;
252
253 void emitPrefix(const MCInst &MI, SmallVectorImpl<char> &CB,
254 const MCSubtargetInfo &STI) const override;
255
258 const MCSubtargetInfo &STI) const override;
259
260private:
261 unsigned getX86RegNum(const MCOperand &MO) const;
262
263 unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const;
264
265 void emitImmediate(const MCOperand &Disp, SMLoc Loc, unsigned ImmSize,
266 MCFixupKind FixupKind, uint64_t StartByte,
268 SmallVectorImpl<MCFixup> &Fixups, int ImmOffset = 0) const;
269
270 void emitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
271 SmallVectorImpl<char> &CB) const;
272
273 void emitSIBByte(unsigned SS, unsigned Index, unsigned Base,
274 SmallVectorImpl<char> &CB) const;
275
276 void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
277 uint64_t TSFlags, PrefixKind Kind, uint64_t StartByte,
280 const MCSubtargetInfo &STI,
281 bool ForceSIB = false) const;
282
283 PrefixKind emitPrefixImpl(unsigned &CurOp, const MCInst &MI,
284 const MCSubtargetInfo &STI,
285 SmallVectorImpl<char> &CB) const;
286
287 PrefixKind emitVEXOpcodePrefix(int MemOperand, const MCInst &MI,
288 const MCSubtargetInfo &STI,
289 SmallVectorImpl<char> &CB) const;
290
291 void emitSegmentOverridePrefix(unsigned SegOperand, const MCInst &MI,
292 SmallVectorImpl<char> &CB) const;
293
294 PrefixKind emitOpcodePrefix(int MemOperand, const MCInst &MI,
295 const MCSubtargetInfo &STI,
296 SmallVectorImpl<char> &CB) const;
297
298 PrefixKind emitREXPrefix(int MemOperand, const MCInst &MI,
299 const MCSubtargetInfo &STI,
300 SmallVectorImpl<char> &CB) const;
301};
302
303} // end anonymous namespace
304
/// Assemble a ModR/M byte from its three fields: mod occupies bits 7-6,
/// reg/opcode bits 5-3, and r/m bits 2-0.
static uint8_t modRMByte(unsigned Mod, unsigned RegOpcode, unsigned RM) {
  assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
  return static_cast<uint8_t>((Mod << 6) | (RegOpcode << 3) | RM);
}
309
310static void emitConstant(uint64_t Val, unsigned Size,
312 // Output the constant in little endian byte order.
313 for (unsigned i = 0; i != Size; ++i) {
314 emitByte(Val & 255, CB);
315 Val >>= 8;
316 }
317}
318
319/// Determine if this immediate can fit in a disp8 or a compressed disp8 for
320/// EVEX instructions. \p will be set to the value to pass to the ImmOffset
321/// parameter of emitImmediate.
322static bool isDispOrCDisp8(uint64_t TSFlags, int Value, int &ImmOffset) {
323 bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;
324
325 unsigned CD8_Scale =
327 CD8_Scale = CD8_Scale ? 1U << (CD8_Scale - 1) : 0U;
328 if (!HasEVEX || !CD8_Scale)
329 return isInt<8>(Value);
330
331 assert(isPowerOf2_32(CD8_Scale) && "Unexpected CD8 scale!");
332 if (Value & (CD8_Scale - 1)) // Unaligned offset
333 return false;
334
335 int CDisp8 = Value / static_cast<int>(CD8_Scale);
336 if (!isInt<8>(CDisp8))
337 return false;
338
339 // ImmOffset will be added to Value in emitImmediate leaving just CDisp8.
340 ImmOffset = CDisp8 - Value;
341 return true;
342}
343
344/// \returns the appropriate fixup kind to use for an immediate in an
345/// instruction with the specified TSFlags.
347 unsigned Size = X86II::getSizeOfImm(TSFlags);
349
351 switch (Size) {
352 default:
353 llvm_unreachable("Unsupported signed fixup size!");
354 case 4:
356 }
357 }
359}
360
362
363/// Check if this expression starts with _GLOBAL_OFFSET_TABLE_ and if it is
364/// of the form _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on
365/// ELF i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only simple case that
366/// are know to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start of a
367/// binary expression.
370 const MCExpr *RHS = nullptr;
371 if (Expr->getKind() == MCExpr::Binary) {
372 const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
373 Expr = BE->getLHS();
374 RHS = BE->getRHS();
375 }
376
377 if (Expr->getKind() != MCExpr::SymbolRef)
378 return GOT_None;
379
380 const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
381 const MCSymbol &S = Ref->getSymbol();
382 if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
383 return GOT_None;
384 if (RHS && RHS->getKind() == MCExpr::SymbolRef)
385 return GOT_SymDiff;
386 return GOT_Normal;
387}
388
389static bool hasSecRelSymbolRef(const MCExpr *Expr) {
390 if (Expr->getKind() == MCExpr::SymbolRef) {
391 const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
392 return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
393 }
394 return false;
395}
396
397static bool isPCRel32Branch(const MCInst &MI, const MCInstrInfo &MCII) {
398 unsigned Opcode = MI.getOpcode();
399 const MCInstrDesc &Desc = MCII.get(Opcode);
400 if ((Opcode != X86::CALL64pcrel32 && Opcode != X86::JMP_4 &&
401 Opcode != X86::JCC_4) ||
402 getImmFixupKind(Desc.TSFlags) != FK_PCRel_4)
403 return false;
404
405 unsigned CurOp = X86II::getOperandBias(Desc);
406 const MCOperand &Op = MI.getOperand(CurOp);
407 if (!Op.isExpr())
408 return false;
409
410 const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Op.getExpr());
411 return Ref && Ref->getKind() == MCSymbolRefExpr::VK_None;
412}
413
414unsigned X86MCCodeEmitter::getX86RegNum(const MCOperand &MO) const {
415 return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
416}
417
418unsigned X86MCCodeEmitter::getX86RegEncoding(const MCInst &MI,
419 unsigned OpNum) const {
420 return Ctx.getRegisterInfo()->getEncodingValue(MI.getOperand(OpNum).getReg());
421}
422
423void X86MCCodeEmitter::emitImmediate(const MCOperand &DispOp, SMLoc Loc,
424 unsigned Size, MCFixupKind FixupKind,
425 uint64_t StartByte,
428 int ImmOffset) const {
429 const MCExpr *Expr = nullptr;
430 if (DispOp.isImm()) {
431 // If this is a simple integer displacement that doesn't require a
432 // relocation, emit it now.
433 if (FixupKind != FK_PCRel_1 && FixupKind != FK_PCRel_2 &&
434 FixupKind != FK_PCRel_4) {
435 emitConstant(DispOp.getImm() + ImmOffset, Size, CB);
436 return;
437 }
438 Expr = MCConstantExpr::create(DispOp.getImm(), Ctx);
439 } else {
440 Expr = DispOp.getExpr();
441 }
442
443 // If we have an immoffset, add it to the expression.
444 if ((FixupKind == FK_Data_4 || FixupKind == FK_Data_8 ||
445 FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
447 if (Kind != GOT_None) {
448 assert(ImmOffset == 0);
449
450 if (Size == 8) {
452 } else {
453 assert(Size == 4);
455 }
456
457 if (Kind == GOT_Normal)
458 ImmOffset = static_cast<int>(CB.size() - StartByte);
459 } else if (Expr->getKind() == MCExpr::SymbolRef) {
460 if (hasSecRelSymbolRef(Expr)) {
462 }
463 } else if (Expr->getKind() == MCExpr::Binary) {
464 const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr *>(Expr);
465 if (hasSecRelSymbolRef(Bin->getLHS()) ||
466 hasSecRelSymbolRef(Bin->getRHS())) {
468 }
469 }
470 }
471
472 // If the fixup is pc-relative, we need to bias the value to be relative to
473 // the start of the field, not the end of the field.
474 if (FixupKind == FK_PCRel_4 ||
475 FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
480 ImmOffset -= 4;
481 // If this is a pc-relative load off _GLOBAL_OFFSET_TABLE_:
482 // leaq _GLOBAL_OFFSET_TABLE_(%rip), %r15
483 // this needs to be a GOTPC32 relocation.
486 }
487 if (FixupKind == FK_PCRel_2)
488 ImmOffset -= 2;
489 if (FixupKind == FK_PCRel_1)
490 ImmOffset -= 1;
491
492 if (ImmOffset)
493 Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(ImmOffset, Ctx),
494 Ctx);
495
496 // Emit a symbolic constant as a fixup and 4 zeros.
497 Fixups.push_back(MCFixup::create(static_cast<uint32_t>(CB.size() - StartByte),
498 Expr, FixupKind, Loc));
499 emitConstant(0, Size, CB);
500}
501
502void X86MCCodeEmitter::emitRegModRMByte(const MCOperand &ModRMReg,
503 unsigned RegOpcodeFld,
504 SmallVectorImpl<char> &CB) const {
505 emitByte(modRMByte(3, RegOpcodeFld, getX86RegNum(ModRMReg)), CB);
506}
507
508void X86MCCodeEmitter::emitSIBByte(unsigned SS, unsigned Index, unsigned Base,
509 SmallVectorImpl<char> &CB) const {
510 // SIB byte is in the same format as the modRMByte.
511 emitByte(modRMByte(SS, Index, Base), CB);
512}
513
514void X86MCCodeEmitter::emitMemModRMByte(
515 const MCInst &MI, unsigned Op, unsigned RegOpcodeField, uint64_t TSFlags,
516 PrefixKind Kind, uint64_t StartByte, SmallVectorImpl<char> &CB,
517 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI,
518 bool ForceSIB) const {
519 const MCOperand &Disp = MI.getOperand(Op + X86::AddrDisp);
520 const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
521 const MCOperand &Scale = MI.getOperand(Op + X86::AddrScaleAmt);
522 const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);
523 unsigned BaseReg = Base.getReg();
524
525 // Handle %rip relative addressing.
526 if (BaseReg == X86::RIP ||
527 BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
528 assert(STI.hasFeature(X86::Is64Bit) &&
529 "Rip-relative addressing requires 64-bit mode");
530 assert(IndexReg.getReg() == 0 && !ForceSIB &&
531 "Invalid rip-relative address");
532 emitByte(modRMByte(0, RegOpcodeField, 5), CB);
533
534 unsigned Opcode = MI.getOpcode();
535 unsigned FixupKind = [&]() {
536 // Enable relaxed relocation only for a MCSymbolRefExpr. We cannot use a
537 // relaxed relocation if an offset is present (e.g. x@GOTPCREL+4).
538 if (!(Disp.isExpr() && isa<MCSymbolRefExpr>(Disp.getExpr())))
540
541 // Certain loads for GOT references can be relocated against the symbol
542 // directly if the symbol ends up in the same linkage unit.
543 switch (Opcode) {
544 default:
546 case X86::MOV64rm:
547 // movq loads is a subset of reloc_riprel_4byte_relax_rex. It is a
548 // special case because COFF and Mach-O don't support ELF's more
549 // flexible R_X86_64_REX_GOTPCRELX relaxation.
550 assert(Kind == REX);
552 case X86::ADC32rm:
553 case X86::ADD32rm:
554 case X86::AND32rm:
555 case X86::CMP32rm:
556 case X86::MOV32rm:
557 case X86::OR32rm:
558 case X86::SBB32rm:
559 case X86::SUB32rm:
560 case X86::TEST32mr:
561 case X86::XOR32rm:
562 case X86::CALL64m:
563 case X86::JMP64m:
564 case X86::TAILJMPm64:
565 case X86::TEST64mr:
566 case X86::ADC64rm:
567 case X86::ADD64rm:
568 case X86::AND64rm:
569 case X86::CMP64rm:
570 case X86::OR64rm:
571 case X86::SBB64rm:
572 case X86::SUB64rm:
573 case X86::XOR64rm:
576 }
577 }();
578
579 // rip-relative addressing is actually relative to the *next* instruction.
580 // Since an immediate can follow the mod/rm byte for an instruction, this
581 // means that we need to bias the displacement field of the instruction with
582 // the size of the immediate field. If we have this case, add it into the
583 // expression to emit.
584 // Note: rip-relative addressing using immediate displacement values should
585 // not be adjusted, assuming it was the user's intent.
586 int ImmSize = !Disp.isImm() && X86II::hasImm(TSFlags)
588 : 0;
589
590 emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), StartByte, CB,
591 Fixups, -ImmSize);
592 return;
593 }
594
595 unsigned BaseRegNo = BaseReg ? getX86RegNum(Base) : -1U;
596
597 // 16-bit addressing forms of the ModR/M byte have a different encoding for
598 // the R/M field and are far more limited in which registers can be used.
599 if (X86_MC::is16BitMemOperand(MI, Op, STI)) {
600 if (BaseReg) {
601 // For 32-bit addressing, the row and column values in Table 2-2 are
602 // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
603 // some special cases. And getX86RegNum reflects that numbering.
604 // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
605 // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
606 // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
607 // while values 0-3 indicate the allowed combinations (base+index) of
608 // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
609 //
610 // R16Table[] is a lookup from the normal RegNo, to the row values from
611 // Table 2-1 for 16-bit addressing modes. Where zero means disallowed.
612 static const unsigned R16Table[] = {0, 0, 0, 7, 0, 6, 4, 5};
613 unsigned RMfield = R16Table[BaseRegNo];
614
615 assert(RMfield && "invalid 16-bit base register");
616
617 if (IndexReg.getReg()) {
618 unsigned IndexReg16 = R16Table[getX86RegNum(IndexReg)];
619
620 assert(IndexReg16 && "invalid 16-bit index register");
621 // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
622 assert(((IndexReg16 ^ RMfield) & 2) &&
623 "invalid 16-bit base/index register combination");
624 assert(Scale.getImm() == 1 &&
625 "invalid scale for 16-bit memory reference");
626
627 // Allow base/index to appear in either order (although GAS doesn't).
628 if (IndexReg16 & 2)
629 RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
630 else
631 RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
632 }
633
634 if (Disp.isImm() && isInt<8>(Disp.getImm())) {
635 if (Disp.getImm() == 0 && RMfield != 6) {
636 // There is no displacement; just the register.
637 emitByte(modRMByte(0, RegOpcodeField, RMfield), CB);
638 return;
639 }
640 // Use the [REG]+disp8 form, including for [BP] which cannot be encoded.
641 emitByte(modRMByte(1, RegOpcodeField, RMfield), CB);
642 emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, CB, Fixups);
643 return;
644 }
645 // This is the [REG]+disp16 case.
646 emitByte(modRMByte(2, RegOpcodeField, RMfield), CB);
647 } else {
648 assert(IndexReg.getReg() == 0 && "Unexpected index register!");
649 // There is no BaseReg; this is the plain [disp16] case.
650 emitByte(modRMByte(0, RegOpcodeField, 6), CB);
651 }
652
653 // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
654 emitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, StartByte, CB, Fixups);
655 return;
656 }
657
658 // Check for presence of {disp8} or {disp32} pseudo prefixes.
659 bool UseDisp8 = MI.getFlags() & X86::IP_USE_DISP8;
660 bool UseDisp32 = MI.getFlags() & X86::IP_USE_DISP32;
661
662 // We only allow no displacement if no pseudo prefix is present.
663 bool AllowNoDisp = !UseDisp8 && !UseDisp32;
664 // Disp8 is allowed unless the {disp32} prefix is present.
665 bool AllowDisp8 = !UseDisp32;
666
667 // Determine whether a SIB byte is needed.
668 if (// The SIB byte must be used if there is an index register or the
669 // encoding requires a SIB byte.
670 !ForceSIB && IndexReg.getReg() == 0 &&
671 // The SIB byte must be used if the base is ESP/RSP/R12, all of which
672 // encode to an R/M value of 4, which indicates that a SIB byte is
673 // present.
674 BaseRegNo != N86::ESP &&
675 // If there is no base register and we're in 64-bit mode, we need a SIB
676 // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
677 (!STI.hasFeature(X86::Is64Bit) || BaseReg != 0)) {
678
679 if (BaseReg == 0) { // [disp32] in X86-32 mode
680 emitByte(modRMByte(0, RegOpcodeField, 5), CB);
681 emitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, StartByte, CB, Fixups);
682 return;
683 }
684
685 // If the base is not EBP/ESP/R12/R13 and there is no displacement, use
686 // simple indirect register encoding, this handles addresses like [EAX].
687 // The encoding for [EBP] or[R13] with no displacement means [disp32] so we
688 // handle it by emitting a displacement of 0 later.
689 if (BaseRegNo != N86::EBP) {
690 if (Disp.isImm() && Disp.getImm() == 0 && AllowNoDisp) {
691 emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), CB);
692 return;
693 }
694
695 // If the displacement is @tlscall, treat it as a zero.
696 if (Disp.isExpr()) {
697 auto *Sym = dyn_cast<MCSymbolRefExpr>(Disp.getExpr());
698 if (Sym && Sym->getKind() == MCSymbolRefExpr::VK_TLSCALL) {
699 // This is exclusively used by call *a@tlscall(base). The relocation
700 // (R_386_TLSCALL or R_X86_64_TLSCALL) applies to the beginning.
701 Fixups.push_back(MCFixup::create(0, Sym, FK_NONE, MI.getLoc()));
702 emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), CB);
703 return;
704 }
705 }
706 }
707
708 // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
709 // Including a compressed disp8 for EVEX instructions that support it.
710 // This also handles the 0 displacement for [EBP] or [R13]. We can't use
711 // disp8 if the {disp32} pseudo prefix is present.
712 if (Disp.isImm() && AllowDisp8) {
713 int ImmOffset = 0;
714 if (isDispOrCDisp8(TSFlags, Disp.getImm(), ImmOffset)) {
715 emitByte(modRMByte(1, RegOpcodeField, BaseRegNo), CB);
716 emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, CB, Fixups,
717 ImmOffset);
718 return;
719 }
720 }
721
722 // Otherwise, emit the most general non-SIB encoding: [REG+disp32].
723 // Displacement may be 0 for [EBP] or [R13] case if {disp32} pseudo prefix
724 // prevented using disp8 above.
725 emitByte(modRMByte(2, RegOpcodeField, BaseRegNo), CB);
726 unsigned Opcode = MI.getOpcode();
727 unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
729 emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), StartByte, CB,
730 Fixups);
731 return;
732 }
733
734 // We need a SIB byte, so start by outputting the ModR/M byte first
735 assert(IndexReg.getReg() != X86::ESP && IndexReg.getReg() != X86::RSP &&
736 "Cannot use ESP as index reg!");
737
738 bool ForceDisp32 = false;
739 bool ForceDisp8 = false;
740 int ImmOffset = 0;
741 if (BaseReg == 0) {
742 // If there is no base register, we emit the special case SIB byte with
743 // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
744 BaseRegNo = 5;
745 emitByte(modRMByte(0, RegOpcodeField, 4), CB);
746 ForceDisp32 = true;
747 } else if (Disp.isImm() && Disp.getImm() == 0 && AllowNoDisp &&
748 // Base reg can't be EBP/RBP/R13 as that would end up with '5' as
749 // the base field, but that is the magic [*] nomenclature that
750 // indicates no base when mod=0. For these cases we'll emit a 0
751 // displacement instead.
752 BaseRegNo != N86::EBP) {
753 // Emit no displacement ModR/M byte
754 emitByte(modRMByte(0, RegOpcodeField, 4), CB);
755 } else if (Disp.isImm() && AllowDisp8 &&
756 isDispOrCDisp8(TSFlags, Disp.getImm(), ImmOffset)) {
757 // Displacement fits in a byte or matches an EVEX compressed disp8, use
758 // disp8 encoding. This also handles EBP/R13 base with 0 displacement unless
759 // {disp32} pseudo prefix was used.
760 emitByte(modRMByte(1, RegOpcodeField, 4), CB);
761 ForceDisp8 = true;
762 } else {
763 // Otherwise, emit the normal disp32 encoding.
764 emitByte(modRMByte(2, RegOpcodeField, 4), CB);
765 ForceDisp32 = true;
766 }
767
768 // Calculate what the SS field value should be...
769 static const unsigned SSTable[] = {~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3};
770 unsigned SS = SSTable[Scale.getImm()];
771
772 unsigned IndexRegNo = IndexReg.getReg() ? getX86RegNum(IndexReg) : 4;
773
774 emitSIBByte(SS, IndexRegNo, BaseRegNo, CB);
775
776 // Do we need to output a displacement?
777 if (ForceDisp8)
778 emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, CB, Fixups,
779 ImmOffset);
780 else if (ForceDisp32)
781 emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
782 StartByte, CB, Fixups);
783}
784
785/// Emit all instruction prefixes.
786///
787/// \returns one of the REX, XOP, VEX2, VEX3, EVEX if any of them is used,
788/// otherwise returns None.
789PrefixKind X86MCCodeEmitter::emitPrefixImpl(unsigned &CurOp, const MCInst &MI,
790 const MCSubtargetInfo &STI,
791 SmallVectorImpl<char> &CB) const {
792 uint64_t TSFlags = MCII.get(MI.getOpcode()).TSFlags;
793 // Determine where the memory operand starts, if present.
794 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
795 // Emit segment override opcode prefix as needed.
796 if (MemoryOperand != -1) {
797 MemoryOperand += CurOp;
798 emitSegmentOverridePrefix(MemoryOperand + X86::AddrSegmentReg, MI, CB);
799 }
800
801 // Emit the repeat opcode prefix as needed.
802 unsigned Flags = MI.getFlags();
803 if (TSFlags & X86II::REP || Flags & X86::IP_HAS_REPEAT)
804 emitByte(0xF3, CB);
805 if (Flags & X86::IP_HAS_REPEAT_NE)
806 emitByte(0xF2, CB);
807
808 // Emit the address size opcode prefix as needed.
809 if (X86_MC::needsAddressSizeOverride(MI, STI, MemoryOperand, TSFlags) ||
810 Flags & X86::IP_HAS_AD_SIZE)
811 emitByte(0x67, CB);
812
814 switch (Form) {
815 default:
816 break;
817 case X86II::RawFrmDstSrc: {
818 // Emit segment override opcode prefix as needed (not for %ds).
819 if (MI.getOperand(2).getReg() != X86::DS)
820 emitSegmentOverridePrefix(2, MI, CB);
821 CurOp += 3; // Consume operands.
822 break;
823 }
824 case X86II::RawFrmSrc: {
825 // Emit segment override opcode prefix as needed (not for %ds).
826 if (MI.getOperand(1).getReg() != X86::DS)
827 emitSegmentOverridePrefix(1, MI, CB);
828 CurOp += 2; // Consume operands.
829 break;
830 }
831 case X86II::RawFrmDst: {
832 ++CurOp; // Consume operand.
833 break;
834 }
836 // Emit segment override opcode prefix as needed.
837 emitSegmentOverridePrefix(1, MI, CB);
838 break;
839 }
840 }
841
842 // REX prefix is optional, but if used must be immediately before the opcode
843 // Encoding type for this instruction.
845 ? emitVEXOpcodePrefix(MemoryOperand, MI, STI, CB)
846 : emitOpcodePrefix(MemoryOperand, MI, STI, CB);
847}
848
849// AVX instructions are encoded using an encoding scheme that combines
850// prefix bytes, opcode extension field, operand encoding fields, and vector
851// length encoding capability into a new prefix, referred to as VEX.
852
853// The majority of the AVX-512 family of instructions (operating on
854// 512/256/128-bit vector register operands) are encoded using a new prefix
855// (called EVEX).
856
857// XOP is a revised subset of what was originally intended as SSE5. It was
858// changed to be similar but not overlapping with AVX.
859
860/// Emit XOP, VEX2, VEX3 or EVEX prefix.
861/// \returns the used prefix.
862PrefixKind
863X86MCCodeEmitter::emitVEXOpcodePrefix(int MemOperand, const MCInst &MI,
864 const MCSubtargetInfo &STI,
865 SmallVectorImpl<char> &CB) const {
866 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
867 uint64_t TSFlags = Desc.TSFlags;
868
869 assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.");
870
871 X86OpcodePrefixHelper Prefix(*Ctx.getRegisterInfo());
872 switch (TSFlags & X86II::EncodingMask) {
873 default:
874 break;
875 case X86II::XOP:
876 Prefix.setLowerBound(XOP);
877 break;
878 case X86II::VEX:
879 // VEX can be 2 byte or 3 byte, not determined yet if not explicit
880 Prefix.setLowerBound(MI.getFlags() & X86::IP_USE_VEX3 ? VEX3 : VEX2);
881 break;
882 case X86II::EVEX:
883 Prefix.setLowerBound(EVEX);
884 break;
885 }
886
888
889 bool HasEVEX_K = TSFlags & X86II::EVEX_K;
890 bool HasVEX_4V = TSFlags & X86II::VEX_4V;
891 bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
892
893 switch (TSFlags & X86II::OpMapMask) {
894 default:
895 llvm_unreachable("Invalid prefix!");
896 case X86II::TB:
897 Prefix.set5M(0x1); // 0F
898 break;
899 case X86II::T8:
900 Prefix.set5M(0x2); // 0F 38
901 break;
902 case X86II::TA:
903 Prefix.set5M(0x3); // 0F 3A
904 break;
905 case X86II::XOP8:
906 Prefix.set5M(0x8);
907 break;
908 case X86II::XOP9:
909 Prefix.set5M(0x9);
910 break;
911 case X86II::XOPA:
912 Prefix.set5M(0xA);
913 break;
914 case X86II::T_MAP5:
915 Prefix.set5M(0x5);
916 break;
917 case X86II::T_MAP6:
918 Prefix.set5M(0x6);
919 break;
920 }
921
924 if ((TSFlags & X86II::EVEX_L2) && STI.hasFeature(X86::FeatureAVX512) &&
925 !STI.hasFeature(X86::FeatureEVEX512))
926 report_fatal_error("ZMM registers are not supported without EVEX512");
927 switch (TSFlags & X86II::OpPrefixMask) {
928 case X86II::PD:
929 Prefix.setPP(0x1); // 66
930 break;
931 case X86II::XS:
932 Prefix.setPP(0x2); // F3
933 break;
934 case X86II::XD:
935 Prefix.setPP(0x3); // F2
936 break;
937 }
938
939 Prefix.setZ(HasEVEX_K && (TSFlags & X86II::EVEX_Z));
940 Prefix.setEVEX_b(TSFlags & X86II::EVEX_B);
941
942 bool EncodeRC = false;
943 uint8_t EVEX_rc = 0;
944 unsigned CurOp = X86II::getOperandBias(Desc);
945
946 switch (TSFlags & X86II::FormMask) {
947 default:
948 llvm_unreachable("Unexpected form in emitVEXOpcodePrefix!");
950 // src1(ModR/M), MemAddr, src2(VEX_4V)
951 Prefix.setR(MI, CurOp++);
952 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
953 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
954 CurOp += X86::AddrNumOperands;
955 Prefix.set4V(MI, CurOp++);
956 break;
957 }
958 case X86II::MRM_C0:
959 case X86II::RawFrm:
960 break;
962 case X86II::MRMDestMem: {
963 // MRMDestMem instructions forms:
964 // MemAddr, src1(ModR/M)
965 // MemAddr, src1(VEX_4V), src2(ModR/M)
966 // MemAddr, src1(ModR/M), imm8
967 //
968 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
969 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
970 if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
971 Prefix.setV2(MI, MemOperand + X86::AddrIndexReg);
972
973 CurOp += X86::AddrNumOperands;
974
975 if (HasEVEX_K)
976 Prefix.setAAA(MI, CurOp++);
977
978 if (HasVEX_4V)
979 Prefix.set4VV2(MI, CurOp++);
980
981 Prefix.setRR2(MI, CurOp++);
982 break;
983 }
985 case X86II::MRMSrcMem: {
986 // MRMSrcMem instructions forms:
987 // src1(ModR/M), MemAddr
988 // src1(ModR/M), src2(VEX_4V), MemAddr
989 // src1(ModR/M), MemAddr, imm8
990 // src1(ModR/M), MemAddr, src2(Imm[7:4])
991 //
992 // FMA4:
993 // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
994 Prefix.setRR2(MI, CurOp++);
995
996 if (HasEVEX_K)
997 Prefix.setAAA(MI, CurOp++);
998
999 if (HasVEX_4V)
1000 Prefix.set4VV2(MI, CurOp++);
1001
1002 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
1003 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
1004 if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
1005 Prefix.setV2(MI, MemOperand + X86::AddrIndexReg);
1006
1007 break;
1008 }
1009 case X86II::MRMSrcMem4VOp3: {
1010 // Instruction format for 4VOp3:
1011 // src1(ModR/M), MemAddr, src3(VEX_4V)
1012 Prefix.setR(MI, CurOp++);
1013 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
1014 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
1015 Prefix.set4V(MI, CurOp + X86::AddrNumOperands);
1016 break;
1017 }
1018 case X86II::MRMSrcMemOp4: {
1019 // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
1020 Prefix.setR(MI, CurOp++);
1021 Prefix.set4V(MI, CurOp++);
1022 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
1023 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
1024 break;
1025 }
1026 case X86II::MRM0m:
1027 case X86II::MRM1m:
1028 case X86II::MRM2m:
1029 case X86II::MRM3m:
1030 case X86II::MRM4m:
1031 case X86II::MRM5m:
1032 case X86II::MRM6m:
1033 case X86II::MRM7m: {
1034 // MRM[0-9]m instructions forms:
1035 // MemAddr
1036 // src1(VEX_4V), MemAddr
1037 if (HasVEX_4V)
1038 Prefix.set4VV2(MI, CurOp++);
1039
1040 if (HasEVEX_K)
1041 Prefix.setAAA(MI, CurOp++);
1042
1043 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
1044 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
1045 if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
1046 Prefix.setV2(MI, MemOperand + X86::AddrIndexReg);
1047
1048 break;
1049 }
1050 case X86II::MRMSrcReg: {
1051 // MRMSrcReg instructions forms:
1052 // dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
1053 // dst(ModR/M), src1(ModR/M)
1054 // dst(ModR/M), src1(ModR/M), imm8
1055 //
1056 // FMA4:
1057 // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
1058 Prefix.setRR2(MI, CurOp++);
1059
1060 if (HasEVEX_K)
1061 Prefix.setAAA(MI, CurOp++);
1062
1063 if (HasVEX_4V)
1064 Prefix.set4VV2(MI, CurOp++);
1065
1066 Prefix.setB(MI, CurOp);
1067 Prefix.setX(MI, CurOp, 4);
1068 ++CurOp;
1069
1070 if (TSFlags & X86II::EVEX_B) {
1071 if (HasEVEX_RC) {
1072 unsigned NumOps = Desc.getNumOperands();
1073 unsigned RcOperand = NumOps - 1;
1074 assert(RcOperand >= CurOp);
1075 EVEX_rc = MI.getOperand(RcOperand).getImm();
1076 assert(EVEX_rc <= 3 && "Invalid rounding control!");
1077 }
1078 EncodeRC = true;
1079 }
1080 break;
1081 }
1082 case X86II::MRMSrcReg4VOp3: {
1083 // Instruction format for 4VOp3:
1084 // src1(ModR/M), src2(ModR/M), src3(VEX_4V)
1085 Prefix.setR(MI, CurOp++);
1086 Prefix.setB(MI, CurOp++);
1087 Prefix.set4V(MI, CurOp++);
1088 break;
1089 }
1090 case X86II::MRMSrcRegOp4: {
1091 // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
1092 Prefix.setR(MI, CurOp++);
1093 Prefix.set4V(MI, CurOp++);
1094 // Skip second register source (encoded in Imm[7:4])
1095 ++CurOp;
1096
1097 Prefix.setB(MI, CurOp);
1098 Prefix.setX(MI, CurOp, 4);
1099 ++CurOp;
1100 break;
1101 }
1102 case X86II::MRMDestReg: {
1103 // MRMDestReg instructions forms:
1104 // dst(ModR/M), src(ModR/M)
1105 // dst(ModR/M), src(ModR/M), imm8
1106 // dst(ModR/M), src1(VEX_4V), src2(ModR/M)
1107 Prefix.setB(MI, CurOp);
1108 Prefix.setX(MI, CurOp, 4);
1109 ++CurOp;
1110
1111 if (HasEVEX_K)
1112 Prefix.setAAA(MI, CurOp++);
1113
1114 if (HasVEX_4V)
1115 Prefix.set4VV2(MI, CurOp++);
1116
1117 Prefix.setRR2(MI, CurOp++);
1118 if (TSFlags & X86II::EVEX_B)
1119 EncodeRC = true;
1120 break;
1121 }
1122 case X86II::MRMr0: {
1123 // MRMr0 instructions forms:
1124 // 11:rrr:000
1125 // dst(ModR/M)
1126 Prefix.setRR2(MI, CurOp++);
1127 break;
1128 }
1129 case X86II::MRM0r:
1130 case X86II::MRM1r:
1131 case X86II::MRM2r:
1132 case X86II::MRM3r:
1133 case X86II::MRM4r:
1134 case X86II::MRM5r:
1135 case X86II::MRM6r:
1136 case X86II::MRM7r: {
1137 // MRM0r-MRM7r instructions forms:
1138 // dst(VEX_4V), src(ModR/M), imm8
1139 if (HasVEX_4V)
1140 Prefix.set4VV2(MI, CurOp++);
1141
1142 if (HasEVEX_K)
1143 Prefix.setAAA(MI, CurOp++);
1144
1145 Prefix.setB(MI, CurOp);
1146 Prefix.setX(MI, CurOp, 4);
1147 ++CurOp;
1148 break;
1149 }
1150 }
1151 if (EncodeRC) {
1152 Prefix.setL(EVEX_rc & 0x1);
1153 Prefix.setL2(EVEX_rc & 0x2);
1154 }
1155 PrefixKind Kind = Prefix.determineOptimalKind();
1156 Prefix.emit(CB);
1157 return Kind;
1158}
1159
1160/// Emit REX prefix which specifies
1161/// 1) 64-bit instructions,
1162/// 2) non-default operand size, and
1163/// 3) use of X86-64 extended registers.
1164///
1165/// \returns the used prefix (REX or None).
1166PrefixKind X86MCCodeEmitter::emitREXPrefix(int MemOperand, const MCInst &MI,
1167 const MCSubtargetInfo &STI,
1168 SmallVectorImpl<char> &CB) const {
1169 if (!STI.hasFeature(X86::Is64Bit))
1170 return None;
1171 X86OpcodePrefixHelper Prefix(*Ctx.getRegisterInfo());
1172 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
1173 uint64_t TSFlags = Desc.TSFlags;
1174 Prefix.setW(TSFlags & X86II::REX_W);
1175 unsigned NumOps = MI.getNumOperands();
1176 bool UsesHighByteReg = false;
1177#ifndef NDEBUG
1178 bool HasRegOp = false;
1179#endif
1180 unsigned CurOp = NumOps ? X86II::getOperandBias(Desc) : 0;
1181 for (unsigned i = CurOp; i != NumOps; ++i) {
1182 const MCOperand &MO = MI.getOperand(i);
1183 if (MO.isReg()) {
1184#ifndef NDEBUG
1185 HasRegOp = true;
1186#endif
1187 unsigned Reg = MO.getReg();
1188 if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
1189 UsesHighByteReg = true;
1190 // If it accesses SPL, BPL, SIL, or DIL, then it requires a REX prefix.
1192 Prefix.setLowerBound(REX);
1193 } else if (MO.isExpr() && STI.getTargetTriple().isX32()) {
1194 // GOTTPOFF and TLSDESC relocations require a REX prefix to allow
1195 // linker optimizations: even if the instructions we see may not require
1196 // any prefix, they may be replaced by instructions that do. This is
1197 // handled as a special case here so that it also works for hand-written
1198 // assembly without the user needing to write REX, as with GNU as.
1199 const auto *Ref = dyn_cast<MCSymbolRefExpr>(MO.getExpr());
1200 if (Ref && (Ref->getKind() == MCSymbolRefExpr::VK_GOTTPOFF ||
1201 Ref->getKind() == MCSymbolRefExpr::VK_TLSDESC)) {
1202 Prefix.setLowerBound(REX);
1203 }
1204 }
1205 }
1206 switch (TSFlags & X86II::FormMask) {
1207 default:
1208 assert(!HasRegOp && "Unexpected form in emitREXPrefix!");
1209 break;
1210 case X86II::RawFrm:
1212 case X86II::RawFrmSrc:
1213 case X86II::RawFrmDst:
1215 break;
1216 case X86II::AddRegFrm:
1217 Prefix.setB(MI, CurOp++);
1218 break;
1219 case X86II::MRMSrcReg:
1220 case X86II::MRMSrcRegCC:
1221 Prefix.setR(MI, CurOp++);
1222 Prefix.setB(MI, CurOp++);
1223 break;
1224 case X86II::MRMSrcMem:
1225 case X86II::MRMSrcMemCC:
1226 Prefix.setR(MI, CurOp++);
1227 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
1228 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
1229 CurOp += X86::AddrNumOperands;
1230 break;
1231 case X86II::MRMDestReg:
1232 Prefix.setB(MI, CurOp++);
1233 Prefix.setR(MI, CurOp++);
1234 break;
1235 case X86II::MRMDestMem:
1236 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
1237 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
1238 CurOp += X86::AddrNumOperands;
1239 Prefix.setR(MI, CurOp++);
1240 break;
1241 case X86II::MRMXmCC:
1242 case X86II::MRMXm:
1243 case X86II::MRM0m:
1244 case X86II::MRM1m:
1245 case X86II::MRM2m:
1246 case X86II::MRM3m:
1247 case X86II::MRM4m:
1248 case X86II::MRM5m:
1249 case X86II::MRM6m:
1250 case X86II::MRM7m:
1251 Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
1252 Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
1253 break;
1254 case X86II::MRMXrCC:
1255 case X86II::MRMXr:
1256 case X86II::MRM0r:
1257 case X86II::MRM1r:
1258 case X86II::MRM2r:
1259 case X86II::MRM3r:
1260 case X86II::MRM4r:
1261 case X86II::MRM5r:
1262 case X86II::MRM6r:
1263 case X86II::MRM7r:
1264 Prefix.setB(MI, CurOp++);
1265 break;
1266 }
1267 PrefixKind Kind = Prefix.determineOptimalKind();
1268 if (Kind && UsesHighByteReg)
1270 "Cannot encode high byte register in REX-prefixed instruction");
1271 Prefix.emit(CB);
1272 return Kind;
1273}
1274
1275/// Emit segment override opcode prefix as needed.
1276void X86MCCodeEmitter::emitSegmentOverridePrefix(
1277 unsigned SegOperand, const MCInst &MI, SmallVectorImpl<char> &CB) const {
1278 // Check for explicit segment override on memory operand.
1279 if (unsigned Reg = MI.getOperand(SegOperand).getReg())
1280 emitByte(X86::getSegmentOverridePrefixForReg(Reg), CB);
1281}
1282
1283/// Emit all instruction prefixes prior to the opcode.
1284///
1285/// \param MemOperand the operand # of the start of a memory operand if present.
1286/// If not present, it is -1.
1287///
1288/// \returns the used prefix (REX or None).
1289PrefixKind X86MCCodeEmitter::emitOpcodePrefix(int MemOperand, const MCInst &MI,
1290 const MCSubtargetInfo &STI,
1291 SmallVectorImpl<char> &CB) const {
1292 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
1293 uint64_t TSFlags = Desc.TSFlags;
1294
1295 // Emit the operand size opcode prefix as needed.
1296 if ((TSFlags & X86II::OpSizeMask) ==
1297 (STI.hasFeature(X86::Is16Bit) ? X86II::OpSize32 : X86II::OpSize16))
1298 emitByte(0x66, CB);
1299
1300 // Emit the LOCK opcode prefix.
1301 if (TSFlags & X86II::LOCK || MI.getFlags() & X86::IP_HAS_LOCK)
1302 emitByte(0xF0, CB);
1303
1304 // Emit the NOTRACK opcode prefix.
1305 if (TSFlags & X86II::NOTRACK || MI.getFlags() & X86::IP_HAS_NOTRACK)
1306 emitByte(0x3E, CB);
1307
1308 switch (TSFlags & X86II::OpPrefixMask) {
1309 case X86II::PD: // 66
1310 emitByte(0x66, CB);
1311 break;
1312 case X86II::XS: // F3
1313 emitByte(0xF3, CB);
1314 break;
1315 case X86II::XD: // F2
1316 emitByte(0xF2, CB);
1317 break;
1318 }
1319
1320 // Handle REX prefix.
1321 assert((STI.hasFeature(X86::Is64Bit) || !(TSFlags & X86II::REX_W)) &&
1322 "REX.W requires 64bit mode.");
1323 PrefixKind Kind = emitREXPrefix(MemOperand, MI, STI, CB);
1324
1325 // 0x0F escape code must be emitted just before the opcode.
1326 switch (TSFlags & X86II::OpMapMask) {
1327 case X86II::TB: // Two-byte opcode map
1328 case X86II::T8: // 0F 38
1329 case X86II::TA: // 0F 3A
1330 case X86II::ThreeDNow: // 0F 0F, second 0F emitted by caller.
1331 emitByte(0x0F, CB);
1332 break;
1333 }
1334
1335 switch (TSFlags & X86II::OpMapMask) {
1336 case X86II::T8: // 0F 38
1337 emitByte(0x38, CB);
1338 break;
1339 case X86II::TA: // 0F 3A
1340 emitByte(0x3A, CB);
1341 break;
1342 }
1343
1344 return Kind;
1345}
1346
1347void X86MCCodeEmitter::emitPrefix(const MCInst &MI, SmallVectorImpl<char> &CB,
1348 const MCSubtargetInfo &STI) const {
1349 unsigned Opcode = MI.getOpcode();
1350 const MCInstrDesc &Desc = MCII.get(Opcode);
1351 uint64_t TSFlags = Desc.TSFlags;
1352
1353 // Pseudo instructions don't get encoded.
1355 return;
1356
1357 unsigned CurOp = X86II::getOperandBias(Desc);
1358
1359 emitPrefixImpl(CurOp, MI, STI, CB);
1360}
1361
1362void X86MCCodeEmitter::encodeInstruction(const MCInst &MI,
1365 const MCSubtargetInfo &STI) const {
1366 unsigned Opcode = MI.getOpcode();
1367 const MCInstrDesc &Desc = MCII.get(Opcode);
1368 uint64_t TSFlags = Desc.TSFlags;
1369
1370 // Pseudo instructions don't get encoded.
1372 return;
1373
1374 unsigned NumOps = Desc.getNumOperands();
1375 unsigned CurOp = X86II::getOperandBias(Desc);
1376
1377 uint64_t StartByte = CB.size();
1378
1379 PrefixKind Kind = emitPrefixImpl(CurOp, MI, STI, CB);
1380
1381 // It uses the VEX.VVVV field?
1382 bool HasVEX_4V = TSFlags & X86II::VEX_4V;
1383 bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg;
1384
1385 // It uses the EVEX.aaa field?
1386 bool HasEVEX_K = TSFlags & X86II::EVEX_K;
1387 bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
1388
1389 // Used if a register is encoded in 7:4 of immediate.
1390 unsigned I8RegNum = 0;
1391
1392 uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
1393
1395 BaseOpcode = 0x0F; // Weird 3DNow! encoding.
1396
1397 unsigned OpcodeOffset = 0;
1398
1400 switch (Form) {
1401 default:
1402 errs() << "FORM: " << Form << "\n";
1403 llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
1404 case X86II::Pseudo:
1405 llvm_unreachable("Pseudo instruction shouldn't be emitted");
1407 case X86II::RawFrmSrc:
1408 case X86II::RawFrmDst:
1409 case X86II::PrefixByte:
1410 emitByte(BaseOpcode, CB);
1411 break;
1412 case X86II::AddCCFrm: {
1413 // This will be added to the opcode in the fallthrough.
1414 OpcodeOffset = MI.getOperand(NumOps - 1).getImm();
1415 assert(OpcodeOffset < 16 && "Unexpected opcode offset!");
1416 --NumOps; // Drop the operand from the end.
1417 [[fallthrough]];
1418 case X86II::RawFrm:
1419 emitByte(BaseOpcode + OpcodeOffset, CB);
1420
1421 if (!STI.hasFeature(X86::Is64Bit) || !isPCRel32Branch(MI, MCII))
1422 break;
1423
1424 const MCOperand &Op = MI.getOperand(CurOp++);
1425 emitImmediate(Op, MI.getLoc(), X86II::getSizeOfImm(TSFlags),
1427 Fixups);
1428 break;
1429 }
1431 emitByte(BaseOpcode, CB);
1432 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1434 StartByte, CB, Fixups);
1435 ++CurOp; // skip segment operand
1436 break;
1437 case X86II::RawFrmImm8:
1438 emitByte(BaseOpcode, CB);
1439 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1441 StartByte, CB, Fixups);
1442 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, StartByte,
1443 CB, Fixups);
1444 break;
1445 case X86II::RawFrmImm16:
1446 emitByte(BaseOpcode, CB);
1447 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1449 StartByte, CB, Fixups);
1450 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, StartByte,
1451 CB, Fixups);
1452 break;
1453
1454 case X86II::AddRegFrm:
1455 emitByte(BaseOpcode + getX86RegNum(MI.getOperand(CurOp++)), CB);
1456 break;
1457
1458 case X86II::MRMDestReg: {
1459 emitByte(BaseOpcode, CB);
1460 unsigned SrcRegNum = CurOp + 1;
1461
1462 if (HasEVEX_K) // Skip writemask
1463 ++SrcRegNum;
1464
1465 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1466 ++SrcRegNum;
1467
1468 emitRegModRMByte(MI.getOperand(CurOp),
1469 getX86RegNum(MI.getOperand(SrcRegNum)), CB);
1470 CurOp = SrcRegNum + 1;
1471 break;
1472 }
1474 unsigned CC = MI.getOperand(8).getImm();
1475 emitByte(BaseOpcode + CC, CB);
1476 unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
1477 emitMemModRMByte(MI, CurOp + 1, getX86RegNum(MI.getOperand(0)), TSFlags,
1478 Kind, StartByte, CB, Fixups, STI, false);
1479 CurOp = SrcRegNum + 3; // skip reg, VEX_V4 and CC
1480 break;
1481 }
1483 case X86II::MRMDestMem: {
1484 emitByte(BaseOpcode, CB);
1485 unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
1486
1487 if (HasEVEX_K) // Skip writemask
1488 ++SrcRegNum;
1489
1490 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1491 ++SrcRegNum;
1492
1493 bool ForceSIB = (Form == X86II::MRMDestMemFSIB);
1494 emitMemModRMByte(MI, CurOp, getX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
1495 Kind, StartByte, CB, Fixups, STI, ForceSIB);
1496 CurOp = SrcRegNum + 1;
1497 break;
1498 }
1499 case X86II::MRMSrcReg: {
1500 emitByte(BaseOpcode, CB);
1501 unsigned SrcRegNum = CurOp + 1;
1502
1503 if (HasEVEX_K) // Skip writemask
1504 ++SrcRegNum;
1505
1506 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1507 ++SrcRegNum;
1508
1509 emitRegModRMByte(MI.getOperand(SrcRegNum),
1510 getX86RegNum(MI.getOperand(CurOp)), CB);
1511 CurOp = SrcRegNum + 1;
1512 if (HasVEX_I8Reg)
1513 I8RegNum = getX86RegEncoding(MI, CurOp++);
1514 // do not count the rounding control operand
1515 if (HasEVEX_RC)
1516 --NumOps;
1517 break;
1518 }
1519 case X86II::MRMSrcReg4VOp3: {
1520 emitByte(BaseOpcode, CB);
1521 unsigned SrcRegNum = CurOp + 1;
1522
1523 emitRegModRMByte(MI.getOperand(SrcRegNum),
1524 getX86RegNum(MI.getOperand(CurOp)), CB);
1525 CurOp = SrcRegNum + 1;
1526 ++CurOp; // Encoded in VEX.VVVV
1527 break;
1528 }
1529 case X86II::MRMSrcRegOp4: {
1530 emitByte(BaseOpcode, CB);
1531 unsigned SrcRegNum = CurOp + 1;
1532
1533 // Skip 1st src (which is encoded in VEX_VVVV)
1534 ++SrcRegNum;
1535
1536 // Capture 2nd src (which is encoded in Imm[7:4])
1537 assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
1538 I8RegNum = getX86RegEncoding(MI, SrcRegNum++);
1539
1540 emitRegModRMByte(MI.getOperand(SrcRegNum),
1541 getX86RegNum(MI.getOperand(CurOp)), CB);
1542 CurOp = SrcRegNum + 1;
1543 break;
1544 }
1545 case X86II::MRMSrcRegCC: {
1546 unsigned FirstOp = CurOp++;
1547 unsigned SecondOp = CurOp++;
1548
1549 unsigned CC = MI.getOperand(CurOp++).getImm();
1550 emitByte(BaseOpcode + CC, CB);
1551
1552 emitRegModRMByte(MI.getOperand(SecondOp),
1553 getX86RegNum(MI.getOperand(FirstOp)), CB);
1554 break;
1555 }
1557 case X86II::MRMSrcMem: {
1558 unsigned FirstMemOp = CurOp + 1;
1559
1560 if (HasEVEX_K) // Skip writemask
1561 ++FirstMemOp;
1562
1563 if (HasVEX_4V)
1564 ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
1565
1566 emitByte(BaseOpcode, CB);
1567
1568 bool ForceSIB = (Form == X86II::MRMSrcMemFSIB);
1569 emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
1570 TSFlags, Kind, StartByte, CB, Fixups, STI, ForceSIB);
1571 CurOp = FirstMemOp + X86::AddrNumOperands;
1572 if (HasVEX_I8Reg)
1573 I8RegNum = getX86RegEncoding(MI, CurOp++);
1574 break;
1575 }
1576 case X86II::MRMSrcMem4VOp3: {
1577 unsigned FirstMemOp = CurOp + 1;
1578
1579 emitByte(BaseOpcode, CB);
1580
1581 emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
1582 TSFlags, Kind, StartByte, CB, Fixups, STI);
1583 CurOp = FirstMemOp + X86::AddrNumOperands;
1584 ++CurOp; // Encoded in VEX.VVVV.
1585 break;
1586 }
1587 case X86II::MRMSrcMemOp4: {
1588 unsigned FirstMemOp = CurOp + 1;
1589
1590 ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
1591
1592 // Capture second register source (encoded in Imm[7:4])
1593 assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
1594 I8RegNum = getX86RegEncoding(MI, FirstMemOp++);
1595
1596 emitByte(BaseOpcode, CB);
1597
1598 emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
1599 TSFlags, Kind, StartByte, CB, Fixups, STI);
1600 CurOp = FirstMemOp + X86::AddrNumOperands;
1601 break;
1602 }
1603 case X86II::MRMSrcMemCC: {
1604 unsigned RegOp = CurOp++;
1605 unsigned FirstMemOp = CurOp;
1606 CurOp = FirstMemOp + X86::AddrNumOperands;
1607
1608 unsigned CC = MI.getOperand(CurOp++).getImm();
1609 emitByte(BaseOpcode + CC, CB);
1610
1611 emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(RegOp)),
1612 TSFlags, Kind, StartByte, CB, Fixups, STI);
1613 break;
1614 }
1615
1616 case X86II::MRMXrCC: {
1617 unsigned RegOp = CurOp++;
1618
1619 unsigned CC = MI.getOperand(CurOp++).getImm();
1620 emitByte(BaseOpcode + CC, CB);
1621 emitRegModRMByte(MI.getOperand(RegOp), 0, CB);
1622 break;
1623 }
1624
1625 case X86II::MRMXr:
1626 case X86II::MRM0r:
1627 case X86II::MRM1r:
1628 case X86II::MRM2r:
1629 case X86II::MRM3r:
1630 case X86II::MRM4r:
1631 case X86II::MRM5r:
1632 case X86II::MRM6r:
1633 case X86II::MRM7r:
1634 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1635 ++CurOp;
1636 if (HasEVEX_K) // Skip writemask
1637 ++CurOp;
1638 emitByte(BaseOpcode, CB);
1639 emitRegModRMByte(MI.getOperand(CurOp++),
1640 (Form == X86II::MRMXr) ? 0 : Form - X86II::MRM0r, CB);
1641 break;
1642 case X86II::MRMr0:
1643 emitByte(BaseOpcode, CB);
1644 emitByte(modRMByte(3, getX86RegNum(MI.getOperand(CurOp++)), 0), CB);
1645 break;
1646
1647 case X86II::MRMXmCC: {
1648 unsigned FirstMemOp = CurOp;
1649 CurOp = FirstMemOp + X86::AddrNumOperands;
1650
1651 unsigned CC = MI.getOperand(CurOp++).getImm();
1652 emitByte(BaseOpcode + CC, CB);
1653
1654 emitMemModRMByte(MI, FirstMemOp, 0, TSFlags, Kind, StartByte, CB, Fixups,
1655 STI);
1656 break;
1657 }
1658
1659 case X86II::MRMXm:
1660 case X86II::MRM0m:
1661 case X86II::MRM1m:
1662 case X86II::MRM2m:
1663 case X86II::MRM3m:
1664 case X86II::MRM4m:
1665 case X86II::MRM5m:
1666 case X86II::MRM6m:
1667 case X86II::MRM7m:
1668 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1669 ++CurOp;
1670 if (HasEVEX_K) // Skip writemask
1671 ++CurOp;
1672 emitByte(BaseOpcode, CB);
1673 emitMemModRMByte(MI, CurOp,
1675 Kind, StartByte, CB, Fixups, STI);
1676 CurOp += X86::AddrNumOperands;
1677 break;
1678
1679 case X86II::MRM0X:
1680 case X86II::MRM1X:
1681 case X86II::MRM2X:
1682 case X86II::MRM3X:
1683 case X86II::MRM4X:
1684 case X86II::MRM5X:
1685 case X86II::MRM6X:
1686 case X86II::MRM7X:
1687 emitByte(BaseOpcode, CB);
1688 emitByte(0xC0 + ((Form - X86II::MRM0X) << 3), CB);
1689 break;
1690
1691 case X86II::MRM_C0:
1692 case X86II::MRM_C1:
1693 case X86II::MRM_C2:
1694 case X86II::MRM_C3:
1695 case X86II::MRM_C4:
1696 case X86II::MRM_C5:
1697 case X86II::MRM_C6:
1698 case X86II::MRM_C7:
1699 case X86II::MRM_C8:
1700 case X86II::MRM_C9:
1701 case X86II::MRM_CA:
1702 case X86II::MRM_CB:
1703 case X86II::MRM_CC:
1704 case X86II::MRM_CD:
1705 case X86II::MRM_CE:
1706 case X86II::MRM_CF:
1707 case X86II::MRM_D0:
1708 case X86II::MRM_D1:
1709 case X86II::MRM_D2:
1710 case X86II::MRM_D3:
1711 case X86II::MRM_D4:
1712 case X86II::MRM_D5:
1713 case X86II::MRM_D6:
1714 case X86II::MRM_D7:
1715 case X86II::MRM_D8:
1716 case X86II::MRM_D9:
1717 case X86II::MRM_DA:
1718 case X86II::MRM_DB:
1719 case X86II::MRM_DC:
1720 case X86II::MRM_DD:
1721 case X86II::MRM_DE:
1722 case X86II::MRM_DF:
1723 case X86II::MRM_E0:
1724 case X86II::MRM_E1:
1725 case X86II::MRM_E2:
1726 case X86II::MRM_E3:
1727 case X86II::MRM_E4:
1728 case X86II::MRM_E5:
1729 case X86II::MRM_E6:
1730 case X86II::MRM_E7:
1731 case X86II::MRM_E8:
1732 case X86II::MRM_E9:
1733 case X86II::MRM_EA:
1734 case X86II::MRM_EB:
1735 case X86II::MRM_EC:
1736 case X86II::MRM_ED:
1737 case X86II::MRM_EE:
1738 case X86II::MRM_EF:
1739 case X86II::MRM_F0:
1740 case X86II::MRM_F1:
1741 case X86II::MRM_F2:
1742 case X86II::MRM_F3:
1743 case X86II::MRM_F4:
1744 case X86II::MRM_F5:
1745 case X86II::MRM_F6:
1746 case X86II::MRM_F7:
1747 case X86II::MRM_F8:
1748 case X86II::MRM_F9:
1749 case X86II::MRM_FA:
1750 case X86II::MRM_FB:
1751 case X86II::MRM_FC:
1752 case X86II::MRM_FD:
1753 case X86II::MRM_FE:
1754 case X86II::MRM_FF:
1755 emitByte(BaseOpcode, CB);
1756 emitByte(0xC0 + Form - X86II::MRM_C0, CB);
1757 break;
1758 }
1759
1760 if (HasVEX_I8Reg) {
1761 // The last source register of a 4 operand instruction in AVX is encoded
1762 // in bits[7:4] of a immediate byte.
1763 assert(I8RegNum < 16 && "Register encoding out of range");
1764 I8RegNum <<= 4;
1765 if (CurOp != NumOps) {
1766 unsigned Val = MI.getOperand(CurOp++).getImm();
1767 assert(Val < 16 && "Immediate operand value out of range");
1768 I8RegNum |= Val;
1769 }
1770 emitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
1771 StartByte, CB, Fixups);
1772 } else {
1773 // If there is a remaining operand, it must be a trailing immediate. Emit it
1774 // according to the right size for the instruction. Some instructions
1775 // (SSE4a extrq and insertq) have two trailing immediates.
1776 while (CurOp != NumOps && NumOps - CurOp <= 2) {
1777 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1779 StartByte, CB, Fixups);
1780 }
1781 }
1782
1784 emitByte(X86II::getBaseOpcodeFor(TSFlags), CB);
1785
1786 assert(CB.size() - StartByte <= 15 &&
1787 "The size of instruction must be no longer than 15.");
1788#ifndef NDEBUG
1789 // FIXME: Verify.
1790 if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
1791 errs() << "Cannot encode all operands of: ";
1792 MI.dump();
1793 errs() << '\n';
1794 abort();
1795 }
1796#endif
1797}
1798
1800 MCContext &Ctx) {
1801 return new X86MCCodeEmitter(MCII, Ctx);
1802}
unsigned const MachineRegisterInfo * MRI
dxil metadata emit
uint64_t Size
Symbol * Sym
Definition: ELF_riscv.cpp:468
static bool isPCRel(unsigned Kind)
IRTranslator LLVM IR MI
Module * Mod
uint64_t TSFlags
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
@ None
static MCFixupKind getImmFixupKind(uint64_t TSFlags)
static bool isPCRel32Branch(const MCInst &MI, const MCInstrInfo &MCII)
static GlobalOffsetTableExprKind startsWithGlobalOffsetTable(const MCExpr *Expr)
Check if this expression starts with GLOBAL_OFFSET_TABLE and if it is of the form GLOBAL_OFFSET_TABLE...
static uint8_t modRMByte(unsigned Mod, unsigned RegOpcode, unsigned RM)
static bool isDispOrCDisp8(uint64_t TSFlags, int Value, int &ImmOffset)
Determine if this immediate can fit in a disp8 or a compressed disp8 for EVEX instructions.
GlobalOffsetTableExprKind
@ GOT_Normal
@ GOT_None
@ GOT_SymDiff
static void emitConstant(uint64_t Val, unsigned Size, SmallVectorImpl< char > &CB)
static bool hasSecRelSymbolRef(const MCExpr *Expr)
Value * RHS
This class represents an Operation in the Expression.
Binary assembler expressions.
Definition: MCExpr.h:484
const MCExpr * getLHS() const
Get the left-hand side expression of the binary operator.
Definition: MCExpr.h:631
const MCExpr * getRHS() const
Get the right-hand side expression of the binary operator.
Definition: MCExpr.h:634
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition: MCExpr.h:528
MCCodeEmitter - Generic instruction encoding interface.
Definition: MCCodeEmitter.h:21
virtual void encodeInstruction(const MCInst &Inst, SmallVectorImpl< char > &CB, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
Encode the given Inst to bytes and append to CB.
MCCodeEmitter & operator=(const MCCodeEmitter &)=delete
virtual void emitPrefix(const MCInst &Inst, SmallVectorImpl< char > &CB, const MCSubtargetInfo &STI) const
Append the prefixes of given instruction to the code buffer.
Definition: MCCodeEmitter.h:37
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition: MCExpr.cpp:194
Context object for machine code objects.
Definition: MCContext.h:76
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
@ SymbolRef
References to labels and assigned expressions.
Definition: MCExpr.h:40
@ Binary
Binary expressions.
Definition: MCExpr.h:38
ExprKind getKind() const
Definition: MCExpr.h:81
static MCFixupKind getKindForSize(unsigned Size, bool IsPCRel)
Return the generic fixup kind for a value with the given size.
Definition: MCFixup.h:108
static MCFixup create(uint32_t Offset, const MCExpr *Value, MCFixupKind Kind, SMLoc Loc=SMLoc())
Definition: MCFixup.h:86
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Instances of this class represent operands of the MCInst class.
Definition: MCInst.h:36
int64_t getImm() const
Definition: MCInst.h:80
static MCOperand createImm(int64_t Val)
Definition: MCInst.h:141
bool isImm() const
Definition: MCInst.h:62
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:69
bool isReg() const
Definition: MCInst.h:61
const MCExpr * getExpr() const
Definition: MCInst.h:114
bool isExpr() const
Definition: MCInst.h:65
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
Represent a reference to a symbol from inside an expression.
Definition: MCExpr.h:192
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
StringRef getName() const
getName - Get the symbol name.
Definition: MCSymbol.h:206
void dump() const
Definition: Pass.cpp:136
Represents a location in source code.
Definition: SMLoc.h:23
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void push_back(const T &Elt)
Definition: SmallVector.h:416
bool isX32() const
Tests whether the target is X32.
Definition: Triple.h:991
LLVM Value Representation.
Definition: Value.h:74
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ SS
Definition: X86.h:208
Reg
All possible values of the reg field in the ModR/M byte.
bool hasImm(uint64_t TSFlags)
Definition: X86BaseInfo.h:989
bool isX86_64NonExtLowByteReg(unsigned reg)
Definition: X86BaseInfo.h:1220
bool isPseudo(uint64_t TSFlags)
Definition: X86BaseInfo.h:979
@ RawFrm
Raw - This form is for instructions that don't have any operands, so they are just a fixed opcode val...
Definition: X86BaseInfo.h:591
@ RawFrmDstSrc
RawFrmDstSrc - This form is for instructions that use the source index register SI/ESI/RSI with a pos...
Definition: X86BaseInfo.h:612
@ MRMSrcMemCC
MRMSrcMemCC - This form is used for instructions that use the Mod/RM byte to specify the operands and...
Definition: X86BaseInfo.h:675
@ MRM_C0
MRM_XX - A mod/rm byte of exactly 0xXX.
Definition: X86BaseInfo.h:737
@ RawFrmDst
RawFrmDst - This form is for instructions that use the destination index register DI/EDI/RDI.
Definition: X86BaseInfo.h:607
@ MRMDestMem4VOp3CC
MRMDestMem4VOp3CC - This form is used for instructions that use the Mod/RM byte to specify a destinat...
Definition: X86BaseInfo.h:636
@ AddCCFrm
AddCCFrm - This form is used for Jcc that encode the condition code in the lower 4 bits of the opcode...
Definition: X86BaseInfo.h:627
@ PrefixByte
PrefixByte - This form is used for instructions that represent a prefix byte like data16 or rep.
Definition: X86BaseInfo.h:631
@ MRMr0
MRM[0-7][rm] - These forms are used to represent instructions that use a Mod/RM byte,...
Definition: X86BaseInfo.h:644
@ MRMXm
MRMXm - This form is used for instructions that use the Mod/RM byte to specify a memory source,...
Definition: X86BaseInfo.h:686
@ MRMDestMemFSIB
MRMDestMem - But force to use the SIB field.
Definition: X86BaseInfo.h:650
@ AddRegFrm
AddRegFrm - This form is used for instructions like 'push r32' that have their one register operand a...
Definition: X86BaseInfo.h:595
@ RawFrmImm8
RawFrmImm8 - This is used for the ENTER instruction, which has two immediates, the first of which is ...
Definition: X86BaseInfo.h:617
@ XOP
XOP - Opcode prefix used by XOP instructions.
Definition: X86BaseInfo.h:916
@ MRMXr
MRMXr - This form is used for instructions that use the Mod/RM byte to specify a register source,...
Definition: X86BaseInfo.h:726
@ MRMSrcMem4VOp3
MRMSrcMem4VOp3 - This form is used for instructions that encode operand 3 with VEX....
Definition: X86BaseInfo.h:665
@ MRMDestMem
MRMDestMem - This form is used for instructions that use the Mod/RM byte to specify a destination,...
Definition: X86BaseInfo.h:655
@ MRMSrcMemFSIB
MRMSrcMem - But force to use the SIB field.
Definition: X86BaseInfo.h:647
@ MRMSrcRegOp4
MRMSrcRegOp4 - This form is used for instructions that use the Mod/RM byte to specify the fourth sour...
Definition: X86BaseInfo.h:710
@ MRMXrCC
MRMXCCr - This form is used for instructions that use the Mod/RM byte to specify a register source,...
Definition: X86BaseInfo.h:721
@ MRMXmCC
MRMXm - This form is used for instructions that use the Mod/RM byte to specify a memory source,...
Definition: X86BaseInfo.h:681
@ RawFrmImm16
RawFrmImm16 - This is used for CALL FAR instructions, which have two immediates, the first of which i...
Definition: X86BaseInfo.h:623
@ MRMSrcReg
MRMSrcReg - This form is used for instructions that use the Mod/RM byte to specify a source,...
Definition: X86BaseInfo.h:700
@ RawFrmSrc
RawFrmSrc - This form is for instructions that use the source index register SI/ESI/RSI with a possib...
Definition: X86BaseInfo.h:603
@ MRMDestReg
MRMDestReg - This form is used for instructions that use the Mod/RM byte to specify a destination,...
Definition: X86BaseInfo.h:695
@ MRMSrcMem
MRMSrcMem - This form is used for instructions that use the Mod/RM byte to specify a source,...
Definition: X86BaseInfo.h:660
@ MRMSrcMemOp4
MRMSrcMemOp4 - This form is used for instructions that use the Mod/RM byte to specify the fourth sour...
Definition: X86BaseInfo.h:670
@ MRMSrcRegCC
MRMSrcRegCC - This form is used for instructions that use the Mod/RM byte to specify the operands and...
Definition: X86BaseInfo.h:715
@ ThreeDNow
ThreeDNow - This indicates that the instruction uses the wacky 0x0F 0x0F prefix for 3DNow!...
Definition: X86BaseInfo.h:830
@ MRMSrcReg4VOp3
MRMSrcReg4VOp3 - This form is used for instructions that encode operand 3 with VEX....
Definition: X86BaseInfo.h:705
@ RawFrmMemOffs
RawFrmMemOffs - This form is for instructions that store an absolute memory offset as an immediate wi...
Definition: X86BaseInfo.h:599
bool isImmPCRel(uint64_t TSFlags)
Definition: X86BaseInfo.h:1012
unsigned getSizeOfImm(uint64_t TSFlags)
Decode the "size of immediate" field from the TSFlags field of the specified instruction.
Definition: X86BaseInfo.h:995
uint8_t getBaseOpcodeFor(uint64_t TSFlags)
Definition: X86BaseInfo.h:985
int getMemoryOperandNo(uint64_t TSFlags)
The function returns the MCInst operand # for the first field of the memory operand.
Definition: X86BaseInfo.h:1095
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
Definition: X86BaseInfo.h:1055
bool isImmSigned(uint64_t TSFlags)
Definition: X86BaseInfo.h:1031
bool is16BitMemOperand(const MCInst &MI, unsigned Op, const MCSubtargetInfo &STI)
bool needsAddressSizeOverride(const MCInst &MI, const MCSubtargetInfo &STI, int MemoryOperand, uint64_t TSFlags)
Returns true if this instruction needs an Address-Size override prefix.
@ IP_HAS_NOTRACK
Definition: X86BaseInfo.h:63
@ IP_USE_DISP8
Definition: X86BaseInfo.h:68
@ IP_HAS_AD_SIZE
Definition: X86BaseInfo.h:59
@ IP_HAS_REPEAT
Definition: X86BaseInfo.h:61
@ IP_USE_DISP32
Definition: X86BaseInfo.h:69
@ IP_HAS_REPEAT_NE
Definition: X86BaseInfo.h:60
@ AddrScaleAmt
Definition: X86BaseInfo.h:33
@ AddrSegmentReg
AddrSegmentReg - The operand # of the segment in the memory operand.
Definition: X86BaseInfo.h:38
@ AddrIndexReg
Definition: X86BaseInfo.h:34
@ AddrNumOperands
AddrNumOperands - Total number of operands in a memory reference.
Definition: X86BaseInfo.h:41
EncodingOfSegmentOverridePrefix getSegmentOverridePrefixForReg(unsigned Reg)
Given a segment register, return the encoding of the segment override prefix for it.
Definition: X86BaseInfo.h:380
@ reloc_global_offset_table8
Definition: X86FixupKinds.h:31
@ reloc_signed_4byte_relax
Definition: X86FixupKinds.h:26
@ reloc_branch_4byte_pcrel
Definition: X86FixupKinds.h:32
@ reloc_riprel_4byte_relax
Definition: X86FixupKinds.h:19
@ reloc_signed_4byte
Definition: X86FixupKinds.h:23
@ reloc_riprel_4byte_relax_rex
Definition: X86FixupKinds.h:21
@ reloc_global_offset_table
Definition: X86FixupKinds.h:28
@ reloc_riprel_4byte_movq_load
Definition: X86FixupKinds.h:18
@ reloc_riprel_4byte
Definition: X86FixupKinds.h:17
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
MCCodeEmitter * createX86MCCodeEmitter(const MCInstrInfo &MCII, MCContext &Ctx)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:264
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
MCFixupKind
Extensible enumeration to represent the type of a fixup.
Definition: MCFixup.h:21
@ FK_PCRel_4
A four-byte pc relative fixup.
Definition: MCFixup.h:29
@ FK_PCRel_2
A two-byte pc relative fixup.
Definition: MCFixup.h:28
@ FK_Data_8
A eight-byte fixup.
Definition: MCFixup.h:26
@ FK_Data_1
A one-byte fixup.
Definition: MCFixup.h:23
@ FK_Data_4
A four-byte fixup.
Definition: MCFixup.h:25
@ FK_NONE
A no-op fixup.
Definition: MCFixup.h:22
@ FK_SecRel_4
A four-byte section relative fixup.
Definition: MCFixup.h:41
@ FK_PCRel_1
A one-byte pc relative fixup.
Definition: MCFixup.h:27
@ FK_Data_2
A two-byte fixup.
Definition: MCFixup.h:24
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Ref
The access may reference the value stored in memory.
Description of the encoding of one expression Op.