LLVM 23.0.0git
X86MCTargetDesc.cpp
Go to the documentation of this file.
1//===-- X86MCTargetDesc.cpp - X86 Target Descriptions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides X86 specific target descriptions.
10//
11//===----------------------------------------------------------------------===//
12
13#include "X86MCTargetDesc.h"
15#include "X86ATTInstPrinter.h"
16#include "X86BaseInfo.h"
17#include "X86IntelInstPrinter.h"
18#include "X86MCAsmInfo.h"
19#include "X86TargetStreamer.h"
20#include "llvm-c/Visibility.h"
21#include "llvm/ADT/APInt.h"
23#include "llvm/MC/MCDwarf.h"
25#include "llvm/MC/MCInstrInfo.h"
27#include "llvm/MC/MCStreamer.h"
33
34using namespace llvm;
35
36#define GET_REGINFO_MC_DESC
37#include "X86GenRegisterInfo.inc"
38
39#define GET_INSTRINFO_MC_DESC
40#define GET_INSTRINFO_MC_HELPERS
41#define ENABLE_INSTR_PREDICATE_VERIFIER
42#include "X86GenInstrInfo.inc"
43
44#define GET_SUBTARGETINFO_MC_DESC
45#include "X86GenSubtargetInfo.inc"
46
47std::string X86_MC::ParseX86Triple(const Triple &TT) {
48 std::string FS;
49 // SSE2 should default to enabled in 64-bit mode, but can be turned off
50 // explicitly.
51 if (TT.isX86_64())
52 FS = "+64bit-mode,-32bit-mode,-16bit-mode,+sse2";
53 else if (TT.getEnvironment() != Triple::CODE16)
54 FS = "-64bit-mode,+32bit-mode,-16bit-mode";
55 else
56 FS = "-64bit-mode,-32bit-mode,+16bit-mode";
57
58 if (TT.isX32())
59 FS += ",+x32";
60
61 return FS;
62}
63
64unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
65 if (TT.isX86_64())
67
68 if (TT.isOSDarwin())
70 if (TT.isOSCygMing())
71 // Unsupported by now, just quick fallback
74}
75
77 return MI.getFlags() & X86::IP_HAS_LOCK;
78}
79
80static bool isMemOperand(const MCInst &MI, unsigned Op, unsigned RegClassID) {
81 const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
82 const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
83 const MCRegisterClass &RC = X86MCRegisterClasses[RegClassID];
84
85 return (Base.isReg() && Base.getReg() && RC.contains(Base.getReg())) ||
86 (Index.isReg() && Index.getReg() && RC.contains(Index.getReg()));
87}
88
89bool X86_MC::is16BitMemOperand(const MCInst &MI, unsigned Op,
90 const MCSubtargetInfo &STI) {
91 const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
92 const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
93
94 if (STI.hasFeature(X86::Is16Bit) && Base.isReg() && !Base.getReg() &&
95 Index.isReg() && !Index.getReg())
96 return true;
97 return isMemOperand(MI, Op, X86::GR16RegClassID);
98}
99
100bool X86_MC::is32BitMemOperand(const MCInst &MI, unsigned Op) {
101 const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
102 const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
103 if (Base.isReg() && Base.getReg() == X86::EIP) {
104 assert(Index.isReg() && !Index.getReg() && "Invalid eip-based address");
105 return true;
106 }
107 if (Index.isReg() && Index.getReg() == X86::EIZ)
108 return true;
109 return isMemOperand(MI, Op, X86::GR32RegClassID);
110}
111
#ifndef NDEBUG
// Debug-build-only helper: true if the memory operand starting at index Op
// uses a 64-bit (GR64) base or index register. Referenced only from asserts.
bool X86_MC::is64BitMemOperand(const MCInst &MI, unsigned Op) {
  return isMemOperand(MI, Op, X86::GR64RegClassID);
}
#endif
117
119 const MCSubtargetInfo &STI,
120 int MemoryOperand, uint64_t TSFlags) {
121 uint64_t AdSize = TSFlags & X86II::AdSizeMask;
122 bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
123 bool Is32BitMode = STI.hasFeature(X86::Is32Bit);
124 bool Is64BitMode = STI.hasFeature(X86::Is64Bit);
125 if ((Is16BitMode && AdSize == X86II::AdSize32) ||
126 (Is32BitMode && AdSize == X86II::AdSize16) ||
127 (Is64BitMode && AdSize == X86II::AdSize32))
128 return true;
129 uint64_t Form = TSFlags & X86II::FormMask;
130 switch (Form) {
131 default:
132 break;
133 case X86II::RawFrmDstSrc: {
134 MCRegister siReg = MI.getOperand(1).getReg();
135 assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
136 (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
137 (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
138 "SI and DI register sizes do not match");
139 return (!Is32BitMode && siReg == X86::ESI) ||
140 (Is32BitMode && siReg == X86::SI);
141 }
142 case X86II::RawFrmSrc: {
143 MCRegister siReg = MI.getOperand(0).getReg();
144 return (!Is32BitMode && siReg == X86::ESI) ||
145 (Is32BitMode && siReg == X86::SI);
146 }
147 case X86II::RawFrmDst: {
148 MCRegister siReg = MI.getOperand(0).getReg();
149 return (!Is32BitMode && siReg == X86::EDI) ||
150 (Is32BitMode && siReg == X86::DI);
151 }
152 }
153
154 // Determine where the memory operand starts, if present.
155 if (MemoryOperand < 0)
156 return false;
157
158 if (STI.hasFeature(X86::Is64Bit)) {
159 assert(!is16BitMemOperand(MI, MemoryOperand, STI));
160 return is32BitMemOperand(MI, MemoryOperand);
161 }
162 if (STI.hasFeature(X86::Is32Bit)) {
163 assert(!is64BitMemOperand(MI, MemoryOperand));
164 return is16BitMemOperand(MI, MemoryOperand, STI);
165 }
166 assert(STI.hasFeature(X86::Is16Bit));
167 assert(!is64BitMemOperand(MI, MemoryOperand));
168 return !is16BitMemOperand(MI, MemoryOperand, STI);
169}
170
172 // FIXME: TableGen these.
173 for (unsigned Reg = X86::NoRegister + 1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
174 unsigned SEH = MRI->getEncodingValue(Reg);
175 MRI->mapLLVMRegToSEHReg(Reg, SEH);
176 }
177
178 // Mapping from CodeView to MC register id.
179 static const struct {
181 MCPhysReg Reg;
182 } RegMap[] = {
183 {codeview::RegisterId::AL, X86::AL},
184 {codeview::RegisterId::CL, X86::CL},
185 {codeview::RegisterId::DL, X86::DL},
186 {codeview::RegisterId::BL, X86::BL},
187 {codeview::RegisterId::AH, X86::AH},
188 {codeview::RegisterId::CH, X86::CH},
189 {codeview::RegisterId::DH, X86::DH},
190 {codeview::RegisterId::BH, X86::BH},
191 {codeview::RegisterId::AX, X86::AX},
192 {codeview::RegisterId::CX, X86::CX},
193 {codeview::RegisterId::DX, X86::DX},
194 {codeview::RegisterId::BX, X86::BX},
195 {codeview::RegisterId::SP, X86::SP},
196 {codeview::RegisterId::BP, X86::BP},
197 {codeview::RegisterId::SI, X86::SI},
198 {codeview::RegisterId::DI, X86::DI},
199 {codeview::RegisterId::EAX, X86::EAX},
200 {codeview::RegisterId::ECX, X86::ECX},
201 {codeview::RegisterId::EDX, X86::EDX},
202 {codeview::RegisterId::EBX, X86::EBX},
203 {codeview::RegisterId::ESP, X86::ESP},
204 {codeview::RegisterId::EBP, X86::EBP},
205 {codeview::RegisterId::ESI, X86::ESI},
206 {codeview::RegisterId::EDI, X86::EDI},
207
208 {codeview::RegisterId::EFLAGS, X86::EFLAGS},
209
210 {codeview::RegisterId::ST0, X86::ST0},
211 {codeview::RegisterId::ST1, X86::ST1},
212 {codeview::RegisterId::ST2, X86::ST2},
213 {codeview::RegisterId::ST3, X86::ST3},
214 {codeview::RegisterId::ST4, X86::ST4},
215 {codeview::RegisterId::ST5, X86::ST5},
216 {codeview::RegisterId::ST6, X86::ST6},
217 {codeview::RegisterId::ST7, X86::ST7},
218
219 {codeview::RegisterId::ST0, X86::FP0},
220 {codeview::RegisterId::ST1, X86::FP1},
221 {codeview::RegisterId::ST2, X86::FP2},
222 {codeview::RegisterId::ST3, X86::FP3},
223 {codeview::RegisterId::ST4, X86::FP4},
224 {codeview::RegisterId::ST5, X86::FP5},
225 {codeview::RegisterId::ST6, X86::FP6},
226 {codeview::RegisterId::ST7, X86::FP7},
227
228 {codeview::RegisterId::MM0, X86::MM0},
229 {codeview::RegisterId::MM1, X86::MM1},
230 {codeview::RegisterId::MM2, X86::MM2},
231 {codeview::RegisterId::MM3, X86::MM3},
232 {codeview::RegisterId::MM4, X86::MM4},
233 {codeview::RegisterId::MM5, X86::MM5},
234 {codeview::RegisterId::MM6, X86::MM6},
235 {codeview::RegisterId::MM7, X86::MM7},
236
237 {codeview::RegisterId::XMM0, X86::XMM0},
238 {codeview::RegisterId::XMM1, X86::XMM1},
239 {codeview::RegisterId::XMM2, X86::XMM2},
240 {codeview::RegisterId::XMM3, X86::XMM3},
241 {codeview::RegisterId::XMM4, X86::XMM4},
242 {codeview::RegisterId::XMM5, X86::XMM5},
243 {codeview::RegisterId::XMM6, X86::XMM6},
244 {codeview::RegisterId::XMM7, X86::XMM7},
245
246 {codeview::RegisterId::XMM8, X86::XMM8},
247 {codeview::RegisterId::XMM9, X86::XMM9},
248 {codeview::RegisterId::XMM10, X86::XMM10},
249 {codeview::RegisterId::XMM11, X86::XMM11},
250 {codeview::RegisterId::XMM12, X86::XMM12},
251 {codeview::RegisterId::XMM13, X86::XMM13},
252 {codeview::RegisterId::XMM14, X86::XMM14},
253 {codeview::RegisterId::XMM15, X86::XMM15},
254
255 {codeview::RegisterId::SIL, X86::SIL},
256 {codeview::RegisterId::DIL, X86::DIL},
257 {codeview::RegisterId::BPL, X86::BPL},
258 {codeview::RegisterId::SPL, X86::SPL},
259 {codeview::RegisterId::RAX, X86::RAX},
260 {codeview::RegisterId::RBX, X86::RBX},
261 {codeview::RegisterId::RCX, X86::RCX},
262 {codeview::RegisterId::RDX, X86::RDX},
263 {codeview::RegisterId::RSI, X86::RSI},
264 {codeview::RegisterId::RDI, X86::RDI},
265 {codeview::RegisterId::RBP, X86::RBP},
266 {codeview::RegisterId::RSP, X86::RSP},
267 {codeview::RegisterId::R8, X86::R8},
268 {codeview::RegisterId::R9, X86::R9},
269 {codeview::RegisterId::R10, X86::R10},
270 {codeview::RegisterId::R11, X86::R11},
271 {codeview::RegisterId::R12, X86::R12},
272 {codeview::RegisterId::R13, X86::R13},
273 {codeview::RegisterId::R14, X86::R14},
274 {codeview::RegisterId::R15, X86::R15},
275 {codeview::RegisterId::R8B, X86::R8B},
276 {codeview::RegisterId::R9B, X86::R9B},
277 {codeview::RegisterId::R10B, X86::R10B},
278 {codeview::RegisterId::R11B, X86::R11B},
279 {codeview::RegisterId::R12B, X86::R12B},
280 {codeview::RegisterId::R13B, X86::R13B},
281 {codeview::RegisterId::R14B, X86::R14B},
282 {codeview::RegisterId::R15B, X86::R15B},
283 {codeview::RegisterId::R8W, X86::R8W},
284 {codeview::RegisterId::R9W, X86::R9W},
285 {codeview::RegisterId::R10W, X86::R10W},
286 {codeview::RegisterId::R11W, X86::R11W},
287 {codeview::RegisterId::R12W, X86::R12W},
288 {codeview::RegisterId::R13W, X86::R13W},
289 {codeview::RegisterId::R14W, X86::R14W},
290 {codeview::RegisterId::R15W, X86::R15W},
291 {codeview::RegisterId::R8D, X86::R8D},
292 {codeview::RegisterId::R9D, X86::R9D},
293 {codeview::RegisterId::R10D, X86::R10D},
294 {codeview::RegisterId::R11D, X86::R11D},
295 {codeview::RegisterId::R12D, X86::R12D},
296 {codeview::RegisterId::R13D, X86::R13D},
297 {codeview::RegisterId::R14D, X86::R14D},
298 {codeview::RegisterId::R15D, X86::R15D},
299 {codeview::RegisterId::AMD64_YMM0, X86::YMM0},
300 {codeview::RegisterId::AMD64_YMM1, X86::YMM1},
301 {codeview::RegisterId::AMD64_YMM2, X86::YMM2},
302 {codeview::RegisterId::AMD64_YMM3, X86::YMM3},
303 {codeview::RegisterId::AMD64_YMM4, X86::YMM4},
304 {codeview::RegisterId::AMD64_YMM5, X86::YMM5},
305 {codeview::RegisterId::AMD64_YMM6, X86::YMM6},
306 {codeview::RegisterId::AMD64_YMM7, X86::YMM7},
307 {codeview::RegisterId::AMD64_YMM8, X86::YMM8},
308 {codeview::RegisterId::AMD64_YMM9, X86::YMM9},
309 {codeview::RegisterId::AMD64_YMM10, X86::YMM10},
310 {codeview::RegisterId::AMD64_YMM11, X86::YMM11},
311 {codeview::RegisterId::AMD64_YMM12, X86::YMM12},
312 {codeview::RegisterId::AMD64_YMM13, X86::YMM13},
313 {codeview::RegisterId::AMD64_YMM14, X86::YMM14},
314 {codeview::RegisterId::AMD64_YMM15, X86::YMM15},
315 {codeview::RegisterId::AMD64_YMM16, X86::YMM16},
316 {codeview::RegisterId::AMD64_YMM17, X86::YMM17},
317 {codeview::RegisterId::AMD64_YMM18, X86::YMM18},
318 {codeview::RegisterId::AMD64_YMM19, X86::YMM19},
319 {codeview::RegisterId::AMD64_YMM20, X86::YMM20},
320 {codeview::RegisterId::AMD64_YMM21, X86::YMM21},
321 {codeview::RegisterId::AMD64_YMM22, X86::YMM22},
322 {codeview::RegisterId::AMD64_YMM23, X86::YMM23},
323 {codeview::RegisterId::AMD64_YMM24, X86::YMM24},
324 {codeview::RegisterId::AMD64_YMM25, X86::YMM25},
325 {codeview::RegisterId::AMD64_YMM26, X86::YMM26},
326 {codeview::RegisterId::AMD64_YMM27, X86::YMM27},
327 {codeview::RegisterId::AMD64_YMM28, X86::YMM28},
328 {codeview::RegisterId::AMD64_YMM29, X86::YMM29},
329 {codeview::RegisterId::AMD64_YMM30, X86::YMM30},
330 {codeview::RegisterId::AMD64_YMM31, X86::YMM31},
331 {codeview::RegisterId::AMD64_ZMM0, X86::ZMM0},
332 {codeview::RegisterId::AMD64_ZMM1, X86::ZMM1},
333 {codeview::RegisterId::AMD64_ZMM2, X86::ZMM2},
334 {codeview::RegisterId::AMD64_ZMM3, X86::ZMM3},
335 {codeview::RegisterId::AMD64_ZMM4, X86::ZMM4},
336 {codeview::RegisterId::AMD64_ZMM5, X86::ZMM5},
337 {codeview::RegisterId::AMD64_ZMM6, X86::ZMM6},
338 {codeview::RegisterId::AMD64_ZMM7, X86::ZMM7},
339 {codeview::RegisterId::AMD64_ZMM8, X86::ZMM8},
340 {codeview::RegisterId::AMD64_ZMM9, X86::ZMM9},
341 {codeview::RegisterId::AMD64_ZMM10, X86::ZMM10},
342 {codeview::RegisterId::AMD64_ZMM11, X86::ZMM11},
343 {codeview::RegisterId::AMD64_ZMM12, X86::ZMM12},
344 {codeview::RegisterId::AMD64_ZMM13, X86::ZMM13},
345 {codeview::RegisterId::AMD64_ZMM14, X86::ZMM14},
346 {codeview::RegisterId::AMD64_ZMM15, X86::ZMM15},
347 {codeview::RegisterId::AMD64_ZMM16, X86::ZMM16},
348 {codeview::RegisterId::AMD64_ZMM17, X86::ZMM17},
349 {codeview::RegisterId::AMD64_ZMM18, X86::ZMM18},
350 {codeview::RegisterId::AMD64_ZMM19, X86::ZMM19},
351 {codeview::RegisterId::AMD64_ZMM20, X86::ZMM20},
352 {codeview::RegisterId::AMD64_ZMM21, X86::ZMM21},
353 {codeview::RegisterId::AMD64_ZMM22, X86::ZMM22},
354 {codeview::RegisterId::AMD64_ZMM23, X86::ZMM23},
355 {codeview::RegisterId::AMD64_ZMM24, X86::ZMM24},
356 {codeview::RegisterId::AMD64_ZMM25, X86::ZMM25},
357 {codeview::RegisterId::AMD64_ZMM26, X86::ZMM26},
358 {codeview::RegisterId::AMD64_ZMM27, X86::ZMM27},
359 {codeview::RegisterId::AMD64_ZMM28, X86::ZMM28},
360 {codeview::RegisterId::AMD64_ZMM29, X86::ZMM29},
361 {codeview::RegisterId::AMD64_ZMM30, X86::ZMM30},
362 {codeview::RegisterId::AMD64_ZMM31, X86::ZMM31},
363 {codeview::RegisterId::AMD64_K0, X86::K0},
364 {codeview::RegisterId::AMD64_K1, X86::K1},
365 {codeview::RegisterId::AMD64_K2, X86::K2},
366 {codeview::RegisterId::AMD64_K3, X86::K3},
367 {codeview::RegisterId::AMD64_K4, X86::K4},
368 {codeview::RegisterId::AMD64_K5, X86::K5},
369 {codeview::RegisterId::AMD64_K6, X86::K6},
370 {codeview::RegisterId::AMD64_K7, X86::K7},
371 {codeview::RegisterId::AMD64_XMM16, X86::XMM16},
372 {codeview::RegisterId::AMD64_XMM17, X86::XMM17},
373 {codeview::RegisterId::AMD64_XMM18, X86::XMM18},
374 {codeview::RegisterId::AMD64_XMM19, X86::XMM19},
375 {codeview::RegisterId::AMD64_XMM20, X86::XMM20},
376 {codeview::RegisterId::AMD64_XMM21, X86::XMM21},
377 {codeview::RegisterId::AMD64_XMM22, X86::XMM22},
378 {codeview::RegisterId::AMD64_XMM23, X86::XMM23},
379 {codeview::RegisterId::AMD64_XMM24, X86::XMM24},
380 {codeview::RegisterId::AMD64_XMM25, X86::XMM25},
381 {codeview::RegisterId::AMD64_XMM26, X86::XMM26},
382 {codeview::RegisterId::AMD64_XMM27, X86::XMM27},
383 {codeview::RegisterId::AMD64_XMM28, X86::XMM28},
384 {codeview::RegisterId::AMD64_XMM29, X86::XMM29},
385 {codeview::RegisterId::AMD64_XMM30, X86::XMM30},
386 {codeview::RegisterId::AMD64_XMM31, X86::XMM31},
387
388 };
389 for (const auto &I : RegMap)
390 MRI->mapLLVMRegToCVReg(I.Reg, static_cast<int>(I.CVReg));
391}
392
394 StringRef CPU, StringRef FS) {
395 std::string ArchFS = X86_MC::ParseX86Triple(TT);
396 assert(!ArchFS.empty() && "Failed to parse X86 triple");
397 if (!FS.empty())
398 ArchFS = (Twine(ArchFS) + "," + FS).str();
399
400 if (CPU.empty())
401 CPU = "generic";
402
403 return createX86MCSubtargetInfoImpl(TT, CPU, /*TuneCPU*/ CPU, ArchFS);
404}
405
407 MCInstrInfo *X = new MCInstrInfo();
408 InitX86MCInstrInfo(X);
409 return X;
410}
411
413 unsigned RA = TT.isX86_64() ? X86::RIP // Should have dwarf #16.
414 : X86::EIP; // Should have dwarf #8.
415
417 InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
418 X86_MC::getDwarfRegFlavour(TT, true), RA);
420 return X;
421}
422
424 const MCRegisterInfo &MRI) {
425 auto &Set = MAI.getReservedIdentifiers();
426 // Register names: `call rsi` is misassembled as an indirect call. Use the
427 // Intel printer's table directly — it's the lowercase asm name in stable
428 // storage. MRI::getName() returns the uppercase enum name and would need
429 // an extra .lower() heap allocation per entry.
430 for (unsigned i = 1, e = MRI.getNumRegs(); i < e; ++i)
431 if (const char *Name = X86IntelInstPrinter::getRegisterName(i))
432 if (Name[0])
433 Set.insert(CachedHashStringRef(Name));
434 // Keywords that GAS Intel syntax misparses as constants, modifiers, or
435 // pseudo-registers instead of symbol references (e.g., `call byte` calls
436 // address 1, not symbol "byte"; `call flat` errors out).
437 for (StringRef KW : {"byte", "word", "dword", "fword", "qword", "mmword",
438 "tbyte", "oword", "xmmword", "ymmword", "zmmword",
439 "offset", "flat", "near", "far", "short"})
440 Set.insert(CachedHashStringRef(KW));
441 // Operator keywords parsed by GAS/X86AsmParser in Intel mode.
442 for (StringRef KW : {"and", "eq", "ge", "gt", "le", "lt", "mod", "ne", "not",
443 "or", "shl", "shr", "xor"})
444 Set.insert(CachedHashStringRef(KW));
445}
446
448 const Triple &TheTriple,
449 const MCTargetOptions &Options) {
450 bool is64Bit = TheTriple.isX86_64();
451
452 MCAsmInfo *MAI;
453 if (TheTriple.isOSBinFormatMachO()) {
454 if (is64Bit)
455 MAI = new X86_64MCAsmInfoDarwin(TheTriple, Options);
456 else
457 MAI = new X86MCAsmInfoDarwin(TheTriple, Options);
458 } else if (TheTriple.isOSBinFormatELF()) {
459 // Force the use of an ELF container.
460 MAI = new X86ELFMCAsmInfo(TheTriple, Options);
461 } else if (TheTriple.isWindowsMSVCEnvironment() ||
462 TheTriple.isWindowsCoreCLREnvironment() || TheTriple.isUEFI()) {
463 if (Options.getAssemblyLanguage().equals_insensitive("masm"))
464 MAI = new X86MCAsmInfoMicrosoftMASM(TheTriple, Options);
465 else
466 MAI = new X86MCAsmInfoMicrosoft(TheTriple, Options);
467 } else if (TheTriple.isOSCygMing() ||
468 TheTriple.isWindowsItaniumEnvironment()) {
469 MAI = new X86MCAsmInfoGNUCOFF(TheTriple, Options);
470 } else {
471 // The default is ELF.
472 MAI = new X86ELFMCAsmInfo(TheTriple, Options);
473 }
475
476 // Initialize initial frame state.
477 // Calculate amount of bytes used for return address storing
478 int stackGrowth = is64Bit ? -8 : -4;
479
480 // Initial state of the frame pointer is esp+stackGrowth.
481 unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP;
483 nullptr, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth);
484 MAI->addInitialFrameState(Inst);
485
486 // Add return address to move list
487 unsigned InstPtr = is64Bit ? X86::RIP : X86::EIP;
489 nullptr, MRI.getDwarfRegNum(InstPtr, true), stackGrowth);
490 MAI->addInitialFrameState(Inst2);
491
492 return MAI;
493}
494
496 unsigned SyntaxVariant,
497 const MCAsmInfo &MAI,
498 const MCInstrInfo &MII,
499 const MCRegisterInfo &MRI) {
500 if (SyntaxVariant == 0)
501 return new X86ATTInstPrinter(MAI, MII, MRI);
502 if (SyntaxVariant == 1)
503 return new X86IntelInstPrinter(MAI, MII, MRI);
504 return nullptr;
505}
506
508 MCContext &Ctx) {
509 // Default to the stock relocation info.
510 return llvm::createMCRelocationInfo(TheTriple, Ctx);
511}
512
513namespace llvm {
514namespace X86_MC {
515
516class X86MCInstrAnalysis : public MCInstrAnalysis {
517 X86MCInstrAnalysis(const X86MCInstrAnalysis &) = delete;
518 X86MCInstrAnalysis &operator=(const X86MCInstrAnalysis &) = delete;
519 ~X86MCInstrAnalysis() override = default;
520
521public:
523
524#define GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS
525#include "X86GenSubtargetInfo.inc"
526
527 bool clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst,
528 APInt &Mask) const override;
529 std::vector<std::pair<uint64_t, uint64_t>>
530 findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
531 const MCSubtargetInfo &STI) const override;
532
533 bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
534 uint64_t &Target) const override;
535 std::optional<uint64_t>
537 uint64_t Addr, uint64_t Size) const override;
538 std::optional<uint64_t>
540 uint64_t Size) const override;
541};
542
543#define GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS
544#include "X86GenSubtargetInfo.inc"
545
547 const MCInst &Inst,
548 APInt &Mask) const {
549 const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
550 unsigned NumDefs = Desc.getNumDefs();
551 unsigned NumImplicitDefs = Desc.implicit_defs().size();
552 assert(Mask.getBitWidth() == NumDefs + NumImplicitDefs &&
553 "Unexpected number of bits in the mask!");
554
555 bool HasVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::VEX;
556 bool HasEVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
557 bool HasXOP = (Desc.TSFlags & X86II::EncodingMask) == X86II::XOP;
558
559 const MCRegisterClass &GR32RC = MRI.getRegClass(X86::GR32RegClassID);
560 const MCRegisterClass &VR128XRC = MRI.getRegClass(X86::VR128XRegClassID);
561 const MCRegisterClass &VR256XRC = MRI.getRegClass(X86::VR256XRegClassID);
562
563 auto ClearsSuperReg = [=](MCRegister RegID) {
564 // On X86-64, a general purpose integer register is viewed as a 64-bit
565 // register internal to the processor.
566 // An update to the lower 32 bits of a 64 bit integer register is
567 // architecturally defined to zero extend the upper 32 bits.
568 if (GR32RC.contains(RegID))
569 return true;
570
571 // Early exit if this instruction has no vex/evex/xop prefix.
572 if (!HasEVEX && !HasVEX && !HasXOP)
573 return false;
574
575 // All VEX and EVEX encoded instructions are defined to zero the high bits
576 // of the destination register up to VLMAX (i.e. the maximum vector register
577 // width pertaining to the instruction).
578 // We assume the same behavior for XOP instructions too.
579 return VR128XRC.contains(RegID) || VR256XRC.contains(RegID);
580 };
581
582 Mask.clearAllBits();
583 for (unsigned I = 0, E = NumDefs; I < E; ++I) {
584 const MCOperand &Op = Inst.getOperand(I);
585 if (ClearsSuperReg(Op.getReg()))
586 Mask.setBit(I);
587 }
588
589 for (unsigned I = 0, E = NumImplicitDefs; I < E; ++I) {
590 const MCPhysReg Reg = Desc.implicit_defs()[I];
591 if (ClearsSuperReg(Reg))
592 Mask.setBit(NumDefs + I);
593 }
594
595 return Mask.getBoolValue();
596}
597
598static std::vector<std::pair<uint64_t, uint64_t>>
599findX86PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
600 // Do a lightweight parsing of PLT entries.
601 std::vector<std::pair<uint64_t, uint64_t>> Result;
602 for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End; ) {
603 // Recognize a jmp.
604 if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0xa3) {
605 // The jmp instruction at the beginning of each PLT entry jumps to the
606 // address of the base of the .got.plt section plus the immediate.
607 // Set the 1 << 32 bit to let ELFObjectFileBase::getPltEntries convert the
608 // offset to an address. Imm may be a negative int32_t if the GOT entry is
609 // in .got.
610 uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
611 Result.emplace_back(PltSectionVA + Byte, Imm | (uint64_t(1) << 32));
612 Byte += 6;
613 } else if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
614 // The jmp instruction at the beginning of each PLT entry jumps to the
615 // immediate.
616 uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
617 Result.push_back(std::make_pair(PltSectionVA + Byte, Imm));
618 Byte += 6;
619 } else
620 Byte++;
621 }
622 return Result;
623}
624
625static std::vector<std::pair<uint64_t, uint64_t>>
627 // Do a lightweight parsing of PLT entries.
628 std::vector<std::pair<uint64_t, uint64_t>> Result;
629 for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End; ) {
630 // Recognize a jmp.
631 if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
632 // The jmp instruction at the beginning of each PLT entry jumps to the
633 // address of the next instruction plus the immediate.
634 uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
635 Result.push_back(
636 std::make_pair(PltSectionVA + Byte, PltSectionVA + Byte + 6 + Imm));
637 Byte += 6;
638 } else
639 Byte++;
640 }
641 return Result;
642}
643
644std::vector<std::pair<uint64_t, uint64_t>>
646 ArrayRef<uint8_t> PltContents,
647 const MCSubtargetInfo &STI) const {
648 const Triple &TargetTriple = STI.getTargetTriple();
649 switch (TargetTriple.getArch()) {
650 case Triple::x86:
651 return findX86PltEntries(PltSectionVA, PltContents);
652 case Triple::x86_64:
653 return findX86_64PltEntries(PltSectionVA, PltContents);
654 default:
655 return {};
656 }
657}
658
660 uint64_t Size, uint64_t &Target) const {
661 if (Inst.getNumOperands() == 0 ||
662 Info->get(Inst.getOpcode()).operands()[0].OperandType !=
664 return false;
665 Target = Addr + Size + Inst.getOperand(0).getImm();
666 return true;
667}
668
670 const MCInst &Inst, const MCSubtargetInfo *STI, uint64_t Addr,
671 uint64_t Size) const {
672 const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
673 int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
674 if (MemOpStart == -1)
675 return std::nullopt;
676 MemOpStart += X86II::getOperandBias(MCID);
677
678 const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
679 const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
680 const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
681 const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
682 const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
683 if (SegReg.getReg() || IndexReg.getReg() || ScaleAmt.getImm() != 1 ||
684 !Disp.isImm())
685 return std::nullopt;
686
687 // RIP-relative addressing.
688 if (BaseReg.getReg() == X86::RIP)
689 return Addr + Size + Disp.getImm();
690
691 return std::nullopt;
692}
693
694std::optional<uint64_t>
696 uint64_t Size) const {
697 if (Inst.getOpcode() != X86::LEA64r)
698 return std::nullopt;
699 const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
700 int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
701 if (MemOpStart == -1)
702 return std::nullopt;
703 MemOpStart += X86II::getOperandBias(MCID);
704 const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
705 const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
706 const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
707 const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
708 const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
709 // Must be a simple rip-relative address.
710 if (BaseReg.getReg() != X86::RIP || SegReg.getReg() || IndexReg.getReg() ||
711 ScaleAmt.getImm() != 1 || !Disp.isImm())
712 return std::nullopt;
713 // rip-relative ModR/M immediate is 32 bits.
714 assert(Size > 4 && "invalid instruction size for rip-relative lea");
715 return Size - 4;
716}
717
718} // end of namespace X86_MC
719
720} // end of namespace llvm
721
723 return new X86_MC::X86MCInstrAnalysis(Info);
724}
725
726// Force static initialization.
729 // Register the MC asm info.
731
732 // Register the MC instruction info.
734
735 // Register the MC register info.
737
738 // Register the MC subtarget info.
741
742 // Register the MC instruction analyzer.
744
745 // Register the code emitter.
747
748 // Register the obj target streamer.
751
752 // Register the asm target streamer.
754
755 // Register the null streamer.
757
760
761 // Register the MCInstPrinter.
763
764 // Register the MC relocation info.
766 }
767
768 // Register the asm backend.
773}
774
776 bool High) {
777#define DEFAULT_NOREG \
778 default: \
779 return X86::NoRegister;
780#define SUB_SUPER(R1, R2, R3, R4, R) \
781 case X86::R1: \
782 case X86::R2: \
783 case X86::R3: \
784 case X86::R4: \
785 return X86::R;
786#define A_SUB_SUPER(R) \
787 case X86::AH: \
788 SUB_SUPER(AL, AX, EAX, RAX, R)
789#define D_SUB_SUPER(R) \
790 case X86::DH: \
791 SUB_SUPER(DL, DX, EDX, RDX, R)
792#define C_SUB_SUPER(R) \
793 case X86::CH: \
794 SUB_SUPER(CL, CX, ECX, RCX, R)
795#define B_SUB_SUPER(R) \
796 case X86::BH: \
797 SUB_SUPER(BL, BX, EBX, RBX, R)
798#define SI_SUB_SUPER(R) SUB_SUPER(SIL, SI, ESI, RSI, R)
799#define DI_SUB_SUPER(R) SUB_SUPER(DIL, DI, EDI, RDI, R)
800#define BP_SUB_SUPER(R) SUB_SUPER(BPL, BP, EBP, RBP, R)
801#define SP_SUB_SUPER(R) SUB_SUPER(SPL, SP, ESP, RSP, R)
802#define NO_SUB_SUPER(NO, REG) \
803 SUB_SUPER(R##NO##B, R##NO##W, R##NO##D, R##NO, REG)
804#define NO_SUB_SUPER_B(NO) NO_SUB_SUPER(NO, R##NO##B)
805#define NO_SUB_SUPER_W(NO) NO_SUB_SUPER(NO, R##NO##W)
806#define NO_SUB_SUPER_D(NO) NO_SUB_SUPER(NO, R##NO##D)
807#define NO_SUB_SUPER_Q(NO) NO_SUB_SUPER(NO, R##NO)
808 switch (Size) {
809 default:
810 llvm_unreachable("illegal register size");
811 case 8:
812 if (High) {
813 switch (Reg.id()) {
815 A_SUB_SUPER(AH)
816 D_SUB_SUPER(DH)
818 B_SUB_SUPER(BH)
819 }
820 } else {
821 switch (Reg.id()) {
823 A_SUB_SUPER(AL)
825 C_SUB_SUPER(CL)
826 B_SUB_SUPER(BL)
827 SI_SUB_SUPER(SIL)
828 DI_SUB_SUPER(DIL)
829 BP_SUB_SUPER(BPL)
830 SP_SUB_SUPER(SPL)
855 }
856 }
857 case 16:
858 switch (Reg.id()) {
860 A_SUB_SUPER(AX)
861 D_SUB_SUPER(DX)
862 C_SUB_SUPER(CX)
863 B_SUB_SUPER(BX)
865 DI_SUB_SUPER(DI)
866 BP_SUB_SUPER(BP)
867 SP_SUB_SUPER(SP)
892 }
893 case 32:
894 switch (Reg.id()) {
896 A_SUB_SUPER(EAX)
897 D_SUB_SUPER(EDX)
898 C_SUB_SUPER(ECX)
899 B_SUB_SUPER(EBX)
900 SI_SUB_SUPER(ESI)
901 DI_SUB_SUPER(EDI)
902 BP_SUB_SUPER(EBP)
903 SP_SUB_SUPER(ESP)
928 }
929 case 64:
930 switch (Reg.id()) {
932 A_SUB_SUPER(RAX)
933 D_SUB_SUPER(RDX)
934 C_SUB_SUPER(RCX)
935 B_SUB_SUPER(RBX)
936 SI_SUB_SUPER(RSI)
938 BP_SUB_SUPER(RBP)
939 SP_SUB_SUPER(RSP)
964 }
965 }
966}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefInfo & RDI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define X(NUM, ENUM, NAME)
Definition ELF.h:851
IRTranslator LLVM IR MI
static LVOptions Options
Definition LVOptions.cpp:25
#define I(x, y, z)
Definition MD5.cpp:57
#define T
uint64_t High
#define CH(x, y, z)
Definition SHA256.cpp:34
SI optimize exec mask operations pre RA
#define LLVM_C_ABI
LLVM_C_ABI is the export/visibility macro used to mark symbols declared in llvm-c as exported when bu...
Definition Visibility.h:40
static bool is64Bit(const char *name)
#define NO_SUB_SUPER_W(NO)
#define NO_SUB_SUPER_Q(NO)
static MCRelocationInfo * createX86MCRelocationInfo(const Triple &TheTriple, MCContext &Ctx)
static MCInstrInfo * createX86MCInstrInfo()
#define C_SUB_SUPER(R)
#define NO_SUB_SUPER_D(NO)
#define DEFAULT_NOREG
static MCRegisterInfo * createX86MCRegisterInfo(const Triple &TT)
#define SP_SUB_SUPER(R)
static void populateReservedIdentifiers(MCAsmInfo &MAI, const MCRegisterInfo &MRI)
static MCInstPrinter * createX86MCInstPrinter(const Triple &T, unsigned SyntaxVariant, const MCAsmInfo &MAI, const MCInstrInfo &MII, const MCRegisterInfo &MRI)
static MCInstrAnalysis * createX86MCInstrAnalysis(const MCInstrInfo *Info)
#define SI_SUB_SUPER(R)
#define BP_SUB_SUPER(R)
#define B_SUB_SUPER(R)
LLVM_C_ABI void LLVMInitializeX86TargetMC()
#define DI_SUB_SUPER(R)
#define NO_SUB_SUPER_B(NO)
#define A_SUB_SUPER(R)
#define D_SUB_SUPER(R)
static MCAsmInfo * createX86MCAsmInfo(const MCRegisterInfo &MRI, const Triple &TheTriple, const MCTargetOptions &Options)
static bool isMemOperand(const MCInst &MI, unsigned Op, unsigned RegClassID)
Class for arbitrary precision integers.
Definition APInt.h:78
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
Get the array size.
Definition ArrayRef.h:141
const T * data() const
Definition ArrayRef.h:138
A container which contains a StringRef plus a precomputed hash.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:66
void addInitialFrameState(const MCCFIInstruction &Inst)
Definition MCAsmInfo.cpp:53
llvm::DenseSet< llvm::CachedHashStringRef > & getReservedIdentifiers()
Definition MCAsmInfo.h:501
static MCCFIInstruction cfiDefCfa(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_def_cfa defines a rule for computing CFA as: take address from Register and add Offset to it.
Definition MCDwarf.h:576
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
Definition MCDwarf.h:618
Context object for machine code objects.
Definition MCContext.h:83
This is an instance of a target assembly language printer that converts an MCInst to valid target ass...
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
unsigned getNumOperands() const
Definition MCInst.h:212
unsigned getOpcode() const
Definition MCInst.h:202
const MCOperand & getOperand(unsigned i) const
Definition MCInst.h:210
const MCInstrInfo * Info
MCInstrAnalysis(const MCInstrInfo *Info)
Describe properties that are true of each instruction in the target description file.
Interface to description of machine instruction set.
Definition MCInstrInfo.h:27
Instances of this class represent operands of the MCInst class.
Definition MCInst.h:40
int64_t getImm() const
Definition MCInst.h:84
bool isImm() const
Definition MCInst.h:66
MCRegister getReg() const
Returns the register number.
Definition MCInst.h:73
MCRegisterClass - Base class of TargetRegisterClass.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
void mapLLVMRegToCVReg(MCRegister LLVMReg, int CVReg)
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
void mapLLVMRegToSEHReg(MCRegister LLVMReg, int SEHReg)
mapLLVMRegToSEHReg - Used to initialize LLVM register to SEH register number mapping.
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
virtual int64_t getDwarfRegNum(MCRegister Reg, bool isEH) const
Map a target register to an equivalent dwarf register number.
unsigned getNumRegs() const
Return the number of registers this target has (useful for sizing arrays holding per register informa...
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
Create MCExprs from relocations found in an object file.
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isOSCygMing() const
Tests for either Cygwin or MinGW OS.
Definition Triple.h:735
bool isX86_64() const
Tests whether the target is x86 (64-bit).
Definition Triple.h:1119
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Definition Triple.h:791
bool isWindowsCoreCLREnvironment() const
Definition Triple.h:718
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:436
bool isUEFI() const
Tests whether the OS is UEFI.
Definition Triple.h:696
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition Triple.h:782
bool isWindowsMSVCEnvironment() const
Checks if the environment could be MSVC.
Definition Triple.h:707
bool isWindowsItaniumEnvironment() const
Definition Triple.h:722
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static const char * getRegisterName(MCRegister Reg)
bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size, uint64_t &Target) const override
Given a branch instruction try to get the address the branch targets.
X86MCInstrAnalysis(const MCInstrInfo *MCII)
std::optional< uint64_t > evaluateMemoryOperandAddress(const MCInst &Inst, const MCSubtargetInfo *STI, uint64_t Addr, uint64_t Size) const override
Given an instruction tries to get the address of a memory operand.
std::optional< uint64_t > getMemoryOperandRelocationOffset(const MCInst &Inst, uint64_t Size) const override
Given an instruction with a memory operand that could require relocation, returns the offset within t...
std::vector< std::pair< uint64_t, uint64_t > > findPltEntries(uint64_t PltSectionVA, ArrayRef< uint8_t > PltContents, const MCSubtargetInfo &STI) const override
Returns (PLT virtual address, GOT virtual address) pairs for PLT entries.
bool clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst, APInt &Mask) const override
Returns true if at least one of the register writes performed by.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ RawFrmDstSrc
RawFrmDstSrc - This form is for instructions that use the source index register SI/ESI/RSI with a pos...
@ EVEX
EVEX - Specifies that this instruction use EVEX form which provides syntax support up to 32 512-bit r...
@ RawFrmDst
RawFrmDst - This form is for instructions that use the destination index register DI/EDI/RDI.
@ VEX
VEX - encoding using 0xC4/0xC5.
@ XOP
XOP - Opcode prefix used by XOP instructions.
@ RawFrmSrc
RawFrmSrc - This form is for instructions that use the source index register SI/ESI/RSI with a possib...
int getMemoryOperandNo(uint64_t TSFlags)
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
bool is32BitMemOperand(const MCInst &MI, unsigned Op)
bool is16BitMemOperand(const MCInst &MI, unsigned Op, const MCSubtargetInfo &STI)
bool hasLockPrefix(const MCInst &MI)
Returns true if this instruction has a LOCK prefix.
void initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI)
static std::vector< std::pair< uint64_t, uint64_t > > findX86_64PltEntries(uint64_t PltSectionVA, ArrayRef< uint8_t > PltContents)
static std::vector< std::pair< uint64_t, uint64_t > > findX86PltEntries(uint64_t PltSectionVA, ArrayRef< uint8_t > PltContents)
bool needsAddressSizeOverride(const MCInst &MI, const MCSubtargetInfo &STI, int MemoryOperand, uint64_t TSFlags)
Returns true if this instruction needs an Address-Size override prefix.
std::string ParseX86Triple(const Triple &TT)
MCSubtargetInfo * createX86MCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS)
Create a X86 MCSubtargetInfo instance.
bool is64BitMemOperand(const MCInst &MI, unsigned Op)
unsigned getDwarfRegFlavour(const Triple &TT, bool isEH)
uint32_t read32le(const void *P)
Definition Endian.h:432
This is an optimization pass for GlobalISel generic memory operations.
MCTargetStreamer * createX86ObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI)
Implements X86-only directives for object files.
MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)
MCAsmBackend * createX86_64AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
MCTargetStreamer * createX86AsmTargetStreamer(MCStreamer &S, formatted_raw_ostream &OS, MCInstPrinter *InstPrinter)
Implements X86-only directives for assembly emission.
MCCodeEmitter * createX86MCCodeEmitter(const MCInstrInfo &MCII, MCContext &Ctx)
Target & getTheX86_32Target()
Op::Description Desc
LLVM_ABI MCRelocationInfo * createMCRelocationInfo(const Triple &TT, MCContext &Ctx)
MCStreamer * createX86ELFStreamer(const Triple &T, MCContext &Context, std::unique_ptr< MCAsmBackend > &&MAB, std::unique_ptr< MCObjectWriter > &&MOW, std::unique_ptr< MCCodeEmitter > &&MCE)
MCStreamer * createX86WinCOFFStreamer(MCContext &C, std::unique_ptr< MCAsmBackend > &&AB, std::unique_ptr< MCObjectWriter > &&OW, std::unique_ptr< MCCodeEmitter > &&CE)
Construct an X86 Windows COFF machine code streamer which will generate PE/COFF format object files.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
MCAsmBackend * createX86_32AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
Target & getTheX86_64Target()
MCTargetStreamer * createX86NullTargetStreamer(MCStreamer &S)
Implements X86-only null emission.
RegisterMCAsmInfoFn - Helper template for registering a target assembly info implementation.
static void RegisterMCRegInfo(Target &T, Target::MCRegInfoCtorFnTy Fn)
RegisterMCRegInfo - Register a MCRegisterInfo implementation for the given target.
static void RegisterMCAsmBackend(Target &T, Target::MCAsmBackendCtorTy Fn)
RegisterMCAsmBackend - Register a MCAsmBackend implementation for the given target.
static void RegisterMCCodeEmitter(Target &T, Target::MCCodeEmitterCtorTy Fn)
RegisterMCCodeEmitter - Register a MCCodeEmitter implementation for the given target.
static void RegisterMCSubtargetInfo(Target &T, Target::MCSubtargetInfoCtorFnTy Fn)
RegisterMCSubtargetInfo - Register a MCSubtargetInfo implementation for the given target.
static void RegisterObjectTargetStreamer(Target &T, Target::ObjectTargetStreamerCtorTy Fn)
static void RegisterMCInstrAnalysis(Target &T, Target::MCInstrAnalysisCtorFnTy Fn)
RegisterMCInstrAnalysis - Register a MCInstrAnalysis implementation for the given target.
static void RegisterELFStreamer(Target &T, Target::ELFStreamerCtorTy Fn)
static void RegisterNullTargetStreamer(Target &T, Target::NullTargetStreamerCtorTy Fn)
static void RegisterMCInstPrinter(Target &T, Target::MCInstPrinterCtorTy Fn)
RegisterMCInstPrinter - Register a MCInstPrinter implementation for the given target.
static void RegisterCOFFStreamer(Target &T, Target::COFFStreamerCtorTy Fn)
static void RegisterMCInstrInfo(Target &T, Target::MCInstrInfoCtorFnTy Fn)
RegisterMCInstrInfo - Register a MCInstrInfo implementation for the given target.
static void RegisterAsmTargetStreamer(Target &T, Target::AsmTargetStreamerCtorTy Fn)
static void RegisterMCRelocationInfo(Target &T, Target::MCRelocationInfoCtorTy Fn)
RegisterMCRelocationInfo - Register an MCRelocationInfo implementation for the given target.