LLVM 23.0.0git
X86MCTargetDesc.cpp
Go to the documentation of this file.
1//===-- X86MCTargetDesc.cpp - X86 Target Descriptions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides X86 specific target descriptions.
10//
11//===----------------------------------------------------------------------===//
12
13#include "X86MCTargetDesc.h"
15#include "X86ATTInstPrinter.h"
16#include "X86BaseInfo.h"
17#include "X86IntelInstPrinter.h"
18#include "X86MCAsmInfo.h"
19#include "X86TargetStreamer.h"
20#include "llvm-c/Visibility.h"
21#include "llvm/ADT/APInt.h"
23#include "llvm/MC/MCDwarf.h"
25#include "llvm/MC/MCInstrInfo.h"
27#include "llvm/MC/MCStreamer.h"
33
34using namespace llvm;
35
36#define GET_REGINFO_MC_DESC
37#include "X86GenRegisterInfo.inc"
38
39#define GET_INSTRINFO_MC_DESC
40#define GET_INSTRINFO_MC_HELPERS
41#define ENABLE_INSTR_PREDICATE_VERIFIER
42#include "X86GenInstrInfo.inc"
43
44#define GET_SUBTARGETINFO_MC_DESC
45#include "X86GenSubtargetInfo.inc"
46
47std::string X86_MC::ParseX86Triple(const Triple &TT) {
48 std::string FS;
49 // SSE2 should default to enabled in 64-bit mode, but can be turned off
50 // explicitly.
51 if (TT.isX86_64())
52 FS = "+64bit-mode,-32bit-mode,-16bit-mode,+sse2";
53 else if (TT.getEnvironment() != Triple::CODE16)
54 FS = "-64bit-mode,+32bit-mode,-16bit-mode";
55 else
56 FS = "-64bit-mode,-32bit-mode,+16bit-mode";
57
58 if (TT.isX32())
59 FS += ",+x32";
60
61 return FS;
62}
63
64unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
65 if (TT.isX86_64())
67
68 if (TT.isOSDarwin())
70 if (TT.isOSCygMing())
71 // Unsupported by now, just quick fallback
74}
75
77 return MI.getFlags() & X86::IP_HAS_LOCK;
78}
79
80static bool isMemOperand(const MCInst &MI, unsigned Op, unsigned RegClassID) {
81 const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
82 const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
83 const MCRegisterClass &RC = X86MCRegisterClasses[RegClassID];
84
85 return (Base.isReg() && Base.getReg() && RC.contains(Base.getReg())) ||
86 (Index.isReg() && Index.getReg() && RC.contains(Index.getReg()));
87}
88
89bool X86_MC::is16BitMemOperand(const MCInst &MI, unsigned Op,
90 const MCSubtargetInfo &STI) {
91 const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
92 const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
93
94 if (STI.hasFeature(X86::Is16Bit) && Base.isReg() && !Base.getReg() &&
95 Index.isReg() && !Index.getReg())
96 return true;
97 return isMemOperand(MI, Op, X86::GR16RegClassID);
98}
99
100bool X86_MC::is32BitMemOperand(const MCInst &MI, unsigned Op) {
101 const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
102 const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
103 if (Base.isReg() && Base.getReg() == X86::EIP) {
104 assert(Index.isReg() && !Index.getReg() && "Invalid eip-based address");
105 return true;
106 }
107 if (Index.isReg() && Index.getReg() == X86::EIZ)
108 return true;
109 return isMemOperand(MI, Op, X86::GR32RegClassID);
110}
111
#ifndef NDEBUG
// Return true if the memory operand starting at index Op uses a 64-bit base
// or index register. Compiled only in asserts builds: its only callers here
// are assertions on the memory-operand address size.
bool X86_MC::is64BitMemOperand(const MCInst &MI, unsigned Op) {
  return isMemOperand(MI, Op, X86::GR64RegClassID);
}
#endif
117
119 const MCSubtargetInfo &STI,
120 int MemoryOperand, uint64_t TSFlags) {
121 uint64_t AdSize = TSFlags & X86II::AdSizeMask;
122 bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
123 bool Is32BitMode = STI.hasFeature(X86::Is32Bit);
124 bool Is64BitMode = STI.hasFeature(X86::Is64Bit);
125 if ((Is16BitMode && AdSize == X86II::AdSize32) ||
126 (Is32BitMode && AdSize == X86II::AdSize16) ||
127 (Is64BitMode && AdSize == X86II::AdSize32))
128 return true;
129 uint64_t Form = TSFlags & X86II::FormMask;
130 switch (Form) {
131 default:
132 break;
133 case X86II::RawFrmDstSrc: {
134 MCRegister siReg = MI.getOperand(1).getReg();
135 assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
136 (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
137 (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
138 "SI and DI register sizes do not match");
139 return (!Is32BitMode && siReg == X86::ESI) ||
140 (Is32BitMode && siReg == X86::SI);
141 }
142 case X86II::RawFrmSrc: {
143 MCRegister siReg = MI.getOperand(0).getReg();
144 return (!Is32BitMode && siReg == X86::ESI) ||
145 (Is32BitMode && siReg == X86::SI);
146 }
147 case X86II::RawFrmDst: {
148 MCRegister siReg = MI.getOperand(0).getReg();
149 return (!Is32BitMode && siReg == X86::EDI) ||
150 (Is32BitMode && siReg == X86::DI);
151 }
152 }
153
154 // Determine where the memory operand starts, if present.
155 if (MemoryOperand < 0)
156 return false;
157
158 if (STI.hasFeature(X86::Is64Bit)) {
159 assert(!is16BitMemOperand(MI, MemoryOperand, STI));
160 return is32BitMemOperand(MI, MemoryOperand);
161 }
162 if (STI.hasFeature(X86::Is32Bit)) {
163 assert(!is64BitMemOperand(MI, MemoryOperand));
164 return is16BitMemOperand(MI, MemoryOperand, STI);
165 }
166 assert(STI.hasFeature(X86::Is16Bit));
167 assert(!is64BitMemOperand(MI, MemoryOperand));
168 return !is16BitMemOperand(MI, MemoryOperand, STI);
169}
170
172 // FIXME: TableGen these.
173 for (unsigned Reg = X86::NoRegister + 1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
174 unsigned SEH = MRI->getEncodingValue(Reg);
175 MRI->mapLLVMRegToSEHReg(Reg, SEH);
176 }
177
178 // Mapping from CodeView to MC register id.
179 static const struct {
181 MCPhysReg Reg;
182 } RegMap[] = {
183 {codeview::RegisterId::AL, X86::AL},
184 {codeview::RegisterId::CL, X86::CL},
185 {codeview::RegisterId::DL, X86::DL},
186 {codeview::RegisterId::BL, X86::BL},
187 {codeview::RegisterId::AH, X86::AH},
188 {codeview::RegisterId::CH, X86::CH},
189 {codeview::RegisterId::DH, X86::DH},
190 {codeview::RegisterId::BH, X86::BH},
191 {codeview::RegisterId::AX, X86::AX},
192 {codeview::RegisterId::CX, X86::CX},
193 {codeview::RegisterId::DX, X86::DX},
194 {codeview::RegisterId::BX, X86::BX},
195 {codeview::RegisterId::SP, X86::SP},
196 {codeview::RegisterId::BP, X86::BP},
197 {codeview::RegisterId::SI, X86::SI},
198 {codeview::RegisterId::DI, X86::DI},
199 {codeview::RegisterId::EAX, X86::EAX},
200 {codeview::RegisterId::ECX, X86::ECX},
201 {codeview::RegisterId::EDX, X86::EDX},
202 {codeview::RegisterId::EBX, X86::EBX},
203 {codeview::RegisterId::ESP, X86::ESP},
204 {codeview::RegisterId::EBP, X86::EBP},
205 {codeview::RegisterId::ESI, X86::ESI},
206 {codeview::RegisterId::EDI, X86::EDI},
207
208 {codeview::RegisterId::EFLAGS, X86::EFLAGS},
209
210 {codeview::RegisterId::ST0, X86::ST0},
211 {codeview::RegisterId::ST1, X86::ST1},
212 {codeview::RegisterId::ST2, X86::ST2},
213 {codeview::RegisterId::ST3, X86::ST3},
214 {codeview::RegisterId::ST4, X86::ST4},
215 {codeview::RegisterId::ST5, X86::ST5},
216 {codeview::RegisterId::ST6, X86::ST6},
217 {codeview::RegisterId::ST7, X86::ST7},
218
219 {codeview::RegisterId::ST0, X86::FP0},
220 {codeview::RegisterId::ST1, X86::FP1},
221 {codeview::RegisterId::ST2, X86::FP2},
222 {codeview::RegisterId::ST3, X86::FP3},
223 {codeview::RegisterId::ST4, X86::FP4},
224 {codeview::RegisterId::ST5, X86::FP5},
225 {codeview::RegisterId::ST6, X86::FP6},
226 {codeview::RegisterId::ST7, X86::FP7},
227
228 {codeview::RegisterId::MM0, X86::MM0},
229 {codeview::RegisterId::MM1, X86::MM1},
230 {codeview::RegisterId::MM2, X86::MM2},
231 {codeview::RegisterId::MM3, X86::MM3},
232 {codeview::RegisterId::MM4, X86::MM4},
233 {codeview::RegisterId::MM5, X86::MM5},
234 {codeview::RegisterId::MM6, X86::MM6},
235 {codeview::RegisterId::MM7, X86::MM7},
236
237 {codeview::RegisterId::XMM0, X86::XMM0},
238 {codeview::RegisterId::XMM1, X86::XMM1},
239 {codeview::RegisterId::XMM2, X86::XMM2},
240 {codeview::RegisterId::XMM3, X86::XMM3},
241 {codeview::RegisterId::XMM4, X86::XMM4},
242 {codeview::RegisterId::XMM5, X86::XMM5},
243 {codeview::RegisterId::XMM6, X86::XMM6},
244 {codeview::RegisterId::XMM7, X86::XMM7},
245
246 {codeview::RegisterId::XMM8, X86::XMM8},
247 {codeview::RegisterId::XMM9, X86::XMM9},
248 {codeview::RegisterId::XMM10, X86::XMM10},
249 {codeview::RegisterId::XMM11, X86::XMM11},
250 {codeview::RegisterId::XMM12, X86::XMM12},
251 {codeview::RegisterId::XMM13, X86::XMM13},
252 {codeview::RegisterId::XMM14, X86::XMM14},
253 {codeview::RegisterId::XMM15, X86::XMM15},
254
255 {codeview::RegisterId::SIL, X86::SIL},
256 {codeview::RegisterId::DIL, X86::DIL},
257 {codeview::RegisterId::BPL, X86::BPL},
258 {codeview::RegisterId::SPL, X86::SPL},
259 {codeview::RegisterId::RAX, X86::RAX},
260 {codeview::RegisterId::RBX, X86::RBX},
261 {codeview::RegisterId::RCX, X86::RCX},
262 {codeview::RegisterId::RDX, X86::RDX},
263 {codeview::RegisterId::RSI, X86::RSI},
264 {codeview::RegisterId::RDI, X86::RDI},
265 {codeview::RegisterId::RBP, X86::RBP},
266 {codeview::RegisterId::RSP, X86::RSP},
267 {codeview::RegisterId::R8, X86::R8},
268 {codeview::RegisterId::R9, X86::R9},
269 {codeview::RegisterId::R10, X86::R10},
270 {codeview::RegisterId::R11, X86::R11},
271 {codeview::RegisterId::R12, X86::R12},
272 {codeview::RegisterId::R13, X86::R13},
273 {codeview::RegisterId::R14, X86::R14},
274 {codeview::RegisterId::R15, X86::R15},
275 {codeview::RegisterId::R8B, X86::R8B},
276 {codeview::RegisterId::R9B, X86::R9B},
277 {codeview::RegisterId::R10B, X86::R10B},
278 {codeview::RegisterId::R11B, X86::R11B},
279 {codeview::RegisterId::R12B, X86::R12B},
280 {codeview::RegisterId::R13B, X86::R13B},
281 {codeview::RegisterId::R14B, X86::R14B},
282 {codeview::RegisterId::R15B, X86::R15B},
283 {codeview::RegisterId::R8W, X86::R8W},
284 {codeview::RegisterId::R9W, X86::R9W},
285 {codeview::RegisterId::R10W, X86::R10W},
286 {codeview::RegisterId::R11W, X86::R11W},
287 {codeview::RegisterId::R12W, X86::R12W},
288 {codeview::RegisterId::R13W, X86::R13W},
289 {codeview::RegisterId::R14W, X86::R14W},
290 {codeview::RegisterId::R15W, X86::R15W},
291 {codeview::RegisterId::R8D, X86::R8D},
292 {codeview::RegisterId::R9D, X86::R9D},
293 {codeview::RegisterId::R10D, X86::R10D},
294 {codeview::RegisterId::R11D, X86::R11D},
295 {codeview::RegisterId::R12D, X86::R12D},
296 {codeview::RegisterId::R13D, X86::R13D},
297 {codeview::RegisterId::R14D, X86::R14D},
298 {codeview::RegisterId::R15D, X86::R15D},
299 {codeview::RegisterId::AMD64_YMM0, X86::YMM0},
300 {codeview::RegisterId::AMD64_YMM1, X86::YMM1},
301 {codeview::RegisterId::AMD64_YMM2, X86::YMM2},
302 {codeview::RegisterId::AMD64_YMM3, X86::YMM3},
303 {codeview::RegisterId::AMD64_YMM4, X86::YMM4},
304 {codeview::RegisterId::AMD64_YMM5, X86::YMM5},
305 {codeview::RegisterId::AMD64_YMM6, X86::YMM6},
306 {codeview::RegisterId::AMD64_YMM7, X86::YMM7},
307 {codeview::RegisterId::AMD64_YMM8, X86::YMM8},
308 {codeview::RegisterId::AMD64_YMM9, X86::YMM9},
309 {codeview::RegisterId::AMD64_YMM10, X86::YMM10},
310 {codeview::RegisterId::AMD64_YMM11, X86::YMM11},
311 {codeview::RegisterId::AMD64_YMM12, X86::YMM12},
312 {codeview::RegisterId::AMD64_YMM13, X86::YMM13},
313 {codeview::RegisterId::AMD64_YMM14, X86::YMM14},
314 {codeview::RegisterId::AMD64_YMM15, X86::YMM15},
315 {codeview::RegisterId::AMD64_YMM16, X86::YMM16},
316 {codeview::RegisterId::AMD64_YMM17, X86::YMM17},
317 {codeview::RegisterId::AMD64_YMM18, X86::YMM18},
318 {codeview::RegisterId::AMD64_YMM19, X86::YMM19},
319 {codeview::RegisterId::AMD64_YMM20, X86::YMM20},
320 {codeview::RegisterId::AMD64_YMM21, X86::YMM21},
321 {codeview::RegisterId::AMD64_YMM22, X86::YMM22},
322 {codeview::RegisterId::AMD64_YMM23, X86::YMM23},
323 {codeview::RegisterId::AMD64_YMM24, X86::YMM24},
324 {codeview::RegisterId::AMD64_YMM25, X86::YMM25},
325 {codeview::RegisterId::AMD64_YMM26, X86::YMM26},
326 {codeview::RegisterId::AMD64_YMM27, X86::YMM27},
327 {codeview::RegisterId::AMD64_YMM28, X86::YMM28},
328 {codeview::RegisterId::AMD64_YMM29, X86::YMM29},
329 {codeview::RegisterId::AMD64_YMM30, X86::YMM30},
330 {codeview::RegisterId::AMD64_YMM31, X86::YMM31},
331 {codeview::RegisterId::AMD64_ZMM0, X86::ZMM0},
332 {codeview::RegisterId::AMD64_ZMM1, X86::ZMM1},
333 {codeview::RegisterId::AMD64_ZMM2, X86::ZMM2},
334 {codeview::RegisterId::AMD64_ZMM3, X86::ZMM3},
335 {codeview::RegisterId::AMD64_ZMM4, X86::ZMM4},
336 {codeview::RegisterId::AMD64_ZMM5, X86::ZMM5},
337 {codeview::RegisterId::AMD64_ZMM6, X86::ZMM6},
338 {codeview::RegisterId::AMD64_ZMM7, X86::ZMM7},
339 {codeview::RegisterId::AMD64_ZMM8, X86::ZMM8},
340 {codeview::RegisterId::AMD64_ZMM9, X86::ZMM9},
341 {codeview::RegisterId::AMD64_ZMM10, X86::ZMM10},
342 {codeview::RegisterId::AMD64_ZMM11, X86::ZMM11},
343 {codeview::RegisterId::AMD64_ZMM12, X86::ZMM12},
344 {codeview::RegisterId::AMD64_ZMM13, X86::ZMM13},
345 {codeview::RegisterId::AMD64_ZMM14, X86::ZMM14},
346 {codeview::RegisterId::AMD64_ZMM15, X86::ZMM15},
347 {codeview::RegisterId::AMD64_ZMM16, X86::ZMM16},
348 {codeview::RegisterId::AMD64_ZMM17, X86::ZMM17},
349 {codeview::RegisterId::AMD64_ZMM18, X86::ZMM18},
350 {codeview::RegisterId::AMD64_ZMM19, X86::ZMM19},
351 {codeview::RegisterId::AMD64_ZMM20, X86::ZMM20},
352 {codeview::RegisterId::AMD64_ZMM21, X86::ZMM21},
353 {codeview::RegisterId::AMD64_ZMM22, X86::ZMM22},
354 {codeview::RegisterId::AMD64_ZMM23, X86::ZMM23},
355 {codeview::RegisterId::AMD64_ZMM24, X86::ZMM24},
356 {codeview::RegisterId::AMD64_ZMM25, X86::ZMM25},
357 {codeview::RegisterId::AMD64_ZMM26, X86::ZMM26},
358 {codeview::RegisterId::AMD64_ZMM27, X86::ZMM27},
359 {codeview::RegisterId::AMD64_ZMM28, X86::ZMM28},
360 {codeview::RegisterId::AMD64_ZMM29, X86::ZMM29},
361 {codeview::RegisterId::AMD64_ZMM30, X86::ZMM30},
362 {codeview::RegisterId::AMD64_ZMM31, X86::ZMM31},
363 {codeview::RegisterId::AMD64_K0, X86::K0},
364 {codeview::RegisterId::AMD64_K1, X86::K1},
365 {codeview::RegisterId::AMD64_K2, X86::K2},
366 {codeview::RegisterId::AMD64_K3, X86::K3},
367 {codeview::RegisterId::AMD64_K4, X86::K4},
368 {codeview::RegisterId::AMD64_K5, X86::K5},
369 {codeview::RegisterId::AMD64_K6, X86::K6},
370 {codeview::RegisterId::AMD64_K7, X86::K7},
371 {codeview::RegisterId::AMD64_XMM16, X86::XMM16},
372 {codeview::RegisterId::AMD64_XMM17, X86::XMM17},
373 {codeview::RegisterId::AMD64_XMM18, X86::XMM18},
374 {codeview::RegisterId::AMD64_XMM19, X86::XMM19},
375 {codeview::RegisterId::AMD64_XMM20, X86::XMM20},
376 {codeview::RegisterId::AMD64_XMM21, X86::XMM21},
377 {codeview::RegisterId::AMD64_XMM22, X86::XMM22},
378 {codeview::RegisterId::AMD64_XMM23, X86::XMM23},
379 {codeview::RegisterId::AMD64_XMM24, X86::XMM24},
380 {codeview::RegisterId::AMD64_XMM25, X86::XMM25},
381 {codeview::RegisterId::AMD64_XMM26, X86::XMM26},
382 {codeview::RegisterId::AMD64_XMM27, X86::XMM27},
383 {codeview::RegisterId::AMD64_XMM28, X86::XMM28},
384 {codeview::RegisterId::AMD64_XMM29, X86::XMM29},
385 {codeview::RegisterId::AMD64_XMM30, X86::XMM30},
386 {codeview::RegisterId::AMD64_XMM31, X86::XMM31},
387
388 };
389 for (const auto &I : RegMap)
390 MRI->mapLLVMRegToCVReg(I.Reg, static_cast<int>(I.CVReg));
391}
392
394 StringRef CPU, StringRef FS) {
395 std::string ArchFS = X86_MC::ParseX86Triple(TT);
396 assert(!ArchFS.empty() && "Failed to parse X86 triple");
397 if (!FS.empty())
398 ArchFS = (Twine(ArchFS) + "," + FS).str();
399
400 if (CPU.empty())
401 CPU = "generic";
402
403 return createX86MCSubtargetInfoImpl(TT, CPU, /*TuneCPU*/ CPU, ArchFS);
404}
405
407 MCInstrInfo *X = new MCInstrInfo();
408 InitX86MCInstrInfo(X);
409 return X;
410}
411
413 unsigned RA = TT.isX86_64() ? X86::RIP // Should have dwarf #16.
414 : X86::EIP; // Should have dwarf #8.
415
417 InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
418 X86_MC::getDwarfRegFlavour(TT, true), RA);
420 return X;
421}
422
424 const MCRegisterInfo &MRI) {
425 // Register names: `call rsi` is misassembled as an indirect call.
426 for (unsigned i = 1, e = MRI.getNumRegs(); i < e; ++i)
427 if (const char *Name = MRI.getName(i))
428 if (Name[0])
429 Set.insert(StringRef(Name).lower());
430 // Keywords that GAS Intel syntax misparses as constants, modifiers, or
431 // pseudo-registers instead of symbol references (e.g., `call byte` calls
432 // address 1, not symbol "byte"; `call flat` errors out).
433 for (StringRef KW : {"byte", "word", "dword", "fword", "qword", "mmword",
434 "tbyte", "oword", "xmmword", "ymmword", "zmmword",
435 "offset", "flat", "near", "far", "short"})
436 Set.insert(KW);
437 // Operator keywords parsed by GAS/X86AsmParser in Intel mode.
438 for (StringRef KW : {"and", "eq", "ge", "gt", "le", "lt", "mod", "ne", "not",
439 "or", "shl", "shr", "xor"})
440 Set.insert(KW);
441}
442
444 const Triple &TheTriple,
445 const MCTargetOptions &Options) {
446 bool is64Bit = TheTriple.isX86_64();
447
448 MCAsmInfo *MAI;
449 if (TheTriple.isOSBinFormatMachO()) {
450 if (is64Bit) {
451 auto *P = new X86_64MCAsmInfoDarwin(TheTriple);
452 populateReservedIdentifiers(P->ReservedIdentifiers, MRI);
453 MAI = P;
454 } else {
455 auto *P = new X86MCAsmInfoDarwin(TheTriple);
456 populateReservedIdentifiers(P->ReservedIdentifiers, MRI);
457 MAI = P;
458 }
459 } else if (TheTriple.isOSBinFormatELF()) {
460 // Force the use of an ELF container.
461 auto *P = new X86ELFMCAsmInfo(TheTriple);
462 populateReservedIdentifiers(P->ReservedIdentifiers, MRI);
463 MAI = P;
464 } else if (TheTriple.isWindowsMSVCEnvironment() ||
465 TheTriple.isWindowsCoreCLREnvironment() || TheTriple.isUEFI()) {
466 if (Options.getAssemblyLanguage().equals_insensitive("masm")) {
467 auto *P = new X86MCAsmInfoMicrosoftMASM(TheTriple);
468 populateReservedIdentifiers(P->ReservedIdentifiers, MRI);
469 MAI = P;
470 } else {
471 auto *P = new X86MCAsmInfoMicrosoft(TheTriple);
472 populateReservedIdentifiers(P->ReservedIdentifiers, MRI);
473 MAI = P;
474 }
475 } else if (TheTriple.isOSCygMing() ||
476 TheTriple.isWindowsItaniumEnvironment()) {
477 auto *P = new X86MCAsmInfoGNUCOFF(TheTriple);
478 populateReservedIdentifiers(P->ReservedIdentifiers, MRI);
479 MAI = P;
480 } else {
481 // The default is ELF.
482 auto *P = new X86ELFMCAsmInfo(TheTriple);
483 populateReservedIdentifiers(P->ReservedIdentifiers, MRI);
484 MAI = P;
485 }
486
487 // Initialize initial frame state.
488 // Calculate amount of bytes used for return address storing
489 int stackGrowth = is64Bit ? -8 : -4;
490
491 // Initial state of the frame pointer is esp+stackGrowth.
492 unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP;
494 nullptr, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth);
495 MAI->addInitialFrameState(Inst);
496
497 // Add return address to move list
498 unsigned InstPtr = is64Bit ? X86::RIP : X86::EIP;
500 nullptr, MRI.getDwarfRegNum(InstPtr, true), stackGrowth);
501 MAI->addInitialFrameState(Inst2);
502
503 return MAI;
504}
505
507 unsigned SyntaxVariant,
508 const MCAsmInfo &MAI,
509 const MCInstrInfo &MII,
510 const MCRegisterInfo &MRI) {
511 if (SyntaxVariant == 0)
512 return new X86ATTInstPrinter(MAI, MII, MRI);
513 if (SyntaxVariant == 1)
514 return new X86IntelInstPrinter(MAI, MII, MRI);
515 return nullptr;
516}
517
519 MCContext &Ctx) {
520 // Default to the stock relocation info.
521 return llvm::createMCRelocationInfo(TheTriple, Ctx);
522}
523
524namespace llvm {
525namespace X86_MC {
526
527class X86MCInstrAnalysis : public MCInstrAnalysis {
528 X86MCInstrAnalysis(const X86MCInstrAnalysis &) = delete;
529 X86MCInstrAnalysis &operator=(const X86MCInstrAnalysis &) = delete;
530 ~X86MCInstrAnalysis() override = default;
531
532public:
534
535#define GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS
536#include "X86GenSubtargetInfo.inc"
537
538 bool clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst,
539 APInt &Mask) const override;
540 std::vector<std::pair<uint64_t, uint64_t>>
541 findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
542 const MCSubtargetInfo &STI) const override;
543
544 bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
545 uint64_t &Target) const override;
546 std::optional<uint64_t>
548 uint64_t Addr, uint64_t Size) const override;
549 std::optional<uint64_t>
551 uint64_t Size) const override;
552};
553
554#define GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS
555#include "X86GenSubtargetInfo.inc"
556
558 const MCInst &Inst,
559 APInt &Mask) const {
560 const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
561 unsigned NumDefs = Desc.getNumDefs();
562 unsigned NumImplicitDefs = Desc.implicit_defs().size();
563 assert(Mask.getBitWidth() == NumDefs + NumImplicitDefs &&
564 "Unexpected number of bits in the mask!");
565
566 bool HasVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::VEX;
567 bool HasEVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
568 bool HasXOP = (Desc.TSFlags & X86II::EncodingMask) == X86II::XOP;
569
570 const MCRegisterClass &GR32RC = MRI.getRegClass(X86::GR32RegClassID);
571 const MCRegisterClass &VR128XRC = MRI.getRegClass(X86::VR128XRegClassID);
572 const MCRegisterClass &VR256XRC = MRI.getRegClass(X86::VR256XRegClassID);
573
574 auto ClearsSuperReg = [=](MCRegister RegID) {
575 // On X86-64, a general purpose integer register is viewed as a 64-bit
576 // register internal to the processor.
577 // An update to the lower 32 bits of a 64 bit integer register is
578 // architecturally defined to zero extend the upper 32 bits.
579 if (GR32RC.contains(RegID))
580 return true;
581
582 // Early exit if this instruction has no vex/evex/xop prefix.
583 if (!HasEVEX && !HasVEX && !HasXOP)
584 return false;
585
586 // All VEX and EVEX encoded instructions are defined to zero the high bits
587 // of the destination register up to VLMAX (i.e. the maximum vector register
588 // width pertaining to the instruction).
589 // We assume the same behavior for XOP instructions too.
590 return VR128XRC.contains(RegID) || VR256XRC.contains(RegID);
591 };
592
593 Mask.clearAllBits();
594 for (unsigned I = 0, E = NumDefs; I < E; ++I) {
595 const MCOperand &Op = Inst.getOperand(I);
596 if (ClearsSuperReg(Op.getReg()))
597 Mask.setBit(I);
598 }
599
600 for (unsigned I = 0, E = NumImplicitDefs; I < E; ++I) {
601 const MCPhysReg Reg = Desc.implicit_defs()[I];
602 if (ClearsSuperReg(Reg))
603 Mask.setBit(NumDefs + I);
604 }
605
606 return Mask.getBoolValue();
607}
608
609static std::vector<std::pair<uint64_t, uint64_t>>
610findX86PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
611 // Do a lightweight parsing of PLT entries.
612 std::vector<std::pair<uint64_t, uint64_t>> Result;
613 for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End; ) {
614 // Recognize a jmp.
615 if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0xa3) {
616 // The jmp instruction at the beginning of each PLT entry jumps to the
617 // address of the base of the .got.plt section plus the immediate.
618 // Set the 1 << 32 bit to let ELFObjectFileBase::getPltEntries convert the
619 // offset to an address. Imm may be a negative int32_t if the GOT entry is
620 // in .got.
621 uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
622 Result.emplace_back(PltSectionVA + Byte, Imm | (uint64_t(1) << 32));
623 Byte += 6;
624 } else if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
625 // The jmp instruction at the beginning of each PLT entry jumps to the
626 // immediate.
627 uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
628 Result.push_back(std::make_pair(PltSectionVA + Byte, Imm));
629 Byte += 6;
630 } else
631 Byte++;
632 }
633 return Result;
634}
635
636static std::vector<std::pair<uint64_t, uint64_t>>
638 // Do a lightweight parsing of PLT entries.
639 std::vector<std::pair<uint64_t, uint64_t>> Result;
640 for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End; ) {
641 // Recognize a jmp.
642 if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
643 // The jmp instruction at the beginning of each PLT entry jumps to the
644 // address of the next instruction plus the immediate.
645 uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
646 Result.push_back(
647 std::make_pair(PltSectionVA + Byte, PltSectionVA + Byte + 6 + Imm));
648 Byte += 6;
649 } else
650 Byte++;
651 }
652 return Result;
653}
654
655std::vector<std::pair<uint64_t, uint64_t>>
657 ArrayRef<uint8_t> PltContents,
658 const MCSubtargetInfo &STI) const {
659 const Triple &TargetTriple = STI.getTargetTriple();
660 switch (TargetTriple.getArch()) {
661 case Triple::x86:
662 return findX86PltEntries(PltSectionVA, PltContents);
663 case Triple::x86_64:
664 return findX86_64PltEntries(PltSectionVA, PltContents);
665 default:
666 return {};
667 }
668}
669
671 uint64_t Size, uint64_t &Target) const {
672 if (Inst.getNumOperands() == 0 ||
673 Info->get(Inst.getOpcode()).operands()[0].OperandType !=
675 return false;
676 Target = Addr + Size + Inst.getOperand(0).getImm();
677 return true;
678}
679
681 const MCInst &Inst, const MCSubtargetInfo *STI, uint64_t Addr,
682 uint64_t Size) const {
683 const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
684 int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
685 if (MemOpStart == -1)
686 return std::nullopt;
687 MemOpStart += X86II::getOperandBias(MCID);
688
689 const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
690 const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
691 const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
692 const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
693 const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
694 if (SegReg.getReg() || IndexReg.getReg() || ScaleAmt.getImm() != 1 ||
695 !Disp.isImm())
696 return std::nullopt;
697
698 // RIP-relative addressing.
699 if (BaseReg.getReg() == X86::RIP)
700 return Addr + Size + Disp.getImm();
701
702 return std::nullopt;
703}
704
705std::optional<uint64_t>
707 uint64_t Size) const {
708 if (Inst.getOpcode() != X86::LEA64r)
709 return std::nullopt;
710 const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
711 int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
712 if (MemOpStart == -1)
713 return std::nullopt;
714 MemOpStart += X86II::getOperandBias(MCID);
715 const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
716 const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
717 const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
718 const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
719 const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
720 // Must be a simple rip-relative address.
721 if (BaseReg.getReg() != X86::RIP || SegReg.getReg() || IndexReg.getReg() ||
722 ScaleAmt.getImm() != 1 || !Disp.isImm())
723 return std::nullopt;
724 // rip-relative ModR/M immediate is 32 bits.
725 assert(Size > 4 && "invalid instruction size for rip-relative lea");
726 return Size - 4;
727}
728
729} // end of namespace X86_MC
730
731} // end of namespace llvm
732
734 return new X86_MC::X86MCInstrAnalysis(Info);
735}
736
737// Force static initialization.
740 // Register the MC asm info.
742
743 // Register the MC instruction info.
745
746 // Register the MC register info.
748
749 // Register the MC subtarget info.
752
753 // Register the MC instruction analyzer.
755
756 // Register the code emitter.
758
759 // Register the obj target streamer.
762
763 // Register the asm target streamer.
765
766 // Register the null streamer.
768
771
772 // Register the MCInstPrinter.
774
775 // Register the MC relocation info.
777 }
778
779 // Register the asm backend.
784}
785
787 bool High) {
788#define DEFAULT_NOREG \
789 default: \
790 return X86::NoRegister;
791#define SUB_SUPER(R1, R2, R3, R4, R) \
792 case X86::R1: \
793 case X86::R2: \
794 case X86::R3: \
795 case X86::R4: \
796 return X86::R;
797#define A_SUB_SUPER(R) \
798 case X86::AH: \
799 SUB_SUPER(AL, AX, EAX, RAX, R)
800#define D_SUB_SUPER(R) \
801 case X86::DH: \
802 SUB_SUPER(DL, DX, EDX, RDX, R)
803#define C_SUB_SUPER(R) \
804 case X86::CH: \
805 SUB_SUPER(CL, CX, ECX, RCX, R)
806#define B_SUB_SUPER(R) \
807 case X86::BH: \
808 SUB_SUPER(BL, BX, EBX, RBX, R)
809#define SI_SUB_SUPER(R) SUB_SUPER(SIL, SI, ESI, RSI, R)
810#define DI_SUB_SUPER(R) SUB_SUPER(DIL, DI, EDI, RDI, R)
811#define BP_SUB_SUPER(R) SUB_SUPER(BPL, BP, EBP, RBP, R)
812#define SP_SUB_SUPER(R) SUB_SUPER(SPL, SP, ESP, RSP, R)
813#define NO_SUB_SUPER(NO, REG) \
814 SUB_SUPER(R##NO##B, R##NO##W, R##NO##D, R##NO, REG)
815#define NO_SUB_SUPER_B(NO) NO_SUB_SUPER(NO, R##NO##B)
816#define NO_SUB_SUPER_W(NO) NO_SUB_SUPER(NO, R##NO##W)
817#define NO_SUB_SUPER_D(NO) NO_SUB_SUPER(NO, R##NO##D)
818#define NO_SUB_SUPER_Q(NO) NO_SUB_SUPER(NO, R##NO)
819 switch (Size) {
820 default:
821 llvm_unreachable("illegal register size");
822 case 8:
823 if (High) {
824 switch (Reg.id()) {
826 A_SUB_SUPER(AH)
827 D_SUB_SUPER(DH)
829 B_SUB_SUPER(BH)
830 }
831 } else {
832 switch (Reg.id()) {
834 A_SUB_SUPER(AL)
836 C_SUB_SUPER(CL)
837 B_SUB_SUPER(BL)
838 SI_SUB_SUPER(SIL)
839 DI_SUB_SUPER(DIL)
840 BP_SUB_SUPER(BPL)
841 SP_SUB_SUPER(SPL)
866 }
867 }
868 case 16:
869 switch (Reg.id()) {
871 A_SUB_SUPER(AX)
872 D_SUB_SUPER(DX)
873 C_SUB_SUPER(CX)
874 B_SUB_SUPER(BX)
876 DI_SUB_SUPER(DI)
877 BP_SUB_SUPER(BP)
878 SP_SUB_SUPER(SP)
903 }
904 case 32:
905 switch (Reg.id()) {
907 A_SUB_SUPER(EAX)
908 D_SUB_SUPER(EDX)
909 C_SUB_SUPER(ECX)
910 B_SUB_SUPER(EBX)
911 SI_SUB_SUPER(ESI)
912 DI_SUB_SUPER(EDI)
913 BP_SUB_SUPER(EBP)
914 SP_SUB_SUPER(ESP)
939 }
940 case 64:
941 switch (Reg.id()) {
943 A_SUB_SUPER(RAX)
944 D_SUB_SUPER(RDX)
945 C_SUB_SUPER(RCX)
946 B_SUB_SUPER(RBX)
947 SI_SUB_SUPER(RSI)
949 BP_SUB_SUPER(RBP)
950 SP_SUB_SUPER(RSP)
975 }
976 }
977}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefInfo & RDI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define X(NUM, ENUM, NAME)
Definition ELF.h:851
IRTranslator LLVM IR MI
static LVOptions Options
Definition LVOptions.cpp:25
#define I(x, y, z)
Definition MD5.cpp:57
#define T
uint64_t High
#define P(N)
#define CH(x, y, z)
Definition SHA256.cpp:34
SI optimize exec mask operations pre RA
#define LLVM_C_ABI
LLVM_C_ABI is the export/visibility macro used to mark symbols declared in llvm-c as exported when bu...
Definition Visibility.h:40
static bool is64Bit(const char *name)
#define NO_SUB_SUPER_W(NO)
#define NO_SUB_SUPER_Q(NO)
static MCRelocationInfo * createX86MCRelocationInfo(const Triple &TheTriple, MCContext &Ctx)
static MCInstrInfo * createX86MCInstrInfo()
#define C_SUB_SUPER(R)
#define NO_SUB_SUPER_D(NO)
#define DEFAULT_NOREG
static MCRegisterInfo * createX86MCRegisterInfo(const Triple &TT)
#define SP_SUB_SUPER(R)
static MCInstPrinter * createX86MCInstPrinter(const Triple &T, unsigned SyntaxVariant, const MCAsmInfo &MAI, const MCInstrInfo &MII, const MCRegisterInfo &MRI)
static MCInstrAnalysis * createX86MCInstrAnalysis(const MCInstrInfo *Info)
static void populateReservedIdentifiers(StringSet<> &Set, const MCRegisterInfo &MRI)
#define SI_SUB_SUPER(R)
#define BP_SUB_SUPER(R)
#define B_SUB_SUPER(R)
LLVM_C_ABI void LLVMInitializeX86TargetMC()
#define DI_SUB_SUPER(R)
#define NO_SUB_SUPER_B(NO)
#define A_SUB_SUPER(R)
#define D_SUB_SUPER(R)
static MCAsmInfo * createX86MCAsmInfo(const MCRegisterInfo &MRI, const Triple &TheTriple, const MCTargetOptions &Options)
static bool isMemOperand(const MCInst &MI, unsigned Op, unsigned RegClassID)
Class for arbitrary precision integers.
Definition APInt.h:78
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
const T * data() const
Definition ArrayRef.h:139
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:64
void addInitialFrameState(const MCCFIInstruction &Inst)
Definition MCAsmInfo.cpp:53
static MCCFIInstruction cfiDefCfa(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_def_cfa defines a rule for computing CFA as: take address from Register and add Offset to it.
Definition MCDwarf.h:576
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
Definition MCDwarf.h:618
Context object for machine code objects.
Definition MCContext.h:83
This is an instance of a target assembly language printer that converts an MCInst to valid target ass...
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
unsigned getNumOperands() const
Definition MCInst.h:212
unsigned getOpcode() const
Definition MCInst.h:202
const MCOperand & getOperand(unsigned i) const
Definition MCInst.h:210
const MCInstrInfo * Info
MCInstrAnalysis(const MCInstrInfo *Info)
Describe properties that are true of each instruction in the target description file.
Interface to description of machine instruction set.
Definition MCInstrInfo.h:27
Instances of this class represent operands of the MCInst class.
Definition MCInst.h:40
int64_t getImm() const
Definition MCInst.h:84
bool isImm() const
Definition MCInst.h:66
MCRegister getReg() const
Returns the register number.
Definition MCInst.h:73
MCRegisterClass - Base class of TargetRegisterClass.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
void mapLLVMRegToCVReg(MCRegister LLVMReg, int CVReg)
const char * getName(MCRegister RegNo) const
Return the human-readable symbolic target-specific name for the specified physical register.
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
void mapLLVMRegToSEHReg(MCRegister LLVMReg, int SEHReg)
mapLLVMRegToSEHReg - Used to initialize LLVM register to SEH register number mapping.
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
virtual int64_t getDwarfRegNum(MCRegister Reg, bool isEH) const
Map a target register to an equivalent dwarf register number.
unsigned getNumRegs() const
Return the number of registers this target has (useful for sizing arrays holding per register informa...
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
Create MCExprs from relocations found in an object file.
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
StringSet - A wrapper for StringMap that provides set-like functionality.
Definition StringSet.h:25
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isOSCygMing() const
Tests for either Cygwin or MinGW OS.
Definition Triple.h:728
bool isX86_64() const
Tests whether the target is x86 (64-bit).
Definition Triple.h:1109
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Definition Triple.h:781
bool isWindowsCoreCLREnvironment() const
Definition Triple.h:711
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:429
bool isUEFI() const
Tests whether the OS is UEFI.
Definition Triple.h:689
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition Triple.h:772
bool isWindowsMSVCEnvironment() const
Checks if the environment could be MSVC.
Definition Triple.h:700
bool isWindowsItaniumEnvironment() const
Definition Triple.h:715
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size, uint64_t &Target) const override
Given a branch instruction try to get the address the branch targets.
X86MCInstrAnalysis(const MCInstrInfo *MCII)
std::optional< uint64_t > evaluateMemoryOperandAddress(const MCInst &Inst, const MCSubtargetInfo *STI, uint64_t Addr, uint64_t Size) const override
Given an instruction tries to get the address of a memory operand.
std::optional< uint64_t > getMemoryOperandRelocationOffset(const MCInst &Inst, uint64_t Size) const override
Given an instruction with a memory operand that could require relocation, returns the offset within t...
std::vector< std::pair< uint64_t, uint64_t > > findPltEntries(uint64_t PltSectionVA, ArrayRef< uint8_t > PltContents, const MCSubtargetInfo &STI) const override
Returns (PLT virtual address, GOT virtual address) pairs for PLT entries.
bool clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst, APInt &Mask) const override
Returns true if at least one of the register writes performed by.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ RawFrmDstSrc
RawFrmDstSrc - This form is for instructions that use the source index register SI/ESI/RSI with a pos...
@ EVEX
EVEX - Specifies that this instruction use EVEX form which provides syntax support up to 32 512-bit r...
@ RawFrmDst
RawFrmDst - This form is for instructions that use the destination index register DI/EDI/RDI.
@ VEX
VEX - encoding using 0xC4/0xC5.
@ XOP
XOP - Opcode prefix used by XOP instructions.
@ RawFrmSrc
RawFrmSrc - This form is for instructions that use the source index register SI/ESI/RSI with a possib...
int getMemoryOperandNo(uint64_t TSFlags)
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
bool is32BitMemOperand(const MCInst &MI, unsigned Op)
bool is16BitMemOperand(const MCInst &MI, unsigned Op, const MCSubtargetInfo &STI)
bool hasLockPrefix(const MCInst &MI)
Returns true if this instruction has a LOCK prefix.
void initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI)
static std::vector< std::pair< uint64_t, uint64_t > > findX86_64PltEntries(uint64_t PltSectionVA, ArrayRef< uint8_t > PltContents)
static std::vector< std::pair< uint64_t, uint64_t > > findX86PltEntries(uint64_t PltSectionVA, ArrayRef< uint8_t > PltContents)
bool needsAddressSizeOverride(const MCInst &MI, const MCSubtargetInfo &STI, int MemoryOperand, uint64_t TSFlags)
Returns true if this instruction needs an Address-Size override prefix.
std::string ParseX86Triple(const Triple &TT)
MCSubtargetInfo * createX86MCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS)
Create a X86 MCSubtargetInfo instance.
bool is64BitMemOperand(const MCInst &MI, unsigned Op)
unsigned getDwarfRegFlavour(const Triple &TT, bool isEH)
uint32_t read32le(const void *P)
Definition Endian.h:432
This is an optimization pass for GlobalISel generic memory operations.
MCTargetStreamer * createX86ObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI)
Implements X86-only directives for object files.
MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)
MCAsmBackend * createX86_64AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
MCTargetStreamer * createX86AsmTargetStreamer(MCStreamer &S, formatted_raw_ostream &OS, MCInstPrinter *InstPrinter)
Implements X86-only directives for assembly emission.
MCCodeEmitter * createX86MCCodeEmitter(const MCInstrInfo &MCII, MCContext &Ctx)
Target & getTheX86_32Target()
Op::Description Desc
LLVM_ABI MCRelocationInfo * createMCRelocationInfo(const Triple &TT, MCContext &Ctx)
MCStreamer * createX86ELFStreamer(const Triple &T, MCContext &Context, std::unique_ptr< MCAsmBackend > &&MAB, std::unique_ptr< MCObjectWriter > &&MOW, std::unique_ptr< MCCodeEmitter > &&MCE)
MCStreamer * createX86WinCOFFStreamer(MCContext &C, std::unique_ptr< MCAsmBackend > &&AB, std::unique_ptr< MCObjectWriter > &&OW, std::unique_ptr< MCCodeEmitter > &&CE)
Construct an X86 Windows COFF machine code streamer which will generate PE/COFF format object files.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
MCAsmBackend * createX86_32AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
Target & getTheX86_64Target()
MCTargetStreamer * createX86NullTargetStreamer(MCStreamer &S)
Implements X86-only null emission.
RegisterMCAsmInfoFn - Helper template for registering a target assembly info implementation.
static void RegisterMCRegInfo(Target &T, Target::MCRegInfoCtorFnTy Fn)
RegisterMCRegInfo - Register a MCRegisterInfo implementation for the given target.
static void RegisterMCAsmBackend(Target &T, Target::MCAsmBackendCtorTy Fn)
RegisterMCAsmBackend - Register a MCAsmBackend implementation for the given target.
static void RegisterMCCodeEmitter(Target &T, Target::MCCodeEmitterCtorTy Fn)
RegisterMCCodeEmitter - Register a MCCodeEmitter implementation for the given target.
static void RegisterMCSubtargetInfo(Target &T, Target::MCSubtargetInfoCtorFnTy Fn)
RegisterMCSubtargetInfo - Register a MCSubtargetInfo implementation for the given target.
static void RegisterObjectTargetStreamer(Target &T, Target::ObjectTargetStreamerCtorTy Fn)
static void RegisterMCInstrAnalysis(Target &T, Target::MCInstrAnalysisCtorFnTy Fn)
RegisterMCInstrAnalysis - Register a MCInstrAnalysis implementation for the given target.
static void RegisterELFStreamer(Target &T, Target::ELFStreamerCtorTy Fn)
static void RegisterNullTargetStreamer(Target &T, Target::NullTargetStreamerCtorTy Fn)
static void RegisterMCInstPrinter(Target &T, Target::MCInstPrinterCtorTy Fn)
RegisterMCInstPrinter - Register a MCInstPrinter implementation for the given target.
static void RegisterCOFFStreamer(Target &T, Target::COFFStreamerCtorTy Fn)
static void RegisterMCInstrInfo(Target &T, Target::MCInstrInfoCtorFnTy Fn)
RegisterMCInstrInfo - Register a MCInstrInfo implementation for the given target.
static void RegisterAsmTargetStreamer(Target &T, Target::AsmTargetStreamerCtorTy Fn)
static void RegisterMCRelocationInfo(Target &T, Target::MCRelocationInfoCtorTy Fn)
RegisterMCRelocationInfo - Register an MCRelocationInfo implementation for the given target.