//===-- X86EncodingOptimization.cpp - X86 Encoding optimization -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the X86 encoding optimizations.
//
//===----------------------------------------------------------------------===//

#include "X86EncodingOptimization.h"
#include "X86BaseInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

bool X86::optimizeInstFromVEX3ToVEX2(MCInst &MI, const MCInstrDesc &Desc) {
  unsigned OpIdx1, OpIdx2;
  unsigned Opcode = MI.getOpcode();
  unsigned NewOpc = 0;
#define FROM_TO(FROM, TO, IDX1, IDX2) \
  case X86::FROM: \
    NewOpc = X86::TO; \
    OpIdx1 = IDX1; \
    OpIdx2 = IDX2; \
    break;
#define TO_REV(FROM) FROM_TO(FROM, FROM##_REV, 0, 1)
  switch (Opcode) {
  default: {
    // If the instruction is a commutable arithmetic instruction we might be
    // able to commute the operands to get a 2 byte VEX prefix.
    uint64_t TSFlags = Desc.TSFlags;
    if (!Desc.isCommutable() || (TSFlags & X86II::EncodingMask) != X86II::VEX ||
        (TSFlags & X86II::OpMapMask) != X86II::TB ||
        (TSFlags & X86II::FormMask) != X86II::MRMSrcReg ||
        (TSFlags & X86II::REX_W) || !(TSFlags & X86II::VEX_4V) ||
        MI.getNumOperands() != 3)
      return false;
    // These two are not truly commutable.
    if (Opcode == X86::VMOVHLPSrr || Opcode == X86::VUNPCKHPDrr)
      return false;
    OpIdx1 = 1;
    OpIdx2 = 2;
    break;
  }
  case X86::VCMPPDrri:
  case X86::VCMPPDYrri:
  case X86::VCMPPSrri:
  case X86::VCMPPSYrri:
  case X86::VCMPSDrr:
  case X86::VCMPSSrr: {
    switch (MI.getOperand(3).getImm() & 0x7) {
    default:
      return false;
    case 0x00: // EQUAL
    case 0x03: // UNORDERED
    case 0x04: // NOT EQUAL
    case 0x07: // ORDERED
      OpIdx1 = 1;
      OpIdx2 = 2;
      break;
    }
    break;
  }
    // Commute operands to get a smaller encoding by using VEX.R instead of
    // VEX.B if one of the registers is extended, but the other isn't.
    FROM_TO(VMOVZPQILo2PQIrr, VMOVPQI2QIrr, 0, 1)
    TO_REV(VMOVAPDrr)
    TO_REV(VMOVAPDYrr)
    TO_REV(VMOVAPSrr)
    TO_REV(VMOVAPSYrr)
    TO_REV(VMOVDQArr)
    TO_REV(VMOVDQAYrr)
    TO_REV(VMOVDQUrr)
    TO_REV(VMOVDQUYrr)
    TO_REV(VMOVUPDrr)
    TO_REV(VMOVUPDYrr)
    TO_REV(VMOVUPSrr)
    TO_REV(VMOVUPSYrr)
#undef TO_REV
#define TO_REV(FROM) FROM_TO(FROM, FROM##_REV, 0, 2)
    TO_REV(VMOVSDrr)
    TO_REV(VMOVSSrr)
#undef TO_REV
#undef FROM_TO
  }
  if (X86II::isX86_64ExtendedReg(MI.getOperand(OpIdx1).getReg()) ||
      !X86II::isX86_64ExtendedReg(MI.getOperand(OpIdx2).getReg()))
    return false;
  if (NewOpc)
    MI.setOpcode(NewOpc);
  else
    std::swap(MI.getOperand(OpIdx1), MI.getOperand(OpIdx2));
  return true;
}
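
// For example (AT&T syntax), "vaddps %xmm8, %xmm1, %xmm1" requires the 3-byte
// VEX prefix because the rm-encoded source %xmm8 needs VEX.B, which the
// 2-byte prefix lacks. Commuting the sources to "vaddps %xmm1, %xmm8, %xmm1"
// moves %xmm8 into VEX.vvvv, which both prefix forms carry in full, so the
// 2-byte encoding becomes legal and one byte is saved.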

// NOTE: We may write this as an InstAlias if it's only used by AsmParser. See
// validateTargetOperandClass.
bool X86::optimizeShiftRotateWithImmediateOne(MCInst &MI) {
  unsigned NewOpc;
#define TO_IMM1(FROM) \
  case X86::FROM##i: \
    NewOpc = X86::FROM##1; \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
    TO_IMM1(RCR8r)
    TO_IMM1(RCR16r)
    TO_IMM1(RCR32r)
    TO_IMM1(RCR64r)
    TO_IMM1(RCL8r)
    TO_IMM1(RCL16r)
    TO_IMM1(RCL32r)
    TO_IMM1(RCL64r)
    TO_IMM1(ROR8r)
    TO_IMM1(ROR16r)
    TO_IMM1(ROR32r)
    TO_IMM1(ROR64r)
    TO_IMM1(ROL8r)
    TO_IMM1(ROL16r)
    TO_IMM1(ROL32r)
    TO_IMM1(ROL64r)
    TO_IMM1(SAR8r)
    TO_IMM1(SAR16r)
    TO_IMM1(SAR32r)
    TO_IMM1(SAR64r)
    TO_IMM1(SHR8r)
    TO_IMM1(SHR16r)
    TO_IMM1(SHR32r)
    TO_IMM1(SHR64r)
    TO_IMM1(SHL8r)
    TO_IMM1(SHL16r)
    TO_IMM1(SHL32r)
    TO_IMM1(SHL64r)
    TO_IMM1(RCR8m)
    TO_IMM1(RCR16m)
    TO_IMM1(RCR32m)
    TO_IMM1(RCR64m)
    TO_IMM1(RCL8m)
    TO_IMM1(RCL16m)
    TO_IMM1(RCL32m)
    TO_IMM1(RCL64m)
    TO_IMM1(ROR8m)
    TO_IMM1(ROR16m)
    TO_IMM1(ROR32m)
    TO_IMM1(ROR64m)
    TO_IMM1(ROL8m)
    TO_IMM1(ROL16m)
    TO_IMM1(ROL32m)
    TO_IMM1(ROL64m)
    TO_IMM1(SAR8m)
    TO_IMM1(SAR16m)
    TO_IMM1(SAR32m)
    TO_IMM1(SAR64m)
    TO_IMM1(SHR8m)
    TO_IMM1(SHR16m)
    TO_IMM1(SHR32m)
    TO_IMM1(SHR64m)
    TO_IMM1(SHL8m)
    TO_IMM1(SHL16m)
    TO_IMM1(SHL32m)
    TO_IMM1(SHL64m)
#undef TO_IMM1
  }
  MCOperand &LastOp = MI.getOperand(MI.getNumOperands() - 1);
  if (!LastOp.isImm() || LastOp.getImm() != 1)
    return false;
  MI.setOpcode(NewOpc);
  MI.erase(&LastOp);
  return true;
}
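
// For example, "shl $1, %eax" with an explicit immediate assembles to
// C1 E0 01, while the dedicated shift-by-one form (SHL32r1) is D1 E0, one
// byte shorter. The rewrite only fires when the immediate is literally 1.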

bool X86::optimizeVPCMPWithImmediateOneOrSix(MCInst &MI) {
  unsigned Opc1;
  unsigned Opc2;
#define FROM_TO(FROM, TO1, TO2) \
  case X86::FROM: \
    Opc1 = X86::TO1; \
    Opc2 = X86::TO2; \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
    FROM_TO(VPCMPBZ128rmi, VPCMPEQBZ128rm, VPCMPGTBZ128rm)
    FROM_TO(VPCMPBZ128rmik, VPCMPEQBZ128rmk, VPCMPGTBZ128rmk)
    FROM_TO(VPCMPBZ128rri, VPCMPEQBZ128rr, VPCMPGTBZ128rr)
    FROM_TO(VPCMPBZ128rrik, VPCMPEQBZ128rrk, VPCMPGTBZ128rrk)
    FROM_TO(VPCMPBZ256rmi, VPCMPEQBZ256rm, VPCMPGTBZ256rm)
    FROM_TO(VPCMPBZ256rmik, VPCMPEQBZ256rmk, VPCMPGTBZ256rmk)
    FROM_TO(VPCMPBZ256rri, VPCMPEQBZ256rr, VPCMPGTBZ256rr)
    FROM_TO(VPCMPBZ256rrik, VPCMPEQBZ256rrk, VPCMPGTBZ256rrk)
    FROM_TO(VPCMPBZrmi, VPCMPEQBZrm, VPCMPGTBZrm)
    FROM_TO(VPCMPBZrmik, VPCMPEQBZrmk, VPCMPGTBZrmk)
    FROM_TO(VPCMPBZrri, VPCMPEQBZrr, VPCMPGTBZrr)
    FROM_TO(VPCMPBZrrik, VPCMPEQBZrrk, VPCMPGTBZrrk)
    FROM_TO(VPCMPDZ128rmi, VPCMPEQDZ128rm, VPCMPGTDZ128rm)
    FROM_TO(VPCMPDZ128rmib, VPCMPEQDZ128rmb, VPCMPGTDZ128rmb)
    FROM_TO(VPCMPDZ128rmibk, VPCMPEQDZ128rmbk, VPCMPGTDZ128rmbk)
    FROM_TO(VPCMPDZ128rmik, VPCMPEQDZ128rmk, VPCMPGTDZ128rmk)
    FROM_TO(VPCMPDZ128rri, VPCMPEQDZ128rr, VPCMPGTDZ128rr)
    FROM_TO(VPCMPDZ128rrik, VPCMPEQDZ128rrk, VPCMPGTDZ128rrk)
    FROM_TO(VPCMPDZ256rmi, VPCMPEQDZ256rm, VPCMPGTDZ256rm)
    FROM_TO(VPCMPDZ256rmib, VPCMPEQDZ256rmb, VPCMPGTDZ256rmb)
    FROM_TO(VPCMPDZ256rmibk, VPCMPEQDZ256rmbk, VPCMPGTDZ256rmbk)
    FROM_TO(VPCMPDZ256rmik, VPCMPEQDZ256rmk, VPCMPGTDZ256rmk)
    FROM_TO(VPCMPDZ256rri, VPCMPEQDZ256rr, VPCMPGTDZ256rr)
    FROM_TO(VPCMPDZ256rrik, VPCMPEQDZ256rrk, VPCMPGTDZ256rrk)
    FROM_TO(VPCMPDZrmi, VPCMPEQDZrm, VPCMPGTDZrm)
    FROM_TO(VPCMPDZrmib, VPCMPEQDZrmb, VPCMPGTDZrmb)
    FROM_TO(VPCMPDZrmibk, VPCMPEQDZrmbk, VPCMPGTDZrmbk)
    FROM_TO(VPCMPDZrmik, VPCMPEQDZrmk, VPCMPGTDZrmk)
    FROM_TO(VPCMPDZrri, VPCMPEQDZrr, VPCMPGTDZrr)
    FROM_TO(VPCMPDZrrik, VPCMPEQDZrrk, VPCMPGTDZrrk)
    FROM_TO(VPCMPQZ128rmi, VPCMPEQQZ128rm, VPCMPGTQZ128rm)
    FROM_TO(VPCMPQZ128rmib, VPCMPEQQZ128rmb, VPCMPGTQZ128rmb)
    FROM_TO(VPCMPQZ128rmibk, VPCMPEQQZ128rmbk, VPCMPGTQZ128rmbk)
    FROM_TO(VPCMPQZ128rmik, VPCMPEQQZ128rmk, VPCMPGTQZ128rmk)
    FROM_TO(VPCMPQZ128rri, VPCMPEQQZ128rr, VPCMPGTQZ128rr)
    FROM_TO(VPCMPQZ128rrik, VPCMPEQQZ128rrk, VPCMPGTQZ128rrk)
    FROM_TO(VPCMPQZ256rmi, VPCMPEQQZ256rm, VPCMPGTQZ256rm)
    FROM_TO(VPCMPQZ256rmib, VPCMPEQQZ256rmb, VPCMPGTQZ256rmb)
    FROM_TO(VPCMPQZ256rmibk, VPCMPEQQZ256rmbk, VPCMPGTQZ256rmbk)
    FROM_TO(VPCMPQZ256rmik, VPCMPEQQZ256rmk, VPCMPGTQZ256rmk)
    FROM_TO(VPCMPQZ256rri, VPCMPEQQZ256rr, VPCMPGTQZ256rr)
    FROM_TO(VPCMPQZ256rrik, VPCMPEQQZ256rrk, VPCMPGTQZ256rrk)
    FROM_TO(VPCMPQZrmi, VPCMPEQQZrm, VPCMPGTQZrm)
    FROM_TO(VPCMPQZrmib, VPCMPEQQZrmb, VPCMPGTQZrmb)
    FROM_TO(VPCMPQZrmibk, VPCMPEQQZrmbk, VPCMPGTQZrmbk)
    FROM_TO(VPCMPQZrmik, VPCMPEQQZrmk, VPCMPGTQZrmk)
    FROM_TO(VPCMPQZrri, VPCMPEQQZrr, VPCMPGTQZrr)
    FROM_TO(VPCMPQZrrik, VPCMPEQQZrrk, VPCMPGTQZrrk)
    FROM_TO(VPCMPWZ128rmi, VPCMPEQWZ128rm, VPCMPGTWZ128rm)
    FROM_TO(VPCMPWZ128rmik, VPCMPEQWZ128rmk, VPCMPGTWZ128rmk)
    FROM_TO(VPCMPWZ128rri, VPCMPEQWZ128rr, VPCMPGTWZ128rr)
    FROM_TO(VPCMPWZ128rrik, VPCMPEQWZ128rrk, VPCMPGTWZ128rrk)
    FROM_TO(VPCMPWZ256rmi, VPCMPEQWZ256rm, VPCMPGTWZ256rm)
    FROM_TO(VPCMPWZ256rmik, VPCMPEQWZ256rmk, VPCMPGTWZ256rmk)
    FROM_TO(VPCMPWZ256rri, VPCMPEQWZ256rr, VPCMPGTWZ256rr)
    FROM_TO(VPCMPWZ256rrik, VPCMPEQWZ256rrk, VPCMPGTWZ256rrk)
    FROM_TO(VPCMPWZrmi, VPCMPEQWZrm, VPCMPGTWZrm)
    FROM_TO(VPCMPWZrmik, VPCMPEQWZrmk, VPCMPGTWZrmk)
    FROM_TO(VPCMPWZrri, VPCMPEQWZrr, VPCMPGTWZrr)
    FROM_TO(VPCMPWZrrik, VPCMPEQWZrrk, VPCMPGTWZrrk)
#undef FROM_TO
  }
  MCOperand &LastOp = MI.getOperand(MI.getNumOperands() - 1);
  int64_t Imm = LastOp.getImm();
  unsigned NewOpc;
  if (Imm == 0)
    NewOpc = Opc1;
  else if (Imm == 6)
    NewOpc = Opc2;
  else
    return false;
  MI.setOpcode(NewOpc);
  MI.erase(&LastOp);
  return true;
}
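
// For example, "vpcmpd $0, %zmm1, %zmm0, %k0" (predicate 0 = EQ) can be
// encoded as "vpcmpeqd %zmm1, %zmm0, %k0", and predicate 6 (NLE, i.e. signed
// greater-than) as "vpcmpgtd"; both rewrites drop the immediate byte from
// the encoding.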

bool X86::optimizeMOVSX(MCInst &MI) {
  unsigned NewOpc;
#define FROM_TO(FROM, TO, R0, R1) \
  case X86::FROM: \
    if (MI.getOperand(0).getReg() != X86::R0 || \
        MI.getOperand(1).getReg() != X86::R1) \
      return false; \
    NewOpc = X86::TO; \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
    FROM_TO(MOVSX16rr8, CBW, AX, AL)     // movsbw %al, %ax   --> cbtw
    FROM_TO(MOVSX32rr16, CWDE, EAX, AX)  // movswl %ax, %eax  --> cwtl
    FROM_TO(MOVSX64rr32, CDQE, RAX, EAX) // movslq %eax, %rax --> cltq
#undef FROM_TO
  }
  MI.clear();
  MI.setOpcode(NewOpc);
  return true;
}
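
// The cbtw/cwtl/cltq forms are encoded with the single 98 opcode byte (plus
// any 66 or REX.W prefix), while movsbw/movswl need the two-byte 0F BE/0F BF
// escape and movslq the 63 /r form with a ModRM byte, so each rewrite saves
// one or two bytes.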

bool X86::optimizeINCDEC(MCInst &MI, bool In64BitMode) {
  if (In64BitMode)
    return false;
  unsigned NewOpc;
  // If we aren't in 64-bit mode we can use the 1-byte inc/dec instructions.
#define FROM_TO(FROM, TO) \
  case X86::FROM: \
    NewOpc = X86::TO; \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
    FROM_TO(DEC16r, DEC16r_alt)
    FROM_TO(DEC32r, DEC32r_alt)
    FROM_TO(INC16r, INC16r_alt)
    FROM_TO(INC32r, INC32r_alt)
  }
  MI.setOpcode(NewOpc);
  return true;
}
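
// For example, in 32-bit mode "incl %eax" can use the one-byte 40+r encoding
// (the _alt forms) instead of the two-byte FF /0 form; in 64-bit mode the
// bytes 40-4F are REX prefixes, so the short form does not exist there.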

static bool isARegister(unsigned Reg) {
  return Reg == X86::AL || Reg == X86::AX || Reg == X86::EAX || Reg == X86::RAX;
}

/// Simplify things like MOV32rm to MOV32o32a.
bool X86::optimizeMOV(MCInst &MI, bool In64BitMode) {
  // Don't make these simplifications in 64-bit mode; other assemblers don't
  // perform them because they make the code larger.
  if (In64BitMode)
    return false;
  unsigned NewOpc;
  // We don't currently select the correct instruction form for instructions
  // which have a short %eax, etc. form. Handle this by custom lowering, for
  // now.
  //
  // Note, we are currently not handling the following instructions:
  // MOV64ao8, MOV64o8a
  // XCHG16ar, XCHG32ar, XCHG64ar
  switch (MI.getOpcode()) {
  default:
    return false;
    FROM_TO(MOV8mr_NOREX, MOV8o32a)
    FROM_TO(MOV8mr, MOV8o32a)
    FROM_TO(MOV8rm_NOREX, MOV8ao32)
    FROM_TO(MOV8rm, MOV8ao32)
    FROM_TO(MOV16mr, MOV16o32a)
    FROM_TO(MOV16rm, MOV16ao32)
    FROM_TO(MOV32mr, MOV32o32a)
    FROM_TO(MOV32rm, MOV32ao32)
  }
  // For the load forms operand 0 is the destination register and the memory
  // operand starts at index 1, so operand 1 is its base register; for the
  // store forms the memory operand starts at index 0 and operand 1 is the
  // scale immediate.
  bool IsLoad = MI.getOperand(0).isReg() && MI.getOperand(1).isReg();
  unsigned AddrBase = IsLoad;
  unsigned RegOp = IsLoad ? 0 : 5;
  unsigned AddrOp = AddrBase + 3;
  // Check whether the register operand is the accumulator.
  unsigned Reg = MI.getOperand(RegOp).getReg();
  if (!isARegister(Reg))
    return false;
  // Check whether this is an absolute address.
  // FIXME: We know TLVP symbol refs aren't, but there should be a better way
  // to do this here.
  bool Absolute = true;
  if (MI.getOperand(AddrOp).isExpr()) {
    const MCExpr *MCE = MI.getOperand(AddrOp).getExpr();
    if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP)
        Absolute = false;
  }
  if (Absolute && (MI.getOperand(AddrBase + X86::AddrBaseReg).getReg() != 0 ||
                   MI.getOperand(AddrBase + X86::AddrScaleAmt).getImm() != 1 ||
                   MI.getOperand(AddrBase + X86::AddrIndexReg).getReg() != 0))
    return false;
  // If so, rewrite the instruction.
  MCOperand Saved = MI.getOperand(AddrOp);
  MCOperand Seg = MI.getOperand(AddrBase + X86::AddrSegmentReg);
  MI.clear();
  MI.setOpcode(NewOpc);
  MI.addOperand(Saved);
  MI.addOperand(Seg);
  return true;
}
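
// For example, outside 64-bit mode "movl 0x1234, %eax" can use the moffs
// form A1 imm32 (5 bytes) instead of 8B /r with a ModRM byte plus disp32
// (6 bytes), as long as the address is absolute: no base or index register
// and a scale of 1. The segment operand is preserved across the rewrite.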

/// Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instructions with
/// a short fixed-register form.
static bool optimizeToFixedRegisterForm(MCInst &MI) {
  unsigned NewOpc;
  switch (MI.getOpcode()) {
  default:
    return false;
    FROM_TO(ADC8ri, ADC8i8)
    FROM_TO(ADC16ri, ADC16i16)
    FROM_TO(ADC32ri, ADC32i32)
    FROM_TO(ADC64ri32, ADC64i32)
    FROM_TO(ADD8ri, ADD8i8)
    FROM_TO(ADD16ri, ADD16i16)
    FROM_TO(ADD32ri, ADD32i32)
    FROM_TO(ADD64ri32, ADD64i32)
    FROM_TO(AND8ri, AND8i8)
    FROM_TO(AND16ri, AND16i16)
    FROM_TO(AND32ri, AND32i32)
    FROM_TO(AND64ri32, AND64i32)
    FROM_TO(CMP8ri, CMP8i8)
    FROM_TO(CMP16ri, CMP16i16)
    FROM_TO(CMP32ri, CMP32i32)
    FROM_TO(CMP64ri32, CMP64i32)
    FROM_TO(OR8ri, OR8i8)
    FROM_TO(OR16ri, OR16i16)
    FROM_TO(OR32ri, OR32i32)
    FROM_TO(OR64ri32, OR64i32)
    FROM_TO(SBB8ri, SBB8i8)
    FROM_TO(SBB16ri, SBB16i16)
    FROM_TO(SBB32ri, SBB32i32)
    FROM_TO(SBB64ri32, SBB64i32)
    FROM_TO(SUB8ri, SUB8i8)
    FROM_TO(SUB16ri, SUB16i16)
    FROM_TO(SUB32ri, SUB32i32)
    FROM_TO(SUB64ri32, SUB64i32)
    FROM_TO(TEST8ri, TEST8i8)
    FROM_TO(TEST16ri, TEST16i16)
    FROM_TO(TEST32ri, TEST32i32)
    FROM_TO(TEST64ri32, TEST64i32)
    FROM_TO(XOR8ri, XOR8i8)
    FROM_TO(XOR16ri, XOR16i16)
    FROM_TO(XOR32ri, XOR32i32)
    FROM_TO(XOR64ri32, XOR64i32)
  }
  // Check whether the destination register can be fixed.
  unsigned Reg = MI.getOperand(0).getReg();
  if (!isARegister(Reg))
    return false;

  // If so, rewrite the instruction.
  MCOperand Saved = MI.getOperand(MI.getNumOperands() - 1);
  MI.clear();
  MI.setOpcode(NewOpc);
  MI.addOperand(Saved);
  return true;
}
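
// For example, "cmpl $100000, %eax" can use the eax-specific 3D id encoding
// (CMP32i32, 5 bytes) instead of 81 /7 id (6 bytes); the short form drops
// the ModRM byte but only exists for the accumulator register.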

unsigned X86::getOpcodeForShortImmediateForm(unsigned Opcode) {
#define ENTRY(LONG, SHORT) \
  case X86::LONG: \
    return X86::SHORT;
  switch (Opcode) {
  default:
    return Opcode;
#include "X86EncodingOptimizationForImmediate.def"
  }
}

unsigned X86::getOpcodeForLongImmediateForm(unsigned Opcode) {
#define ENTRY(LONG, SHORT) \
  case X86::SHORT: \
    return X86::LONG;
  switch (Opcode) {
  default:
    return Opcode;
#include "X86EncodingOptimizationForImmediate.def"
  }
}
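
// These two mappings are built from the same ENTRY pairs in
// X86EncodingOptimizationForImmediate.def (each pairing a full-width
// immediate opcode with its sign-extended 8-bit counterpart), and an opcode
// with no counterpart is returned unchanged, so callers may apply them
// unconditionally. A minimal usage sketch (illustrative, not from this file):
//   unsigned Short = X86::getOpcodeForShortImmediateForm(Opcode);
//   assert(X86::getOpcodeForLongImmediateForm(Short) == Opcode);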

static bool optimizeToShortImmediateForm(MCInst &MI) {
  unsigned NewOpc;
#define ENTRY(LONG, SHORT) \
  case X86::LONG: \
    NewOpc = X86::SHORT; \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
#include "X86EncodingOptimizationForImmediate.def"
  }
  MCOperand &LastOp = MI.getOperand(MI.getNumOperands() - 1);
  if (LastOp.isExpr()) {
    const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(LastOp.getExpr());
    if (!SRE || SRE->getKind() != MCSymbolRefExpr::VK_X86_ABS8)
      return false;
  } else if (LastOp.isImm()) {
    if (!isInt<8>(LastOp.getImm()))
      return false;
  }
  MI.setOpcode(NewOpc);
  return true;
}
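
// For example, "addl $16, %ebx" can shrink from 81 /0 id (6 bytes) to the
// sign-extended 83 /0 ib form (3 bytes); a VK_X86_ABS8 symbol reference
// gives the same 8-bit guarantee for a not-yet-resolved expression.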

bool X86::optimizeToFixedRegisterOrShortImmediateForm(MCInst &MI) {
  // We may optimize twice here.
  bool ShortImm = optimizeToShortImmediateForm(MI);
  bool FixedReg = optimizeToFixedRegisterForm(MI);
  return ShortImm || FixedReg;
}
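
// Both rewrites are attempted: e.g. "andl $0x12345, %eax" only qualifies for
// the fixed-register AND32i32 form, while "andl $8, %ebx" only qualifies for
// the sign-extended 8-bit immediate form.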