// Excerpt from llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp.
bool X86::optimizeInstFromVEX3ToVEX2(MCInst &MI, const MCInstrDesc &Desc) {
  unsigned OpIdx1, OpIdx2;
  unsigned Opcode = MI.getOpcode();
  unsigned NewOpc = 0;
#define FROM_TO(FROM, TO, IDX1, IDX2)                                         \
  case X86::FROM:                                                             \
    NewOpc = X86::TO;                                                         \
    OpIdx1 = IDX1;                                                            \
    OpIdx2 = IDX2;                                                            \
    break;
#define TO_REV(FROM) FROM_TO(FROM, FROM##_REV, 0, 1)
  switch (Opcode) {
  default: {
    // Candidates are commutable, VEX-encoded MRMSrcReg instructions with
    // exactly three operands (the Desc.TSFlags checks are elided here).
    if (/* ... */
        MI.getNumOperands() != 3)
      return false;
    // These are not truly commutable.
    if (Opcode == X86::VMOVHLPSrr || Opcode == X86::VUNPCKHPDrr)
      return false;
    // ...
  }
  // ... (cases for the VCMP instructions, which commute only when the
  // comparison predicate is symmetric in its operands)
    switch (MI.getOperand(3).getImm() & 0x7) {
    // ...
    }
  // ...
  FROM_TO(VMOVZPQILo2PQIrr, VMOVPQI2QIrr, 0, 1)
  // ...
#undef TO_REV
#define TO_REV(FROM) FROM_TO(FROM, FROM##_REV, 0, 2)
  // ... (remaining cases)
  }
  // On a match, MI gets the new opcode and the operands at OpIdx1 and OpIdx2
  // are swapped so the instruction can take the 2-byte VEX prefix.
  // ...
}
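// Illustrative sketch, not part of the original file: how a caller might feed
// an MCInst through the rewrite above. It assumes the usual MC headers
// (llvm/MC/MCInst.h, llvm/MC/MCInstBuilder.h, llvm/MC/MCInstrInfo.h) and an
// MCInstrInfo for the X86 target; the function name is made up.
static void exampleShrinkVEXPrefix(const MCInstrInfo &MCII) {
  // vpaddd %xmm8, %xmm1, %xmm0: %xmm8 sits in the ModRM r/m field, which
  // needs VEX.B and therefore a 3-byte VEX prefix.
  MCInst MI = MCInstBuilder(X86::VPADDDrr)
                  .addReg(X86::XMM0)  // destination
                  .addReg(X86::XMM1)  // first source (encoded in VEX.vvvv)
                  .addReg(X86::XMM8); // second source (encoded in ModRM r/m)
  // VPADDD is commutable, so swapping the sources leaves %xmm1 in r/m and
  // moves %xmm8 into vvvv, allowing the 2-byte VEX prefix.
  (void)X86::optimizeInstFromVEX3ToVEX2(MI, MCII.get(MI.getOpcode()));
}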
bool X86::optimizeShiftRotateWithImmediateOne(MCInst &MI) {
  unsigned NewOpc;
#define TO_IMM1(FROM)                                                         \
  case X86::FROM##i:                                                          \
    NewOpc = X86::FROM##1;                                                    \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
  // ... (TO_IMM1 entries for the shift and rotate opcodes)
  }
  // Rewrite only when the trailing immediate operand is exactly 1, which the
  // shorter "by one" encodings imply.
  // ...
  MI.setOpcode(NewOpc);
  // ...
}
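// Illustrative sketch, not part of the original file: "shrq $1, %rax"
// assembled as SHR64ri can be re-encoded with the implied shift-by-one
// opcode, saving the immediate byte. MCInstBuilder usage and the function
// name are assumptions for the example.
static void exampleShiftByOne() {
  MCInst MI = MCInstBuilder(X86::SHR64ri)
                  .addReg(X86::RAX) // destination
                  .addReg(X86::RAX) // tied source
                  .addImm(1);       // shift amount
  // Fires only because the immediate is exactly 1.
  (void)X86::optimizeShiftRotateWithImmediateOne(MI);
}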
bool X86::optimizeVPCMPWithImmediateOneOrSix(MCInst &MI) {
  unsigned Opc1;
  unsigned Opc2;
#define FROM_TO(FROM, TO1, TO2)                                               \
  case X86::FROM:                                                             \
    Opc1 = X86::TO1;                                                          \
    Opc2 = X86::TO2;                                                          \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
  FROM_TO(VPCMPBZ128rmi, VPCMPEQBZ128rm, VPCMPGTBZ128rm)
  FROM_TO(VPCMPBZ128rmik, VPCMPEQBZ128rmk, VPCMPGTBZ128rmk)
  FROM_TO(VPCMPBZ128rri, VPCMPEQBZ128rr, VPCMPGTBZ128rr)
  FROM_TO(VPCMPBZ128rrik, VPCMPEQBZ128rrk, VPCMPGTBZ128rrk)
  FROM_TO(VPCMPBZ256rmi, VPCMPEQBZ256rm, VPCMPGTBZ256rm)
  FROM_TO(VPCMPBZ256rmik, VPCMPEQBZ256rmk, VPCMPGTBZ256rmk)
  FROM_TO(VPCMPBZ256rri, VPCMPEQBZ256rr, VPCMPGTBZ256rr)
  FROM_TO(VPCMPBZ256rrik, VPCMPEQBZ256rrk, VPCMPGTBZ256rrk)
  FROM_TO(VPCMPBZrmi, VPCMPEQBZrm, VPCMPGTBZrm)
  FROM_TO(VPCMPBZrmik, VPCMPEQBZrmk, VPCMPGTBZrmk)
  FROM_TO(VPCMPBZrri, VPCMPEQBZrr, VPCMPGTBZrr)
  FROM_TO(VPCMPBZrrik, VPCMPEQBZrrk, VPCMPGTBZrrk)
  FROM_TO(VPCMPDZ128rmi, VPCMPEQDZ128rm, VPCMPGTDZ128rm)
  FROM_TO(VPCMPDZ128rmib, VPCMPEQDZ128rmb, VPCMPGTDZ128rmb)
  FROM_TO(VPCMPDZ128rmibk, VPCMPEQDZ128rmbk, VPCMPGTDZ128rmbk)
  FROM_TO(VPCMPDZ128rmik, VPCMPEQDZ128rmk, VPCMPGTDZ128rmk)
  FROM_TO(VPCMPDZ128rri, VPCMPEQDZ128rr, VPCMPGTDZ128rr)
  FROM_TO(VPCMPDZ128rrik, VPCMPEQDZ128rrk, VPCMPGTDZ128rrk)
  FROM_TO(VPCMPDZ256rmi, VPCMPEQDZ256rm, VPCMPGTDZ256rm)
  FROM_TO(VPCMPDZ256rmib, VPCMPEQDZ256rmb, VPCMPGTDZ256rmb)
  FROM_TO(VPCMPDZ256rmibk, VPCMPEQDZ256rmbk, VPCMPGTDZ256rmbk)
  FROM_TO(VPCMPDZ256rmik, VPCMPEQDZ256rmk, VPCMPGTDZ256rmk)
  FROM_TO(VPCMPDZ256rri, VPCMPEQDZ256rr, VPCMPGTDZ256rr)
  FROM_TO(VPCMPDZ256rrik, VPCMPEQDZ256rrk, VPCMPGTDZ256rrk)
  FROM_TO(VPCMPDZrmi, VPCMPEQDZrm, VPCMPGTDZrm)
  FROM_TO(VPCMPDZrmib, VPCMPEQDZrmb, VPCMPGTDZrmb)
  FROM_TO(VPCMPDZrmibk, VPCMPEQDZrmbk, VPCMPGTDZrmbk)
  FROM_TO(VPCMPDZrmik, VPCMPEQDZrmk, VPCMPGTDZrmk)
  FROM_TO(VPCMPDZrri, VPCMPEQDZrr, VPCMPGTDZrr)
  FROM_TO(VPCMPDZrrik, VPCMPEQDZrrk, VPCMPGTDZrrk)
  FROM_TO(VPCMPQZ128rmi, VPCMPEQQZ128rm, VPCMPGTQZ128rm)
  FROM_TO(VPCMPQZ128rmib, VPCMPEQQZ128rmb, VPCMPGTQZ128rmb)
  FROM_TO(VPCMPQZ128rmibk, VPCMPEQQZ128rmbk, VPCMPGTQZ128rmbk)
  FROM_TO(VPCMPQZ128rmik, VPCMPEQQZ128rmk, VPCMPGTQZ128rmk)
  FROM_TO(VPCMPQZ128rri, VPCMPEQQZ128rr, VPCMPGTQZ128rr)
  FROM_TO(VPCMPQZ128rrik, VPCMPEQQZ128rrk, VPCMPGTQZ128rrk)
  FROM_TO(VPCMPQZ256rmi, VPCMPEQQZ256rm, VPCMPGTQZ256rm)
  FROM_TO(VPCMPQZ256rmib, VPCMPEQQZ256rmb, VPCMPGTQZ256rmb)
  FROM_TO(VPCMPQZ256rmibk, VPCMPEQQZ256rmbk, VPCMPGTQZ256rmbk)
  FROM_TO(VPCMPQZ256rmik, VPCMPEQQZ256rmk, VPCMPGTQZ256rmk)
  FROM_TO(VPCMPQZ256rri, VPCMPEQQZ256rr, VPCMPGTQZ256rr)
  FROM_TO(VPCMPQZ256rrik, VPCMPEQQZ256rrk, VPCMPGTQZ256rrk)
  FROM_TO(VPCMPQZrmi, VPCMPEQQZrm, VPCMPGTQZrm)
  FROM_TO(VPCMPQZrmib, VPCMPEQQZrmb, VPCMPGTQZrmb)
  FROM_TO(VPCMPQZrmibk, VPCMPEQQZrmbk, VPCMPGTQZrmbk)
  FROM_TO(VPCMPQZrmik, VPCMPEQQZrmk, VPCMPGTQZrmk)
  FROM_TO(VPCMPQZrri, VPCMPEQQZrr, VPCMPGTQZrr)
  FROM_TO(VPCMPQZrrik, VPCMPEQQZrrk, VPCMPGTQZrrk)
  FROM_TO(VPCMPWZ128rmi, VPCMPEQWZ128rm, VPCMPGTWZ128rm)
  FROM_TO(VPCMPWZ128rmik, VPCMPEQWZ128rmk, VPCMPGTWZ128rmk)
  FROM_TO(VPCMPWZ128rri, VPCMPEQWZ128rr, VPCMPGTWZ128rr)
  FROM_TO(VPCMPWZ128rrik, VPCMPEQWZ128rrk, VPCMPGTWZ128rrk)
  FROM_TO(VPCMPWZ256rmi, VPCMPEQWZ256rm, VPCMPGTWZ256rm)
  FROM_TO(VPCMPWZ256rmik, VPCMPEQWZ256rmk, VPCMPGTWZ256rmk)
  FROM_TO(VPCMPWZ256rri, VPCMPEQWZ256rr, VPCMPGTWZ256rr)
  FROM_TO(VPCMPWZ256rrik, VPCMPEQWZ256rrk, VPCMPGTWZ256rrk)
  FROM_TO(VPCMPWZrmi, VPCMPEQWZrm, VPCMPGTWZrm)
  FROM_TO(VPCMPWZrmik, VPCMPEQWZrmk, VPCMPGTWZrmk)
  FROM_TO(VPCMPWZrri, VPCMPEQWZrr, VPCMPGTWZrr)
  FROM_TO(VPCMPWZrrik, VPCMPEQWZrrk, VPCMPGTWZrrk)
  }
  MCOperand &LastOp = MI.getOperand(MI.getNumOperands() - 1);
  int64_t Imm = LastOp.getImm();
  unsigned NewOpc;
  // ... (Imm selects between the EQ form (Opc1) and the GT form (Opc2); any
  // other predicate value leaves the instruction unchanged)
  MI.setOpcode(NewOpc);
  // ...
}
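// Illustrative sketch, not part of the original file: the table above
// replaces a generic VPCMP-with-predicate by the dedicated VPCMPEQ/VPCMPGT
// instruction, which drops the immediate byte. Register choices and the
// function name are made up for the example.
static void exampleVPCMP() {
  // vpcmpd $6, %xmm1, %xmm0, %k0 (predicate 6 = not-less-or-equal, i.e.
  // signed greater-than) can become vpcmpgtd %xmm1, %xmm0, %k0.
  MCInst MI = MCInstBuilder(X86::VPCMPDZ128rri)
                  .addReg(X86::K0)   // mask destination
                  .addReg(X86::XMM0) // first source
                  .addReg(X86::XMM1) // second source
                  .addImm(6);        // comparison predicate
  (void)X86::optimizeVPCMPWithImmediateOneOrSix(MI);
}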
bool X86::optimizeMOVSX(MCInst &MI) {
  unsigned NewOpc;
#define FROM_TO(FROM, TO, R0, R1)                                             \
  case X86::FROM:                                                             \
    if (MI.getOperand(0).getReg() != X86::R0 ||                               \
        MI.getOperand(1).getReg() != X86::R1)                                 \
      return false;                                                           \
    NewOpc = X86::TO;                                                         \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
  FROM_TO(MOVSX16rr8, CBW, AX, AL)
  FROM_TO(MOVSX32rr16, CWDE, EAX, AX)
  FROM_TO(MOVSX64rr32, CDQE, RAX, EAX)
  }
  // ...
  MI.setOpcode(NewOpc);
  return true;
}
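// Illustrative sketch, not part of the original file: "movswl %ax, %eax"
// performs the same sign extension as the one-byte CWDE, so when the
// registers are exactly %ax and %eax the rewrite above applies. MCInstBuilder
// usage and the function name are assumptions for the example.
static void exampleMOVSX() {
  MCInst MI =
      MCInstBuilder(X86::MOVSX32rr16).addReg(X86::EAX).addReg(X86::AX);
  (void)X86::optimizeMOVSX(MI); // rewritten to CWDE; any other register pair
                                // is left untouched
}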
bool X86::optimizeINCDEC(MCInst &MI, bool In64BitMode) {
  if (In64BitMode)
    return false;
  unsigned NewOpc;
#define FROM_TO(FROM, TO)                                                     \
  case X86::FROM:                                                             \
    NewOpc = X86::TO;                                                         \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
  // ... (16/32-bit INC and DEC register forms mapped to their one-byte
  // alternative encodings)
  }
  MI.setOpcode(NewOpc);
  return true;
}
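// Illustrative sketch, not part of the original file: outside 64-bit mode
// "incl %eax" has a one-byte 0x40+r encoding, so the two-byte FF /0 form can
// be replaced. In 64-bit mode the 0x40-0x4F bytes are REX prefixes, which is
// why the rewrite is gated on In64BitMode. The function name is made up.
static void exampleINCDEC(MCInst &MI) {
  (void)X86::optimizeINCDEC(MI, /*In64BitMode=*/false);
}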
static bool isARegister(unsigned Reg) {
  return Reg == X86::AL || Reg == X86::AX || Reg == X86::EAX || Reg == X86::RAX;
}
/// Simplify things like MOV32rm to MOV32o32a.
bool X86::optimizeMOV(MCInst &MI, bool In64BitMode) {
  // Don't make these simplifications in 64-bit mode; other assemblers don't
  // perform them because they make the code larger.
  if (In64BitMode)
    return false;
  unsigned NewOpc;
  switch (MI.getOpcode()) {
  default:
    return false;
  FROM_TO(MOV8mr_NOREX, MOV8o32a)
  // ...
  FROM_TO(MOV8rm_NOREX, MOV8ao32)
  // ... (the remaining 8/16/32-bit MOV load and store forms)
  }
  bool IsStore = MI.getOperand(0).isReg() && MI.getOperand(1).isReg();
  unsigned AddrBase = IsStore;
  unsigned RegOp = IsStore ? 0 : 5;
  unsigned AddrOp = AddrBase + 3;
  // Check whether the destination register can be fixed.
  unsigned Reg = MI.getOperand(RegOp).getReg();
  if (!isARegister(Reg))
    return false;
  // Check whether this is an absolute address.
  bool Absolute = true;
  if (MI.getOperand(AddrOp).isExpr()) {
    const MCExpr *MCE = MI.getOperand(AddrOp).getExpr();
    // ... (symbol references that are not absolute are rejected)
  }
  // ... (the base, scale and index operands must describe a bare displacement;
  // the instruction is then rebuilt with just the displacement and segment)
  MI.setOpcode(NewOpc);
  // ...
}
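// Illustrative sketch, not part of the original file: outside 64-bit mode a
// load or store of %al/%ax/%eax at a bare absolute address, e.g.
// "movl 0x1234, %eax", can use the short moffs forms selected above, dropping
// the ModRM/SIB bytes. The function name is made up.
static void exampleAbsoluteMOV(MCInst &MI) {
  // MI is assumed to be a MOV load/store whose memory operand has no base or
  // index register and whose register operand is the accumulator.
  (void)X86::optimizeMOV(MI, /*In64BitMode=*/false);
}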
/// Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instructions with a
/// short fixed-register form.
static bool optimizeToFixedRegisterForm(MCInst &MI) {
  unsigned NewOpc;
  switch (MI.getOpcode()) {
  default:
    return false;
  // ... (FROM_TO table mapping each register/immediate opcode to its
  // fixed-accumulator form)
  }
  // Check whether the destination register can be fixed.
  unsigned Reg = MI.getOperand(0).getReg();
  if (!isARegister(Reg))
    return false;
  // ... (the register operand is dropped, keeping only the immediate)
  MI.setOpcode(NewOpc);
  // ...
}
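// Illustrative sketch, not part of the original file: "cmpl $100, %eax"
// normally needs a ModRM byte (81 /7 id); with %eax as the destination it can
// use the accumulator-specific encoding (3D id) instead. The opcode chosen
// and the function name are assumptions for the example.
static void exampleFixedRegisterForm() {
  MCInst MI = MCInstBuilder(X86::CMP32ri).addReg(X86::EAX).addImm(100);
  (void)optimizeToFixedRegisterForm(MI); // drops the register operand
}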
unsigned X86::getOpcodeForShortImmediateForm(unsigned Opcode) {
#define ENTRY(LONG, SHORT)                                                    \
  case X86::LONG:                                                             \
    return X86::SHORT;
  switch (Opcode) {
  // ...
#include "X86EncodingOptimizationForImmediate.def"
  }
  // ...
}

unsigned X86::getOpcodeForLongImmediateForm(unsigned Opcode) {
#define ENTRY(LONG, SHORT)                                                    \
  case X86::SHORT:                                                            \
    return X86::LONG;
  switch (Opcode) {
  // ...
#include "X86EncodingOptimizationForImmediate.def"
  }
  // ...
}
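// Illustrative sketch, not part of the original file: the two helpers above
// walk the same .def table in opposite directions, e.g. relating the
// 32-bit-immediate ADD form to its sign-extended 8-bit twin. The opcode names
// are assumptions about the table's contents.
static void exampleImmediateFormLookup() {
  unsigned Short = X86::getOpcodeForShortImmediateForm(X86::ADD32ri); // ADD32ri8
  unsigned Long = X86::getOpcodeForLongImmediateForm(Short);          // ADD32ri
  (void)Long;
}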
static bool optimizeToShortImmediateForm(MCInst &MI) {
  unsigned NewOpc;
#define ENTRY(LONG, SHORT)                                                    \
  case X86::LONG:                                                             \
    NewOpc = X86::SHORT;                                                      \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
#include "X86EncodingOptimizationForImmediate.def"
  }
  MCOperand &LastOp = MI.getOperand(MI.getNumOperands() - 1);
  if (LastOp.isExpr()) {
    // ... (only a symbol reference already known to fit in 8 bits is accepted)
  } else if (LastOp.isImm()) {
    if (!isInt<8>(LastOp.getImm()))
      return false;
  }
  MI.setOpcode(NewOpc);
  return true;
}

bool X86::optimizeToFixedRegisterOrShortImmediateForm(MCInst &MI) {
  // Both rewrites may apply to the same instruction, so try each of them.
  bool ShortImm = optimizeToShortImmediateForm(MI);
  bool FixedReg = optimizeToFixedRegisterForm(MI);
  return ShortImm || FixedReg;
}
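// Illustrative sketch, not part of the original file: the combined entry
// point tries both rewrites independently. For "addl $1, %eax" the immediate
// fits in a signed byte, so the short-immediate rewrite fires; for
// "addl $0x12345, %eax" it does not, but the %eax-specific fixed-register
// form still applies. Values here are for illustration only.
static void exampleCombined() {
  MCInst MI = MCInstBuilder(X86::ADD32ri)
                  .addReg(X86::EAX) // destination
                  .addReg(X86::EAX) // tied source
                  .addImm(1);
  (void)X86::optimizeToFixedRegisterOrShortImmediateForm(MI);
}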