File: lib/Target/X86/X86InstrInfo.cpp
Warning: line 1772, column 14: Value stored to 'CommutableOpIdx1' during its initialization is never read
//===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrFoldTables.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define DEBUG_TYPE "x86-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "X86GenInstrInfo.inc"

static cl::opt<bool>
    NoFusing("disable-spill-fusing",
             cl::desc("Disable fusing of spill code into instructions"),
             cl::Hidden);
static cl::opt<bool>
    PrintFailedFusing("print-failed-fuse-candidates",
                      cl::desc("Print instructions that the allocator wants to"
                               " fuse, but the X86 backend currently can't"),
                      cl::Hidden);
static cl::opt<bool>
    ReMatPICStubLoad("remat-pic-stub-load",
                     cl::desc("Re-materialize load from stub in PIC mode"),
                     cl::init(false), cl::Hidden);
static cl::opt<unsigned>
    PartialRegUpdateClearance("partial-reg-update-clearance",
                              cl::desc("Clearance between two register writes "
                                       "for inserting XOR to avoid partial "
                                       "register update"),
                              cl::init(64), cl::Hidden);
static cl::opt<unsigned>
    UndefRegClearance("undef-reg-clearance",
                      cl::desc("How many idle instructions we would like before "
                               "certain undef register reads"),
                      cl::init(128), cl::Hidden);


// Pin the vtable to this file.
void X86InstrInfo::anchor() {}

X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
    : X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
                                               : X86::ADJCALLSTACKDOWN32),
                      (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
                                               : X86::ADJCALLSTACKUP32),
                      X86::CATCHRET,
                      (STI.is64Bit() ? X86::RETQ : X86::RETL)),
      Subtarget(STI), RI(STI.getTargetTriple()) {
}

bool
X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                    unsigned &SrcReg, unsigned &DstReg,
                                    unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default: break;
  case X86::MOVSX16rr8:
  case X86::MOVZX16rr8:
  case X86::MOVSX32rr8:
  case X86::MOVZX32rr8:
  case X86::MOVSX64rr8:
    if (!Subtarget.is64Bit())
      // It's not always legal to reference the low 8 bits of the larger
      // register in 32-bit mode.
      return false;
    LLVM_FALLTHROUGH;
  case X86::MOVSX32rr16:
  case X86::MOVZX32rr16:
  case X86::MOVSX64rr16:
  case X86::MOVSX64rr32: {
    if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
      // Be conservative.
      return false;
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    switch (MI.getOpcode()) {
    default: llvm_unreachable("Unreachable!");
    case X86::MOVSX16rr8:
    case X86::MOVZX16rr8:
    case X86::MOVSX32rr8:
    case X86::MOVZX32rr8:
    case X86::MOVSX64rr8:
      SubIdx = X86::sub_8bit;
      break;
    case X86::MOVSX32rr16:
    case X86::MOVZX32rr16:
    case X86::MOVSX64rr16:
      SubIdx = X86::sub_16bit;
      break;
    case X86::MOVSX64rr32:
      SubIdx = X86::sub_32bit;
      break;
    }
    return true;
  }
  }
  return false;
}
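
// For example (illustrative MIR), given
//   %dst:gr32 = MOVSX32rr16 %src:gr16
// this returns SrcReg = %src, DstReg = %dst and SubIdx = X86::sub_16bit,
// i.e. %src can be coalesced into the low 16 bits of %dst.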

int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();

  if (isFrameInstr(MI)) {
    unsigned StackAlign = TFI->getStackAlignment();
    int SPAdj = alignTo(getFrameSize(MI), StackAlign);
    SPAdj -= getFrameAdjustment(MI);
    if (!isFrameSetup(MI))
      SPAdj = -SPAdj;
    return SPAdj;
  }

  // To know whether a call adjusts the stack, we need information
  // that is bound to the following ADJCALLSTACKUP pseudo.
  // Look for the next ADJCALLSTACKUP that follows the call.
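  // For example (illustrative operand values), for a sequence such as
  //   CALL64pcrel32 @callee
  //   ADJCALLSTACKUP64 0, 8
  // operand 1 of the ADJCALLSTACKUP is the number of bytes the callee pops,
  // so the code below reports an adjustment of -8 for the call.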
  if (MI.isCall()) {
    const MachineBasicBlock *MBB = MI.getParent();
    auto I = ++MachineBasicBlock::const_iterator(MI);
    for (auto E = MBB->end(); I != E; ++I) {
      if (I->getOpcode() == getCallFrameDestroyOpcode() ||
          I->isCall())
        break;
    }

    // If we could not find a frame destroy opcode, then it has already
    // been simplified, so we don't care.
    if (I->getOpcode() != getCallFrameDestroyOpcode())
      return 0;

    return -(I->getOperand(1).getImm());
  }

  // Currently we handle only the PUSHes we can reasonably expect to see
  // in call sequences.
  switch (MI.getOpcode()) {
  default:
    return 0;
  case X86::PUSH32i8:
  case X86::PUSH32r:
  case X86::PUSH32rmm:
  case X86::PUSH32rmr:
  case X86::PUSHi32:
    return 4;
  case X86::PUSH64i8:
  case X86::PUSH64r:
  case X86::PUSH64rmm:
  case X86::PUSH64rmr:
  case X86::PUSH64i32:
    return 8;
  }
}

/// Return true and the FrameIndex if the specified
/// operand and following operands form a reference to the stack frame.
bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
                                  int &FrameIndex) const {
  if (MI.getOperand(Op + X86::AddrBaseReg).isFI() &&
      MI.getOperand(Op + X86::AddrScaleAmt).isImm() &&
      MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
      MI.getOperand(Op + X86::AddrDisp).isImm() &&
      MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 &&
      MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 &&
      MI.getOperand(Op + X86::AddrDisp).getImm() == 0) {
    FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex();
    return true;
  }
  return false;
}
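
// For example (illustrative MIR), a load such as
//   %eax = MOV32rm %stack.0, 1, $noreg, 0, $noreg
// matches the checks above: the base is a frame index, the scale is 1,
// there is no index register, and the displacement is 0.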

static bool isFrameLoadOpcode(int Opcode, unsigned &MemBytes) {
  switch (Opcode) {
  default:
    return false;
  case X86::MOV8rm:
  case X86::KMOVBkm:
    MemBytes = 1;
    return true;
  case X86::MOV16rm:
  case X86::KMOVWkm:
    MemBytes = 2;
    return true;
  case X86::MOV32rm:
  case X86::MOVSSrm:
  case X86::VMOVSSZrm:
  case X86::VMOVSSrm:
  case X86::KMOVDkm:
    MemBytes = 4;
    return true;
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSDrm:
  case X86::VMOVSDrm:
  case X86::VMOVSDZrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::KMOVQkm:
    MemBytes = 8;
    return true;
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVAPDrm:
  case X86::MOVUPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  case X86::VMOVAPSrm:
  case X86::VMOVUPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVUPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVUPSZ128rm:
  case X86::VMOVAPSZ128rm_NOVLX:
  case X86::VMOVUPSZ128rm_NOVLX:
  case X86::VMOVAPDZ128rm:
  case X86::VMOVUPDZ128rm:
  case X86::VMOVDQU8Z128rm:
  case X86::VMOVDQU16Z128rm:
  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQU64Z128rm:
    MemBytes = 16;
    return true;
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm_NOVLX:
  case X86::VMOVUPSZ256rm_NOVLX:
  case X86::VMOVAPDZ256rm:
  case X86::VMOVUPDZ256rm:
  case X86::VMOVDQU8Z256rm:
  case X86::VMOVDQU16Z256rm:
  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQU64Z256rm:
    MemBytes = 32;
    return true;
  case X86::VMOVAPSZrm:
  case X86::VMOVUPSZrm:
  case X86::VMOVAPDZrm:
  case X86::VMOVUPDZrm:
  case X86::VMOVDQU8Zrm:
  case X86::VMOVDQU16Zrm:
  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU64Zrm:
    MemBytes = 64;
    return true;
  }
}

static bool isFrameStoreOpcode(int Opcode, unsigned &MemBytes) {
  switch (Opcode) {
  default:
    return false;
  case X86::MOV8mr:
  case X86::KMOVBmk:
    MemBytes = 1;
    return true;
  case X86::MOV16mr:
  case X86::KMOVWmk:
    MemBytes = 2;
    return true;
  case X86::MOV32mr:
  case X86::MOVSSmr:
  case X86::VMOVSSmr:
  case X86::VMOVSSZmr:
  case X86::KMOVDmk:
    MemBytes = 4;
    return true;
  case X86::MOV64mr:
  case X86::ST_FpP64m:
  case X86::MOVSDmr:
  case X86::VMOVSDmr:
  case X86::VMOVSDZmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
  case X86::KMOVQmk:
    MemBytes = 8;
    return true;
  case X86::MOVAPSmr:
  case X86::MOVUPSmr:
  case X86::MOVAPDmr:
  case X86::MOVUPDmr:
  case X86::MOVDQAmr:
  case X86::MOVDQUmr:
  case X86::VMOVAPSmr:
  case X86::VMOVUPSmr:
  case X86::VMOVAPDmr:
  case X86::VMOVUPDmr:
  case X86::VMOVDQAmr:
  case X86::VMOVDQUmr:
  case X86::VMOVUPSZ128mr:
  case X86::VMOVAPSZ128mr:
  case X86::VMOVUPSZ128mr_NOVLX:
  case X86::VMOVAPSZ128mr_NOVLX:
  case X86::VMOVUPDZ128mr:
  case X86::VMOVAPDZ128mr:
  case X86::VMOVDQA32Z128mr:
  case X86::VMOVDQU32Z128mr:
  case X86::VMOVDQA64Z128mr:
  case X86::VMOVDQU64Z128mr:
  case X86::VMOVDQU8Z128mr:
  case X86::VMOVDQU16Z128mr:
    MemBytes = 16;
    return true;
  case X86::VMOVUPSYmr:
  case X86::VMOVAPSYmr:
  case X86::VMOVUPDYmr:
  case X86::VMOVAPDYmr:
  case X86::VMOVDQUYmr:
  case X86::VMOVDQAYmr:
  case X86::VMOVUPSZ256mr:
  case X86::VMOVAPSZ256mr:
  case X86::VMOVUPSZ256mr_NOVLX:
  case X86::VMOVAPSZ256mr_NOVLX:
  case X86::VMOVUPDZ256mr:
  case X86::VMOVAPDZ256mr:
  case X86::VMOVDQU8Z256mr:
  case X86::VMOVDQU16Z256mr:
  case X86::VMOVDQA32Z256mr:
  case X86::VMOVDQU32Z256mr:
  case X86::VMOVDQA64Z256mr:
  case X86::VMOVDQU64Z256mr:
    MemBytes = 32;
    return true;
  case X86::VMOVUPSZmr:
  case X86::VMOVAPSZmr:
  case X86::VMOVUPDZmr:
  case X86::VMOVAPDZmr:
  case X86::VMOVDQU8Zmr:
  case X86::VMOVDQU16Zmr:
  case X86::VMOVDQA32Zmr:
  case X86::VMOVDQU32Zmr:
  case X86::VMOVDQA64Zmr:
  case X86::VMOVDQU64Zmr:
    MemBytes = 64;
    return true;
  }
  return false;
}

unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                           int &FrameIndex) const {
  unsigned Dummy;
  return X86InstrInfo::isLoadFromStackSlot(MI, FrameIndex, Dummy);
}

unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                           int &FrameIndex,
                                           unsigned &MemBytes) const {
  if (isFrameLoadOpcode(MI.getOpcode(), MemBytes))
    if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
      return MI.getOperand(0).getReg();
  return 0;
}

unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                                 int &FrameIndex) const {
  unsigned Dummy;
  if (isFrameLoadOpcode(MI.getOpcode(), Dummy)) {
    unsigned Reg;
    if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
      return Reg;
    // Check for post-frame index elimination operations
    SmallVector<const MachineMemOperand *, 1> Accesses;
    if (hasLoadFromStackSlot(MI, Accesses)) {
      FrameIndex =
          cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
              ->getFrameIndex();
      return 1;
    }
  }
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  unsigned Dummy;
  return X86InstrInfo::isStoreToStackSlot(MI, FrameIndex, Dummy);
}

unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                          int &FrameIndex,
                                          unsigned &MemBytes) const {
  if (isFrameStoreOpcode(MI.getOpcode(), MemBytes))
    if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
        isFrameOperand(MI, 0, FrameIndex))
      return MI.getOperand(X86::AddrNumOperands).getReg();
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
                                                int &FrameIndex) const {
  unsigned Dummy;
  if (isFrameStoreOpcode(MI.getOpcode(), Dummy)) {
    unsigned Reg;
    if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
      return Reg;
    // Check for post-frame index elimination operations
    SmallVector<const MachineMemOperand *, 1> Accesses;
    if (hasStoreToStackSlot(MI, Accesses)) {
      FrameIndex =
          cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
              ->getFrameIndex();
      return 1;
    }
  }
  return 0;
}

/// Return true if register is PIC base, e.g. defined by X86::MOVPC32r.
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
  // Don't waste compile time scanning use-def chains of physregs.
  if (!TargetRegisterInfo::isVirtualRegister(BaseReg))
    return false;
  bool isPICBase = false;
  for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
       E = MRI.def_instr_end(); I != E; ++I) {
    MachineInstr *DefMI = &*I;
    if (DefMI->getOpcode() != X86::MOVPC32r)
      return false;
    assert(!isPICBase && "More than one PIC base?");
    isPICBase = true;
  }
  return isPICBase;
}

bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                     AliasAnalysis *AA) const {
  switch (MI.getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV8rm_NOREX:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVAPDrm:
  case X86::MOVUPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  case X86::VMOVSSrm:
  case X86::VMOVSDrm:
  case X86::VMOVAPSrm:
  case X86::VMOVUPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVUPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  // AVX-512
  case X86::VMOVSSZrm:
  case X86::VMOVSDZrm:
  case X86::VMOVAPDZ128rm:
  case X86::VMOVAPDZ256rm:
  case X86::VMOVAPDZrm:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVAPSZ128rm_NOVLX:
  case X86::VMOVAPSZ256rm_NOVLX:
  case X86::VMOVAPSZrm:
  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU16Z128rm:
  case X86::VMOVDQU16Z256rm:
  case X86::VMOVDQU16Zrm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQU64Z128rm:
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQU64Zrm:
  case X86::VMOVDQU8Z128rm:
  case X86::VMOVDQU8Z256rm:
  case X86::VMOVDQU8Zrm:
  case X86::VMOVUPDZ128rm:
  case X86::VMOVUPDZ256rm:
  case X86::VMOVUPDZrm:
  case X86::VMOVUPSZ128rm:
  case X86::VMOVUPSZ256rm:
  case X86::VMOVUPSZ128rm_NOVLX:
  case X86::VMOVUPSZ256rm_NOVLX:
  case X86::VMOVUPSZrm: {
    // Loads from constant pools are trivially rematerializable.
    if (MI.getOperand(1 + X86::AddrBaseReg).isReg() &&
        MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
        MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
        MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
        MI.isDereferenceableInvariantLoad(AA)) {
      unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
      if (BaseReg == 0 || BaseReg == X86::RIP)
        return true;
      // Allow re-materialization of PIC load.
      if (!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal())
        return false;
      const MachineFunction &MF = *MI.getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }
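
  // For example (illustrative MIR), a RIP-relative constant-pool load like
  //   %xmm0 = MOVSSrm $rip, 1, $noreg, %const.0, $noreg
  // passes the checks above (BaseReg == X86::RIP) and can be rematerialized
  // at its use instead of being spilled and reloaded.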

  case X86::LEA32r:
  case X86::LEA64r: {
    if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
        MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
        MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
        !MI.getOperand(1 + X86::AddrDisp).isReg()) {
      // lea fi#, lea GV, etc. are all rematerializable.
      if (!MI.getOperand(1 + X86::AddrBaseReg).isReg())
        return true;
      unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
      if (BaseReg == 0)
        return true;
      // Allow re-materialization of lea PICBase + x.
      const MachineFunction &MF = *MI.getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }
  }

  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}

void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned DestReg, unsigned SubIdx,
                                 const MachineInstr &Orig,
                                 const TargetRegisterInfo &TRI) const {
  bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI);
  if (ClobbersEFLAGS && !isSafeToClobberEFLAGS(MBB, I)) {
    // The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
    // effects.
    int Value;
    switch (Orig.getOpcode()) {
    case X86::MOV32r0:  Value = 0; break;
    case X86::MOV32r1:  Value = 1; break;
    case X86::MOV32r_1: Value = -1; break;
    default:
      llvm_unreachable("Unexpected instruction!");
    }

    const DebugLoc &DL = Orig.getDebugLoc();
    BuildMI(MBB, I, DL, get(X86::MOV32ri))
        .add(Orig.getOperand(0))
        .addImm(Value);
  } else {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
    MBB.insert(I, MI);
  }

  MachineInstr &NewMI = *std::prev(I);
  NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
}

/// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg() && MO.isDef() &&
        MO.getReg() == X86::EFLAGS && !MO.isDead()) {
      return true;
    }
  }
  return false;
}

/// Return the shift count of a machine operand, truncated the same way the
/// hardware truncates it at execution time.
inline static unsigned getTruncatedShiftCount(const MachineInstr &MI,
                                              unsigned ShiftAmtOperandIdx) {
  // The shift count is six bits with the REX.W prefix and five bits without.
  unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
  unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm();
  return Imm & ShiftCountMask;
}
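
// For example, a 64-bit shift whose immediate operand is 65 actually shifts
// by 65 & 63 == 1 at execution time, and 1 is what the helper above returns.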

/// Check whether the given shift count can be represented by a LEA
/// instruction.
inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
  // Left shift instructions can be transformed into load-effective-address
  // instructions if we can encode them appropriately.
  // A LEA instruction utilizes a SIB byte to encode its scale factor.
  // The SIB.scale field is two bits wide, which means that we can encode any
  // shift amount less than 4.
  return ShAmt < 4 && ShAmt > 0;
}
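
// For example, "shlq $3, %reg" can be rewritten as an LEA with scale
// 1 << 3 == 8, since the two-bit SIB.scale field encodes scale factors of
// 1, 2, 4 and 8; a shift by 4 would need scale 16, which is not encodable.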

bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
                                  unsigned Opc, bool AllowSP, unsigned &NewSrc,
                                  bool &isKill, MachineOperand &ImplicitOp,
                                  LiveVariables *LV) const {
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetRegisterClass *RC;
  if (AllowSP) {
    RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
  } else {
    RC = Opc != X86::LEA32r ?
      &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
  }
  unsigned SrcReg = Src.getReg();

  // For both LEA64 and LEA32 the register already has essentially the right
  // type (32-bit or 64-bit); we may just need to forbid SP.
  if (Opc != X86::LEA64_32r) {
    NewSrc = SrcReg;
    isKill = Src.isKill();
    assert(!Src.isUndef() && "Undef op doesn't need optimization");

    if (TargetRegisterInfo::isVirtualRegister(NewSrc) &&
        !MF.getRegInfo().constrainRegClass(NewSrc, RC))
      return false;

    return true;
  }

  // This is for an LEA64_32r and incoming registers are 32-bit. One way or
  // another we need to add 64-bit registers to the final MI.
  if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    ImplicitOp = Src;
    ImplicitOp.setImplicit();

    NewSrc = getX86SubSuperRegister(Src.getReg(), 64);
    isKill = Src.isKill();
    assert(!Src.isUndef() && "Undef op doesn't need optimization");
  } else {
    // Virtual register of the wrong class; we have to create a temporary
    // 64-bit vreg to feed into the LEA.
    NewSrc = MF.getRegInfo().createVirtualRegister(RC);
    MachineInstr *Copy =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY))
            .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
            .add(Src);

    // Which is obviously going to be dead after we're done with it.
    isKill = true;

    if (LV)
      LV->replaceKillInstruction(SrcReg, MI, *Copy);
  }

  // We've set all the parameters without issue.
  return true;
}

MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(
    unsigned MIOpc, MachineFunction::iterator &MFI, MachineInstr &MI,
    LiveVariables *LV, bool Is8BitOp) const {
  // We handle 8-bit adds and various 16-bit opcodes in the switch below.
  MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
  assert((Is8BitOp || RegInfo.getTargetRegisterInfo()->getRegSizeInBits(
              *RegInfo.getRegClass(MI.getOperand(0).getReg())) == 16) &&
         "Unexpected type for LEA transform");

  // TODO: For a 32-bit target, we need to adjust the LEA variables with
  // something like this:
  //   Opcode = X86::LEA32r;
  //   InRegLEA = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
  //   OutRegLEA =
  //       Is8BitOp ? RegInfo.createVirtualRegister(&X86::GR32ABCD_RegClass)
  //                : RegInfo.createVirtualRegister(&X86::GR32RegClass);
  if (!Subtarget.is64Bit())
    return nullptr;

  unsigned Opcode = X86::LEA64_32r;
  unsigned InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
  unsigned OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass);

  // Build and insert into an implicit UNDEF value. This is OK because
  // we will be shifting and then extracting the lower 8/16 bits.
  // This has the potential to cause a partial register stall, e.g.
  //   movw (%rbp,%rcx,2), %dx
  //   leal -65(%rdx), %esi
  // But testing has shown this *does* help performance in 64-bit mode (at
  // least on modern x86 machines).
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  unsigned Dest = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();
  bool IsDead = MI.getOperand(0).isDead();
  bool IsKill = MI.getOperand(1).isKill();
  unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit;
  assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization");
  BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA);
  MachineInstr *InsMI =
      BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
          .addReg(InRegLEA, RegState::Define, SubReg)
          .addReg(Src, getKillRegState(IsKill));

  MachineInstrBuilder MIB =
      BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(Opcode), OutRegLEA);
  switch (MIOpc) {
  default: llvm_unreachable("Unreachable!");
  case X86::SHL8ri:
  case X86::SHL16ri: {
    unsigned ShAmt = MI.getOperand(2).getImm();
    MIB.addReg(0).addImm(1ULL << ShAmt)
       .addReg(InRegLEA, RegState::Kill).addImm(0).addReg(0);
    break;
  }
  case X86::INC8r:
  case X86::INC16r:
    addRegOffset(MIB, InRegLEA, true, 1);
    break;
  case X86::DEC8r:
  case X86::DEC16r:
    addRegOffset(MIB, InRegLEA, true, -1);
    break;
  case X86::ADD8ri:
  case X86::ADD8ri_DB:
  case X86::ADD16ri:
  case X86::ADD16ri8:
  case X86::ADD16ri_DB:
  case X86::ADD16ri8_DB:
    addRegOffset(MIB, InRegLEA, true, MI.getOperand(2).getImm());
    break;
  case X86::ADD8rr:
  case X86::ADD8rr_DB:
  case X86::ADD16rr:
  case X86::ADD16rr_DB: {
    unsigned Src2 = MI.getOperand(2).getReg();
    bool IsKill2 = MI.getOperand(2).isKill();
    assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization");
    unsigned InRegLEA2 = 0;
    MachineInstr *InsMI2 = nullptr;
    if (Src == Src2) {
      // ADD8rr/ADD16rr killed %reg1028, %reg1028
      // just a single insert_subreg.
      addRegReg(MIB, InRegLEA, true, InRegLEA, false);
    } else {
      if (Subtarget.is64Bit())
        InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
      else
        InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
      // Build and insert into an implicit UNDEF value. This is OK because
      // we will be shifting and then extracting the lower 8/16 bits.
      BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA2);
      InsMI2 = BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
                   .addReg(InRegLEA2, RegState::Define, SubReg)
                   .addReg(Src2, getKillRegState(IsKill2));
      addRegReg(MIB, InRegLEA, true, InRegLEA2, true);
    }
    if (LV && IsKill2 && InsMI2)
      LV->replaceKillInstruction(Src2, MI, *InsMI2);
    break;
  }
  }

  MachineInstr *NewMI = MIB;
  MachineInstr *ExtMI =
      BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
          .addReg(Dest, RegState::Define | getDeadRegState(IsDead))
          .addReg(OutRegLEA, RegState::Kill, SubReg);

  if (LV) {
    // Update live variables.
    LV->getVarInfo(InRegLEA).Kills.push_back(NewMI);
    LV->getVarInfo(OutRegLEA).Kills.push_back(ExtMI);
    if (IsKill)
      LV->replaceKillInstruction(Src, MI, *InsMI);
    if (IsDead)
      LV->replaceKillInstruction(Dest, MI, *ExtMI);
  }

  return ExtMI;
}

/// This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand. This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineInstr &MI, LiveVariables *LV) const {
  // The following opcodes also set the condition code register(s). Only
  // convert them to an equivalent LEA if the condition code register defs
  // are dead!
  if (hasLiveCondCodeDef(MI))
    return nullptr;

  MachineFunction &MF = *MI.getParent()->getParent();
  // All input instructions are two-address instructions. Get the known
  // operands.
  const MachineOperand &Dest = MI.getOperand(0);
  const MachineOperand &Src = MI.getOperand(1);

  // Ideally, operations with undef should be folded before we get here, but we
  // can't guarantee it. Bail out because optimizing undefs is a waste of time.
  // Without this, we have to forward undef state to new register operands to
  // avoid machine verifier errors.
  if (Src.isUndef())
    return nullptr;
  if (MI.getNumOperands() > 2)
    if (MI.getOperand(2).isReg() && MI.getOperand(2).isUndef())
      return nullptr;

  MachineInstr *NewMI = nullptr;
  bool Is64Bit = Subtarget.is64Bit();

  bool Is8BitOp = false;
  unsigned MIOpc = MI.getOpcode();
  switch (MIOpc) {
  default: llvm_unreachable("Unreachable!");
  case X86::SHL64ri: {
    assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
    unsigned ShAmt = getTruncatedShiftCount(MI, 2);
    if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;

    // LEA can't handle RSP.
    if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
        !MF.getRegInfo().constrainRegClass(Src.getReg(),
                                           &X86::GR64_NOSPRegClass))
      return nullptr;

    NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
                .add(Dest)
                .addReg(0)
                .addImm(1ULL << ShAmt)
                .add(Src)
                .addImm(0)
                .addReg(0);
    break;
  }
  case X86::SHL32ri: {
    assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
    unsigned ShAmt = getTruncatedShiftCount(MI, 2);
    if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;

    unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;

    // LEA can't handle ESP.
    bool isKill;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
                        SrcReg, isKill, ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB =
        BuildMI(MF, MI.getDebugLoc(), get(Opc))
            .add(Dest)
            .addReg(0)
            .addImm(1ULL << ShAmt)
            .addReg(SrcReg, getKillRegState(isKill))
            .addImm(0)
            .addReg(0);
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);
    NewMI = MIB;

    break;
  }
  case X86::SHL8ri:
    Is8BitOp = true;
    LLVM_FALLTHROUGH;
  case X86::SHL16ri: {
    assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
    unsigned ShAmt = getTruncatedShiftCount(MI, 2);
    if (!isTruncatedShiftCountForLEA(ShAmt))
      return nullptr;
    return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
  }
  case X86::INC64r:
  case X86::INC32r: {
    assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
    unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r :
        (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
    bool isKill;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
                        ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB =
        BuildMI(MF, MI.getDebugLoc(), get(Opc))
            .add(Dest)
            .addReg(SrcReg, getKillRegState(isKill));
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    NewMI = addOffset(MIB, 1);
    break;
  }
  case X86::DEC64r:
  case X86::DEC32r: {
    assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
    unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
        : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);

    bool isKill;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
                        ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                                  .add(Dest)
                                  .addReg(SrcReg, getKillRegState(isKill));
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    NewMI = addOffset(MIB, -1);

    break;
  }
  case X86::DEC8r:
  case X86::INC8r:
    Is8BitOp = true;
    LLVM_FALLTHROUGH;
  case X86::DEC16r:
  case X86::INC16r:
    return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
  case X86::ADD64rr:
  case X86::ADD64rr_DB:
  case X86::ADD32rr:
  case X86::ADD32rr_DB: {
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    unsigned Opc;
    if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
      Opc = X86::LEA64r;
    else
      Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;

    bool isKill;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
                        SrcReg, isKill, ImplicitOp, LV))
      return nullptr;

    const MachineOperand &Src2 = MI.getOperand(2);
    bool isKill2;
    unsigned SrcReg2;
    MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false,
                        SrcReg2, isKill2, ImplicitOp2, LV))
      return nullptr;

    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest);
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);
    if (ImplicitOp2.getReg() != 0)
      MIB.add(ImplicitOp2);

    NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
    if (LV && Src2.isKill())
      LV->replaceKillInstruction(SrcReg2, MI, *NewMI);
    break;
  }
  case X86::ADD8rr:
  case X86::ADD8rr_DB:
    Is8BitOp = true;
    LLVM_FALLTHROUGH;
  case X86::ADD16rr:
  case X86::ADD16rr_DB:
    return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
  case X86::ADD64ri32:
  case X86::ADD64ri8:
  case X86::ADD64ri32_DB:
  case X86::ADD64ri8_DB:
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    NewMI = addOffset(
        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
        MI.getOperand(2));
    break;
  case X86::ADD32ri:
  case X86::ADD32ri8:
  case X86::ADD32ri_DB:
  case X86::ADD32ri8_DB: {
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;

    bool isKill;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
                        SrcReg, isKill, ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                                  .add(Dest)
                                  .addReg(SrcReg, getKillRegState(isKill));
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    NewMI = addOffset(MIB, MI.getOperand(2));
    break;
  }
  case X86::ADD8ri:
  case X86::ADD8ri_DB:
    Is8BitOp = true;
    LLVM_FALLTHROUGH;
  case X86::ADD16ri:
  case X86::ADD16ri8:
  case X86::ADD16ri_DB:
  case X86::ADD16ri8_DB:
    return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
  case X86::VMOVDQU8Z128rmk:
  case X86::VMOVDQU8Z256rmk:
  case X86::VMOVDQU8Zrmk:
  case X86::VMOVDQU16Z128rmk:
  case X86::VMOVDQU16Z256rmk:
  case X86::VMOVDQU16Zrmk:
  case X86::VMOVDQU32Z128rmk: case X86::VMOVDQA32Z128rmk:
  case X86::VMOVDQU32Z256rmk: case X86::VMOVDQA32Z256rmk:
  case X86::VMOVDQU32Zrmk:    case X86::VMOVDQA32Zrmk:
  case X86::VMOVDQU64Z128rmk: case X86::VMOVDQA64Z128rmk:
  case X86::VMOVDQU64Z256rmk: case X86::VMOVDQA64Z256rmk:
  case X86::VMOVDQU64Zrmk:    case X86::VMOVDQA64Zrmk:
  case X86::VMOVUPDZ128rmk:   case X86::VMOVAPDZ128rmk:
  case X86::VMOVUPDZ256rmk:   case X86::VMOVAPDZ256rmk:
  case X86::VMOVUPDZrmk:      case X86::VMOVAPDZrmk:
  case X86::VMOVUPSZ128rmk:   case X86::VMOVAPSZ128rmk:
  case X86::VMOVUPSZ256rmk:   case X86::VMOVAPSZ256rmk:
  case X86::VMOVUPSZrmk:      case X86::VMOVAPSZrmk: {
    unsigned Opc;
    switch (MIOpc) {
    default: llvm_unreachable("Unreachable!");
    case X86::VMOVDQU8Z128rmk:  Opc = X86::VPBLENDMBZ128rmk; break;
    case X86::VMOVDQU8Z256rmk:  Opc = X86::VPBLENDMBZ256rmk; break;
    case X86::VMOVDQU8Zrmk:     Opc = X86::VPBLENDMBZrmk;    break;
    case X86::VMOVDQU16Z128rmk: Opc = X86::VPBLENDMWZ128rmk; break;
    case X86::VMOVDQU16Z256rmk: Opc = X86::VPBLENDMWZ256rmk; break;
    case X86::VMOVDQU16Zrmk:    Opc = X86::VPBLENDMWZrmk;    break;
    case X86::VMOVDQU32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
    case X86::VMOVDQU32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
    case X86::VMOVDQU32Zrmk:    Opc = X86::VPBLENDMDZrmk;    break;
    case X86::VMOVDQU64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
    case X86::VMOVDQU64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
    case X86::VMOVDQU64Zrmk:    Opc = X86::VPBLENDMQZrmk;    break;
    case X86::VMOVUPDZ128rmk:   Opc = X86::VBLENDMPDZ128rmk; break;
    case X86::VMOVUPDZ256rmk:   Opc = X86::VBLENDMPDZ256rmk; break;
    case X86::VMOVUPDZrmk:      Opc = X86::VBLENDMPDZrmk;    break;
    case X86::VMOVUPSZ128rmk:   Opc = X86::VBLENDMPSZ128rmk; break;
    case X86::VMOVUPSZ256rmk:   Opc = X86::VBLENDMPSZ256rmk; break;
    case X86::VMOVUPSZrmk:      Opc = X86::VBLENDMPSZrmk;    break;
    case X86::VMOVDQA32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
    case X86::VMOVDQA32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
    case X86::VMOVDQA32Zrmk:    Opc = X86::VPBLENDMDZrmk;    break;
    case X86::VMOVDQA64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
    case X86::VMOVDQA64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
    case X86::VMOVDQA64Zrmk:    Opc = X86::VPBLENDMQZrmk;    break;
    case X86::VMOVAPDZ128rmk:   Opc = X86::VBLENDMPDZ128rmk; break;
    case X86::VMOVAPDZ256rmk:   Opc = X86::VBLENDMPDZ256rmk; break;
    case X86::VMOVAPDZrmk:      Opc = X86::VBLENDMPDZrmk;    break;
    case X86::VMOVAPSZ128rmk:   Opc = X86::VBLENDMPSZ128rmk; break;
    case X86::VMOVAPSZ256rmk:   Opc = X86::VBLENDMPSZ256rmk; break;
    case X86::VMOVAPSZrmk:      Opc = X86::VBLENDMPSZrmk;    break;
    }

    NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                .add(Dest)
                .add(MI.getOperand(2))
                .add(Src)
                .add(MI.getOperand(3))
                .add(MI.getOperand(4))
                .add(MI.getOperand(5))
                .add(MI.getOperand(6))
                .add(MI.getOperand(7));
    break;
  }
  case X86::VMOVDQU8Z128rrk:
  case X86::VMOVDQU8Z256rrk:
  case X86::VMOVDQU8Zrrk:
  case X86::VMOVDQU16Z128rrk:
  case X86::VMOVDQU16Z256rrk:
  case X86::VMOVDQU16Zrrk:
  case X86::VMOVDQU32Z128rrk: case X86::VMOVDQA32Z128rrk:
  case X86::VMOVDQU32Z256rrk: case X86::VMOVDQA32Z256rrk:
  case X86::VMOVDQU32Zrrk:    case X86::VMOVDQA32Zrrk:
  case X86::VMOVDQU64Z128rrk: case X86::VMOVDQA64Z128rrk:
  case X86::VMOVDQU64Z256rrk: case X86::VMOVDQA64Z256rrk:
  case X86::VMOVDQU64Zrrk:    case X86::VMOVDQA64Zrrk:
  case X86::VMOVUPDZ128rrk:   case X86::VMOVAPDZ128rrk:
  case X86::VMOVUPDZ256rrk:   case X86::VMOVAPDZ256rrk:
  case X86::VMOVUPDZrrk:      case X86::VMOVAPDZrrk:
  case X86::VMOVUPSZ128rrk:   case X86::VMOVAPSZ128rrk:
  case X86::VMOVUPSZ256rrk:   case X86::VMOVAPSZ256rrk:
  case X86::VMOVUPSZrrk:      case X86::VMOVAPSZrrk: {
    unsigned Opc;
    switch (MIOpc) {
    default: llvm_unreachable("Unreachable!");
    case X86::VMOVDQU8Z128rrk:  Opc = X86::VPBLENDMBZ128rrk; break;
    case X86::VMOVDQU8Z256rrk:  Opc = X86::VPBLENDMBZ256rrk; break;
    case X86::VMOVDQU8Zrrk:     Opc = X86::VPBLENDMBZrrk;    break;
    case X86::VMOVDQU16Z128rrk: Opc = X86::VPBLENDMWZ128rrk; break;
    case X86::VMOVDQU16Z256rrk: Opc = X86::VPBLENDMWZ256rrk; break;
    case X86::VMOVDQU16Zrrk:    Opc = X86::VPBLENDMWZrrk;    break;
    case X86::VMOVDQU32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
    case X86::VMOVDQU32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
    case X86::VMOVDQU32Zrrk:    Opc = X86::VPBLENDMDZrrk;    break;
    case X86::VMOVDQU64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
    case X86::VMOVDQU64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
    case X86::VMOVDQU64Zrrk:    Opc = X86::VPBLENDMQZrrk;    break;
    case X86::VMOVUPDZ128rrk:   Opc = X86::VBLENDMPDZ128rrk; break;
    case X86::VMOVUPDZ256rrk:   Opc = X86::VBLENDMPDZ256rrk; break;
    case X86::VMOVUPDZrrk:      Opc = X86::VBLENDMPDZrrk;    break;
    case X86::VMOVUPSZ128rrk:   Opc = X86::VBLENDMPSZ128rrk; break;
    case X86::VMOVUPSZ256rrk:   Opc = X86::VBLENDMPSZ256rrk; break;
    case X86::VMOVUPSZrrk:      Opc = X86::VBLENDMPSZrrk;    break;
    case X86::VMOVDQA32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
    case X86::VMOVDQA32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
    case X86::VMOVDQA32Zrrk:    Opc = X86::VPBLENDMDZrrk;    break;
    case X86::VMOVDQA64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
    case X86::VMOVDQA64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
    case X86::VMOVDQA64Zrrk:    Opc = X86::VPBLENDMQZrrk;    break;
    case X86::VMOVAPDZ128rrk:   Opc = X86::VBLENDMPDZ128rrk; break;
    case X86::VMOVAPDZ256rrk:   Opc = X86::VBLENDMPDZ256rrk; break;
    case X86::VMOVAPDZrrk:      Opc = X86::VBLENDMPDZrrk;    break;
    case X86::VMOVAPSZ128rrk:   Opc = X86::VBLENDMPSZ128rrk; break;
    case X86::VMOVAPSZ256rrk:   Opc = X86::VBLENDMPSZ256rrk; break;
    case X86::VMOVAPSZrrk:      Opc = X86::VBLENDMPSZrrk;    break;
    }

    NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                .add(Dest)
                .add(MI.getOperand(2))
                .add(Src)
                .add(MI.getOperand(3));
    break;
  }
  }

  if (!NewMI) return nullptr;

  if (LV) {  // Update live variables
    if (Src.isKill())
      LV->replaceKillInstruction(Src.getReg(), MI, *NewMI);
    if (Dest.isDead())
      LV->replaceKillInstruction(Dest.getReg(), MI, *NewMI);
  }

  MFI->insert(MI.getIterator(), NewMI);  // Insert the new inst
  return NewMI;
}

/// This determines which of three possible cases of a three-source commute
/// the source indexes correspond to, taking into account any mask operands.
/// Commuting a passthru operand is never allowed; an unknown index pairing
/// is an error.
/// Case 0 - Possible to commute the first and second operands.
/// Case 1 - Possible to commute the first and third operands.
/// Case 2 - Possible to commute the second and third operands.
static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1,
                                       unsigned SrcOpIdx2) {
  // Put the lowest index to SrcOpIdx1 to simplify the checks below.
  if (SrcOpIdx1 > SrcOpIdx2)
    std::swap(SrcOpIdx1, SrcOpIdx2);

  unsigned Op1 = 1, Op2 = 2, Op3 = 3;
  if (X86II::isKMasked(TSFlags)) {
    Op2++;
    Op3++;
  }

  if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2)
    return 0;
  if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3)
    return 1;
  if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3)
    return 2;
  llvm_unreachable("Unknown three src commute case.");
}
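
// For a masked instruction such as VFMADD213PSZrk (illustrative opcode) the
// operand order is (dst, tied src1, mask, src2, src3), so with
// X86II::isKMasked the three sources sit at operand indexes 1, 3 and 4
// rather than 1, 2 and 3.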

unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
    const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2,
    const X86InstrFMA3Group &FMA3Group) const {

  unsigned Opc = MI.getOpcode();

  // TODO: Commuting the 1st operand of FMA*_Int requires some additional
  // analysis. The commute optimization is legal only if all users of FMA*_Int
  // use only the lowest element of the FMA*_Int instruction. Such an analysis
  // is not implemented yet, so just return 0 in that case.
  // When such an analysis becomes available, this will be the right place to
  // call it.
  assert(!(FMA3Group.isIntrinsic() && (SrcOpIdx1 == 1 || SrcOpIdx2 == 1)) &&
         "Intrinsic instructions can't commute operand 1");

  // Determine which case this commute is or if it can't be done.
  unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
                                         SrcOpIdx2);
  assert(Case < 3 && "Unexpected case number!");

  // Define the FMA forms mapping array that helps to map input FMA form
  // to output FMA form to preserve the operation semantics after
  // commuting the operands.
  const unsigned Form132Index = 0;
  const unsigned Form213Index = 1;
  const unsigned Form231Index = 2;
  static const unsigned FormMapping[][3] = {
    // 0: SrcOpIdx1 == 1 && SrcOpIdx2 == 2;
    //   FMA132 A, C, b; ==> FMA231 C, A, b;
    //   FMA213 B, A, c; ==> FMA213 A, B, c;
    //   FMA231 C, A, b; ==> FMA132 A, C, b;
    { Form231Index, Form213Index, Form132Index },
    // 1: SrcOpIdx1 == 1 && SrcOpIdx2 == 3;
    //   FMA132 A, c, B; ==> FMA132 B, c, A;
    //   FMA213 B, a, C; ==> FMA231 C, a, B;
    //   FMA231 C, a, B; ==> FMA213 B, a, C;
    { Form132Index, Form231Index, Form213Index },
    // 2: SrcOpIdx1 == 2 && SrcOpIdx2 == 3;
    //   FMA132 a, C, B; ==> FMA213 a, B, C;
    //   FMA213 b, A, C; ==> FMA132 b, C, A;
    //   FMA231 c, A, B; ==> FMA231 c, B, A;
    { Form213Index, Form132Index, Form231Index }
  };
1279 | |
1280 | unsigned FMAForms[3]; |
1281 | FMAForms[0] = FMA3Group.get132Opcode(); |
1282 | FMAForms[1] = FMA3Group.get213Opcode(); |
1283 | FMAForms[2] = FMA3Group.get231Opcode(); |
1284 | unsigned FormIndex; |
1285 | for (FormIndex = 0; FormIndex < 3; FormIndex++) |
1286 | if (Opc == FMAForms[FormIndex]) |
1287 | break; |
1288 | |
1289 | // Everything is ready, just adjust the FMA opcode and return it. |
1290 | FormIndex = FormMapping[Case][FormIndex]; |
1291 | return FMAForms[FormIndex]; |
1292 | } |
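// Illustrative sketch (hypothetical helper, not used by the code above):
// the FormMapping remap as a pure function, with the 213 case worked out.
static unsigned commuteFMAFormSketch(unsigned Case, unsigned FormIndex) {
  static const unsigned Map[3][3] = {
      {2, 1, 0}, // ops 1&2: 132->231, 213->213, 231->132
      {0, 2, 1}, // ops 1&3: 132->132, 213->231, 231->213
      {1, 0, 2}, // ops 2&3: 132->213, 213->132, 231->231
  };
  return Map[Case][FormIndex];
}
// Example: commuteFMAFormSketch(1, Form213Index) == Form231Index, because
// FMA213 B, a, C and FMA231 C, a, B both compute a*B + C.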
1293 | |
1294 | static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1, |
1295 | unsigned SrcOpIdx2) { |
1296 | // Determine which case this commute is or if it can't be done. |
1297 | unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1, |
1298 | SrcOpIdx2); |
1299 | assert(Case < 3 && "Unexpected case value!"); |
1300 | |
1301 | // For each case we need to swap two pairs of bits in the final immediate. |
1302 | static const uint8_t SwapMasks[3][4] = { |
1303 | { 0x04, 0x10, 0x08, 0x20 }, // Swap bits 2/4 and 3/5. |
1304 | { 0x02, 0x10, 0x08, 0x40 }, // Swap bits 1/4 and 3/6. |
1305 | { 0x02, 0x04, 0x20, 0x40 }, // Swap bits 1/2 and 5/6. |
1306 | }; |
1307 | |
1308 | uint8_t Imm = MI.getOperand(MI.getNumOperands()-1).getImm(); |
1309 | // Clear out the bits we are swapping. |
1310 | uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] | |
1311 | SwapMasks[Case][2] | SwapMasks[Case][3]); |
1312 | // If the immediate had a bit of the pair set, then set the opposite bit. |
1313 | if (Imm & SwapMasks[Case][0]) NewImm |= SwapMasks[Case][1]; |
1314 | if (Imm & SwapMasks[Case][1]) NewImm |= SwapMasks[Case][0]; |
1315 | if (Imm & SwapMasks[Case][2]) NewImm |= SwapMasks[Case][3]; |
1316 | if (Imm & SwapMasks[Case][3]) NewImm |= SwapMasks[Case][2]; |
1317 | MI.getOperand(MI.getNumOperands()-1).setImm(NewImm); |
1318 | } |
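// Illustrative sketch (hypothetical helper, not part of the original file):
// the same immediate rewrite as a pure function. Bit (A<<2 | B<<1 | C) of
// the VPTERNLOG immediate gives the result for source bits A/B/C.
static uint8_t swapTernlogImmSketch(uint8_t Imm, const uint8_t M[4]) {
  uint8_t NewImm = Imm & ~(M[0] | M[1] | M[2] | M[3]);
  if (Imm & M[0]) NewImm |= M[1];
  if (Imm & M[1]) NewImm |= M[0];
  if (Imm & M[2]) NewImm |= M[3];
  if (Imm & M[3]) NewImm |= M[2];
  return NewImm;
}
// Worked example: imm 0xCA computes A ? B : C. Commuting sources 2 and 3
// (case 2) swaps bits 1<->2 and 5<->6, so swapTernlogImmSketch(0xCA,
// SwapMasks[2]) == 0xAC, which computes A ? C : B.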
1319 | |
1320 | // Returns true if this is a VPERMI2 or VPERMT2 instruction that can be |
1321 | // commuted. |
1322 | static bool isCommutableVPERMV3Instruction(unsigned Opcode) { |
1323 | #define VPERM_CASES(Suffix) \ |
1324 | case X86::VPERMI2##Suffix##128rr: case X86::VPERMT2##Suffix##128rr: \ |
1325 | case X86::VPERMI2##Suffix##256rr: case X86::VPERMT2##Suffix##256rr: \ |
1326 | case X86::VPERMI2##Suffix##rr: case X86::VPERMT2##Suffix##rr: \ |
1327 | case X86::VPERMI2##Suffix##128rm: case X86::VPERMT2##Suffix##128rm: \ |
1328 | case X86::VPERMI2##Suffix##256rm: case X86::VPERMT2##Suffix##256rm: \ |
1329 | case X86::VPERMI2##Suffix##rm: case X86::VPERMT2##Suffix##rm: \ |
1330 | case X86::VPERMI2##Suffix##128rrkz: case X86::VPERMT2##Suffix##128rrkz: \ |
1331 | case X86::VPERMI2##Suffix##256rrkz: case X86::VPERMT2##Suffix##256rrkz: \ |
1332 | case X86::VPERMI2##Suffix##rrkz: case X86::VPERMT2##Suffix##rrkz: \ |
1333 | case X86::VPERMI2##Suffix##128rmkz: case X86::VPERMT2##Suffix##128rmkz: \ |
1334 | case X86::VPERMI2##Suffix##256rmkz: case X86::VPERMT2##Suffix##256rmkz: \ |
1335 | case X86::VPERMI2##Suffix##rmkz: case X86::VPERMT2##Suffix##rmkz: |
1336 | |
1337 | #define VPERM_CASES_BROADCAST(Suffix) \ |
1338 | VPERM_CASES(Suffix) \ |
1339 | case X86::VPERMI2##Suffix##128rmb: case X86::VPERMT2##Suffix##128rmb: \ |
1340 | case X86::VPERMI2##Suffix##256rmb: case X86::VPERMT2##Suffix##256rmb: \ |
1341 | case X86::VPERMI2##Suffix##rmb: case X86::VPERMT2##Suffix##rmb: \ |
1342 | case X86::VPERMI2##Suffix##128rmbkz: case X86::VPERMT2##Suffix##128rmbkz: \ |
1343 | case X86::VPERMI2##Suffix##256rmbkz: case X86::VPERMT2##Suffix##256rmbkz: \ |
1344 | case X86::VPERMI2##Suffix##rmbkz: case X86::VPERMT2##Suffix##rmbkz: |
1345 | |
1346 | switch (Opcode) { |
1347 | default: return false; |
1348 | VPERM_CASES(B) |
1349 | VPERM_CASES_BROADCAST(D) |
1350 | VPERM_CASES_BROADCAST(PD) |
1351 | VPERM_CASES_BROADCAST(PS) |
1352 | VPERM_CASES_BROADCAST(Q) |
1353 | VPERM_CASES(W) |
1354 | return true; |
1355 | } |
1356 | #undef VPERM_CASES_BROADCAST |
1357 | #undef VPERM_CASES |
1358 | } |
1359 | |
1360 | // Returns commuted opcode for VPERMI2 and VPERMT2 instructions by switching |
1361 | // from the I opcode to the T opcode and vice versa. |
1362 | static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) { |
1363 | #define VPERM_CASES(Orig, New) \ |
1364 | case X86::Orig##128rr: return X86::New##128rr; \ |
1365 | case X86::Orig##128rrkz: return X86::New##128rrkz; \ |
1366 | case X86::Orig##128rm: return X86::New##128rm; \ |
1367 | case X86::Orig##128rmkz: return X86::New##128rmkz; \ |
1368 | case X86::Orig##256rr: return X86::New##256rr; \ |
1369 | case X86::Orig##256rrkz: return X86::New##256rrkz; \ |
1370 | case X86::Orig##256rm: return X86::New##256rm; \ |
1371 | case X86::Orig##256rmkz: return X86::New##256rmkz; \ |
1372 | case X86::Orig##rr: return X86::New##rr; \ |
1373 | case X86::Orig##rrkz: return X86::New##rrkz; \ |
1374 | case X86::Orig##rm: return X86::New##rm; \ |
1375 | case X86::Orig##rmkz: return X86::New##rmkz; |
1376 | |
1377 | #define VPERM_CASES_BROADCAST(Orig, New) \ |
1378 | VPERM_CASES(Orig, New) \ |
1379 | case X86::Orig##128rmb: return X86::New##128rmb; \ |
1380 | case X86::Orig##128rmbkz: return X86::New##128rmbkz; \ |
1381 | case X86::Orig##256rmb: return X86::New##256rmb; \ |
1382 | case X86::Orig##256rmbkz: return X86::New##256rmbkz; \ |
1383 | case X86::Orig##rmb: return X86::New##rmb; \ |
1384 | case X86::Orig##rmbkz: return X86::New##rmbkz; |
1385 | |
1386 | switch (Opcode) { |
1387 | VPERM_CASES(VPERMI2B, VPERMT2B) |
1388 | VPERM_CASES_BROADCAST(VPERMI2D, VPERMT2D) |
1389 | VPERM_CASES_BROADCAST(VPERMI2PD, VPERMT2PD) |
1390 | VPERM_CASES_BROADCAST(VPERMI2PS, VPERMT2PS) |
1391 | VPERM_CASES_BROADCAST(VPERMI2Q, VPERMT2Q) |
1392 | VPERM_CASES(VPERMI2W, VPERMT2W) |
1393 | VPERM_CASES(VPERMT2B, VPERMI2B) |
1394 | VPERM_CASES_BROADCAST(VPERMT2D, VPERMI2D) |
1395 | VPERM_CASES_BROADCAST(VPERMT2PD, VPERMI2PD) |
1396 | VPERM_CASES_BROADCAST(VPERMT2PS, VPERMI2PS) |
1397 | VPERM_CASES_BROADCAST(VPERMT2Q, VPERMI2Q) |
1398 | VPERM_CASES(VPERMT2W, VPERMI2W) |
1399 | } |
1400 | |
1401 | llvm_unreachable("Unreachable!"); |
1402 | #undef VPERM_CASES_BROADCAST |
1403 | #undef VPERM_CASES |
1404 | } |
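// Note (illustrative): VPERMI2 ties the index vector to the destination,
// while VPERMT2 ties the first table vector to it. Since that tie is the
// only difference between the two forms, commuting the index operand with
// the first table operand reduces to switching the opcode as done above.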
1405 | |
1406 | MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, |
1407 | unsigned OpIdx1, |
1408 | unsigned OpIdx2) const { |
1409 | auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & { |
1410 | if (NewMI) |
1411 | return *MI.getParent()->getParent()->CloneMachineInstr(&MI); |
1412 | return MI; |
1413 | }; |
1414 | |
1415 | switch (MI.getOpcode()) { |
1416 | case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I) |
1417 | case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I) |
1418 | case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I) |
1419 | case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I) |
1420 | case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I) |
1421 | case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I) |
1422 | unsigned Opc; |
1423 | unsigned Size; |
1424 | switch (MI.getOpcode()) { |
1425 | default: llvm_unreachable("Unreachable!"); |
1426 | case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break; |
1427 | case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break; |
1428 | case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break; |
1429 | case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break; |
1430 | case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break; |
1431 | case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break; |
1432 | } |
1433 | unsigned Amt = MI.getOperand(3).getImm(); |
1434 | auto &WorkingMI = cloneIfNew(MI); |
1435 | WorkingMI.setDesc(get(Opc)); |
1436 | WorkingMI.getOperand(3).setImm(Size - Amt); |
1437 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
1438 | OpIdx1, OpIdx2); |
1439 | } |
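// Worked example (illustrative): SHRD32rri8 A, B, I computes
// (A >> I) | (B << (32 - I)), and SHLD32rri8 B, A, 32-I computes
// (B << (32 - I)) | (A >> I) -- the same value. E.g. with A = 0x12345678,
// B = 0xABCDEF01, I = 8, both give 0x01123456, which is why swapping the
// registers and using Size - Amt preserves the result.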
1440 | case X86::PFSUBrr: |
1441 | case X86::PFSUBRrr: { |
1442 | // PFSUB x, y: x = x - y |
1443 | // PFSUBR x, y: x = y - x |
1444 | unsigned Opc = |
1445 | (X86::PFSUBRrr == MI.getOpcode() ? X86::PFSUBrr : X86::PFSUBRrr); |
1446 | auto &WorkingMI = cloneIfNew(MI); |
1447 | WorkingMI.setDesc(get(Opc)); |
1448 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
1449 | OpIdx1, OpIdx2); |
1450 | } |
1451 | case X86::BLENDPDrri: |
1452 | case X86::BLENDPSrri: |
1453 | case X86::VBLENDPDrri: |
1454 | case X86::VBLENDPSrri: |
1455 | // If we're optimizing for size, try to use MOVSD/MOVSS. |
1456 | if (MI.getParent()->getParent()->getFunction().hasOptSize()) { |
1457 | unsigned Mask, Opc; |
1458 | switch (MI.getOpcode()) { |
1459 | default: llvm_unreachable("Unreachable!"); |
1460 | case X86::BLENDPDrri: Opc = X86::MOVSDrr; Mask = 0x03; break; |
1461 | case X86::BLENDPSrri: Opc = X86::MOVSSrr; Mask = 0x0F; break; |
1462 | case X86::VBLENDPDrri: Opc = X86::VMOVSDrr; Mask = 0x03; break; |
1463 | case X86::VBLENDPSrri: Opc = X86::VMOVSSrr; Mask = 0x0F; break; |
1464 | } |
1465 | if ((MI.getOperand(3).getImm() ^ Mask) == 1) { |
1466 | auto &WorkingMI = cloneIfNew(MI); |
1467 | WorkingMI.setDesc(get(Opc)); |
1468 | WorkingMI.RemoveOperand(3); |
1469 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, |
1470 | /*NewMI=*/false, |
1471 | OpIdx1, OpIdx2); |
1472 | } |
1473 | } |
1474 | LLVM_FALLTHROUGH; |
1475 | case X86::PBLENDWrri: |
1476 | case X86::VBLENDPDYrri: |
1477 | case X86::VBLENDPSYrri: |
1478 | case X86::VPBLENDDrri: |
1479 | case X86::VPBLENDWrri: |
1480 | case X86::VPBLENDDYrri: |
1481 | case X86::VPBLENDWYrri:{ |
1482 | int8_t Mask; |
1483 | switch (MI.getOpcode()) { |
1484 | default: llvm_unreachable("Unreachable!"); |
1485 | case X86::BLENDPDrri: Mask = (int8_t)0x03; break; |
1486 | case X86::BLENDPSrri: Mask = (int8_t)0x0F; break; |
1487 | case X86::PBLENDWrri: Mask = (int8_t)0xFF; break; |
1488 | case X86::VBLENDPDrri: Mask = (int8_t)0x03; break; |
1489 | case X86::VBLENDPSrri: Mask = (int8_t)0x0F; break; |
1490 | case X86::VBLENDPDYrri: Mask = (int8_t)0x0F; break; |
1491 | case X86::VBLENDPSYrri: Mask = (int8_t)0xFF; break; |
1492 | case X86::VPBLENDDrri: Mask = (int8_t)0x0F; break; |
1493 | case X86::VPBLENDWrri: Mask = (int8_t)0xFF; break; |
1494 | case X86::VPBLENDDYrri: Mask = (int8_t)0xFF; break; |
1495 | case X86::VPBLENDWYrri: Mask = (int8_t)0xFF; break; |
1496 | } |
1497 | // Only the least significant bits of Imm are used. |
1498 | // Using int8_t to ensure it will be sign extended to the int64_t that |
1499 | // setImm takes in order to match isel behavior. |
1500 | int8_t Imm = MI.getOperand(3).getImm() & Mask; |
1501 | auto &WorkingMI = cloneIfNew(MI); |
1502 | WorkingMI.getOperand(3).setImm(Mask ^ Imm); |
1503 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
1504 | OpIdx1, OpIdx2); |
1505 | } |
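// Worked example (illustrative): for VPBLENDDrri, bit i of the immediate
// selects element i from src2 (set) or src1 (clear). After the sources are
// swapped every selection must flip, hence Imm ^ Mask: imm 0x0A (elements
// 1 and 3 from src2) becomes 0x0F ^ 0x0A = 0x05 for the swapped operands.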
1506 | case X86::INSERTPSrr: |
1507 | case X86::VINSERTPSrr: |
1508 | case X86::VINSERTPSZrr: { |
1509 | unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm(); |
1510 | unsigned ZMask = Imm & 15; |
1511 | unsigned DstIdx = (Imm >> 4) & 3; |
1512 | unsigned SrcIdx = (Imm >> 6) & 3; |
1513 | |
1514 | // We can commute insertps if we zero 2 of the elements, the insertion is |
1515 | // "inline" and we don't override the insertion with a zero. |
1516 | if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 && |
1517 | countPopulation(ZMask) == 2) { |
1518 | unsigned AltIdx = findFirstSet((ZMask | (1 << DstIdx)) ^ 15); |
1519 | assert(AltIdx < 4 && "Illegal insertion index"); |
1520 | unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask; |
1521 | auto &WorkingMI = cloneIfNew(MI); |
1522 | WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(AltImm); |
1523 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
1524 | OpIdx1, OpIdx2); |
1525 | } |
1526 | return nullptr; |
1527 | } |
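// Worked example (illustrative): imm 0x5C encodes SrcIdx = DstIdx = 1 and
// ZMask = 0b1100, i.e. lane 1 = src2[1], lanes 2-3 zeroed, lane 0 = src1[0].
// Exactly one surviving lane comes from each source, so after swapping the
// sources the inline insert is re-targeted at the other live lane:
// AltIdx = 0 and AltImm = 0x0C produce the same result.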
1528 | case X86::MOVSDrr: |
1529 | case X86::MOVSSrr: |
1530 | case X86::VMOVSDrr: |
1531 | case X86::VMOVSSrr:{ |
1532 | // On SSE41 or later we can commute a MOVSS/MOVSD to a BLENDPS/BLENDPD. |
1533 | assert(Subtarget.hasSSE41() && "Commuting MOVSD/MOVSS requires SSE41!"); |
1534 | |
1535 | unsigned Mask, Opc; |
1536 | switch (MI.getOpcode()) { |
1537 | default: llvm_unreachable("Unreachable!"); |
1538 | case X86::MOVSDrr: Opc = X86::BLENDPDrri; Mask = 0x02; break; |
1539 | case X86::MOVSSrr: Opc = X86::BLENDPSrri; Mask = 0x0E; break; |
1540 | case X86::VMOVSDrr: Opc = X86::VBLENDPDrri; Mask = 0x02; break; |
1541 | case X86::VMOVSSrr: Opc = X86::VBLENDPSrri; Mask = 0x0E; break; |
1542 | } |
1543 | |
1544 | auto &WorkingMI = cloneIfNew(MI); |
1545 | WorkingMI.setDesc(get(Opc)); |
1546 | WorkingMI.addOperand(MachineOperand::CreateImm(Mask)); |
1547 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
1548 | OpIdx1, OpIdx2); |
1549 | } |
1550 | case X86::PCLMULQDQrr: |
1551 | case X86::VPCLMULQDQrr: |
1552 | case X86::VPCLMULQDQYrr: |
1553 | case X86::VPCLMULQDQZrr: |
1554 | case X86::VPCLMULQDQZ128rr: |
1555 | case X86::VPCLMULQDQZ256rr: { |
1556 | // SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0] |
1557 | // SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0] |
1558 | unsigned Imm = MI.getOperand(3).getImm(); |
1559 | unsigned Src1Hi = Imm & 0x01; |
1560 | unsigned Src2Hi = Imm & 0x10; |
1561 | auto &WorkingMI = cloneIfNew(MI); |
1562 | WorkingMI.getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4)); |
1563 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
1564 | OpIdx1, OpIdx2); |
1565 | } |
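// Worked example (illustrative): imm 0x01 multiplies src1[127:64] by
// src2[63:0]. With the sources swapped the same product needs imm 0x10
// (high half of the new src2, low half of the new src1), which is exactly
// (Src1Hi << 4) | (Src2Hi >> 4).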
1566 | case X86::VPCMPBZ128rri: case X86::VPCMPUBZ128rri: |
1567 | case X86::VPCMPBZ256rri: case X86::VPCMPUBZ256rri: |
1568 | case X86::VPCMPBZrri: case X86::VPCMPUBZrri: |
1569 | case X86::VPCMPDZ128rri: case X86::VPCMPUDZ128rri: |
1570 | case X86::VPCMPDZ256rri: case X86::VPCMPUDZ256rri: |
1571 | case X86::VPCMPDZrri: case X86::VPCMPUDZrri: |
1572 | case X86::VPCMPQZ128rri: case X86::VPCMPUQZ128rri: |
1573 | case X86::VPCMPQZ256rri: case X86::VPCMPUQZ256rri: |
1574 | case X86::VPCMPQZrri: case X86::VPCMPUQZrri: |
1575 | case X86::VPCMPWZ128rri: case X86::VPCMPUWZ128rri: |
1576 | case X86::VPCMPWZ256rri: case X86::VPCMPUWZ256rri: |
1577 | case X86::VPCMPWZrri: case X86::VPCMPUWZrri: |
1578 | case X86::VPCMPBZ128rrik: case X86::VPCMPUBZ128rrik: |
1579 | case X86::VPCMPBZ256rrik: case X86::VPCMPUBZ256rrik: |
1580 | case X86::VPCMPBZrrik: case X86::VPCMPUBZrrik: |
1581 | case X86::VPCMPDZ128rrik: case X86::VPCMPUDZ128rrik: |
1582 | case X86::VPCMPDZ256rrik: case X86::VPCMPUDZ256rrik: |
1583 | case X86::VPCMPDZrrik: case X86::VPCMPUDZrrik: |
1584 | case X86::VPCMPQZ128rrik: case X86::VPCMPUQZ128rrik: |
1585 | case X86::VPCMPQZ256rrik: case X86::VPCMPUQZ256rrik: |
1586 | case X86::VPCMPQZrrik: case X86::VPCMPUQZrrik: |
1587 | case X86::VPCMPWZ128rrik: case X86::VPCMPUWZ128rrik: |
1588 | case X86::VPCMPWZ256rrik: case X86::VPCMPUWZ256rrik: |
1589 | case X86::VPCMPWZrrik: case X86::VPCMPUWZrrik: { |
1590 | // Flip comparison mode immediate (if necessary). |
1591 | unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm() & 0x7; |
1592 | Imm = X86::getSwappedVPCMPImm(Imm); |
1593 | auto &WorkingMI = cloneIfNew(MI); |
1594 | WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(Imm); |
1595 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
1596 | OpIdx1, OpIdx2); |
1597 | } |
1598 | case X86::VPCOMBri: case X86::VPCOMUBri: |
1599 | case X86::VPCOMDri: case X86::VPCOMUDri: |
1600 | case X86::VPCOMQri: case X86::VPCOMUQri: |
1601 | case X86::VPCOMWri: case X86::VPCOMUWri: { |
1602 | // Flip comparison mode immediate (if necessary). |
1603 | unsigned Imm = MI.getOperand(3).getImm() & 0x7; |
1604 | Imm = X86::getSwappedVPCOMImm(Imm); |
1605 | auto &WorkingMI = cloneIfNew(MI); |
1606 | WorkingMI.getOperand(3).setImm(Imm); |
1607 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
1608 | OpIdx1, OpIdx2); |
1609 | } |
1610 | case X86::VPERM2F128rr: |
1611 | case X86::VPERM2I128rr: { |
1612 | // Flip permute source immediate. |
1613 | // Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi. |
1614 | // Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi. |
1615 | int8_t Imm = MI.getOperand(3).getImm() & 0xFF; |
1616 | auto &WorkingMI = cloneIfNew(MI); |
1617 | WorkingMI.getOperand(3).setImm(Imm ^ 0x22); |
1618 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
1619 | OpIdx1, OpIdx2); |
1620 | } |
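// Worked example (illustrative): imm 0x21 selects {src1.hi, src2.lo}. Bits
// 1 and 5 choose which source each half reads from, so after swapping the
// sources both bits must flip: 0x21 ^ 0x22 = 0x03 selects the same two
// halves from the swapped registers.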
1621 | case X86::MOVHLPSrr: |
1622 | case X86::UNPCKHPDrr: |
1623 | case X86::VMOVHLPSrr: |
1624 | case X86::VUNPCKHPDrr: |
1625 | case X86::VMOVHLPSZrr: |
1626 | case X86::VUNPCKHPDZ128rr: { |
1627 | assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!"); |
1628 | |
1629 | unsigned Opc = MI.getOpcode(); |
1630 | switch (Opc) { |
1631 | default: llvm_unreachable("Unreachable!"); |
1632 | case X86::MOVHLPSrr: Opc = X86::UNPCKHPDrr; break; |
1633 | case X86::UNPCKHPDrr: Opc = X86::MOVHLPSrr; break; |
1634 | case X86::VMOVHLPSrr: Opc = X86::VUNPCKHPDrr; break; |
1635 | case X86::VUNPCKHPDrr: Opc = X86::VMOVHLPSrr; break; |
1636 | case X86::VMOVHLPSZrr: Opc = X86::VUNPCKHPDZ128rr; break; |
1637 | case X86::VUNPCKHPDZ128rr: Opc = X86::VMOVHLPSZrr; break; |
1638 | } |
1639 | auto &WorkingMI = cloneIfNew(MI); |
1640 | WorkingMI.setDesc(get(Opc)); |
1641 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
1642 | OpIdx1, OpIdx2); |
1643 | } |
1644 | case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr: { |
1645 | auto &WorkingMI = cloneIfNew(MI); |
1646 | unsigned OpNo = MI.getDesc().getNumOperands() - 1; |
1647 | X86::CondCode CC = static_cast<X86::CondCode>(MI.getOperand(OpNo).getImm()); |
1648 | WorkingMI.getOperand(OpNo).setImm(X86::GetOppositeBranchCondition(CC)); |
1649 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
1650 | OpIdx1, OpIdx2); |
1651 | } |
1652 | case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi: |
1653 | case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi: |
1654 | case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi: |
1655 | case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi: |
1656 | case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi: |
1657 | case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi: |
1658 | case X86::VPTERNLOGDZrrik: |
1659 | case X86::VPTERNLOGDZ128rrik: |
1660 | case X86::VPTERNLOGDZ256rrik: |
1661 | case X86::VPTERNLOGQZrrik: |
1662 | case X86::VPTERNLOGQZ128rrik: |
1663 | case X86::VPTERNLOGQZ256rrik: |
1664 | case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz: |
1665 | case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz: |
1666 | case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz: |
1667 | case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz: |
1668 | case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz: |
1669 | case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz: |
1670 | case X86::VPTERNLOGDZ128rmbi: |
1671 | case X86::VPTERNLOGDZ256rmbi: |
1672 | case X86::VPTERNLOGDZrmbi: |
1673 | case X86::VPTERNLOGQZ128rmbi: |
1674 | case X86::VPTERNLOGQZ256rmbi: |
1675 | case X86::VPTERNLOGQZrmbi: |
1676 | case X86::VPTERNLOGDZ128rmbikz: |
1677 | case X86::VPTERNLOGDZ256rmbikz: |
1678 | case X86::VPTERNLOGDZrmbikz: |
1679 | case X86::VPTERNLOGQZ128rmbikz: |
1680 | case X86::VPTERNLOGQZ256rmbikz: |
1681 | case X86::VPTERNLOGQZrmbikz: { |
1682 | auto &WorkingMI = cloneIfNew(MI); |
1683 | commuteVPTERNLOG(WorkingMI, OpIdx1, OpIdx2); |
1684 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
1685 | OpIdx1, OpIdx2); |
1686 | } |
1687 | default: { |
1688 | if (isCommutableVPERMV3Instruction(MI.getOpcode())) { |
1689 | unsigned Opc = getCommutedVPERMV3Opcode(MI.getOpcode()); |
1690 | auto &WorkingMI = cloneIfNew(MI); |
1691 | WorkingMI.setDesc(get(Opc)); |
1692 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
1693 | OpIdx1, OpIdx2); |
1694 | } |
1695 | |
1696 | const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(), |
1697 | MI.getDesc().TSFlags); |
1698 | if (FMA3Group) { |
1699 | unsigned Opc = |
1700 | getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2, *FMA3Group); |
1701 | auto &WorkingMI = cloneIfNew(MI); |
1702 | WorkingMI.setDesc(get(Opc)); |
1703 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
1704 | OpIdx1, OpIdx2); |
1705 | } |
1706 | |
1707 | return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); |
1708 | } |
1709 | } |
1710 | } |
1711 | |
1712 | bool |
1713 | X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI, |
1714 | unsigned &SrcOpIdx1, |
1715 | unsigned &SrcOpIdx2, |
1716 | bool IsIntrinsic) const { |
1717 | uint64_t TSFlags = MI.getDesc().TSFlags; |
1718 | |
1719 | unsigned FirstCommutableVecOp = 1; |
1720 | unsigned LastCommutableVecOp = 3; |
1721 | unsigned KMaskOp = -1U; |
1722 | if (X86II::isKMasked(TSFlags)) { |
1723 | // For k-zero-masked operations it is OK to commute the first vector |
1724 | // operand. |
1725 | // For regular k-masked operations a conservative choice is made, as the |
1726 | // elements of the first vector operand for which the corresponding bit |
1727 | // in the k-mask operand is set to 0 are copied to the result of the |
1728 | // instruction. |
1729 | // TODO/FIXME: The commute may still be legal if it is known that the |
1730 | // k-mask operand is set to either all ones or all zeroes. |
1731 | // It is also OK to commute the 1st operand if all users of MI use only |
1732 | // the elements enabled by the k-mask operand. For example, |
1733 | // v4 = VFMADD213PSZrk v1, k, v2, v3; // v1[i] = k[i] ? v2[i]*v1[i]+v3[i] |
1734 | // : v1[i]; |
1735 | // VMOVAPSZmrk <mem_addr>, k, v4; // this is the ONLY user of v4 -> |
1736 | // // Ok, to commute v1 in FMADD213PSZrk. |
1737 | |
1738 | // The k-mask operand has index = 2 for masked and zero-masked operations. |
1739 | KMaskOp = 2; |
1740 | |
1741 | // The operand with index = 1 is used as a source for those elements for |
1742 | // which the corresponding bit in the k-mask is set to 0. |
1743 | if (X86II::isKMergeMasked(TSFlags)) |
1744 | FirstCommutableVecOp = 3; |
1745 | |
1746 | LastCommutableVecOp++; |
1747 | } else if (IsIntrinsic) { |
1748 | // Commuting the first operand of an intrinsic instruction isn't possible |
1749 | // unless we can prove that only the lowest element of the result is used. |
1750 | FirstCommutableVecOp = 2; |
1751 | } |
1752 | |
1753 | if (isMem(MI, LastCommutableVecOp)) |
1754 | LastCommutableVecOp--; |
1755 | |
1756 | // Only operands from FirstCommutableVecOp to LastCommutableVecOp are |
1757 | // commutable. Also, the value 'CommuteAnyOperandIndex' is valid here as |
1758 | // it means that the operand is not specified/fixed. |
1759 | if (SrcOpIdx1 != CommuteAnyOperandIndex && |
1760 | (SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp || |
1761 | SrcOpIdx1 == KMaskOp)) |
1762 | return false; |
1763 | if (SrcOpIdx2 != CommuteAnyOperandIndex && |
1764 | (SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp || |
1765 | SrcOpIdx2 == KMaskOp)) |
1766 | return false; |
1767 | |
1768 | // Look for two different register operands assumed to be commutable |
1769 | // regardless of the FMA opcode. The FMA opcode is adjusted later. |
1770 | if (SrcOpIdx1 == CommuteAnyOperandIndex || |
1771 | SrcOpIdx2 == CommuteAnyOperandIndex) { |
1772 | unsigned CommutableOpIdx1 = SrcOpIdx1; |
Value stored to 'CommutableOpIdx1' during its initialization is never read | |
1773 | unsigned CommutableOpIdx2 = SrcOpIdx2; |
1774 | |
1775 | // At least one of the operands to be commuted is not specified and |
1776 | // this method is free to choose appropriate commutable operands. |
1777 | if (SrcOpIdx1 == SrcOpIdx2) |
1778 | // Neither operand is fixed. By default set one of the commutable |
1779 | // operands to the last register operand of the instruction. |
1780 | CommutableOpIdx2 = LastCommutableVecOp; |
1781 | else if (SrcOpIdx2 == CommuteAnyOperandIndex) |
1782 | // Only one of the operands is not fixed. |
1783 | CommutableOpIdx2 = SrcOpIdx1; |
1784 | |
1785 | // CommutableOpIdx2 is well defined now. Let's choose another commutable |
1786 | // operand and assign its index to CommutableOpIdx1. |
1787 | unsigned Op2Reg = MI.getOperand(CommutableOpIdx2).getReg(); |
1788 | for (CommutableOpIdx1 = LastCommutableVecOp; |
1789 | CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) { |
1790 | // Just ignore and skip the k-mask operand. |
1791 | if (CommutableOpIdx1 == KMaskOp) |
1792 | continue; |
1793 | |
1794 | // The commuted operands must have different registers. |
1795 | // Otherwise, the commute transformation does not change anything and |
1796 | // is therefore useless. |
1797 | if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg()) |
1798 | break; |
1799 | } |
1800 | |
1801 | // No appropriate commutable operands were found. |
1802 | if (CommutableOpIdx1 < FirstCommutableVecOp) |
1803 | return false; |
1804 | |
1805 | // Assign the found pair of commutable indices to SrcOpIdx1 and SrcOpIdx2 |
1806 | // to return those values. |
1807 | if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, |
1808 | CommutableOpIdx1, CommutableOpIdx2)) |
1809 | return false; |
1810 | } |
1811 | |
1812 | return true; |
1813 | } |
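// Operand layouts assumed by the routine above (illustrative summary):
//   unmasked:     dst, src1, src2, src3          (commutable: 1..3)
//   zero-masked:  dst, src1, kmask, src2, src3   (commutable: 1, 3, 4)
//   merge-masked: dst, src1, kmask, src2, src3   (commutable: 3, 4 only,
//                 since src1 supplies the masked-off result elements)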
1814 | |
1815 | bool X86InstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1, |
1816 | unsigned &SrcOpIdx2) const { |
1817 | const MCInstrDesc &Desc = MI.getDesc(); |
1818 | if (!Desc.isCommutable()) |
1819 | return false; |
1820 | |
1821 | switch (MI.getOpcode()) { |
1822 | case X86::CMPSDrr: |
1823 | case X86::CMPSSrr: |
1824 | case X86::CMPPDrri: |
1825 | case X86::CMPPSrri: |
1826 | case X86::VCMPSDrr: |
1827 | case X86::VCMPSSrr: |
1828 | case X86::VCMPPDrri: |
1829 | case X86::VCMPPSrri: |
1830 | case X86::VCMPPDYrri: |
1831 | case X86::VCMPPSYrri: |
1832 | case X86::VCMPSDZrr: |
1833 | case X86::VCMPSSZrr: |
1834 | case X86::VCMPPDZrri: |
1835 | case X86::VCMPPSZrri: |
1836 | case X86::VCMPPDZ128rri: |
1837 | case X86::VCMPPSZ128rri: |
1838 | case X86::VCMPPDZ256rri: |
1839 | case X86::VCMPPSZ256rri: { |
1840 | // Float comparison can be safely commuted for |
1841 | // Ordered/Unordered/Equal/NotEqual tests |
1842 | unsigned Imm = MI.getOperand(3).getImm() & 0x7; |
1843 | switch (Imm) { |
1844 | case 0x00: // EQUAL |
1845 | case 0x03: // UNORDERED |
1846 | case 0x04: // NOT EQUAL |
1847 | case 0x07: // ORDERED |
1848 | // The indices of the commutable operands are 1 and 2. |
1849 | // Assign them to the returned operand indices here. |
1850 | return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2); |
1851 | } |
1852 | return false; |
1853 | } |
1854 | case X86::MOVSDrr: |
1855 | case X86::MOVSSrr: |
1856 | case X86::VMOVSDrr: |
1857 | case X86::VMOVSSrr: |
1858 | if (Subtarget.hasSSE41()) |
1859 | return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); |
1860 | return false; |
1861 | case X86::MOVHLPSrr: |
1862 | case X86::UNPCKHPDrr: |
1863 | case X86::VMOVHLPSrr: |
1864 | case X86::VUNPCKHPDrr: |
1865 | case X86::VMOVHLPSZrr: |
1866 | case X86::VUNPCKHPDZ128rr: |
1867 | if (Subtarget.hasSSE2()) |
1868 | return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); |
1869 | return false; |
1870 | case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi: |
1871 | case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi: |
1872 | case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi: |
1873 | case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi: |
1874 | case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi: |
1875 | case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi: |
1876 | case X86::VPTERNLOGDZrrik: |
1877 | case X86::VPTERNLOGDZ128rrik: |
1878 | case X86::VPTERNLOGDZ256rrik: |
1879 | case X86::VPTERNLOGQZrrik: |
1880 | case X86::VPTERNLOGQZ128rrik: |
1881 | case X86::VPTERNLOGQZ256rrik: |
1882 | case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz: |
1883 | case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz: |
1884 | case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz: |
1885 | case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz: |
1886 | case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz: |
1887 | case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz: |
1888 | case X86::VPTERNLOGDZ128rmbi: |
1889 | case X86::VPTERNLOGDZ256rmbi: |
1890 | case X86::VPTERNLOGDZrmbi: |
1891 | case X86::VPTERNLOGQZ128rmbi: |
1892 | case X86::VPTERNLOGQZ256rmbi: |
1893 | case X86::VPTERNLOGQZrmbi: |
1894 | case X86::VPTERNLOGDZ128rmbikz: |
1895 | case X86::VPTERNLOGDZ256rmbikz: |
1896 | case X86::VPTERNLOGDZrmbikz: |
1897 | case X86::VPTERNLOGQZ128rmbikz: |
1898 | case X86::VPTERNLOGQZ256rmbikz: |
1899 | case X86::VPTERNLOGQZrmbikz: |
1900 | return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); |
1901 | case X86::VPMADD52HUQZ128r: |
1902 | case X86::VPMADD52HUQZ128rk: |
1903 | case X86::VPMADD52HUQZ128rkz: |
1904 | case X86::VPMADD52HUQZ256r: |
1905 | case X86::VPMADD52HUQZ256rk: |
1906 | case X86::VPMADD52HUQZ256rkz: |
1907 | case X86::VPMADD52HUQZr: |
1908 | case X86::VPMADD52HUQZrk: |
1909 | case X86::VPMADD52HUQZrkz: |
1910 | case X86::VPMADD52LUQZ128r: |
1911 | case X86::VPMADD52LUQZ128rk: |
1912 | case X86::VPMADD52LUQZ128rkz: |
1913 | case X86::VPMADD52LUQZ256r: |
1914 | case X86::VPMADD52LUQZ256rk: |
1915 | case X86::VPMADD52LUQZ256rkz: |
1916 | case X86::VPMADD52LUQZr: |
1917 | case X86::VPMADD52LUQZrk: |
1918 | case X86::VPMADD52LUQZrkz: { |
1919 | unsigned CommutableOpIdx1 = 2; |
1920 | unsigned CommutableOpIdx2 = 3; |
1921 | if (X86II::isKMasked(Desc.TSFlags)) { |
1922 | // Skip the mask register. |
1923 | ++CommutableOpIdx1; |
1924 | ++CommutableOpIdx2; |
1925 | } |
1926 | if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, |
1927 | CommutableOpIdx1, CommutableOpIdx2)) |
1928 | return false; |
1929 | if (!MI.getOperand(SrcOpIdx1).isReg() || |
1930 | !MI.getOperand(SrcOpIdx2).isReg()) |
1931 | // No idea. |
1932 | return false; |
1933 | return true; |
1934 | } |
1935 | |
1936 | default: |
1937 | const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(), |
1938 | MI.getDesc().TSFlags); |
1939 | if (FMA3Group) |
1940 | return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2, |
1941 | FMA3Group->isIntrinsic()); |
1942 | |
1943 | // Handle masked instructions, since we need to skip over the mask input |
1944 | // and the preserved input. |
1945 | if (X86II::isKMasked(Desc.TSFlags)) { |
1946 | // First assume that the first input is the mask operand and skip past it. |
1947 | unsigned CommutableOpIdx1 = Desc.getNumDefs() + 1; |
1948 | unsigned CommutableOpIdx2 = Desc.getNumDefs() + 2; |
1949 | // Check if the first input is tied. If it isn't, then we only |
1950 | // need to skip the mask operand, which we did above. |
1951 | if ((MI.getDesc().getOperandConstraint(Desc.getNumDefs(), |
1952 | MCOI::TIED_TO) != -1)) { |
1953 | // If this is zero masking instruction with a tied operand, we need to |
1954 | // move the first index back to the first input since this must |
1955 | // be a 3 input instruction and we want the first two non-mask inputs. |
1956 | // Otherwise this is a 2 input instruction with a preserved input and |
1957 | // mask, so we need to move the indices to skip one more input. |
1958 | if (X86II::isKMergeMasked(Desc.TSFlags)) { |
1959 | ++CommutableOpIdx1; |
1960 | ++CommutableOpIdx2; |
1961 | } else { |
1962 | --CommutableOpIdx1; |
1963 | } |
1964 | } |
1965 | |
1966 | if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, |
1967 | CommutableOpIdx1, CommutableOpIdx2)) |
1968 | return false; |
1969 | |
1970 | if (!MI.getOperand(SrcOpIdx1).isReg() || |
1971 | !MI.getOperand(SrcOpIdx2).isReg()) |
1972 | // No idea. |
1973 | return false; |
1974 | return true; |
1975 | } |
1976 | |
1977 | return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); |
1978 | } |
1979 | return false; |
1980 | } |
1981 | |
1982 | X86::CondCode X86::getCondFromBranch(const MachineInstr &MI) { |
1983 | switch (MI.getOpcode()) { |
1984 | default: return X86::COND_INVALID; |
1985 | case X86::JCC_1: |
1986 | return static_cast<X86::CondCode>( |
1987 | MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm()); |
1988 | } |
1989 | } |
1990 | |
1991 | /// Return condition code of a SETCC opcode. |
1992 | X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) { |
1993 | switch (MI.getOpcode()) { |
1994 | default: return X86::COND_INVALID; |
1995 | case X86::SETCCr: case X86::SETCCm: |
1996 | return static_cast<X86::CondCode>( |
1997 | MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm()); |
1998 | } |
1999 | } |
2000 | |
2001 | /// Return condition code of a CMov opcode. |
2002 | X86::CondCode X86::getCondFromCMov(const MachineInstr &MI) { |
2003 | switch (MI.getOpcode()) { |
2004 | default: return X86::COND_INVALID; |
2005 | case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr: |
2006 | case X86::CMOV16rm: case X86::CMOV32rm: case X86::CMOV64rm: |
2007 | return static_cast<X86::CondCode>( |
2008 | MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm()); |
2009 | } |
2010 | } |
2011 | |
2012 | /// Return the inverse of the specified condition, |
2013 | /// e.g. turning COND_E to COND_NE. |
2014 | X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) { |
2015 | switch (CC) { |
2016 | default: llvm_unreachable("Illegal condition code!"); |
2017 | case X86::COND_E: return X86::COND_NE; |
2018 | case X86::COND_NE: return X86::COND_E; |
2019 | case X86::COND_L: return X86::COND_GE; |
2020 | case X86::COND_LE: return X86::COND_G; |
2021 | case X86::COND_G: return X86::COND_LE; |
2022 | case X86::COND_GE: return X86::COND_L; |
2023 | case X86::COND_B: return X86::COND_AE; |
2024 | case X86::COND_BE: return X86::COND_A; |
2025 | case X86::COND_A: return X86::COND_BE; |
2026 | case X86::COND_AE: return X86::COND_B; |
2027 | case X86::COND_S: return X86::COND_NS; |
2028 | case X86::COND_NS: return X86::COND_S; |
2029 | case X86::COND_P: return X86::COND_NP; |
2030 | case X86::COND_NP: return X86::COND_P; |
2031 | case X86::COND_O: return X86::COND_NO; |
2032 | case X86::COND_NO: return X86::COND_O; |
2033 | case X86::COND_NE_OR_P: return X86::COND_E_AND_NP; |
2034 | case X86::COND_E_AND_NP: return X86::COND_NE_OR_P; |
2035 | } |
2036 | } |
2037 | |
2038 | /// Assuming the flags are set by MI(a,b), return the condition code if we |
2039 | /// modify the instructions such that flags are set by MI(b,a). |
2040 | static X86::CondCode getSwappedCondition(X86::CondCode CC) { |
2041 | switch (CC) { |
2042 | default: return X86::COND_INVALID; |
2043 | case X86::COND_E: return X86::COND_E; |
2044 | case X86::COND_NE: return X86::COND_NE; |
2045 | case X86::COND_L: return X86::COND_G; |
2046 | case X86::COND_LE: return X86::COND_GE; |
2047 | case X86::COND_G: return X86::COND_L; |
2048 | case X86::COND_GE: return X86::COND_LE; |
2049 | case X86::COND_B: return X86::COND_A; |
2050 | case X86::COND_BE: return X86::COND_AE; |
2051 | case X86::COND_A: return X86::COND_B; |
2052 | case X86::COND_AE: return X86::COND_BE; |
2053 | } |
2054 | } |
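// Example (illustrative): if the flags were set by CMP a, b, then a "jl"
// (a < b) must become "jg" after rewriting to CMP b, a, while E and NE are
// symmetric and survive the swap unchanged.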
2055 | |
2056 | std::pair<X86::CondCode, bool> |
2057 | X86::getX86ConditionCode(CmpInst::Predicate Predicate) { |
2058 | X86::CondCode CC = X86::COND_INVALID; |
2059 | bool NeedSwap = false; |
2060 | switch (Predicate) { |
2061 | default: break; |
2062 | // Floating-point Predicates |
2063 | case CmpInst::FCMP_UEQ: CC = X86::COND_E; break; |
2064 | case CmpInst::FCMP_OLT: NeedSwap = true; LLVM_FALLTHROUGH; |
2065 | case CmpInst::FCMP_OGT: CC = X86::COND_A; break; |
2066 | case CmpInst::FCMP_OLE: NeedSwap = true; LLVM_FALLTHROUGH; |
2067 | case CmpInst::FCMP_OGE: CC = X86::COND_AE; break; |
2068 | case CmpInst::FCMP_UGT: NeedSwap = true; LLVM_FALLTHROUGH; |
2069 | case CmpInst::FCMP_ULT: CC = X86::COND_B; break; |
2070 | case CmpInst::FCMP_UGE: NeedSwap = true; LLVM_FALLTHROUGH; |
2071 | case CmpInst::FCMP_ULE: CC = X86::COND_BE; break; |
2072 | case CmpInst::FCMP_ONE: CC = X86::COND_NE; break; |
2073 | case CmpInst::FCMP_UNO: CC = X86::COND_P; break; |
2074 | case CmpInst::FCMP_ORD: CC = X86::COND_NP; break; |
2075 | case CmpInst::FCMP_OEQ: LLVM_FALLTHROUGH; |
2076 | case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break; |
2077 | |
2078 | // Integer Predicates |
2079 | case CmpInst::ICMP_EQ: CC = X86::COND_E; break; |
2080 | case CmpInst::ICMP_NE: CC = X86::COND_NE; break; |
2081 | case CmpInst::ICMP_UGT: CC = X86::COND_A; break; |
2082 | case CmpInst::ICMP_UGE: CC = X86::COND_AE; break; |
2083 | case CmpInst::ICMP_ULT: CC = X86::COND_B; break; |
2084 | case CmpInst::ICMP_ULE: CC = X86::COND_BE; break; |
2085 | case CmpInst::ICMP_SGT: CC = X86::COND_G; break; |
2086 | case CmpInst::ICMP_SGE: CC = X86::COND_GE; break; |
2087 | case CmpInst::ICMP_SLT: CC = X86::COND_L; break; |
2088 | case CmpInst::ICMP_SLE: CC = X86::COND_LE; break; |
2089 | } |
2090 | |
2091 | return std::make_pair(CC, NeedSwap); |
2092 | } |
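// Usage sketch (illustrative): a caller lowering "fcmp olt x, y" receives
// (X86::COND_A, NeedSwap = true) and is expected to emit the comparison
// with its operands swapped -- e.g. ucomiss y, x -- before testing COND_A,
// since OLT(x, y) is OGT(y, x).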
2093 | |
2094 | /// Return a setcc opcode based on whether it has a memory operand. |
2095 | unsigned X86::getSETOpc(bool HasMemoryOperand) { |
2096 | return HasMemoryOperand ? X86::SETCCm : X86::SETCCr; |
2097 | } |
2098 | |
2099 | /// Return a cmov opcode for the given register size in bytes and operand type. |
2100 | unsigned X86::getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand) { |
2101 | switch(RegBytes) { |
2102 | default: llvm_unreachable("Illegal register size!"); |
2103 | case 2: return HasMemoryOperand ? X86::CMOV16rm : X86::CMOV16rr; |
2104 | case 4: return HasMemoryOperand ? X86::CMOV32rm : X86::CMOV32rr; |
2105 | case 8: return HasMemoryOperand ? X86::CMOV64rm : X86::CMOV64rr; |
2106 | } |
2107 | } |
2108 | |
2109 | /// Get the VPCMP immediate for the given condition. |
2110 | unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) { |
2111 | switch (CC) { |
2112 | default: llvm_unreachable("Unexpected SETCC condition"); |
2113 | case ISD::SETNE: return 4; |
2114 | case ISD::SETEQ: return 0; |
2115 | case ISD::SETULT: |
2116 | case ISD::SETLT: return 1; |
2117 | case ISD::SETUGT: |
2118 | case ISD::SETGT: return 6; |
2119 | case ISD::SETUGE: |
2120 | case ISD::SETGE: return 5; |
2121 | case ISD::SETULE: |
2122 | case ISD::SETLE: return 2; |
2123 | } |
2124 | } |
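// Note (illustrative): signed and unsigned conditions share immediates
// because signedness lives in the opcode (VPCMPD vs. VPCMPUD), not in the
// immediate; e.g. SETLT and SETULT both map to immediate 1 (LT).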
2125 | |
2126 | /// Get the VPCMP immediate if the opcodes are swapped. |
2127 | unsigned X86::getSwappedVPCMPImm(unsigned Imm) { |
2128 | switch (Imm) { |
2129 | default: llvm_unreachable("Unreachable!"); |
2130 | case 0x01: Imm = 0x06; break; // LT -> NLE |
2131 | case 0x02: Imm = 0x05; break; // LE -> NLT |
2132 | case 0x05: Imm = 0x02; break; // NLT -> LE |
2133 | case 0x06: Imm = 0x01; break; // NLE -> LT |
2134 | case 0x00: // EQ |
2135 | case 0x03: // FALSE |
2136 | case 0x04: // NE |
2137 | case 0x07: // TRUE |
2138 | break; |
2139 | } |
2140 | |
2141 | return Imm; |
2142 | } |
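// Worked example (illustrative): VPCMPD a, b with imm 0x01 tests a < b,
// which equals b > a, so the commuted form VPCMPD b, a needs imm 0x06
// (NLE); EQ, NE, FALSE and TRUE are symmetric and keep their immediates.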
2143 | |
2144 | /// Get the VPCOM immediate if the opcodes are swapped. |
2145 | unsigned X86::getSwappedVPCOMImm(unsigned Imm) { |
2146 | switch (Imm) { |
2147 | default: llvm_unreachable("Unreachable!"); |
2148 | case 0x00: Imm = 0x02; break; // LT -> GT |
2149 | case 0x01: Imm = 0x03; break; // LE -> GE |
2150 | case 0x02: Imm = 0x00; break; // GT -> LT |
2151 | case 0x03: Imm = 0x01; break; // GE -> LE |
2152 | case 0x04: // EQ |
2153 | case 0x05: // NE |
2154 | case 0x06: // FALSE |
2155 | case 0x07: // TRUE |
2156 | break; |
2157 | } |
2158 | |
2159 | return Imm; |
2160 | } |
2161 | |
2162 | bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const { |
2163 | if (!MI.isTerminator()) return false; |
2164 | |
2165 | // Conditional branch is a special case. |
2166 | if (MI.isBranch() && !MI.isBarrier()) |
2167 | return true; |
2168 | if (!MI.isPredicable()) |
2169 | return true; |
2170 | return !isPredicated(MI); |
2171 | } |
2172 | |
2173 | bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const { |
2174 | switch (MI.getOpcode()) { |
2175 | case X86::TCRETURNdi: |
2176 | case X86::TCRETURNri: |
2177 | case X86::TCRETURNmi: |
2178 | case X86::TCRETURNdi64: |
2179 | case X86::TCRETURNri64: |
2180 | case X86::TCRETURNmi64: |
2181 | return true; |
2182 | default: |
2183 | return false; |
2184 | } |
2185 | } |
2186 | |
2187 | bool X86InstrInfo::canMakeTailCallConditional( |
2188 | SmallVectorImpl<MachineOperand> &BranchCond, |
2189 | const MachineInstr &TailCall) const { |
2190 | if (TailCall.getOpcode() != X86::TCRETURNdi && |
2191 | TailCall.getOpcode() != X86::TCRETURNdi64) { |
2192 | // Only direct calls can be done with a conditional branch. |
2193 | return false; |
2194 | } |
2195 | |
2196 | const MachineFunction *MF = TailCall.getParent()->getParent(); |
2197 | if (Subtarget.isTargetWin64() && MF->hasWinCFI()) { |
2198 | // Conditional tail calls confuse the Win64 unwinder. |
2199 | return false; |
2200 | } |
2201 | |
2202 | assert(BranchCond.size() == 1); |
2203 | if (BranchCond[0].getImm() > X86::LAST_VALID_COND) { |
2204 | // Can't make a conditional tail call with this condition. |
2205 | return false; |
2206 | } |
2207 | |
2208 | const X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>(); |
2209 | if (X86FI->getTCReturnAddrDelta() != 0 || |
2210 | TailCall.getOperand(1).getImm() != 0) { |
2211 | // A conditional tail call cannot do any stack adjustment. |
2212 | return false; |
2213 | } |
2214 | |
2215 | return true; |
2216 | } |
2217 | |
2218 | void X86InstrInfo::replaceBranchWithTailCall( |
2219 | MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond, |
2220 | const MachineInstr &TailCall) const { |
2221 | assert(canMakeTailCallConditional(BranchCond, TailCall)); |
2222 | |
2223 | MachineBasicBlock::iterator I = MBB.end(); |
2224 | while (I != MBB.begin()) { |
2225 | --I; |
2226 | if (I->isDebugInstr()) |
2227 | continue; |
2228 | if (!I->isBranch()) |
2229 | assert(0 && "Can't find the branch to replace!"); |
2230 | |
2231 | X86::CondCode CC = X86::getCondFromBranch(*I); |
2232 | assert(BranchCond.size() == 1); |
2233 | if (CC != BranchCond[0].getImm()) |
2234 | continue; |
2235 | |
2236 | break; |
2237 | } |
2238 | |
2239 | unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc |
2240 | : X86::TCRETURNdi64cc; |
2241 | |
2242 | auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc)); |
2243 | MIB->addOperand(TailCall.getOperand(0)); // Destination. |
2244 | MIB.addImm(0); // Stack offset (not used). |
2245 | MIB->addOperand(BranchCond[0]); // Condition. |
2246 | MIB.copyImplicitOps(TailCall); // Regmask and (imp-used) parameters. |
2247 | |
2248 | // Add implicit uses and defs of all live regs potentially clobbered by the |
2249 | // call. This way they still appear live across the call. |
2250 | LivePhysRegs LiveRegs(getRegisterInfo()); |
2251 | LiveRegs.addLiveOuts(MBB); |
2252 | SmallVector<std::pair<MCPhysReg, const MachineOperand *>, 8> Clobbers; |
2253 | LiveRegs.stepForward(*MIB, Clobbers); |
2254 | for (const auto &C : Clobbers) { |
2255 | MIB.addReg(C.first, RegState::Implicit); |
2256 | MIB.addReg(C.first, RegState::Implicit | RegState::Define); |
2257 | } |
2258 | |
2259 | I->eraseFromParent(); |
2260 | } |
2261 | |
2262 | // Given an MBB and its TBB, find the FBB which was a fallthrough MBB (it may |
2263 | // not be a fallthrough MBB now due to layout changes). Return nullptr if the |
2264 | // fallthrough MBB cannot be identified. |
2265 | static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB, |
2266 | MachineBasicBlock *TBB) { |
2267 | // Look for non-EHPad successors other than TBB. If we find exactly one, it |
2268 | // is the fallthrough MBB. If we find zero, then TBB is both the target MBB |
2269 | // and fallthrough MBB. If we find more than one, we cannot identify the |
2270 | // fallthrough MBB and should return nullptr. |
2271 | MachineBasicBlock *FallthroughBB = nullptr; |
2272 | for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI) { |
2273 | if ((*SI)->isEHPad() || (*SI == TBB && FallthroughBB)) |
2274 | continue; |
2275 | // Return a nullptr if we found more than one fallthrough successor. |
2276 | if (FallthroughBB && FallthroughBB != TBB) |
2277 | return nullptr; |
2278 | FallthroughBB = *SI; |
2279 | } |
2280 | return FallthroughBB; |
2281 | } |
2282 | |
2283 | bool X86InstrInfo::AnalyzeBranchImpl( |
2284 | MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, |
2285 | SmallVectorImpl<MachineOperand> &Cond, |
2286 | SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const { |
2287 | |
2288 | // Start from the bottom of the block and work up, examining the |
2289 | // terminator instructions. |
2290 | MachineBasicBlock::iterator I = MBB.end(); |
2291 | MachineBasicBlock::iterator UnCondBrIter = MBB.end(); |
2292 | while (I != MBB.begin()) { |
2293 | --I; |
2294 | if (I->isDebugInstr()) |
2295 | continue; |
2296 | |
2297 | // Working from the bottom, when we see a non-terminator instruction, we're |
2298 | // done. |
2299 | if (!isUnpredicatedTerminator(*I)) |
2300 | break; |
2301 | |
2302 | // A terminator that isn't a branch can't easily be handled by this |
2303 | // analysis. |
2304 | if (!I->isBranch()) |
2305 | return true; |
2306 | |
2307 | // Handle unconditional branches. |
2308 | if (I->getOpcode() == X86::JMP_1) { |
2309 | UnCondBrIter = I; |
2310 | |
2311 | if (!AllowModify) { |
2312 | TBB = I->getOperand(0).getMBB(); |
2313 | continue; |
2314 | } |
2315 | |
2316 | // If the block has any instructions after a JMP, delete them. |
2317 | while (std::next(I) != MBB.end()) |
2318 | std::next(I)->eraseFromParent(); |
2319 | |
2320 | Cond.clear(); |
2321 | FBB = nullptr; |
2322 | |
2323 | // Delete the JMP if it's equivalent to a fall-through. |
2324 | if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) { |
2325 | TBB = nullptr; |
2326 | I->eraseFromParent(); |
2327 | I = MBB.end(); |
2328 | UnCondBrIter = MBB.end(); |
2329 | continue; |
2330 | } |
2331 | |
2332 | // TBB is used to indicate the unconditional destination. |
2333 | TBB = I->getOperand(0).getMBB(); |
2334 | continue; |
2335 | } |
2336 | |
2337 | // Handle conditional branches. |
2338 | X86::CondCode BranchCode = X86::getCondFromBranch(*I); |
2339 | if (BranchCode == X86::COND_INVALID) |
2340 | return true; // Can't handle indirect branch. |
2341 | |
2342 | // In practice we should never have an undef EFLAGS operand; if we do, |
2343 | // abort here as we are not prepared to preserve the flag. |
2344 | if (I->findRegisterUseOperand(X86::EFLAGS)->isUndef()) |
2345 | return true; |
2346 | |
2347 | // Working from the bottom, handle the first conditional branch. |
2348 | if (Cond.empty()) { |
2349 | MachineBasicBlock *TargetBB = I->getOperand(0).getMBB(); |
2350 | if (AllowModify && UnCondBrIter != MBB.end() && |
2351 | MBB.isLayoutSuccessor(TargetBB)) { |
2352 | // If we can modify the code and it ends in something like: |
2353 | // |
2354 | // jCC L1 |
2355 | // jmp L2 |
2356 | // L1: |
2357 | // ... |
2358 | // L2: |
2359 | // |
2360 | // Then we can change this to: |
2361 | // |
2362 | // jnCC L2 |
2363 | // L1: |
2364 | // ... |
2365 | // L2: |
2366 | // |
2367 | // Which is a bit more efficient. |
2368 | // We conditionally jump to the fall-through block. |
2369 | BranchCode = GetOppositeBranchCondition(BranchCode); |
2370 | MachineBasicBlock::iterator OldInst = I; |
2371 | |
2372 | BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JCC_1)) |
2373 | .addMBB(UnCondBrIter->getOperand(0).getMBB()) |
2374 | .addImm(BranchCode); |
2375 | BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_1)) |
2376 | .addMBB(TargetBB); |
2377 | |
2378 | OldInst->eraseFromParent(); |
2379 | UnCondBrIter->eraseFromParent(); |
2380 | |
2381 | // Restart the analysis. |
2382 | UnCondBrIter = MBB.end(); |
2383 | I = MBB.end(); |
2384 | continue; |
2385 | } |
2386 | |
2387 | FBB = TBB; |
2388 | TBB = I->getOperand(0).getMBB(); |
2389 | Cond.push_back(MachineOperand::CreateImm(BranchCode)); |
2390 | CondBranches.push_back(&*I); |
2391 | continue; |
2392 | } |
2393 | |
2394 | // Handle subsequent conditional branches. Only handle the case where all |
2395 | // conditional branches branch to the same destination and their condition |
2396 | // opcodes fit one of the special multi-branch idioms. |
2397 | assert(Cond.size() == 1); |
2398 | assert(TBB); |
2399 | |
2400 | // If the conditions are the same, we can leave them alone. |
2401 | X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm(); |
2402 | auto NewTBB = I->getOperand(0).getMBB(); |
2403 | if (OldBranchCode == BranchCode && TBB == NewTBB) |
2404 | continue; |
2405 | |
2406 | // If they differ, see if they fit one of the known patterns. Theoretically, |
2407 | // we could handle more patterns here, but we shouldn't expect to see them |
2408 | // if instruction selection has done a reasonable job. |
2409 | if (TBB == NewTBB && |
2410 | ((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) || |
2411 | (OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) { |
2412 | BranchCode = X86::COND_NE_OR_P; |
2413 | } else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) || |
2414 | (OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) { |
2415 | if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB))) |
2416 | return true; |
2417 | |
2418 | // X86::COND_E_AND_NP usually has two different branch destinations. |
2419 | // |
2420 | // JP B1 |
2421 | // JE B2 |
2422 | // JMP B1 |
2423 | // B1: |
2424 | // B2: |
2425 | // |
2426 | // Here this condition branches to B2 only if NP && E. It has another |
2427 | // equivalent form: |
2428 | // |
2429 | // JNE B1 |
2430 | // JNP B2 |
2431 | // JMP B1 |
2432 | // B1: |
2433 | // B2: |
2434 | // |
2435 | // Similarly it branches to B2 only if E && NP. That is why this condition |
2436 | // is named COND_E_AND_NP. |
2437 | BranchCode = X86::COND_E_AND_NP; |
2438 | } else |
2439 | return true; |
2440 | |
2441 | // Update the MachineOperand. |
2442 | Cond[0].setImm(BranchCode); |
2443 | CondBranches.push_back(&*I); |
2444 | } |
2445 | |
2446 | return false; |
2447 | } |
2448 | |
2449 | bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB, |
2450 | MachineBasicBlock *&TBB, |
2451 | MachineBasicBlock *&FBB, |
2452 | SmallVectorImpl<MachineOperand> &Cond, |
2453 | bool AllowModify) const { |
2454 | SmallVector<MachineInstr *, 4> CondBranches; |
2455 | return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify); |
2456 | } |
2457 | |
2458 | bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB, |
2459 | MachineBranchPredicate &MBP, |
2460 | bool AllowModify) const { |
2461 | using namespace std::placeholders; |
2462 | |
2463 | SmallVector<MachineOperand, 4> Cond; |
2464 | SmallVector<MachineInstr *, 4> CondBranches; |
2465 | if (AnalyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches, |
2466 | AllowModify)) |
2467 | return true; |
2468 | |
2469 | if (Cond.size() != 1) |
2470 | return true; |
2471 | |
2472 | assert(MBP.TrueDest && "expected!"); |
2473 | |
2474 | if (!MBP.FalseDest) |
2475 | MBP.FalseDest = MBB.getNextNode(); |
2476 | |
2477 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
2478 | |
2479 | MachineInstr *ConditionDef = nullptr; |
2480 | bool SingleUseCondition = true; |
2481 | |
2482 | for (auto I = std::next(MBB.rbegin()), E = MBB.rend(); I != E; ++I) { |
2483 | if (I->modifiesRegister(X86::EFLAGS, TRI)) { |
2484 | ConditionDef = &*I; |
2485 | break; |
2486 | } |
2487 | |
2488 | if (I->readsRegister(X86::EFLAGS, TRI)) |
2489 | SingleUseCondition = false; |
2490 | } |
2491 | |
2492 | if (!ConditionDef) |
2493 | return true; |
2494 | |
2495 | if (SingleUseCondition) { |
2496 | for (auto *Succ : MBB.successors()) |
2497 | if (Succ->isLiveIn(X86::EFLAGS)) |
2498 | SingleUseCondition = false; |
2499 | } |
2500 | |
2501 | MBP.ConditionDef = ConditionDef; |
2502 | MBP.SingleUseCondition = SingleUseCondition; |
2503 | |
2504 | // Currently we only recognize the simple pattern: |
2505 | // |
2506 | // test %reg, %reg |
2507 | // je %label |
2508 | // |
2509 | const unsigned TestOpcode = |
2510 | Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr; |
2511 | |
2512 | if (ConditionDef->getOpcode() == TestOpcode && |
2513 | ConditionDef->getNumOperands() == 3 && |
2514 | ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) && |
2515 | (Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) { |
2516 | MBP.LHS = ConditionDef->getOperand(0); |
2517 | MBP.RHS = MachineOperand::CreateImm(0); |
2518 | MBP.Predicate = Cond[0].getImm() == X86::COND_NE |
2519 | ? MachineBranchPredicate::PRED_NE |
2520 | : MachineBranchPredicate::PRED_EQ; |
2521 | return false; |
2522 | } |
2523 | |
2524 | return true; |
2525 | } |
2526 | |
2527 | unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB, |
2528 | int *BytesRemoved) const { |
2529 | assert(!BytesRemoved && "code size not handled"); |
2530 | |
2531 | MachineBasicBlock::iterator I = MBB.end(); |
2532 | unsigned Count = 0; |
2533 | |
2534 | while (I != MBB.begin()) { |
2535 | --I; |
2536 | if (I->isDebugInstr()) |
2537 | continue; |
2538 | if (I->getOpcode() != X86::JMP_1 && |
2539 | X86::getCondFromBranch(*I) == X86::COND_INVALID) |
2540 | break; |
2541 | // Remove the branch. |
2542 | I->eraseFromParent(); |
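| // Erasing invalidates I, so restart the scan from the end of the block. |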
2543 | I = MBB.end(); |
2544 | ++Count; |
2545 | } |
2546 | |
2547 | return Count; |
2548 | } |
2549 | |
2550 | unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB, |
2551 | MachineBasicBlock *TBB, |
2552 | MachineBasicBlock *FBB, |
2553 | ArrayRef<MachineOperand> Cond, |
2554 | const DebugLoc &DL, |
2555 | int *BytesAdded) const { |
2556 | // Shouldn't be a fall-through. |
2557 | assert(TBB && "insertBranch must not be told to insert a fallthrough"); |
2558 | assert((Cond.size() == 1 || Cond.size() == 0) && |
2559 | "X86 branch conditions have one component!"); |
2560 | assert(!BytesAdded && "code size not handled"); |
2561 | |
2562 | if (Cond.empty()) { |
2563 | // Unconditional branch? |
2564 | assert(!FBB && "Unconditional branch with multiple successors!"); |
2565 | BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB); |
2566 | return 1; |
2567 | } |
2568 | |
2569 | // If FBB is null, it is implied to be a fall-through block. |
2570 | bool FallThru = FBB == nullptr; |
2571 | |
2572 | // Conditional branch. |
2573 | unsigned Count = 0; |
2574 | X86::CondCode CC = (X86::CondCode)Cond[0].getImm(); |
2575 | switch (CC) { |
2576 | case X86::COND_NE_OR_P: |
2577 | // Synthesize NE_OR_P with two branches. |
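| //   JNE TBB |
| //   JP  TBB |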
2578 | BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NE); |
2579 | ++Count; |
2580 | BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_P); |
2581 | ++Count; |
2582 | break; |
2583 | case X86::COND_E_AND_NP: |
2584 | // Use the next block of MBB as FBB if it is null. |
2585 | if (FBB == nullptr) { |
2586 | FBB = getFallThroughMBB(&MBB, TBB); |
2587 | assert(FBB && "MBB cannot be the last block in function when the false " |
2588 | "body is a fall-through."); |
2589 | } |
2590 | // Synthesize COND_E_AND_NP with two branches. |
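| //   JNE FBB |
| //   JNP TBB |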
2591 | BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(FBB).addImm(X86::COND_NE); |
2592 | ++Count; |
2593 | BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NP); |
2594 | ++Count; |
2595 | break; |
2596 | default: { |
2597 | BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(CC); |
2598 | ++Count; |
2599 | } |
2600 | } |
2601 | if (!FallThru) { |
2602 | // Two-way conditional branch. Insert the second branch. |
2603 | BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB); |
2604 | ++Count; |
2605 | } |
2606 | return Count; |
2607 | } |
2608 | |
2609 | bool X86InstrInfo:: |
2610 | canInsertSelect(const MachineBasicBlock &MBB, |
2611 | ArrayRef<MachineOperand> Cond, |
2612 | unsigned TrueReg, unsigned FalseReg, |
2613 | int &CondCycles, int &TrueCycles, int &FalseCycles) const { |
2614 | // Not all subtargets have cmov instructions. |
2615 | if (!Subtarget.hasCMov()) |
2616 | return false; |
2617 | if (Cond.size() != 1) |
2618 | return false; |
2619 | // We cannot do the composite conditions, at least not in SSA form. |
2620 | if ((X86::CondCode)Cond[0].getImm() > X86::LAST_VALID_COND) |
2621 | return false; |
2622 | |
2623 | // Check register classes. |
2624 | const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
2625 | const TargetRegisterClass *RC = |
2626 | RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg)); |
2627 | if (!RC) |
2628 | return false; |
2629 | |
2630 | // We have cmov instructions for 16-, 32-, and 64-bit general-purpose registers. |
2631 | if (X86::GR16RegClass.hasSubClassEq(RC) || |
2632 | X86::GR32RegClass.hasSubClassEq(RC) || |
2633 | X86::GR64RegClass.hasSubClassEq(RC)) { |
2634 | // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy |
2635 | // Bridge. Probably Ivy Bridge as well. |
2636 | CondCycles = 2; |
2637 | TrueCycles = 2; |
2638 | FalseCycles = 2; |
2639 | return true; |
2640 | } |
2641 | |
2642 | // Can't do vectors. |
2643 | return false; |
2644 | } |
2645 | |
2646 | void X86InstrInfo::insertSelect(MachineBasicBlock &MBB, |
2647 | MachineBasicBlock::iterator I, |
2648 | const DebugLoc &DL, unsigned DstReg, |
2649 | ArrayRef<MachineOperand> Cond, unsigned TrueReg, |
2650 | unsigned FalseReg) const { |
2651 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
2652 | const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); |
2653 | const TargetRegisterClass &RC = *MRI.getRegClass(DstReg); |
2654 | assert(Cond.size() == 1 && "Invalid Cond array"); |
2655 | unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(RC) / 8, |
2656 | false /*HasMemoryOperand*/); |
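| // CMOVcc is two-address: the destination is tied to the first source, so |
| // FalseReg flows through when the condition is false and TrueReg is copied |
| // when it is true. |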
2657 | BuildMI(MBB, I, DL, get(Opc), DstReg) |
2658 | .addReg(FalseReg) |
2659 | .addReg(TrueReg) |
2660 | .addImm(Cond[0].getImm()); |
2661 | } |
2662 | |
2663 | /// Test if the given register is a physical h register. |
2664 | static bool isHReg(unsigned Reg) { |
2665 | return X86::GR8_ABCD_HRegClass.contains(Reg); |
2666 | } |
2667 | |
2668 | // Try to copy between VR128/VR64 and GR64 registers. |
2669 | static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg, |
2670 | const X86Subtarget &Subtarget) { |
2671 | bool HasAVX = Subtarget.hasAVX(); |
2672 | bool HasAVX512 = Subtarget.hasAVX512(); |
2673 | |
2674 | // SrcReg(MaskReg) -> DestReg(GR64) |
2675 | // SrcReg(MaskReg) -> DestReg(GR32) |
2676 | |
2677 | // All KMASK RegClasses hold the same k registers, so testing against any one of them suffices. |
2678 | if (X86::VK16RegClass.contains(SrcReg)) { |
2679 | if (X86::GR64RegClass.contains(DestReg)) { |
2680 | assert(Subtarget.hasBWI()); |
2681 | return X86::KMOVQrk; |
2682 | } |
2683 | if (X86::GR32RegClass.contains(DestReg)) |
2684 | return Subtarget.hasBWI() ? X86::KMOVDrk : X86::KMOVWrk; |
2685 | } |
2686 | |
2687 | // SrcReg(GR64) -> DestReg(MaskReg) |
2688 | // SrcReg(GR32) -> DestReg(MaskReg) |
2689 | |
2690 | // All KMASK RegClasses hold the same k registers, so testing against any one of them suffices. |
2691 | if (X86::VK16RegClass.contains(DestReg)) { |
2692 | if (X86::GR64RegClass.contains(SrcReg)) { |
2693 | assert(Subtarget.hasBWI()); |
2694 | return X86::KMOVQkr; |
2695 | } |
2696 | if (X86::GR32RegClass.contains(SrcReg)) |
2697 | return Subtarget.hasBWI() ? X86::KMOVDkr : X86::KMOVWkr; |
2698 | } |
2699 | |
2700 | |
2701 | // SrcReg(VR128) -> DestReg(GR64) |
2702 | // SrcReg(VR64) -> DestReg(GR64) |
2703 | // SrcReg(GR64) -> DestReg(VR128) |
2704 | // SrcReg(GR64) -> DestReg(VR64) |
2705 | |
2706 | if (X86::GR64RegClass.contains(DestReg)) { |
2707 | if (X86::VR128XRegClass.contains(SrcReg)) |
2708 | // Copy from a VR128 register to a GR64 register. |
2709 | return HasAVX512 ? X86::VMOVPQIto64Zrr : |
2710 | HasAVX ? X86::VMOVPQIto64rr : |
2711 | X86::MOVPQIto64rr; |
2712 | if (X86::VR64RegClass.contains(SrcReg)) |
2713 | // Copy from a VR64 register to a GR64 register. |
2714 | return X86::MMX_MOVD64from64rr; |
2715 | } else if (X86::GR64RegClass.contains(SrcReg)) { |
2716 | // Copy from a GR64 register to a VR128 register. |
2717 | if (X86::VR128XRegClass.contains(DestReg)) |
2718 | return HasAVX512 ? X86::VMOV64toPQIZrr : |
2719 | HasAVX ? X86::VMOV64toPQIrr : |
2720 | X86::MOV64toPQIrr; |
2721 | // Copy from a GR64 register to a VR64 register. |
2722 | if (X86::VR64RegClass.contains(DestReg)) |
2723 | return X86::MMX_MOVD64to64rr; |
2724 | } |
2725 | |
2726 | // SrcReg(VR128) -> DestReg(GR32) |
2727 | // SrcReg(GR32) -> DestReg(VR128) |
2728 | |
2729 | if (X86::GR32RegClass.contains(DestReg) && |
2730 | X86::VR128XRegClass.contains(SrcReg)) |
2731 | // Copy from a VR128 register to a GR32 register. |
2732 | return HasAVX512 ? X86::VMOVPDI2DIZrr : |
2733 | HasAVX ? X86::VMOVPDI2DIrr : |
2734 | X86::MOVPDI2DIrr; |
2735 | |
2736 | if (X86::VR128XRegClass.contains(DestReg) && |
2737 | X86::GR32RegClass.contains(SrcReg)) |
2738 | // Copy from a GR32 register to a VR128 register. |
2739 | return HasAVX512 ? X86::VMOVDI2PDIZrr : |
2740 | HasAVX ? X86::VMOVDI2PDIrr : |
2741 | X86::MOVDI2PDIrr; |
2742 | return 0; |
2743 | } |
2744 | |
2745 | void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB, |
2746 | MachineBasicBlock::iterator MI, |
2747 | const DebugLoc &DL, unsigned DestReg, |
2748 | unsigned SrcReg, bool KillSrc) const { |
2749 | // First deal with the normal symmetric copies. |
2750 | bool HasAVX = Subtarget.hasAVX(); |
2751 | bool HasVLX = Subtarget.hasVLX(); |
2752 | unsigned Opc = 0; |
2753 | if (X86::GR64RegClass.contains(DestReg, SrcReg)) |
2754 | Opc = X86::MOV64rr; |
2755 | else if (X86::GR32RegClass.contains(DestReg, SrcReg)) |
2756 | Opc = X86::MOV32rr; |
2757 | else if (X86::GR16RegClass.contains(DestReg, SrcReg)) |
2758 | Opc = X86::MOV16rr; |
2759 | else if (X86::GR8RegClass.contains(DestReg, SrcReg)) { |
2760 | // Copying to or from a physical H register on x86-64 requires a NOREX |
2761 | // move. Otherwise use a normal move. |
2762 | if ((isHReg(DestReg) || isHReg(SrcReg)) && |
2763 | Subtarget.is64Bit()) { |
2764 | Opc = X86::MOV8rr_NOREX; |
2765 | // Both operands must be encodable without a REX prefix. |
2766 | assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) && |
2767 | "8-bit H register can not be copied outside GR8_NOREX"); |
2768 | } else |
2769 | Opc = X86::MOV8rr; |
2770 | } |
2771 | else if (X86::VR64RegClass.contains(DestReg, SrcReg)) |
2772 | Opc = X86::MMX_MOVQ64rr; |
2773 | else if (X86::VR128XRegClass.contains(DestReg, SrcReg)) { |
2774 | if (HasVLX) |
2775 | Opc = X86::VMOVAPSZ128rr; |
2776 | else if (X86::VR128RegClass.contains(DestReg, SrcReg)) |
2777 | Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr; |
2778 | else { |
2779 | // If this is an extended register and we don't have VLX, we need to use a |
2780 | // 512-bit move. |
2781 | Opc = X86::VMOVAPSZrr; |
2782 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
2783 | DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_xmm, |
2784 | &X86::VR512RegClass); |
2785 | SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, |
2786 | &X86::VR512RegClass); |
2787 | } |
2788 | } else if (X86::VR256XRegClass.contains(DestReg, SrcReg)) { |
2789 | if (HasVLX) |
2790 | Opc = X86::VMOVAPSZ256rr; |
2791 | else if (X86::VR256RegClass.contains(DestReg, SrcReg)) |
2792 | Opc = X86::VMOVAPSYrr; |
2793 | else { |
2794 | // If this is an extended register and we don't have VLX, we need to use a |
2795 | // 512-bit move. |
2796 | Opc = X86::VMOVAPSZrr; |
2797 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
2798 | DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_ymm, |
2799 | &X86::VR512RegClass); |
2800 | SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, |
2801 | &X86::VR512RegClass); |
2802 | } |
2803 | } else if (X86::VR512RegClass.contains(DestReg, SrcReg)) |
2804 | Opc = X86::VMOVAPSZrr; |
2805 | // All KMASK RegClasses hold the same k registers, so testing against any one of them suffices. |
2806 | else if (X86::VK16RegClass.contains(DestReg, SrcReg)) |
2807 | Opc = Subtarget.hasBWI() ? X86::KMOVQkk : X86::KMOVWkk; |
2808 | if (!Opc) |
2809 | Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget); |
2810 | |
2811 | if (Opc) { |
2812 | BuildMI(MBB, MI, DL, get(Opc), DestReg) |
2813 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2814 | return; |
2815 | } |
2816 | |
2817 | if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) { |
2818 | // FIXME: We use a fatal error here because historically LLVM has tried to |
2819 | // lower some of these physreg copies and we want to ensure we get |
2820 | // reasonable bug reports if someone encounters a case no other testing |
2821 | // found. This path should be removed after the LLVM 7 release. |
2822 | report_fatal_error("Unable to copy EFLAGS physical register!"); |
2823 | } |
2824 | |
2825 | LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to " |
2826 | << RI.getName(DestReg) << '\n'); |
2827 | report_fatal_error("Cannot emit physreg copy instruction"); |
2828 | } |
2829 | |
2830 | bool X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI, |
2831 | const MachineOperand *&Src, |
2832 | const MachineOperand *&Dest) const { |
2833 | if (MI.isMoveReg()) { |
2834 | Dest = &MI.getOperand(0); |
2835 | Src = &MI.getOperand(1); |
2836 | return true; |
2837 | } |
2838 | return false; |
2839 | } |
2840 | |
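| /// Pick the opcode for a reload (load == true) or spill (load == false) of |
| /// the given register class, preferring aligned vector moves whenever the |
| /// slot is known to be suitably aligned. |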
2841 | static unsigned getLoadStoreRegOpcode(unsigned Reg, |
2842 | const TargetRegisterClass *RC, |
2843 | bool isStackAligned, |
2844 | const X86Subtarget &STI, |
2845 | bool load) { |
2846 | bool HasAVX = STI.hasAVX(); |
2847 | bool HasAVX512 = STI.hasAVX512(); |
2848 | bool HasVLX = STI.hasVLX(); |
2849 | |
2850 | switch (STI.getRegisterInfo()->getSpillSize(*RC)) { |
2851 | default: |
2852 | llvm_unreachable("Unknown spill size")::llvm::llvm_unreachable_internal("Unknown spill size", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86InstrInfo.cpp" , 2852); |
2853 | case 1: |
2854 | assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass"); |
2855 | if (STI.is64Bit()) |
2856 | // Copying to or from a physical H register on x86-64 requires a NOREX |
2857 | // move. Otherwise use a normal move. |
2858 | if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC)) |
2859 | return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX; |
2860 | return load ? X86::MOV8rm : X86::MOV8mr; |
2861 | case 2: |
2862 | if (X86::VK16RegClass.hasSubClassEq(RC)) |
2863 | return load ? X86::KMOVWkm : X86::KMOVWmk; |
2864 | assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass"); |
2865 | return load ? X86::MOV16rm : X86::MOV16mr; |
2866 | case 4: |
2867 | if (X86::GR32RegClass.hasSubClassEq(RC)) |
2868 | return load ? X86::MOV32rm : X86::MOV32mr; |
2869 | if (X86::FR32XRegClass.hasSubClassEq(RC)) |
2870 | return load ? |
2871 | (HasAVX512 ? X86::VMOVSSZrm : HasAVX ? X86::VMOVSSrm : X86::MOVSSrm) : |
2872 | (HasAVX512 ? X86::VMOVSSZmr : HasAVX ? X86::VMOVSSmr : X86::MOVSSmr); |
2873 | if (X86::RFP32RegClass.hasSubClassEq(RC)) |
2874 | return load ? X86::LD_Fp32m : X86::ST_Fp32m; |
2875 | if (X86::VK32RegClass.hasSubClassEq(RC)) { |
2876 | assert(STI.hasBWI() && "KMOVD requires BWI"); |
2877 | return load ? X86::KMOVDkm : X86::KMOVDmk; |
2878 | } |
2879 | llvm_unreachable("Unknown 4-byte regclass")::llvm::llvm_unreachable_internal("Unknown 4-byte regclass", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86InstrInfo.cpp" , 2879); |
2880 | case 8: |
2881 | if (X86::GR64RegClass.hasSubClassEq(RC)) |
2882 | return load ? X86::MOV64rm : X86::MOV64mr; |
2883 | if (X86::FR64XRegClass.hasSubClassEq(RC)) |
2884 | return load ? |
2885 | (HasAVX512 ? X86::VMOVSDZrm : HasAVX ? X86::VMOVSDrm : X86::MOVSDrm) : |
2886 | (HasAVX512 ? X86::VMOVSDZmr : HasAVX ? X86::VMOVSDmr : X86::MOVSDmr); |
2887 | if (X86::VR64RegClass.hasSubClassEq(RC)) |
2888 | return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr; |
2889 | if (X86::RFP64RegClass.hasSubClassEq(RC)) |
2890 | return load ? X86::LD_Fp64m : X86::ST_Fp64m; |
2891 | if (X86::VK64RegClass.hasSubClassEq(RC)) { |
2892 | assert(STI.hasBWI() && "KMOVQ requires BWI"); |
2893 | return load ? X86::KMOVQkm : X86::KMOVQmk; |
2894 | } |
2895 | llvm_unreachable("Unknown 8-byte regclass")::llvm::llvm_unreachable_internal("Unknown 8-byte regclass", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86InstrInfo.cpp" , 2895); |
2896 | case 10: |
2897 | assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass"); |
2898 | return load ? X86::LD_Fp80m : X86::ST_FpP80m; |
2899 | case 16: { |
2900 | if (X86::VR128XRegClass.hasSubClassEq(RC)) { |
2901 | // If the stack is realigned we can use aligned stores. |
2902 | if (isStackAligned) |
2903 | return load ? |
2904 | (HasVLX ? X86::VMOVAPSZ128rm : |
2905 | HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX : |
2906 | HasAVX ? X86::VMOVAPSrm : |
2907 | X86::MOVAPSrm): |
2908 | (HasVLX ? X86::VMOVAPSZ128mr : |
2909 | HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX : |
2910 | HasAVX ? X86::VMOVAPSmr : |
2911 | X86::MOVAPSmr); |
2912 | else |
2913 | return load ? |
2914 | (HasVLX ? X86::VMOVUPSZ128rm : |
2915 | HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX : |
2916 | HasAVX ? X86::VMOVUPSrm : |
2917 | X86::MOVUPSrm): |
2918 | (HasVLX ? X86::VMOVUPSZ128mr : |
2919 | HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX : |
2920 | HasAVX ? X86::VMOVUPSmr : |
2921 | X86::MOVUPSmr); |
2922 | } |
2923 | if (X86::BNDRRegClass.hasSubClassEq(RC)) { |
2924 | if (STI.is64Bit()) |
2925 | return load ? X86::BNDMOV64rm : X86::BNDMOV64mr; |
2926 | else |
2927 | return load ? X86::BNDMOV32rm : X86::BNDMOV32mr; |
2928 | } |
2929 | llvm_unreachable("Unknown 16-byte regclass")::llvm::llvm_unreachable_internal("Unknown 16-byte regclass", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86InstrInfo.cpp" , 2929); |
2930 | } |
2931 | case 32: |
2932 | assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass"); |
2933 | // If the stack is realigned we can use aligned stores. |
2934 | if (isStackAligned) |
2935 | return load ? |
2936 | (HasVLX ? X86::VMOVAPSZ256rm : |
2937 | HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX : |
2938 | X86::VMOVAPSYrm) : |
2939 | (HasVLX ? X86::VMOVAPSZ256mr : |
2940 | HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX : |
2941 | X86::VMOVAPSYmr); |
2942 | else |
2943 | return load ? |
2944 | (HasVLX ? X86::VMOVUPSZ256rm : |
2945 | HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX : |
2946 | X86::VMOVUPSYrm) : |
2947 | (HasVLX ? X86::VMOVUPSZ256mr : |
2948 | HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX : |
2949 | X86::VMOVUPSYmr); |
2950 | case 64: |
2951 | assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass"); |
2952 | assert(STI.hasAVX512() && "Using 512-bit register requires AVX512"); |
2953 | if (isStackAligned) |
2954 | return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr; |
2955 | else |
2956 | return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr; |
2957 | } |
2958 | } |
2959 | |
2960 | bool X86InstrInfo::getMemOperandWithOffset( |
2961 | const MachineInstr &MemOp, const MachineOperand *&BaseOp, int64_t &Offset, |
2962 | const TargetRegisterInfo *TRI) const { |
2963 | const MCInstrDesc &Desc = MemOp.getDesc(); |
2964 | int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags); |
2965 | if (MemRefBegin < 0) |
2966 | return false; |
2967 | |
2968 | MemRefBegin += X86II::getOperandBias(Desc); |
2969 | |
2970 | BaseOp = &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg); |
2971 | if (!BaseOp->isReg()) // Can be an MO_FrameIndex |
2972 | return false; |
2973 | |
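| // Only the simple [BaseReg + Disp] form (scale 1, no index register) can be |
| // summarized as a base operand plus an immediate offset. |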
2974 | if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1) |
2975 | return false; |
2976 | |
2977 | if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() != |
2978 | X86::NoRegister) |
2979 | return false; |
2980 | |
2981 | const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp); |
2982 | |
2983 | // Displacement can be symbolic |
2984 | if (!DispMO.isImm()) |
2985 | return false; |
2986 | |
2987 | Offset = DispMO.getImm(); |
2988 | |
2989 | assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base " |
2990 | "operands of type register."); |
2991 | return true; |
2992 | } |
2993 | |
2994 | static unsigned getStoreRegOpcode(unsigned SrcReg, |
2995 | const TargetRegisterClass *RC, |
2996 | bool isStackAligned, |
2997 | const X86Subtarget &STI) { |
2998 | return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, STI, false); |
2999 | } |
3000 | |
3001 | |
3002 | static unsigned getLoadRegOpcode(unsigned DestReg, |
3003 | const TargetRegisterClass *RC, |
3004 | bool isStackAligned, |
3005 | const X86Subtarget &STI) { |
3006 | return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, STI, true); |
3007 | } |
3008 | |
3009 | void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, |
3010 | MachineBasicBlock::iterator MI, |
3011 | unsigned SrcReg, bool isKill, int FrameIdx, |
3012 | const TargetRegisterClass *RC, |
3013 | const TargetRegisterInfo *TRI) const { |
3014 | const MachineFunction &MF = *MBB.getParent(); |
3015 | assert(MF.getFrameInfo().getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) && |
3016 | "Stack slot too small for store"); |
3017 | unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16); |
3018 | bool isAligned = |
3019 | (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) || |
3020 | RI.canRealignStack(MF); |
3021 | unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget); |
3022 | addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx) |
3023 | .addReg(SrcReg, getKillRegState(isKill)); |
3024 | } |
3025 | |
3026 | void X86InstrInfo::storeRegToAddr( |
3027 | MachineFunction &MF, unsigned SrcReg, bool isKill, |
3028 | SmallVectorImpl<MachineOperand> &Addr, const TargetRegisterClass *RC, |
3029 | ArrayRef<MachineMemOperand *> MMOs, |
3030 | SmallVectorImpl<MachineInstr *> &NewMIs) const { |
3031 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
3032 | unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); |
3033 | bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment; |
3034 | unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget); |
3035 | DebugLoc DL; |
3036 | MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc)); |
3037 | for (unsigned i = 0, e = Addr.size(); i != e; ++i) |
3038 | MIB.add(Addr[i]); |
3039 | MIB.addReg(SrcReg, getKillRegState(isKill)); |
3040 | MIB.setMemRefs(MMOs); |
3041 | NewMIs.push_back(MIB); |
3042 | } |
3043 | |
3044 | |
3045 | void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, |
3046 | MachineBasicBlock::iterator MI, |
3047 | unsigned DestReg, int FrameIdx, |
3048 | const TargetRegisterClass *RC, |
3049 | const TargetRegisterInfo *TRI) const { |
3050 | const MachineFunction &MF = *MBB.getParent(); |
3051 | unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16); |
3052 | bool isAligned = |
3053 | (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) || |
3054 | RI.canRealignStack(MF); |
3055 | unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget); |
3056 | addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg), FrameIdx); |
3057 | } |
3058 | |
3059 | void X86InstrInfo::loadRegFromAddr( |
3060 | MachineFunction &MF, unsigned DestReg, |
3061 | SmallVectorImpl<MachineOperand> &Addr, const TargetRegisterClass *RC, |
3062 | ArrayRef<MachineMemOperand *> MMOs, |
3063 | SmallVectorImpl<MachineInstr *> &NewMIs) const { |
3064 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
3065 | unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); |
3066 | bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment; |
3067 | unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget); |
3068 | DebugLoc DL; |
3069 | MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg); |
3070 | for (unsigned i = 0, e = Addr.size(); i != e; ++i) |
3071 | MIB.add(Addr[i]); |
3072 | MIB.setMemRefs(MMOs); |
3073 | NewMIs.push_back(MIB); |
3074 | } |
3075 | |
3076 | bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, |
3077 | unsigned &SrcReg2, int &CmpMask, |
3078 | int &CmpValue) const { |
3079 | switch (MI.getOpcode()) { |
3080 | default: break; |
3081 | case X86::CMP64ri32: |
3082 | case X86::CMP64ri8: |
3083 | case X86::CMP32ri: |
3084 | case X86::CMP32ri8: |
3085 | case X86::CMP16ri: |
3086 | case X86::CMP16ri8: |
3087 | case X86::CMP8ri: |
3088 | SrcReg = MI.getOperand(0).getReg(); |
3089 | SrcReg2 = 0; |
3090 | if (MI.getOperand(1).isImm()) { |
3091 | CmpMask = ~0; |
3092 | CmpValue = MI.getOperand(1).getImm(); |
3093 | } else { |
3094 | CmpMask = CmpValue = 0; |
3095 | } |
3096 | return true; |
3097 | // A SUB can be used to perform a comparison. |
3098 | case X86::SUB64rm: |
3099 | case X86::SUB32rm: |
3100 | case X86::SUB16rm: |
3101 | case X86::SUB8rm: |
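| // The second source operand is in memory, so only the register operand can |
| // be reported back to the caller. |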
3102 | SrcReg = MI.getOperand(1).getReg(); |
3103 | SrcReg2 = 0; |
3104 | CmpMask = 0; |
3105 | CmpValue = 0; |
3106 | return true; |
3107 | case X86::SUB64rr: |
3108 | case X86::SUB32rr: |
3109 | case X86::SUB16rr: |
3110 | case X86::SUB8rr: |
3111 | SrcReg = MI.getOperand(1).getReg(); |
3112 | SrcReg2 = MI.getOperand(2).getReg(); |
3113 | CmpMask = 0; |
3114 | CmpValue = 0; |
3115 | return true; |
3116 | case X86::SUB64ri32: |
3117 | case X86::SUB64ri8: |
3118 | case X86::SUB32ri: |
3119 | case X86::SUB32ri8: |
3120 | case X86::SUB16ri: |
3121 | case X86::SUB16ri8: |
3122 | case X86::SUB8ri: |
3123 | SrcReg = MI.getOperand(1).getReg(); |
3124 | SrcReg2 = 0; |
3125 | if (MI.getOperand(2).isImm()) { |
3126 | CmpMask = ~0; |
3127 | CmpValue = MI.getOperand(2).getImm(); |
3128 | } else { |
3129 | CmpMask = CmpValue = 0; |
3130 | } |
3131 | return true; |
3132 | case X86::CMP64rr: |
3133 | case X86::CMP32rr: |
3134 | case X86::CMP16rr: |
3135 | case X86::CMP8rr: |
3136 | SrcReg = MI.getOperand(0).getReg(); |
3137 | SrcReg2 = MI.getOperand(1).getReg(); |
3138 | CmpMask = 0; |
3139 | CmpValue = 0; |
3140 | return true; |
3141 | case X86::TEST8rr: |
3142 | case X86::TEST16rr: |
3143 | case X86::TEST32rr: |
3144 | case X86::TEST64rr: |
3145 | SrcReg = MI.getOperand(0).getReg(); |
3146 | if (MI.getOperand(1).getReg() != SrcReg) |
3147 | return false; |
3148 | // Compare against zero. |
3149 | SrcReg2 = 0; |
3150 | CmpMask = ~0; |
3151 | CmpValue = 0; |
3152 | return true; |
3153 | } |
3154 | return false; |
3155 | } |
3156 | |
3157 | /// Check whether the first instruction, whose only |
3158 | /// purpose is to update flags, can be made redundant. |
3159 | /// CMPrr can be made redundant by SUBrr if the operands are the same. |
3160 | /// This function can be extended later on. |
3161 | /// SrcReg, SrcReg2: register operands for FlagI. |
3162 | /// ImmValue: immediate for FlagI if it takes an immediate. |
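| /// For example, CMP64rr %a, %b is redundant after SUB64rr %a, %b; if the SUB |
| /// lists its operands in the opposite order, the caller must also swap the |
| /// condition codes of any EFLAGS users. |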
3163 | inline static bool isRedundantFlagInstr(const MachineInstr &FlagI, |
3164 | unsigned SrcReg, unsigned SrcReg2, |
3165 | int ImmMask, int ImmValue, |
3166 | const MachineInstr &OI) { |
3167 | if (((FlagI.getOpcode() == X86::CMP64rr && OI.getOpcode() == X86::SUB64rr) || |
3168 | (FlagI.getOpcode() == X86::CMP32rr && OI.getOpcode() == X86::SUB32rr) || |
3169 | (FlagI.getOpcode() == X86::CMP16rr && OI.getOpcode() == X86::SUB16rr) || |
3170 | (FlagI.getOpcode() == X86::CMP8rr && OI.getOpcode() == X86::SUB8rr)) && |
3171 | ((OI.getOperand(1).getReg() == SrcReg && |
3172 | OI.getOperand(2).getReg() == SrcReg2) || |
3173 | (OI.getOperand(1).getReg() == SrcReg2 && |
3174 | OI.getOperand(2).getReg() == SrcReg))) |
3175 | return true; |
3176 | |
3177 | if (ImmMask != 0 && |
3178 | ((FlagI.getOpcode() == X86::CMP64ri32 && |
3179 | OI.getOpcode() == X86::SUB64ri32) || |
3180 | (FlagI.getOpcode() == X86::CMP64ri8 && |
3181 | OI.getOpcode() == X86::SUB64ri8) || |
3182 | (FlagI.getOpcode() == X86::CMP32ri && OI.getOpcode() == X86::SUB32ri) || |
3183 | (FlagI.getOpcode() == X86::CMP32ri8 && |
3184 | OI.getOpcode() == X86::SUB32ri8) || |
3185 | (FlagI.getOpcode() == X86::CMP16ri && OI.getOpcode() == X86::SUB16ri) || |
3186 | (FlagI.getOpcode() == X86::CMP16ri8 && |
3187 | OI.getOpcode() == X86::SUB16ri8) || |
3188 | (FlagI.getOpcode() == X86::CMP8ri && OI.getOpcode() == X86::SUB8ri)) && |
3189 | OI.getOperand(1).getReg() == SrcReg && |
3190 | OI.getOperand(2).getImm() == ImmValue) |
3191 | return true; |
3192 | return false; |
3193 | } |
3194 | |
3195 | /// Check whether the definition can be converted |
3196 | /// to remove a comparison against zero. |
3197 | inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag) { |
3198 | NoSignFlag = false; |
3199 | |
3200 | switch (MI.getOpcode()) { |
3201 | default: return false; |
3202 | |
3203 | // The shift instructions only modify ZF if their shift count is non-zero. |
3204 | // N.B.: The processor truncates the shift count depending on the encoding. |
3205 | case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri:case X86::SAR64ri: |
3206 | case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri:case X86::SHR64ri: |
3207 | return getTruncatedShiftCount(MI, 2) != 0; |
3208 | |
3209 | // Some left shift instructions can be turned into LEA instructions but only |
3210 | // if their flags aren't used. Avoid transforming such instructions. |
3211 | case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri:case X86::SHL64ri:{ |
3212 | unsigned ShAmt = getTruncatedShiftCount(MI, 2); |
3213 | if (isTruncatedShiftCountForLEA(ShAmt)) return false; |
3214 | return ShAmt != 0; |
3215 | } |
3216 | |
3217 | case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8: |
3218 | case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8: |
3219 | return getTruncatedShiftCount(MI, 3) != 0; |
3220 | |
3221 | case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri: |
3222 | case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8: |
3223 | case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr: |
3224 | case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm: |
3225 | case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm: |
3226 | case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r: |
3227 | case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri: |
3228 | case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8: |
3229 | case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr: |
3230 | case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm: |
3231 | case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm: |
3232 | case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r: |
3233 | case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri: |
3234 | case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8: |
3235 | case X86::AND8ri: case X86::AND64rr: case X86::AND32rr: |
3236 | case X86::AND16rr: case X86::AND8rr: case X86::AND64rm: |
3237 | case X86::AND32rm: case X86::AND16rm: case X86::AND8rm: |
3238 | case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri: |
3239 | case X86::XOR32ri8: case X86::XOR16ri: case X86::XOR16ri8: |
3240 | case X86::XOR8ri: case X86::XOR64rr: case X86::XOR32rr: |
3241 | case X86::XOR16rr: case X86::XOR8rr: case X86::XOR64rm: |
3242 | case X86::XOR32rm: case X86::XOR16rm: case X86::XOR8rm: |
3243 | case X86::OR64ri32: case X86::OR64ri8: case X86::OR32ri: |
3244 | case X86::OR32ri8: case X86::OR16ri: case X86::OR16ri8: |
3245 | case X86::OR8ri: case X86::OR64rr: case X86::OR32rr: |
3246 | case X86::OR16rr: case X86::OR8rr: case X86::OR64rm: |
3247 | case X86::OR32rm: case X86::OR16rm: case X86::OR8rm: |
3248 | case X86::ADC64ri32: case X86::ADC64ri8: case X86::ADC32ri: |
3249 | case X86::ADC32ri8: case X86::ADC16ri: case X86::ADC16ri8: |
3250 | case X86::ADC8ri: case X86::ADC64rr: case X86::ADC32rr: |
3251 | case X86::ADC16rr: case X86::ADC8rr: case X86::ADC64rm: |
3252 | case X86::ADC32rm: case X86::ADC16rm: case X86::ADC8rm: |
3253 | case X86::SBB64ri32: case X86::SBB64ri8: case X86::SBB32ri: |
3254 | case X86::SBB32ri8: case X86::SBB16ri: case X86::SBB16ri8: |
3255 | case X86::SBB8ri: case X86::SBB64rr: case X86::SBB32rr: |
3256 | case X86::SBB16rr: case X86::SBB8rr: case X86::SBB64rm: |
3257 | case X86::SBB32rm: case X86::SBB16rm: case X86::SBB8rm: |
3258 | case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r: |
3259 | case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1:case X86::SAR64r1: |
3260 | case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1:case X86::SHR64r1: |
3261 | case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1:case X86::SHL64r1: |
3262 | case X86::ANDN32rr: case X86::ANDN32rm: |
3263 | case X86::ANDN64rr: case X86::ANDN64rm: |
3264 | case X86::BLSI32rr: case X86::BLSI32rm: |
3265 | case X86::BLSI64rr: case X86::BLSI64rm: |
3266 | case X86::BLSMSK32rr:case X86::BLSMSK32rm: |
3267 | case X86::BLSMSK64rr:case X86::BLSMSK64rm: |
3268 | case X86::BLSR32rr: case X86::BLSR32rm: |
3269 | case X86::BLSR64rr: case X86::BLSR64rm: |
3270 | case X86::BZHI32rr: case X86::BZHI32rm: |
3271 | case X86::BZHI64rr: case X86::BZHI64rm: |
3272 | case X86::LZCNT16rr: case X86::LZCNT16rm: |
3273 | case X86::LZCNT32rr: case X86::LZCNT32rm: |
3274 | case X86::LZCNT64rr: case X86::LZCNT64rm: |
3275 | case X86::POPCNT16rr:case X86::POPCNT16rm: |
3276 | case X86::POPCNT32rr:case X86::POPCNT32rm: |
3277 | case X86::POPCNT64rr:case X86::POPCNT64rm: |
3278 | case X86::TZCNT16rr: case X86::TZCNT16rm: |
3279 | case X86::TZCNT32rr: case X86::TZCNT32rm: |
3280 | case X86::TZCNT64rr: case X86::TZCNT64rm: |
3281 | case X86::BLCFILL32rr: case X86::BLCFILL32rm: |
3282 | case X86::BLCFILL64rr: case X86::BLCFILL64rm: |
3283 | case X86::BLCI32rr: case X86::BLCI32rm: |
3284 | case X86::BLCI64rr: case X86::BLCI64rm: |
3285 | case X86::BLCIC32rr: case X86::BLCIC32rm: |
3286 | case X86::BLCIC64rr: case X86::BLCIC64rm: |
3287 | case X86::BLCMSK32rr: case X86::BLCMSK32rm: |
3288 | case X86::BLCMSK64rr: case X86::BLCMSK64rm: |
3289 | case X86::BLCS32rr: case X86::BLCS32rm: |
3290 | case X86::BLCS64rr: case X86::BLCS64rm: |
3291 | case X86::BLSFILL32rr: case X86::BLSFILL32rm: |
3292 | case X86::BLSFILL64rr: case X86::BLSFILL64rm: |
3293 | case X86::BLSIC32rr: case X86::BLSIC32rm: |
3294 | case X86::BLSIC64rr: case X86::BLSIC64rm: |
3295 | case X86::T1MSKC32rr: case X86::T1MSKC32rm: |
3296 | case X86::T1MSKC64rr: case X86::T1MSKC64rm: |
3297 | case X86::TZMSK32rr: case X86::TZMSK32rm: |
3298 | case X86::TZMSK64rr: case X86::TZMSK64rm: |
3299 | return true; |
3300 | case X86::BEXTR32rr: case X86::BEXTR64rr: |
3301 | case X86::BEXTR32rm: case X86::BEXTR64rm: |
3302 | case X86::BEXTRI32ri: case X86::BEXTRI32mi: |
3303 | case X86::BEXTRI64ri: case X86::BEXTRI64mi: |
3304 | // BEXTR doesn't update the sign flag so we can't use it. |
3305 | NoSignFlag = true; |
3306 | return true; |
3307 | } |
3308 | } |
3309 | |
3310 | /// Check whether the use can be converted to remove a comparison against zero. |
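| /// Each returned condition tests "source == 0" directly: LZCNT and TZCNT set |
| /// CF exactly when their source is zero, while POPCNT, BSF, and BSR set ZF. |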
3311 | static X86::CondCode isUseDefConvertible(const MachineInstr &MI) { |
3312 | switch (MI.getOpcode()) { |
3313 | default: return X86::COND_INVALID; |
3314 | case X86::LZCNT16rr: case X86::LZCNT16rm: |
3315 | case X86::LZCNT32rr: case X86::LZCNT32rm: |
3316 | case X86::LZCNT64rr: case X86::LZCNT64rm: |
3317 | return X86::COND_B; |
3318 | case X86::POPCNT16rr:case X86::POPCNT16rm: |
3319 | case X86::POPCNT32rr:case X86::POPCNT32rm: |
3320 | case X86::POPCNT64rr:case X86::POPCNT64rm: |
3321 | return X86::COND_E; |
3322 | case X86::TZCNT16rr: case X86::TZCNT16rm: |
3323 | case X86::TZCNT32rr: case X86::TZCNT32rm: |
3324 | case X86::TZCNT64rr: case X86::TZCNT64rm: |
3325 | return X86::COND_B; |
3326 | case X86::BSF16rr: case X86::BSF16rm: |
3327 | case X86::BSF32rr: case X86::BSF32rm: |
3328 | case X86::BSF64rr: case X86::BSF64rm: |
3329 | case X86::BSR16rr: case X86::BSR16rm: |
3330 | case X86::BSR32rr: case X86::BSR32rm: |
3331 | case X86::BSR64rr: case X86::BSR64rm: |
3332 | return X86::COND_E; |
3333 | } |
3334 | } |
3335 | |
3336 | /// Check if there exists an earlier instruction that |
3337 | /// operates on the same source operands and sets flags in the same way as |
3338 | /// Compare; remove Compare if possible. |
3339 | bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg, |
3340 | unsigned SrcReg2, int CmpMask, |
3341 | int CmpValue, |
3342 | const MachineRegisterInfo *MRI) const { |
3343 | // Check whether we can replace SUB with CMP. |
3344 | unsigned NewOpcode = 0; |
3345 | switch (CmpInstr.getOpcode()) { |
3346 | default: break; |
3347 | case X86::SUB64ri32: |
3348 | case X86::SUB64ri8: |
3349 | case X86::SUB32ri: |
3350 | case X86::SUB32ri8: |
3351 | case X86::SUB16ri: |
3352 | case X86::SUB16ri8: |
3353 | case X86::SUB8ri: |
3354 | case X86::SUB64rm: |
3355 | case X86::SUB32rm: |
3356 | case X86::SUB16rm: |
3357 | case X86::SUB8rm: |
3358 | case X86::SUB64rr: |
3359 | case X86::SUB32rr: |
3360 | case X86::SUB16rr: |
3361 | case X86::SUB8rr: { |
3362 | if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg())) |
3363 | return false; |
3364 | // There is no use of the destination register, so we can replace SUB with CMP. |
3365 | switch (CmpInstr.getOpcode()) { |
3366 | default: llvm_unreachable("Unreachable!"); |
3367 | case X86::SUB64rm: NewOpcode = X86::CMP64rm; break; |
3368 | case X86::SUB32rm: NewOpcode = X86::CMP32rm; break; |
3369 | case X86::SUB16rm: NewOpcode = X86::CMP16rm; break; |
3370 | case X86::SUB8rm: NewOpcode = X86::CMP8rm; break; |
3371 | case X86::SUB64rr: NewOpcode = X86::CMP64rr; break; |
3372 | case X86::SUB32rr: NewOpcode = X86::CMP32rr; break; |
3373 | case X86::SUB16rr: NewOpcode = X86::CMP16rr; break; |
3374 | case X86::SUB8rr: NewOpcode = X86::CMP8rr; break; |
3375 | case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break; |
3376 | case X86::SUB64ri8: NewOpcode = X86::CMP64ri8; break; |
3377 | case X86::SUB32ri: NewOpcode = X86::CMP32ri; break; |
3378 | case X86::SUB32ri8: NewOpcode = X86::CMP32ri8; break; |
3379 | case X86::SUB16ri: NewOpcode = X86::CMP16ri; break; |
3380 | case X86::SUB16ri8: NewOpcode = X86::CMP16ri8; break; |
3381 | case X86::SUB8ri: NewOpcode = X86::CMP8ri; break; |
3382 | } |
3383 | CmpInstr.setDesc(get(NewOpcode)); |
3384 | CmpInstr.RemoveOperand(0); |
3385 | // Fall through to optimize Cmp if Cmp is CMPrr or CMPri. |
3386 | if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm || |
3387 | NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm) |
3388 | return false; |
3389 | } |
3390 | } |
3391 | |
3392 | // Get the unique definition of SrcReg. |
3393 | MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg); |
3394 | if (!MI) return false; |
3395 | |
3396 | // I points at CmpInstr; Def points at the unique definition of SrcReg. |
3397 | MachineBasicBlock::iterator I = CmpInstr, Def = MI; |
3398 | |
3399 | // If we are comparing against zero, check whether we can use MI to update |
3400 | // EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize. |
3401 | bool IsCmpZero = (CmpMask != 0 && CmpValue == 0); |
3402 | if (IsCmpZero && MI->getParent() != CmpInstr.getParent()) |
3403 | return false; |
3404 | |
3405 | // If we have a use of the source register between the def and our compare |
3406 | // instruction we can eliminate the compare iff the use sets EFLAGS in the |
3407 | // right way. |
3408 | bool ShouldUpdateCC = false; |
3409 | bool NoSignFlag = false; |
3410 | X86::CondCode NewCC = X86::COND_INVALID; |
3411 | if (IsCmpZero && !isDefConvertible(*MI, NoSignFlag)) { |
3412 | // Scan forward from the def until we hit the use we're looking for or the |
3413 | // compare instruction. |
3414 | for (MachineBasicBlock::iterator J = MI;; ++J) { |
3415 | // Do we have a convertible instruction? |
3416 | NewCC = isUseDefConvertible(*J); |
3417 | if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() && |
3418 | J->getOperand(1).getReg() == SrcReg) { |
3419 | assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!"); |
3420 | ShouldUpdateCC = true; // Update CC later on. |
3421 | // This is not a def of SrcReg, but still a def of EFLAGS. Keep going |
3422 | // with the new def. |
3423 | Def = J; |
3424 | MI = &*Def; |
3425 | break; |
3426 | } |
3427 | |
3428 | if (J == I) |
3429 | return false; |
3430 | } |
3431 | } |
3432 | |
3433 | // We are searching for an earlier instruction that can make CmpInstr |
3434 | // redundant and that instruction will be saved in Sub. |
3435 | MachineInstr *Sub = nullptr; |
3436 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
3437 | |
3438 | // We iterate backward, starting from the instruction before CmpInstr, and |
3439 | // stop when we reach the definition of a source register or exhaust the BB. |
3440 | // RI points to the instruction before CmpInstr. |
3441 | // If the definition is in this basic block, RE points to the definition; |
3442 | // otherwise, RE is the rend of the basic block. |
3443 | MachineBasicBlock::reverse_iterator |
3444 | RI = ++I.getReverse(), |
3445 | RE = CmpInstr.getParent() == MI->getParent() |
3446 | ? Def.getReverse() /* points to MI */ |
3447 | : CmpInstr.getParent()->rend(); |
3448 | MachineInstr *Movr0Inst = nullptr; |
3449 | for (; RI != RE; ++RI) { |
3450 | MachineInstr &Instr = *RI; |
3451 | // Check whether CmpInstr can be made redundant by the current instruction. |
3452 | if (!IsCmpZero && isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask, |
3453 | CmpValue, Instr)) { |
3454 | Sub = &Instr; |
3455 | break; |
3456 | } |
3457 | |
3458 | if (Instr.modifiesRegister(X86::EFLAGS, TRI) || |
3459 | Instr.readsRegister(X86::EFLAGS, TRI)) { |
3460 | // This instruction modifies or uses EFLAGS. |
3461 | |
3462 | // MOV32r0 etc. are implemented with xor, which clobbers the condition codes. |
3463 | // They are safe to move up if the definition of EFLAGS is dead and |
3464 | // earlier instructions do not read or write EFLAGS. |
3465 | if (!Movr0Inst && Instr.getOpcode() == X86::MOV32r0 && |
3466 | Instr.registerDefIsDead(X86::EFLAGS, TRI)) { |
3467 | Movr0Inst = &Instr; |
3468 | continue; |
3469 | } |
3470 | |
3471 | // We can't remove CmpInstr. |
3472 | return false; |
3473 | } |
3474 | } |
3475 | |
3476 | // Return false if no candidates exist. |
3477 | if (!IsCmpZero && !Sub) |
3478 | return false; |
3479 | |
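| // The flag-setting SUB may list its operands in the opposite order from the |
| // compare; if so, the condition code of every EFLAGS user below must be |
| // swapped before CmpInstr can be removed. |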
3480 | bool IsSwapped = (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 && |
3481 | Sub->getOperand(2).getReg() == SrcReg); |
3482 | |
3483 | // Scan forward from the instruction after CmpInstr for uses of EFLAGS. |
3484 | // It is safe to remove CmpInstr if EFLAGS is redefined or killed. |
3485 | // If we are done with the basic block, we need to check whether EFLAGS is |
3486 | // live-out. |
3487 | bool IsSafe = false; |
3488 | SmallVector<std::pair<MachineInstr*, X86::CondCode>, 4> OpsToUpdate; |
3489 | MachineBasicBlock::iterator E = CmpInstr.getParent()->end(); |
3490 | for (++I; I != E; ++I) { |
3491 | const MachineInstr &Instr = *I; |
3492 | bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI); |
3493 | bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI); |
3494 | // An instruction that both uses and updates EFLAGS still needs its use checked. |
3495 | if (!UseEFLAGS && ModifyEFLAGS) { |
3496 | // It is safe to remove CmpInstr if EFLAGS is updated again. |
3497 | IsSafe = true; |
3498 | break; |
3499 | } |
3500 | if (!UseEFLAGS && !ModifyEFLAGS) |
3501 | continue; |
3502 | |
3503 | // EFLAGS is used by this instruction. |
3504 | X86::CondCode OldCC = X86::COND_INVALID; |
3505 | if (IsCmpZero || IsSwapped) { |
3506 | // We decode the condition code from the opcode. |
3507 | if (Instr.isBranch()) |
3508 | OldCC = X86::getCondFromBranch(Instr); |
3509 | else { |
3510 | OldCC = X86::getCondFromSETCC(Instr); |
3511 | if (OldCC == X86::COND_INVALID) |
3512 | OldCC = X86::getCondFromCMov(Instr); |
3513 | } |
3514 | if (OldCC == X86::COND_INVALID) return false; |
3515 | } |
3516 | X86::CondCode ReplacementCC = X86::COND_INVALID; |
3517 | if (IsCmpZero) { |
3518 | switch (OldCC) { |
3519 | default: break; |
3520 | case X86::COND_A: case X86::COND_AE: |
3521 | case X86::COND_B: case X86::COND_BE: |
3522 | case X86::COND_G: case X86::COND_GE: |
3523 | case X86::COND_L: case X86::COND_LE: |
3524 | case X86::COND_O: case X86::COND_NO: |
3525 | // CF and OF are used; we can't perform this optimization. |
3526 | return false; |
3527 | case X86::COND_S: case X86::COND_NS: |
3528 | // If SF is used, but the instruction doesn't update the SF, then we |
3529 | // can't do the optimization. |
3530 | if (NoSignFlag) |
3531 | return false; |
3532 | break; |
3533 | } |
3534 | |
3535 | // If we're updating the condition code, check if we have to reverse the |
3536 | // condition. |
3537 | if (ShouldUpdateCC) |
3538 | switch (OldCC) { |
3539 | default: |
3540 | return false; |
3541 | case X86::COND_E: |
3542 | ReplacementCC = NewCC; |
3543 | break; |
3544 | case X86::COND_NE: |
3545 | ReplacementCC = GetOppositeBranchCondition(NewCC); |
3546 | break; |
3547 | } |
3548 | } else if (IsSwapped) { |
3549 | // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs |
3550 | // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc. |
3551 | // We swap the condition code and synthesize the new opcode. |
3552 | ReplacementCC = getSwappedCondition(OldCC); |
3553 | if (ReplacementCC == X86::COND_INVALID) return false; |
3554 | } |
3555 | |
3556 | if ((ShouldUpdateCC || IsSwapped) && ReplacementCC != OldCC) { |
3557 | // Push the MachineInstr to OpsToUpdate. |
3558 | // If it is safe to remove CmpInstr, the condition code of these |
3559 | // instructions will be modified. |
3560 | OpsToUpdate.push_back(std::make_pair(&*I, ReplacementCC)); |
3561 | } |
3562 | if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) { |
3563 | // It is safe to remove CmpInstr if EFLAGS is updated again or killed. |
3564 | IsSafe = true; |
3565 | break; |
3566 | } |
3567 | } |
3568 | |
3569 | // If EFLAGS is neither killed nor re-defined, we should check whether it is |
3570 | // live-out. If it is live-out, do not optimize. |
3571 | if ((IsCmpZero || IsSwapped) && !IsSafe) { |
3572 | MachineBasicBlock *MBB = CmpInstr.getParent(); |
3573 | for (MachineBasicBlock *Successor : MBB->successors()) |
3574 | if (Successor->isLiveIn(X86::EFLAGS)) |
3575 | return false; |
3576 | } |
3577 | |
3578 | // The instruction to be updated is either Sub or MI. |
3579 | Sub = IsCmpZero ? MI : Sub; |
3580 | // Move Movr0Inst to the appropriate place before Sub. |
3581 | if (Movr0Inst) { |
3582 | // Look backwards until we find a def that doesn't use the current EFLAGS. |
3583 | Def = Sub; |
3584 | MachineBasicBlock::reverse_iterator InsertI = Def.getReverse(), |
3585 | InsertE = Sub->getParent()->rend(); |
3586 | for (; InsertI != InsertE; ++InsertI) { |
3587 | MachineInstr *Instr = &*InsertI; |
3588 | if (!Instr->readsRegister(X86::EFLAGS, TRI) && |
3589 | Instr->modifiesRegister(X86::EFLAGS, TRI)) { |
3590 | Sub->getParent()->remove(Movr0Inst); |
3591 | Instr->getParent()->insert(MachineBasicBlock::iterator(Instr), |
3592 | Movr0Inst); |
3593 | break; |
3594 | } |
3595 | } |
3596 | if (InsertI == InsertE) |
3597 | return false; |
3598 | } |
3599 | |
3600 | // Make sure Sub instruction defines EFLAGS and mark the def live. |
3601 | unsigned i = 0, e = Sub->getNumOperands(); |
3602 | for (; i != e; ++i) { |
3603 | MachineOperand &MO = Sub->getOperand(i); |
3604 | if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS) { |
3605 | MO.setIsDead(false); |
3606 | break; |
3607 | } |
3608 | } |
3609 | assert(i != e && "Unable to locate a def EFLAGS operand"); |
3610 | |
3611 | CmpInstr.eraseFromParent(); |
3612 | |
3613 | // Modify the condition code of instructions in OpsToUpdate. |
3614 | for (auto &Op : OpsToUpdate) { |
3615 | Op.first->getOperand(Op.first->getDesc().getNumOperands() - 1) |
3616 | .setImm(Op.second); |
3617 | } |
3618 | return true; |
3619 | } |
3620 | |
3621 | /// Try to remove the load by folding it to a register |
3622 | /// operand at the use. We fold the load if the load defines a virtual |
3623 | /// register, the virtual register is used once in the same BB, and the |
3624 | /// instructions in between do not load or store and have no side effects. |
3625 | MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI, |
3626 | const MachineRegisterInfo *MRI, |
3627 | unsigned &FoldAsLoadDefReg, |
3628 | MachineInstr *&DefMI) const { |
3629 | // Check whether we can move DefMI here. |
3630 | DefMI = MRI->getVRegDef(FoldAsLoadDefReg); |
3631 | assert(DefMI); |
3632 | bool SawStore = false; |
3633 | if (!DefMI->isSafeToMove(nullptr, SawStore)) |
3634 | return nullptr; |
3635 | |
3636 | // Collect information about virtual register operands of MI. |
3637 | SmallVector<unsigned, 1> SrcOperandIds; |
3638 | for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { |
3639 | MachineOperand &MO = MI.getOperand(i); |
3640 | if (!MO.isReg()) |
3641 | continue; |
3642 | unsigned Reg = MO.getReg(); |
3643 | if (Reg != FoldAsLoadDefReg) |
3644 | continue; |
3645 | // Do not fold if we have a subreg use or a def. |
3646 | if (MO.getSubReg() || MO.isDef()) |
3647 | return nullptr; |
3648 | SrcOperandIds.push_back(i); |
3649 | } |
3650 | if (SrcOperandIds.empty()) |
3651 | return nullptr; |
3652 | |
3653 | // Check whether we can fold the def into the uses in SrcOperandIds.
3654 | if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) { |
3655 | FoldAsLoadDefReg = 0; |
3656 | return FoldMI; |
3657 | } |
3658 | |
3659 | return nullptr; |
3660 | } |
3661 | |
3662 | /// Expand a single-def pseudo instruction to a two-addr |
3663 | /// instruction with two undef reads of the register being defined. |
3664 | /// This is used for mapping: |
3665 | /// %xmm4 = V_SET0 |
3666 | /// to: |
3667 | /// %xmm4 = PXORrr undef %xmm4, undef %xmm4 |
3668 | /// |
3669 | static bool Expand2AddrUndef(MachineInstrBuilder &MIB, |
3670 | const MCInstrDesc &Desc) { |
3671 | assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
3672 | unsigned Reg = MIB->getOperand(0).getReg(); |
3673 | MIB->setDesc(Desc); |
3674 | |
3675 | // MachineInstr::addOperand() will insert explicit operands before any |
3676 | // implicit operands. |
3677 | MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef); |
3678 | // But we don't trust that. |
3679 | assert(MIB->getOperand(1).getReg() == Reg &&
3680 | MIB->getOperand(2).getReg() == Reg && "Misplaced operand");
3681 | return true; |
3682 | } |
3683 | |
3684 | /// Expand a single-def pseudo instruction to a two-addr |
3685 | /// instruction with two %k0 reads. |
3686 | /// This is used for mapping: |
3687 | /// %k4 = K_SET1 |
3688 | /// to: |
3689 | /// %k4 = KXNORrr %k0, %k0 |
3690 | static bool Expand2AddrKreg(MachineInstrBuilder &MIB, |
3691 | const MCInstrDesc &Desc, unsigned Reg) { |
3692 | assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
3693 | MIB->setDesc(Desc); |
3694 | MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef); |
3695 | return true; |
3696 | } |
3697 | |
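// Expand MOV32r1 / MOV32r_1 into a dependency-breaking XOR followed by an
// INC or DEC. Roughly (sketch):
//   %eax = MOV32r1   -->   xorl %eax, %eax ; incl %eax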
3698 | static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII, |
3699 | bool MinusOne) { |
3700 | MachineBasicBlock &MBB = *MIB->getParent(); |
3701 | DebugLoc DL = MIB->getDebugLoc(); |
3702 | unsigned Reg = MIB->getOperand(0).getReg(); |
3703 | |
3704 | // Insert the XOR. |
3705 | BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg) |
3706 | .addReg(Reg, RegState::Undef) |
3707 | .addReg(Reg, RegState::Undef); |
3708 | |
3709 | // Turn the pseudo into an INC or DEC. |
3710 | MIB->setDesc(TII.get(MinusOne ? X86::DEC32r : X86::INC32r)); |
3711 | MIB.addReg(Reg); |
3712 | |
3713 | return true; |
3714 | } |
3715 | |
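// Expand MOV32ImmSExti8 / MOV64ImmSExti8 (a mov of a sign-extendable 8-bit
// immediate) into a push/pop pair, which is smaller than the plain mov.
// Roughly (sketch):
//   %eax = MOV32ImmSExti8 -1   -->   pushl $-1 ; popl %eax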
3716 | static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB, |
3717 | const TargetInstrInfo &TII, |
3718 | const X86Subtarget &Subtarget) { |
3719 | MachineBasicBlock &MBB = *MIB->getParent(); |
3720 | DebugLoc DL = MIB->getDebugLoc(); |
3721 | int64_t Imm = MIB->getOperand(1).getImm(); |
3722 | assert(Imm != 0 && "Using push/pop for 0 is not efficient.");
3723 | MachineBasicBlock::iterator I = MIB.getInstr(); |
3724 | |
3725 | int StackAdjustment; |
3726 | |
3727 | if (Subtarget.is64Bit()) { |
3728 | assert(MIB->getOpcode() == X86::MOV64ImmSExti8 ||
3729 | MIB->getOpcode() == X86::MOV32ImmSExti8);
3730 | |
3731 | // Can't use push/pop lowering if the function might write to the red zone. |
3732 | X86MachineFunctionInfo *X86FI = |
3733 | MBB.getParent()->getInfo<X86MachineFunctionInfo>(); |
3734 | if (X86FI->getUsesRedZone()) { |
3735 | MIB->setDesc(TII.get(MIB->getOpcode() == |
3736 | X86::MOV32ImmSExti8 ? X86::MOV32ri : X86::MOV64ri)); |
3737 | return true; |
3738 | } |
3739 | |
3740 | // 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and |
3741 | // widen the register if necessary. |
3742 | StackAdjustment = 8; |
3743 | BuildMI(MBB, I, DL, TII.get(X86::PUSH64i8)).addImm(Imm); |
3744 | MIB->setDesc(TII.get(X86::POP64r)); |
3745 | MIB->getOperand(0) |
3746 | .setReg(getX86SubSuperRegister(MIB->getOperand(0).getReg(), 64)); |
3747 | } else { |
3748 | assert(MIB->getOpcode() == X86::MOV32ImmSExti8);
3749 | StackAdjustment = 4; |
3750 | BuildMI(MBB, I, DL, TII.get(X86::PUSH32i8)).addImm(Imm); |
3751 | MIB->setDesc(TII.get(X86::POP32r)); |
3752 | } |
3753 | |
3754 | // Build CFI if necessary. |
3755 | MachineFunction &MF = *MBB.getParent(); |
3756 | const X86FrameLowering *TFL = Subtarget.getFrameLowering(); |
3757 | bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); |
3758 | bool NeedsDwarfCFI = |
3759 | !IsWin64Prologue && |
3760 | (MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry()); |
3761 | bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI; |
3762 | if (EmitCFI) { |
3763 | TFL->BuildCFI(MBB, I, DL, |
3764 | MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment)); |
3765 | TFL->BuildCFI(MBB, std::next(I), DL, |
3766 | MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment)); |
3767 | } |
3768 | |
3769 | return true; |
3770 | } |
3771 | |
3772 | // LoadStackGuard has so far only been implemented for 64-bit MachO. A
3773 | // different code sequence is needed for other targets.
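// The emitted sequence is roughly (illustrative; the guard symbol name
// depends on the target):
//   movq ___stack_chk_guard@GOTPCREL(%rip), %reg
//   movq (%reg), %reg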
3774 | static void expandLoadStackGuard(MachineInstrBuilder &MIB, |
3775 | const TargetInstrInfo &TII) { |
3776 | MachineBasicBlock &MBB = *MIB->getParent(); |
3777 | DebugLoc DL = MIB->getDebugLoc(); |
3778 | unsigned Reg = MIB->getOperand(0).getReg(); |
3779 | const GlobalValue *GV = |
3780 | cast<GlobalValue>((*MIB->memoperands_begin())->getValue()); |
3781 | auto Flags = MachineMemOperand::MOLoad | |
3782 | MachineMemOperand::MODereferenceable | |
3783 | MachineMemOperand::MOInvariant; |
3784 | MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand( |
3785 | MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, 8); |
3786 | MachineBasicBlock::iterator I = MIB.getInstr(); |
3787 | |
3788 | BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1) |
3789 | .addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0) |
3790 | .addMemOperand(MMO); |
3791 | MIB->setDebugLoc(DL); |
3792 | MIB->setDesc(TII.get(X86::MOV64rm)); |
3793 | MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0); |
3794 | } |
3795 | |
3796 | static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) { |
3797 | MachineBasicBlock &MBB = *MIB->getParent(); |
3798 | MachineFunction &MF = *MBB.getParent(); |
3799 | const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>(); |
3800 | const X86RegisterInfo *TRI = Subtarget.getRegisterInfo(); |
3801 | unsigned XorOp = |
3802 | MIB->getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr; |
3803 | MIB->setDesc(TII.get(XorOp)); |
3804 | MIB.addReg(TRI->getFrameRegister(MF), RegState::Undef); |
3805 | return true; |
3806 | } |
3807 | |
3808 | // This is used to handle spills for 128/256-bit registers when we have AVX512
3809 | // but not VLX. If the reload uses an extended register, we need an instruction
3810 | // that loads the lower 128/256 bits but is available with only AVX512F.
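// For example (sketch): a reload of %xmm16 can't use the VEX-encoded
// VMOVAPSrm (VEX only reaches xmm0-15), so it becomes
//   %zmm16 = VBROADCASTF32X4rm <addr>   ; low 128 bits hold the loaded value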
3811 | static bool expandNOVLXLoad(MachineInstrBuilder &MIB, |
3812 | const TargetRegisterInfo *TRI, |
3813 | const MCInstrDesc &LoadDesc, |
3814 | const MCInstrDesc &BroadcastDesc, |
3815 | unsigned SubIdx) { |
3816 | unsigned DestReg = MIB->getOperand(0).getReg(); |
3817 | // Check if DestReg is XMM16-31 or YMM16-31. |
3818 | if (TRI->getEncodingValue(DestReg) < 16) { |
3819 | // We can use a normal VEX encoded load. |
3820 | MIB->setDesc(LoadDesc); |
3821 | } else { |
3822 | // Use a 128/256-bit VBROADCAST instruction. |
3823 | MIB->setDesc(BroadcastDesc); |
3824 | // Change the destination to a 512-bit register. |
3825 | DestReg = TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass); |
3826 | MIB->getOperand(0).setReg(DestReg); |
3827 | } |
3828 | return true; |
3829 | } |
3830 | |
3831 | // This is used to handle spills for 128/256-bit registers when we have AVX512
3832 | // but not VLX. If the store uses an extended register, we need an instruction
3833 | // that stores the lower 128/256 bits but is available with only AVX512F.
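// For example (sketch): a spill of %xmm16 can't use the VEX-encoded
// VMOVAPSmr, so it becomes
//   VEXTRACTF32x4Zmr <addr>, %zmm16, 0   ; store the low 128 bits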
3834 | static bool expandNOVLXStore(MachineInstrBuilder &MIB, |
3835 | const TargetRegisterInfo *TRI, |
3836 | const MCInstrDesc &StoreDesc, |
3837 | const MCInstrDesc &ExtractDesc, |
3838 | unsigned SubIdx) { |
3839 | unsigned SrcReg = MIB->getOperand(X86::AddrNumOperands).getReg(); |
3840 | // Check if SrcReg is XMM16-31 or YMM16-31.
3841 | if (TRI->getEncodingValue(SrcReg) < 16) { |
3842 | // We can use a normal VEX encoded store. |
3843 | MIB->setDesc(StoreDesc); |
3844 | } else { |
3845 | // Use a VEXTRACTF instruction. |
3846 | MIB->setDesc(ExtractDesc); |
3847 | // Change the source to a 512-bit register.
3848 | SrcReg = TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass); |
3849 | MIB->getOperand(X86::AddrNumOperands).setReg(SrcReg); |
3850 | MIB.addImm(0x0); // Append immediate to extract from the lower bits. |
3851 | } |
3852 | |
3853 | return true; |
3854 | } |
3855 | |
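// Expand a SHXDROT rotate pseudo into a double shift that uses the same
// register for both sources, e.g. (sketch):
//   %eax = SHLDROT32ri %eax, 5   -->   shldl $5, %eax, %eax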
3856 | static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) { |
3857 | MIB->setDesc(Desc); |
3858 | int64_t ShiftAmt = MIB->getOperand(2).getImm(); |
3859 | // Temporarily remove the immediate so we can add another source register. |
3860 | MIB->RemoveOperand(2); |
3861 | // Add the register. Don't copy the kill flag if there is one. |
3862 | MIB.addReg(MIB->getOperand(1).getReg(), |
3863 | getUndefRegState(MIB->getOperand(1).isUndef())); |
3864 | // Add back the immediate. |
3865 | MIB.addImm(ShiftAmt); |
3866 | return true; |
3867 | } |
3868 | |
3869 | bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { |
3870 | bool HasAVX = Subtarget.hasAVX(); |
3871 | MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI); |
3872 | switch (MI.getOpcode()) { |
3873 | case X86::MOV32r0: |
3874 | return Expand2AddrUndef(MIB, get(X86::XOR32rr)); |
3875 | case X86::MOV32r1: |
3876 | return expandMOV32r1(MIB, *this, /*MinusOne=*/ false); |
3877 | case X86::MOV32r_1: |
3878 | return expandMOV32r1(MIB, *this, /*MinusOne=*/ true); |
3879 | case X86::MOV32ImmSExti8: |
3880 | case X86::MOV64ImmSExti8: |
3881 | return ExpandMOVImmSExti8(MIB, *this, Subtarget); |
3882 | case X86::SETB_C8r: |
3883 | return Expand2AddrUndef(MIB, get(X86::SBB8rr)); |
3884 | case X86::SETB_C16r: |
3885 | return Expand2AddrUndef(MIB, get(X86::SBB16rr)); |
3886 | case X86::SETB_C32r: |
3887 | return Expand2AddrUndef(MIB, get(X86::SBB32rr)); |
3888 | case X86::SETB_C64r: |
3889 | return Expand2AddrUndef(MIB, get(X86::SBB64rr)); |
3890 | case X86::MMX_SET0: |
3891 | return Expand2AddrUndef(MIB, get(X86::MMX_PXORirr)); |
3892 | case X86::V_SET0: |
3893 | case X86::FsFLD0SS: |
3894 | case X86::FsFLD0SD: |
3895 | return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr)); |
3896 | case X86::AVX_SET0: { |
3897 | assert(HasAVX && "AVX not supported");
3898 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
3899 | unsigned SrcReg = MIB->getOperand(0).getReg(); |
3900 | unsigned XReg = TRI->getSubReg(SrcReg, X86::sub_xmm); |
3901 | MIB->getOperand(0).setReg(XReg); |
3902 | Expand2AddrUndef(MIB, get(X86::VXORPSrr)); |
3903 | MIB.addReg(SrcReg, RegState::ImplicitDefine); |
3904 | return true; |
3905 | } |
3906 | case X86::AVX512_128_SET0: |
3907 | case X86::AVX512_FsFLD0SS: |
3908 | case X86::AVX512_FsFLD0SD: { |
3909 | bool HasVLX = Subtarget.hasVLX(); |
3910 | unsigned SrcReg = MIB->getOperand(0).getReg(); |
3911 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
3912 | if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) |
3913 | return Expand2AddrUndef(MIB, |
3914 | get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr)); |
3915 | // Extended register without VLX. Use a larger XOR. |
3916 | SrcReg = |
3917 | TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass); |
3918 | MIB->getOperand(0).setReg(SrcReg); |
3919 | return Expand2AddrUndef(MIB, get(X86::VPXORDZrr)); |
3920 | } |
3921 | case X86::AVX512_256_SET0: |
3922 | case X86::AVX512_512_SET0: { |
3923 | bool HasVLX = Subtarget.hasVLX(); |
3924 | unsigned SrcReg = MIB->getOperand(0).getReg(); |
3925 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
3926 | if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) { |
3927 | unsigned XReg = TRI->getSubReg(SrcReg, X86::sub_xmm); |
3928 | MIB->getOperand(0).setReg(XReg); |
3929 | Expand2AddrUndef(MIB, |
3930 | get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr)); |
3931 | MIB.addReg(SrcReg, RegState::ImplicitDefine); |
3932 | return true; |
3933 | } |
3934 | return Expand2AddrUndef(MIB, get(X86::VPXORDZrr)); |
3935 | } |
3936 | case X86::V_SETALLONES: |
3937 | return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr)); |
3938 | case X86::AVX2_SETALLONES: |
3939 | return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr)); |
3940 | case X86::AVX1_SETALLONES: { |
3941 | unsigned Reg = MIB->getOperand(0).getReg(); |
3942 | // VCMPPSYrri with an immediate 0xf should produce VCMPTRUEPS. |
3943 | MIB->setDesc(get(X86::VCMPPSYrri)); |
3944 | MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xf); |
3945 | return true; |
3946 | } |
3947 | case X86::AVX512_512_SETALLONES: { |
3948 | unsigned Reg = MIB->getOperand(0).getReg(); |
3949 | MIB->setDesc(get(X86::VPTERNLOGDZrri)); |
3950 | // VPTERNLOGD needs 3 register inputs and an immediate. |
3951 | // 0xff will return 1s for any input. |
3952 | MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef) |
3953 | .addReg(Reg, RegState::Undef).addImm(0xff); |
3954 | return true; |
3955 | } |
3956 | case X86::AVX512_512_SEXT_MASK_32: |
3957 | case X86::AVX512_512_SEXT_MASK_64: { |
3958 | unsigned Reg = MIB->getOperand(0).getReg(); |
3959 | unsigned MaskReg = MIB->getOperand(1).getReg(); |
3960 | unsigned MaskState = getRegState(MIB->getOperand(1)); |
3961 | unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64) ? |
3962 | X86::VPTERNLOGQZrrikz : X86::VPTERNLOGDZrrikz; |
3963 | MI.RemoveOperand(1); |
3964 | MIB->setDesc(get(Opc)); |
3965 | // VPTERNLOG needs 3 register inputs and an immediate. |
3966 | // 0xff will return 1s for any input. |
3967 | MIB.addReg(Reg, RegState::Undef).addReg(MaskReg, MaskState) |
3968 | .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xff); |
3969 | return true; |
3970 | } |
3971 | case X86::VMOVAPSZ128rm_NOVLX: |
3972 | return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSrm), |
3973 | get(X86::VBROADCASTF32X4rm), X86::sub_xmm); |
3974 | case X86::VMOVUPSZ128rm_NOVLX: |
3975 | return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSrm), |
3976 | get(X86::VBROADCASTF32X4rm), X86::sub_xmm); |
3977 | case X86::VMOVAPSZ256rm_NOVLX: |
3978 | return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSYrm), |
3979 | get(X86::VBROADCASTF64X4rm), X86::sub_ymm); |
3980 | case X86::VMOVUPSZ256rm_NOVLX: |
3981 | return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSYrm), |
3982 | get(X86::VBROADCASTF64X4rm), X86::sub_ymm); |
3983 | case X86::VMOVAPSZ128mr_NOVLX: |
3984 | return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr), |
3985 | get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm); |
3986 | case X86::VMOVUPSZ128mr_NOVLX: |
3987 | return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr), |
3988 | get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm); |
3989 | case X86::VMOVAPSZ256mr_NOVLX: |
3990 | return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr), |
3991 | get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm); |
3992 | case X86::VMOVUPSZ256mr_NOVLX: |
3993 | return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr), |
3994 | get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm); |
3995 | case X86::MOV32ri64: { |
3996 | unsigned Reg = MIB->getOperand(0).getReg(); |
3997 | unsigned Reg32 = RI.getSubReg(Reg, X86::sub_32bit); |
3998 | MI.setDesc(get(X86::MOV32ri)); |
3999 | MIB->getOperand(0).setReg(Reg32); |
4000 | MIB.addReg(Reg, RegState::ImplicitDefine); |
4001 | return true; |
4002 | } |
4003 | |
4004 | // KNL does not recognize dependency-breaking idioms for mask registers, |
4005 | // so kxnor %k1, %k1, %k2 has a RAW dependence on %k1. |
4006 | // Using %k0 as the undef input register is a performance heuristic based |
4007 | // on the assumption that %k0 is used less frequently than the other mask |
4008 | // registers, since it is not usable as a write mask. |
4009 | // FIXME: A more advanced approach would be to choose the best input mask |
4010 | // register based on context. |
4011 | case X86::KSET0W: return Expand2AddrKreg(MIB, get(X86::KXORWrr), X86::K0); |
4012 | case X86::KSET0D: return Expand2AddrKreg(MIB, get(X86::KXORDrr), X86::K0); |
4013 | case X86::KSET0Q: return Expand2AddrKreg(MIB, get(X86::KXORQrr), X86::K0); |
4014 | case X86::KSET1W: return Expand2AddrKreg(MIB, get(X86::KXNORWrr), X86::K0); |
4015 | case X86::KSET1D: return Expand2AddrKreg(MIB, get(X86::KXNORDrr), X86::K0); |
4016 | case X86::KSET1Q: return Expand2AddrKreg(MIB, get(X86::KXNORQrr), X86::K0); |
4017 | case TargetOpcode::LOAD_STACK_GUARD: |
4018 | expandLoadStackGuard(MIB, *this); |
4019 | return true; |
4020 | case X86::XOR64_FP: |
4021 | case X86::XOR32_FP: |
4022 | return expandXorFP(MIB, *this); |
4023 | case X86::SHLDROT32ri: return expandSHXDROT(MIB, get(X86::SHLD32rri8)); |
4024 | case X86::SHLDROT64ri: return expandSHXDROT(MIB, get(X86::SHLD64rri8)); |
4025 | case X86::SHRDROT32ri: return expandSHXDROT(MIB, get(X86::SHRD32rri8)); |
4026 | case X86::SHRDROT64ri: return expandSHXDROT(MIB, get(X86::SHRD64rri8)); |
4027 | case X86::ADD8rr_DB: MIB->setDesc(get(X86::OR8rr)); break; |
4028 | case X86::ADD16rr_DB: MIB->setDesc(get(X86::OR16rr)); break; |
4029 | case X86::ADD32rr_DB: MIB->setDesc(get(X86::OR32rr)); break; |
4030 | case X86::ADD64rr_DB: MIB->setDesc(get(X86::OR64rr)); break; |
4031 | case X86::ADD8ri_DB: MIB->setDesc(get(X86::OR8ri)); break; |
4032 | case X86::ADD16ri_DB: MIB->setDesc(get(X86::OR16ri)); break; |
4033 | case X86::ADD32ri_DB: MIB->setDesc(get(X86::OR32ri)); break; |
4034 | case X86::ADD64ri32_DB: MIB->setDesc(get(X86::OR64ri32)); break; |
4035 | case X86::ADD16ri8_DB: MIB->setDesc(get(X86::OR16ri8)); break; |
4036 | case X86::ADD32ri8_DB: MIB->setDesc(get(X86::OR32ri8)); break; |
4037 | case X86::ADD64ri8_DB: MIB->setDesc(get(X86::OR64ri8)); break; |
4038 | } |
4039 | return false; |
4040 | } |
4041 | |
4042 | /// Return true for all instructions that only update |
4043 | /// the first 32 or 64-bits of the destination register and leave the rest |
4044 | /// unmodified. This can be used to avoid folding loads if the instructions |
4045 | /// only update part of the destination register, and the non-updated part is |
4046 | /// not needed. e.g. cvtss2sd, sqrtss. Unfolding the load from these |
4047 | /// instructions breaks the partial register dependency and can improve
4048 | /// performance, e.g.:
4049 | /// |
4050 | /// movss (%rdi), %xmm0 |
4051 | /// cvtss2sd %xmm0, %xmm0 |
4052 | /// |
4053 | /// Instead of |
4054 | /// cvtss2sd (%rdi), %xmm0 |
4055 | /// |
4056 | /// FIXME: This should be turned into a TSFlags. |
4057 | /// |
4058 | static bool hasPartialRegUpdate(unsigned Opcode, |
4059 | const X86Subtarget &Subtarget, |
4060 | bool ForLoadFold = false) { |
4061 | switch (Opcode) { |
4062 | case X86::CVTSI2SSrr: |
4063 | case X86::CVTSI2SSrm: |
4064 | case X86::CVTSI642SSrr: |
4065 | case X86::CVTSI642SSrm: |
4066 | case X86::CVTSI2SDrr: |
4067 | case X86::CVTSI2SDrm: |
4068 | case X86::CVTSI642SDrr: |
4069 | case X86::CVTSI642SDrm: |
4070 | // Load folding won't affect the undef register update since the input is
4071 | // a GPR. |
4072 | return !ForLoadFold; |
4073 | case X86::CVTSD2SSrr: |
4074 | case X86::CVTSD2SSrm: |
4075 | case X86::CVTSS2SDrr: |
4076 | case X86::CVTSS2SDrm: |
4077 | case X86::MOVHPDrm: |
4078 | case X86::MOVHPSrm: |
4079 | case X86::MOVLPDrm: |
4080 | case X86::MOVLPSrm: |
4081 | case X86::RCPSSr: |
4082 | case X86::RCPSSm: |
4083 | case X86::RCPSSr_Int: |
4084 | case X86::RCPSSm_Int: |
4085 | case X86::ROUNDSDr: |
4086 | case X86::ROUNDSDm: |
4087 | case X86::ROUNDSSr: |
4088 | case X86::ROUNDSSm: |
4089 | case X86::RSQRTSSr: |
4090 | case X86::RSQRTSSm: |
4091 | case X86::RSQRTSSr_Int: |
4092 | case X86::RSQRTSSm_Int: |
4093 | case X86::SQRTSSr: |
4094 | case X86::SQRTSSm: |
4095 | case X86::SQRTSSr_Int: |
4096 | case X86::SQRTSSm_Int: |
4097 | case X86::SQRTSDr: |
4098 | case X86::SQRTSDm: |
4099 | case X86::SQRTSDr_Int: |
4100 | case X86::SQRTSDm_Int: |
4101 | return true; |
4102 | // GPR |
4103 | case X86::POPCNT32rm: |
4104 | case X86::POPCNT32rr: |
4105 | case X86::POPCNT64rm: |
4106 | case X86::POPCNT64rr: |
4107 | return Subtarget.hasPOPCNTFalseDeps(); |
4108 | case X86::LZCNT32rm: |
4109 | case X86::LZCNT32rr: |
4110 | case X86::LZCNT64rm: |
4111 | case X86::LZCNT64rr: |
4112 | case X86::TZCNT32rm: |
4113 | case X86::TZCNT32rr: |
4114 | case X86::TZCNT64rm: |
4115 | case X86::TZCNT64rr: |
4116 | return Subtarget.hasLZCNTFalseDeps(); |
4117 | } |
4118 | |
4119 | return false; |
4120 | } |
4121 | |
4122 | /// Inform the BreakFalseDeps pass how many idle |
4123 | /// instructions we would like before a partial register update. |
4124 | unsigned X86InstrInfo::getPartialRegUpdateClearance( |
4125 | const MachineInstr &MI, unsigned OpNum, |
4126 | const TargetRegisterInfo *TRI) const { |
4127 | if (OpNum != 0 || !hasPartialRegUpdate(MI.getOpcode(), Subtarget)) |
4128 | return 0; |
4129 | |
4130 | // If MI is marked as reading Reg, the partial register update is wanted. |
4131 | const MachineOperand &MO = MI.getOperand(0); |
4132 | unsigned Reg = MO.getReg(); |
4133 | if (TargetRegisterInfo::isVirtualRegister(Reg)) { |
4134 | if (MO.readsReg() || MI.readsVirtualRegister(Reg)) |
4135 | return 0; |
4136 | } else { |
4137 | if (MI.readsRegister(Reg, TRI)) |
4138 | return 0; |
4139 | } |
4140 | |
4141 | // If any instructions in the clearance range are reading Reg, insert a |
4142 | // dependency-breaking instruction, which is inexpensive and is likely to
4143 | // be hidden in other instructions' cycles.
4144 | return PartialRegUpdateClearance; |
4145 | } |
4146 | |
4147 | // Return true for any instruction that copies the high bits of the first source
4148 | // operand into the unused high bits of the destination operand. |
4149 | static bool hasUndefRegUpdate(unsigned Opcode, bool ForLoadFold = false) { |
4150 | switch (Opcode) { |
4151 | case X86::VCVTSI2SSrr: |
4152 | case X86::VCVTSI2SSrm: |
4153 | case X86::VCVTSI2SSrr_Int: |
4154 | case X86::VCVTSI2SSrm_Int: |
4155 | case X86::VCVTSI642SSrr: |
4156 | case X86::VCVTSI642SSrm: |
4157 | case X86::VCVTSI642SSrr_Int: |
4158 | case X86::VCVTSI642SSrm_Int: |
4159 | case X86::VCVTSI2SDrr: |
4160 | case X86::VCVTSI2SDrm: |
4161 | case X86::VCVTSI2SDrr_Int: |
4162 | case X86::VCVTSI2SDrm_Int: |
4163 | case X86::VCVTSI642SDrr: |
4164 | case X86::VCVTSI642SDrm: |
4165 | case X86::VCVTSI642SDrr_Int: |
4166 | case X86::VCVTSI642SDrm_Int: |
4167 | // AVX-512 |
4168 | case X86::VCVTSI2SSZrr: |
4169 | case X86::VCVTSI2SSZrm: |
4170 | case X86::VCVTSI2SSZrr_Int: |
4171 | case X86::VCVTSI2SSZrrb_Int: |
4172 | case X86::VCVTSI2SSZrm_Int: |
4173 | case X86::VCVTSI642SSZrr: |
4174 | case X86::VCVTSI642SSZrm: |
4175 | case X86::VCVTSI642SSZrr_Int: |
4176 | case X86::VCVTSI642SSZrrb_Int: |
4177 | case X86::VCVTSI642SSZrm_Int: |
4178 | case X86::VCVTSI2SDZrr: |
4179 | case X86::VCVTSI2SDZrm: |
4180 | case X86::VCVTSI2SDZrr_Int: |
4181 | case X86::VCVTSI2SDZrm_Int: |
4182 | case X86::VCVTSI642SDZrr: |
4183 | case X86::VCVTSI642SDZrm: |
4184 | case X86::VCVTSI642SDZrr_Int: |
4185 | case X86::VCVTSI642SDZrrb_Int: |
4186 | case X86::VCVTSI642SDZrm_Int: |
4187 | case X86::VCVTUSI2SSZrr: |
4188 | case X86::VCVTUSI2SSZrm: |
4189 | case X86::VCVTUSI2SSZrr_Int: |
4190 | case X86::VCVTUSI2SSZrrb_Int: |
4191 | case X86::VCVTUSI2SSZrm_Int: |
4192 | case X86::VCVTUSI642SSZrr: |
4193 | case X86::VCVTUSI642SSZrm: |
4194 | case X86::VCVTUSI642SSZrr_Int: |
4195 | case X86::VCVTUSI642SSZrrb_Int: |
4196 | case X86::VCVTUSI642SSZrm_Int: |
4197 | case X86::VCVTUSI2SDZrr: |
4198 | case X86::VCVTUSI2SDZrm: |
4199 | case X86::VCVTUSI2SDZrr_Int: |
4200 | case X86::VCVTUSI2SDZrm_Int: |
4201 | case X86::VCVTUSI642SDZrr: |
4202 | case X86::VCVTUSI642SDZrm: |
4203 | case X86::VCVTUSI642SDZrr_Int: |
4204 | case X86::VCVTUSI642SDZrrb_Int: |
4205 | case X86::VCVTUSI642SDZrm_Int: |
4206 | // Load folding won't affect the undef register update since the input is
4207 | // a GPR. |
4208 | return !ForLoadFold; |
4209 | case X86::VCVTSD2SSrr: |
4210 | case X86::VCVTSD2SSrm: |
4211 | case X86::VCVTSD2SSrr_Int: |
4212 | case X86::VCVTSD2SSrm_Int: |
4213 | case X86::VCVTSS2SDrr: |
4214 | case X86::VCVTSS2SDrm: |
4215 | case X86::VCVTSS2SDrr_Int: |
4216 | case X86::VCVTSS2SDrm_Int: |
4217 | case X86::VRCPSSr: |
4218 | case X86::VRCPSSr_Int: |
4219 | case X86::VRCPSSm: |
4220 | case X86::VRCPSSm_Int: |
4221 | case X86::VROUNDSDr: |
4222 | case X86::VROUNDSDm: |
4223 | case X86::VROUNDSDr_Int: |
4224 | case X86::VROUNDSDm_Int: |
4225 | case X86::VROUNDSSr: |
4226 | case X86::VROUNDSSm: |
4227 | case X86::VROUNDSSr_Int: |
4228 | case X86::VROUNDSSm_Int: |
4229 | case X86::VRSQRTSSr: |
4230 | case X86::VRSQRTSSr_Int: |
4231 | case X86::VRSQRTSSm: |
4232 | case X86::VRSQRTSSm_Int: |
4233 | case X86::VSQRTSSr: |
4234 | case X86::VSQRTSSr_Int: |
4235 | case X86::VSQRTSSm: |
4236 | case X86::VSQRTSSm_Int: |
4237 | case X86::VSQRTSDr: |
4238 | case X86::VSQRTSDr_Int: |
4239 | case X86::VSQRTSDm: |
4240 | case X86::VSQRTSDm_Int: |
4241 | // AVX-512 |
4242 | case X86::VCVTSD2SSZrr: |
4243 | case X86::VCVTSD2SSZrr_Int: |
4244 | case X86::VCVTSD2SSZrrb_Int: |
4245 | case X86::VCVTSD2SSZrm: |
4246 | case X86::VCVTSD2SSZrm_Int: |
4247 | case X86::VCVTSS2SDZrr: |
4248 | case X86::VCVTSS2SDZrr_Int: |
4249 | case X86::VCVTSS2SDZrrb_Int: |
4250 | case X86::VCVTSS2SDZrm: |
4251 | case X86::VCVTSS2SDZrm_Int: |
4252 | case X86::VGETEXPSDZr: |
4253 | case X86::VGETEXPSDZrb: |
4254 | case X86::VGETEXPSDZm: |
4255 | case X86::VGETEXPSSZr: |
4256 | case X86::VGETEXPSSZrb: |
4257 | case X86::VGETEXPSSZm: |
4258 | case X86::VGETMANTSDZrri: |
4259 | case X86::VGETMANTSDZrrib: |
4260 | case X86::VGETMANTSDZrmi: |
4261 | case X86::VGETMANTSSZrri: |
4262 | case X86::VGETMANTSSZrrib: |
4263 | case X86::VGETMANTSSZrmi: |
4264 | case X86::VRNDSCALESDZr: |
4265 | case X86::VRNDSCALESDZr_Int: |
4266 | case X86::VRNDSCALESDZrb_Int: |
4267 | case X86::VRNDSCALESDZm: |
4268 | case X86::VRNDSCALESDZm_Int: |
4269 | case X86::VRNDSCALESSZr: |
4270 | case X86::VRNDSCALESSZr_Int: |
4271 | case X86::VRNDSCALESSZrb_Int: |
4272 | case X86::VRNDSCALESSZm: |
4273 | case X86::VRNDSCALESSZm_Int: |
4274 | case X86::VRCP14SDZrr: |
4275 | case X86::VRCP14SDZrm: |
4276 | case X86::VRCP14SSZrr: |
4277 | case X86::VRCP14SSZrm: |
4278 | case X86::VRCP28SDZr: |
4279 | case X86::VRCP28SDZrb: |
4280 | case X86::VRCP28SDZm: |
4281 | case X86::VRCP28SSZr: |
4282 | case X86::VRCP28SSZrb: |
4283 | case X86::VRCP28SSZm: |
4284 | case X86::VREDUCESSZrmi: |
4285 | case X86::VREDUCESSZrri: |
4286 | case X86::VREDUCESSZrrib: |
4287 | case X86::VRSQRT14SDZrr: |
4288 | case X86::VRSQRT14SDZrm: |
4289 | case X86::VRSQRT14SSZrr: |
4290 | case X86::VRSQRT14SSZrm: |
4291 | case X86::VRSQRT28SDZr: |
4292 | case X86::VRSQRT28SDZrb: |
4293 | case X86::VRSQRT28SDZm: |
4294 | case X86::VRSQRT28SSZr: |
4295 | case X86::VRSQRT28SSZrb: |
4296 | case X86::VRSQRT28SSZm: |
4297 | case X86::VSQRTSSZr: |
4298 | case X86::VSQRTSSZr_Int: |
4299 | case X86::VSQRTSSZrb_Int: |
4300 | case X86::VSQRTSSZm: |
4301 | case X86::VSQRTSSZm_Int: |
4302 | case X86::VSQRTSDZr: |
4303 | case X86::VSQRTSDZr_Int: |
4304 | case X86::VSQRTSDZrb_Int: |
4305 | case X86::VSQRTSDZm: |
4306 | case X86::VSQRTSDZm_Int: |
4307 | return true; |
4308 | } |
4309 | |
4310 | return false; |
4311 | } |
4312 | |
4313 | /// Inform the BreakFalseDeps pass how many idle instructions we would like |
4314 | /// before certain undef register reads. |
4315 | /// |
4316 | /// This catches the VCVTSI2SD family of instructions: |
4317 | /// |
4318 | /// vcvtsi2sdq %rax, undef %xmm0, %xmm14 |
4319 | /// |
4320 | /// We should be careful *not* to catch VXOR idioms, which are presumably
4321 | /// handled specially in the pipeline: |
4322 | /// |
4323 | /// vxorps undef %xmm1, undef %xmm1, %xmm1 |
4324 | /// |
4325 | /// Like getPartialRegUpdateClearance, this makes a strong assumption that the |
4326 | /// high bits that are passed through are not live.
4327 | unsigned |
4328 | X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum, |
4329 | const TargetRegisterInfo *TRI) const { |
4330 | if (!hasUndefRegUpdate(MI.getOpcode())) |
4331 | return 0; |
4332 | |
4333 | // Set the OpNum parameter to the first source operand. |
4334 | OpNum = 1; |
4335 | |
4336 | const MachineOperand &MO = MI.getOperand(OpNum); |
4337 | if (MO.isUndef() && TargetRegisterInfo::isPhysicalRegister(MO.getReg())) { |
4338 | return UndefRegClearance; |
4339 | } |
4340 | return 0; |
4341 | } |
4342 | |
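// For example (sketch), breaking the false dependency of a partial-update
// instruction on %xmm0:
//   cvtsi2ssl %edi, %xmm0   -->   xorps %xmm0, %xmm0
//                                 cvtsi2ssl %edi, %xmm0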
4343 | void X86InstrInfo::breakPartialRegDependency( |
4344 | MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const { |
4345 | unsigned Reg = MI.getOperand(OpNum).getReg(); |
4346 | // If MI kills this register, the false dependence is already broken. |
4347 | if (MI.killsRegister(Reg, TRI)) |
4348 | return; |
4349 | |
4350 | if (X86::VR128RegClass.contains(Reg)) { |
4351 | // These instructions are all floating point domain, so xorps is the best |
4352 | // choice. |
4353 | unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr; |
4354 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg) |
4355 | .addReg(Reg, RegState::Undef) |
4356 | .addReg(Reg, RegState::Undef); |
4357 | MI.addRegisterKilled(Reg, TRI, true); |
4358 | } else if (X86::VR256RegClass.contains(Reg)) { |
4359 | // Use vxorps to clear the full ymm register. |
4360 | // It wants to read and write the xmm sub-register. |
4361 | unsigned XReg = TRI->getSubReg(Reg, X86::sub_xmm); |
4362 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg) |
4363 | .addReg(XReg, RegState::Undef) |
4364 | .addReg(XReg, RegState::Undef) |
4365 | .addReg(Reg, RegState::ImplicitDefine); |
4366 | MI.addRegisterKilled(Reg, TRI, true); |
4367 | } else if (X86::GR64RegClass.contains(Reg)) { |
4368 | // Use XOR32rr because it has a shorter encoding and zeros the upper bits
4369 | // as well.
4370 | unsigned XReg = TRI->getSubReg(Reg, X86::sub_32bit); |
4371 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), XReg) |
4372 | .addReg(XReg, RegState::Undef) |
4373 | .addReg(XReg, RegState::Undef) |
4374 | .addReg(Reg, RegState::ImplicitDefine); |
4375 | MI.addRegisterKilled(Reg, TRI, true); |
4376 | } else if (X86::GR32RegClass.contains(Reg)) { |
4377 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), Reg) |
4378 | .addReg(Reg, RegState::Undef) |
4379 | .addReg(Reg, RegState::Undef); |
4380 | MI.addRegisterKilled(Reg, TRI, true); |
4381 | } |
4382 | } |
4383 | |
4384 | static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs, |
4385 | int PtrOffset = 0) { |
4386 | unsigned NumAddrOps = MOs.size(); |
4387 | |
4388 | if (NumAddrOps < 4) { |
4389 | // FrameIndex only - add an immediate offset (whether it's zero or not).
4390 | for (unsigned i = 0; i != NumAddrOps; ++i) |
4391 | MIB.add(MOs[i]); |
4392 | addOffset(MIB, PtrOffset); |
4393 | } else { |
4394 | // General Memory Addressing - we need to add any offset to an existing |
4395 | // offset. |
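// A full x86 memory reference is {Base, Scale, Index, Disp, Segment};
// operand 3 is the displacement, which absorbs any extra PtrOffset.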
4396 | assert(MOs.size() == 5 && "Unexpected memory operand list length");
4397 | for (unsigned i = 0; i != NumAddrOps; ++i) { |
4398 | const MachineOperand &MO = MOs[i]; |
4399 | if (i == 3 && PtrOffset != 0) { |
4400 | MIB.addDisp(MO, PtrOffset); |
4401 | } else { |
4402 | MIB.add(MO); |
4403 | } |
4404 | } |
4405 | } |
4406 | } |
4407 | |
4408 | static void updateOperandRegConstraints(MachineFunction &MF, |
4409 | MachineInstr &NewMI, |
4410 | const TargetInstrInfo &TII) { |
4411 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
4412 | const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); |
4413 | |
4414 | for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) { |
4415 | MachineOperand &MO = NewMI.getOperand(Idx); |
4416 | // We only need to update constraints on virtual register operands. |
4417 | if (!MO.isReg()) |
4418 | continue; |
4419 | unsigned Reg = MO.getReg(); |
4420 | if (!TRI.isVirtualRegister(Reg)) |
4421 | continue; |
4422 | |
4423 | auto *NewRC = MRI.constrainRegClass( |
4424 | Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI, MF)); |
4425 | if (!NewRC) { |
4426 | LLVM_DEBUG(
4427 | dbgs() << "WARNING: Unable to update register constraint for operand "
4428 | << Idx << " of instruction:\n";
4429 | NewMI.dump(); dbgs() << "\n");
4430 | } |
4431 | } |
4432 | } |
4433 | |
4434 | static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode, |
4435 | ArrayRef<MachineOperand> MOs, |
4436 | MachineBasicBlock::iterator InsertPt, |
4437 | MachineInstr &MI, |
4438 | const TargetInstrInfo &TII) { |
4439 | // Create the base instruction with the memory operand as the first part. |
4440 | // Omit the implicit operands, something BuildMI can't do. |
4441 | MachineInstr *NewMI = |
4442 | MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true); |
4443 | MachineInstrBuilder MIB(MF, NewMI); |
4444 | addOperands(MIB, MOs); |
4445 | |
4446 | // Loop over the rest of the ri operands, converting them over. |
4447 | unsigned NumOps = MI.getDesc().getNumOperands() - 2; |
4448 | for (unsigned i = 0; i != NumOps; ++i) { |
4449 | MachineOperand &MO = MI.getOperand(i + 2); |
4450 | MIB.add(MO); |
4451 | } |
4452 | for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) { |
4453 | MachineOperand &MO = MI.getOperand(i); |
4454 | MIB.add(MO); |
4455 | } |
4456 | |
4457 | updateOperandRegConstraints(MF, *NewMI, TII); |
4458 | |
4459 | MachineBasicBlock *MBB = InsertPt->getParent(); |
4460 | MBB->insert(InsertPt, NewMI); |
4461 | |
4462 | return MIB; |
4463 | } |
4464 | |
4465 | static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode, |
4466 | unsigned OpNo, ArrayRef<MachineOperand> MOs, |
4467 | MachineBasicBlock::iterator InsertPt, |
4468 | MachineInstr &MI, const TargetInstrInfo &TII, |
4469 | int PtrOffset = 0) { |
4470 | // Omit the implicit operands, something BuildMI can't do. |
4471 | MachineInstr *NewMI = |
4472 | MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true); |
4473 | MachineInstrBuilder MIB(MF, NewMI); |
4474 | |
4475 | for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { |
4476 | MachineOperand &MO = MI.getOperand(i); |
4477 | if (i == OpNo) { |
4478 | assert(MO.isReg() && "Expected to fold into reg operand!");
4479 | addOperands(MIB, MOs, PtrOffset); |
4480 | } else { |
4481 | MIB.add(MO); |
4482 | } |
4483 | } |
4484 | |
4485 | updateOperandRegConstraints(MF, *NewMI, TII); |
4486 | |
4487 | MachineBasicBlock *MBB = InsertPt->getParent(); |
4488 | MBB->insert(InsertPt, NewMI); |
4489 | |
4490 | return MIB; |
4491 | } |
4492 | |
4493 | static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode, |
4494 | ArrayRef<MachineOperand> MOs, |
4495 | MachineBasicBlock::iterator InsertPt, |
4496 | MachineInstr &MI) { |
4497 | MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt, |
4498 | MI.getDebugLoc(), TII.get(Opcode)); |
4499 | addOperands(MIB, MOs); |
4500 | return MIB.addImm(0); |
4501 | } |
4502 | |
4503 | MachineInstr *X86InstrInfo::foldMemoryOperandCustom( |
4504 | MachineFunction &MF, MachineInstr &MI, unsigned OpNum, |
4505 | ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt, |
4506 | unsigned Size, unsigned Align) const { |
4507 | switch (MI.getOpcode()) { |
4508 | case X86::INSERTPSrr: |
4509 | case X86::VINSERTPSrr: |
4510 | case X86::VINSERTPSZrr: |
4511 | // Attempt to convert the load of the inserted vector into a folded load
4512 | // of a single float.
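// The INSERTPS immediate encodes bits[7:6] = source element, bits[5:4] =
// destination element, and bits[3:0] = zero mask. The memory form loads a
// single float, so the fold below shifts the pointer by SrcIdx*4 and
// rewrites the immediate with a zero source selector.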
4513 | if (OpNum == 2) { |
4514 | unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm(); |
4515 | unsigned ZMask = Imm & 15; |
4516 | unsigned DstIdx = (Imm >> 4) & 3; |
4517 | unsigned SrcIdx = (Imm >> 6) & 3; |
4518 | |
4519 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
4520 | const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF); |
4521 | unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8; |
4522 | if (Size <= RCSize && 4 <= Align) { |
4523 | int PtrOffset = SrcIdx * 4; |
4524 | unsigned NewImm = (DstIdx << 4) | ZMask; |
4525 | unsigned NewOpCode = |
4526 | (MI.getOpcode() == X86::VINSERTPSZrr) ? X86::VINSERTPSZrm : |
4527 | (MI.getOpcode() == X86::VINSERTPSrr) ? X86::VINSERTPSrm : |
4528 | X86::INSERTPSrm; |
4529 | MachineInstr *NewMI = |
4530 | FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset); |
4531 | NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm); |
4532 | return NewMI; |
4533 | } |
4534 | } |
4535 | break; |
4536 | case X86::MOVHLPSrr: |
4537 | case X86::VMOVHLPSrr: |
4538 | case X86::VMOVHLPSZrr: |
4539 | // Move the upper 64 bits of the second operand to the lower 64 bits.
4540 | // To fold the load, adjust the pointer to the upper half and use (V)MOVLPS.
4541 | // TODO: In most cases AVX doesn't have an 8-byte alignment requirement.
4542 | if (OpNum == 2) { |
4543 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
4544 | const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF); |
4545 | unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8; |
4546 | if (Size <= RCSize && 8 <= Align) { |
4547 | unsigned NewOpCode = |
4548 | (MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm : |
4549 | (MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm : |
4550 | X86::MOVLPSrm; |
4551 | MachineInstr *NewMI = |
4552 | FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8); |
4553 | return NewMI; |
4554 | } |
4555 | } |
4556 | break; |
4557 | }
4558 | |
4559 | return nullptr; |
4560 | } |
4561 | |
4562 | static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF, |
4563 | MachineInstr &MI) { |
4564 | if (!hasUndefRegUpdate(MI.getOpcode(), /*ForLoadFold*/true) || |
4565 | !MI.getOperand(1).isReg()) |
4566 | return false; |
4567 | |
4568 | // There are two cases we need to handle depending on where in the pipeline
4569 | // the folding attempt is being made. |
4570 | // -Register has the undef flag set. |
4571 | // -Register is produced by the IMPLICIT_DEF instruction. |
4572 | |
4573 | if (MI.getOperand(1).isUndef()) |
4574 | return true; |
4575 | |
4576 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); |
4577 | MachineInstr *VRegDef = RegInfo.getUniqueVRegDef(MI.getOperand(1).getReg()); |
4578 | return VRegDef && VRegDef->isImplicitDef(); |
4579 | } |
4580 | |
4581 | |
4582 | MachineInstr *X86InstrInfo::foldMemoryOperandImpl( |
4583 | MachineFunction &MF, MachineInstr &MI, unsigned OpNum, |
4584 | ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt, |
4585 | unsigned Size, unsigned Align, bool AllowCommute) const { |
4586 | bool isSlowTwoMemOps = Subtarget.slowTwoMemOps(); |
4587 | bool isTwoAddrFold = false; |
4588 | |
4589 | // For CPUs that favor the register form of a call or push, |
4590 | // do not fold loads into calls or pushes, unless optimizing for size |
4591 | // aggressively. |
4592 | if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() && |
4593 | (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r || |
4594 | MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r || |
4595 | MI.getOpcode() == X86::PUSH64r)) |
4596 | return nullptr; |
4597 | |
4598 | // Avoid partial and undef register update stalls unless optimizing for size. |
4599 | if (!MF.getFunction().hasOptSize() && |
4600 | (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) || |
4601 | shouldPreventUndefRegUpdateMemFold(MF, MI))) |
4602 | return nullptr; |
4603 | |
4604 | unsigned NumOps = MI.getDesc().getNumOperands(); |
4605 | bool isTwoAddr = |
4606 | NumOps > 1 && MI.getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1; |
4607 | |
4608 | // FIXME: AsmPrinter doesn't know how to handle |
4609 | // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding. |
4610 | if (MI.getOpcode() == X86::ADD32ri && |
4611 | MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS) |
4612 | return nullptr; |
4613 | |
4614 | // GOTTPOFF relocation loads can only be folded into add instructions. |
4615 | // FIXME: Need to exclude other relocations that only support specific |
4616 | // instructions. |
4617 | if (MOs.size() == X86::AddrNumOperands && |
4618 | MOs[X86::AddrDisp].getTargetFlags() == X86II::MO_GOTTPOFF && |
4619 | MI.getOpcode() != X86::ADD64rr) |
4620 | return nullptr; |
4621 | |
4622 | MachineInstr *NewMI = nullptr; |
4623 | |
4624 | // Attempt to fold any custom cases we have. |
4625 | if (MachineInstr *CustomMI = |
4626 | foldMemoryOperandCustom(MF, MI, OpNum, MOs, InsertPt, Size, Align)) |
4627 | return CustomMI; |
4628 | |
4629 | const X86MemoryFoldTableEntry *I = nullptr; |
4630 | |
4631 | // Folding a memory location into the two-address part of a two-address |
4632 | // instruction is different from folding it elsewhere. It requires
4633 | // replacing the *two* registers with the memory location. |
4634 | if (isTwoAddr && NumOps >= 2 && OpNum < 2 && MI.getOperand(0).isReg() && |
4635 | MI.getOperand(1).isReg() && |
4636 | MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) { |
4637 | I = lookupTwoAddrFoldTable(MI.getOpcode()); |
4638 | isTwoAddrFold = true; |
4639 | } else { |
4640 | if (OpNum == 0) { |
4641 | if (MI.getOpcode() == X86::MOV32r0) { |
4642 | NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI); |
4643 | if (NewMI) |
4644 | return NewMI; |
4645 | } |
4646 | } |
4647 | |
4648 | I = lookupFoldTable(MI.getOpcode(), OpNum); |
4649 | } |
4650 | |
4651 | if (I != nullptr) { |
4652 | unsigned Opcode = I->DstOp; |
4653 | unsigned MinAlign = (I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT; |
4654 | if (Align < MinAlign) |
4655 | return nullptr; |
4656 | bool NarrowToMOV32rm = false; |
4657 | if (Size) { |
4658 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
4659 | const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, |
4660 | &RI, MF); |
4661 | unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8; |
4662 | if (Size < RCSize) { |
4663 | // Check if it's safe to fold the load. If the size of the object is |
4664 | // narrower than the load width, then it's not. |
4665 | if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4) |
4666 | return nullptr; |
4667 | // If this is a 64-bit load, but the spill slot is only 32 bits, we can do
4668 | // a 32-bit load, which is implicitly zero-extended. This is likely due
4669 | // to live interval analysis remat'ing a load from a stack slot.
4670 | if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg()) |
4671 | return nullptr; |
4672 | Opcode = X86::MOV32rm; |
4673 | NarrowToMOV32rm = true; |
4674 | } |
4675 | } |
4676 | |
4677 | if (isTwoAddrFold) |
4678 | NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this); |
4679 | else |
4680 | NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this); |
4681 | |
4682 | if (NarrowToMOV32rm) { |
4683 | // This is the special case where we use a MOV32rm to load a 32-bit
4684 | // value and zero-extend the top bits; change the destination register
4685 | // to a 32-bit one.
4686 | unsigned DstReg = NewMI->getOperand(0).getReg(); |
4687 | if (TargetRegisterInfo::isPhysicalRegister(DstReg)) |
4688 | NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit)); |
4689 | else |
4690 | NewMI->getOperand(0).setSubReg(X86::sub_32bit); |
4691 | } |
4692 | return NewMI; |
4693 | } |
4694 | |
4695 | // If the instruction and target operand are commutable, commute the |
4696 | // instruction and try again. |
4697 | if (AllowCommute) { |
4698 | unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex; |
4699 | if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) { |
4700 | bool HasDef = MI.getDesc().getNumDefs(); |
4701 | unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0; |
4702 | unsigned Reg1 = MI.getOperand(CommuteOpIdx1).getReg(); |
4703 | unsigned Reg2 = MI.getOperand(CommuteOpIdx2).getReg(); |
4704 | bool Tied1 = |
4705 | 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO); |
4706 | bool Tied2 = |
4707 | 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO); |
4708 | |
4709 | // If either of the commutable operands is tied to the destination,
4710 | // then we cannot commute + fold.
4711 | if ((HasDef && Reg0 == Reg1 && Tied1) || |
4712 | (HasDef && Reg0 == Reg2 && Tied2)) |
4713 | return nullptr; |
4714 | |
4715 | MachineInstr *CommutedMI = |
4716 | commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2); |
4717 | if (!CommutedMI) { |
4718 | // Unable to commute. |
4719 | return nullptr; |
4720 | } |
4721 | if (CommutedMI != &MI) { |
4722 | // New instruction. We can't fold from this. |
4723 | CommutedMI->eraseFromParent(); |
4724 | return nullptr; |
4725 | } |
4726 | |
4727 | // Attempt to fold with the commuted version of the instruction. |
4728 | NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt, |
4729 | Size, Align, /*AllowCommute=*/false); |
4730 | if (NewMI) |
4731 | return NewMI; |
4732 | |
4733 | // Folding failed again - undo the commute before returning. |
4734 | MachineInstr *UncommutedMI = |
4735 | commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2); |
4736 | if (!UncommutedMI) { |
4737 | // Unable to commute. |
4738 | return nullptr; |
4739 | } |
4740 | if (UncommutedMI != &MI) { |
4741 | // New instruction. It doesn't need to be kept. |
4742 | UncommutedMI->eraseFromParent(); |
4743 | return nullptr; |
4744 | } |
4745 | |
4746 | // Return here to prevent a duplicate fuse failure report.
4747 | return nullptr; |
4748 | } |
4749 | } |
4750 | |
4751 | // No fusion |
4752 | if (PrintFailedFusing && !MI.isCopy()) |
4753 | dbgs() << "We failed to fuse operand " << OpNum << " in " << MI; |
4754 | return nullptr; |
4755 | } |
4756 | |
4757 | MachineInstr * |
4758 | X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, |
4759 | ArrayRef<unsigned> Ops, |
4760 | MachineBasicBlock::iterator InsertPt, |
4761 | int FrameIndex, LiveIntervals *LIS) const { |
4762 | // Check switch flag |
4763 | if (NoFusing) |
4764 | return nullptr; |
4765 | |
4766 | // Avoid partial and undef register update stalls unless optimizing for size. |
4767 | if (!MF.getFunction().hasOptSize() && |
4768 | (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) || |
4769 | shouldPreventUndefRegUpdateMemFold(MF, MI))) |
4770 | return nullptr; |
4771 | |
4772 | // Don't fold subreg spills, or reloads that use a high subreg. |
4773 | for (auto Op : Ops) { |
4774 | MachineOperand &MO = MI.getOperand(Op); |
4775 | auto SubReg = MO.getSubReg(); |
4776 | if (SubReg && (MO.isDef() || SubReg == X86::sub_8bit_hi)) |
4777 | return nullptr; |
4778 | } |
4779 | |
4780 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
4781 | unsigned Size = MFI.getObjectSize(FrameIndex); |
4782 | unsigned Alignment = MFI.getObjectAlignment(FrameIndex); |
4783 | // If the function stack isn't realigned we don't want to fold instructions |
4784 | // that need increased alignment. |
4785 | if (!RI.needsStackRealignment(MF)) |
4786 | Alignment = |
4787 | std::min(Alignment, Subtarget.getFrameLowering()->getStackAlignment()); |
4788 | if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { |
4789 | unsigned NewOpc = 0; |
4790 | unsigned RCSize = 0; |
4791 | switch (MI.getOpcode()) { |
4792 | default: return nullptr; |
4793 | case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break; |
4794 | case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break; |
4795 | case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break; |
4796 | case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break; |
4797 | } |
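// For example (sketch), after the rewrite below and the fold at the end of
// this function:  TEST32rr %1, %1  -->  CMP32mi8 <fi#N>, 0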
4798 | // Check if it's safe to fold the load. If the size of the object is |
4799 | // narrower than the load width, then it's not. |
4800 | if (Size < RCSize) |
4801 | return nullptr; |
4802 | // Change to CMPXXri r, 0 first. |
4803 | MI.setDesc(get(NewOpc)); |
4804 | MI.getOperand(1).ChangeToImmediate(0); |
4805 | } else if (Ops.size() != 1) |
4806 | return nullptr; |
4807 | |
4808 | return foldMemoryOperandImpl(MF, MI, Ops[0], |
4809 | MachineOperand::CreateFI(FrameIndex), InsertPt, |
4810 | Size, Alignment, /*AllowCommute=*/true); |
4811 | } |
4812 | |
4813 | /// Check if \p LoadMI is a partial register load that we can't fold into \p MI |
4814 | /// because the latter uses contents that wouldn't be defined in the folded |
4815 | /// version. For instance, this transformation isn't legal: |
4816 | /// movss (%rdi), %xmm0 |
4817 | /// addps %xmm0, %xmm0 |
4818 | /// -> |
4819 | /// addps (%rdi), %xmm0 |
4820 | /// |
4821 | /// But this one is: |
4822 | /// movss (%rdi), %xmm0 |
4823 | /// addss %xmm0, %xmm0 |
4824 | /// -> |
4825 | /// addss (%rdi), %xmm0 |
4826 | /// |
4827 | static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI, |
4828 | const MachineInstr &UserMI, |
4829 | const MachineFunction &MF) { |
4830 | unsigned Opc = LoadMI.getOpcode(); |
4831 | unsigned UserOpc = UserMI.getOpcode(); |
4832 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
4833 | const TargetRegisterClass *RC = |
4834 | MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg()); |
4835 | unsigned RegSize = TRI.getRegSizeInBits(*RC); |
4836 | |
4837 | if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm) && |
4838 | RegSize > 32) { |
4839 | // These instructions only load 32 bits; we can't fold them if the
4840 | // destination register is wider than 32 bits (4 bytes) and the user
4841 | // instruction isn't scalar (SS).
4842 | switch (UserOpc) { |
4843 | case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: case X86::VADDSSZrr_Int: |
4844 | case X86::CMPSSrr_Int: case X86::VCMPSSrr_Int: case X86::VCMPSSZrr_Int: |
4845 | case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: case X86::VDIVSSZrr_Int: |
4846 | case X86::MAXSSrr_Int: case X86::VMAXSSrr_Int: case X86::VMAXSSZrr_Int: |
4847 | case X86::MINSSrr_Int: case X86::VMINSSrr_Int: case X86::VMINSSZrr_Int: |
4848 | case X86::MULSSrr_Int: case X86::VMULSSrr_Int: case X86::VMULSSZrr_Int: |
4849 | case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int: case X86::VSUBSSZrr_Int: |
4850 | case X86::VADDSSZrr_Intk: case X86::VADDSSZrr_Intkz: |
4851 | case X86::VDIVSSZrr_Intk: case X86::VDIVSSZrr_Intkz: |
4852 | case X86::VMAXSSZrr_Intk: case X86::VMAXSSZrr_Intkz: |
4853 | case X86::VMINSSZrr_Intk: case X86::VMINSSZrr_Intkz: |
4854 | case X86::VMULSSZrr_Intk: case X86::VMULSSZrr_Intkz: |
4855 | case X86::VSUBSSZrr_Intk: case X86::VSUBSSZrr_Intkz: |
4856 | case X86::VFMADDSS4rr_Int: case X86::VFNMADDSS4rr_Int: |
4857 | case X86::VFMSUBSS4rr_Int: case X86::VFNMSUBSS4rr_Int: |
4858 | case X86::VFMADD132SSr_Int: case X86::VFNMADD132SSr_Int: |
4859 | case X86::VFMADD213SSr_Int: case X86::VFNMADD213SSr_Int: |
4860 | case X86::VFMADD231SSr_Int: case X86::VFNMADD231SSr_Int: |
4861 | case X86::VFMSUB132SSr_Int: case X86::VFNMSUB132SSr_Int: |
4862 | case X86::VFMSUB213SSr_Int: case X86::VFNMSUB213SSr_Int: |
4863 | case X86::VFMSUB231SSr_Int: case X86::VFNMSUB231SSr_Int: |
4864 | case X86::VFMADD132SSZr_Int: case X86::VFNMADD132SSZr_Int: |
4865 | case X86::VFMADD213SSZr_Int: case X86::VFNMADD213SSZr_Int: |
4866 | case X86::VFMADD231SSZr_Int: case X86::VFNMADD231SSZr_Int: |
4867 | case X86::VFMSUB132SSZr_Int: case X86::VFNMSUB132SSZr_Int: |
4868 | case X86::VFMSUB213SSZr_Int: case X86::VFNMSUB213SSZr_Int: |
4869 | case X86::VFMSUB231SSZr_Int: case X86::VFNMSUB231SSZr_Int: |
4870 | case X86::VFMADD132SSZr_Intk: case X86::VFNMADD132SSZr_Intk: |
4871 | case X86::VFMADD213SSZr_Intk: case X86::VFNMADD213SSZr_Intk: |
4872 | case X86::VFMADD231SSZr_Intk: case X86::VFNMADD231SSZr_Intk: |
4873 | case X86::VFMSUB132SSZr_Intk: case X86::VFNMSUB132SSZr_Intk: |
4874 | case X86::VFMSUB213SSZr_Intk: case X86::VFNMSUB213SSZr_Intk: |
4875 | case X86::VFMSUB231SSZr_Intk: case X86::VFNMSUB231SSZr_Intk: |
4876 | case X86::VFMADD132SSZr_Intkz: case X86::VFNMADD132SSZr_Intkz: |
4877 | case X86::VFMADD213SSZr_Intkz: case X86::VFNMADD213SSZr_Intkz: |
4878 | case X86::VFMADD231SSZr_Intkz: case X86::VFNMADD231SSZr_Intkz: |
4879 | case X86::VFMSUB132SSZr_Intkz: case X86::VFNMSUB132SSZr_Intkz: |
4880 | case X86::VFMSUB213SSZr_Intkz: case X86::VFNMSUB213SSZr_Intkz: |
4881 | case X86::VFMSUB231SSZr_Intkz: case X86::VFNMSUB231SSZr_Intkz: |
4882 | return false; |
4883 | default: |
4884 | return true; |
4885 | } |
4886 | } |
4887 | |
4888 | if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm) && |
4889 | RegSize > 64) { |
4890 | // These instructions only load 64 bits; we can't fold them if the
4891 | // destination register is wider than 64 bits (8 bytes) and the user
4892 | // instruction isn't scalar (SD).
4893 | switch (UserOpc) { |
4894 | case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: case X86::VADDSDZrr_Int: |
4895 | case X86::CMPSDrr_Int: case X86::VCMPSDrr_Int: case X86::VCMPSDZrr_Int: |
4896 | case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: case X86::VDIVSDZrr_Int: |
4897 | case X86::MAXSDrr_Int: case X86::VMAXSDrr_Int: case X86::VMAXSDZrr_Int: |
4898 | case X86::MINSDrr_Int: case X86::VMINSDrr_Int: case X86::VMINSDZrr_Int: |
4899 | case X86::MULSDrr_Int: case X86::VMULSDrr_Int: case X86::VMULSDZrr_Int: |
4900 | case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int: case X86::VSUBSDZrr_Int: |
4901 | case X86::VADDSDZrr_Intk: case X86::VADDSDZrr_Intkz: |
4902 | case X86::VDIVSDZrr_Intk: case X86::VDIVSDZrr_Intkz: |
4903 | case X86::VMAXSDZrr_Intk: case X86::VMAXSDZrr_Intkz: |
4904 | case X86::VMINSDZrr_Intk: case X86::VMINSDZrr_Intkz: |
4905 | case X86::VMULSDZrr_Intk: case X86::VMULSDZrr_Intkz: |
4906 | case X86::VSUBSDZrr_Intk: case X86::VSUBSDZrr_Intkz: |
4907 | case X86::VFMADDSD4rr_Int: case X86::VFNMADDSD4rr_Int: |
4908 | case X86::VFMSUBSD4rr_Int: case X86::VFNMSUBSD4rr_Int: |
4909 | case X86::VFMADD132SDr_Int: case X86::VFNMADD132SDr_Int: |
4910 | case X86::VFMADD213SDr_Int: case X86::VFNMADD213SDr_Int: |
4911 | case X86::VFMADD231SDr_Int: case X86::VFNMADD231SDr_Int: |
4912 | case X86::VFMSUB132SDr_Int: case X86::VFNMSUB132SDr_Int: |
4913 | case X86::VFMSUB213SDr_Int: case X86::VFNMSUB213SDr_Int: |
4914 | case X86::VFMSUB231SDr_Int: case X86::VFNMSUB231SDr_Int: |
4915 | case X86::VFMADD132SDZr_Int: case X86::VFNMADD132SDZr_Int: |
4916 | case X86::VFMADD213SDZr_Int: case X86::VFNMADD213SDZr_Int: |
4917 | case X86::VFMADD231SDZr_Int: case X86::VFNMADD231SDZr_Int: |
4918 | case X86::VFMSUB132SDZr_Int: case X86::VFNMSUB132SDZr_Int: |
4919 | case X86::VFMSUB213SDZr_Int: case X86::VFNMSUB213SDZr_Int: |
4920 | case X86::VFMSUB231SDZr_Int: case X86::VFNMSUB231SDZr_Int: |
4921 | case X86::VFMADD132SDZr_Intk: case X86::VFNMADD132SDZr_Intk: |
4922 | case X86::VFMADD213SDZr_Intk: case X86::VFNMADD213SDZr_Intk: |
4923 | case X86::VFMADD231SDZr_Intk: case X86::VFNMADD231SDZr_Intk: |
4924 | case X86::VFMSUB132SDZr_Intk: case X86::VFNMSUB132SDZr_Intk: |
4925 | case X86::VFMSUB213SDZr_Intk: case X86::VFNMSUB213SDZr_Intk: |
4926 | case X86::VFMSUB231SDZr_Intk: case X86::VFNMSUB231SDZr_Intk: |
4927 | case X86::VFMADD132SDZr_Intkz: case X86::VFNMADD132SDZr_Intkz: |
4928 | case X86::VFMADD213SDZr_Intkz: case X86::VFNMADD213SDZr_Intkz: |
4929 | case X86::VFMADD231SDZr_Intkz: case X86::VFNMADD231SDZr_Intkz: |
4930 | case X86::VFMSUB132SDZr_Intkz: case X86::VFNMSUB132SDZr_Intkz: |
4931 | case X86::VFMSUB213SDZr_Intkz: case X86::VFNMSUB213SDZr_Intkz: |
4932 | case X86::VFMSUB231SDZr_Intkz: case X86::VFNMSUB231SDZr_Intkz: |
4933 | return false; |
4934 | default: |
4935 | return true; |
4936 | } |
4937 | } |
4938 | |
4939 | return false; |
4940 | } |
4941 | |
4942 | MachineInstr *X86InstrInfo::foldMemoryOperandImpl( |
4943 | MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, |
4944 | MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, |
4945 | LiveIntervals *LIS) const { |
4946 | |
4947 | // TODO: Support the case where LoadMI loads a wide register, but MI |
4948 | // only uses a subreg. |
4949 | for (auto Op : Ops) { |
4950 | if (MI.getOperand(Op).getSubReg()) |
4951 | return nullptr; |
4952 | } |
4953 | |
4954 | // If loading from a FrameIndex, fold directly from the FrameIndex. |
4955 | unsigned NumOps = LoadMI.getDesc().getNumOperands(); |
4956 | int FrameIndex; |
4957 | if (isLoadFromStackSlot(LoadMI, FrameIndex)) { |
4958 | if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF)) |
4959 | return nullptr; |
4960 | return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS); |
4961 | } |
4962 | |
4963 | // Check switch flag |
4964 | if (NoFusing) return nullptr; |
4965 | |
4966 | // Avoid partial and undef register update stalls unless optimizing for size. |
4967 | if (!MF.getFunction().hasOptSize() && |
4968 | (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) || |
4969 | shouldPreventUndefRegUpdateMemFold(MF, MI))) |
4970 | return nullptr; |
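// For instance, scalar SSE ops such as RCPSS or SQRTSS write only the
// low element of their XMM destination; folding a load into them keeps
// a false dependence on the register's old upper bits, which is only
// worth paying when optimizing for size.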
4971 | |
4972 | // Determine the alignment of the load. |
4973 | unsigned Alignment = 0; |
4974 | if (LoadMI.hasOneMemOperand()) |
4975 | Alignment = (*LoadMI.memoperands_begin())->getAlignment(); |
4976 | else |
4977 | switch (LoadMI.getOpcode()) { |
4978 | case X86::AVX512_512_SET0: |
4979 | case X86::AVX512_512_SETALLONES: |
4980 | Alignment = 64; |
4981 | break; |
4982 | case X86::AVX2_SETALLONES: |
4983 | case X86::AVX1_SETALLONES: |
4984 | case X86::AVX_SET0: |
4985 | case X86::AVX512_256_SET0: |
4986 | Alignment = 32; |
4987 | break; |
4988 | case X86::V_SET0: |
4989 | case X86::V_SETALLONES: |
4990 | case X86::AVX512_128_SET0: |
4991 | Alignment = 16; |
4992 | break; |
4993 | case X86::MMX_SET0: |
4994 | case X86::FsFLD0SD: |
4995 | case X86::AVX512_FsFLD0SD: |
4996 | Alignment = 8; |
4997 | break; |
4998 | case X86::FsFLD0SS: |
4999 | case X86::AVX512_FsFLD0SS: |
5000 | Alignment = 4; |
5001 | break; |
5002 | default: |
5003 | return nullptr; |
5004 | } |
5005 | if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { |
5006 | unsigned NewOpc = 0; |
5007 | switch (MI.getOpcode()) { |
5008 | default: return nullptr; |
5009 | case X86::TEST8rr: NewOpc = X86::CMP8ri; break; |
5010 | case X86::TEST16rr: NewOpc = X86::CMP16ri8; break; |
5011 | case X86::TEST32rr: NewOpc = X86::CMP32ri8; break; |
5012 | case X86::TEST64rr: NewOpc = X86::CMP64ri8; break; |
5013 | } |
5014 | // Change to CMPXXri r, 0 first. |
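// E.g. "TEST32rr %reg, %reg" becomes "CMP32ri8 %reg, 0"; operand 0 is
// then the only register operand, and the fold below can replace it
// with the spill-slot address (giving, e.g., a CMP32mi8).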
5015 | MI.setDesc(get(NewOpc)); |
5016 | MI.getOperand(1).ChangeToImmediate(0); |
5017 | } else if (Ops.size() != 1) |
5018 | return nullptr; |
5019 | |
5020 | // Make sure the subregisters match. |
5021 | // Otherwise we risk changing the size of the load. |
5022 | if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg()) |
5023 | return nullptr; |
5024 | |
5025 | SmallVector<MachineOperand, X86::AddrNumOperands> MOs;
5026 | switch (LoadMI.getOpcode()) { |
5027 | case X86::MMX_SET0: |
5028 | case X86::V_SET0: |
5029 | case X86::V_SETALLONES: |
5030 | case X86::AVX2_SETALLONES: |
5031 | case X86::AVX1_SETALLONES: |
5032 | case X86::AVX_SET0: |
5033 | case X86::AVX512_128_SET0: |
5034 | case X86::AVX512_256_SET0: |
5035 | case X86::AVX512_512_SET0: |
5036 | case X86::AVX512_512_SETALLONES: |
5037 | case X86::FsFLD0SD: |
5038 | case X86::AVX512_FsFLD0SD: |
5039 | case X86::FsFLD0SS: |
5040 | case X86::AVX512_FsFLD0SS: { |
5041 | // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure. |
5042 | // Create a constant-pool entry and operands to load from it. |
5043 | |
5044 | // The medium and large code models can't fold loads this way.
5045 | if (MF.getTarget().getCodeModel() != CodeModel::Small && |
5046 | MF.getTarget().getCodeModel() != CodeModel::Kernel) |
5047 | return nullptr; |
5048 | |
5049 | // x86-32 PIC requires a PIC base register for constant pools. |
5050 | unsigned PICBase = 0; |
5051 | if (MF.getTarget().isPositionIndependent()) { |
5052 | if (Subtarget.is64Bit()) |
5053 | PICBase = X86::RIP; |
5054 | else |
5055 | // FIXME: PICBase = getGlobalBaseReg(&MF); |
5056 | // This doesn't work for several reasons. |
5057 | // 1. GlobalBaseReg may have been spilled. |
5058 | // 2. It may not be live at MI. |
5059 | return nullptr; |
5060 | } |
5061 | |
5062 | // Create a constant-pool entry. |
5063 | MachineConstantPool &MCP = *MF.getConstantPool(); |
5064 | Type *Ty; |
5065 | unsigned Opc = LoadMI.getOpcode(); |
5066 | if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS) |
5067 | Ty = Type::getFloatTy(MF.getFunction().getContext()); |
5068 | else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD) |
5069 | Ty = Type::getDoubleTy(MF.getFunction().getContext()); |
5070 | else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES) |
5071 | Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 16);
5072 | else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 || |
5073 | Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES) |
5074 | Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 8); |
5075 | else if (Opc == X86::MMX_SET0) |
5076 | Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 2); |
5077 | else |
5078 | Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 4); |
5079 | |
5080 | bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES || |
5081 | Opc == X86::AVX512_512_SETALLONES || |
5082 | Opc == X86::AVX1_SETALLONES); |
5083 | const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) : |
5084 | Constant::getNullValue(Ty); |
5085 | unsigned CPI = MCP.getConstantPoolIndex(C, Alignment); |
5086 | |
5087 | // Create operands to load from the constant pool entry. |
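// These five operands follow the X86 memory-operand layout (base,
// scale, index, displacement, segment): base = PICBase, scale = 1,
// index = %noreg, displacement = the constant-pool index, and no
// segment override.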
5088 | MOs.push_back(MachineOperand::CreateReg(PICBase, false)); |
5089 | MOs.push_back(MachineOperand::CreateImm(1)); |
5090 | MOs.push_back(MachineOperand::CreateReg(0, false)); |
5091 | MOs.push_back(MachineOperand::CreateCPI(CPI, 0)); |
5092 | MOs.push_back(MachineOperand::CreateReg(0, false)); |
5093 | break; |
5094 | } |
5095 | default: { |
5096 | if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF)) |
5097 | return nullptr; |
5098 | |
5099 | // Folding a normal load. Just copy the load's address operands. |
5100 | MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands, |
5101 | LoadMI.operands_begin() + NumOps); |
5102 | break; |
5103 | } |
5104 | } |
5105 | return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt, |
5106 | /*Size=*/0, Alignment, /*AllowCommute=*/true); |
5107 | } |
5108 | |
5109 | static SmallVector<MachineMemOperand *, 2> |
5110 | extractLoadMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) { |
5111 | SmallVector<MachineMemOperand *, 2> LoadMMOs; |
5112 | |
5113 | for (MachineMemOperand *MMO : MMOs) { |
5114 | if (!MMO->isLoad()) |
5115 | continue; |
5116 | |
5117 | if (!MMO->isStore()) { |
5118 | // Reuse the MMO. |
5119 | LoadMMOs.push_back(MMO); |
5120 | } else { |
5121 | // Clone the MMO and unset the store flag. |
5122 | LoadMMOs.push_back(MF.getMachineMemOperand( |
5123 | MMO->getPointerInfo(), MMO->getFlags() & ~MachineMemOperand::MOStore, |
5124 | MMO->getSize(), MMO->getBaseAlignment(), MMO->getAAInfo(), nullptr, |
5125 | MMO->getSyncScopeID(), MMO->getOrdering(), |
5126 | MMO->getFailureOrdering())); |
5127 | } |
5128 | } |
5129 | |
5130 | return LoadMMOs; |
5131 | } |
5132 | |
5133 | static SmallVector<MachineMemOperand *, 2> |
5134 | extractStoreMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) { |
5135 | SmallVector<MachineMemOperand *, 2> StoreMMOs; |
5136 | |
5137 | for (MachineMemOperand *MMO : MMOs) { |
5138 | if (!MMO->isStore()) |
5139 | continue; |
5140 | |
5141 | if (!MMO->isLoad()) { |
5142 | // Reuse the MMO. |
5143 | StoreMMOs.push_back(MMO); |
5144 | } else { |
5145 | // Clone the MMO and unset the load flag. |
5146 | StoreMMOs.push_back(MF.getMachineMemOperand( |
5147 | MMO->getPointerInfo(), MMO->getFlags() & ~MachineMemOperand::MOLoad, |
5148 | MMO->getSize(), MMO->getBaseAlignment(), MMO->getAAInfo(), nullptr, |
5149 | MMO->getSyncScopeID(), MMO->getOrdering(), |
5150 | MMO->getFailureOrdering())); |
5151 | } |
5152 | } |
5153 | |
5154 | return StoreMMOs; |
5155 | } |
5156 | |
5157 | bool X86InstrInfo::unfoldMemoryOperand( |
5158 | MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad, |
5159 | bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const { |
5160 | const X86MemoryFoldTableEntry *I = lookupUnfoldTable(MI.getOpcode()); |
5161 | if (I == nullptr) |
5162 | return false; |
5163 | unsigned Opc = I->DstOp; |
5164 | unsigned Index = I->Flags & TB_INDEX_MASK; |
5165 | bool FoldedLoad = I->Flags & TB_FOLDED_LOAD; |
5166 | bool FoldedStore = I->Flags & TB_FOLDED_STORE; |
5167 | if (UnfoldLoad && !FoldedLoad) |
5168 | return false; |
5169 | UnfoldLoad &= FoldedLoad; |
5170 | if (UnfoldStore && !FoldedStore) |
5171 | return false; |
5172 | UnfoldStore &= FoldedStore; |
5173 | |
5174 | const MCInstrDesc &MCID = get(Opc); |
5175 | const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF); |
5176 | // TODO: Check if 32-byte or greater accesses are slow too? |
5177 | if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass && |
5178 | Subtarget.isUnalignedMem16Slow()) |
5179 | // Without memoperands, loadRegFromAddr and storeRegToAddr will
5180 | // conservatively assume the address is unaligned. That's bad for
5181 | // performance.
5182 | return false; |
5183 | SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps; |
5184 | SmallVector<MachineOperand,2> BeforeOps; |
5185 | SmallVector<MachineOperand,2> AfterOps; |
5186 | SmallVector<MachineOperand,4> ImpOps; |
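// Partition MI's operands: the X86::AddrNumOperands operands starting
// at Index form the folded address (AddrOps); explicit operands before
// and after that range go to BeforeOps and AfterOps, and implicit
// register operands to ImpOps. E.g. for a folded ADD32rm (Index == 2),
// the def and the first source land in BeforeOps and the implicit
// EFLAGS def in ImpOps.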
5187 | for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { |
5188 | MachineOperand &Op = MI.getOperand(i); |
5189 | if (i >= Index && i < Index + X86::AddrNumOperands) |
5190 | AddrOps.push_back(Op); |
5191 | else if (Op.isReg() && Op.isImplicit()) |
5192 | ImpOps.push_back(Op); |
5193 | else if (i < Index) |
5194 | BeforeOps.push_back(Op); |
5195 | else if (i > Index) |
5196 | AfterOps.push_back(Op); |
5197 | } |
5198 | |
5199 | // Emit the load instruction. |
5200 | if (UnfoldLoad) { |
5201 | auto MMOs = extractLoadMMOs(MI.memoperands(), MF); |
5202 | loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs, NewMIs); |
5203 | if (UnfoldStore) { |
5204 | // Address operands cannot be marked isKill. |
5205 | for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) { |
5206 | MachineOperand &MO = NewMIs[0]->getOperand(i); |
5207 | if (MO.isReg()) |
5208 | MO.setIsKill(false); |
5209 | } |
5210 | } |
5211 | } |
5212 | |
5213 | // Emit the data processing instruction. |
5214 | MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true); |
5215 | MachineInstrBuilder MIB(MF, DataMI); |
5216 | |
5217 | if (FoldedStore) |
5218 | MIB.addReg(Reg, RegState::Define); |
5219 | for (MachineOperand &BeforeOp : BeforeOps) |
5220 | MIB.add(BeforeOp); |
5221 | if (FoldedLoad) |
5222 | MIB.addReg(Reg); |
5223 | for (MachineOperand &AfterOp : AfterOps) |
5224 | MIB.add(AfterOp); |
5225 | for (MachineOperand &ImpOp : ImpOps) { |
5226 | MIB.addReg(ImpOp.getReg(), |
5227 | getDefRegState(ImpOp.isDef()) | |
5228 | RegState::Implicit | |
5229 | getKillRegState(ImpOp.isKill()) | |
5230 | getDeadRegState(ImpOp.isDead()) | |
5231 | getUndefRegState(ImpOp.isUndef())); |
5232 | } |
5233 | // Change CMP32ri r, 0 back to TEST32rr r, r, etc. |
5234 | switch (DataMI->getOpcode()) { |
5235 | default: break; |
5236 | case X86::CMP64ri32: |
5237 | case X86::CMP64ri8: |
5238 | case X86::CMP32ri: |
5239 | case X86::CMP32ri8: |
5240 | case X86::CMP16ri: |
5241 | case X86::CMP16ri8: |
5242 | case X86::CMP8ri: { |
5243 | MachineOperand &MO0 = DataMI->getOperand(0); |
5244 | MachineOperand &MO1 = DataMI->getOperand(1); |
5245 | if (MO1.getImm() == 0) { |
5246 | unsigned NewOpc; |
5247 | switch (DataMI->getOpcode()) { |
5248 | default: llvm_unreachable("Unreachable!");
5249 | case X86::CMP64ri8: |
5250 | case X86::CMP64ri32: NewOpc = X86::TEST64rr; break; |
5251 | case X86::CMP32ri8: |
5252 | case X86::CMP32ri: NewOpc = X86::TEST32rr; break; |
5253 | case X86::CMP16ri8: |
5254 | case X86::CMP16ri: NewOpc = X86::TEST16rr; break; |
5255 | case X86::CMP8ri: NewOpc = X86::TEST8rr; break; |
5256 | } |
5257 | DataMI->setDesc(get(NewOpc)); |
5258 | MO1.ChangeToRegister(MO0.getReg(), false); |
5259 | } |
5260 | } |
5261 | } |
5262 | NewMIs.push_back(DataMI); |
5263 | |
5264 | // Emit the store instruction. |
5265 | if (UnfoldStore) { |
5266 | const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF); |
5267 | auto MMOs = extractStoreMMOs(MI.memoperands(), MF); |
5268 | storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs, NewMIs); |
5269 | } |
5270 | |
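// On success, NewMIs holds the new instructions in program order: the
// reload (if UnfoldLoad), the register-form DataMI, and the store (if
// UnfoldStore); the caller is expected to insert them in place of MI.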
5271 | return true; |
5272 | } |
5273 | |
5274 | bool |
5275 | X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, |
5276 | SmallVectorImpl<SDNode*> &NewNodes) const { |
5277 | if (!N->isMachineOpcode()) |
5278 | return false; |
5279 | |
5280 | const X86MemoryFoldTableEntry *I = lookupUnfoldTable(N->getMachineOpcode()); |
5281 | if (I == nullptr) |
5282 | return false; |
5283 | unsigned Opc = I->DstOp; |
5284 | unsigned Index = I->Flags & TB_INDEX_MASK; |
5285 | bool FoldedLoad = I->Flags & TB_FOLDED_LOAD; |
5286 | bool FoldedStore = I->Flags & TB_FOLDED_STORE; |
5287 | const MCInstrDesc &MCID = get(Opc); |
5288 | MachineFunction &MF = DAG.getMachineFunction(); |
5289 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
5290 | const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF); |
5291 | unsigned NumDefs = MCID.NumDefs; |
5292 | std::vector<SDValue> AddrOps; |
5293 | std::vector<SDValue> BeforeOps; |
5294 | std::vector<SDValue> AfterOps; |
5295 | SDLoc dl(N); |
5296 | unsigned NumOps = N->getNumOperands(); |
5297 | for (unsigned i = 0; i != NumOps-1; ++i) { |
5298 | SDValue Op = N->getOperand(i); |
5299 | if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands) |
5300 | AddrOps.push_back(Op); |
5301 | else if (i < Index-NumDefs) |
5302 | BeforeOps.push_back(Op); |
5303 | else if (i > Index-NumDefs) |
5304 | AfterOps.push_back(Op); |
5305 | } |
5306 | SDValue Chain = N->getOperand(NumOps-1); |
5307 | AddrOps.push_back(Chain); |
5308 | |
5309 | // Emit the load instruction. |
5310 | SDNode *Load = nullptr; |
5311 | if (FoldedLoad) { |
5312 | EVT VT = *TRI.legalclasstypes_begin(*RC); |
5313 | auto MMOs = extractLoadMMOs(cast<MachineSDNode>(N)->memoperands(), MF); |
5314 | if (MMOs.empty() && RC == &X86::VR128RegClass && |
5315 | Subtarget.isUnalignedMem16Slow()) |
5316 | // Do not introduce a slow unaligned load. |
5317 | return false; |
5318 | // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte |
5319 | // memory access is slow above. |
5320 | unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); |
5321 | bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment; |
5322 | Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, Subtarget), dl, |
5323 | VT, MVT::Other, AddrOps); |
5324 | NewNodes.push_back(Load); |
5325 | |
5326 | // Preserve memory reference information. |
5327 | DAG.setNodeMemRefs(cast<MachineSDNode>(Load), MMOs); |
5328 | } |
5329 | |
5330 | // Emit the data processing instruction. |
5331 | std::vector<EVT> VTs; |
5332 | const TargetRegisterClass *DstRC = nullptr; |
5333 | if (MCID.getNumDefs() > 0) { |
5334 | DstRC = getRegClass(MCID, 0, &RI, MF); |
5335 | VTs.push_back(*TRI.legalclasstypes_begin(*DstRC)); |
5336 | } |
5337 | for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) { |
5338 | EVT VT = N->getValueType(i); |
5339 | if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs()) |
5340 | VTs.push_back(VT); |
5341 | } |
5342 | if (Load) |
5343 | BeforeOps.push_back(SDValue(Load, 0)); |
5344 | BeforeOps.insert(BeforeOps.end(), AfterOps.begin(), AfterOps.end()); |
5345 | // Change CMP32ri r, 0 back to TEST32rr r, r, etc. |
5346 | switch (Opc) { |
5347 | default: break; |
5348 | case X86::CMP64ri32: |
5349 | case X86::CMP64ri8: |
5350 | case X86::CMP32ri: |
5351 | case X86::CMP32ri8: |
5352 | case X86::CMP16ri: |
5353 | case X86::CMP16ri8: |
5354 | case X86::CMP8ri: |
5355 | if (isNullConstant(BeforeOps[1])) { |
5356 | switch (Opc) { |
5357 | default: llvm_unreachable("Unreachable!");
5358 | case X86::CMP64ri8: |
5359 | case X86::CMP64ri32: Opc = X86::TEST64rr; break; |
5360 | case X86::CMP32ri8: |
5361 | case X86::CMP32ri: Opc = X86::TEST32rr; break; |
5362 | case X86::CMP16ri8: |
5363 | case X86::CMP16ri: Opc = X86::TEST16rr; break; |
5364 | case X86::CMP8ri: Opc = X86::TEST8rr; break; |
5365 | } |
5366 | BeforeOps[1] = BeforeOps[0]; |
5367 | } |
5368 | } |
5369 | SDNode *NewNode = DAG.getMachineNode(Opc, dl, VTs, BeforeOps);
5370 | NewNodes.push_back(NewNode); |
5371 | |
5372 | // Emit the store instruction. |
5373 | if (FoldedStore) { |
5374 | AddrOps.pop_back(); |
5375 | AddrOps.push_back(SDValue(NewNode, 0)); |
5376 | AddrOps.push_back(Chain); |
5377 | auto MMOs = extractStoreMMOs(cast<MachineSDNode>(N)->memoperands(), MF); |
5378 | if (MMOs.empty() && RC == &X86::VR128RegClass && |
5379 | Subtarget.isUnalignedMem16Slow()) |
5380 | // Do not introduce a slow unaligned store. |
5381 | return false; |
5382 | // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte |
5383 | // memory access is slow above. |
5384 | unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); |
5385 | bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment; |
5386 | SDNode *Store = |
5387 | DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget), |
5388 | dl, MVT::Other, AddrOps); |
5389 | NewNodes.push_back(Store); |
5390 | |
5391 | // Preserve memory reference information. |
5392 | DAG.setNodeMemRefs(cast<MachineSDNode>(Store), MMOs); |
5393 | } |
5394 | |
5395 | return true; |
5396 | } |
5397 | |
5398 | unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc, |
5399 | bool UnfoldLoad, bool UnfoldStore, |
5400 | unsigned *LoadRegIndex) const { |
5401 | const X86MemoryFoldTableEntry *I = lookupUnfoldTable(Opc); |
5402 | if (I == nullptr) |
5403 | return 0; |
5404 | bool FoldedLoad = I->Flags & TB_FOLDED_LOAD; |
5405 | bool FoldedStore = I->Flags & TB_FOLDED_STORE; |
5406 | if (UnfoldLoad && !FoldedLoad) |
5407 | return 0; |
5408 | if (UnfoldStore && !FoldedStore) |
5409 | return 0; |
5410 | if (LoadRegIndex) |
5411 | *LoadRegIndex = I->Flags & TB_INDEX_MASK; |
5412 | return I->DstOp; |
5413 | } |
5414 | |
5415 | bool |
5416 | X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, |
5417 | int64_t &Offset1, int64_t &Offset2) const { |
5418 | if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode()) |
5419 | return false; |
5420 | unsigned Opc1 = Load1->getMachineOpcode(); |
5421 | unsigned Opc2 = Load2->getMachineOpcode(); |
5422 | switch (Opc1) { |
5423 | default: return false; |
5424 | case X86::MOV8rm: |
5425 | case X86::MOV16rm: |
5426 | case X86::MOV32rm: |
5427 | case X86::MOV64rm: |
5428 | case X86::LD_Fp32m: |
5429 | case X86::LD_Fp64m: |
5430 | case X86::LD_Fp80m: |
5431 | case X86::MOVSSrm: |
5432 | case X86::MOVSDrm: |
5433 | case X86::MMX_MOVD64rm: |
5434 | case X86::MMX_MOVQ64rm: |
5435 | case X86::MOVAPSrm: |
5436 | case X86::MOVUPSrm: |
5437 | case X86::MOVAPDrm: |
5438 | case X86::MOVUPDrm: |
5439 | case X86::MOVDQArm: |
5440 | case X86::MOVDQUrm: |
5441 | // AVX load instructions |
5442 | case X86::VMOVSSrm: |
5443 | case X86::VMOVSDrm: |
5444 | case X86::VMOVAPSrm: |
5445 | case X86::VMOVUPSrm: |
5446 | case X86::VMOVAPDrm: |
5447 | case X86::VMOVUPDrm: |
5448 | case X86::VMOVDQArm: |
5449 | case X86::VMOVDQUrm: |
5450 | case X86::VMOVAPSYrm: |
5451 | case X86::VMOVUPSYrm: |
5452 | case X86::VMOVAPDYrm: |
5453 | case X86::VMOVUPDYrm: |
5454 | case X86::VMOVDQAYrm: |
5455 | case X86::VMOVDQUYrm: |
5456 | // AVX512 load instructions |
5457 | case X86::VMOVSSZrm: |
5458 | case X86::VMOVSDZrm: |
5459 | case X86::VMOVAPSZ128rm: |
5460 | case X86::VMOVUPSZ128rm: |
5461 | case X86::VMOVAPSZ128rm_NOVLX: |
5462 | case X86::VMOVUPSZ128rm_NOVLX: |
5463 | case X86::VMOVAPDZ128rm: |
5464 | case X86::VMOVUPDZ128rm: |
5465 | case X86::VMOVDQU8Z128rm: |
5466 | case X86::VMOVDQU16Z128rm: |
5467 | case X86::VMOVDQA32Z128rm: |
5468 | case X86::VMOVDQU32Z128rm: |
5469 | case X86::VMOVDQA64Z128rm: |
5470 | case X86::VMOVDQU64Z128rm: |
5471 | case X86::VMOVAPSZ256rm: |
5472 | case X86::VMOVUPSZ256rm: |
5473 | case X86::VMOVAPSZ256rm_NOVLX: |
5474 | case X86::VMOVUPSZ256rm_NOVLX: |
5475 | case X86::VMOVAPDZ256rm: |
5476 | case X86::VMOVUPDZ256rm: |
5477 | case X86::VMOVDQU8Z256rm: |
5478 | case X86::VMOVDQU16Z256rm: |
5479 | case X86::VMOVDQA32Z256rm: |
5480 | case X86::VMOVDQU32Z256rm: |
5481 | case X86::VMOVDQA64Z256rm: |
5482 | case X86::VMOVDQU64Z256rm: |
5483 | case X86::VMOVAPSZrm: |
5484 | case X86::VMOVUPSZrm: |
5485 | case X86::VMOVAPDZrm: |
5486 | case X86::VMOVUPDZrm: |
5487 | case X86::VMOVDQU8Zrm: |
5488 | case X86::VMOVDQU16Zrm: |
5489 | case X86::VMOVDQA32Zrm: |
5490 | case X86::VMOVDQU32Zrm: |
5491 | case X86::VMOVDQA64Zrm: |
5492 | case X86::VMOVDQU64Zrm: |
5493 | case X86::KMOVBkm: |
5494 | case X86::KMOVWkm: |
5495 | case X86::KMOVDkm: |
5496 | case X86::KMOVQkm: |
5497 | break; |
5498 | } |
5499 | switch (Opc2) { |
5500 | default: return false; |
5501 | case X86::MOV8rm: |
5502 | case X86::MOV16rm: |
5503 | case X86::MOV32rm: |
5504 | case X86::MOV64rm: |
5505 | case X86::LD_Fp32m: |
5506 | case X86::LD_Fp64m: |
5507 | case X86::LD_Fp80m: |
5508 | case X86::MOVSSrm: |
5509 | case X86::MOVSDrm: |
5510 | case X86::MMX_MOVD64rm: |
5511 | case X86::MMX_MOVQ64rm: |
5512 | case X86::MOVAPSrm: |
5513 | case X86::MOVUPSrm: |
5514 | case X86::MOVAPDrm: |
5515 | case X86::MOVUPDrm: |
5516 | case X86::MOVDQArm: |
5517 | case X86::MOVDQUrm: |
5518 | // AVX load instructions |
5519 | case X86::VMOVSSrm: |
5520 | case X86::VMOVSDrm: |
5521 | case X86::VMOVAPSrm: |
5522 | case X86::VMOVUPSrm: |
5523 | case X86::VMOVAPDrm: |
5524 | case X86::VMOVUPDrm: |
5525 | case X86::VMOVDQArm: |
5526 | case X86::VMOVDQUrm: |
5527 | case X86::VMOVAPSYrm: |
5528 | case X86::VMOVUPSYrm: |
5529 | case X86::VMOVAPDYrm: |
5530 | case X86::VMOVUPDYrm: |
5531 | case X86::VMOVDQAYrm: |
5532 | case X86::VMOVDQUYrm: |
5533 | // AVX512 load instructions |
5534 | case X86::VMOVSSZrm: |
5535 | case X86::VMOVSDZrm: |
5536 | case X86::VMOVAPSZ128rm: |
5537 | case X86::VMOVUPSZ128rm: |
5538 | case X86::VMOVAPSZ128rm_NOVLX: |
5539 | case X86::VMOVUPSZ128rm_NOVLX: |
5540 | case X86::VMOVAPDZ128rm: |
5541 | case X86::VMOVUPDZ128rm: |
5542 | case X86::VMOVDQU8Z128rm: |
5543 | case X86::VMOVDQU16Z128rm: |
5544 | case X86::VMOVDQA32Z128rm: |
5545 | case X86::VMOVDQU32Z128rm: |
5546 | case X86::VMOVDQA64Z128rm: |
5547 | case X86::VMOVDQU64Z128rm: |
5548 | case X86::VMOVAPSZ256rm: |
5549 | case X86::VMOVUPSZ256rm: |
5550 | case X86::VMOVAPSZ256rm_NOVLX: |
5551 | case X86::VMOVUPSZ256rm_NOVLX: |
5552 | case X86::VMOVAPDZ256rm: |
5553 | case X86::VMOVUPDZ256rm: |
5554 | case X86::VMOVDQU8Z256rm: |
5555 | case X86::VMOVDQU16Z256rm: |
5556 | case X86::VMOVDQA32Z256rm: |
5557 | case X86::VMOVDQU32Z256rm: |
5558 | case X86::VMOVDQA64Z256rm: |
5559 | case X86::VMOVDQU64Z256rm: |
5560 | case X86::VMOVAPSZrm: |
5561 | case X86::VMOVUPSZrm: |
5562 | case X86::VMOVAPDZrm: |
5563 | case X86::VMOVUPDZrm: |
5564 | case X86::VMOVDQU8Zrm: |
5565 | case X86::VMOVDQU16Zrm: |
5566 | case X86::VMOVDQA32Zrm: |
5567 | case X86::VMOVDQU32Zrm: |
5568 | case X86::VMOVDQA64Zrm: |
5569 | case X86::VMOVDQU64Zrm: |
5570 | case X86::KMOVBkm: |
5571 | case X86::KMOVWkm: |
5572 | case X86::KMOVDkm: |
5573 | case X86::KMOVQkm: |
5574 | break; |
5575 | } |
5576 | |
5577 | // Lambda to check whether both loads have the same value for a given
5577 | // operand index.
5578 | auto HasSameOp = [&](int I) { |
5579 | return Load1->getOperand(I) == Load2->getOperand(I); |
5580 | }; |
5581 | |
5582 | // All operands except the displacement should match. |
5583 | if (!HasSameOp(X86::AddrBaseReg) || !HasSameOp(X86::AddrScaleAmt) || |
5584 | !HasSameOp(X86::AddrIndexReg) || !HasSameOp(X86::AddrSegmentReg)) |
5585 | return false; |
5586 | |
5587 | // The chain operand (operand 5) must be the same.
5588 | if (!HasSameOp(5)) |
5589 | return false; |
5590 | |
5591 | // Now let's examine if the displacements are constants. |
5592 | auto Disp1 = dyn_cast<ConstantSDNode>(Load1->getOperand(X86::AddrDisp)); |
5593 | auto Disp2 = dyn_cast<ConstantSDNode>(Load2->getOperand(X86::AddrDisp)); |
5594 | if (!Disp1 || !Disp2) |
5595 | return false; |
5596 | |
5597 | Offset1 = Disp1->getSExtValue(); |
5598 | Offset2 = Disp2->getSExtValue(); |
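// E.g. loads of 8(%rdi) and 12(%rdi) on the same chain reach this
// point and report Offset1 == 8 and Offset2 == 12.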
5599 | return true; |
5600 | } |
5601 | |
5602 | bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, |
5603 | int64_t Offset1, int64_t Offset2, |
5604 | unsigned NumLoads) const { |
5605 | assert(Offset2 > Offset1);
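// Reject pairs more than 64 quadwords (roughly 512 bytes) apart.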
5606 | if ((Offset2 - Offset1) / 8 > 64) |
5607 | return false; |
5608 | |
5609 | unsigned Opc1 = Load1->getMachineOpcode(); |
5610 | unsigned Opc2 = Load2->getMachineOpcode(); |
5611 | if (Opc1 != Opc2) |
5612 | return false; // FIXME: overly conservative? |
5613 | |
5614 | switch (Opc1) { |
5615 | default: break; |
5616 | case X86::LD_Fp32m: |
5617 | case X86::LD_Fp64m: |
5618 | case X86::LD_Fp80m: |
5619 | case X86::MMX_MOVD64rm: |
5620 | case X86::MMX_MOVQ64rm: |
5621 | return false; |
5622 | } |
5623 | |
5624 | EVT VT = Load1->getValueType(0); |
5625 | switch (VT.getSimpleVT().SimpleTy) { |
5626 | default: |
5627 | // XMM registers. In 64-bit mode we can be a bit more aggressive since we |
5628 | // have 16 of them to play with. |
5629 | if (Subtarget.is64Bit()) { |
5630 | if (NumLoads >= 3) |
5631 | return false; |
5632 | } else if (NumLoads) { |
5633 | return false; |
5634 | } |
5635 | break; |
5636 | case MVT::i8: |
5637 | case MVT::i16: |
5638 | case MVT::i32: |
5639 | case MVT::i64: |
5640 | case MVT::f32: |
5641 | case MVT::f64: |
5642 | if (NumLoads) |
5643 | return false; |
5644 | break; |
5645 | } |
5646 | |
5647 | return true; |
5648 | } |
5649 | |
5650 | bool X86InstrInfo:: |
5651 | reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { |
5652 | assert(Cond.size() == 1 && "Invalid X86 branch condition!");
5653 | X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm()); |
5654 | Cond[0].setImm(GetOppositeBranchCondition(CC)); |
5655 | return false; |
5656 | } |
5657 | |
5658 | bool X86InstrInfo:: |
5659 | isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const { |
5660 | // FIXME: Return false for x87 stack register classes for now. We can't |
5661 | // allow any loads of these registers before FpGet_ST0_80. |
5662 | return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass || |
5663 | RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass || |
5664 | RC == &X86::RFP80RegClass); |
5665 | } |
5666 | |
5667 | /// Return a virtual register initialized with the global base register
5668 | /// value. Output instructions required to initialize the register in the
5669 | /// function entry block, if necessary.
5670 | /// |
5671 | /// TODO: Eliminate this and move the code to X86MachineFunctionInfo. |
5672 | /// |
5673 | unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const { |
5674 | assert((!Subtarget.is64Bit() ||
5675 | MF->getTarget().getCodeModel() == CodeModel::Medium ||
5676 | MF->getTarget().getCodeModel() == CodeModel::Large) &&
5677 | "X86-64 PIC uses RIP relative addressing");
5678 | |
5679 | X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>(); |
5680 | unsigned GlobalBaseReg = X86FI->getGlobalBaseReg(); |
5681 | if (GlobalBaseReg != 0) |
5682 | return GlobalBaseReg; |
5683 | |
5684 | // Create the register. The code to initialize it is inserted |
5685 | // later, by the CGBR pass (below). |
5686 | MachineRegisterInfo &RegInfo = MF->getRegInfo(); |
5687 | GlobalBaseReg = RegInfo.createVirtualRegister( |
5688 | Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass); |
5689 | X86FI->setGlobalBaseReg(GlobalBaseReg); |
5690 | return GlobalBaseReg; |
5691 | } |
5692 | |
5693 | // These are the replaceable SSE instructions. Some of these have Int variants |
5694 | // that we don't include here. We don't want to replace instructions selected |
5695 | // by intrinsics. |
5696 | static const uint16_t ReplaceableInstrs[][3] = { |
5697 | //PackedSingle PackedDouble PackedInt |
5698 | { X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr }, |
5699 | { X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm }, |
5700 | { X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr }, |
5701 | { X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr }, |
5702 | { X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm }, |
5703 | { X86::MOVLPSmr, X86::MOVLPDmr, X86::MOVPQI2QImr }, |
5704 | { X86::MOVSDmr, X86::MOVSDmr, X86::MOVPQI2QImr }, |
5705 | { X86::MOVSSmr, X86::MOVSSmr, X86::MOVPDI2DImr }, |
5706 | { X86::MOVSDrm, X86::MOVSDrm, X86::MOVQI2PQIrm }, |
5707 | { X86::MOVSSrm, X86::MOVSSrm, X86::MOVDI2PDIrm }, |
5708 | { X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr }, |
5709 | { X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm }, |
5710 | { X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr }, |
5711 | { X86::ANDPSrm, X86::ANDPDrm, X86::PANDrm }, |
5712 | { X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr }, |
5713 | { X86::ORPSrm, X86::ORPDrm, X86::PORrm }, |
5714 | { X86::ORPSrr, X86::ORPDrr, X86::PORrr }, |
5715 | { X86::XORPSrm, X86::XORPDrm, X86::PXORrm }, |
5716 | { X86::XORPSrr, X86::XORPDrr, X86::PXORrr }, |
5717 | { X86::UNPCKLPDrm, X86::UNPCKLPDrm, X86::PUNPCKLQDQrm }, |
5718 | { X86::MOVLHPSrr, X86::UNPCKLPDrr, X86::PUNPCKLQDQrr }, |
5719 | { X86::UNPCKHPDrm, X86::UNPCKHPDrm, X86::PUNPCKHQDQrm }, |
5720 | { X86::UNPCKHPDrr, X86::UNPCKHPDrr, X86::PUNPCKHQDQrr }, |
5721 | { X86::UNPCKLPSrm, X86::UNPCKLPSrm, X86::PUNPCKLDQrm }, |
5722 | { X86::UNPCKLPSrr, X86::UNPCKLPSrr, X86::PUNPCKLDQrr }, |
5723 | { X86::UNPCKHPSrm, X86::UNPCKHPSrm, X86::PUNPCKHDQrm }, |
5724 | { X86::UNPCKHPSrr, X86::UNPCKHPSrr, X86::PUNPCKHDQrr }, |
5725 | { X86::EXTRACTPSmr, X86::EXTRACTPSmr, X86::PEXTRDmr }, |
5726 | { X86::EXTRACTPSrr, X86::EXTRACTPSrr, X86::PEXTRDrr }, |
5727 | // AVX 128-bit support |
5728 | { X86::VMOVAPSmr, X86::VMOVAPDmr, X86::VMOVDQAmr }, |
5729 | { X86::VMOVAPSrm, X86::VMOVAPDrm, X86::VMOVDQArm }, |
5730 | { X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr }, |
5731 | { X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr }, |
5732 | { X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm }, |
5733 | { X86::VMOVLPSmr, X86::VMOVLPDmr, X86::VMOVPQI2QImr }, |
5734 | { X86::VMOVSDmr, X86::VMOVSDmr, X86::VMOVPQI2QImr }, |
5735 | { X86::VMOVSSmr, X86::VMOVSSmr, X86::VMOVPDI2DImr }, |
5736 | { X86::VMOVSDrm, X86::VMOVSDrm, X86::VMOVQI2PQIrm }, |
5737 | { X86::VMOVSSrm, X86::VMOVSSrm, X86::VMOVDI2PDIrm }, |
5738 | { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr }, |
5739 | { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm }, |
5740 | { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr }, |
5741 | { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDrm }, |
5742 | { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDrr }, |
5743 | { X86::VORPSrm, X86::VORPDrm, X86::VPORrm }, |
5744 | { X86::VORPSrr, X86::VORPDrr, X86::VPORrr }, |
5745 | { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORrm }, |
5746 | { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORrr }, |
5747 | { X86::VUNPCKLPDrm, X86::VUNPCKLPDrm, X86::VPUNPCKLQDQrm }, |
5748 | { X86::VMOVLHPSrr, X86::VUNPCKLPDrr, X86::VPUNPCKLQDQrr }, |
5749 | { X86::VUNPCKHPDrm, X86::VUNPCKHPDrm, X86::VPUNPCKHQDQrm }, |
5750 | { X86::VUNPCKHPDrr, X86::VUNPCKHPDrr, X86::VPUNPCKHQDQrr }, |
5751 | { X86::VUNPCKLPSrm, X86::VUNPCKLPSrm, X86::VPUNPCKLDQrm }, |
5752 | { X86::VUNPCKLPSrr, X86::VUNPCKLPSrr, X86::VPUNPCKLDQrr }, |
5753 | { X86::VUNPCKHPSrm, X86::VUNPCKHPSrm, X86::VPUNPCKHDQrm }, |
5754 | { X86::VUNPCKHPSrr, X86::VUNPCKHPSrr, X86::VPUNPCKHDQrr }, |
5755 | { X86::VEXTRACTPSmr, X86::VEXTRACTPSmr, X86::VPEXTRDmr }, |
5756 | { X86::VEXTRACTPSrr, X86::VEXTRACTPSrr, X86::VPEXTRDrr }, |
5757 | // AVX 256-bit support |
5758 | { X86::VMOVAPSYmr, X86::VMOVAPDYmr, X86::VMOVDQAYmr }, |
5759 | { X86::VMOVAPSYrm, X86::VMOVAPDYrm, X86::VMOVDQAYrm }, |
5760 | { X86::VMOVAPSYrr, X86::VMOVAPDYrr, X86::VMOVDQAYrr }, |
5761 | { X86::VMOVUPSYmr, X86::VMOVUPDYmr, X86::VMOVDQUYmr }, |
5762 | { X86::VMOVUPSYrm, X86::VMOVUPDYrm, X86::VMOVDQUYrm }, |
5763 | { X86::VMOVNTPSYmr, X86::VMOVNTPDYmr, X86::VMOVNTDQYmr }, |
5764 | { X86::VPERMPSYrm, X86::VPERMPSYrm, X86::VPERMDYrm }, |
5765 | { X86::VPERMPSYrr, X86::VPERMPSYrr, X86::VPERMDYrr }, |
5766 | { X86::VPERMPDYmi, X86::VPERMPDYmi, X86::VPERMQYmi }, |
5767 | { X86::VPERMPDYri, X86::VPERMPDYri, X86::VPERMQYri }, |
5768 | // AVX512 support |
5769 | { X86::VMOVLPSZ128mr, X86::VMOVLPDZ128mr, X86::VMOVPQI2QIZmr }, |
5770 | { X86::VMOVNTPSZ128mr, X86::VMOVNTPDZ128mr, X86::VMOVNTDQZ128mr }, |
5771 | { X86::VMOVNTPSZ256mr, X86::VMOVNTPDZ256mr, X86::VMOVNTDQZ256mr }, |
5772 | { X86::VMOVNTPSZmr, X86::VMOVNTPDZmr, X86::VMOVNTDQZmr }, |
5773 | { X86::VMOVSDZmr, X86::VMOVSDZmr, X86::VMOVPQI2QIZmr }, |
5774 | { X86::VMOVSSZmr, X86::VMOVSSZmr, X86::VMOVPDI2DIZmr }, |
5775 | { X86::VMOVSDZrm, X86::VMOVSDZrm, X86::VMOVQI2PQIZrm }, |
5776 | { X86::VMOVSSZrm, X86::VMOVSSZrm, X86::VMOVDI2PDIZrm }, |
5777 | { X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128r, X86::VPBROADCASTDZ128r }, |
5778 | { X86::VBROADCASTSSZ128m, X86::VBROADCASTSSZ128m, X86::VPBROADCASTDZ128m }, |
5779 | { X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256r, X86::VPBROADCASTDZ256r }, |
5780 | { X86::VBROADCASTSSZ256m, X86::VBROADCASTSSZ256m, X86::VPBROADCASTDZ256m }, |
5781 | { X86::VBROADCASTSSZr, X86::VBROADCASTSSZr, X86::VPBROADCASTDZr }, |
5782 | { X86::VBROADCASTSSZm, X86::VBROADCASTSSZm, X86::VPBROADCASTDZm }, |
5783 | { X86::VMOVDDUPZ128rr, X86::VMOVDDUPZ128rr, X86::VPBROADCASTQZ128r }, |
5784 | { X86::VMOVDDUPZ128rm, X86::VMOVDDUPZ128rm, X86::VPBROADCASTQZ128m }, |
5785 | { X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256r, X86::VPBROADCASTQZ256r }, |
5786 | { X86::VBROADCASTSDZ256m, X86::VBROADCASTSDZ256m, X86::VPBROADCASTQZ256m }, |
5787 | { X86::VBROADCASTSDZr, X86::VBROADCASTSDZr, X86::VPBROADCASTQZr }, |
5788 | { X86::VBROADCASTSDZm, X86::VBROADCASTSDZm, X86::VPBROADCASTQZm }, |
5789 | { X86::VINSERTF32x4Zrr, X86::VINSERTF32x4Zrr, X86::VINSERTI32x4Zrr }, |
5790 | { X86::VINSERTF32x4Zrm, X86::VINSERTF32x4Zrm, X86::VINSERTI32x4Zrm }, |
5791 | { X86::VINSERTF32x8Zrr, X86::VINSERTF32x8Zrr, X86::VINSERTI32x8Zrr }, |
5792 | { X86::VINSERTF32x8Zrm, X86::VINSERTF32x8Zrm, X86::VINSERTI32x8Zrm }, |
5793 | { X86::VINSERTF64x2Zrr, X86::VINSERTF64x2Zrr, X86::VINSERTI64x2Zrr }, |
5794 | { X86::VINSERTF64x2Zrm, X86::VINSERTF64x2Zrm, X86::VINSERTI64x2Zrm }, |
5795 | { X86::VINSERTF64x4Zrr, X86::VINSERTF64x4Zrr, X86::VINSERTI64x4Zrr }, |
5796 | { X86::VINSERTF64x4Zrm, X86::VINSERTF64x4Zrm, X86::VINSERTI64x4Zrm }, |
5797 | { X86::VINSERTF32x4Z256rr,X86::VINSERTF32x4Z256rr,X86::VINSERTI32x4Z256rr }, |
5798 | { X86::VINSERTF32x4Z256rm,X86::VINSERTF32x4Z256rm,X86::VINSERTI32x4Z256rm }, |
5799 | { X86::VINSERTF64x2Z256rr,X86::VINSERTF64x2Z256rr,X86::VINSERTI64x2Z256rr }, |
5800 | { X86::VINSERTF64x2Z256rm,X86::VINSERTF64x2Z256rm,X86::VINSERTI64x2Z256rm }, |
5801 | { X86::VEXTRACTF32x4Zrr, X86::VEXTRACTF32x4Zrr, X86::VEXTRACTI32x4Zrr }, |
5802 | { X86::VEXTRACTF32x4Zmr, X86::VEXTRACTF32x4Zmr, X86::VEXTRACTI32x4Zmr }, |
5803 | { X86::VEXTRACTF32x8Zrr, X86::VEXTRACTF32x8Zrr, X86::VEXTRACTI32x8Zrr }, |
5804 | { X86::VEXTRACTF32x8Zmr, X86::VEXTRACTF32x8Zmr, X86::VEXTRACTI32x8Zmr }, |
5805 | { X86::VEXTRACTF64x2Zrr, X86::VEXTRACTF64x2Zrr, X86::VEXTRACTI64x2Zrr }, |
5806 | { X86::VEXTRACTF64x2Zmr, X86::VEXTRACTF64x2Zmr, X86::VEXTRACTI64x2Zmr }, |
5807 | { X86::VEXTRACTF64x4Zrr, X86::VEXTRACTF64x4Zrr, X86::VEXTRACTI64x4Zrr }, |
5808 | { X86::VEXTRACTF64x4Zmr, X86::VEXTRACTF64x4Zmr, X86::VEXTRACTI64x4Zmr }, |
5809 | { X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTI32x4Z256rr }, |
5810 | { X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTI32x4Z256mr }, |
5811 | { X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTI64x2Z256rr }, |
5812 | { X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTI64x2Z256mr }, |
5813 | { X86::VPERMILPSmi, X86::VPERMILPSmi, X86::VPSHUFDmi }, |
5814 | { X86::VPERMILPSri, X86::VPERMILPSri, X86::VPSHUFDri }, |
5815 | { X86::VPERMILPSZ128mi, X86::VPERMILPSZ128mi, X86::VPSHUFDZ128mi }, |
5816 | { X86::VPERMILPSZ128ri, X86::VPERMILPSZ128ri, X86::VPSHUFDZ128ri }, |
5817 | { X86::VPERMILPSZ256mi, X86::VPERMILPSZ256mi, X86::VPSHUFDZ256mi }, |
5818 | { X86::VPERMILPSZ256ri, X86::VPERMILPSZ256ri, X86::VPSHUFDZ256ri }, |
5819 | { X86::VPERMILPSZmi, X86::VPERMILPSZmi, X86::VPSHUFDZmi }, |
5820 | { X86::VPERMILPSZri, X86::VPERMILPSZri, X86::VPSHUFDZri }, |
5821 | { X86::VPERMPSZ256rm, X86::VPERMPSZ256rm, X86::VPERMDZ256rm }, |
5822 | { X86::VPERMPSZ256rr, X86::VPERMPSZ256rr, X86::VPERMDZ256rr }, |
5823 | { X86::VPERMPDZ256mi, X86::VPERMPDZ256mi, X86::VPERMQZ256mi }, |
5824 | { X86::VPERMPDZ256ri, X86::VPERMPDZ256ri, X86::VPERMQZ256ri }, |
5825 | { X86::VPERMPDZ256rm, X86::VPERMPDZ256rm, X86::VPERMQZ256rm }, |
5826 | { X86::VPERMPDZ256rr, X86::VPERMPDZ256rr, X86::VPERMQZ256rr }, |
5827 | { X86::VPERMPSZrm, X86::VPERMPSZrm, X86::VPERMDZrm }, |
5828 | { X86::VPERMPSZrr, X86::VPERMPSZrr, X86::VPERMDZrr }, |
5829 | { X86::VPERMPDZmi, X86::VPERMPDZmi, X86::VPERMQZmi }, |
5830 | { X86::VPERMPDZri, X86::VPERMPDZri, X86::VPERMQZri }, |
5831 | { X86::VPERMPDZrm, X86::VPERMPDZrm, X86::VPERMQZrm }, |
5832 | { X86::VPERMPDZrr, X86::VPERMPDZrr, X86::VPERMQZrr }, |
5833 | { X86::VUNPCKLPDZ256rm, X86::VUNPCKLPDZ256rm, X86::VPUNPCKLQDQZ256rm }, |
5834 | { X86::VUNPCKLPDZ256rr, X86::VUNPCKLPDZ256rr, X86::VPUNPCKLQDQZ256rr }, |
5835 | { X86::VUNPCKHPDZ256rm, X86::VUNPCKHPDZ256rm, X86::VPUNPCKHQDQZ256rm }, |
5836 | { X86::VUNPCKHPDZ256rr, X86::VUNPCKHPDZ256rr, X86::VPUNPCKHQDQZ256rr }, |
5837 | { X86::VUNPCKLPSZ256rm, X86::VUNPCKLPSZ256rm, X86::VPUNPCKLDQZ256rm }, |
5838 | { X86::VUNPCKLPSZ256rr, X86::VUNPCKLPSZ256rr, X86::VPUNPCKLDQZ256rr }, |
5839 | { X86::VUNPCKHPSZ256rm, X86::VUNPCKHPSZ256rm, X86::VPUNPCKHDQZ256rm }, |
5840 | { X86::VUNPCKHPSZ256rr, X86::VUNPCKHPSZ256rr, X86::VPUNPCKHDQZ256rr }, |
5841 | { X86::VUNPCKLPDZ128rm, X86::VUNPCKLPDZ128rm, X86::VPUNPCKLQDQZ128rm }, |
5842 | { X86::VMOVLHPSZrr, X86::VUNPCKLPDZ128rr, X86::VPUNPCKLQDQZ128rr }, |
5843 | { X86::VUNPCKHPDZ128rm, X86::VUNPCKHPDZ128rm, X86::VPUNPCKHQDQZ128rm }, |
5844 | { X86::VUNPCKHPDZ128rr, X86::VUNPCKHPDZ128rr, X86::VPUNPCKHQDQZ128rr }, |
5845 | { X86::VUNPCKLPSZ128rm, X86::VUNPCKLPSZ128rm, X86::VPUNPCKLDQZ128rm }, |
5846 | { X86::VUNPCKLPSZ128rr, X86::VUNPCKLPSZ128rr, X86::VPUNPCKLDQZ128rr }, |
5847 | { X86::VUNPCKHPSZ128rm, X86::VUNPCKHPSZ128rm, X86::VPUNPCKHDQZ128rm }, |
5848 | { X86::VUNPCKHPSZ128rr, X86::VUNPCKHPSZ128rr, X86::VPUNPCKHDQZ128rr }, |
5849 | { X86::VUNPCKLPDZrm, X86::VUNPCKLPDZrm, X86::VPUNPCKLQDQZrm }, |
5850 | { X86::VUNPCKLPDZrr, X86::VUNPCKLPDZrr, X86::VPUNPCKLQDQZrr }, |
5851 | { X86::VUNPCKHPDZrm, X86::VUNPCKHPDZrm, X86::VPUNPCKHQDQZrm }, |
5852 | { X86::VUNPCKHPDZrr, X86::VUNPCKHPDZrr, X86::VPUNPCKHQDQZrr }, |
5853 | { X86::VUNPCKLPSZrm, X86::VUNPCKLPSZrm, X86::VPUNPCKLDQZrm }, |
5854 | { X86::VUNPCKLPSZrr, X86::VUNPCKLPSZrr, X86::VPUNPCKLDQZrr }, |
5855 | { X86::VUNPCKHPSZrm, X86::VUNPCKHPSZrm, X86::VPUNPCKHDQZrm }, |
5856 | { X86::VUNPCKHPSZrr, X86::VUNPCKHPSZrr, X86::VPUNPCKHDQZrr }, |
5857 | { X86::VEXTRACTPSZmr, X86::VEXTRACTPSZmr, X86::VPEXTRDZmr }, |
5858 | { X86::VEXTRACTPSZrr, X86::VEXTRACTPSZrr, X86::VPEXTRDZrr }, |
5859 | }; |
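// Each row gives the PackedSingle, PackedDouble and PackedInt forms of
// one operation; the execution-domain fixup swaps an opcode for the
// entry in the column of the desired domain. A sketch of the column
// selection (a hypothetical helper, for illustration only):
//
//   // Domain: 1 = PackedSingle, 2 = PackedDouble, 3 = PackedInt.
//   static uint16_t formForDomain(const uint16_t Row[3], unsigned Domain) {
//     assert(Domain >= 1 && Domain <= 3 && "invalid execution domain");
//     return Row[Domain - 1];
//   }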
5860 | |
5861 | static const uint16_t ReplaceableInstrsAVX2[][3] = { |
5862 | //PackedSingle PackedDouble PackedInt |
5863 | { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNYrm }, |
5864 | { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNYrr }, |
5865 | { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDYrm }, |
5866 | { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDYrr }, |
5867 | { X86::VORPSYrm, X86::VORPDYrm, X86::VPORYrm }, |
5868 | { X86::VORPSYrr, X86::VORPDYrr, X86::VPORYrr }, |
5869 | { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORYrm }, |
5870 | { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORYrr }, |
5871 | { X86::VPERM2F128rm, X86::VPERM2F128rm, X86::VPERM2I128rm }, |
5872 | { X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr }, |
5873 | { X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm}, |
5874 | { X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr}, |
5875 | { X86::VMOVDDUPrm, X86::VMOVDDUPrm, X86::VPBROADCASTQrm}, |
5876 | { X86::VMOVDDUPrr, X86::VMOVDDUPrr, X86::VPBROADCASTQrr}, |
5877 | { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr}, |
5878 | { X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm}, |
5879 | { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr}, |
5880 | { X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm}, |
5881 | { X86::VBROADCASTF128, X86::VBROADCASTF128, X86::VBROADCASTI128 }, |
5882 | { X86::VBLENDPSYrri, X86::VBLENDPSYrri, X86::VPBLENDDYrri }, |
5883 | { X86::VBLENDPSYrmi, X86::VBLENDPSYrmi, X86::VPBLENDDYrmi }, |
5884 | { X86::VPERMILPSYmi, X86::VPERMILPSYmi, X86::VPSHUFDYmi }, |
5885 | { X86::VPERMILPSYri, X86::VPERMILPSYri, X86::VPSHUFDYri }, |
5886 | { X86::VUNPCKLPDYrm, X86::VUNPCKLPDYrm, X86::VPUNPCKLQDQYrm }, |
5887 | { X86::VUNPCKLPDYrr, X86::VUNPCKLPDYrr, X86::VPUNPCKLQDQYrr }, |
5888 | { X86::VUNPCKHPDYrm, X86::VUNPCKHPDYrm, X86::VPUNPCKHQDQYrm }, |
5889 | { X86::VUNPCKHPDYrr, X86::VUNPCKHPDYrr, X86::VPUNPCKHQDQYrr }, |
5890 | { X86::VUNPCKLPSYrm, X86::VUNPCKLPSYrm, X86::VPUNPCKLDQYrm }, |
5891 | { X86::VUNPCKLPSYrr, X86::VUNPCKLPSYrr, X86::VPUNPCKLDQYrr }, |
5892 | { X86::VUNPCKHPSYrm, X86::VUNPCKHPSYrm, X86::VPUNPCKHDQYrm }, |
5893 | { X86::VUNPCKHPSYrr, X86::VUNPCKHPSYrr, X86::VPUNPCKHDQYrr }, |
5894 | }; |
5895 | |
5896 | static const uint16_t ReplaceableInstrsAVX2InsertExtract[][3] = { |
5897 | //PackedSingle PackedDouble PackedInt |
5898 | { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr }, |
5899 | { X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr }, |
5900 | { X86::VINSERTF128rm, X86::VINSERTF128rm, X86::VINSERTI128rm }, |
5901 | { X86::VINSERTF128rr, X86::VINSERTF128rr, X86::VINSERTI128rr }, |
5902 | }; |
5903 | |
5904 | static const uint16_t ReplaceableInstrsAVX512[][4] = { |
5905 | // Two integer columns for 64-bit and 32-bit elements. |
5906 | //PackedSingle PackedDouble PackedInt PackedInt |
5907 | { X86::VMOVAPSZ128mr, X86::VMOVAPDZ128mr, X86::VMOVDQA64Z128mr, X86::VMOVDQA32Z128mr }, |
5908 | { X86::VMOVAPSZ128rm, X86::VMOVAPDZ128rm, X86::VMOVDQA64Z128rm, X86::VMOVDQA32Z128rm }, |
5909 | { X86::VMOVAPSZ128rr, X86::VMOVAPDZ128rr, X86::VMOVDQA64Z128rr, X86::VMOVDQA32Z128rr }, |
5910 | { X86::VMOVUPSZ128mr, X86::VMOVUPDZ128mr, X86::VMOVDQU64Z128mr, X86::VMOVDQU32Z128mr }, |
5911 | { X86::VMOVUPSZ128rm, X86::VMOVUPDZ128rm, X86::VMOVDQU64Z128rm, X86::VMOVDQU32Z128rm }, |
5912 | { X86::VMOVAPSZ256mr, X86::VMOVAPDZ256mr, X86::VMOVDQA64Z256mr, X86::VMOVDQA32Z256mr }, |
5913 | { X86::VMOVAPSZ256rm, X86::VMOVAPDZ256rm, X86::VMOVDQA64Z256rm, X86::VMOVDQA32Z256rm }, |
5914 | { X86::VMOVAPSZ256rr, X86::VMOVAPDZ256rr, X86::VMOVDQA64Z256rr, X86::VMOVDQA32Z256rr }, |
5915 | { X86::VMOVUPSZ256mr, X86::VMOVUPDZ256mr, X86::VMOVDQU64Z256mr, X86::VMOVDQU32Z256mr }, |
5916 | { X86::VMOVUPSZ256rm, X86::VMOVUPDZ256rm, X86::VMOVDQU64Z256rm, X86::VMOVDQU32Z256rm }, |
5917 | { X86::VMOVAPSZmr, X86::VMOVAPDZmr, X86::VMOVDQA64Zmr, X86::VMOVDQA32Zmr }, |
5918 | { X86::VMOVAPSZrm, X86::VMOVAPDZrm, X86::VMOVDQA64Zrm, X86::VMOVDQA32Zrm }, |
5919 | { X86::VMOVAPSZrr, X86::VMOVAPDZrr, X86::VMOVDQA64Zrr, X86::VMOVDQA32Zrr }, |
5920 | { X86::VMOVUPSZmr, X86::VMOVUPDZmr, X86::VMOVDQU64Zmr, X86::VMOVDQU32Zmr }, |
5921 | { X86::VMOVUPSZrm, X86::VMOVUPDZrm, X86::VMOVDQU64Zrm, X86::VMOVDQU32Zrm }, |
5922 | }; |
5923 | |
5924 | static const uint16_t ReplaceableInstrsAVX512DQ[][4] = { |
5925 | // Two integer columns for 64-bit and 32-bit elements. |
5926 | //PackedSingle PackedDouble PackedInt PackedInt |
5927 | { X86::VANDNPSZ128rm, X86::VANDNPDZ128rm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm }, |
5928 | { X86::VANDNPSZ128rr, X86::VANDNPDZ128rr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr }, |
5929 | { X86::VANDPSZ128rm, X86::VANDPDZ128rm, X86::VPANDQZ128rm, X86::VPANDDZ128rm }, |
5930 | { X86::VANDPSZ128rr, X86::VANDPDZ128rr, X86::VPANDQZ128rr, X86::VPANDDZ128rr }, |
5931 | { X86::VORPSZ128rm, X86::VORPDZ128rm, X86::VPORQZ128rm, X86::VPORDZ128rm }, |
5932 | { X86::VORPSZ128rr, X86::VORPDZ128rr, X86::VPORQZ128rr, X86::VPORDZ128rr }, |
5933 | { X86::VXORPSZ128rm, X86::VXORPDZ128rm, X86::VPXORQZ128rm, X86::VPXORDZ128rm }, |
5934 | { X86::VXORPSZ128rr, X86::VXORPDZ128rr, X86::VPXORQZ128rr, X86::VPXORDZ128rr }, |
5935 | { X86::VANDNPSZ256rm, X86::VANDNPDZ256rm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm }, |
5936 | { X86::VANDNPSZ256rr, X86::VANDNPDZ256rr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr }, |
5937 | { X86::VANDPSZ256rm, X86::VANDPDZ256rm, X86::VPANDQZ256rm, X86::VPANDDZ256rm }, |
5938 | { X86::VANDPSZ256rr, X86::VANDPDZ256rr, X86::VPANDQZ256rr, X86::VPANDDZ256rr }, |
5939 | { X86::VORPSZ256rm, X86::VORPDZ256rm, X86::VPORQZ256rm, X86::VPORDZ256rm }, |
5940 | { X86::VORPSZ256rr, X86::VORPDZ256rr, X86::VPORQZ256rr, X86::VPORDZ256rr }, |
5941 | { X86::VXORPSZ256rm, X86::VXORPDZ256rm, X86::VPXORQZ256rm, X86::VPXORDZ256rm }, |
5942 | { X86::VXORPSZ256rr, X86::VXORPDZ256rr, X86::VPXORQZ256rr, X86::VPXORDZ256rr }, |
5943 | { X86::VANDNPSZrm, X86::VANDNPDZrm, X86::VPANDNQZrm, X86::VPANDNDZrm }, |
5944 | { X86::VANDNPSZrr, X86::VANDNPDZrr, X86::VPANDNQZrr, X86::VPANDNDZrr }, |
5945 | { X86::VANDPSZrm, X86::VANDPDZrm, X86::VPANDQZrm, X86::VPANDDZrm }, |
5946 | { X86::VANDPSZrr, X86::VANDPDZrr, X86::VPANDQZrr, X86::VPANDDZrr }, |
5947 | { X86::VORPSZrm, X86::VORPDZrm, X86::VPORQZrm, X86::VPORDZrm }, |
5948 | { X86::VORPSZrr, X86::VORPDZrr, X86::VPORQZrr, X86::VPORDZrr }, |
5949 | { X86::VXORPSZrm, X86::VXORPDZrm, X86::VPXORQZrm, X86::VPXORDZrm }, |
5950 | { X86::VXORPSZrr, X86::VXORPDZrr, X86::VPXORQZrr, X86::VPXORDZrr }, |
5951 | }; |
5952 | |
5953 | static const uint16_t ReplaceableInstrsAVX512DQMasked[][4] = { |
5954 | // Two integer columns for 64-bit and 32-bit elements. |
5955 | //PackedSingle PackedDouble |
5956 | //PackedInt PackedInt |
5957 | { X86::VANDNPSZ128rmk, X86::VANDNPDZ128rmk, |
5958 | X86::VPANDNQZ128rmk, X86::VPANDNDZ128rmk }, |
5959 | { X86::VANDNPSZ128rmkz, X86::VANDNPDZ128rmkz, |
5960 | X86::VPANDNQZ128rmkz, X86::VPANDNDZ128rmkz }, |
5961 | { X86::VANDNPSZ128rrk, X86::VANDNPDZ128rrk, |
5962 | X86::VPANDNQZ128rrk, X86::VPANDNDZ128rrk }, |
5963 | { X86::VANDNPSZ128rrkz, X86::VANDNPDZ128rrkz, |
5964 | X86::VPANDNQZ128rrkz, X86::VPANDNDZ128rrkz }, |
5965 | { X86::VANDPSZ128rmk, X86::VANDPDZ128rmk, |
5966 | X86::VPANDQZ128rmk, X86::VPANDDZ128rmk }, |
5967 | { X86::VANDPSZ128rmkz, X86::VANDPDZ128rmkz, |
5968 | X86::VPANDQZ128rmkz, X86::VPANDDZ128rmkz }, |
5969 | { X86::VANDPSZ128rrk, X86::VANDPDZ128rrk, |
5970 | X86::VPANDQZ128rrk, X86::VPANDDZ128rrk }, |
5971 | { X86::VANDPSZ128rrkz, X86::VANDPDZ128rrkz, |
5972 | X86::VPANDQZ128rrkz, X86::VPANDDZ128rrkz }, |
5973 | { X86::VORPSZ128rmk, X86::VORPDZ128rmk, |
5974 | X86::VPORQZ128rmk, X86::VPORDZ128rmk }, |
5975 | { X86::VORPSZ128rmkz, X86::VORPDZ128rmkz, |
5976 | X86::VPORQZ128rmkz, X86::VPORDZ128rmkz }, |
5977 | { X86::VORPSZ128rrk, X86::VORPDZ128rrk, |
5978 | X86::VPORQZ128rrk, X86::VPORDZ128rrk }, |
5979 | { X86::VORPSZ128rrkz, X86::VORPDZ128rrkz, |
5980 | X86::VPORQZ128rrkz, X86::VPORDZ128rrkz }, |
5981 | { X86::VXORPSZ128rmk, X86::VXORPDZ128rmk, |
5982 | X86::VPXORQZ128rmk, X86::VPXORDZ128rmk }, |
5983 | { X86::VXORPSZ128rmkz, X86::VXORPDZ128rmkz, |
5984 | X86::VPXORQZ128rmkz, X86::VPXORDZ128rmkz }, |
5985 | { X86::VXORPSZ128rrk, X86::VXORPDZ128rrk, |
5986 | X86::VPXORQZ128rrk, X86::VPXORDZ128rrk }, |
5987 | { X86::VXORPSZ128rrkz, X86::VXORPDZ128rrkz, |
5988 | X86::VPXORQZ128rrkz, X86::VPXORDZ128rrkz }, |
5989 | { X86::VANDNPSZ256rmk, X86::VANDNPDZ256rmk, |
5990 | X86::VPANDNQZ256rmk, X86::VPANDNDZ256rmk }, |
5991 | { X86::VANDNPSZ256rmkz, X86::VANDNPDZ256rmkz, |
5992 | X86::VPANDNQZ256rmkz, X86::VPANDNDZ256rmkz }, |
5993 | { X86::VANDNPSZ256rrk, X86::VANDNPDZ256rrk, |
5994 | X86::VPANDNQZ256rrk, X86::VPANDNDZ256rrk }, |
5995 | { X86::VANDNPSZ256rrkz, X86::VANDNPDZ256rrkz, |
5996 | X86::VPANDNQZ256rrkz, X86::VPANDNDZ256rrkz }, |
5997 | { X86::VANDPSZ256rmk, X86::VANDPDZ256rmk, |
5998 | X86::VPANDQZ256rmk, X86::VPANDDZ256rmk }, |
5999 | { X86::VANDPSZ256rmkz, X86::VANDPDZ256rmkz, |
6000 | X86::VPANDQZ256rmkz, X86::VPANDDZ256rmkz }, |
6001 | { X86::VANDPSZ256rrk, X86::VANDPDZ256rrk, |
6002 | X86::VPANDQZ256rrk, X86::VPANDDZ256rrk }, |
6003 | { X86::VANDPSZ256rrkz, X86::VANDPDZ256rrkz, |
6004 | X86::VPANDQZ256rrkz, X86::VPANDDZ256rrkz }, |
6005 | { X86::VORPSZ256rmk, X86::VORPDZ256rmk, |
6006 | X86::VPORQZ256rmk, X86::VPORDZ256rmk }, |
6007 | { X86::VORPSZ256rmkz, X86::VORPDZ256rmkz, |
6008 | X86::VPORQZ256rmkz, X86::VPORDZ256rmkz }, |
6009 | { X86::VORPSZ256rrk, X86::VORPDZ256rrk, |
6010 | X86::VPORQZ256rrk, X86::VPORDZ256rrk }, |
6011 | { X86::VORPSZ256rrkz, X86::VORPDZ256rrkz, |
6012 | X86::VPORQZ256rrkz, X86::VPORDZ256rrkz }, |
6013 | { X86::VXORPSZ256rmk, X86::VXORPDZ256rmk, |
6014 | X86::VPXORQZ256rmk, X86::VPXORDZ256rmk }, |
6015 | { X86::VXORPSZ256rmkz, X86::VXORPDZ256rmkz, |
6016 | X86::VPXORQZ256rmkz, X86::VPXORDZ256rmkz }, |
6017 | { X86::VXORPSZ256rrk, X86::VXORPDZ256rrk, |
6018 | X86::VPXORQZ256rrk, X86::VPXORDZ256rrk }, |
6019 | { X86::VXORPSZ256rrkz, X86::VXORPDZ256rrkz, |
6020 | X86::VPXORQZ256rrkz, X86::VPXORDZ256rrkz }, |
6021 | { X86::VANDNPSZrmk, X86::VANDNPDZrmk, |
6022 | X86::VPANDNQZrmk, X86::VPANDNDZrmk }, |
6023 | { X86::VANDNPSZrmkz, X86::VANDNPDZrmkz, |
6024 | X86::VPANDNQZrmkz, X86::VPANDNDZrmkz }, |
6025 | { X86::VANDNPSZrrk, X86::VANDNPDZrrk, |
6026 | X86::VPANDNQZrrk, X86::VPANDNDZrrk }, |
6027 | { X86::VANDNPSZrrkz, X86::VANDNPDZrrkz, |
6028 | X86::VPANDNQZrrkz, X86::VPANDNDZrrkz }, |
6029 | { X86::VANDPSZrmk, X86::VANDPDZrmk, |
6030 | X86::VPANDQZrmk, X86::VPANDDZrmk }, |
6031 | { X86::VANDPSZrmkz, X86::VANDPDZrmkz, |
6032 | X86::VPANDQZrmkz, X86::VPANDDZrmkz }, |
6033 | { X86::VANDPSZrrk, X86::VANDPDZrrk, |
6034 | X86::VPANDQZrrk, X86::VPANDDZrrk }, |
6035 | { X86::VANDPSZrrkz, X86::VANDPDZrrkz, |
6036 | X86::VPANDQZrrkz, X86::VPANDDZrrkz }, |
6037 | { X86::VORPSZrmk, X86::VORPDZrmk, |
6038 | X86::VPORQZrmk, X86::VPORDZrmk }, |
6039 | { X86::VORPSZrmkz, X86::VORPDZrmkz, |
6040 | X86::VPORQZrmkz, X86::VPORDZrmkz }, |
6041 | { X86::VORPSZrrk, X86::VORPDZrrk, |
6042 | X86::VPORQZrrk, X86::VPORDZrrk }, |
6043 | { X86::VORPSZrrkz, X86::VORPDZrrkz, |
6044 | X86::VPORQZrrkz, X86::VPORDZrrkz }, |
6045 | { X86::VXORPSZrmk, X86::VXORPDZrmk, |
6046 | X86::VPXORQZrmk, X86::VPXORDZrmk }, |
6047 | { X86::VXORPSZrmkz, X86::VXORPDZrmkz, |
6048 | X86::VPXORQZrmkz, X86::VPXORDZrmkz }, |
6049 | { X86::VXORPSZrrk, X86::VXORPDZrrk, |
6050 | X86::VPXORQZrrk, X86::VPXORDZrrk }, |
6051 | { X86::VXORPSZrrkz, X86::VXORPDZrrkz, |
6052 | X86::VPXORQZrrkz, X86::VPXORDZrrkz }, |
6053 | // Broadcast loads can be handled the same as masked operations to avoid |
6054 | // changing element size. |
6055 | { X86::VANDNPSZ128rmb, X86::VANDNPDZ128rmb, |
6056 | X86::VPANDNQZ128rmb, X86::VPANDNDZ128rmb }, |
6057 | { X86::VANDPSZ128rmb, X86::VANDPDZ128rmb, |
6058 | X86::VPANDQZ128rmb, X86::VPANDDZ128rmb }, |
6059 | { X86::VORPSZ128rmb, X86::VORPDZ128rmb, |
6060 | X86::VPORQZ128rmb, X86::VPORDZ128rmb }, |
6061 | { X86::VXORPSZ128rmb, X86::VXORPDZ128rmb, |
6062 | X86::VPXORQZ128rmb, X86::VPXORDZ128rmb }, |
6063 | { X86::VANDNPSZ256rmb, X86::VANDNPDZ256rmb, |
6064 | X86::VPANDNQZ256rmb, X86::VPANDNDZ256rmb }, |
6065 | { X86::VANDPSZ256rmb, X86::VANDPDZ256rmb, |
6066 | X86::VPANDQZ256rmb, X86::VPANDDZ256rmb }, |
6067 | { X86::VORPSZ256rmb, X86::VORPDZ256rmb, |
6068 | X86::VPORQZ256rmb, X86::VPORDZ256rmb }, |
6069 | { X86::VXORPSZ256rmb, X86::VXORPDZ256rmb, |
6070 | X86::VPXORQZ256rmb, X86::VPXORDZ256rmb }, |
6071 | { X86::VANDNPSZrmb, X86::VANDNPDZrmb, |
6072 | X86::VPANDNQZrmb, X86::VPANDNDZrmb }, |
6073 | { X86::VANDPSZrmb, X86::VANDPDZrmb, |
6074 | X86::VPANDQZrmb, X86::VPANDDZrmb }, |
6077 | { X86::VORPSZrmb, X86::VORPDZrmb, |
6078 | X86::VPORQZrmb, X86::VPORDZrmb }, |
6079 | { X86::VXORPSZrmb, X86::VXORPDZrmb, |
6080 | X86::VPXORQZrmb, X86::VPXORDZrmb }, |
6081 | { X86::VANDNPSZ128rmbk, X86::VANDNPDZ128rmbk, |
6082 | X86::VPANDNQZ128rmbk, X86::VPANDNDZ128rmbk }, |
6083 | { X86::VANDPSZ128rmbk, X86::VANDPDZ128rmbk, |
6084 | X86::VPANDQZ128rmbk, X86::VPANDDZ128rmbk }, |
6085 | { X86::VORPSZ128rmbk, X86::VORPDZ128rmbk, |
6086 | X86::VPORQZ128rmbk, X86::VPORDZ128rmbk }, |
6087 | { X86::VXORPSZ128rmbk, X86::VXORPDZ128rmbk, |
6088 | X86::VPXORQZ128rmbk, X86::VPXORDZ128rmbk }, |
6089 | { X86::VANDNPSZ256rmbk, X86::VANDNPDZ256rmbk, |
6090 | X86::VPANDNQZ256rmbk, X86::VPANDNDZ256rmbk }, |
6091 | { X86::VANDPSZ256rmbk, X86::VANDPDZ256rmbk, |
6092 | X86::VPANDQZ256rmbk, X86::VPANDDZ256rmbk }, |
6093 | { X86::VORPSZ256rmbk, X86::VORPDZ256rmbk, |
6094 | X86::VPORQZ256rmbk, X86::VPORDZ256rmbk }, |
6095 | { X86::VXORPSZ256rmbk, X86::VXORPDZ256rmbk, |
6096 | X86::VPXORQZ256rmbk, X86::VPXORDZ256rmbk }, |
6097 | { X86::VANDNPSZrmbk, X86::VANDNPDZrmbk, |
6098 | X86::VPANDNQZrmbk, X86::VPANDNDZrmbk }, |
6099 | { X86::VANDPSZrmbk, X86::VANDPDZrmbk, |
6100 | X86::VPANDQZrmbk, X86::VPANDDZrmbk }, |
6103 | { X86::VORPSZrmbk, X86::VORPDZrmbk, |
6104 | X86::VPORQZrmbk, X86::VPORDZrmbk }, |
6105 | { X86::VXORPSZrmbk, X86::VXORPDZrmbk, |
6106 | X86::VPXORQZrmbk, X86::VPXORDZrmbk }, |
6107 | { X86::VANDNPSZ128rmbkz,X86::VANDNPDZ128rmbkz, |
6108 | X86::VPANDNQZ128rmbkz,X86::VPANDNDZ128rmbkz}, |
6109 | { X86::VANDPSZ128rmbkz, X86::VANDPDZ128rmbkz, |
6110 | X86::VPANDQZ128rmbkz, X86::VPANDDZ128rmbkz }, |
6111 | { X86::VORPSZ128rmbkz, X86::VORPDZ128rmbkz, |
6112 | X86::VPORQZ128rmbkz, X86::VPORDZ128rmbkz }, |
6113 | { X86::VXORPSZ128rmbkz, X86::VXORPDZ128rmbkz, |
6114 | X86::VPXORQZ128rmbkz, X86::VPXORDZ128rmbkz }, |
6115 | { X86::VANDNPSZ256rmbkz,X86::VANDNPDZ256rmbkz, |
6116 | X86::VPANDNQZ256rmbkz,X86::VPANDNDZ256rmbkz}, |
6117 | { X86::VANDPSZ256rmbkz, X86::VANDPDZ256rmbkz, |
6118 | X86::VPANDQZ256rmbkz, X86::VPANDDZ256rmbkz }, |
6119 | { X86::VORPSZ256rmbkz, X86::VORPDZ256rmbkz, |
6120 | X86::VPORQZ256rmbkz, X86::VPORDZ256rmbkz }, |
6121 | { X86::VXORPSZ256rmbkz, X86::VXORPDZ256rmbkz, |
6122 | X86::VPXORQZ256rmbkz, X86::VPXORDZ256rmbkz }, |
6123 | { X86::VANDNPSZrmbkz, X86::VANDNPDZrmbkz, |
6124 | X86::VPANDNQZrmbkz, X86::VPANDNDZrmbkz }, |
6125 | { X86::VANDPSZrmbkz, X86::VANDPDZrmbkz, |
6126 | X86::VPANDQZrmbkz, X86::VPANDDZrmbkz }, |
6129 | { X86::VORPSZrmbkz, X86::VORPDZrmbkz, |
6130 | X86::VPORQZrmbkz, X86::VPORDZrmbkz }, |
6131 | { X86::VXORPSZrmbkz, X86::VXORPDZrmbkz, |
6132 | X86::VPXORQZrmbkz, X86::VPXORDZrmbkz }, |
6133 | }; |
6134 | |
6135 | // NOTE: These should only be used by the custom domain methods. |
6136 | static const uint16_t ReplaceableCustomInstrs[][3] = { |
6137 | //PackedSingle PackedDouble PackedInt |
6138 | { X86::BLENDPSrmi, X86::BLENDPDrmi, X86::PBLENDWrmi }, |
6139 | { X86::BLENDPSrri, X86::BLENDPDrri, X86::PBLENDWrri }, |
6140 | { X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDWrmi }, |
6141 | { X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDWrri }, |
6142 | { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDWYrmi }, |
6143 | { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDWYrri }, |
6144 | }; |
6145 | static const uint16_t ReplaceableCustomAVX2Instrs[][3] = { |
6146 | //PackedSingle PackedDouble PackedInt |
6147 | { X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDDrmi }, |
6148 | { X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDDrri }, |
6149 | { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDDYrmi }, |
6150 | { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDDYrri }, |
6151 | }; |
6152 | |
6153 | // Special table for changing EVEX logic instructions to VEX. |
6154 | // TODO: Should we run EVEX->VEX earlier? |
6155 | static const uint16_t ReplaceableCustomAVX512LogicInstrs[][4] = { |
6156 | // Two integer columns for 64-bit and 32-bit elements. |
6157 | //PackedSingle PackedDouble PackedInt PackedInt |
6158 | { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm }, |
6159 | { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr }, |
6160 | { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDQZ128rm, X86::VPANDDZ128rm }, |
6161 | { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDQZ128rr, X86::VPANDDZ128rr }, |
6162 | { X86::VORPSrm, X86::VORPDrm, X86::VPORQZ128rm, X86::VPORDZ128rm }, |
6163 | { X86::VORPSrr, X86::VORPDrr, X86::VPORQZ128rr, X86::VPORDZ128rr }, |
6164 | { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORQZ128rm, X86::VPXORDZ128rm }, |
6165 | { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORQZ128rr, X86::VPXORDZ128rr }, |
6166 | { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm }, |
6167 | { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr }, |
6168 | { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDQZ256rm, X86::VPANDDZ256rm }, |
6169 | { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDQZ256rr, X86::VPANDDZ256rr }, |
6170 | { X86::VORPSYrm, X86::VORPDYrm, X86::VPORQZ256rm, X86::VPORDZ256rm }, |
6171 | { X86::VORPSYrr, X86::VORPDYrr, X86::VPORQZ256rr, X86::VPORDZ256rr }, |
6172 | { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORQZ256rm, X86::VPXORDZ256rm }, |
6173 | { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORQZ256rr, X86::VPXORDZ256rr }, |
6174 | }; |
6175 | |
6176 | // FIXME: Some shuffle and unpack instructions have equivalents in different |
6177 | // domains, but they require a bit more work than just switching opcodes. |
6178 | |
6179 | static const uint16_t *lookup(unsigned opcode, unsigned domain, |
6180 | ArrayRef<uint16_t[3]> Table) { |
6181 | for (const uint16_t (&Row)[3] : Table) |
6182 | if (Row[domain-1] == opcode) |
6183 | return Row; |
6184 | return nullptr; |
6185 | } |
6186 | |
6187 | static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain, |
6188 | ArrayRef<uint16_t[4]> Table) { |
6189 | // If this is the integer domain, make sure to check both integer columns.
6190 | for (const uint16_t (&Row)[4] : Table) |
6191 | if (Row[domain-1] == opcode || (domain == 3 && Row[3] == opcode)) |
6192 | return Row; |
6193 | return nullptr; |
6194 | } |
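// Annotation (not part of the original file): a hedged usage sketch of the
// 3-column lookup above, using the ReplaceableCustomInstrs table defined
// earlier in this section. Moving a PackedSingle blend into the PackedDouble
// domain amounts to:
//   const uint16_t *Row = lookup(X86::BLENDPSrri, /*domain=*/1,
//                                ReplaceableCustomInstrs);
//   // Row is { BLENDPSrri, BLENDPDrri, PBLENDWrri }, so the PackedDouble
//   // (domain 2) opcode is Row[2 - 1], i.e. X86::BLENDPDrri.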
6195 | |
6196 | // Helper to attempt to widen/narrow blend masks. |
6197 | static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth, |
6198 | unsigned NewWidth, unsigned *pNewMask = nullptr) { |
6199 | assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) &&
6200 |        "Illegal blend mask scale");
6201 | unsigned NewMask = 0; |
6202 | |
6203 | if ((OldWidth % NewWidth) == 0) { |
6204 | unsigned Scale = OldWidth / NewWidth; |
6205 | unsigned SubMask = (1u << Scale) - 1; |
6206 | for (unsigned i = 0; i != NewWidth; ++i) { |
6207 | unsigned Sub = (OldMask >> (i * Scale)) & SubMask; |
6208 | if (Sub == SubMask) |
6209 | NewMask |= (1u << i); |
6210 | else if (Sub != 0x0) |
6211 | return false; |
6212 | } |
6213 | } else { |
6214 | unsigned Scale = NewWidth / OldWidth; |
6215 | unsigned SubMask = (1u << Scale) - 1; |
6216 | for (unsigned i = 0; i != OldWidth; ++i) { |
6217 | if (OldMask & (1 << i)) { |
6218 | NewMask |= (SubMask << (i * Scale)); |
6219 | } |
6220 | } |
6221 | } |
6222 | |
6223 | if (pNewMask) |
6224 | *pNewMask = NewMask; |
6225 | return true; |
6226 | } |
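// Annotation (not part of the original file): worked examples of the mask
// scaling above.
//   AdjustBlendMask(0b0011, /*OldWidth=*/4, /*NewWidth=*/2) narrows the mask
//     and sets NewMask to 0b01: elements {0,1} form one fully selected pair
//     and elements {2,3} one fully deselected pair.
//   AdjustBlendMask(0b0010, 4, 2) returns false: the low pair is only half
//     selected, so the mask is not representable at the coarser width.
//   AdjustBlendMask(0b01, 2, 4) widens the mask and sets NewMask to 0b0011.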
6227 | |
6228 | uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const { |
6229 | unsigned Opcode = MI.getOpcode(); |
6230 | unsigned NumOperands = MI.getDesc().getNumOperands(); |
6231 | |
6232 | auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) { |
6233 | uint16_t validDomains = 0; |
6234 | if (MI.getOperand(NumOperands - 1).isImm()) { |
6235 | unsigned Imm = MI.getOperand(NumOperands - 1).getImm(); |
6236 | if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4)) |
6237 | validDomains |= 0x2; // PackedSingle |
6238 | if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2)) |
6239 | validDomains |= 0x4; // PackedDouble |
6240 | if (!Is256 || Subtarget.hasAVX2()) |
6241 | validDomains |= 0x8; // PackedInt |
6242 | } |
6243 | return validDomains; |
6244 | }; |
6245 | |
6246 | switch (Opcode) { |
6247 | case X86::BLENDPDrmi: |
6248 | case X86::BLENDPDrri: |
6249 | case X86::VBLENDPDrmi: |
6250 | case X86::VBLENDPDrri: |
6251 | return GetBlendDomains(2, false); |
6252 | case X86::VBLENDPDYrmi: |
6253 | case X86::VBLENDPDYrri: |
6254 | return GetBlendDomains(4, true); |
6255 | case X86::BLENDPSrmi: |
6256 | case X86::BLENDPSrri: |
6257 | case X86::VBLENDPSrmi: |
6258 | case X86::VBLENDPSrri: |
6259 | case X86::VPBLENDDrmi: |
6260 | case X86::VPBLENDDrri: |
6261 | return GetBlendDomains(4, false); |
6262 | case X86::VBLENDPSYrmi: |
6263 | case X86::VBLENDPSYrri: |
6264 | case X86::VPBLENDDYrmi: |
6265 | case X86::VPBLENDDYrri: |
6266 | return GetBlendDomains(8, true); |
6267 | case X86::PBLENDWrmi: |
6268 | case X86::PBLENDWrri: |
6269 | case X86::VPBLENDWrmi: |
6270 | case X86::VPBLENDWrri: |
6271 | // Treat VPBLENDWY as a 128-bit vector as it repeats the lo/hi masks. |
6272 | case X86::VPBLENDWYrmi: |
6273 | case X86::VPBLENDWYrri: |
6274 | return GetBlendDomains(8, false); |
6275 | case X86::VPANDDZ128rr: case X86::VPANDDZ128rm: |
6276 | case X86::VPANDDZ256rr: case X86::VPANDDZ256rm: |
6277 | case X86::VPANDQZ128rr: case X86::VPANDQZ128rm: |
6278 | case X86::VPANDQZ256rr: case X86::VPANDQZ256rm: |
6279 | case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm: |
6280 | case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm: |
6281 | case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm: |
6282 | case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm: |
6283 | case X86::VPORDZ128rr: case X86::VPORDZ128rm: |
6284 | case X86::VPORDZ256rr: case X86::VPORDZ256rm: |
6285 | case X86::VPORQZ128rr: case X86::VPORQZ128rm: |
6286 | case X86::VPORQZ256rr: case X86::VPORQZ256rm: |
6287 | case X86::VPXORDZ128rr: case X86::VPXORDZ128rm: |
6288 | case X86::VPXORDZ256rr: case X86::VPXORDZ256rm: |
6289 | case X86::VPXORQZ128rr: case X86::VPXORQZ128rm: |
6290 | case X86::VPXORQZ256rr: case X86::VPXORQZ256rm: |
6291 | // If we don't have DQI, see if we can still switch from an EVEX integer
6292 | // instruction to a VEX floating-point instruction.
6293 | if (Subtarget.hasDQI()) |
6294 | return 0; |
6295 | |
6296 | if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16) |
6297 | return 0; |
6298 | if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16) |
6299 | return 0; |
6300 | // Register forms will have 3 operands. Memory forms will have more.
6301 | if (NumOperands == 3 && |
6302 | RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16) |
6303 | return 0; |
6304 | |
6305 | // All domains are valid. |
6306 | return 0xe; |
6307 | case X86::MOVHLPSrr: |
6308 | // We can swap domains when both inputs are the same register. |
6309 | // FIXME: This doesn't catch all the cases we would like. If the input |
6310 | // register isn't KILLed by the instruction, the two-address instruction
6311 | // pass puts a COPY on one input. The other input uses the original |
6312 | // register. This prevents the same physical register from being used by |
6313 | // both inputs. |
6314 | if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg() && |
6315 | MI.getOperand(0).getSubReg() == 0 && |
6316 | MI.getOperand(1).getSubReg() == 0 && |
6317 | MI.getOperand(2).getSubReg() == 0) |
6318 | return 0x6; |
6319 | return 0; |
6320 | } |
6321 | return 0; |
6322 | } |
6323 | |
6324 | bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI, |
6325 | unsigned Domain) const { |
6326 | assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
6327 | uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3; |
6328 | assert(dom && "Not an SSE instruction");
6329 | |
6330 | unsigned Opcode = MI.getOpcode(); |
6331 | unsigned NumOperands = MI.getDesc().getNumOperands(); |
6332 | |
6333 | auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) { |
6334 | if (MI.getOperand(NumOperands - 1).isImm()) { |
6335 | unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255; |
6336 | Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm); |
6337 | unsigned NewImm = Imm; |
6338 | |
6339 | const uint16_t *table = lookup(Opcode, dom, ReplaceableCustomInstrs); |
6340 | if (!table) |
6341 | table = lookup(Opcode, dom, ReplaceableCustomAVX2Instrs); |
6342 | |
6343 | if (Domain == 1) { // PackedSingle |
6344 | AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm); |
6345 | } else if (Domain == 2) { // PackedDouble |
6346 | AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2, &NewImm); |
6347 | } else if (Domain == 3) { // PackedInt |
6348 | if (Subtarget.hasAVX2()) { |
6349 | // If we are already VPBLENDW use that, else use VPBLENDD. |
6350 | if ((ImmWidth / (Is256 ? 2 : 1)) != 8) { |
6351 | table = lookup(Opcode, dom, ReplaceableCustomAVX2Instrs); |
6352 | AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm); |
6353 | } |
6354 | } else { |
6355 | assert(!Is256 && "128-bit vector expected");
6356 | AdjustBlendMask(Imm, ImmWidth, 8, &NewImm); |
6357 | } |
6358 | } |
6359 | |
6360 | assert(table && table[Domain - 1] && "Unknown domain op");
6361 | MI.setDesc(get(table[Domain - 1])); |
6362 | MI.getOperand(NumOperands - 1).setImm(NewImm & 255); |
6363 | } |
6364 | return true; |
6365 | }; |
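// Annotation (not part of the original file): as a concrete example of the
// lambda above, switching BLENDPDrri with Imm = 0b01 (two double elements)
// into the PackedSingle domain calls AdjustBlendMask(0b01, 2, 4, &NewImm),
// which widens the mask to 0b0011, and the table lookup then swaps the
// descriptor to BLENDPSrri.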
6366 | |
6367 | switch (Opcode) { |
6368 | case X86::BLENDPDrmi: |
6369 | case X86::BLENDPDrri: |
6370 | case X86::VBLENDPDrmi: |
6371 | case X86::VBLENDPDrri: |
6372 | return SetBlendDomain(2, false); |
6373 | case X86::VBLENDPDYrmi: |
6374 | case X86::VBLENDPDYrri: |
6375 | return SetBlendDomain(4, true); |
6376 | case X86::BLENDPSrmi: |
6377 | case X86::BLENDPSrri: |
6378 | case X86::VBLENDPSrmi: |
6379 | case X86::VBLENDPSrri: |
6380 | case X86::VPBLENDDrmi: |
6381 | case X86::VPBLENDDrri: |
6382 | return SetBlendDomain(4, false); |
6383 | case X86::VBLENDPSYrmi: |
6384 | case X86::VBLENDPSYrri: |
6385 | case X86::VPBLENDDYrmi: |
6386 | case X86::VPBLENDDYrri: |
6387 | return SetBlendDomain(8, true); |
6388 | case X86::PBLENDWrmi: |
6389 | case X86::PBLENDWrri: |
6390 | case X86::VPBLENDWrmi: |
6391 | case X86::VPBLENDWrri: |
6392 | return SetBlendDomain(8, false); |
6393 | case X86::VPBLENDWYrmi: |
6394 | case X86::VPBLENDWYrri: |
6395 | return SetBlendDomain(16, true); |
6396 | case X86::VPANDDZ128rr: case X86::VPANDDZ128rm: |
6397 | case X86::VPANDDZ256rr: case X86::VPANDDZ256rm: |
6398 | case X86::VPANDQZ128rr: case X86::VPANDQZ128rm: |
6399 | case X86::VPANDQZ256rr: case X86::VPANDQZ256rm: |
6400 | case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm: |
6401 | case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm: |
6402 | case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm: |
6403 | case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm: |
6404 | case X86::VPORDZ128rr: case X86::VPORDZ128rm: |
6405 | case X86::VPORDZ256rr: case X86::VPORDZ256rm: |
6406 | case X86::VPORQZ128rr: case X86::VPORQZ128rm: |
6407 | case X86::VPORQZ256rr: case X86::VPORQZ256rm: |
6408 | case X86::VPXORDZ128rr: case X86::VPXORDZ128rm: |
6409 | case X86::VPXORDZ256rr: case X86::VPXORDZ256rm: |
6410 | case X86::VPXORQZ128rr: case X86::VPXORQZ128rm: |
6411 | case X86::VPXORQZ256rr: case X86::VPXORQZ256rm: { |
6412 | // Without DQI, convert EVEX instructions to VEX instructions. |
6413 | if (Subtarget.hasDQI()) |
6414 | return false; |
6415 | |
6416 | const uint16_t *table = lookupAVX512(MI.getOpcode(), dom, |
6417 | ReplaceableCustomAVX512LogicInstrs); |
6418 | assert(table && "Instruction not found in table?");
6419 | // Don't change integer Q instructions to D instructions and
6420 | // use D instructions if we started with a PS instruction.
6421 | if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode())) |
6422 | Domain = 4; |
6423 | MI.setDesc(get(table[Domain - 1])); |
6424 | return true; |
6425 | } |
6426 | case X86::UNPCKHPDrr: |
6427 | case X86::MOVHLPSrr: |
6428 | // We just need to commute the instruction, which will switch the domains.
6429 | if (Domain != dom && Domain != 3 && |
6430 | MI.getOperand(1).getReg() == MI.getOperand(2).getReg() && |
6431 | MI.getOperand(0).getSubReg() == 0 && |
6432 | MI.getOperand(1).getSubReg() == 0 && |
6433 | MI.getOperand(2).getSubReg() == 0) { |
6434 | commuteInstruction(MI, false); |
6435 | return true; |
6436 | } |
6437 | // We must always return true for MOVHLPSrr. |
6438 | if (Opcode == X86::MOVHLPSrr) |
6439 | return true; |
6440 | } |
6441 | return false; |
6442 | } |
6443 | |
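// Annotation (not part of the original file): getExecutionDomain returns a
// pair (current domain, mask of legal domains). Domains are numbered
// 1 = PackedSingle, 2 = PackedDouble, 3 = PackedInt (0 = no SSE domain),
// and bit d of the mask is set when domain d is reachable: 0xe allows all
// three, 0x6 only the two floating-point domains, and 0xa/0xc pair the
// integer domain with exactly one floating-point domain.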
6444 | std::pair<uint16_t, uint16_t> |
6445 | X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const { |
6446 | uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3; |
6447 | unsigned opcode = MI.getOpcode(); |
6448 | uint16_t validDomains = 0; |
6449 | if (domain) { |
6450 | // Attempt to match for custom instructions. |
6451 | validDomains = getExecutionDomainCustom(MI); |
6452 | if (validDomains) |
6453 | return std::make_pair(domain, validDomains); |
6454 | |
6455 | if (lookup(opcode, domain, ReplaceableInstrs)) { |
6456 | validDomains = 0xe; |
6457 | } else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) { |
6458 | validDomains = Subtarget.hasAVX2() ? 0xe : 0x6; |
6459 | } else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) { |
6460 | // Insert/extract instructions should only affect the domain if AVX2
6461 | // is enabled. |
6462 | if (!Subtarget.hasAVX2()) |
6463 | return std::make_pair(0, 0); |
6464 | validDomains = 0xe; |
6465 | } else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) { |
6466 | validDomains = 0xe; |
6467 | } else if (Subtarget.hasDQI() && lookupAVX512(opcode, domain, |
6468 | ReplaceableInstrsAVX512DQ)) { |
6469 | validDomains = 0xe; |
6470 | } else if (Subtarget.hasDQI()) { |
6471 | if (const uint16_t *table = lookupAVX512(opcode, domain, |
6472 | ReplaceableInstrsAVX512DQMasked)) { |
6473 | if (domain == 1 || (domain == 3 && table[3] == opcode)) |
6474 | validDomains = 0xa; |
6475 | else |
6476 | validDomains = 0xc; |
6477 | } |
6478 | } |
6479 | } |
6480 | return std::make_pair(domain, validDomains); |
6481 | } |
6482 | |
6483 | void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const { |
6484 | assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
6485 | uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3; |
6486 | assert(dom && "Not an SSE instruction");
6487 | |
6488 | // Attempt to match for custom instructions. |
6489 | if (setExecutionDomainCustom(MI, Domain)) |
6490 | return; |
6491 | |
6492 | const uint16_t *table = lookup(MI.getOpcode(), dom, ReplaceableInstrs); |
6493 | if (!table) { // try the other table |
6494 | assert((Subtarget.hasAVX2() || Domain < 3) &&
6495 |        "256-bit vector operations only available in AVX2");
6496 | table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2); |
6497 | } |
6498 | if (!table) { // try the other table |
6499 | assert(Subtarget.hasAVX2() &&
6500 |        "256-bit insert/extract only available in AVX2");
6501 | table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract); |
6502 | } |
6503 | if (!table) { // try the AVX512 table |
6504 | assert(Subtarget.hasAVX512() && "Requires AVX-512");
6505 | table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512); |
6506 | // Don't change integer Q instructions to D instructions. |
6507 | if (table && Domain == 3 && table[3] == MI.getOpcode()) |
6508 | Domain = 4; |
6509 | } |
6510 | if (!table) { // try the AVX512DQ table |
6511 | assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
6512 | table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ); |
6513 | // Don't change integer Q instructions to D instructions and
6514 | // use D instructions if we started with a PS instruction.
6515 | if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode())) |
6516 | Domain = 4; |
6517 | } |
6518 | if (!table) { // try the AVX512DQMasked table |
6519 | assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
6520 | table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked); |
6521 | if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode())) |
6522 | Domain = 4; |
6523 | } |
6524 | assert(table && "Cannot change domain");
6525 | MI.setDesc(get(table[Domain - 1])); |
6526 | } |
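// Annotation (not part of the original file): assuming XORPSrr and PXORrr
// share a row in ReplaceableInstrs (defined earlier in this file), calling
// setExecutionDomain(MI, 3) on an XORPSrr is resolved by the first lookup
// and simply swaps the descriptor to PXORrr; the later tables are only
// consulted when the earlier ones miss.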
6527 | |
6528 | /// Set NopInst to the noop instruction to use for this target.
6529 | void X86InstrInfo::getNoop(MCInst &NopInst) const { |
6530 | NopInst.setOpcode(X86::NOOP); |
6531 | } |
6532 | |
6533 | bool X86InstrInfo::isHighLatencyDef(int opc) const { |
6534 | switch (opc) { |
6535 | default: return false; |
6536 | case X86::DIVPDrm: |
6537 | case X86::DIVPDrr: |
6538 | case X86::DIVPSrm: |
6539 | case X86::DIVPSrr: |
6540 | case X86::DIVSDrm: |
6541 | case X86::DIVSDrm_Int: |
6542 | case X86::DIVSDrr: |
6543 | case X86::DIVSDrr_Int: |
6544 | case X86::DIVSSrm: |
6545 | case X86::DIVSSrm_Int: |
6546 | case X86::DIVSSrr: |
6547 | case X86::DIVSSrr_Int: |
6548 | case X86::SQRTPDm: |
6549 | case X86::SQRTPDr: |
6550 | case X86::SQRTPSm: |
6551 | case X86::SQRTPSr: |
6552 | case X86::SQRTSDm: |
6553 | case X86::SQRTSDm_Int: |
6554 | case X86::SQRTSDr: |
6555 | case X86::SQRTSDr_Int: |
6556 | case X86::SQRTSSm: |
6557 | case X86::SQRTSSm_Int: |
6558 | case X86::SQRTSSr: |
6559 | case X86::SQRTSSr_Int: |
6560 | // AVX instructions with high latency |
6561 | case X86::VDIVPDrm: |
6562 | case X86::VDIVPDrr: |
6563 | case X86::VDIVPDYrm: |
6564 | case X86::VDIVPDYrr: |
6565 | case X86::VDIVPSrm: |
6566 | case X86::VDIVPSrr: |
6567 | case X86::VDIVPSYrm: |
6568 | case X86::VDIVPSYrr: |
6569 | case X86::VDIVSDrm: |
6570 | case X86::VDIVSDrm_Int: |
6571 | case X86::VDIVSDrr: |
6572 | case X86::VDIVSDrr_Int: |
6573 | case X86::VDIVSSrm: |
6574 | case X86::VDIVSSrm_Int: |
6575 | case X86::VDIVSSrr: |
6576 | case X86::VDIVSSrr_Int: |
6577 | case X86::VSQRTPDm: |
6578 | case X86::VSQRTPDr: |
6579 | case X86::VSQRTPDYm: |
6580 | case X86::VSQRTPDYr: |
6581 | case X86::VSQRTPSm: |
6582 | case X86::VSQRTPSr: |
6583 | case X86::VSQRTPSYm: |
6584 | case X86::VSQRTPSYr: |
6585 | case X86::VSQRTSDm: |
6586 | case X86::VSQRTSDm_Int: |
6587 | case X86::VSQRTSDr: |
6588 | case X86::VSQRTSDr_Int: |
6589 | case X86::VSQRTSSm: |
6590 | case X86::VSQRTSSm_Int: |
6591 | case X86::VSQRTSSr: |
6592 | case X86::VSQRTSSr_Int: |
6593 | // AVX512 instructions with high latency |
6594 | case X86::VDIVPDZ128rm: |
6595 | case X86::VDIVPDZ128rmb: |
6596 | case X86::VDIVPDZ128rmbk: |
6597 | case X86::VDIVPDZ128rmbkz: |
6598 | case X86::VDIVPDZ128rmk: |
6599 | case X86::VDIVPDZ128rmkz: |
6600 | case X86::VDIVPDZ128rr: |
6601 | case X86::VDIVPDZ128rrk: |
6602 | case X86::VDIVPDZ128rrkz: |
6603 | case X86::VDIVPDZ256rm: |
6604 | case X86::VDIVPDZ256rmb: |
6605 | case X86::VDIVPDZ256rmbk: |
6606 | case X86::VDIVPDZ256rmbkz: |
6607 | case X86::VDIVPDZ256rmk: |
6608 | case X86::VDIVPDZ256rmkz: |
6609 | case X86::VDIVPDZ256rr: |
6610 | case X86::VDIVPDZ256rrk: |
6611 | case X86::VDIVPDZ256rrkz: |
6612 | case X86::VDIVPDZrrb: |
6613 | case X86::VDIVPDZrrbk: |
6614 | case X86::VDIVPDZrrbkz: |
6615 | case X86::VDIVPDZrm: |
6616 | case X86::VDIVPDZrmb: |
6617 | case X86::VDIVPDZrmbk: |
6618 | case X86::VDIVPDZrmbkz: |
6619 | case X86::VDIVPDZrmk: |
6620 | case X86::VDIVPDZrmkz: |
6621 | case X86::VDIVPDZrr: |
6622 | case X86::VDIVPDZrrk: |
6623 | case X86::VDIVPDZrrkz: |
6624 | case X86::VDIVPSZ128rm: |
6625 | case X86::VDIVPSZ128rmb: |
6626 | case X86::VDIVPSZ128rmbk: |
6627 | case X86::VDIVPSZ128rmbkz: |
6628 | case X86::VDIVPSZ128rmk: |
6629 | case X86::VDIVPSZ128rmkz: |
6630 | case X86::VDIVPSZ128rr: |
6631 | case X86::VDIVPSZ128rrk: |
6632 | case X86::VDIVPSZ128rrkz: |
6633 | case X86::VDIVPSZ256rm: |
6634 | case X86::VDIVPSZ256rmb: |
6635 | case X86::VDIVPSZ256rmbk: |
6636 | case X86::VDIVPSZ256rmbkz: |
6637 | case X86::VDIVPSZ256rmk: |
6638 | case X86::VDIVPSZ256rmkz: |
6639 | case X86::VDIVPSZ256rr: |
6640 | case X86::VDIVPSZ256rrk: |
6641 | case X86::VDIVPSZ256rrkz: |
6642 | case X86::VDIVPSZrrb: |
6643 | case X86::VDIVPSZrrbk: |
6644 | case X86::VDIVPSZrrbkz: |
6645 | case X86::VDIVPSZrm: |
6646 | case X86::VDIVPSZrmb: |
6647 | case X86::VDIVPSZrmbk: |
6648 | case X86::VDIVPSZrmbkz: |
6649 | case X86::VDIVPSZrmk: |
6650 | case X86::VDIVPSZrmkz: |
6651 | case X86::VDIVPSZrr: |
6652 | case X86::VDIVPSZrrk: |
6653 | case X86::VDIVPSZrrkz: |
6654 | case X86::VDIVSDZrm: |
6655 | case X86::VDIVSDZrr: |
6656 | case X86::VDIVSDZrm_Int: |
6657 | case X86::VDIVSDZrm_Intk: |
6658 | case X86::VDIVSDZrm_Intkz: |
6659 | case X86::VDIVSDZrr_Int: |
6660 | case X86::VDIVSDZrr_Intk: |
6661 | case X86::VDIVSDZrr_Intkz: |
6662 | case X86::VDIVSDZrrb_Int: |
6663 | case X86::VDIVSDZrrb_Intk: |
6664 | case X86::VDIVSDZrrb_Intkz: |
6665 | case X86::VDIVSSZrm: |
6666 | case X86::VDIVSSZrr: |
6667 | case X86::VDIVSSZrm_Int: |
6668 | case X86::VDIVSSZrm_Intk: |
6669 | case X86::VDIVSSZrm_Intkz: |
6670 | case X86::VDIVSSZrr_Int: |
6671 | case X86::VDIVSSZrr_Intk: |
6672 | case X86::VDIVSSZrr_Intkz: |
6673 | case X86::VDIVSSZrrb_Int: |
6674 | case X86::VDIVSSZrrb_Intk: |
6675 | case X86::VDIVSSZrrb_Intkz: |
6676 | case X86::VSQRTPDZ128m: |
6677 | case X86::VSQRTPDZ128mb: |
6678 | case X86::VSQRTPDZ128mbk: |
6679 | case X86::VSQRTPDZ128mbkz: |
6680 | case X86::VSQRTPDZ128mk: |
6681 | case X86::VSQRTPDZ128mkz: |
6682 | case X86::VSQRTPDZ128r: |
6683 | case X86::VSQRTPDZ128rk: |
6684 | case X86::VSQRTPDZ128rkz: |
6685 | case X86::VSQRTPDZ256m: |
6686 | case X86::VSQRTPDZ256mb: |
6687 | case X86::VSQRTPDZ256mbk: |
6688 | case X86::VSQRTPDZ256mbkz: |
6689 | case X86::VSQRTPDZ256mk: |
6690 | case X86::VSQRTPDZ256mkz: |
6691 | case X86::VSQRTPDZ256r: |
6692 | case X86::VSQRTPDZ256rk: |
6693 | case X86::VSQRTPDZ256rkz: |
6694 | case X86::VSQRTPDZm: |
6695 | case X86::VSQRTPDZmb: |
6696 | case X86::VSQRTPDZmbk: |
6697 | case X86::VSQRTPDZmbkz: |
6698 | case X86::VSQRTPDZmk: |
6699 | case X86::VSQRTPDZmkz: |
6700 | case X86::VSQRTPDZr: |
6701 | case X86::VSQRTPDZrb: |
6702 | case X86::VSQRTPDZrbk: |
6703 | case X86::VSQRTPDZrbkz: |
6704 | case X86::VSQRTPDZrk: |
6705 | case X86::VSQRTPDZrkz: |
6706 | case X86::VSQRTPSZ128m: |
6707 | case X86::VSQRTPSZ128mb: |
6708 | case X86::VSQRTPSZ128mbk: |
6709 | case X86::VSQRTPSZ128mbkz: |
6710 | case X86::VSQRTPSZ128mk: |
6711 | case X86::VSQRTPSZ128mkz: |
6712 | case X86::VSQRTPSZ128r: |
6713 | case X86::VSQRTPSZ128rk: |
6714 | case X86::VSQRTPSZ128rkz: |
6715 | case X86::VSQRTPSZ256m: |
6716 | case X86::VSQRTPSZ256mb: |
6717 | case X86::VSQRTPSZ256mbk: |
6718 | case X86::VSQRTPSZ256mbkz: |
6719 | case X86::VSQRTPSZ256mk: |
6720 | case X86::VSQRTPSZ256mkz: |
6721 | case X86::VSQRTPSZ256r: |
6722 | case X86::VSQRTPSZ256rk: |
6723 | case X86::VSQRTPSZ256rkz: |
6724 | case X86::VSQRTPSZm: |
6725 | case X86::VSQRTPSZmb: |
6726 | case X86::VSQRTPSZmbk: |
6727 | case X86::VSQRTPSZmbkz: |
6728 | case X86::VSQRTPSZmk: |
6729 | case X86::VSQRTPSZmkz: |
6730 | case X86::VSQRTPSZr: |
6731 | case X86::VSQRTPSZrb: |
6732 | case X86::VSQRTPSZrbk: |
6733 | case X86::VSQRTPSZrbkz: |
6734 | case X86::VSQRTPSZrk: |
6735 | case X86::VSQRTPSZrkz: |
6736 | case X86::VSQRTSDZm: |
6737 | case X86::VSQRTSDZm_Int: |
6738 | case X86::VSQRTSDZm_Intk: |
6739 | case X86::VSQRTSDZm_Intkz: |
6740 | case X86::VSQRTSDZr: |
6741 | case X86::VSQRTSDZr_Int: |
6742 | case X86::VSQRTSDZr_Intk: |
6743 | case X86::VSQRTSDZr_Intkz: |
6744 | case X86::VSQRTSDZrb_Int: |
6745 | case X86::VSQRTSDZrb_Intk: |
6746 | case X86::VSQRTSDZrb_Intkz: |
6747 | case X86::VSQRTSSZm: |
6748 | case X86::VSQRTSSZm_Int: |
6749 | case X86::VSQRTSSZm_Intk: |
6750 | case X86::VSQRTSSZm_Intkz: |
6751 | case X86::VSQRTSSZr: |
6752 | case X86::VSQRTSSZr_Int: |
6753 | case X86::VSQRTSSZr_Intk: |
6754 | case X86::VSQRTSSZr_Intkz: |
6755 | case X86::VSQRTSSZrb_Int: |
6756 | case X86::VSQRTSSZrb_Intk: |
6757 | case X86::VSQRTSSZrb_Intkz: |
6758 | |
6759 | case X86::VGATHERDPDYrm: |
6760 | case X86::VGATHERDPDZ128rm: |
6761 | case X86::VGATHERDPDZ256rm: |
6762 | case X86::VGATHERDPDZrm: |
6763 | case X86::VGATHERDPDrm: |
6764 | case X86::VGATHERDPSYrm: |
6765 | case X86::VGATHERDPSZ128rm: |
6766 | case X86::VGATHERDPSZ256rm: |
6767 | case X86::VGATHERDPSZrm: |
6768 | case X86::VGATHERDPSrm: |
6769 | case X86::VGATHERPF0DPDm: |
6770 | case X86::VGATHERPF0DPSm: |
6771 | case X86::VGATHERPF0QPDm: |
6772 | case X86::VGATHERPF0QPSm: |
6773 | case X86::VGATHERPF1DPDm: |
6774 | case X86::VGATHERPF1DPSm: |
6775 | case X86::VGATHERPF1QPDm: |
6776 | case X86::VGATHERPF1QPSm: |
6777 | case X86::VGATHERQPDYrm: |
6778 | case X86::VGATHERQPDZ128rm: |
6779 | case X86::VGATHERQPDZ256rm: |
6780 | case X86::VGATHERQPDZrm: |
6781 | case X86::VGATHERQPDrm: |
6782 | case X86::VGATHERQPSYrm: |
6783 | case X86::VGATHERQPSZ128rm: |
6784 | case X86::VGATHERQPSZ256rm: |
6785 | case X86::VGATHERQPSZrm: |
6786 | case X86::VGATHERQPSrm: |
6787 | case X86::VPGATHERDDYrm: |
6788 | case X86::VPGATHERDDZ128rm: |
6789 | case X86::VPGATHERDDZ256rm: |
6790 | case X86::VPGATHERDDZrm: |
6791 | case X86::VPGATHERDDrm: |
6792 | case X86::VPGATHERDQYrm: |
6793 | case X86::VPGATHERDQZ128rm: |
6794 | case X86::VPGATHERDQZ256rm: |
6795 | case X86::VPGATHERDQZrm: |
6796 | case X86::VPGATHERDQrm: |
6797 | case X86::VPGATHERQDYrm: |
6798 | case X86::VPGATHERQDZ128rm: |
6799 | case X86::VPGATHERQDZ256rm: |
6800 | case X86::VPGATHERQDZrm: |
6801 | case X86::VPGATHERQDrm: |
6802 | case X86::VPGATHERQQYrm: |
6803 | case X86::VPGATHERQQZ128rm: |
6804 | case X86::VPGATHERQQZ256rm: |
6805 | case X86::VPGATHERQQZrm: |
6806 | case X86::VPGATHERQQrm: |
6807 | case X86::VSCATTERDPDZ128mr: |
6808 | case X86::VSCATTERDPDZ256mr: |
6809 | case X86::VSCATTERDPDZmr: |
6810 | case X86::VSCATTERDPSZ128mr: |
6811 | case X86::VSCATTERDPSZ256mr: |
6812 | case X86::VSCATTERDPSZmr: |
6813 | case X86::VSCATTERPF0DPDm: |
6814 | case X86::VSCATTERPF0DPSm: |
6815 | case X86::VSCATTERPF0QPDm: |
6816 | case X86::VSCATTERPF0QPSm: |
6817 | case X86::VSCATTERPF1DPDm: |
6818 | case X86::VSCATTERPF1DPSm: |
6819 | case X86::VSCATTERPF1QPDm: |
6820 | case X86::VSCATTERPF1QPSm: |
6821 | case X86::VSCATTERQPDZ128mr: |
6822 | case X86::VSCATTERQPDZ256mr: |
6823 | case X86::VSCATTERQPDZmr: |
6824 | case X86::VSCATTERQPSZ128mr: |
6825 | case X86::VSCATTERQPSZ256mr: |
6826 | case X86::VSCATTERQPSZmr: |
6827 | case X86::VPSCATTERDDZ128mr: |
6828 | case X86::VPSCATTERDDZ256mr: |
6829 | case X86::VPSCATTERDDZmr: |
6830 | case X86::VPSCATTERDQZ128mr: |
6831 | case X86::VPSCATTERDQZ256mr: |
6832 | case X86::VPSCATTERDQZmr: |
6833 | case X86::VPSCATTERQDZ128mr: |
6834 | case X86::VPSCATTERQDZ256mr: |
6835 | case X86::VPSCATTERQDZmr: |
6836 | case X86::VPSCATTERQQZ128mr: |
6837 | case X86::VPSCATTERQQZ256mr: |
6838 | case X86::VPSCATTERQQZmr: |
6839 | return true; |
6840 | } |
6841 | } |
6842 | |
6843 | bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel, |
6844 | const MachineRegisterInfo *MRI, |
6845 | const MachineInstr &DefMI, |
6846 | unsigned DefIdx, |
6847 | const MachineInstr &UseMI, |
6848 | unsigned UseIdx) const { |
6849 | return isHighLatencyDef(DefMI.getOpcode()); |
6850 | } |
6851 | |
6852 | bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst, |
6853 | const MachineBasicBlock *MBB) const { |
6854 | assert((Inst.getNumOperands() == 3 || Inst.getNumOperands() == 4) &&
6855 |        "Reassociation needs binary operators");
6856 | |
6857 | // Integer binary math/logic instructions have a third source operand:
6858 | // the EFLAGS register. That operand must be both defined here and never
6859 | // used; i.e., it must be dead. If the EFLAGS operand is live, we cannot
6860 | // change anything because rearranging the operands could affect other
6861 | // instructions that depend on the exact status flags (zero, sign, etc.)
6862 | // that are set by using these particular operands with this operation.
6863 | if (Inst.getNumOperands() == 4) { |
6864 | assert(Inst.getOperand(3).isReg() &&
6865 |        Inst.getOperand(3).getReg() == X86::EFLAGS &&
6866 |        "Unexpected operand in reassociable instruction");
6867 | if (!Inst.getOperand(3).isDead()) |
6868 | return false; |
6869 | } |
6870 | |
6871 | return TargetInstrInfo::hasReassociableOperands(Inst, MBB); |
6872 | } |
6873 | |
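// Annotation (not part of the original file): the dead-EFLAGS requirement
// above exists because reassociation rewrites, e.g.,
//   %t1 = ADD32rr %a, %b    (EFLAGS dead)
//   %t2 = ADD32rr %t1, %c   (EFLAGS dead)
// into
//   %t1 = ADD32rr %b, %c
//   %t2 = ADD32rr %a, %t1
// which changes the intermediate flag values, so it is only safe when no
// instruction reads the flags produced by either ADD.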
6874 | // TODO: There are many more machine instruction opcodes to match: |
6875 | // 1. Other data types (integer, vectors) |
6876 | // 2. Other math / logic operations (xor, or) |
6877 | // 3. Other forms of the same operation (intrinsics and other variants) |
6878 | bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const { |
6879 | switch (Inst.getOpcode()) { |
6880 | case X86::AND8rr: |
6881 | case X86::AND16rr: |
6882 | case X86::AND32rr: |
6883 | case X86::AND64rr: |
6884 | case X86::OR8rr: |
6885 | case X86::OR16rr: |
6886 | case X86::OR32rr: |
6887 | case X86::OR64rr: |
6888 | case X86::XOR8rr: |
6889 | case X86::XOR16rr: |
6890 | case X86::XOR32rr: |
6891 | case X86::XOR64rr: |
6892 | case X86::IMUL16rr: |
6893 | case X86::IMUL32rr: |
6894 | case X86::IMUL64rr: |
6895 | case X86::PANDrr: |
6896 | case X86::PORrr: |
6897 | case X86::PXORrr: |
6898 | case X86::ANDPDrr: |
6899 | case X86::ANDPSrr: |
6900 | case X86::ORPDrr: |
6901 | case X86::ORPSrr: |
6902 | case X86::XORPDrr: |
6903 | case X86::XORPSrr: |
6904 | case X86::PADDBrr: |
6905 | case X86::PADDWrr: |
6906 | case X86::PADDDrr: |
6907 | case X86::PADDQrr: |
6908 | case X86::VPANDrr: |
6909 | case X86::VPANDYrr: |
6910 | case X86::VPANDDZ128rr: |
6911 | case X86::VPANDDZ256rr: |
6912 | case X86::VPANDDZrr: |
6913 | case X86::VPANDQZ128rr: |
6914 | case X86::VPANDQZ256rr: |
6915 | case X86::VPANDQZrr: |
6916 | case X86::VPORrr: |
6917 | case X86::VPORYrr: |
6918 | case X86::VPORDZ128rr: |
6919 | case X86::VPORDZ256rr: |
6920 | case X86::VPORDZrr: |
6921 | case X86::VPORQZ128rr: |
6922 | case X86::VPORQZ256rr: |
6923 | case X86::VPORQZrr: |
6924 | case X86::VPXORrr: |
6925 | case X86::VPXORYrr: |
6926 | case X86::VPXORDZ128rr: |
6927 | case X86::VPXORDZ256rr: |
6928 | case X86::VPXORDZrr: |
6929 | case X86::VPXORQZ128rr: |
6930 | case X86::VPXORQZ256rr: |
6931 | case X86::VPXORQZrr: |
6932 | case X86::VANDPDrr: |
6933 | case X86::VANDPSrr: |
6934 | case X86::VANDPDYrr: |
6935 | case X86::VANDPSYrr: |
6936 | case X86::VANDPDZ128rr: |
6937 | case X86::VANDPSZ128rr: |
6938 | case X86::VANDPDZ256rr: |
6939 | case X86::VANDPSZ256rr: |
6940 | case X86::VANDPDZrr: |
6941 | case X86::VANDPSZrr: |
6942 | case X86::VORPDrr: |
6943 | case X86::VORPSrr: |
6944 | case X86::VORPDYrr: |
6945 | case X86::VORPSYrr: |
6946 | case X86::VORPDZ128rr: |
6947 | case X86::VORPSZ128rr: |
6948 | case X86::VORPDZ256rr: |
6949 | case X86::VORPSZ256rr: |
6950 | case X86::VORPDZrr: |
6951 | case X86::VORPSZrr: |
6952 | case X86::VXORPDrr: |
6953 | case X86::VXORPSrr: |
6954 | case X86::VXORPDYrr: |
6955 | case X86::VXORPSYrr: |
6956 | case X86::VXORPDZ128rr: |
6957 | case X86::VXORPSZ128rr: |
6958 | case X86::VXORPDZ256rr: |
6959 | case X86::VXORPSZ256rr: |
6960 | case X86::VXORPDZrr: |
6961 | case X86::VXORPSZrr: |
6962 | case X86::KADDBrr: |
6963 | case X86::KADDWrr: |
6964 | case X86::KADDDrr: |
6965 | case X86::KADDQrr: |
6966 | case X86::KANDBrr: |
6967 | case X86::KANDWrr: |
6968 | case X86::KANDDrr: |
6969 | case X86::KANDQrr: |
6970 | case X86::KORBrr: |
6971 | case X86::KORWrr: |
6972 | case X86::KORDrr: |
6973 | case X86::KORQrr: |
6974 | case X86::KXORBrr: |
6975 | case X86::KXORWrr: |
6976 | case X86::KXORDrr: |
6977 | case X86::KXORQrr: |
6978 | case X86::VPADDBrr: |
6979 | case X86::VPADDWrr: |
6980 | case X86::VPADDDrr: |
6981 | case X86::VPADDQrr: |
6982 | case X86::VPADDBYrr: |
6983 | case X86::VPADDWYrr: |
6984 | case X86::VPADDDYrr: |
6985 | case X86::VPADDQYrr: |
6986 | case X86::VPADDBZ128rr: |
6987 | case X86::VPADDWZ128rr: |
6988 | case X86::VPADDDZ128rr: |
6989 | case X86::VPADDQZ128rr: |
6990 | case X86::VPADDBZ256rr: |
6991 | case X86::VPADDWZ256rr: |
6992 | case X86::VPADDDZ256rr: |
6993 | case X86::VPADDQZ256rr: |
6994 | case X86::VPADDBZrr: |
6995 | case X86::VPADDWZrr: |
6996 | case X86::VPADDDZrr: |
6997 | case X86::VPADDQZrr: |
6998 | case X86::VPMULLWrr: |
6999 | case X86::VPMULLWYrr: |
7000 | case X86::VPMULLWZ128rr: |
7001 | case X86::VPMULLWZ256rr: |
7002 | case X86::VPMULLWZrr: |
7003 | case X86::VPMULLDrr: |
7004 | case X86::VPMULLDYrr: |
7005 | case X86::VPMULLDZ128rr: |
7006 | case X86::VPMULLDZ256rr: |
7007 | case X86::VPMULLDZrr: |
7008 | case X86::VPMULLQZ128rr: |
7009 | case X86::VPMULLQZ256rr: |
7010 | case X86::VPMULLQZrr: |
7011 | // Normal min/max instructions are not commutative because of NaN and signed |
7012 | // zero semantics, but these are. Thus, there's no need to check for global |
7013 | // relaxed math; the instructions themselves have the properties we need. |
7014 | case X86::MAXCPDrr: |
7015 | case X86::MAXCPSrr: |
7016 | case X86::MAXCSDrr: |
7017 | case X86::MAXCSSrr: |
7018 | case X86::MINCPDrr: |
7019 | case X86::MINCPSrr: |
7020 | case X86::MINCSDrr: |
7021 | case X86::MINCSSrr: |
7022 | case X86::VMAXCPDrr: |
7023 | case X86::VMAXCPSrr: |
7024 | case X86::VMAXCPDYrr: |
7025 | case X86::VMAXCPSYrr: |
7026 | case X86::VMAXCPDZ128rr: |
7027 | case X86::VMAXCPSZ128rr: |
7028 | case X86::VMAXCPDZ256rr: |
7029 | case X86::VMAXCPSZ256rr: |
7030 | case X86::VMAXCPDZrr: |
7031 | case X86::VMAXCPSZrr: |
7032 | case X86::VMAXCSDrr: |
7033 | case X86::VMAXCSSrr: |
7034 | case X86::VMAXCSDZrr: |
7035 | case X86::VMAXCSSZrr: |
7036 | case X86::VMINCPDrr: |
7037 | case X86::VMINCPSrr: |
7038 | case X86::VMINCPDYrr: |
7039 | case X86::VMINCPSYrr: |
7040 | case X86::VMINCPDZ128rr: |
7041 | case X86::VMINCPSZ128rr: |
7042 | case X86::VMINCPDZ256rr: |
7043 | case X86::VMINCPSZ256rr: |
7044 | case X86::VMINCPDZrr: |
7045 | case X86::VMINCPSZrr: |
7046 | case X86::VMINCSDrr: |
7047 | case X86::VMINCSSrr: |
7048 | case X86::VMINCSDZrr: |
7049 | case X86::VMINCSSZrr: |
7050 | return true; |
7051 | case X86::ADDPDrr: |
7052 | case X86::ADDPSrr: |
7053 | case X86::ADDSDrr: |
7054 | case X86::ADDSSrr: |
7055 | case X86::MULPDrr: |
7056 | case X86::MULPSrr: |
7057 | case X86::MULSDrr: |
7058 | case X86::MULSSrr: |
7059 | case X86::VADDPDrr: |
7060 | case X86::VADDPSrr: |
7061 | case X86::VADDPDYrr: |
7062 | case X86::VADDPSYrr: |
7063 | case X86::VADDPDZ128rr: |
7064 | case X86::VADDPSZ128rr: |
7065 | case X86::VADDPDZ256rr: |
7066 | case X86::VADDPSZ256rr: |
7067 | case X86::VADDPDZrr: |
7068 | case X86::VADDPSZrr: |
7069 | case X86::VADDSDrr: |
7070 | case X86::VADDSSrr: |
7071 | case X86::VADDSDZrr: |
7072 | case X86::VADDSSZrr: |
7073 | case X86::VMULPDrr: |
7074 | case X86::VMULPSrr: |
7075 | case X86::VMULPDYrr: |
7076 | case X86::VMULPSYrr: |
7077 | case X86::VMULPDZ128rr: |
7078 | case X86::VMULPSZ128rr: |
7079 | case X86::VMULPDZ256rr: |
7080 | case X86::VMULPSZ256rr: |
7081 | case X86::VMULPDZrr: |
7082 | case X86::VMULPSZrr: |
7083 | case X86::VMULSDrr: |
7084 | case X86::VMULSSrr: |
7085 | case X86::VMULSDZrr: |
7086 | case X86::VMULSSZrr: |
7087 | return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath; |
7088 | default: |
7089 | return false; |
7090 | } |
7091 | } |
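// Annotation (not part of the original file): plain FP add/mul are
// commutative but not associative under IEEE-754 rounding, e.g.
// (1e20 + -1e20) + 1.0 == 1.0 while 1e20 + (-1e20 + 1.0) == 0.0, which is
// why the FP opcodes in the second group above are only reported as
// reassociable when unsafe FP math is enabled globally.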
7092 | |
7093 | /// This is an architecture-specific helper function of reassociateOps. |
7094 | /// Set special operand attributes for new instructions after reassociation. |
7095 | void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1, |
7096 | MachineInstr &OldMI2, |
7097 | MachineInstr &NewMI1, |
7098 | MachineInstr &NewMI2) const { |
7099 | // Integer instructions define an implicit EFLAGS source register operand as |
7100 | // the third source (fourth total) operand. |
7101 | if (OldMI1.getNumOperands() != 4 || OldMI2.getNumOperands() != 4) |
7102 | return; |
7103 | |
7104 | assert(NewMI1.getNumOperands() == 4 && NewMI2.getNumOperands() == 4 &&
7105 |        "Unexpected instruction type for reassociation");
7106 | |
7107 | MachineOperand &OldOp1 = OldMI1.getOperand(3); |
7108 | MachineOperand &OldOp2 = OldMI2.getOperand(3); |
7109 | MachineOperand &NewOp1 = NewMI1.getOperand(3); |
7110 | MachineOperand &NewOp2 = NewMI2.getOperand(3); |
7111 | |
7112 | assert(OldOp1.isReg() && OldOp1.getReg() == X86::EFLAGS && OldOp1.isDead() &&
7113 |        "Must have dead EFLAGS operand in reassociable instruction");
7114 | assert(OldOp2.isReg() && OldOp2.getReg() == X86::EFLAGS && OldOp2.isDead() &&
7115 |        "Must have dead EFLAGS operand in reassociable instruction");
7116 | |
7117 | (void)OldOp1; |
7118 | (void)OldOp2; |
7119 | |
7120 | assert(NewOp1.isReg() && NewOp1.getReg() == X86::EFLAGS &&
7121 |        "Unexpected operand in reassociable instruction");
7122 | assert(NewOp2.isReg() && NewOp2.getReg() == X86::EFLAGS &&
7123 |        "Unexpected operand in reassociable instruction");
7124 | |
7125 | // Mark the new EFLAGS operands as dead to be helpful to subsequent iterations |
7126 | // of this pass or other passes. The EFLAGS operands must be dead in these new |
7127 | // instructions because the EFLAGS operands in the original instructions must |
7128 | // be dead in order for reassociation to occur. |
7129 | NewOp1.setIsDead(); |
7130 | NewOp2.setIsDead(); |
7131 | } |
7132 | |
7133 | std::pair<unsigned, unsigned> |
7134 | X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { |
7135 | return std::make_pair(TF, 0u); |
7136 | } |
7137 | |
7138 | ArrayRef<std::pair<unsigned, const char *>> |
7139 | X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const { |
7140 | using namespace X86II; |
7141 | static const std::pair<unsigned, const char *> TargetFlags[] = { |
7142 | {MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"}, |
7143 | {MO_PIC_BASE_OFFSET, "x86-pic-base-offset"}, |
7144 | {MO_GOT, "x86-got"}, |
7145 | {MO_GOTOFF, "x86-gotoff"}, |
7146 | {MO_GOTPCREL, "x86-gotpcrel"}, |
7147 | {MO_PLT, "x86-plt"}, |
7148 | {MO_TLSGD, "x86-tlsgd"}, |
7149 | {MO_TLSLD, "x86-tlsld"}, |
7150 | {MO_TLSLDM, "x86-tlsldm"}, |
7151 | {MO_GOTTPOFF, "x86-gottpoff"}, |
7152 | {MO_INDNTPOFF, "x86-indntpoff"}, |
7153 | {MO_TPOFF, "x86-tpoff"}, |
7154 | {MO_DTPOFF, "x86-dtpoff"}, |
7155 | {MO_NTPOFF, "x86-ntpoff"}, |
7156 | {MO_GOTNTPOFF, "x86-gotntpoff"}, |
7157 | {MO_DLLIMPORT, "x86-dllimport"}, |
7158 | {MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"}, |
7159 | {MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"}, |
7160 | {MO_TLVP, "x86-tlvp"}, |
7161 | {MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"}, |
7162 | {MO_SECREL, "x86-secrel"}, |
7163 | {MO_COFFSTUB, "x86-coffstub"}}; |
7164 | return makeArrayRef(TargetFlags); |
7165 | } |
7166 | |
7167 | namespace { |
7168 | /// Create Global Base Reg pass. This initializes the PIC |
7169 | /// global base register for x86-32. |
7170 | struct CGBR : public MachineFunctionPass { |
7171 | static char ID; |
7172 | CGBR() : MachineFunctionPass(ID) {} |
7173 | |
7174 | bool runOnMachineFunction(MachineFunction &MF) override { |
7175 | const X86TargetMachine *TM = |
7176 | static_cast<const X86TargetMachine *>(&MF.getTarget()); |
7177 | const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); |
7178 | |
7179 | // Don't do anything in the 64-bit small and kernel code models. They use |
7180 | // RIP-relative addressing for everything. |
7181 | if (STI.is64Bit() && (TM->getCodeModel() == CodeModel::Small || |
7182 | TM->getCodeModel() == CodeModel::Kernel)) |
7183 | return false; |
7184 | |
7185 | // Only emit a global base reg in PIC mode. |
7186 | if (!TM->isPositionIndependent()) |
7187 | return false; |
7188 | |
7189 | X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); |
7190 | unsigned GlobalBaseReg = X86FI->getGlobalBaseReg(); |
7191 | |
7192 | // If we didn't need a GlobalBaseReg, don't insert code. |
7193 | if (GlobalBaseReg == 0) |
7194 | return false; |
7195 | |
7196 | // Insert the code that sets GlobalBaseReg into the first MBB of the function.
7197 | MachineBasicBlock &FirstMBB = MF.front(); |
7198 | MachineBasicBlock::iterator MBBI = FirstMBB.begin(); |
7199 | DebugLoc DL = FirstMBB.findDebugLoc(MBBI); |
7200 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); |
7201 | const X86InstrInfo *TII = STI.getInstrInfo(); |
7202 | |
7203 | unsigned PC; |
7204 | if (STI.isPICStyleGOT()) |
7205 | PC = RegInfo.createVirtualRegister(&X86::GR32RegClass); |
7206 | else |
7207 | PC = GlobalBaseReg; |
7208 | |
7209 | if (STI.is64Bit()) { |
7210 | if (TM->getCodeModel() == CodeModel::Medium) { |
7211 | // In the medium code model, use a RIP-relative LEA to materialize the |
7212 | // GOT. |
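     |         // The intent is roughly the following (the allocator picks the |
     |         // actual register): |
     |         //   leaq _GLOBAL_OFFSET_TABLE_(%rip), %reg |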
7213 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC) |
7214 | .addReg(X86::RIP) |
7215 | .addImm(0) |
7216 | .addReg(0) |
7217 | .addExternalSymbol("_GLOBAL_OFFSET_TABLE_") |
7218 | .addReg(0); |
7219 | } else if (TM->getCodeModel() == CodeModel::Large) { |
7220 | // In the large code model, we are aiming for this code, though the |
7221 | // register allocation may vary: |
7222 | // leaq .LN$pb(%rip), %rax |
7223 | // movq $_GLOBAL_OFFSET_TABLE_ - .LN$pb, %rcx |
7224 | // addq %rcx, %rax |
7225 | // RAX now holds address of _GLOBAL_OFFSET_TABLE_. |
7226 | unsigned PBReg = RegInfo.createVirtualRegister(&X86::GR64RegClass); |
7227 | unsigned GOTReg = |
7228 | RegInfo.createVirtualRegister(&X86::GR64RegClass); |
7229 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PBReg) |
7230 | .addReg(X86::RIP) |
7231 | .addImm(0) |
7232 | .addReg(0) |
7233 | .addSym(MF.getPICBaseSymbol()) |
7234 | .addReg(0); |
7235 | std::prev(MBBI)->setPreInstrSymbol(MF, MF.getPICBaseSymbol()); |
7236 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOV64ri), GOTReg) |
7237 | .addExternalSymbol("_GLOBAL_OFFSET_TABLE_", |
7238 | X86II::MO_PIC_BASE_OFFSET); |
7239 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD64rr), PC) |
7240 | .addReg(PBReg, RegState::Kill) |
7241 | .addReg(GOTReg, RegState::Kill); |
7242 | } else { |
7243 |         llvm_unreachable("unexpected code model"); |
7244 | } |
7245 | } else { |
7246 |       // The operand of MOVPC32r is completely ignored by the asm printer; it's |
7247 |       // only used in JIT code emission as a displacement from the pc. |
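     |       // It expands to something like (label name illustrative): |
     |       //   calll .L0$pb |
     |       // .L0$pb: |
     |       //   popl %pc_reg |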
7248 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0); |
7249 | |
7250 |       // If we're using vanilla 'GOT' PIC style, we should use addressing |
7251 |       // relative not to the pc, but to the _GLOBAL_OFFSET_TABLE_ external symbol. |
7252 | if (STI.isPICStyleGOT()) { |
7253 | // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], |
7254 | // %some_register |
7255 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg) |
7256 | .addReg(PC) |
7257 | .addExternalSymbol("_GLOBAL_OFFSET_TABLE_", |
7258 | X86II::MO_GOT_ABSOLUTE_ADDRESS); |
7259 | } |
7260 | } |
7261 | |
7262 | return true; |
7263 | } |
7264 | |
7265 | StringRef getPassName() const override { |
7266 | return "X86 PIC Global Base Reg Initialization"; |
7267 | } |
7268 | |
7269 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
7270 | AU.setPreservesCFG(); |
7271 | MachineFunctionPass::getAnalysisUsage(AU); |
7272 | } |
7273 | }; |
7274 | } |
7275 | |
7276 | char CGBR::ID = 0; |
7277 | FunctionPass* |
7278 | llvm::createX86GlobalBaseRegPass() { return new CGBR(); } |
7279 | |
7280 | namespace { |
7281 | struct LDTLSCleanup : public MachineFunctionPass { |
7282 | static char ID; |
7283 | LDTLSCleanup() : MachineFunctionPass(ID) {} |
7284 | |
7285 | bool runOnMachineFunction(MachineFunction &MF) override { |
7286 | if (skipFunction(MF.getFunction())) |
7287 | return false; |
7288 | |
7289 | X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>(); |
7290 | if (MFI->getNumLocalDynamicTLSAccesses() < 2) { |
7291 |       // No point folding accesses if there aren't at least two. |
7292 | return false; |
7293 | } |
7294 | |
7295 | MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>(); |
7296 | return VisitNode(DT->getRootNode(), 0); |
7297 | } |
7298 | |
7299 | // Visit the dominator subtree rooted at Node in pre-order. |
7300 |   // If TLSBaseAddrReg is nonzero, then use it to replace any |
7301 | // TLS_base_addr instructions. Otherwise, create the register |
7302 | // when the first such instruction is seen, and then use it |
7303 | // as we encounter more instructions. |
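     |   // A sketch of the rewrite (virtual register name illustrative): |
     |   //   TLS_base_addr64 ...   ; first access leaves the address in $rax |
     |   //   %base = COPY $rax     ; inserted after it by SetRegister |
     |   //   ... |
     |   //   $rax = COPY %base     ; replaces each later TLS_base_addr64 |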
7304 | bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) { |
7305 | MachineBasicBlock *BB = Node->getBlock(); |
7306 | bool Changed = false; |
7307 | |
7308 | // Traverse the current block. |
7309 | for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; |
7310 | ++I) { |
7311 | switch (I->getOpcode()) { |
7312 | case X86::TLS_base_addr32: |
7313 | case X86::TLS_base_addr64: |
7314 | if (TLSBaseAddrReg) |
7315 | I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg); |
7316 | else |
7317 | I = SetRegister(*I, &TLSBaseAddrReg); |
7318 | Changed = true; |
7319 | break; |
7320 | default: |
7321 | break; |
7322 | } |
7323 | } |
7324 | |
7325 | // Visit the children of this block in the dominator tree. |
7326 | for (MachineDomTreeNode::iterator I = Node->begin(), E = Node->end(); |
7327 | I != E; ++I) { |
7328 | Changed |= VisitNode(*I, TLSBaseAddrReg); |
7329 | } |
7330 | |
7331 | return Changed; |
7332 | } |
7333 | |
7334 | // Replace the TLS_base_addr instruction I with a copy from |
7335 | // TLSBaseAddrReg, returning the new instruction. |
7336 | MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I, |
7337 | unsigned TLSBaseAddrReg) { |
7338 | MachineFunction *MF = I.getParent()->getParent(); |
7339 | const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>(); |
7340 | const bool is64Bit = STI.is64Bit(); |
7341 | const X86InstrInfo *TII = STI.getInstrInfo(); |
7342 | |
7343 | // Insert a Copy from TLSBaseAddrReg to RAX/EAX. |
7344 | MachineInstr *Copy = |
7345 | BuildMI(*I.getParent(), I, I.getDebugLoc(), |
7346 | TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX) |
7347 | .addReg(TLSBaseAddrReg); |
7348 | |
7349 | // Erase the TLS_base_addr instruction. |
7350 | I.eraseFromParent(); |
7351 | |
7352 | return Copy; |
7353 | } |
7354 | |
7355 | // Create a virtual register in *TLSBaseAddrReg, and populate it by |
7356 | // inserting a copy instruction after I. Returns the new instruction. |
7357 | MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) { |
7358 | MachineFunction *MF = I.getParent()->getParent(); |
7359 | const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>(); |
7360 | const bool is64Bit = STI.is64Bit(); |
7361 | const X86InstrInfo *TII = STI.getInstrInfo(); |
7362 | |
7363 | // Create a virtual register for the TLS base address. |
7364 | MachineRegisterInfo &RegInfo = MF->getRegInfo(); |
7365 | *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit |
7366 | ? &X86::GR64RegClass |
7367 | : &X86::GR32RegClass); |
7368 | |
7369 | // Insert a copy from RAX/EAX to TLSBaseAddrReg. |
7370 | MachineInstr *Next = I.getNextNode(); |
7371 | MachineInstr *Copy = |
7372 | BuildMI(*I.getParent(), Next, I.getDebugLoc(), |
7373 | TII->get(TargetOpcode::COPY), *TLSBaseAddrReg) |
7374 | .addReg(is64Bit ? X86::RAX : X86::EAX); |
7375 | |
7376 | return Copy; |
7377 | } |
7378 | |
7379 | StringRef getPassName() const override { |
7380 | return "Local Dynamic TLS Access Clean-up"; |
7381 | } |
7382 | |
7383 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
7384 | AU.setPreservesCFG(); |
7385 | AU.addRequired<MachineDominatorTree>(); |
7386 | MachineFunctionPass::getAnalysisUsage(AU); |
7387 | } |
7388 | }; |
7389 | } |
7390 | |
7391 | char LDTLSCleanup::ID = 0; |
7392 | FunctionPass* |
7393 | llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); } |
7394 | |
7395 | /// Constants defining how certain sequences should be outlined. |
7396 | /// |
7397 | /// \p MachineOutlinerDefault implies that the function is called with a call |
7398 | /// instruction, and a return must be emitted for the outlined function frame. |
7399 | /// |
7400 | /// That is, |
7401 | /// |
7402 | /// I1 OUTLINED_FUNCTION: |
7403 | /// I2 --> call OUTLINED_FUNCTION I1 |
7404 | /// I3 I2 |
7405 | /// I3 |
7406 | /// ret |
7407 | /// |
7408 | /// * Call construction overhead: 1 (call instruction) |
7409 | /// * Frame construction overhead: 1 (return instruction) |
7410 | /// |
7411 | /// \p MachineOutlinerTailCall implies that the function is being tail called. |
7412 | /// A jump is emitted instead of a call, and the return is already present in |
7413 | /// the outlined sequence. That is, |
7414 | /// |
7415 | /// I1 OUTLINED_FUNCTION: |
7416 | /// I2 --> jmp OUTLINED_FUNCTION I1 |
7417 | /// ret I2 |
7418 | /// ret |
7419 | /// |
7420 | /// * Call construction overhead: 1 (jump instruction) |
7421 | /// * Frame construction overhead: 0 (don't need to return) |
7422 | /// |
7423 | enum MachineOutlinerClass { |
7424 | MachineOutlinerDefault, |
7425 | MachineOutlinerTailCall |
7426 | }; |
7427 | |
7428 | outliner::OutlinedFunction X86InstrInfo::getOutliningCandidateInfo( |
7429 | std::vector<outliner::Candidate> &RepeatedSequenceLocs) const { |
7430 | unsigned SequenceSize = |
7431 | std::accumulate(RepeatedSequenceLocs[0].front(), |
7432 | std::next(RepeatedSequenceLocs[0].back()), 0, |
7433 | [](unsigned Sum, const MachineInstr &MI) { |
7434 | // FIXME: x86 doesn't implement getInstSizeInBytes, so |
7435 | // we can't tell the cost. Just assume each instruction |
7436 | // is one byte. |
7437 | if (MI.isDebugInstr() || MI.isKill()) |
7438 | return Sum; |
7439 | return Sum + 1; |
7440 | }); |
7441 | |
7442 | // FIXME: Use real size in bytes for call and ret instructions. |
7443 | if (RepeatedSequenceLocs[0].back()->isTerminator()) { |
7444 | for (outliner::Candidate &C : RepeatedSequenceLocs) |
7445 | C.setCallInfo(MachineOutlinerTailCall, 1); |
7446 | |
7447 | return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, |
7448 | 0, // Number of bytes to emit frame. |
7449 | MachineOutlinerTailCall // Type of frame. |
7450 | ); |
7451 | } |
7452 | |
7453 | for (outliner::Candidate &C : RepeatedSequenceLocs) |
7454 | C.setCallInfo(MachineOutlinerDefault, 1); |
7455 | |
7456 | return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, 1, |
7457 | MachineOutlinerDefault); |
7458 | } |
7459 | |
7460 | bool X86InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF, |
7461 | bool OutlineFromLinkOnceODRs) const { |
7462 | const Function &F = MF.getFunction(); |
7463 | |
7464 | // Does the function use a red zone? If it does, then we can't risk messing |
7465 | // with the stack. |
7466 | if (!F.hasFnAttribute(Attribute::NoRedZone)) { |
7467 | // It could have a red zone. If it does, then we don't want to touch it. |
7468 | const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); |
7469 | if (!X86FI || X86FI->getUsesRedZone()) |
7470 | return false; |
7471 | } |
7472 | |
7473 |   // If we *don't* want to outline from things that could potentially be |
7474 |   // deduplicated, then return false. |
7475 | if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage()) |
7476 | return false; |
7477 | |
7478 | // This function is viable for outlining, so return true. |
7479 | return true; |
7480 | } |
7481 | |
7482 | outliner::InstrType |
7483 | X86InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const { |
7484 | MachineInstr &MI = *MIT; |
7485 | // Don't allow debug values to impact outlining type. |
7486 | if (MI.isDebugInstr() || MI.isIndirectDebugValue()) |
7487 | return outliner::InstrType::Invisible; |
7488 | |
7489 |   // At this point, KILL instructions don't really tell us much, so we can |
7490 |   // just skip over them. |
7491 | if (MI.isKill()) |
7492 | return outliner::InstrType::Invisible; |
7493 | |
7494 | // Is this a tail call? If yes, we can outline as a tail call. |
7495 | if (isTailCall(MI)) |
7496 | return outliner::InstrType::Legal; |
7497 | |
7498 | // Is this the terminator of a basic block? |
7499 | if (MI.isTerminator() || MI.isReturn()) { |
7500 | |
7501 | // Does its parent have any successors in its MachineFunction? |
7502 | if (MI.getParent()->succ_empty()) |
7503 | return outliner::InstrType::Legal; |
7504 | |
7505 |     // It does, so we can't outline it as a tail call. |
7506 | return outliner::InstrType::Illegal; |
7507 | } |
7508 | |
7509 | // Don't outline anything that modifies or reads from the stack pointer. |
7510 | // |
7511 | // FIXME: There are instructions which are being manually built without |
7512 | // explicit uses/defs so we also have to check the MCInstrDesc. We should be |
7513 | // able to remove the extra checks once those are fixed up. For example, |
7514 | // sometimes we might get something like %rax = POP64r 1. This won't be |
7515 | // caught by modifiesRegister or readsRegister even though the instruction |
7516 | // really ought to be formed so that modifiesRegister/readsRegister would |
7517 | // catch it. |
7518 | if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) || |
7519 | MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) || |
7520 | MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP)) |
7521 | return outliner::InstrType::Illegal; |
7522 | |
7523 | // Outlined calls change the instruction pointer, so don't read from it. |
7524 | if (MI.readsRegister(X86::RIP, &RI) || |
7525 | MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) || |
7526 | MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP)) |
7527 | return outliner::InstrType::Illegal; |
7528 | |
7529 |   // Positions (e.g. labels and CFI instructions) can't safely be outlined. |
7530 | if (MI.isPosition()) |
7531 | return outliner::InstrType::Illegal; |
7532 | |
7533 | // Make sure none of the operands of this instruction do anything tricky. |
7534 | for (const MachineOperand &MOP : MI.operands()) |
7535 | if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() || |
7536 | MOP.isTargetIndex()) |
7537 | return outliner::InstrType::Illegal; |
7538 | |
7539 | return outliner::InstrType::Legal; |
7540 | } |
7541 | |
7542 | void X86InstrInfo::buildOutlinedFrame(MachineBasicBlock &MBB, |
7543 | MachineFunction &MF, |
7544 | const outliner::OutlinedFunction &OF) |
7545 | const { |
7546 | // If we're a tail call, we already have a return, so don't do anything. |
7547 | if (OF.FrameConstructionID == MachineOutlinerTailCall) |
7548 | return; |
7549 | |
7550 | // We're a normal call, so our sequence doesn't have a return instruction. |
7551 | // Add it in. |
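     |     // The result matches the MachineOutlinerDefault diagram above: the |
     |     // outlined sequence followed by a single retq. |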
7552 | MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RETQ)); |
7553 | MBB.insert(MBB.end(), retq); |
7554 | } |
7555 | |
7556 | MachineBasicBlock::iterator |
7557 | X86InstrInfo::insertOutlinedCall(Module &M, MachineBasicBlock &MBB, |
7558 | MachineBasicBlock::iterator &It, |
7559 | MachineFunction &MF, |
7560 | const outliner::Candidate &C) const { |
7561 | // Is it a tail call? |
7562 | if (C.CallConstructionID == MachineOutlinerTailCall) { |
7563 | // Yes, just insert a JMP. |
7564 | It = MBB.insert(It, |
7565 | BuildMI(MF, DebugLoc(), get(X86::TAILJMPd64)) |
7566 | .addGlobalAddress(M.getNamedValue(MF.getName()))); |
7567 | } else { |
7568 | // No, insert a call. |
7569 | It = MBB.insert(It, |
7570 | BuildMI(MF, DebugLoc(), get(X86::CALL64pcrel32)) |
7571 | .addGlobalAddress(M.getNamedValue(MF.getName()))); |
7572 | } |
7573 | |
7574 | return It; |
7575 | } |
7576 | |
7577 | #define GET_INSTRINFO_HELPERS |
7578 | #include "X86GenInstrInfo.inc" |