//===- X86InstructionSelector.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>

#define DEBUG_TYPE "X86-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
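// X86GenGlobalISel.inc is included several times in this file; each
// inclusion is wrapped in a different GET_GLOBALISEL_* guard so that only
// one slice of the TableGen-erated code (the predicate bitset, the
// predicate/temporary declarations, or the matcher implementation) is
// pulled in at a time.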

class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // TODO: remove after supported by Tablegen-erated instruction selection.
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          Align Alignment) const;

  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectFCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectUAddSub(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectDebugInstr(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                           MachineFunction &MF);
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF);
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
                        MachineFunction &MF) const;
  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const unsigned DstReg,
                          const TargetRegisterClass *DstRC,
                          const unsigned SrcReg,
                          const TargetRegisterClass *SrcRC) const;
  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectMulDivRem(MachineInstr &I, MachineRegisterInfo &MRI,
                       MachineFunction &MF) const;
  bool selectSelect(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;

  // Emit an insert-subreg instruction and insert it before MachineInstr &I.
  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  // Emit an extract-subreg instruction and insert it before MachineInstr &I.
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;

  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
                                         MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return STI.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}

const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}

static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}

static const TargetRegisterClass *getRegClassFromGRPhysReg(Register Reg) {
  assert(Reg.isPhysical());
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}

// FIXME: We need some sort of API in RBI/TRI to allow generic code to
// constrain operands of simple instructions given a TargetRegisterClass
// and LLT.
bool X86InstructionSelector::selectDebugInstr(MachineInstr &I,
                                              MachineRegisterInfo &MRI) const {
  for (MachineOperand &MO : I.operands()) {
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (!Reg)
      continue;
    if (Reg.isPhysical())
      continue;
    LLT Ty = MRI.getType(Reg);
    const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
    const TargetRegisterClass *RC =
        dyn_cast_if_present<const TargetRegisterClass *>(RegClassOrBank);
    if (!RC) {
      const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
      RC = getRegClass(Ty, RB);
      if (!RC) {
        LLVM_DEBUG(
            dbgs() << "Warning: DBG_VALUE operand has unexpected size/bank\n");
        break;
      }
    }
    RBI.constrainGenericRegister(Reg, *RC, MRI);
  }

  return true;
}

// Set the X86 opcode and constrain DstReg.
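// Two physreg cases produced by ABI lowering get special handling below: a
// widening copy into a physical register goes through a SUBREG_TO_REG of
// the source, and a narrowing copy from a physical register is rewritten
// to read the matching sub-register directly.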
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  Register SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstReg.isPhysical()) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering; perform an anyext.
        Register ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!SrcReg.isPhysical() || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
          (SrcReg.isPhysical() &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      SrcReg.isPhysical()) {
    // Substitute the physical register to perform the truncate.

    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    if (I.isDebugInstr())
      return selectDebugInstr(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, *CoverageInfo))
    return true;

  LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_FCMP:
    return selectFCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_USUBE:
  case TargetOpcode::G_USUBO:
    return selectUAddSub(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectMergeValues(I, MRI, MF);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM:
    return selectMulDivRem(I, MRI, MF);
  case TargetOpcode::G_SELECT:
    return selectSelect(I, MRI, MF);
  }

  return false;
}

unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                Align Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt
                       : HasAVX  ? X86::VMOVSSrm_alt
                                 : X86::MOVSSrm_alt)
                    : (HasAVX512 ? X86::VMOVSSZmr
                       : HasAVX  ? X86::VMOVSSmr
                                 : X86::MOVSSmr);
  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt
                       : HasAVX  ? X86::VMOVSDrm_alt
                                 : X86::MOVSDrm_alt)
                    : (HasAVX512 ? X86::VMOVSDZmr
                       : HasAVX  ? X86::VMOVSDmr
                                 : X86::MOVSDmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= Align(16))
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                       : HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX
                       : HasAVX    ? X86::VMOVAPSrm
                                   : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                       : HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX
                       : HasAVX    ? X86::VMOVAPSmr
                                   : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                       : HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX
                       : HasAVX    ? X86::VMOVUPSrm
                                   : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                       : HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX
                       : HasAVX    ? X86::VMOVUPSmr
                                   : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= Align(32))
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                       : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                   : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                       : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                   : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                       : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                   : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                       : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                   : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= Align(64))
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}

// Fill in an address from the given instruction.
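// Three forms are handled: a G_PTR_ADD whose offset is a constant that fits
// in 32 bits becomes base + displacement, a G_FRAME_INDEX becomes a
// frame-slot base, and anything else falls back to using the pointer vreg
// itself as the base register.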
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_PTR_ADD) {
    if (auto COff = getIConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}

bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  assert(I.hasOneMemOperand());
  auto &MemOp = **I.memoperands_begin();
  if (MemOp.isAtomic()) {
    // Note: for unordered operations, we rely on the fact that the
    // appropriate MMO is already on the instruction we're mutating, and thus
    // we don't need to make any changes. So long as we select an opcode which
    // is capable of loading or storing the appropriate size atomically, the
    // rest of the backend is required to respect the MMO state.
    if (!MemOp.isUnordered()) {
      LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
      return false;
    }
    if (MemOp.getAlign() < Ty.getSizeInBits() / 8) {
      LLVM_DEBUG(dbgs() << "Unaligned atomics not supported yet\n");
      return false;
    }
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlign());
  if (NewOpc == Opc)
    return false;

  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    I.removeOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr); the X86 store instruction is (Addr, VAL).
    I.removeOperand(1);
    I.removeOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
  if (Ty == LLT::pointer(0, 64))
    return X86::LEA64r;
  else if (Ty == LLT::pointer(0, 32))
    return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  else
    llvm_unreachable("Can't get LEA opcode. Unsupported type.");
}

bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX ||
          Opc == TargetOpcode::G_PTR_ADD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to compute the frame-index address or the pointer add.
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // Set IndexReg.
    InxOp.ChangeToImmediate(1); // Set Scale.
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    return false; // TODO: we don't support TLS yet.
  }

  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return false;

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // TODO: The ABI requires an extra load; not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags))
    return false;

  // TODO: This reference is relative to the PIC base; not supported yet.
  if (isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.removeOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used.
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

// Helper function for selectTruncOrPtrToInt and selectAnyext.
// Returns true if DstRC lives on a floating register class and
// SrcRC lives on a 128-bit vector class.
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}

bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
    const TargetRegisterClass *DstRC, const unsigned SrcReg,
    const TargetRegisterClass *SrcRC) const {

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
                      << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If this is a truncation of a value that lives in a vector class and goes
  // into a floating-point class, just replace it with a copy, as we are able
  // to select it as a regular move.
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done.
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(16)) &&
         "8=>16 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(64)) &&
         "8=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(64)) &&
         "16=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(32) && DstTy == LLT::scalar(64)) &&
         "32=>64 Zext is handled by tablegen");

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri32;
  else
    return false;

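  // The s1 source is widened below: when the destination is wider than 8
  // bits, an IMPLICIT_DEF plus INSERT_SUBREG places the 8-bit source into a
  // register of the destination width, and the AND against 1 then clears
  // every bit above bit 0.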
  Register DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    Register ImpDefReg =
        MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);

    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
        .addReg(ImpDefReg)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If this is an ANY_EXT of a value that lives in a floating-point class
  // and goes into a vector class, just replace it with a copy, as we are
  // able to select it as a regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());

  Register LHS = I.getOperand(2).getReg();
  Register RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr),
               I.getOperand(0).getReg())
           .addImm(CC);

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectFCmp(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");

  Register LhsReg = I.getOperand(2).getReg();
  Register RhsReg = I.getOperand(3).getReg();
  CmpInst::Predicate Predicate =
      (CmpInst::Predicate)I.getOperand(1).getPredicate();

  // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
  static const uint16_t SETFOpcTable[2][3] = {
      {X86::COND_E, X86::COND_NP, X86::AND8rr},
      {X86::COND_NE, X86::COND_P, X86::OR8rr}};
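  // For example, "fcmp oeq" is checked as SETE && SETNP (equal and not
  // unordered), while "fcmp une" is SETNE || SETP.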
  const uint16_t *SETFOpc = nullptr;
  switch (Predicate) {
  default:
    break;
  case CmpInst::FCMP_OEQ:
    SETFOpc = &SETFOpcTable[0][0];
    break;
  case CmpInst::FCMP_UNE:
    SETFOpc = &SETFOpcTable[1][0];
    break;
  }

  // Compute the opcode for the CMP instruction.
  unsigned OpCmp;
  LLT Ty = MRI.getType(LhsReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 32:
    OpCmp = X86::UCOMISSrr;
    break;
  case 64:
    OpCmp = X86::UCOMISDrr;
    break;
  }

  Register ResultReg = I.getOperand(0).getReg();
  RBI.constrainGenericRegister(
      ResultReg,
      *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
  if (SETFOpc) {
    MachineInstr &CmpInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
             .addReg(LhsReg)
             .addReg(RhsReg);

    Register FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
    Register FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
    MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg1)
                              .addImm(SETFOpc[0]);
    MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg2)
                              .addImm(SETFOpc[1]);
    MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[2]), ResultReg)
                              .addReg(FlagReg1)
                              .addReg(FlagReg2);
    constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set1, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set2, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set3, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
  assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");

  if (SwapArgs)
    std::swap(LhsReg, RhsReg);

  // Emit a compare of LHS/RHS.
  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LhsReg)
           .addReg(RhsReg);

  MachineInstr &Set =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr),
               ResultReg)
           .addImm(CC);
  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(Set, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectUAddSub(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE ||
          I.getOpcode() == TargetOpcode::G_UADDO ||
          I.getOpcode() == TargetOpcode::G_USUBE ||
          I.getOpcode() == TargetOpcode::G_USUBO) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register CarryOutReg = I.getOperand(1).getReg();
  const Register Op0Reg = I.getOperand(2).getReg();
  const Register Op1Reg = I.getOperand(3).getReg();
  bool IsSub = I.getOpcode() == TargetOpcode::G_USUBE ||
               I.getOpcode() == TargetOpcode::G_USUBO;
  bool HasCarryIn = I.getOpcode() == TargetOpcode::G_UADDE ||
                    I.getOpcode() == TargetOpcode::G_USUBE;

  const LLT DstTy = MRI.getType(DstReg);
  assert(DstTy.isScalar() && "selectUAddSub only supported for scalar types");

  // TODO: Handle immediate argument variants?
  unsigned OpADC, OpADD, OpSBB, OpSUB;
  switch (DstTy.getSizeInBits()) {
  case 8:
    OpADC = X86::ADC8rr;
    OpADD = X86::ADD8rr;
    OpSBB = X86::SBB8rr;
    OpSUB = X86::SUB8rr;
    break;
  case 16:
    OpADC = X86::ADC16rr;
    OpADD = X86::ADD16rr;
    OpSBB = X86::SBB16rr;
    OpSUB = X86::SUB16rr;
    break;
  case 32:
    OpADC = X86::ADC32rr;
    OpADD = X86::ADD32rr;
    OpSBB = X86::SBB32rr;
    OpSUB = X86::SUB32rr;
    break;
  case 64:
    OpADC = X86::ADC64rr;
    OpADD = X86::ADD64rr;
    OpSBB = X86::SBB64rr;
    OpSUB = X86::SUB64rr;
    break;
  default:
    llvm_unreachable("selectUAddSub unsupported type.");
  }

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);

  unsigned Opcode = IsSub ? OpSUB : OpADD;

  // G_UADDE/G_USUBE - find the CarryIn def instruction.
  if (HasCarryIn) {
    Register CarryInReg = I.getOperand(4).getReg();
    MachineInstr *Def = MRI.getVRegDef(CarryInReg);
    while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
      CarryInReg = Def->getOperand(1).getReg();
      Def = MRI.getVRegDef(CarryInReg);
    }

    // TODO: handle more CF-generating instructions.
    if (Def->getOpcode() == TargetOpcode::G_UADDE ||
        Def->getOpcode() == TargetOpcode::G_UADDO ||
        Def->getOpcode() == TargetOpcode::G_USUBE ||
        Def->getOpcode() == TargetOpcode::G_USUBO) {
      // Carry set by the previous ADD/SUB.
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY),
              X86::EFLAGS)
          .addReg(CarryInReg);

      if (!RBI.constrainGenericRegister(CarryInReg, *DstRC, MRI))
        return false;

      Opcode = IsSub ? OpSBB : OpADC;
    } else if (auto val = getIConstantVRegVal(CarryInReg, MRI)) {
      // The carry is a constant; only 0 is supported.
      if (*val != 0)
        return false;

      Opcode = IsSub ? OpSUB : OpADD;
    } else
      return false;
  }

  MachineInstr &Inst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(Inst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, *DstRC, MRI))
    return false;

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // Meanwhile, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not an extract of a subvector.

  if (Index == 0) {
    // Replace by an extract-subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to the X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}

bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types.
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}

bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  const Register InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // Meanwhile, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not an insert of a subvector.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by a subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to the X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();

  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split into extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  Register SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst))
      return false;
  }

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
          I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
         "unexpected instruction");

  // Split into inserts.
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first source, use an insert-subreg.
  Register DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    Register Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                                .addReg(DefReg);

  if (!select(CopyInst))
    return false;

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

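  // Lower to a TEST8ri of bit 0 of the condition register, followed by a
  // JCC_1 that branches to the destination block on COND_NE.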
  const Register CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))
      .addMBB(DestMBB)
      .addImm(X86::COND_NE);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const Register DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  Align Alignment = Align(DstTy.getSizeInBytes());
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc =
      getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);

  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under the X86-64 non-small code model, GV (and friends) are 64 bits,
    // so they cannot be folded into immediate fields.

    Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        LLT::pointer(0, MF.getDataLayout().getPointerSizeInBits()), Alignment);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);

  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
    // Handle the case when globals fit in our immediate field.
    // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
      // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
      // In DAGISel, the code that initializes it is generated by the CGBR
      // pass.
      return false; // TODO: support this mode.
    } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  } else
    return false;

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  Register DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}

bool X86InstructionSelector::selectMulDivRem(MachineInstr &I,
                                             MachineRegisterInfo &MRI,
                                             MachineFunction &MF) const {
  // The implementation of this function is adapted from X86FastISel.
  assert((I.getOpcode() == TargetOpcode::G_MUL ||
          I.getOpcode() == TargetOpcode::G_SMULH ||
          I.getOpcode() == TargetOpcode::G_UMULH ||
          I.getOpcode() == TargetOpcode::G_SDIV ||
          I.getOpcode() == TargetOpcode::G_SREM ||
          I.getOpcode() == TargetOpcode::G_UDIV ||
          I.getOpcode() == TargetOpcode::G_UREM) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register Op1Reg = I.getOperand(1).getReg();
  const Register Op2Reg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
         "Arguments and return value types must match");

  const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);
  if (!RegRB || RegRB->getID() != X86::GPRRegBankID)
    return false;

  const static unsigned NumTypes = 4; // i8, i16, i32, i64
  const static unsigned NumOps = 7;   // SDiv/SRem/UDiv/URem/Mul/SMulH/UMulH
  const static bool S = true;         // IsSigned
  const static bool U = false;        // !IsSigned
  const static unsigned Copy = TargetOpcode::COPY;

  // For the X86 IDIV instruction, in most cases the dividend
  // (numerator) must be in a specific register pair highreg:lowreg,
  // producing the quotient in lowreg and the remainder in highreg.
  // For most data types, to set up the instruction, the dividend is
  // copied into lowreg, and lowreg is sign-extended into highreg. The
  // exception is i8, where the dividend is defined as a single register rather
  // than a register pair, and we therefore directly sign-extend the dividend
  // into lowreg, instead of copying, and ignore the highreg.
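  //
  // For example, per the i32 row of the table below, a 32-bit G_SDIV
  // selects to:
  //   $eax = COPY %op1 ; CDQ ; IDIV32r %op2 ; %dst = COPY $eax
  // with the remainder left in EDX for G_SREM.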
  const static struct MulDivRemEntry {
    // The following portion depends only on the data type.
    unsigned SizeInBits;
    unsigned LowInReg;  // low part of the register pair
    unsigned HighInReg; // high part of the register pair
    // The following portion depends on both the data type and the operation.
    struct MulDivRemResult {
      unsigned OpMulDivRem;  // The specific MUL/DIV opcode to use.
      unsigned OpSignExtend; // Opcode for sign-extending lowreg into
                             // highreg, or copying a zero into highreg.
      unsigned OpCopy;       // Opcode for copying dividend into lowreg, or
                             // zero/sign-extending into lowreg for i8.
      unsigned ResultReg;    // Register containing the desired result.
      bool IsOpSigned;       // Whether to use signed or unsigned form.
    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
      {8,
       X86::AX,
       0,
       {
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U},  // UDiv
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U},  // URem
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AL, S}, // Mul
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SMulH
           {X86::MUL8r, 0, X86::MOVZX16rr8, X86::AH, U},  // UMulH
       }},                                                // i8
      {16,
       X86::AX,
       X86::DX,
       {
           {X86::IDIV16r, X86::CWD, Copy, X86::AX, S},     // SDiv
           {X86::IDIV16r, X86::CWD, Copy, X86::DX, S},     // SRem
           {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U},  // UDiv
           {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U},  // URem
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::AX, S}, // Mul
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::DX, S}, // SMulH
           {X86::MUL16r, X86::MOV32r0, Copy, X86::DX, U},  // UMulH
       }},                                                 // i16
      {32,
       X86::EAX,
       X86::EDX,
       {
           {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S},     // SDiv
           {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S},     // SRem
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U},  // UDiv
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U},  // URem
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EAX, S}, // Mul
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EDX, S}, // SMulH
           {X86::MUL32r, X86::MOV32r0, Copy, X86::EDX, U},  // UMulH
       }},                                                  // i32
      {64,
       X86::RAX,
       X86::RDX,
       {
           {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S},     // SDiv
           {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S},     // SRem
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U},  // UDiv
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U},  // URem
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RAX, S}, // Mul
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RDX, S}, // SMulH
           {X86::MUL64r, X86::MOV32r0, Copy, X86::RDX, U},  // UMulH
       }},                                                  // i64
  };

  auto OpEntryIt = llvm::find_if(OpTable, [RegTy](const MulDivRemEntry &El) {
    return El.SizeInBits == RegTy.getSizeInBits();
  });
  if (OpEntryIt == std::end(OpTable))
    return false;

  unsigned OpIndex;
  switch (I.getOpcode()) {
  default:
    llvm_unreachable("Unexpected mul/div/rem opcode");
  case TargetOpcode::G_SDIV:
    OpIndex = 0;
    break;
  case TargetOpcode::G_SREM:
    OpIndex = 1;
    break;
  case TargetOpcode::G_UDIV:
    OpIndex = 2;
    break;
  case TargetOpcode::G_UREM:
    OpIndex = 3;
    break;
  case TargetOpcode::G_MUL:
    OpIndex = 4;
    break;
  case TargetOpcode::G_SMULH:
    OpIndex = 5;
    break;
  case TargetOpcode::G_UMULH:
    OpIndex = 6;
    break;
  }

  const MulDivRemEntry &TypeEntry = *OpEntryIt;
  const MulDivRemEntry::MulDivRemResult &OpEntry =
      TypeEntry.ResultTable[OpIndex];

  const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);
  if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  // Move op1 into the low-order input register.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
          TypeEntry.LowInReg)
      .addReg(Op1Reg);

  // Zero-extend or sign-extend into the high-order input register.
  if (OpEntry.OpSignExtend) {
    if (OpEntry.IsOpSigned)
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(OpEntry.OpSignExtend));
    else {
      Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
              Zero32);

      // Copy the zero into the appropriate sub/super/identical physical
      // register. Unfortunately the operations needed are not uniform enough
      // to fit neatly into the table above.
      if (RegTy.getSizeInBits() == 16) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32, 0, X86::sub_16bit);
      } else if (RegTy.getSizeInBits() == 32) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32);
      } else if (RegTy.getSizeInBits() == 64) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
            .addImm(0)
            .addReg(Zero32)
            .addImm(X86::sub_32bit);
      }
    }
  }

  // Generate the DIV/IDIV/MUL/IMUL instruction.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpMulDivRem))
      .addReg(Op2Reg);

  // For i8 remainder, we can't reference AH directly, as we'll end
  // up with bogus copies like %r9b = COPY %ah. Reference AX
  // instead to avoid AH references in a REX-prefixed instruction.
  //
  // The current assumption of the fast register allocator is that isel
  // won't generate explicit references to the GR8_NOREX registers. If
  // the allocator and/or the backend get enhanced to be more robust in
  // that regard, this can be, and should be, removed.
1764 if (OpEntry.ResultReg == X86::AH && STI.is64Bit()) {
1765 Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
1766 Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
1767 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
1768 .addReg(X86::AX);
1769
1770 // Shift AX right by 8 bits instead of using AH.
1771 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
1772 ResultSuperReg)
1773 .addReg(SourceSuperReg)
1774 .addImm(8);
1775
1776 // Now reference the 8-bit subreg of the result.
1777 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
1778 DstReg)
1779 .addReg(ResultSuperReg, 0, X86::sub_8bit);
1780 } else {
1781 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
1782 DstReg)
1783 .addReg(OpEntry.ResultReg);
1784 }
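// [Editor's sketch] For an i8 remainder on a 64-bit target, the workaround
// above yields roughly (hypothetical vregs):
//   %src:gr16 = COPY $ax            ; the remainder is AH, the high byte of AX
//   %shr:gr16 = SHR16ri %src, 8     ; shift it down into the low byte
//   %dst:gr8  = COPY %shr.sub_8bit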
1785 I.eraseFromParent();
1786
1787 return true;
1788}
1789
1790bool X86InstructionSelector::selectSelect(MachineInstr &I,
1791 MachineRegisterInfo &MRI,
1792 MachineFunction &MF) const {
1793 GSelect &Sel = cast<GSelect>(I);
1794 unsigned DstReg = Sel.getReg(0);
1795 BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(X86::TEST32rr))
1796 .addReg(Sel.getCondReg())
1797 .addReg(Sel.getCondReg());
1798
1799 unsigned OpCmp;
1800 LLT Ty = MRI.getType(DstReg);
1801 switch (Ty.getSizeInBits()) {
1802 default:
1803 return false;
1804 case 8:
1805 OpCmp = X86::CMOV_GR8;
1806 break;
1807 case 16:
1808 OpCmp = STI.canUseCMOV() ? X86::CMOV16rr : X86::CMOV_GR16;
1809 break;
1810 case 32:
1811 OpCmp = STI.canUseCMOV() ? X86::CMOV32rr : X86::CMOV_GR32;
1812 break;
1813 case 64:
1814 assert(STI.is64Bit() && STI.canUseCMOV());
1815 OpCmp = X86::CMOV64rr;
1816 break;
1817 }
1818 BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(OpCmp), DstReg)
1819 .addReg(Sel.getTrueReg())
1820 .addReg(Sel.getFalseReg())
1821 .addImm(X86::COND_E);
1822
1823 const TargetRegisterClass *DstRC = getRegClass(Ty, DstReg, MRI);
1824 if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
1825 LLVM_DEBUG(dbgs() << "Failed to constrain CMOV\n");
1826 return false;
1827 }
1828
1829 Sel.eraseFromParent();
1830 return true;
1831}
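// [Editor's note, not in the upstream source] Net effect for an i32 G_SELECT
// (hypothetical vregs):
//   TEST32rr %cond, %cond            ; ZF = (%cond == 0)
//   %dst = CMOV32rr %true, %false, COND_E
// CMOVcc keeps its first (tied) operand unless the condition holds, so a zero
// condition (ZF set) selects the false value and a non-zero condition selects
// the true value, matching G_SELECT semantics.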
1832
1833InstructionSelector *
1834llvm::createX86InstructionSelector(const X86TargetMachine &TM,
1835 X86Subtarget &Subtarget,
1836 X86RegisterBankInfo &RBI) {
1837 return new X86InstructionSelector(TM, Subtarget, RBI);
1838}
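// [Editor's sketch, hedged] This factory is the target's entry point into
// GlobalISel instruction selection; a subtarget typically wires it up during
// construction along these lines (the exact call site is an assumption):
//   InstSelector.reset(createX86InstructionSelector(
//       *static_cast<const X86TargetMachine *>(&TM), *this, *RBI));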