//===- X86InstructionSelector.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <tuple>

#define DEBUG_TYPE "X86-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // TODO: remove once this is supported by TableGen-erated instruction
  // selection.
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          Align Alignment) const;

  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectFCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectUAddSub(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectDebugInstr(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                           MachineFunction &MF);
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF);
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
                        MachineFunction &MF) const;
  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const unsigned DstReg,
                          const TargetRegisterClass *DstRC,
                          const unsigned SrcReg,
                          const TargetRegisterClass *SrcRC) const;
  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectMulDivRem(MachineInstr &I, MachineRegisterInfo &MRI,
                       MachineFunction &MF) const;
  bool selectIntrinsicWSideEffects(MachineInstr &I, MachineRegisterInfo &MRI,
                                   MachineFunction &MF) const;

  // Emit an insert-subreg copy and insert it before MachineInstr &I.
  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  // Emit an extract-subreg copy and insert it before MachineInstr &I.
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;

  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
                                         MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return STI.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}

const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}

static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}

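// Note on usage: getSubRegIndex is what makes GPR narrowing cheap. A G_TRUNC
// from s64 to s32, for example, is selected in selectTruncOrPtrToInt below as
// a plain subregister copy, roughly: %dst:gr32 = COPY %src.sub_32bit.
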
static const TargetRegisterClass *getRegClassFromGRPhysReg(Register Reg) {
  assert(Reg.isPhysical());
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}

// FIXME: We need some sort of API in RBI/TRI to allow generic code to
// constrain operands of simple instructions given a TargetRegisterClass
// and LLT
bool X86InstructionSelector::selectDebugInstr(MachineInstr &I,
                                              MachineRegisterInfo &MRI) const {
  for (MachineOperand &MO : I.operands()) {
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (!Reg)
      continue;
    if (Reg.isPhysical())
      continue;
    LLT Ty = MRI.getType(Reg);
    const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
    const TargetRegisterClass *RC =
        dyn_cast_if_present<const TargetRegisterClass *>(RegClassOrBank);
    if (!RC) {
      const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
      RC = getRegClass(Ty, RB);
      if (!RC) {
        LLVM_DEBUG(
            dbgs() << "Warning: DBG_VALUE operand has unexpected size/bank\n");
        break;
      }
    }
    RBI.constrainGenericRegister(Reg, *RC, MRI);
  }

  return true;
}

// Set X86 Opcode and constrain DestReg.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  Register SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstReg.isPhysical()) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering; perform an anyext.
        Register ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!SrcReg.isPhysical() || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
          (SrcReg.isPhysical() &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      SrcReg.isPhysical()) {
    // Change the physical register to perform a truncate.

    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or defs.
  // Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

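// A rough sketch of the physical-destination widening case above: for a copy
// such as $rax = COPY %src(s32), which ABI lowering can produce, selectCopy
// first emits
//   %ext:gr64 = SUBREG_TO_REG 0, %src, %subreg.sub_32bit
// and then rewrites the copy to read %ext, so the register widths agree.
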
bool X86InstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    if (I.isDebugInstr())
      return selectDebugInstr(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, *CoverageInfo))
    return true;

  LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_FCMP:
    return selectFCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_USUBE:
  case TargetOpcode::G_USUBO:
    return selectUAddSub(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectMergeValues(I, MRI, MF);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM:
    return selectMulDivRem(I, MRI, MF);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWSideEffects(I, MRI, MF);
  }

  return false;
}

unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                Align Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt
                       : HasAVX  ? X86::VMOVSSrm_alt
                                 : X86::MOVSSrm_alt)
                    : (HasAVX512 ? X86::VMOVSSZmr
                       : HasAVX  ? X86::VMOVSSmr
                                 : X86::MOVSSmr);
  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt
                       : HasAVX  ? X86::VMOVSDrm_alt
                                 : X86::MOVSDrm_alt)
                    : (HasAVX512 ? X86::VMOVSDZmr
                       : HasAVX  ? X86::VMOVSDmr
                                 : X86::MOVSDmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= Align(16))
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                       : HasAVX512
                           ? X86::VMOVAPSZ128rm_NOVLX
                           : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                       : HasAVX512
                           ? X86::VMOVAPSZ128mr_NOVLX
                           : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                       : HasAVX512
                           ? X86::VMOVUPSZ128rm_NOVLX
                           : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                       : HasAVX512
                           ? X86::VMOVUPSZ128mr_NOVLX
                           : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= Align(32))
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                       : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                   : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                       : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                   : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                       : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                   : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                       : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                   : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= Align(64))
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}

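// Example: a 128-bit vector G_LOAD selects to VMOVAPSrm on an AVX subtarget
// when the memory operand is at least 16-byte aligned, and to VMOVUPSrm when
// it is not; with AVX512+VLX the Z128 variants are chosen instead.
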
// Fill in an address from the given instruction.
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_PTR_ADD) {
    if (auto COff = getIConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}

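// Example: if the address operand of a G_LOAD is defined by
//   %p = G_PTR_ADD %base, 16
// then X86SelectAddress fills AM with Base = %base and Disp = 16, which lets
// selectLoadStoreOp fold the pointer add into the MOV addressing mode.
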
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  assert(I.hasOneMemOperand());
  auto &MemOp = **I.memoperands_begin();
  if (MemOp.isAtomic()) {
    // Note: for unordered operations, we rely on the fact the appropriate MMO
    // is already on the instruction we're mutating, and thus we don't need to
    // make any changes. So long as we select an opcode which is capable of
    // loading or storing the appropriate size atomically, the rest of the
    // backend is required to respect the MMO state.
    if (!MemOp.isUnordered()) {
      LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
      return false;
    }
    if (MemOp.getAlign() < Ty.getSizeInBits() / 8) {
      LLVM_DEBUG(dbgs() << "Unaligned atomics not supported yet\n");
      return false;
    }
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlign());
  if (NewOpc == Opc)
    return false;

  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    I.removeOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
    I.removeOperand(1);
    I.removeOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
  if (Ty == LLT::pointer(0, 64))
    return X86::LEA64r;
  else if (Ty == LLT::pointer(0, 32))
    return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  else
    llvm_unreachable("Can't get LEA opcode. Unsupported type.");
}

bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX ||
          Opc == TargetOpcode::G_PTR_ADD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to calculate frame index and GEP
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

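// The LEA built above uses the standard x86 five-part memory operand
// (Base, Scale, Index, Disp, Segment), so a G_PTR_ADD %base, %off becomes
// roughly: %dst = LEA64r %base, 1, %off, 0, $noreg.
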
bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    return false; // TODO: we don't support TLS yet.
  }

  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return false;

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // TODO: The ABI requires an extra load; not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags))
    return false;

  // TODO: This reference is relative to the pic base; not supported yet.
  if (isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.removeOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

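// Example: a 64-bit G_CONSTANT of 42 selects to MOV64ri32, since the value
// fits in a sign-extended 32-bit immediate; only values outside that range
// need the longer MOV64ri encoding.
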
// Helper function for selectTruncOrPtrToInt and selectAnyext.
// Returns true if DstRC lives on a floating register class and
// SrcRC lives on a 128-bit vector class.
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}

bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
    const TargetRegisterClass *DstRC, const unsigned SrcReg,
    const TargetRegisterClass *SrcRC) const {

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
                      << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If this is a truncation of a value that lives in a vector class and goes
  // into a floating-point class, just replace it with a copy, as we are able
  // to select it as a regular move.
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(16)) &&
         "8=>16 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(64)) &&
         "8=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(64)) &&
         "16=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(32) && DstTy == LLT::scalar(64)) &&
         "32=>64 Zext is handled by tablegen");

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri32;
  else
    return false;

  Register DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    Register ImpDefReg =
        MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);

    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
        .addReg(ImpDefReg)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

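// For the s1 source handled above, %dst(s32) = G_ZEXT %src(s1) expands
// roughly to:
//   %imp:gr32 = IMPLICIT_DEF
//   %def:gr32 = INSERT_SUBREG %imp, %src, %subreg.sub_8bit
//   %dst:gr32 = AND32ri %def, 1
// i.e. the boolean is widened and then masked down to its low bit.
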
bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If this is an ANY_EXT of a value that lives in a floating-point class
  // and goes into a vector class, just replace it with a copy, as we are
  // able to select it as a regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());

  Register LHS = I.getOperand(2).getReg();
  Register RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                   TII.get(X86::SETCCr),
                                   I.getOperand(0).getReg())
                               .addImm(CC);

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

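// Example: %res(s8) = G_ICMP intpred(slt), %a(s32), %b(s32) becomes roughly
//   CMP32rr %a, %b            (implicit-def $eflags)
//   %res:gr8 = SETCCr X86::COND_L
// getX86ConditionCode may also request swapped operands for predicates that
// have no direct condition code.
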
bool X86InstructionSelector::selectFCmp(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");

  Register LhsReg = I.getOperand(2).getReg();
  Register RhsReg = I.getOperand(3).getReg();
  CmpInst::Predicate Predicate =
      (CmpInst::Predicate)I.getOperand(1).getPredicate();

  // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
  static const uint16_t SETFOpcTable[2][3] = {
      {X86::COND_E, X86::COND_NP, X86::AND8rr},
      {X86::COND_NE, X86::COND_P, X86::OR8rr}};
  const uint16_t *SETFOpc = nullptr;
  switch (Predicate) {
  default:
    break;
  case CmpInst::FCMP_OEQ:
    SETFOpc = &SETFOpcTable[0][0];
    break;
  case CmpInst::FCMP_UNE:
    SETFOpc = &SETFOpcTable[1][0];
    break;
  }

  // Compute the opcode for the CMP instruction.
  unsigned OpCmp;
  LLT Ty = MRI.getType(LhsReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 32:
    OpCmp = X86::UCOMISSrr;
    break;
  case 64:
    OpCmp = X86::UCOMISDrr;
    break;
  }

  Register ResultReg = I.getOperand(0).getReg();
  RBI.constrainGenericRegister(
      ResultReg,
      *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
  if (SETFOpc) {
    MachineInstr &CmpInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
             .addReg(LhsReg)
             .addReg(RhsReg);

    Register FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
    Register FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
    MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg1)
                              .addImm(SETFOpc[0]);
    MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg2)
                              .addImm(SETFOpc[1]);
    MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[2]), ResultReg)
                             .addReg(FlagReg1)
                             .addReg(FlagReg2);
    constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set1, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set2, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set3, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
  assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");

  if (SwapArgs)
    std::swap(LhsReg, RhsReg);

  // Emit a compare of LHS/RHS.
  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LhsReg)
           .addReg(RhsReg);

  MachineInstr &Set =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr),
               ResultReg)
           .addImm(CC);
  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(Set, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

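// Why FCMP_OEQ/FCMP_UNE take the three-instruction path above: UCOMISS and
// UCOMISD report an unordered result (a NaN operand) by setting ZF, PF and
// CF together, so "ordered equal" must check both COND_E and COND_NP and
// combine them with AND8rr, while "unordered not-equal" ORs the dual pair.
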
bool X86InstructionSelector::selectUAddSub(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE ||
          I.getOpcode() == TargetOpcode::G_UADDO ||
          I.getOpcode() == TargetOpcode::G_USUBE ||
          I.getOpcode() == TargetOpcode::G_USUBO) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register CarryOutReg = I.getOperand(1).getReg();
  const Register Op0Reg = I.getOperand(2).getReg();
  const Register Op1Reg = I.getOperand(3).getReg();
  bool IsSub = I.getOpcode() == TargetOpcode::G_USUBE ||
               I.getOpcode() == TargetOpcode::G_USUBO;
  bool HasCarryIn = I.getOpcode() == TargetOpcode::G_UADDE ||
                    I.getOpcode() == TargetOpcode::G_USUBE;

  const LLT DstTy = MRI.getType(DstReg);
  assert(DstTy.isScalar() && "selectUAddSub only supported for scalar types");

  // TODO: Handle immediate argument variants?
  unsigned OpADC, OpADD, OpSBB, OpSUB;
  switch (DstTy.getSizeInBits()) {
  case 8:
    OpADC = X86::ADC8rr;
    OpADD = X86::ADD8rr;
    OpSBB = X86::SBB8rr;
    OpSUB = X86::SUB8rr;
    break;
  case 16:
    OpADC = X86::ADC16rr;
    OpADD = X86::ADD16rr;
    OpSBB = X86::SBB16rr;
    OpSUB = X86::SUB16rr;
    break;
  case 32:
    OpADC = X86::ADC32rr;
    OpADD = X86::ADD32rr;
    OpSBB = X86::SBB32rr;
    OpSUB = X86::SUB32rr;
    break;
  case 64:
    OpADC = X86::ADC64rr;
    OpADD = X86::ADD64rr;
    OpSBB = X86::SBB64rr;
    OpSUB = X86::SUB64rr;
    break;
  default:
    llvm_unreachable("selectUAddSub unsupported type.");
  }

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);

  unsigned Opcode = IsSub ? OpSUB : OpADD;

  // G_UADDE/G_USUBE - find CarryIn def instruction.
  if (HasCarryIn) {
    Register CarryInReg = I.getOperand(4).getReg();
    MachineInstr *Def = MRI.getVRegDef(CarryInReg);
    while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
      CarryInReg = Def->getOperand(1).getReg();
      Def = MRI.getVRegDef(CarryInReg);
    }

    // TODO - handle more CF generating instructions
    if (Def->getOpcode() == TargetOpcode::G_UADDE ||
        Def->getOpcode() == TargetOpcode::G_UADDO ||
        Def->getOpcode() == TargetOpcode::G_USUBE ||
        Def->getOpcode() == TargetOpcode::G_USUBO) {
      // Carry was set by a previous ADD/SUB.
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY),
              X86::EFLAGS)
          .addReg(CarryInReg);

      if (!RBI.constrainGenericRegister(CarryInReg, *DstRC, MRI))
        return false;

      Opcode = IsSub ? OpSBB : OpADC;
    } else if (auto val = getIConstantVRegVal(CarryInReg, MRI)) {
      // Carry is constant; support only 0.
      if (*val != 0)
        return false;

      Opcode = IsSub ? OpSUB : OpADD;
    } else
      return false;
  }

  MachineInstr &Inst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(Inst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, *DstRC, MRI))
    return false;

  I.eraseFromParent();
  return true;
}

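// Carry chaining works through EFLAGS: for G_UADDE/G_USUBE the carry-in
// register is copied into $eflags so ADC/SBB consume it, and the carry-out
// is produced by copying $eflags back into CarryOutReg after the ADD/SUB.
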
bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not extract subvector.

  if (Index == 0) {
    // Replace by extract subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}

bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}

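// Both helpers above turn a subvector insert/extract at offset 0 into a
// plain subregister copy; extracting the low v4s32 of a v8s32, for example,
// becomes roughly %dst:vr128 = COPY %src.sub_xmm, with no shuffle emitted.
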
bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  const Register InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not insert subvector.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();

  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split to extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  Register SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst))
      return false;
  }

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
          I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
         "unexpected instruction");

  // Split to inserts.
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first src use insertSubReg.
  Register DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    Register Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                                .addReg(DefReg);

  if (!select(CopyInst))
    return false;

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

  const Register CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))
      .addMBB(DestMBB)
      .addImm(X86::COND_NE);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

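// The sequence emitted above for G_BRCOND %cond, %bb is roughly:
//   TEST8ri %cond, 1              (sets ZF from the low bit)
//   JCC_1 %bb, X86::COND_NE       (branch if the bit was set)
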
bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const Register DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  Align Alignment = Align(DstTy.getSizeInBytes());
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc =
      getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);

  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
    // they cannot be folded into immediate fields.

    Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        LLT::pointer(0, MF.getDataLayout().getPointerSizeInBits()), Alignment);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);

  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
    // Handle the case when globals fit in our immediate field.
    // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
      // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
      // In DAGISel, the code that initializes it is generated by the CGBR
      // pass.
      return false; // TODO support the mode.
    } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  } else
    return false;

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

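// Example (small code model, x86-64): a G_FCONSTANT double typically becomes
// a RIP-relative constant-pool load, roughly
//   %dst:fr64 = MOVSDrm_alt $rip, 1, $noreg, %const.0, $noreg
// while the large code model first materializes the pool address in a GR64
// register and loads through it.
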
bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  Register DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}

bool X86InstructionSelector::selectMulDivRem(MachineInstr &I,
                                             MachineRegisterInfo &MRI,
                                             MachineFunction &MF) const {
  // The implementation of this function is adapted from X86FastISel.
  assert((I.getOpcode() == TargetOpcode::G_MUL ||
          I.getOpcode() == TargetOpcode::G_SMULH ||
          I.getOpcode() == TargetOpcode::G_UMULH ||
          I.getOpcode() == TargetOpcode::G_SDIV ||
          I.getOpcode() == TargetOpcode::G_SREM ||
          I.getOpcode() == TargetOpcode::G_UDIV ||
          I.getOpcode() == TargetOpcode::G_UREM) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register Op1Reg = I.getOperand(1).getReg();
  const Register Op2Reg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
         "Arguments and return value types must match");

  const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);
  if (!RegRB || RegRB->getID() != X86::GPRRegBankID)
    return false;

  const static unsigned NumTypes = 4; // i8, i16, i32, i64
  const static unsigned NumOps = 7;   // SDiv/SRem/UDiv/URem/Mul/SMulH/UMulH
  const static bool S = true;         // IsSigned
  const static bool U = false;        // !IsSigned
  const static unsigned Copy = TargetOpcode::COPY;

  // For the X86 IDIV instruction, in most cases the dividend
  // (numerator) must be in a specific register pair highreg:lowreg,
  // producing the quotient in lowreg and the remainder in highreg.
  // For most data types, to set up the instruction, the dividend is
  // copied into lowreg, and lowreg is sign-extended into highreg. The
  // exception is i8, where the dividend is defined as a single register rather
  // than a register pair, and we therefore directly sign-extend the dividend
  // into lowreg, instead of copying, and ignore the highreg.
  const static struct MulDivRemEntry {
    // The following portion depends only on the data type.
    unsigned SizeInBits;
    unsigned LowInReg;  // low part of the register pair
    unsigned HighInReg; // high part of the register pair
    // The following portion depends on both the data type and the operation.
    struct MulDivRemResult {
      unsigned OpMulDivRem;  // The specific MUL/DIV opcode to use.
      unsigned OpSignExtend; // Opcode for sign-extending lowreg into
                             // highreg, or copying a zero into highreg.
      unsigned OpCopy;       // Opcode for copying dividend into lowreg, or
                             // zero/sign-extending into lowreg for i8.
      unsigned ResultReg;    // Register containing the desired result.
      bool IsOpSigned;       // Whether to use signed or unsigned form.
    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
      {8,
       X86::AX,
       0,
       {
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U},  // UDiv
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U},  // URem
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AL, S}, // Mul
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SMulH
           {X86::MUL8r, 0, X86::MOVZX16rr8, X86::AH, U},  // UMulH
       }},                                                // i8
      {16,
       X86::AX,
       X86::DX,
       {
           {X86::IDIV16r, X86::CWD, Copy, X86::AX, S},     // SDiv
           {X86::IDIV16r, X86::CWD, Copy, X86::DX, S},     // SRem
           {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U},  // UDiv
           {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U},  // URem
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::AX, S}, // Mul
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::DX, S}, // SMulH
           {X86::MUL16r, X86::MOV32r0, Copy, X86::DX, U},  // UMulH
       }},                                                 // i16
      {32,
       X86::EAX,
       X86::EDX,
       {
           {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S},     // SDiv
           {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S},     // SRem
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U},  // UDiv
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U},  // URem
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EAX, S}, // Mul
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EDX, S}, // SMulH
           {X86::MUL32r, X86::MOV32r0, Copy, X86::EDX, U},  // UMulH
       }},                                                  // i32
      {64,
       X86::RAX,
       X86::RDX,
       {
           {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S},     // SDiv
           {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S},     // SRem
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U},  // UDiv
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U},  // URem
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RAX, S}, // Mul
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RDX, S}, // SMulH
           {X86::MUL64r, X86::MOV32r0, Copy, X86::RDX, U},  // UMulH
       }},                                                  // i64
  };

  auto OpEntryIt = llvm::find_if(OpTable, [RegTy](const MulDivRemEntry &El) {
    return El.SizeInBits == RegTy.getSizeInBits();
  });
  if (OpEntryIt == std::end(OpTable))
    return false;

  unsigned OpIndex;
  switch (I.getOpcode()) {
  default:
    llvm_unreachable("Unexpected mul/div/rem opcode");
  case TargetOpcode::G_SDIV:
    OpIndex = 0;
    break;
  case TargetOpcode::G_SREM:
    OpIndex = 1;
    break;
  case TargetOpcode::G_UDIV:
    OpIndex = 2;
    break;
  case TargetOpcode::G_UREM:
    OpIndex = 3;
    break;
  case TargetOpcode::G_MUL:
    OpIndex = 4;
    break;
  case TargetOpcode::G_SMULH:
    OpIndex = 5;
    break;
  case TargetOpcode::G_UMULH:
    OpIndex = 6;
    break;
  }

  const MulDivRemEntry &TypeEntry = *OpEntryIt;
  const MulDivRemEntry::MulDivRemResult &OpEntry =
      TypeEntry.ResultTable[OpIndex];

  const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);
  if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  // Move op1 into low-order input register.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
          TypeEntry.LowInReg)
      .addReg(Op1Reg);

  // Zero-extend or sign-extend into high-order input register.
  if (OpEntry.OpSignExtend) {
    if (OpEntry.IsOpSigned)
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(OpEntry.OpSignExtend));
    else {
      Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
              Zero32);

      // Copy the zero into the appropriate sub/super/identical physical
      // register. Unfortunately the operations needed are not uniform enough
      // to fit neatly into the table above.
      if (RegTy.getSizeInBits() == 16) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32, 0, X86::sub_16bit);
      } else if (RegTy.getSizeInBits() == 32) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32);
      } else if (RegTy.getSizeInBits() == 64) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
            .addImm(0)
            .addReg(Zero32)
            .addImm(X86::sub_32bit);
      }
    }
  }

  // Generate the DIV/IDIV/MUL/IMUL instruction.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpMulDivRem))
      .addReg(Op2Reg);

  // For i8 remainder, we can't reference ah directly, as we'll end
  // up with bogus copies like %r9b = COPY %ah. Reference ax
  // instead to prevent ah references in a rex instruction.
  //
  // The current assumption of the fast register allocator is that isel
  // won't generate explicit references to the GR8_NOREX registers. If
  // the allocator and/or the backend get enhanced to be more robust in
  // that regard, this can be, and should be, removed.
  if (OpEntry.ResultReg == X86::AH && STI.is64Bit()) {
    Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
        .addReg(X86::AX);

    // Shift AX right by 8 bits instead of using AH.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
            ResultSuperReg)
        .addReg(SourceSuperReg)
        .addImm(8);

    // Now reference the 8-bit subreg of the result.
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::SUBREG_TO_REG))
        .addDef(DstReg)
        .addImm(0)
        .addReg(ResultSuperReg)
        .addImm(X86::sub_8bit);
  } else {
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(OpEntry.ResultReg);
  }
  I.eraseFromParent();

  return true;
}

1792bool X86InstructionSelector::selectIntrinsicWSideEffects(
1793 MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) const {
1794
1795 assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
1796 "unexpected instruction");
1797
1798 if (I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
1799 return false;
1800
1801 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TRAP));
1802
1803 I.eraseFromParent();
1804 return true;
1805}
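// For example, a G_INTRINSIC_W_SIDE_EFFECTS carrying @llvm.trap becomes a
// bare X86 TRAP instruction (the ud2 opcode); any other intrinsic is
// rejected here and selection of the instruction fails.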
1806
1807InstructionSelector *
1808llvm::createX86InstructionSelector(const X86TargetMachine &TM,
1809 X86Subtarget &Subtarget,
1810 X86RegisterBankInfo &RBI) {
1811 return new X86InstructionSelector(TM, Subtarget, RBI);
1812}
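// Usage sketch (assumed wiring, mirroring other GlobalISel targets): the
// X86Subtarget constructor creates this selector once and owns it, e.g.
//   InstSelector.reset(createX86InstructionSelector(TM, *this, *RBI));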