LLVM 23.0.0git
X86InstructionSelector.cpp
Go to the documentation of this file.
1//===- X86InstructionSelector.cpp -----------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// X86.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
15#include "X86.h"
16#include "X86InstrBuilder.h"
17#include "X86InstrInfo.h"
18#include "X86RegisterBankInfo.h"
19#include "X86RegisterInfo.h"
20#include "X86Subtarget.h"
21#include "X86TargetMachine.h"
38#include "llvm/IR/DataLayout.h"
39#include "llvm/IR/InstrTypes.h"
40#include "llvm/IR/IntrinsicsX86.h"
42#include "llvm/Support/Debug.h"
46#include <cassert>
47#include <cstdint>
48#include <tuple>
49
50#define DEBUG_TYPE "X86-isel"
51
52using namespace llvm;
53
54namespace {
55
56#define GET_GLOBALISEL_PREDICATE_BITSET
57#include "X86GenGlobalISel.inc"
58#undef GET_GLOBALISEL_PREDICATE_BITSET
59
// Hand-written GlobalISel instruction selector for X86. It first tries the
// TableGen-erated selectImpl() and falls back to the selectXXX() helpers
// below for opcodes not yet covered by patterns.
// NOTE(review): this extraction is missing several declaration lines
// (original lines 80, 100, 102, 104 and 135) — the selectLoadStoreOp,
// selectUnmergeValues, selectMergeValues and getRegClass(LLT, Register, MRI)
// prototypes are truncated below. Verify against upstream before building.
60class X86InstructionSelector : public InstructionSelector {
61public:
62 X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
63 const X86RegisterBankInfo &RBI);
64
65 bool select(MachineInstr &I) override;
66 static const char *getName() { return DEBUG_TYPE; }
67
68private:
69 /// tblgen-erated 'select' implementation, used as the initial selector for
70 /// the patterns that don't require complex C++.
71 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
72
73 // TODO: remove after supported by Tablegen-erated instruction selection.
74 unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
75 Align Alignment) const;
76 // TODO: remove once p0<->i32/i64 matching is available
77 unsigned getPtrLoadStoreOp(const LLT &Ty, const RegisterBank &RB,
78 unsigned Opc) const;
79
  // NOTE(review): line 80 (start of the selectLoadStoreOp prototype) is
  // missing from this extraction.
81 MachineFunction &MF) const;
82 bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
83 MachineFunction &MF) const;
84 bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
85 MachineFunction &MF) const;
86 bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
87 MachineFunction &MF) const;
88 bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
89 MachineFunction &MF) const;
90 bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
91 MachineFunction &MF) const;
92 bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
93 MachineFunction &MF) const;
94 bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
95 MachineFunction &MF) const;
96 bool selectFCmp(MachineInstr &I, MachineRegisterInfo &MRI,
97 MachineFunction &MF) const;
98 bool selectUAddSub(MachineInstr &I, MachineRegisterInfo &MRI,
99 MachineFunction &MF) const;
101 bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  // NOTE(review): lines 102 and 104 (starts of the selectUnmergeValues and
  // selectMergeValues prototypes, per the calls in select()) are missing
  // from this extraction.
103 MachineFunction &MF);
105 MachineFunction &MF);
106 bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
107 MachineFunction &MF) const;
108 bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
109 MachineFunction &MF) const;
110 bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
111 MachineFunction &MF) const;
112 bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
113 const Register DstReg,
114 const TargetRegisterClass *DstRC,
115 const Register SrcReg,
116 const TargetRegisterClass *SrcRC) const;
117 bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
118 MachineFunction &MF) const;
119 bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
120 bool selectMulDivRem(MachineInstr &I, MachineRegisterInfo &MRI,
121 MachineFunction &MF) const;
122 bool selectSelect(MachineInstr &I, MachineRegisterInfo &MRI,
123 MachineFunction &MF) const;
124
125 ComplexRendererFns selectAddr(MachineOperand &Root) const;
126
127 // emit insert subreg instruction and insert it before MachineInstr &I
128 bool emitInsertSubreg(Register DstReg, Register SrcReg, MachineInstr &I,
129 MachineRegisterInfo &MRI, MachineFunction &MF) const;
130 // emit extract subreg instruction and insert it before MachineInstr &I
131 bool emitExtractSubreg(Register DstReg, Register SrcReg, MachineInstr &I,
132 MachineRegisterInfo &MRI, MachineFunction &MF) const;
133
134 const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  // NOTE(review): line 135 (start of the getRegClass(LLT, Register, MRI)
  // overload prototype) is missing from this extraction.
136 MachineRegisterInfo &MRI) const;
137
  // Cached target objects; all owned elsewhere and valid for the selector's
  // lifetime.
138 const X86TargetMachine &TM;
139 const X86Subtarget &STI;
140 const X86InstrInfo &TII;
141 const X86RegisterInfo &TRI;
142 const X86RegisterBankInfo &RBI;
143
144#define GET_GLOBALISEL_PREDICATES_DECL
145#include "X86GenGlobalISel.inc"
146#undef GET_GLOBALISEL_PREDICATES_DECL
147
148#define GET_GLOBALISEL_TEMPORARIES_DECL
149#include "X86GenGlobalISel.inc"
150#undef GET_GLOBALISEL_TEMPORARIES_DECL
151};
152
153} // end anonymous namespace
154
155#define GET_GLOBALISEL_IMPL
156#include "X86GenGlobalISel.inc"
157#undef GET_GLOBALISEL_IMPL
158
// Constructor: caches the subtarget's instruction/register info and runs the
// TableGen-erated predicate/temporaries initializers.
// NOTE(review): the GET_GLOBALISEL_PREDICATES_INIT / TEMPORARIES_INIT
// #define/#undef lines (original lines 164, 166-167, 169) are missing from
// this extraction, leaving the two bare #includes below — verify against
// upstream.
159X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
160 const X86Subtarget &STI,
161 const X86RegisterBankInfo &RBI)
162 : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
163 RBI(RBI),
165#include "X86GenGlobalISel.inc"
168#include "X86GenGlobalISel.inc"
170{
171}
172
173// FIXME: This should be target-independent, inferred from the types declared
174// for each class in the bank.
// Map an (LLT size, register bank) pair to a concrete X86 register class.
// GPR sizes <= 8 all land in GR8; vector-bank classes prefer the AVX-512
// extended (X suffix) classes when available; the PSR bank maps to x87 RFP
// classes. Aborts on any combination not listed.
// NOTE(review): the return-type line (original line 175,
// "const TargetRegisterClass *") is missing from this extraction.
176X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
177 if (RB.getID() == X86::GPRRegBankID) {
178 if (Ty.getSizeInBits() <= 8)
179 return &X86::GR8RegClass;
180 if (Ty.getSizeInBits() == 16)
181 return &X86::GR16RegClass;
182 if (Ty.getSizeInBits() == 32)
183 return &X86::GR32RegClass;
184 if (Ty.getSizeInBits() == 64)
185 return &X86::GR64RegClass;
186 }
187 if (RB.getID() == X86::VECRRegBankID) {
188 if (Ty.getSizeInBits() == 16)
189 return STI.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
190 if (Ty.getSizeInBits() == 32)
191 return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
192 if (Ty.getSizeInBits() == 64)
193 return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
194 if (Ty.getSizeInBits() == 128)
195 return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
196 if (Ty.getSizeInBits() == 256)
197 return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
198 if (Ty.getSizeInBits() == 512)
199 return &X86::VR512RegClass;
200 }
201
202 if (RB.getID() == X86::PSRRegBankID) {
203 if (Ty.getSizeInBits() == 80)
204 return &X86::RFP80RegClass;
205 if (Ty.getSizeInBits() == 64)
206 return &X86::RFP64RegClass;
207 if (Ty.getSizeInBits() == 32)
208 return &X86::RFP32RegClass;
209 }
210
211 llvm_unreachable("Unknown RegBank!");
212}
213
214const TargetRegisterClass *
215X86InstructionSelector::getRegClass(LLT Ty, Register Reg,
216 MachineRegisterInfo &MRI) const {
217 const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
218 return getRegClass(Ty, RegBank);
219}
220
221static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
222 unsigned SubIdx = X86::NoSubRegister;
223 if (RC == &X86::GR32RegClass) {
224 SubIdx = X86::sub_32bit;
225 } else if (RC == &X86::GR16RegClass) {
226 SubIdx = X86::sub_16bit;
227 } else if (RC == &X86::GR8RegClass) {
228 SubIdx = X86::sub_8bit;
229 }
230
231 return SubIdx;
232}
233
// Map a physical GPR to the widest GPR class that contains it, checking from
// 64-bit down to 8-bit. Aborts if the register is not a GPR.
// NOTE(review): the function signature line (original line 234, presumably
// "static const TargetRegisterClass *getRegClassFromGRPhysReg(...)" given
// the call sites in selectCopy) is missing from this extraction.
235 assert(Reg.isPhysical());
236 if (X86::GR64RegClass.contains(Reg))
237 return &X86::GR64RegClass;
238 if (X86::GR32RegClass.contains(Reg))
239 return &X86::GR32RegClass;
240 if (X86::GR16RegClass.contains(Reg))
241 return &X86::GR16RegClass;
242 if (X86::GR8RegClass.contains(Reg))
243 return &X86::GR8RegClass;
244
245 llvm_unreachable("Unknown RegClass for PhysReg!");
246}
247
248// FIXME: We need some sort of API in RBI/TRI to allow generic code to
249// constrain operands of simple instructions given a TargetRegisterClass
250// and LLT
// Constrain every virtual register operand of a debug instruction (e.g.
// DBG_VALUE) to a concrete register class so later passes see selected
// registers. Physical and null registers are left untouched. Always returns
// true: a register whose size/bank has no class is only warned about.
// NOTE(review): original lines 264 (the dyn_cast initializer for RC) and
// 269 (the LLVM_DEBUG( opener) are missing from this extraction.
251bool X86InstructionSelector::selectDebugInstr(MachineInstr &I,
252 MachineRegisterInfo &MRI) const {
253 for (MachineOperand &MO : I.operands()) {
254 if (!MO.isReg())
255 continue;
256 Register Reg = MO.getReg();
257 if (!Reg)
258 continue;
259 if (Reg.isPhysical())
260 continue;
261 LLT Ty = MRI.getType(Reg);
262 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
263 const TargetRegisterClass *RC =
265 if (!RC) {
  // No class assigned yet: derive one from the register bank and type.
266 const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
267 RC = getRegClass(Ty, RB);
268 if (!RC) {
270 dbgs() << "Warning: DBG_VALUE operand has unexpected size/bank\n");
271 break;
272 }
273 }
274 RBI.constrainGenericRegister(Reg, *RC, MRI);
275 }
276
277 return true;
278}
279
280// Set X86 Opcode and constrain DestReg.
280// Set X86 Opcode and constrain DestReg.
// Select a COPY (or copy-like generic op). Handles several physical-register
// special cases produced by ABI lowering: widening GPR copies via
// SUBREG_TO_REG, GPR16<->XMM round trips through a GR32 temporary, and
// truncating copies from wide physical GPRs via subregister substitution.
// For the common virtual-register case it constrains the destination class
// and rewrites the opcode to X86::COPY.
281bool X86InstructionSelector::selectCopy(MachineInstr &I,
282 MachineRegisterInfo &MRI) const {
283 Register DstReg = I.getOperand(0).getReg();
284 const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
285 const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
286
287 Register SrcReg = I.getOperand(1).getReg();
288 const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
289 const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
290
291 if (DstReg.isPhysical()) {
292 assert(I.isCopy() && "Generic operators do not allow physical registers");
293
294 if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
295 DstRegBank.getID() == X86::GPRRegBankID) {
296
297 const TargetRegisterClass *SrcRC =
298 getRegClass(MRI.getType(SrcReg), SrcRegBank);
299 const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);
300
301 if (SrcRC != DstRC) {
302 // This case can be generated by ABI lowering; perform an anyext by
  // wrapping the narrow source in a SUBREG_TO_REG of the wide class.
303 Register ExtSrc = MRI.createVirtualRegister(DstRC);
304 BuildMI(*I.getParent(), I, I.getDebugLoc(),
305 TII.get(TargetOpcode::SUBREG_TO_REG))
306 .addDef(ExtSrc)
307 .addReg(SrcReg)
308 .addImm(getSubRegIndex(SrcRC));
309
310 I.getOperand(1).setReg(ExtSrc);
311 }
312 }
313
314 // Special case GPR16 -> XMM
315 if (SrcSize == 16 && SrcRegBank.getID() == X86::GPRRegBankID &&
316 (DstRegBank.getID() == X86::VECRRegBankID)) {
317
318 const DebugLoc &DL = I.getDebugLoc();
319
320 // Any extend GPR16 -> GPR32
321 Register ExtReg = MRI.createVirtualRegister(&X86::GR32RegClass);
322 BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::SUBREG_TO_REG),
323 ExtReg)
324 .addReg(SrcReg)
325 .addImm(X86::sub_16bit);
326
327 // Copy GR32 -> XMM
328 BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), DstReg)
329 .addReg(ExtReg);
330
331 I.eraseFromParent();
332 }
333
334 // Special case XMM -> GR16
335 if (DstSize == 16 && DstRegBank.getID() == X86::GPRRegBankID &&
336 (SrcRegBank.getID() == X86::VECRRegBankID)) {
337
338 const DebugLoc &DL = I.getDebugLoc();
339
340 // Move XMM to GR32 register.
341 Register Temp32 = MRI.createVirtualRegister(&X86::GR32RegClass);
342 BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), Temp32)
343 .addReg(SrcReg);
344
345 // Extract the lower 16 bits
346 if (Register Dst32 = TRI.getMatchingSuperReg(DstReg, X86::sub_16bit,
347 &X86::GR32RegClass)) {
348 // Optimization for Physical Dst (e.g. AX): Copy to EAX directly.
349 BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), Dst32)
350 .addReg(Temp32);
351 } else {
352 // Handle if there is no super.
353 BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), DstReg)
354 .addReg(Temp32, {}, X86::sub_16bit);
355 }
356
357 I.eraseFromParent();
358 }
359
  // Note: I may have been erased by the special cases above; no member of I
  // is touched past this point on those paths.
360 return true;
361 }
362
363 assert((!SrcReg.isPhysical() || I.isCopy()) &&
364 "No phys reg on generic operators");
365 assert((DstSize == SrcSize ||
366 // Copies are a means to set up initial types; the number of
367 // bits may not exactly match.
368 (SrcReg.isPhysical() &&
369 DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
370 "Copy with different width?!");
371
372 const TargetRegisterClass *DstRC =
373 getRegClass(MRI.getType(DstReg), DstRegBank);
374
375 if (SrcRegBank.getID() == X86::GPRRegBankID &&
376 DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
377 SrcReg.isPhysical()) {
378 // Change the physical register to perform the truncate.
379
380 const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);
381
382 if (DstRC != SrcRC) {
383 I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
384 I.getOperand(1).substPhysReg(SrcReg, TRI);
385 }
386 }
387
388 // No need to constrain SrcReg. It will get constrained when
389 // we hit another of its use or its defs.
390 // Copies do not have constraints.
391 const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
392 if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
393 if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
394 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
395 << " operand\n");
396 return false;
397 }
398 }
399 I.setDesc(TII.get(X86::COPY));
400 return true;
401}
402
// Top-level entry point: select one MachineInstr. Non-generic instructions
// get special handling (COPY, debug instrs) or pass through; generic ops
// first go through the TableGen-erated selectImpl(), then fall back to the
// hand-written selectXXX() dispatch below.
403bool X86InstructionSelector::select(MachineInstr &I) {
404 assert(I.getParent() && "Instruction should be in a basic block!");
405 assert(I.getParent()->getParent() && "Instruction should be in a function!");
406
407 MachineBasicBlock &MBB = *I.getParent();
408 MachineFunction &MF = *MBB.getParent();
409 MachineRegisterInfo &MRI = MF.getRegInfo();
410
411 unsigned Opcode = I.getOpcode();
412 if (!isPreISelGenericOpcode(Opcode) && !I.isPreISelOpcode()) {
413 // Certain non-generic instructions also need some special handling.
414
  // LOAD_STACK_GUARD is not supported here; fail so selection reports it.
415 if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
416 return false;
417
418 if (I.isCopy())
419 return selectCopy(I, MRI);
420
421 if (I.isDebugInstr())
422 return selectDebugInstr(I, MRI);
423
  // Already target-specific: nothing to do.
424 return true;
425 }
426
427 assert(I.getNumOperands() == I.getNumExplicitOperands() &&
428 "Generic instruction has unexpected implicit operands\n");
429
  // Prefer the TableGen-erated patterns.
430 if (selectImpl(I, *CoverageInfo))
431 return true;
432
433 LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));
434
435 // TODO: This should be implemented by tblgen.
436 switch (I.getOpcode()) {
437 default:
438 return false;
439 case TargetOpcode::G_STORE:
440 case TargetOpcode::G_LOAD:
441 return selectLoadStoreOp(I, MRI, MF);
442 case TargetOpcode::G_PTR_ADD:
443 case TargetOpcode::G_FRAME_INDEX:
444 return selectFrameIndexOrGep(I, MRI, MF);
445 case TargetOpcode::G_GLOBAL_VALUE:
446 return selectGlobalValue(I, MRI, MF);
447 case TargetOpcode::G_CONSTANT:
448 return selectConstant(I, MRI, MF);
449 case TargetOpcode::G_FCONSTANT:
450 return materializeFP(I, MRI, MF);
451 case TargetOpcode::G_PTRTOINT:
452 case TargetOpcode::G_TRUNC:
453 return selectTruncOrPtrToInt(I, MRI, MF);
454 case TargetOpcode::G_INTTOPTR:
455 case TargetOpcode::G_FREEZE:
456 return selectCopy(I, MRI);
457 case TargetOpcode::G_ZEXT:
458 return selectZext(I, MRI, MF);
459 case TargetOpcode::G_ANYEXT:
460 return selectAnyext(I, MRI, MF);
461 case TargetOpcode::G_ICMP:
462 return selectCmp(I, MRI, MF);
463 case TargetOpcode::G_FCMP:
464 return selectFCmp(I, MRI, MF);
465 case TargetOpcode::G_UADDE:
466 case TargetOpcode::G_UADDO:
467 case TargetOpcode::G_USUBE:
468 case TargetOpcode::G_USUBO:
469 return selectUAddSub(I, MRI, MF);
470 case TargetOpcode::G_UNMERGE_VALUES:
471 return selectUnmergeValues(I, MRI, MF);
472 case TargetOpcode::G_MERGE_VALUES:
473 case TargetOpcode::G_CONCAT_VECTORS:
474 return selectMergeValues(I, MRI, MF);
475 case TargetOpcode::G_EXTRACT:
476 return selectExtract(I, MRI, MF);
477 case TargetOpcode::G_INSERT:
478 return selectInsert(I, MRI, MF);
479 case TargetOpcode::G_BRCOND:
480 return selectCondBranch(I, MRI, MF);
481 case TargetOpcode::G_IMPLICIT_DEF:
482 case TargetOpcode::G_PHI:
483 return selectImplicitDefOrPHI(I, MRI);
484 case TargetOpcode::G_MUL:
485 case TargetOpcode::G_SMULH:
486 case TargetOpcode::G_UMULH:
487 case TargetOpcode::G_SDIV:
488 case TargetOpcode::G_UDIV:
489 case TargetOpcode::G_SREM:
490 case TargetOpcode::G_UREM:
491 return selectMulDivRem(I, MRI, MF);
492 case TargetOpcode::G_SELECT:
493 return selectSelect(I, MRI, MF);
494 }
495
  // Unreachable: every switch arm above returns.
496 return false;
497}
498
499unsigned X86InstructionSelector::getPtrLoadStoreOp(const LLT &Ty,
500 const RegisterBank &RB,
501 unsigned Opc) const {
502 assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
503 "Only G_STORE and G_LOAD are expected for selection");
504 if (Ty.isPointer() && X86::GPRRegBankID == RB.getID()) {
505 bool IsLoad = (Opc == TargetOpcode::G_LOAD);
506 switch (Ty.getSizeInBits()) {
507 default:
508 break;
509 case 32:
510 return IsLoad ? X86::MOV32rm : X86::MOV32mr;
511 case 64:
512 return IsLoad ? X86::MOV64rm : X86::MOV64mr;
513 }
514 }
515 return Opc;
516}
517
// Map a (type, bank, G_LOAD/G_STORE, alignment) tuple to a concrete X86
// move opcode. Scalars choose between GPR MOVs, scalar SSE/AVX/AVX-512
// moves, and x87 loads/stores by bank; vectors choose aligned vs unaligned
// MOVAPS/MOVUPS variants (VLX/AVX-512/AVX/SSE flavors) by alignment and
// subtarget features. Returns the original \p Opc if nothing matches.
518unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
519 const RegisterBank &RB,
520 unsigned Opc,
521 Align Alignment) const {
522 bool Isload = (Opc == TargetOpcode::G_LOAD);
523 bool HasAVX = STI.hasAVX();
524 bool HasAVX512 = STI.hasAVX512();
525 bool HasVLX = STI.hasVLX();
526
527 if (Ty == LLT::scalar(8)) {
528 if (X86::GPRRegBankID == RB.getID())
529 return Isload ? X86::MOV8rm : X86::MOV8mr;
530 } else if (Ty == LLT::scalar(16)) {
531 if (X86::GPRRegBankID == RB.getID())
532 return Isload ? X86::MOV16rm : X86::MOV16mr;
533 } else if (Ty == LLT::scalar(32)) {
534 if (X86::GPRRegBankID == RB.getID())
535 return Isload ? X86::MOV32rm : X86::MOV32mr;
536 if (X86::VECRRegBankID == RB.getID())
537 return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt :
538 HasAVX ? X86::VMOVSSrm_alt :
539 X86::MOVSSrm_alt)
540 : (HasAVX512 ? X86::VMOVSSZmr :
541 HasAVX ? X86::VMOVSSmr :
542 X86::MOVSSmr);
543 if (X86::PSRRegBankID == RB.getID())
544 return Isload ? X86::LD_Fp32m : X86::ST_Fp32m;
545 } else if (Ty == LLT::scalar(64)) {
546 if (X86::GPRRegBankID == RB.getID())
547 return Isload ? X86::MOV64rm : X86::MOV64mr;
548 if (X86::VECRRegBankID == RB.getID())
549 return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt :
550 HasAVX ? X86::VMOVSDrm_alt :
551 X86::MOVSDrm_alt)
552 : (HasAVX512 ? X86::VMOVSDZmr :
553 HasAVX ? X86::VMOVSDmr :
554 X86::MOVSDmr);
555 if (X86::PSRRegBankID == RB.getID())
556 return Isload ? X86::LD_Fp64m : X86::ST_Fp64m;
557 } else if (Ty == LLT::scalar(80)) {
  // 80-bit values always go through the x87 stack.
558 return Isload ? X86::LD_Fp80m : X86::ST_FpP80m;
559 } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
560 if (Alignment >= Align(16))
561 return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
562 : HasAVX512
563 ? X86::VMOVAPSZ128rm_NOVLX
564 : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
565 : (HasVLX ? X86::VMOVAPSZ128mr
566 : HasAVX512
567 ? X86::VMOVAPSZ128mr_NOVLX
568 : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
569 else
570 return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
571 : HasAVX512
572 ? X86::VMOVUPSZ128rm_NOVLX
573 : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
574 : (HasVLX ? X86::VMOVUPSZ128mr
575 : HasAVX512
576 ? X86::VMOVUPSZ128mr_NOVLX
577 : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
578 } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
579 if (Alignment >= Align(32))
580 return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
581 : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
582 : X86::VMOVAPSYrm)
583 : (HasVLX ? X86::VMOVAPSZ256mr
584 : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
585 : X86::VMOVAPSYmr)
586 else
587 return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
588 : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
589 : X86::VMOVUPSYrm)
590 : (HasVLX ? X86::VMOVUPSZ256mr
591 : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
592 : X86::VMOVUPSYmr);
593 } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
594 if (Alignment >= Align(64))
595 return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
596 else
597 return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
598 }
599 return Opc;
600}
601
602// Fill in an address from the given instruction.
// Folds a pointer-defining instruction into an X86AddressMode: frame index,
// ptr-add with a constant offset that fits in a signed 32-bit displacement,
// global value (small code model only, optionally RIP-relative), or
// constant pool entry. Returns false when the addressing form isn't
// supported; otherwise fills AM and returns true, defaulting to a plain
// base-register address.
// NOTE(review): several lines are missing from this extraction — the
// function signature (original line 603), the FrameIndexBase setup (615),
// the classifyGlobalReference call and PIC checks (638, 641, 645) — verify
// against upstream.
604 const MachineRegisterInfo &MRI,
605 const X86Subtarget &STI, X86AddressMode &AM) {
606 assert(I.getOperand(0).isReg() && "unsupported operand.");
607 assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
608 "unsupported type.");
609
610 switch (I.getOpcode()) {
611 default:
612 break;
613 case TargetOpcode::G_FRAME_INDEX:
614 AM.Base.FrameIndex = I.getOperand(1).getIndex();
616 return true;
617 case TargetOpcode::G_PTR_ADD: {
618 if (auto COff = getIConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
619 int64_t Imm = *COff;
620 if (isInt<32>(Imm)) { // Check for displacement overflow.
621 AM.Disp = static_cast<int32_t>(Imm);
622 AM.Base.Reg = I.getOperand(1).getReg();
623 return true;
624 }
625 }
626 break;
627 }
628 case TargetOpcode::G_GLOBAL_VALUE:
629 case X86::G_WRAPPER_RIP: {
630 auto GV = I.getOperand(1).getGlobal();
631 if (GV->isThreadLocal()) {
632 return false; // TODO: we don't support TLS yet.
633 }
634 // Can't handle alternate code models yet.
635 if (TM.getCodeModel() != CodeModel::Small)
636 return false;
637 AM.GV = GV;
639
640 // TODO: This reference is relative to the pic base. not supported yet.
642 return false;
643
644 if (STI.isPICStyleRIPRel() || AM.GVOpFlags == X86II::MO_GOTPCREL ||
646 // Use rip-relative addressing.
647 assert(AM.Base.Reg == 0 && AM.IndexReg == 0 &&
648 "RIP-relative addresses can't have additional register operands");
649 AM.Base.Reg = X86::RIP;
650 }
651 return true;
652 }
653 case TargetOpcode::G_CONSTANT_POOL: {
654 // TODO: Need a separate move for Large model
655 if (TM.getCodeModel() == CodeModel::Large)
656 return false;
657
658 AM.GVOpFlags = STI.classifyLocalReference(nullptr);
659 if (AM.GVOpFlags == X86II::MO_GOTOFF)
660 AM.Base.Reg = STI.getInstrInfo()->getGlobalBaseReg(I.getMF());
661 else if (STI.is64Bit())
662 AM.Base.Reg = X86::RIP;
663 AM.CP = true;
664 AM.Disp = I.getOperand(1).getIndex();
665 return true;
666 }
667 }
668 // Default behavior.
669 AM.Base.Reg = I.getOperand(0).getReg();
670 return true;
671}
672
// Select G_LOAD/G_STORE: reject unsupported atomics, pick a pointer MOV
// opcode, fold the address-producing instruction into the memory operands
// via X86SelectAddress, and rewrite the operand list into the X86
// (addr[, value]) form.
// NOTE(review): original line 724 is missing from this extraction (the
// closing brace at 723 is present; 724 likely blank or a constrain call) —
// verify against upstream.
673bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
674 MachineRegisterInfo &MRI,
675 MachineFunction &MF) const {
676 unsigned Opc = I.getOpcode();
677
678 assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
679 "Only G_STORE and G_LOAD are expected for selection");
680
681 const Register DefReg = I.getOperand(0).getReg();
682 LLT Ty = MRI.getType(DefReg);
683 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
684
685 assert(I.hasOneMemOperand());
686 auto &MemOp = **I.memoperands_begin();
687 if (MemOp.isAtomic()) {
688 // Note: for unordered operations, we rely on the fact the appropriate MMO
689 // is already on the instruction we're mutating, and thus we don't need to
690 // make any changes. So long as we select an opcode which is capable of
691 // loading or storing the appropriate size atomically, the rest of the
692 // backend is required to respect the MMO state.
693 if (!MemOp.isUnordered()) {
694 LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
695 return false;
696 }
697 if (MemOp.getAlign() < Ty.getSizeInBits() / 8) {
698 LLVM_DEBUG(dbgs() << "Unaligned atomics not supported yet\n");
699 return false;
700 }
701 }
702
  // Only pointer-typed GPR accesses are handled here; anything else is left
  // for other selection paths (NewOpc == Opc means no opcode was chosen).
703 unsigned NewOpc = getPtrLoadStoreOp(Ty, RB, Opc);
704 if (NewOpc == Opc)
705 return false;
706
707 I.setDesc(TII.get(NewOpc));
708 MachineInstrBuilder MIB(MF, I);
709 MachineInstr *Ptr = MRI.getVRegDef(I.getOperand(1).getReg());
710
711 X86AddressMode AM;
712 if (!X86SelectAddress(*Ptr, TM, MRI, STI, AM))
713 return false;
714
715 if (Opc == TargetOpcode::G_LOAD) {
716 I.removeOperand(1);
717 addFullAddress(MIB, AM);
718 } else {
719 // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
720 I.removeOperand(1);
721 I.removeOperand(0);
722 addFullAddress(MIB, AM).addUse(DefReg);
723 }
725 I.addImplicitDefUseOperands(MF);
726 return true;
727}
728
729static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
730 if (Ty == LLT::pointer(0, 64))
731 return X86::LEA64r;
732 else if (Ty == LLT::pointer(0, 32))
733 return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
734 else
735 llvm_unreachable("Can't get LEA opcode. Unsupported type.");
736}
737
// Select G_FRAME_INDEX and G_PTR_ADD by rewriting them into an LEA whose
// address operands encode the frame slot or base+index computation.
// NOTE(review): original line 763 is missing from this extraction
// (presumably the constrainSelectedInstRegOperands call that normally
// precedes the return) — verify against upstream.
738bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
739 MachineRegisterInfo &MRI,
740 MachineFunction &MF) const {
741 unsigned Opc = I.getOpcode();
742
743 assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_PTR_ADD) &&
744 "unexpected instruction");
745
746 const Register DefReg = I.getOperand(0).getReg();
747 LLT Ty = MRI.getType(DefReg);
748
749 // Use LEA to calculate frame index and GEP
750 unsigned NewOpc = getLeaOP(Ty, STI);
751 I.setDesc(TII.get(NewOpc));
752 MachineInstrBuilder MIB(MF, I);
753
754 if (Opc == TargetOpcode::G_FRAME_INDEX) {
  // Operand 1 is already the frame index; complete the address with a
  // zero offset.
755 addOffset(MIB, 0);
756 } else {
  // G_PTR_ADD: reuse the offset register as the LEA index, then turn the
  // original operand slot into the scale immediate (1) and append
  // disp=0, segment=0.
757 MachineOperand &InxOp = I.getOperand(2);
758 I.addOperand(InxOp); // set IndexReg
759 InxOp.ChangeToImmediate(1); // set Scale
760 MIB.addImm(0).addReg(0);
761 }
762
764 return true;
765}
766
// Select G_GLOBAL_VALUE as an LEA over the address mode computed by
// X86SelectAddress (fails for TLS and non-small code models).
// NOTE(review): original line 787 is missing from this extraction
// (presumably the constrainSelectedInstRegOperands call) — verify against
// upstream.
767bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
768 MachineRegisterInfo &MRI,
769 MachineFunction &MF) const {
770 assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
771 "unexpected instruction");
772
773 X86AddressMode AM;
774 if (!X86SelectAddress(I, TM, MRI, STI, AM))
775 return false;
776
777 const Register DefReg = I.getOperand(0).getReg();
778 LLT Ty = MRI.getType(DefReg);
779 unsigned NewOpc = getLeaOP(Ty, STI);
780
781 I.setDesc(TII.get(NewOpc));
782 MachineInstrBuilder MIB(MF, I);
783
  // Drop the global operand and replace it with the full 5-operand X86
  // address (base, scale, index, disp, segment).
784 I.removeOperand(1);
785 addFullAddress(MIB, AM);
786
788 return true;
789}
790
// Select G_CONSTANT on the GPR bank into the narrowest suitable MOVri form.
// For 64-bit values: MOV32ri64 when the value zero-extends from 32 bits,
// MOV64ri32 when it sign-extends, otherwise the full MOV64ri.
// NOTE(review): original line 836 is missing from this extraction
// (presumably the constrainSelectedInstRegOperands call before the return)
// — verify against upstream.
791bool X86InstructionSelector::selectConstant(MachineInstr &I,
792 MachineRegisterInfo &MRI,
793 MachineFunction &MF) const {
794 assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
795 "unexpected instruction");
796
797 const Register DefReg = I.getOperand(0).getReg();
798 LLT Ty = MRI.getType(DefReg);
799
800 if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
801 return false;
802
803 uint64_t Val = 0;
804 if (I.getOperand(1).isCImm()) {
  // Flatten the ConstantInt operand into a plain immediate in place.
805 Val = I.getOperand(1).getCImm()->getZExtValue();
806 I.getOperand(1).ChangeToImmediate(Val);
807 } else if (I.getOperand(1).isImm()) {
808 Val = I.getOperand(1).getImm();
809 } else
810 llvm_unreachable("Unsupported operand type.");
811
812 unsigned NewOpc;
813 switch (Ty.getSizeInBits()) {
814 case 8:
815 NewOpc = X86::MOV8ri;
816 break;
817 case 16:
818 NewOpc = X86::MOV16ri;
819 break;
820 case 32:
821 NewOpc = X86::MOV32ri;
822 break;
823 case 64:
  // Prefer the shorter encodings when the value fits.
824 if (isUInt<32>(Val))
825 NewOpc = X86::MOV32ri64;
826 else if (isInt<32>(Val))
827 NewOpc = X86::MOV64ri32;
828 else
829 NewOpc = X86::MOV64ri;
830 break;
831 default:
832 llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
833 }
834
835 I.setDesc(TII.get(NewOpc));
837 return true;
838}
839
840// Helper function for selectTruncOrPtrToInt and selectAnyext.
841// Returns true if DstRC lives on a floating register class and
842// SrcRC lives on a 128-bit vector class.
843static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
844 const TargetRegisterClass *SrcRC) {
845 return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
846 DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
847 (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
848}
849
850bool X86InstructionSelector::selectTurnIntoCOPY(
851 MachineInstr &I, MachineRegisterInfo &MRI, const Register DstReg,
852 const TargetRegisterClass *DstRC, const Register SrcReg,
853 const TargetRegisterClass *SrcRC) const {
854
855 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
856 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
857 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
858 << " operand\n");
859 return false;
860 }
861 I.setDesc(TII.get(X86::COPY));
862 return true;
863}
864
// Select G_TRUNC/G_PTRTOINT. A VECR->FP "truncation" becomes a plain COPY;
// GPR truncation becomes a subregister COPY, constraining the source class
// to one that supports the needed subregister index. Fails when the banks
// differ or no class/subindex combination works.
865bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
866 MachineRegisterInfo &MRI,
867 MachineFunction &MF) const {
868 assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
869 I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
870 "unexpected instruction");
871
872 const Register DstReg = I.getOperand(0).getReg();
873 const Register SrcReg = I.getOperand(1).getReg();
874
875 const LLT DstTy = MRI.getType(DstReg);
876 const LLT SrcTy = MRI.getType(SrcReg);
877
878 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
879 const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
880
881 if (DstRB.getID() != SrcRB.getID()) {
882 LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
883 << " input/output on different banks\n");
884 return false;
885 }
886
887 const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
888 const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);
889
890 if (!DstRC || !SrcRC)
891 return false;
892
893 // If that's truncation of the value that lives on the vector class and goes
894 // into the floating class, just replace it with copy, as we are able to
895 // select it as a regular move.
896 if (canTurnIntoCOPY(DstRC, SrcRC))
897 return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);
898
899 if (DstRB.getID() != X86::GPRRegBankID)
900 return false;
901
902 unsigned SubIdx;
903 if (DstRC == SrcRC) {
904 // Nothing to be done
905 SubIdx = X86::NoSubRegister;
906 } else if (DstRC == &X86::GR32RegClass) {
907 SubIdx = X86::sub_32bit;
908 } else if (DstRC == &X86::GR16RegClass) {
909 SubIdx = X86::sub_16bit;
910 } else if (DstRC == &X86::GR8RegClass) {
911 SubIdx = X86::sub_8bit;
912 } else {
913 return false;
914 }
915
  // Narrow the source class to one whose registers all have the required
  // subregister (must be done before constraining SrcReg).
916 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);
917
918 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
919 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
920 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
921 << "\n");
922 return false;
923 }
924
925 I.getOperand(1).setSubReg(SubIdx);
926
927 I.setDesc(TII.get(X86::COPY));
928 return true;
929}
930
// Select G_ZEXT from s1 only (all wider zexts are handled by TableGen
// patterns, as the asserts document). The s1 source lives in an 8-bit
// register; for wider destinations it is first placed into a full-width
// register via IMPLICIT_DEF + INSERT_SUBREG, then masked with AND dst, 1.
// NOTE(review): original line 990 is missing from this extraction
// (presumably constrainSelectedInstRegOperands on AndInst) — verify against
// upstream.
931bool X86InstructionSelector::selectZext(MachineInstr &I,
932 MachineRegisterInfo &MRI,
933 MachineFunction &MF) const {
934 assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");
935
936 const Register DstReg = I.getOperand(0).getReg();
937 const Register SrcReg = I.getOperand(1).getReg();
938
939 const LLT DstTy = MRI.getType(DstReg);
940 const LLT SrcTy = MRI.getType(SrcReg);
941
942 assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(16)) &&
943 "8=>16 Zext is handled by tablegen");
944 assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
945 "8=>32 Zext is handled by tablegen");
946 assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
947 "16=>32 Zext is handled by tablegen");
948 assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(64)) &&
949 "8=>64 Zext is handled by tablegen");
950 assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(64)) &&
951 "16=>64 Zext is handled by tablegen");
952 assert(!(SrcTy == LLT::scalar(32) && DstTy == LLT::scalar(64)) &&
953 "32=>64 Zext is handled by tablegen");
954
955 if (SrcTy != LLT::scalar(1))
956 return false;
957
958 unsigned AndOpc;
959 if (DstTy == LLT::scalar(8))
960 AndOpc = X86::AND8ri;
961 else if (DstTy == LLT::scalar(16))
962 AndOpc = X86::AND16ri;
963 else if (DstTy == LLT::scalar(32))
964 AndOpc = X86::AND32ri;
965 else if (DstTy == LLT::scalar(64))
966 AndOpc = X86::AND64ri32;
967 else
968 return false;
969
970 Register DefReg = SrcReg;
971 if (DstTy != LLT::scalar(8)) {
  // Widen the 8-bit source into a destination-sized register: the upper
  // bits are undefined (IMPLICIT_DEF) but the AND below clears them.
972 Register ImpDefReg =
973 MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
974 BuildMI(*I.getParent(), I, I.getDebugLoc(),
975 TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);
976
977 DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
978 BuildMI(*I.getParent(), I, I.getDebugLoc(),
979 TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
980 .addReg(ImpDefReg)
981 .addReg(SrcReg)
982 .addImm(X86::sub_8bit);
983 }
984
985 MachineInstr &AndInst =
986 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
987 .addReg(DefReg)
988 .addImm(1);
989
991
992 I.eraseFromParent();
993 return true;
994}
995
/// Select a G_ANYEXT: widen SrcReg into DstReg without defining the extra
/// bits. Cross-class FP->vector cases become plain COPYs; GPR cases become
/// either a COPY (same class) or a SUBREG_TO_REG of the narrow value.
bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If that's ANY_EXT of the value that lives on the floating class and goes
  // into the vector class, just replace it with copy, as we are able to select
  // it as a regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  // Only GPR anyext is handled below.
  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  // Same register class on both sides: nothing to widen, just rename.
  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      // NOTE(review): SUBREG_TO_REG conventionally carries a leading 0
      // immediate before the source register, and a line appears to have been
      // lost from this copy of the file around here — verify against upstream.
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}
1049
1050bool X86InstructionSelector::selectCmp(MachineInstr &I,
1051 MachineRegisterInfo &MRI,
1052 MachineFunction &MF) const {
1053 assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");
1054
1055 X86::CondCode CC;
1056 bool SwapArgs;
1057 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
1058 (CmpInst::Predicate)I.getOperand(1).getPredicate());
1059
1060 Register LHS = I.getOperand(2).getReg();
1061 Register RHS = I.getOperand(3).getReg();
1062
1063 if (SwapArgs)
1064 std::swap(LHS, RHS);
1065
1066 unsigned OpCmp;
1067 LLT Ty = MRI.getType(LHS);
1068
1069 switch (Ty.getSizeInBits()) {
1070 default:
1071 return false;
1072 case 8:
1073 OpCmp = X86::CMP8rr;
1074 break;
1075 case 16:
1076 OpCmp = X86::CMP16rr;
1077 break;
1078 case 32:
1079 OpCmp = X86::CMP32rr;
1080 break;
1081 case 64:
1082 OpCmp = X86::CMP64rr;
1083 break;
1084 }
1085
1086 MachineInstr &CmpInst =
1087 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
1088 .addReg(LHS)
1089 .addReg(RHS);
1090
1091 MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1092 TII.get(X86::SETCCr), I.getOperand(0).getReg()).addImm(CC);
1093
1096
1097 I.eraseFromParent();
1098 return true;
1099}
1100
1101bool X86InstructionSelector::selectFCmp(MachineInstr &I,
1102 MachineRegisterInfo &MRI,
1103 MachineFunction &MF) const {
1104 assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");
1105
1106 Register LhsReg = I.getOperand(2).getReg();
1107 Register RhsReg = I.getOperand(3).getReg();
1109 (CmpInst::Predicate)I.getOperand(1).getPredicate();
1110
1111 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
1112 static const uint16_t SETFOpcTable[2][3] = {
1113 {X86::COND_E, X86::COND_NP, X86::AND8rr},
1114 {X86::COND_NE, X86::COND_P, X86::OR8rr}};
1115 const uint16_t *SETFOpc = nullptr;
1116 switch (Predicate) {
1117 default:
1118 break;
1119 case CmpInst::FCMP_OEQ:
1120 SETFOpc = &SETFOpcTable[0][0];
1121 break;
1122 case CmpInst::FCMP_UNE:
1123 SETFOpc = &SETFOpcTable[1][0];
1124 break;
1125 }
1126
1127 assert((LhsReg.isVirtual() && RhsReg.isVirtual()) &&
1128 "Both arguments of FCMP need to be virtual!");
1129 auto *LhsBank = RBI.getRegBank(LhsReg, MRI, TRI);
1130 [[maybe_unused]] auto *RhsBank = RBI.getRegBank(RhsReg, MRI, TRI);
1131 assert((LhsBank == RhsBank) &&
1132 "Both banks assigned to FCMP arguments need to be same!");
1133
1134 // Compute the opcode for the CMP instruction.
1135 unsigned OpCmp;
1136 LLT Ty = MRI.getType(LhsReg);
1137 switch (Ty.getSizeInBits()) {
1138 default:
1139 return false;
1140 case 32:
1141 OpCmp = LhsBank->getID() == X86::PSRRegBankID ? X86::UCOM_FpIr32
1142 : X86::UCOMISSrr;
1143 break;
1144 case 64:
1145 OpCmp = LhsBank->getID() == X86::PSRRegBankID ? X86::UCOM_FpIr64
1146 : X86::UCOMISDrr;
1147 break;
1148 case 80:
1149 OpCmp = X86::UCOM_FpIr80;
1150 break;
1151 }
1152
1153 Register ResultReg = I.getOperand(0).getReg();
1155 ResultReg,
1156 *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
1157 if (SETFOpc) {
1158 MachineInstr &CmpInst =
1159 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
1160 .addReg(LhsReg)
1161 .addReg(RhsReg);
1162
1163 Register FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
1164 Register FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
1165 MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1166 TII.get(X86::SETCCr), FlagReg1).addImm(SETFOpc[0]);
1167 MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1168 TII.get(X86::SETCCr), FlagReg2).addImm(SETFOpc[1]);
1169 MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1170 TII.get(SETFOpc[2]), ResultReg)
1171 .addReg(FlagReg1)
1172 .addReg(FlagReg2);
1177
1178 I.eraseFromParent();
1179 return true;
1180 }
1181
1182 X86::CondCode CC;
1183 bool SwapArgs;
1184 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
1185 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1186
1187 if (SwapArgs)
1188 std::swap(LhsReg, RhsReg);
1189
1190 // Emit a compare of LHS/RHS.
1191 MachineInstr &CmpInst =
1192 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
1193 .addReg(LhsReg)
1194 .addReg(RhsReg);
1195
1196 MachineInstr &Set =
1197 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr), ResultReg).addImm(CC);
1200 I.eraseFromParent();
1201 return true;
1202}
1203
1204bool X86InstructionSelector::selectUAddSub(MachineInstr &I,
1205 MachineRegisterInfo &MRI,
1206 MachineFunction &MF) const {
1207 assert((I.getOpcode() == TargetOpcode::G_UADDE ||
1208 I.getOpcode() == TargetOpcode::G_UADDO ||
1209 I.getOpcode() == TargetOpcode::G_USUBE ||
1210 I.getOpcode() == TargetOpcode::G_USUBO) &&
1211 "unexpected instruction");
1212
1213 auto &CarryMI = cast<GAddSubCarryOut>(I);
1214
1215 const Register DstReg = CarryMI.getDstReg();
1216 const Register CarryOutReg = CarryMI.getCarryOutReg();
1217 const Register Op0Reg = CarryMI.getLHSReg();
1218 const Register Op1Reg = CarryMI.getRHSReg();
1219 bool IsSub = CarryMI.isSub();
1220
1221 const LLT DstTy = MRI.getType(DstReg);
1222 assert(DstTy.isScalar() && "selectUAddSub only supported for scalar types");
1223
1224 // TODO: Handle immediate argument variants?
1225 unsigned OpADC, OpADD, OpSBB, OpSUB;
1226 switch (DstTy.getSizeInBits()) {
1227 case 8:
1228 OpADC = X86::ADC8rr;
1229 OpADD = X86::ADD8rr;
1230 OpSBB = X86::SBB8rr;
1231 OpSUB = X86::SUB8rr;
1232 break;
1233 case 16:
1234 OpADC = X86::ADC16rr;
1235 OpADD = X86::ADD16rr;
1236 OpSBB = X86::SBB16rr;
1237 OpSUB = X86::SUB16rr;
1238 break;
1239 case 32:
1240 OpADC = X86::ADC32rr;
1241 OpADD = X86::ADD32rr;
1242 OpSBB = X86::SBB32rr;
1243 OpSUB = X86::SUB32rr;
1244 break;
1245 case 64:
1246 OpADC = X86::ADC64rr;
1247 OpADD = X86::ADD64rr;
1248 OpSBB = X86::SBB64rr;
1249 OpSUB = X86::SUB64rr;
1250 break;
1251 default:
1252 llvm_unreachable("selectUAddSub unsupported type.");
1253 }
1254
1255 const RegisterBank &CarryRB = *RBI.getRegBank(CarryOutReg, MRI, TRI);
1256 const TargetRegisterClass *CarryRC =
1257 getRegClass(MRI.getType(CarryOutReg), CarryRB);
1258
1259 unsigned Opcode = IsSub ? OpSUB : OpADD;
1260
1261 // G_UADDE/G_USUBE - find CarryIn def instruction.
1262 if (auto CarryInMI = dyn_cast<GAddSubCarryInOut>(&I)) {
1263 Register CarryInReg = CarryInMI->getCarryInReg();
1264 MachineInstr *Def = MRI.getVRegDef(CarryInReg);
1265 while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
1266 CarryInReg = Def->getOperand(1).getReg();
1267 Def = MRI.getVRegDef(CarryInReg);
1268 }
1269
1270 // TODO - handle more CF generating instructions
1271 if (Def->getOpcode() == TargetOpcode::G_UADDE ||
1272 Def->getOpcode() == TargetOpcode::G_UADDO ||
1273 Def->getOpcode() == TargetOpcode::G_USUBE ||
1274 Def->getOpcode() == TargetOpcode::G_USUBO) {
1275 // carry set by prev ADD/SUB.
1276
1277 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::CMP8ri))
1278 .addReg(CarryInReg)
1279 .addImm(1);
1280
1281 if (!RBI.constrainGenericRegister(CarryInReg, *CarryRC, MRI))
1282 return false;
1283
1284 Opcode = IsSub ? OpSBB : OpADC;
1285 } else if (auto val = getIConstantVRegVal(CarryInReg, MRI)) {
1286 // carry is constant, support only 0.
1287 if (*val != 0)
1288 return false;
1289
1290 Opcode = IsSub ? OpSUB : OpADD;
1291 } else
1292 return false;
1293 }
1294
1295 MachineInstr &Inst =
1296 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
1297 .addReg(Op0Reg)
1298 .addReg(Op1Reg);
1299
1300 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr), CarryOutReg)
1302
1304 if (!RBI.constrainGenericRegister(CarryOutReg, *CarryRC, MRI))
1305 return false;
1306
1307 I.eraseFromParent();
1308 return true;
1309}
1310
1311bool X86InstructionSelector::selectExtract(MachineInstr &I,
1312 MachineRegisterInfo &MRI,
1313 MachineFunction &MF) const {
1314 assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
1315 "unexpected instruction");
1316
1317 const Register DstReg = I.getOperand(0).getReg();
1318 const Register SrcReg = I.getOperand(1).getReg();
1319 int64_t Index = I.getOperand(2).getImm();
1320
1321 const LLT DstTy = MRI.getType(DstReg);
1322 const LLT SrcTy = MRI.getType(SrcReg);
1323
1324 // Meanwile handle vector type only.
1325 if (!DstTy.isVector())
1326 return false;
1327
1328 if (Index % DstTy.getSizeInBits() != 0)
1329 return false; // Not extract subvector.
1330
1331 if (Index == 0) {
1332 // Replace by extract subreg copy.
1333 if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
1334 return false;
1335
1336 I.eraseFromParent();
1337 return true;
1338 }
1339
1340 bool HasAVX = STI.hasAVX();
1341 bool HasAVX512 = STI.hasAVX512();
1342 bool HasVLX = STI.hasVLX();
1343
1344 if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
1345 if (HasVLX)
1346 I.setDesc(TII.get(X86::VEXTRACTF32X4Z256rri));
1347 else if (HasAVX)
1348 I.setDesc(TII.get(X86::VEXTRACTF128rri));
1349 else
1350 return false;
1351 } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
1352 if (DstTy.getSizeInBits() == 128)
1353 I.setDesc(TII.get(X86::VEXTRACTF32X4Zrri));
1354 else if (DstTy.getSizeInBits() == 256)
1355 I.setDesc(TII.get(X86::VEXTRACTF64X4Zrri));
1356 else
1357 return false;
1358 } else
1359 return false;
1360
1361 // Convert to X86 VEXTRACT immediate.
1362 Index = Index / DstTy.getSizeInBits();
1363 I.getOperand(2).setImm(Index);
1364
1366 return true;
1367}
1368
1369bool X86InstructionSelector::emitExtractSubreg(Register DstReg, Register SrcReg,
1370 MachineInstr &I,
1371 MachineRegisterInfo &MRI,
1372 MachineFunction &MF) const {
1373 const LLT DstTy = MRI.getType(DstReg);
1374 const LLT SrcTy = MRI.getType(SrcReg);
1375 unsigned SubIdx = X86::NoSubRegister;
1376
1377 if (!DstTy.isVector() || !SrcTy.isVector())
1378 return false;
1379
1380 assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
1381 "Incorrect Src/Dst register size");
1382
1383 if (DstTy.getSizeInBits() == 128)
1384 SubIdx = X86::sub_xmm;
1385 else if (DstTy.getSizeInBits() == 256)
1386 SubIdx = X86::sub_ymm;
1387 else
1388 return false;
1389
1390 const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
1391 const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
1392
1393 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);
1394
1395 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
1396 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
1397 LLVM_DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
1398 return false;
1399 }
1400
1401 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
1402 .addReg(SrcReg, {}, SubIdx);
1403
1404 return true;
1405}
1406
1407bool X86InstructionSelector::emitInsertSubreg(Register DstReg, Register SrcReg,
1408 MachineInstr &I,
1409 MachineRegisterInfo &MRI,
1410 MachineFunction &MF) const {
1411 const LLT DstTy = MRI.getType(DstReg);
1412 const LLT SrcTy = MRI.getType(SrcReg);
1413 unsigned SubIdx = X86::NoSubRegister;
1414
1415 // TODO: support scalar types
1416 if (!DstTy.isVector() || !SrcTy.isVector())
1417 return false;
1418
1419 assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
1420 "Incorrect Src/Dst register size");
1421
1422 if (SrcTy.getSizeInBits() == 128)
1423 SubIdx = X86::sub_xmm;
1424 else if (SrcTy.getSizeInBits() == 256)
1425 SubIdx = X86::sub_ymm;
1426 else
1427 return false;
1428
1429 const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
1430 const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
1431
1432 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
1433 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
1434 LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
1435 return false;
1436 }
1437
1438 Register ImpDefReg = MRI.createVirtualRegister(DstRC);
1439 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::IMPLICIT_DEF),
1440 ImpDefReg);
1441
1442 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::INSERT_SUBREG),
1443 DstReg)
1444 .addReg(ImpDefReg)
1445 .addReg(SrcReg)
1446 .addImm(SubIdx);
1447
1448 return true;
1449}
1450
1451bool X86InstructionSelector::selectInsert(MachineInstr &I,
1452 MachineRegisterInfo &MRI,
1453 MachineFunction &MF) const {
1454 assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");
1455
1456 const Register DstReg = I.getOperand(0).getReg();
1457 const Register SrcReg = I.getOperand(1).getReg();
1458 const Register InsertReg = I.getOperand(2).getReg();
1459 int64_t Index = I.getOperand(3).getImm();
1460
1461 const LLT DstTy = MRI.getType(DstReg);
1462 const LLT InsertRegTy = MRI.getType(InsertReg);
1463
1464 // Meanwile handle vector type only.
1465 if (!DstTy.isVector())
1466 return false;
1467
1468 if (Index % InsertRegTy.getSizeInBits() != 0)
1469 return false; // Not insert subvector.
1470
1471 if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
1472 // Replace by subreg copy.
1473 if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
1474 return false;
1475
1476 I.eraseFromParent();
1477 return true;
1478 }
1479
1480 bool HasAVX = STI.hasAVX();
1481 bool HasAVX512 = STI.hasAVX512();
1482 bool HasVLX = STI.hasVLX();
1483
1484 if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
1485 if (HasVLX)
1486 I.setDesc(TII.get(X86::VINSERTF32X4Z256rri));
1487 else if (HasAVX)
1488 I.setDesc(TII.get(X86::VINSERTF128rri));
1489 else
1490 return false;
1491 } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
1492 if (InsertRegTy.getSizeInBits() == 128)
1493 I.setDesc(TII.get(X86::VINSERTF32X4Zrri));
1494 else if (InsertRegTy.getSizeInBits() == 256)
1495 I.setDesc(TII.get(X86::VINSERTF64X4Zrri));
1496 else
1497 return false;
1498 } else
1499 return false;
1500
1501 // Convert to X86 VINSERT immediate.
1502 Index = Index / InsertRegTy.getSizeInBits();
1503
1504 I.getOperand(3).setImm(Index);
1505
1507 return true;
1508}
1509
1510bool X86InstructionSelector::selectUnmergeValues(
1511 MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
1512 assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
1513 "unexpected instruction");
1514
1515 // Split to extracts.
1516 unsigned NumDefs = I.getNumOperands() - 1;
1517 Register SrcReg = I.getOperand(NumDefs).getReg();
1518 unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
1519
1520 for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
1521 MachineInstr &ExtrInst =
1522 *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1523 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
1524 .addReg(SrcReg)
1525 .addImm(Idx * DefSize);
1526
1527 if (!select(ExtrInst))
1528 return false;
1529 }
1530
1531 I.eraseFromParent();
1532 return true;
1533}
1534
1535bool X86InstructionSelector::selectMergeValues(
1536 MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
1537 assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
1538 I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
1539 "unexpected instruction");
1540
1541 // Split to inserts.
1542 Register DstReg = I.getOperand(0).getReg();
1543 Register SrcReg0 = I.getOperand(1).getReg();
1544
1545 const LLT DstTy = MRI.getType(DstReg);
1546 const LLT SrcTy = MRI.getType(SrcReg0);
1547 unsigned SrcSize = SrcTy.getSizeInBits();
1548
1549 const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
1550
1551 // For the first src use insertSubReg.
1552 Register DefReg = MRI.createGenericVirtualRegister(DstTy);
1553 MRI.setRegBank(DefReg, RegBank);
1554 if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
1555 return false;
1556
1557 for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
1558 Register Tmp = MRI.createGenericVirtualRegister(DstTy);
1559 MRI.setRegBank(Tmp, RegBank);
1560
1561 MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1562 TII.get(TargetOpcode::G_INSERT), Tmp)
1563 .addReg(DefReg)
1564 .addReg(I.getOperand(Idx).getReg())
1565 .addImm((Idx - 1) * SrcSize);
1566
1567 DefReg = Tmp;
1568
1569 if (!select(InsertInst))
1570 return false;
1571 }
1572
1573 MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1574 TII.get(TargetOpcode::COPY), DstReg)
1575 .addReg(DefReg);
1576
1577 if (!select(CopyInst))
1578 return false;
1579
1580 I.eraseFromParent();
1581 return true;
1582}
1583
1584bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
1585 MachineRegisterInfo &MRI,
1586 MachineFunction &MF) const {
1587 assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");
1588
1589 const Register CondReg = I.getOperand(0).getReg();
1590 MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
1591
1592 MachineInstr &TestInst =
1593 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
1594 .addReg(CondReg)
1595 .addImm(1);
1596 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))
1597 .addMBB(DestMBB).addImm(X86::COND_NE);
1598
1599 constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);
1600
1601 I.eraseFromParent();
1602 return true;
1603}
1604
1605bool X86InstructionSelector::materializeFP(MachineInstr &I,
1606 MachineRegisterInfo &MRI,
1607 MachineFunction &MF) const {
1608 assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
1609 "unexpected instruction");
1610
1611 // Can't handle alternate code models yet.
1613 if (CM != CodeModel::Small && CM != CodeModel::Large)
1614 return false;
1615
1616 const Register DstReg = I.getOperand(0).getReg();
1617 const LLT DstTy = MRI.getType(DstReg);
1618 const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
1619 // Create the load from the constant pool.
1620 const ConstantFP *CFP = I.getOperand(1).getFPImm();
1621 const auto &DL = MF.getDataLayout();
1622 Align Alignment = DL.getPrefTypeAlign(CFP->getType());
1623 const DebugLoc &DbgLoc = I.getDebugLoc();
1624
1625 unsigned Opc =
1626 getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);
1627
1628 unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment);
1629 MachineInstr *LoadInst = nullptr;
1630 unsigned char OpFlag = STI.classifyLocalReference(nullptr);
1631
1632 if (CM == CodeModel::Large && STI.is64Bit()) {
1633 // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
1634 // they cannot be folded into immediate fields.
1635
1636 Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
1637 BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
1638 .addConstantPoolIndex(CPI, 0, OpFlag);
1639
1640 MachineMemOperand *MMO = MF.getMachineMemOperand(
1642 LLT::pointer(0, DL.getPointerSizeInBits()), Alignment);
1643
1644 LoadInst =
1645 addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
1646 AddrReg)
1647 .addMemOperand(MMO);
1648
1649 } else if (CM == CodeModel::Small || !STI.is64Bit()) {
1650 // Handle the case when globals fit in our immediate field.
1651 // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.
1652
1653 // x86-32 PIC requires a PIC base register for constant pools.
1654 unsigned PICBase = 0;
1655 if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
1656 // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
1657 // In DAGISEL the code that initialize it generated by the CGBR pass.
1658 return false; // TODO support the mode.
1659 } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
1660 PICBase = X86::RIP;
1661
1662 LoadInst = addConstantPoolReference(
1663 BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
1664 OpFlag);
1665 } else
1666 return false;
1667
1668 constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
1669 I.eraseFromParent();
1670 return true;
1671}
1672
1673bool X86InstructionSelector::selectImplicitDefOrPHI(
1674 MachineInstr &I, MachineRegisterInfo &MRI) const {
1675 assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
1676 I.getOpcode() == TargetOpcode::G_PHI) &&
1677 "unexpected instruction");
1678
1679 Register DstReg = I.getOperand(0).getReg();
1680
1681 if (!MRI.getRegClassOrNull(DstReg)) {
1682 const LLT DstTy = MRI.getType(DstReg);
1683 const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);
1684
1685 if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
1686 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
1687 << " operand\n");
1688 return false;
1689 }
1690 }
1691
1692 if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1693 I.setDesc(TII.get(X86::IMPLICIT_DEF));
1694 else
1695 I.setDesc(TII.get(X86::PHI));
1696
1697 return true;
1698}
1699
/// Select G_MUL/G_SMULH/G_UMULH/G_SDIV/G_SREM/G_UDIV/G_UREM on GPRs by
/// emitting the classic one-operand MUL/IMUL/DIV/IDIV sequence: copy LHS into
/// the fixed low register, zero/sign-extend into the high register, issue the
/// operation on RHS, then copy the desired fixed result register into DstReg.
bool X86InstructionSelector::selectMulDivRem(MachineInstr &I,
                                             MachineRegisterInfo &MRI,
                                             MachineFunction &MF) const {
  // The implementation of this function is adapted from X86FastISel.
  assert((I.getOpcode() == TargetOpcode::G_MUL ||
          I.getOpcode() == TargetOpcode::G_SMULH ||
          I.getOpcode() == TargetOpcode::G_UMULH ||
          I.getOpcode() == TargetOpcode::G_SDIV ||
          I.getOpcode() == TargetOpcode::G_SREM ||
          I.getOpcode() == TargetOpcode::G_UDIV ||
          I.getOpcode() == TargetOpcode::G_UREM) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register Op1Reg = I.getOperand(1).getReg();
  const Register Op2Reg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
         "Arguments and return value types must match");

  // Only general-purpose-register operations are handled here.
  const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);
  if (!RegRB || RegRB->getID() != X86::GPRRegBankID)
    return false;

  const static unsigned NumTypes = 4; // i8, i16, i32, i64
  const static unsigned NumOps = 7;   // SDiv/SRem/UDiv/URem/Mul/SMulH/UMulh
  const static bool S = true;         // IsSigned
  const static bool U = false;        // !IsSigned
  const static unsigned Copy = TargetOpcode::COPY;

  // For the X86 IDIV instruction, in most cases the dividend
  // (numerator) must be in a specific register pair highreg:lowreg,
  // producing the quotient in lowreg and the remainder in highreg.
  // For most data types, to set up the instruction, the dividend is
  // copied into lowreg, and lowreg is sign-extended into highreg. The
  // exception is i8, where the dividend is defined as a single register rather
  // than a register pair, and we therefore directly sign-extend the dividend
  // into lowreg, instead of copying, and ignore the highreg.
  const static struct MulDivRemEntry {
    // The following portion depends only on the data type.
    unsigned SizeInBits;
    unsigned LowInReg;  // low part of the register pair
    unsigned HighInReg; // high part of the register pair
    // The following portion depends on both the data type and the operation.
    struct MulDivRemResult {
      unsigned OpMulDivRem;   // The specific MUL/DIV opcode to use.
      unsigned OpSignExtend;  // Opcode for sign-extending lowreg into
                              // highreg, or copying a zero into highreg.
      unsigned OpCopy;        // Opcode for copying dividend into lowreg, or
                              // zero/sign-extending into lowreg for i8.
      unsigned ResultReg;     // Register containing the desired result.
      bool IsOpSigned;        // Whether to use signed or unsigned form.
    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
      {8,
       X86::AX,
       0,
       {
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U},  // UDiv
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U},  // URem
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AL, S}, // Mul
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SMulH
           {X86::MUL8r, 0, X86::MOVZX16rr8, X86::AH, U},  // UMulH
       }}, // i8
      {16,
       X86::AX,
       X86::DX,
       {
           {X86::IDIV16r, X86::CWD, Copy, X86::AX, S},    // SDiv
           {X86::IDIV16r, X86::CWD, Copy, X86::DX, S},    // SRem
           {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U}, // UDiv
           {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U}, // URem
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::AX, S}, // Mul
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::DX, S}, // SMulH
           {X86::MUL16r, X86::MOV32r0, Copy, X86::DX, U},  // UMulH
       }}, // i16
      {32,
       X86::EAX,
       X86::EDX,
       {
           {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S},    // SDiv
           {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S},    // SRem
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U}, // UDiv
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U}, // URem
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EAX, S}, // Mul
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EDX, S}, // SMulH
           {X86::MUL32r, X86::MOV32r0, Copy, X86::EDX, U},  // UMulH
       }}, // i32
      {64,
       X86::RAX,
       X86::RDX,
       {
           {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S},    // SDiv
           {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S},    // SRem
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U}, // UDiv
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U}, // URem
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RAX, S}, // Mul
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RDX, S}, // SMulH
           {X86::MUL64r, X86::MOV32r0, Copy, X86::RDX, U},  // UMulH
       }}, // i64
  };

  // Find the table row for the operand width.
  auto OpEntryIt = llvm::find_if(OpTable, [RegTy](const MulDivRemEntry &El) {
    return El.SizeInBits == RegTy.getSizeInBits();
  });
  if (OpEntryIt == std::end(OpTable))
    return false;

  // Map the generic opcode to a column of ResultTable.
  unsigned OpIndex;
  switch (I.getOpcode()) {
  default:
    llvm_unreachable("Unexpected mul/div/rem opcode");
  case TargetOpcode::G_SDIV:
    OpIndex = 0;
    break;
  case TargetOpcode::G_SREM:
    OpIndex = 1;
    break;
  case TargetOpcode::G_UDIV:
    OpIndex = 2;
    break;
  case TargetOpcode::G_UREM:
    OpIndex = 3;
    break;
  case TargetOpcode::G_MUL:
    OpIndex = 4;
    break;
  case TargetOpcode::G_SMULH:
    OpIndex = 5;
    break;
  case TargetOpcode::G_UMULH:
    OpIndex = 6;
    break;
  }

  const MulDivRemEntry &TypeEntry = *OpEntryIt;
  const MulDivRemEntry::MulDivRemResult &OpEntry =
      TypeEntry.ResultTable[OpIndex];

  const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);
  if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  // Move op1 into low-order input register.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
          TypeEntry.LowInReg)
      .addReg(Op1Reg);

  // Zero-extend or sign-extend into high-order input register.
  if (OpEntry.OpSignExtend) {
    if (OpEntry.IsOpSigned)
      // CWD/CDQ/CQO implicitly sign-extend the low register into the high one.
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(OpEntry.OpSignExtend));
    else {
      Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
              Zero32);

      // Copy the zero into the appropriate sub/super/identical physical
      // register. Unfortunately the operations needed are not uniform enough
      // to fit neatly into the table above.
      if (RegTy.getSizeInBits() == 16) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32, {}, X86::sub_16bit);
      } else if (RegTy.getSizeInBits() == 32) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32);
      } else if (RegTy.getSizeInBits() == 64) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
            .addReg(Zero32)
            .addImm(X86::sub_32bit);
      }
    }
  }

  // Generate the DIV/IDIV/MUL/IMUL instruction.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpMulDivRem))
      .addReg(Op2Reg);

  // For i8 remainder, we can't reference ah directly, as we'll end
  // up with bogus copies like %r9b = COPY %ah. Reference ax
  // instead to prevent ah references in a rex instruction.
  //
  // The current assumption of the fast register allocator is that isel
  // won't generate explicit references to the GR8_NOREX registers. If
  // the allocator and/or the backend get enhanced to be more robust in
  // that regard, this can be, and should be, removed.
  if (OpEntry.ResultReg == X86::AH && STI.is64Bit()) {
    Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
        .addReg(X86::AX);

    // Shift AX right by 8 bits instead of using AH.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
            ResultSuperReg)
        .addReg(SourceSuperReg)
        .addImm(8);

    // Now reference the 8-bit subreg of the result.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(ResultSuperReg, {}, X86::sub_8bit);
  } else {
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(OpEntry.ResultReg);
  }
  I.eraseFromParent();

  return true;
}
1923
1924bool X86InstructionSelector::selectSelect(MachineInstr &I,
1925 MachineRegisterInfo &MRI,
1926 MachineFunction &MF) const {
1927 GSelect &Sel = cast<GSelect>(I);
1928 Register DstReg = Sel.getReg(0);
1929 BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(X86::TEST32rr))
1930 .addReg(Sel.getCondReg())
1931 .addReg(Sel.getCondReg());
1932
1933 unsigned OpCmp;
1934 LLT Ty = MRI.getType(DstReg);
1935 if (Ty.getSizeInBits() == 80) {
1936 BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(X86::CMOVE_Fp80),
1937 DstReg)
1938 .addReg(Sel.getTrueReg())
1939 .addReg(Sel.getFalseReg());
1940 } else {
1941 switch (Ty.getSizeInBits()) {
1942 default:
1943 return false;
1944 case 8:
1945 OpCmp = X86::CMOV_GR8;
1946 break;
1947 case 16:
1948 OpCmp = STI.canUseCMOV() ? X86::CMOV16rr : X86::CMOV_GR16;
1949 break;
1950 case 32:
1951 OpCmp = STI.canUseCMOV() ? X86::CMOV32rr : X86::CMOV_GR32;
1952 break;
1953 case 64:
1954 assert(STI.is64Bit() && STI.canUseCMOV());
1955 OpCmp = X86::CMOV64rr;
1956 break;
1957 }
1958 BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(OpCmp), DstReg)
1959 .addReg(Sel.getTrueReg())
1960 .addReg(Sel.getFalseReg())
1962 }
1963 const TargetRegisterClass *DstRC = getRegClass(Ty, DstReg, MRI);
1964 if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
1965 LLVM_DEBUG(dbgs() << "Failed to constrain CMOV\n");
1966 return false;
1967 }
1968
1969 Sel.eraseFromParent();
1970 return true;
1971}
1972
1973InstructionSelector::ComplexRendererFns
1974X86InstructionSelector::selectAddr(MachineOperand &Root) const {
1975 MachineInstr *MI = Root.getParent();
1976 MachineIRBuilder MIRBuilder(*MI);
1977
1978 MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();
1979 MachineInstr *Ptr = MRI.getVRegDef(Root.getReg());
1980 X86AddressMode AM;
1981 if (!X86SelectAddress(*Ptr, TM, MRI, STI, AM))
1982 return std::nullopt;
1983
1984 if (AM.IndexReg)
1985 return std::nullopt;
1986
1987 return {// Base
1988 {[=](MachineInstrBuilder &MIB) {
1990 MIB.addUse(AM.Base.Reg);
1991 else {
1993 "Unknown type of address base");
1994 MIB.addFrameIndex(AM.Base.FrameIndex);
1995 }
1996 },
1997 // Scale
1998 [=](MachineInstrBuilder &MIB) { MIB.addImm(AM.Scale); },
1999 // Index
2000 [=](MachineInstrBuilder &MIB) { MIB.addUse(0); },
2001 // Disp
2002 [=](MachineInstrBuilder &MIB) {
2003 if (AM.GV)
2004 MIB.addGlobalAddress(AM.GV, AM.Disp, AM.GVOpFlags);
2005 else if (AM.CP)
2006 MIB.addConstantPoolIndex(AM.Disp, 0, AM.GVOpFlags);
2007 else
2008 MIB.addImm(AM.Disp);
2009 },
2010 // Segment
2011 [=](MachineInstrBuilder &MIB) { MIB.addUse(0); }}};
2012}
2013
2014InstructionSelector *
2016 const X86Subtarget &Subtarget,
2017 const X86RegisterBankInfo &RBI) {
2018 return new X86InstructionSelector(TM, Subtarget, RBI);
2019}
static const TargetRegisterClass * getRegClass(const MachineInstr &MI, Register Reg)
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
static bool selectDebugInstr(MachineInstr &I, MachineRegisterInfo &MRI, const RegisterBankInfo &RBI)
assert(UImm && (UImm != ~static_cast<T>(0)) && "Invalid immediate!")
static bool selectMergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define DEBUG_TYPE
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Implement a low-level type suitable for MachineInstr level instruction selection.
#define I(x, y, z)
Definition MD5.cpp:57
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static unsigned selectLoadStoreOp(unsigned GenericOpc, unsigned RegBankID, unsigned OpSize)
static StringRef getName(Value *V)
unsigned OpIndex
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:483
#define LLVM_DEBUG(...)
Definition Debug.h:114
static bool X86SelectAddress(MachineInstr &I, const X86TargetMachine &TM, const MachineRegisterInfo &MRI, const X86Subtarget &STI, X86AddressMode &AM)
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC, const TargetRegisterClass *SrcRC)
static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI)
static const TargetRegisterClass * getRegClassFromGRPhysReg(Register Reg)
Value * RHS
Value * LHS
This file declares the targeting of the RegisterBankInfo class for X86.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
Register getCondReg() const
Register getFalseReg() const
Register getTrueReg() const
Register getReg(unsigned Idx) const
Access the Idx'th operand as a register and return it.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI MachineInstrBundleIterator< MachineInstr > eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
@ MOLoad
The memory access reads data.
MachineOperand class - Representation of each machine instruction operand.
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
const RegClassOrRegBank & getRegClassOrRegBank(Register Reg) const
Return the register bank or register class of Reg.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
LLVM_ABI void setRegBank(Register Reg, const RegisterBank &RegBank)
Set the register bank to RegBank for Reg.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
const TargetRegisterClass * getRegClassOrNull(Register Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet.
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
TypeSize getSizeInBits(Register Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
This class implements the register bank concept.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
CodeModel::Model getCodeModel() const
Returns the code model.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
Register getGlobalBaseReg(MachineFunction *MF) const
getGlobalBaseReg - Return a virtual register initialized with the the global base register value.
This class provides the information for the target register banks.
bool canUseCMOV() const
bool isTarget64BitILP32() const
Is this x86_64 with the ILP32 programming model (x32 ABI)?
const X86InstrInfo * getInstrInfo() const override
bool hasAVX512() const
unsigned char classifyGlobalReference(const GlobalValue *GV, const Module &M) const
bool isPICStyleRIPRel() const
unsigned char classifyLocalReference(const GlobalValue *GV) const
Classify a global variable reference for the current subtarget according to how we should reference i...
bool hasAVX() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
@ X86
Windows x64, Windows Itanium (IA-64)
Definition MCAsmInfo.h:52
@ MO_GOTPCREL_NORELAX
MO_GOTPCREL_NORELAX - Same as MO_GOTPCREL except that R_X86_64_GOTPCREL relocations are guaranteed to...
@ MO_GOTOFF
MO_GOTOFF - On a symbol operand this indicates that the immediate is the offset to the location of th...
@ MO_PIC_BASE_OFFSET
MO_PIC_BASE_OFFSET - On a symbol operand this indicates that the immediate should get the value of th...
@ MO_GOTPCREL
MO_GOTPCREL - On a symbol operand this indicates that the immediate is offset to the GOT entry for th...
@ LAST_VALID_COND
Definition X86BaseInfo.h:94
std::pair< CondCode, bool > getX86ConditionCode(CmpInst::Predicate Predicate)
Return a pair of condition code for the given predicate and whether the instruction operands should b...
StringMapEntry< std::atomic< TypeEntryBody * > > TypeEntry
Definition TypePool.h:28
NodeAddr< DefNode * > Def
Definition RDFGraph.h:384
This is an optimization pass for GlobalISel generic memory operations.
static bool isGlobalRelativeToPICBase(unsigned char TargetFlag)
isGlobalRelativeToPICBase - Return true if the specified global value reference is relative to a 32-b...
PointerUnion< const TargetRegisterClass *, const RegisterBank * > RegClassOrRegBank
Convenient type to represent either a register class or a register bank.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
Definition Utils.cpp:294
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI void constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition Utils.cpp:156
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
static const MachineInstrBuilder & addConstantPoolReference(const MachineInstrBuilder &MIB, unsigned CPI, Register GlobalBaseReg, unsigned char OpFlags)
addConstantPoolReference - This function is used to add a reference to the base of a constant value s...
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
static const MachineInstrBuilder & addFullAddress(const MachineInstrBuilder &MIB, const X86AddressMode &AM)
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition Utils.cpp:314
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
static const MachineInstrBuilder & addOffset(const MachineInstrBuilder &MIB, int Offset)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1771
static const MachineInstrBuilder & addDirectMem(const MachineInstrBuilder &MIB, Register Reg)
addDirectMem - This function is used to add a direct memory reference to the current instruction – th...
InstructionSelector * createX86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &, const X86RegisterBankInfo &)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:876
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
X86AddressMode - This struct holds a generalized full x86 address mode.
const GlobalValue * GV
union llvm::X86AddressMode::BaseUnion Base
enum llvm::X86AddressMode::(anonymous enum) BaseType