38#include "llvm/IR/IntrinsicsX86.h"
50#define DEBUG_TYPE "X86-isel"
56#define GET_GLOBALISEL_PREDICATE_BITSET
57#include "X86GenGlobalISel.inc"
58#undef GET_GLOBALISEL_PREDICATE_BITSET
74 unsigned getLoadStoreOp(
const LLT &Ty,
const RegisterBank &RB,
unsigned Opc,
75 Align Alignment)
const;
110 const unsigned DstReg,
112 const unsigned SrcReg,
123 bool emitInsertSubreg(
unsigned DstReg,
unsigned SrcReg,
MachineInstr &
I,
126 bool emitExtractSubreg(
unsigned DstReg,
unsigned SrcReg,
MachineInstr &
I,
139#define GET_GLOBALISEL_PREDICATES_DECL
140#include "X86GenGlobalISel.inc"
141#undef GET_GLOBALISEL_PREDICATES_DECL
143#define GET_GLOBALISEL_TEMPORARIES_DECL
144#include "X86GenGlobalISel.inc"
145#undef GET_GLOBALISEL_TEMPORARIES_DECL
150#define GET_GLOBALISEL_IMPL
151#include "X86GenGlobalISel.inc"
152#undef GET_GLOBALISEL_IMPL
157 :
TM(
TM), STI(STI),
TII(*STI.getInstrInfo()),
TRI(*STI.getRegisterInfo()),
160#include
"X86GenGlobalISel.inc"
163#include
"X86GenGlobalISel.inc"
171X86InstructionSelector::getRegClass(
LLT Ty,
const RegisterBank &RB)
const {
172 if (RB.
getID() == X86::GPRRegBankID) {
174 return &X86::GR8RegClass;
176 return &X86::GR16RegClass;
178 return &X86::GR32RegClass;
180 return &X86::GR64RegClass;
182 if (RB.
getID() == X86::VECRRegBankID) {
184 return STI.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
186 return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
188 return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
190 return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
192 return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
194 return &X86::VR512RegClass;
201X86InstructionSelector::getRegClass(
LLT Ty,
unsigned Reg,
208 unsigned SubIdx = X86::NoSubRegister;
209 if (RC == &X86::GR32RegClass) {
210 SubIdx = X86::sub_32bit;
211 }
else if (RC == &X86::GR16RegClass) {
212 SubIdx = X86::sub_16bit;
213 }
else if (RC == &X86::GR8RegClass) {
214 SubIdx = X86::sub_8bit;
222 if (X86::GR64RegClass.
contains(Reg))
223 return &X86::GR64RegClass;
224 if (X86::GR32RegClass.
contains(Reg))
225 return &X86::GR32RegClass;
226 if (X86::GR16RegClass.
contains(Reg))
227 return &X86::GR16RegClass;
229 return &X86::GR8RegClass;
237bool X86InstructionSelector::selectDebugInstr(
MachineInstr &
I,
245 if (
Reg.isPhysical())
247 LLT Ty =
MRI.getType(Reg);
256 dbgs() <<
"Warning: DBG_VALUE operand has unexpected size/bank\n");
260 RBI.constrainGenericRegister(Reg, *RC,
MRI);
269 Register DstReg =
I.getOperand(0).getReg();
270 const unsigned DstSize = RBI.getSizeInBits(DstReg,
MRI,
TRI);
273 Register SrcReg =
I.getOperand(1).getReg();
274 const unsigned SrcSize = RBI.getSizeInBits(SrcReg,
MRI,
TRI);
278 assert(
I.isCopy() &&
"Generic operators do not allow physical registers");
280 if (DstSize > SrcSize && SrcRegBank.
getID() == X86::GPRRegBankID &&
281 DstRegBank.
getID() == X86::GPRRegBankID) {
287 if (SrcRC != DstRC) {
289 Register ExtSrc =
MRI.createVirtualRegister(DstRC);
291 TII.get(TargetOpcode::SUBREG_TO_REG))
295 .
addImm(getSubRegIndex(SrcRC));
297 I.getOperand(1).setReg(ExtSrc);
305 "No phys reg on generic operators");
306 assert((DstSize == SrcSize ||
310 DstSize <= RBI.getSizeInBits(SrcReg,
MRI,
TRI))) &&
311 "Copy with different width?!");
316 if (SrcRegBank.
getID() == X86::GPRRegBankID &&
317 DstRegBank.
getID() == X86::GPRRegBankID && SrcSize > DstSize &&
323 if (DstRC != SrcRC) {
324 I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
325 I.getOperand(1).substPhysReg(SrcReg,
TRI);
334 if (!RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
340 I.setDesc(
TII.get(X86::COPY));
345 assert(
I.getParent() &&
"Instruction should be in a basic block!");
346 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
352 unsigned Opcode =
I.getOpcode();
356 if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
362 if (
I.isDebugInstr())
368 assert(
I.getNumOperands() ==
I.getNumExplicitOperands() &&
369 "Generic instruction has unexpected implicit operands\n");
371 if (selectImpl(
I, *CoverageInfo))
377 switch (
I.getOpcode()) {
380 case TargetOpcode::G_STORE:
381 case TargetOpcode::G_LOAD:
383 case TargetOpcode::G_PTR_ADD:
384 case TargetOpcode::G_FRAME_INDEX:
385 return selectFrameIndexOrGep(
I,
MRI, MF);
386 case TargetOpcode::G_GLOBAL_VALUE:
387 return selectGlobalValue(
I,
MRI, MF);
388 case TargetOpcode::G_CONSTANT:
389 return selectConstant(
I,
MRI, MF);
390 case TargetOpcode::G_FCONSTANT:
391 return materializeFP(
I,
MRI, MF);
392 case TargetOpcode::G_PTRTOINT:
393 case TargetOpcode::G_TRUNC:
394 return selectTruncOrPtrToInt(
I,
MRI, MF);
395 case TargetOpcode::G_INTTOPTR:
397 case TargetOpcode::G_ZEXT:
398 return selectZext(
I,
MRI, MF);
399 case TargetOpcode::G_ANYEXT:
400 return selectAnyext(
I,
MRI, MF);
401 case TargetOpcode::G_ICMP:
402 return selectCmp(
I,
MRI, MF);
403 case TargetOpcode::G_FCMP:
404 return selectFCmp(
I,
MRI, MF);
405 case TargetOpcode::G_UADDE:
406 return selectUadde(
I,
MRI, MF);
407 case TargetOpcode::G_UNMERGE_VALUES:
409 case TargetOpcode::G_MERGE_VALUES:
410 case TargetOpcode::G_CONCAT_VECTORS:
412 case TargetOpcode::G_EXTRACT:
413 return selectExtract(
I,
MRI, MF);
414 case TargetOpcode::G_INSERT:
415 return selectInsert(
I,
MRI, MF);
416 case TargetOpcode::G_BRCOND:
417 return selectCondBranch(
I,
MRI, MF);
418 case TargetOpcode::G_IMPLICIT_DEF:
419 case TargetOpcode::G_PHI:
420 return selectImplicitDefOrPHI(
I,
MRI);
421 case TargetOpcode::G_SDIV:
422 case TargetOpcode::G_UDIV:
423 case TargetOpcode::G_SREM:
424 case TargetOpcode::G_UREM:
425 return selectDivRem(
I,
MRI, MF);
426 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
427 return selectIntrinsicWSideEffects(
I,
MRI, MF);
433unsigned X86InstructionSelector::getLoadStoreOp(
const LLT &Ty,
436 Align Alignment)
const {
437 bool Isload = (Opc == TargetOpcode::G_LOAD);
438 bool HasAVX = STI.hasAVX();
439 bool HasAVX512 = STI.hasAVX512();
440 bool HasVLX = STI.hasVLX();
443 if (X86::GPRRegBankID == RB.
getID())
444 return Isload ? X86::MOV8rm : X86::MOV8mr;
446 if (X86::GPRRegBankID == RB.
getID())
447 return Isload ? X86::MOV16rm : X86::MOV16mr;
449 if (X86::GPRRegBankID == RB.
getID())
450 return Isload ? X86::MOV32rm : X86::MOV32mr;
451 if (X86::VECRRegBankID == RB.
getID())
452 return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt :
453 HasAVX ? X86::VMOVSSrm_alt :
455 : (HasAVX512 ? X86::VMOVSSZmr :
456 HasAVX ? X86::VMOVSSmr :
459 if (X86::GPRRegBankID == RB.
getID())
460 return Isload ? X86::MOV64rm : X86::MOV64mr;
461 if (X86::VECRRegBankID == RB.
getID())
462 return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt :
463 HasAVX ? X86::VMOVSDrm_alt :
465 : (HasAVX512 ? X86::VMOVSDZmr :
466 HasAVX ? X86::VMOVSDmr :
469 if (Alignment >=
Align(16))
470 return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
472 ? X86::VMOVAPSZ128rm_NOVLX
473 : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
474 : (HasVLX ? X86::VMOVAPSZ128mr
476 ? X86::VMOVAPSZ128mr_NOVLX
477 : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
479 return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
481 ? X86::VMOVUPSZ128rm_NOVLX
482 : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
483 : (HasVLX ? X86::VMOVUPSZ128mr
485 ? X86::VMOVUPSZ128mr_NOVLX
486 : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
488 if (Alignment >=
Align(32))
489 return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
490 : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
492 : (HasVLX ? X86::VMOVAPSZ256mr
493 : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
496 return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
497 : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
499 : (HasVLX ? X86::VMOVUPSZ256mr
500 : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
503 if (Alignment >=
Align(64))
504 return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
506 return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
515 assert(
I.getOperand(0).isReg() &&
"unsupported opperand.");
516 assert(
MRI.getType(
I.getOperand(0).getReg()).isPointer() &&
517 "unsupported type.");
519 if (
I.getOpcode() == TargetOpcode::G_PTR_ADD) {
522 if (isInt<32>(Imm)) {
523 AM.
Disp =
static_cast<int32_t
>(Imm);
524 AM.
Base.
Reg =
I.getOperand(1).getReg();
528 }
else if (
I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
535 AM.
Base.
Reg =
I.getOperand(0).getReg();
538bool X86InstructionSelector::selectLoadStoreOp(
MachineInstr &
I,
541 unsigned Opc =
I.getOpcode();
543 assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
544 "unexpected instruction");
546 const Register DefReg =
I.getOperand(0).getReg();
547 LLT Ty =
MRI.getType(DefReg);
551 auto &
MemOp = **
I.memoperands_begin();
552 if (
MemOp.isAtomic()) {
558 if (!
MemOp.isUnordered()) {
568 unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc,
MemOp.getAlign());
575 I.setDesc(
TII.get(NewOpc));
577 if (Opc == TargetOpcode::G_LOAD) {
598bool X86InstructionSelector::selectFrameIndexOrGep(
MachineInstr &
I,
601 unsigned Opc =
I.getOpcode();
603 assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_PTR_ADD) &&
604 "unexpected instruction");
606 const Register DefReg =
I.getOperand(0).getReg();
607 LLT Ty =
MRI.getType(DefReg);
610 unsigned NewOpc =
getLeaOP(Ty, STI);
611 I.setDesc(
TII.get(NewOpc));
614 if (Opc == TargetOpcode::G_FRAME_INDEX) {
620 MIB.addImm(0).addReg(0);
626bool X86InstructionSelector::selectGlobalValue(
MachineInstr &
I,
629 assert((
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
630 "unexpected instruction");
632 auto GV =
I.getOperand(1).getGlobal();
633 if (GV->isThreadLocal()) {
643 AM.
GVOpFlags = STI.classifyGlobalReference(GV);
653 if (STI.isPICStyleRIPRel()) {
659 const Register DefReg =
I.getOperand(0).getReg();
660 LLT Ty =
MRI.getType(DefReg);
661 unsigned NewOpc =
getLeaOP(Ty, STI);
663 I.setDesc(
TII.get(NewOpc));
675 assert((
I.getOpcode() == TargetOpcode::G_CONSTANT) &&
676 "unexpected instruction");
678 const Register DefReg =
I.getOperand(0).getReg();
679 LLT Ty =
MRI.getType(DefReg);
681 if (RBI.getRegBank(DefReg,
MRI,
TRI)->getID() != X86::GPRRegBankID)
685 if (
I.getOperand(1).isCImm()) {
686 Val =
I.getOperand(1).getCImm()->getZExtValue();
687 I.getOperand(1).ChangeToImmediate(Val);
688 }
else if (
I.getOperand(1).isImm()) {
689 Val =
I.getOperand(1).getImm();
696 NewOpc = X86::MOV8ri;
699 NewOpc = X86::MOV16ri;
702 NewOpc = X86::MOV32ri;
707 NewOpc = X86::MOV64ri32;
709 NewOpc = X86::MOV64ri;
715 I.setDesc(
TII.get(NewOpc));
724 return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
725 DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
726 (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
729bool X86InstructionSelector::selectTurnIntoCOPY(
734 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC,
MRI) ||
735 !RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
740 I.setDesc(
TII.get(X86::COPY));
744bool X86InstructionSelector::selectTruncOrPtrToInt(
MachineInstr &
I,
747 assert((
I.getOpcode() == TargetOpcode::G_TRUNC ||
748 I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
749 "unexpected instruction");
751 const Register DstReg =
I.getOperand(0).getReg();
752 const Register SrcReg =
I.getOperand(1).getReg();
754 const LLT DstTy =
MRI.getType(DstReg);
755 const LLT SrcTy =
MRI.getType(SrcReg);
762 <<
" input/output on different banks\n");
769 if (!DstRC || !SrcRC)
776 return selectTurnIntoCOPY(
I,
MRI, DstReg, DstRC, SrcReg, SrcRC);
778 if (DstRB.
getID() != X86::GPRRegBankID)
782 if (DstRC == SrcRC) {
784 SubIdx = X86::NoSubRegister;
785 }
else if (DstRC == &X86::GR32RegClass) {
786 SubIdx = X86::sub_32bit;
787 }
else if (DstRC == &X86::GR16RegClass) {
788 SubIdx = X86::sub_16bit;
789 }
else if (DstRC == &X86::GR8RegClass) {
790 SubIdx = X86::sub_8bit;
795 SrcRC =
TRI.getSubClassWithSubReg(SrcRC, SubIdx);
797 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC,
MRI) ||
798 !RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
804 I.getOperand(1).setSubReg(SubIdx);
806 I.setDesc(
TII.get(X86::COPY));
813 assert((
I.getOpcode() == TargetOpcode::G_ZEXT) &&
"unexpected instruction");
815 const Register DstReg =
I.getOperand(0).getReg();
816 const Register SrcReg =
I.getOperand(1).getReg();
818 const LLT DstTy =
MRI.getType(DstReg);
819 const LLT SrcTy =
MRI.getType(SrcReg);
822 "8=>16 Zext is handled by tablegen");
824 "8=>32 Zext is handled by tablegen");
826 "16=>32 Zext is handled by tablegen");
828 "8=>64 Zext is handled by tablegen");
830 "16=>64 Zext is handled by tablegen");
832 "32=>64 Zext is handled by tablegen");
839 AndOpc = X86::AND8ri;
841 AndOpc = X86::AND16ri8;
843 AndOpc = X86::AND32ri8;
845 AndOpc = X86::AND64ri8;
854 TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);
858 TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
865 *
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(AndOpc), DstReg)
878 assert((
I.getOpcode() == TargetOpcode::G_ANYEXT) &&
"unexpected instruction");
880 const Register DstReg =
I.getOperand(0).getReg();
881 const Register SrcReg =
I.getOperand(1).getReg();
883 const LLT DstTy =
MRI.getType(DstReg);
884 const LLT SrcTy =
MRI.getType(SrcReg);
890 "G_ANYEXT input/output on different banks\n");
893 "G_ANYEXT incorrect operand size");
902 return selectTurnIntoCOPY(
I,
MRI, SrcReg, SrcRC, DstReg, DstRC);
904 if (DstRB.
getID() != X86::GPRRegBankID)
907 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC,
MRI) ||
908 !RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
914 if (SrcRC == DstRC) {
915 I.setDesc(
TII.get(X86::COPY));
920 TII.get(TargetOpcode::SUBREG_TO_REG))
924 .
addImm(getSubRegIndex(SrcRC));
933 assert((
I.getOpcode() == TargetOpcode::G_ICMP) &&
"unexpected instruction");
947 LLT Ty =
MRI.getType(LHS);
956 OpCmp = X86::CMP16rr;
959 OpCmp = X86::CMP32rr;
962 OpCmp = X86::CMP64rr;
972 TII.get(X86::SETCCr),
I.getOperand(0).getReg()).
addImm(
CC);
984 assert((
I.getOpcode() == TargetOpcode::G_FCMP) &&
"unexpected instruction");
986 Register LhsReg =
I.getOperand(2).getReg();
987 Register RhsReg =
I.getOperand(3).getReg();
992 static const uint16_t SETFOpcTable[2][3] = {
1000 SETFOpc = &SETFOpcTable[0][0];
1003 SETFOpc = &SETFOpcTable[1][0];
1009 LLT Ty =
MRI.getType(LhsReg);
1014 OpCmp = X86::UCOMISSrr;
1017 OpCmp = X86::UCOMISDrr;
1021 Register ResultReg =
I.getOperand(0).getReg();
1022 RBI.constrainGenericRegister(
1027 *
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(OpCmp))
1031 Register FlagReg1 =
MRI.createVirtualRegister(&X86::GR8RegClass);
1032 Register FlagReg2 =
MRI.createVirtualRegister(&X86::GR8RegClass);
1034 TII.get(X86::SETCCr), FlagReg1).
addImm(SETFOpc[0]);
1036 TII.get(X86::SETCCr), FlagReg2).
addImm(SETFOpc[1]);
1038 TII.get(SETFOpc[2]), ResultReg)
1046 I.eraseFromParent();
1060 *
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(OpCmp))
1068 I.eraseFromParent();
1075 assert((
I.getOpcode() == TargetOpcode::G_UADDE) &&
"unexpected instruction");
1077 const Register DstReg =
I.getOperand(0).getReg();
1078 const Register CarryOutReg =
I.getOperand(1).getReg();
1079 const Register Op0Reg =
I.getOperand(2).getReg();
1080 const Register Op1Reg =
I.getOperand(3).getReg();
1081 Register CarryInReg =
I.getOperand(4).getReg();
1083 const LLT DstTy =
MRI.getType(DstReg);
1090 while (
Def->getOpcode() == TargetOpcode::G_TRUNC) {
1091 CarryInReg =
Def->getOperand(1).getReg();
1092 Def =
MRI.getVRegDef(CarryInReg);
1096 if (
Def->getOpcode() == TargetOpcode::G_UADDE) {
1099 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::COPY), X86::EFLAGS)
1102 if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass,
MRI))
1105 Opcode = X86::ADC32rr;
1111 Opcode = X86::ADD32rr;
1116 *
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode), DstReg)
1120 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::COPY), CarryOutReg)
1124 !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass,
MRI))
1127 I.eraseFromParent();
1134 assert((
I.getOpcode() == TargetOpcode::G_EXTRACT) &&
1135 "unexpected instruction");
1137 const Register DstReg =
I.getOperand(0).getReg();
1138 const Register SrcReg =
I.getOperand(1).getReg();
1139 int64_t
Index =
I.getOperand(2).getImm();
1141 const LLT DstTy =
MRI.getType(DstReg);
1142 const LLT SrcTy =
MRI.getType(SrcReg);
1153 if (!emitExtractSubreg(DstReg, SrcReg,
I,
MRI, MF))
1156 I.eraseFromParent();
1160 bool HasAVX = STI.hasAVX();
1161 bool HasAVX512 = STI.hasAVX512();
1162 bool HasVLX = STI.hasVLX();
1166 I.setDesc(
TII.get(X86::VEXTRACTF32x4Z256rr));
1168 I.setDesc(
TII.get(X86::VEXTRACTF128rr));
1173 I.setDesc(
TII.get(X86::VEXTRACTF32x4Zrr));
1175 I.setDesc(
TII.get(X86::VEXTRACTF64x4Zrr));
1183 I.getOperand(2).setImm(
Index);
1188bool X86InstructionSelector::emitExtractSubreg(
unsigned DstReg,
unsigned SrcReg,
1192 const LLT DstTy =
MRI.getType(DstReg);
1193 const LLT SrcTy =
MRI.getType(SrcReg);
1194 unsigned SubIdx = X86::NoSubRegister;
1200 "Incorrect Src/Dst register size");
1203 SubIdx = X86::sub_xmm;
1205 SubIdx = X86::sub_ymm;
1212 SrcRC =
TRI.getSubClassWithSubReg(SrcRC, SubIdx);
1214 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC,
MRI) ||
1215 !RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
1220 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::COPY), DstReg)
1221 .
addReg(SrcReg, 0, SubIdx);
1226bool X86InstructionSelector::emitInsertSubreg(
unsigned DstReg,
unsigned SrcReg,
1230 const LLT DstTy =
MRI.getType(DstReg);
1231 const LLT SrcTy =
MRI.getType(SrcReg);
1232 unsigned SubIdx = X86::NoSubRegister;
1239 "Incorrect Src/Dst register size");
1242 SubIdx = X86::sub_xmm;
1244 SubIdx = X86::sub_ymm;
1251 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC,
MRI) ||
1252 !RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
1257 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::COPY))
1267 assert((
I.getOpcode() == TargetOpcode::G_INSERT) &&
"unexpected instruction");
1269 const Register DstReg =
I.getOperand(0).getReg();
1270 const Register SrcReg =
I.getOperand(1).getReg();
1271 const Register InsertReg =
I.getOperand(2).getReg();
1272 int64_t
Index =
I.getOperand(3).getImm();
1274 const LLT DstTy =
MRI.getType(DstReg);
1275 const LLT InsertRegTy =
MRI.getType(InsertReg);
1284 if (
Index == 0 &&
MRI.getVRegDef(SrcReg)->isImplicitDef()) {
1286 if (!emitInsertSubreg(DstReg, InsertReg,
I,
MRI, MF))
1289 I.eraseFromParent();
1293 bool HasAVX = STI.hasAVX();
1294 bool HasAVX512 = STI.hasAVX512();
1295 bool HasVLX = STI.hasVLX();
1299 I.setDesc(
TII.get(X86::VINSERTF32x4Z256rr));
1301 I.setDesc(
TII.get(X86::VINSERTF128rr));
1306 I.setDesc(
TII.get(X86::VINSERTF32x4Zrr));
1308 I.setDesc(
TII.get(X86::VINSERTF64x4Zrr));
1317 I.getOperand(3).setImm(
Index);
1322bool X86InstructionSelector::selectUnmergeValues(
1324 assert((
I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
1325 "unexpected instruction");
1328 unsigned NumDefs =
I.getNumOperands() - 1;
1329 Register SrcReg =
I.getOperand(NumDefs).getReg();
1330 unsigned DefSize =
MRI.getType(
I.getOperand(0).getReg()).getSizeInBits();
1332 for (
unsigned Idx = 0;
Idx < NumDefs; ++
Idx) {
1335 TII.get(TargetOpcode::G_EXTRACT),
I.getOperand(
Idx).getReg())
1339 if (!select(ExtrInst))
1343 I.eraseFromParent();
1347bool X86InstructionSelector::selectMergeValues(
1349 assert((
I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
1350 I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
1351 "unexpected instruction");
1354 Register DstReg =
I.getOperand(0).getReg();
1355 Register SrcReg0 =
I.getOperand(1).getReg();
1357 const LLT DstTy =
MRI.getType(DstReg);
1358 const LLT SrcTy =
MRI.getType(SrcReg0);
1364 Register DefReg =
MRI.createGenericVirtualRegister(DstTy);
1365 MRI.setRegBank(DefReg, RegBank);
1366 if (!emitInsertSubreg(DefReg,
I.getOperand(1).getReg(),
I,
MRI, MF))
1369 for (
unsigned Idx = 2;
Idx <
I.getNumOperands(); ++
Idx) {
1370 Register Tmp =
MRI.createGenericVirtualRegister(DstTy);
1371 MRI.setRegBank(Tmp, RegBank);
1374 TII.get(TargetOpcode::G_INSERT), Tmp)
1381 if (!select(InsertInst))
1386 TII.get(TargetOpcode::COPY), DstReg)
1389 if (!select(CopyInst))
1392 I.eraseFromParent();
1396bool X86InstructionSelector::selectCondBranch(
MachineInstr &
I,
1399 assert((
I.getOpcode() == TargetOpcode::G_BRCOND) &&
"unexpected instruction");
1401 const Register CondReg =
I.getOperand(0).getReg();
1405 *
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::TEST8ri))
1408 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::JCC_1))
1413 I.eraseFromParent();
1420 assert((
I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
1421 "unexpected instruction");
1428 const Register DstReg =
I.getOperand(0).getReg();
1429 const LLT DstTy =
MRI.getType(DstReg);
1432 const DebugLoc &DbgLoc =
I.getDebugLoc();
1435 getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);
1438 const ConstantFP *CFP =
I.getOperand(1).getFPImm();
1441 unsigned char OpFlag = STI.classifyLocalReference(
nullptr);
1447 Register AddrReg =
MRI.createVirtualRegister(&X86::GR64RegClass);
1448 BuildMI(*
I.getParent(),
I, DbgLoc,
TII.get(X86::MOV64ri), AddrReg)
1465 unsigned PICBase = 0;
1474 BuildMI(*
I.getParent(),
I, DbgLoc,
TII.get(Opc), DstReg), CPI, PICBase,
1480 I.eraseFromParent();
1484bool X86InstructionSelector::selectImplicitDefOrPHI(
1486 assert((
I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
1487 I.getOpcode() == TargetOpcode::G_PHI) &&
1488 "unexpected instruction");
1490 Register DstReg =
I.getOperand(0).getReg();
1492 if (!
MRI.getRegClassOrNull(DstReg)) {
1493 const LLT DstTy =
MRI.getType(DstReg);
1496 if (!RBI.constrainGenericRegister(DstReg, *RC,
MRI)) {
1503 if (
I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1504 I.setDesc(
TII.get(X86::IMPLICIT_DEF));
1506 I.setDesc(
TII.get(X86::PHI));
1515 assert((
I.getOpcode() == TargetOpcode::G_SDIV ||
1516 I.getOpcode() == TargetOpcode::G_SREM ||
1517 I.getOpcode() == TargetOpcode::G_UDIV ||
1518 I.getOpcode() == TargetOpcode::G_UREM) &&
1519 "unexpected instruction");
1521 const Register DstReg =
I.getOperand(0).getReg();
1522 const Register Op1Reg =
I.getOperand(1).getReg();
1523 const Register Op2Reg =
I.getOperand(2).getReg();
1525 const LLT RegTy =
MRI.getType(DstReg);
1526 assert(RegTy ==
MRI.getType(Op1Reg) && RegTy ==
MRI.getType(Op2Reg) &&
1527 "Arguments and return value types must match");
1530 if (!RegRB || RegRB->
getID() != X86::GPRRegBankID)
1533 const static unsigned NumTypes = 4;
1534 const static unsigned NumOps = 4;
1535 const static bool S =
true;
1536 const static bool U =
false;
1537 const static unsigned Copy = TargetOpcode::COPY;
1546 const static struct DivRemEntry {
1548 unsigned SizeInBits;
1552 struct DivRemResult {
1554 unsigned OpSignExtend;
1558 unsigned DivRemResultReg;
1560 } ResultTable[NumOps];
1561 } OpTable[NumTypes] = {
1566 {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S},
1567 {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S},
1568 {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL,
U},
1569 {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH,
U},
1575 {X86::IDIV16r, X86::CWD,
Copy, X86::AX, S},
1576 {X86::IDIV16r, X86::CWD,
Copy, X86::DX, S},
1577 {X86::DIV16r, X86::MOV32r0,
Copy, X86::AX,
U},
1578 {X86::DIV16r, X86::MOV32r0,
Copy, X86::DX,
U},
1584 {X86::IDIV32r, X86::CDQ,
Copy, X86::EAX, S},
1585 {X86::IDIV32r, X86::CDQ,
Copy, X86::EDX, S},
1586 {X86::DIV32r, X86::MOV32r0,
Copy, X86::EAX,
U},
1587 {X86::DIV32r, X86::MOV32r0,
Copy, X86::EDX,
U},
1593 {X86::IDIV64r, X86::CQO,
Copy, X86::RAX, S},
1594 {X86::IDIV64r, X86::CQO,
Copy, X86::RDX, S},
1595 {X86::DIV64r, X86::MOV32r0,
Copy, X86::RAX,
U},
1596 {X86::DIV64r, X86::MOV32r0,
Copy, X86::RDX,
U},
1600 auto OpEntryIt =
llvm::find_if(OpTable, [RegTy](
const DivRemEntry &El) {
1603 if (OpEntryIt == std::end(OpTable))
1607 switch (
I.getOpcode()) {
1610 case TargetOpcode::G_SDIV:
1613 case TargetOpcode::G_SREM:
1616 case TargetOpcode::G_UDIV:
1619 case TargetOpcode::G_UREM:
1624 const DivRemEntry &TypeEntry = *OpEntryIt;
1625 const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[
OpIndex];
1628 if (!RBI.constrainGenericRegister(Op1Reg, *RegRC,
MRI) ||
1629 !RBI.constrainGenericRegister(Op2Reg, *RegRC,
MRI) ||
1630 !RBI.constrainGenericRegister(DstReg, *RegRC,
MRI)) {
1637 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(OpEntry.OpCopy),
1641 if (OpEntry.OpSignExtend) {
1642 if (OpEntry.IsOpSigned)
1644 TII.get(OpEntry.OpSignExtend));
1646 Register Zero32 =
MRI.createVirtualRegister(&X86::GR32RegClass);
1647 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::MOV32r0),
1655 TypeEntry.HighInReg)
1656 .
addReg(Zero32, 0, X86::sub_16bit);
1659 TypeEntry.HighInReg)
1663 TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
1671 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(OpEntry.OpDivRem))
1681 if ((
I.getOpcode() == Instruction::SRem ||
1682 I.getOpcode() == Instruction::URem) &&
1683 OpEntry.DivRemResultReg == X86::AH && STI.is64Bit()) {
1684 Register SourceSuperReg =
MRI.createVirtualRegister(&X86::GR16RegClass);
1685 Register ResultSuperReg =
MRI.createVirtualRegister(&X86::GR16RegClass);
1686 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Copy), SourceSuperReg)
1690 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::SHR16ri),
1697 TII.get(TargetOpcode::SUBREG_TO_REG))
1703 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY),
1705 .
addReg(OpEntry.DivRemResultReg);
1707 I.eraseFromParent();
1711bool X86InstructionSelector::selectIntrinsicWSideEffects(
1714 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
1715 "unexpected instruction");
1717 if (
I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
1720 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::TRAP));
1722 I.eraseFromParent();
1730 return new X86InstructionSelector(
TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
static const TargetRegisterClass * getRegClass(const MachineInstr &MI, Register Reg)
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
static bool selectDebugInstr(MachineInstr &I, MachineRegisterInfo &MRI, const RegisterBankInfo &RBI)
static bool selectMergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Atomic ordering constants.
Returns the sub-type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
const HexagonInstrInfo * TII
Implement a low-level type suitable for MachineInstr level instruction selection.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
unsigned const TargetRegisterInfo * TRI
static unsigned selectLoadStoreOp(unsigned GenericOpc, unsigned RegBankID, unsigned OpSize)
const char LLVMTargetMachineRef TM
static StringRef getName(Value *V)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC, const TargetRegisterClass *SrcRC)
static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI)
static const TargetRegisterClass * getRegClassFromGRPhysReg(Register Reg)
static void X86SelectAddress(const MachineInstr &I, const MachineRegisterInfo &MRI, X86AddressMode &AM)
This file declares the targeting of the RegisterBankInfo class for X86.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
ConstantFP - Floating Point Values [float, double].
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Provides the logic to select generic machine instructions.
virtual bool select(MachineInstr &I)=0
Select the (possibly generic) instruction I to only use target-specific opcodes.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
An instruction for reading from memory.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
MachineOperand class - Representation of each machine instruction operand.
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A discriminated union of two or more pointer types, with the discriminator in the low bit of the poin...
T get() const
Returns the value of the specified pointer type.
T dyn_cast() const
Returns the current pointer if it is of the specified pointer type, otherwise returns null.
This class implements the register bank concept.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
This class provides the information for the target register banks.
bool isTarget64BitILP32() const
Is this x86_64 with the ILP32 programming model (x32 ABI)?
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
Reg
All possible values of the reg field in the ModR/M byte.
@ MO_GOTOFF
MO_GOTOFF - On a symbol operand this indicates that the immediate is the offset to the location of th...
@ MO_PIC_BASE_OFFSET
MO_PIC_BASE_OFFSET - On a symbol operand this indicates that the immediate should get the value of th...
std::pair< CondCode, bool > getX86ConditionCode(CmpInst::Predicate Predicate)
Return a pair of condition code for the given predicate and whether the instruction operands should b...
This is an optimization pass for GlobalISel generic memory operations.
static bool isGlobalStubReference(unsigned char TargetFlag)
isGlobalStubReference - Return true if the specified TargetFlag operand is a reference to a stub for ...
static bool isGlobalRelativeToPICBase(unsigned char TargetFlag)
isGlobalRelativeToPICBase - Return true if the specified global value reference is relative to a 32-b...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
static const MachineInstrBuilder & addFullAddress(const MachineInstrBuilder &MIB, const X86AddressMode &AM)
InstructionSelector * createX86InstructionSelector(const X86TargetMachine &TM, X86Subtarget &, X86RegisterBankInfo &)
std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
static const MachineInstrBuilder & addConstantPoolReference(const MachineInstrBuilder &MIB, unsigned CPI, unsigned GlobalBaseReg, unsigned char OpFlags)
addConstantPoolReference - This function is used to add a reference to the base of a constant value s...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static const MachineInstrBuilder & addOffset(const MachineInstrBuilder &MIB, int Offset)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
static const MachineInstrBuilder & addDirectMem(const MachineInstrBuilder &MIB, unsigned Reg)
addDirectMem - This function is used to add a direct memory reference to the current instruction -- th...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
X86AddressMode - This struct holds a generalized full x86 address mode.
union llvm::X86AddressMode::@599 Base
enum llvm::X86AddressMode::@598 BaseType