#include "llvm/IR/IntrinsicsX86.h"

using namespace llvm;

#define DEBUG_TYPE "X86-isel"
#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
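
// Declarations of the hand-written selection helpers follow. Each selectFoo
// method handles one generic opcode (or a small family of them) that the
// tablegen-erated selectImpl() does not cover.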
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          Align Alignment) const;
  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const unsigned DstReg,
                          const TargetRegisterClass *DstRC,
                          const unsigned SrcReg,
                          const TargetRegisterClass *SrcRC) const;
  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;
#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return STI.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}
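
// Sub-register index used to view a narrow GPR class as part of a wider
// register (sub_8bit / sub_16bit / sub_32bit).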
static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}
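
// Pick the GPR register class of a physical register by containment, widest
// class first.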
static const TargetRegisterClass *getRegClassFromGRPhysReg(Register Reg) {
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}
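
// Set the X86 opcode and constrain the register classes for a COPY. The
// cross-size GPR cases come from ABI lowering: anyext (via SUBREG_TO_REG)
// when copying a narrow vreg into a wider physreg, and truncation via a
// sub-register read when copying a wider physreg into a narrow vreg.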
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  Register SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstReg.isPhysical()) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering, perform anyext.
        Register ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!SrcReg.isPhysical() || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
          (SrcReg.isPhysical() &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      SrcReg.isPhysical()) {
    // Change the physical register to perform the truncate.
    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg: it will get constrained when we hit another
  // of its uses or its defs, and copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
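
// Entry point of the selector: try the tablegen-erated patterns via
// selectImpl() first, then fall back to the hand-written per-opcode helpers.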
bool X86InstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.
    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, *CoverageInfo))
    return true;

  LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));
  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_FCMP:
    return selectFCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
    return selectUadde(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectMergeValues(I, MRI, MF);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM:
    return selectDivRem(I, MRI, MF);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWSideEffects(I, MRI, MF);
  }

  return false;
}
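
// Pick the concrete X86 load/store opcode for a given type, register bank,
// and alignment: plain MOVs on the GPR bank; MOVSS/MOVSD and the aligned or
// unaligned MOVAPS/MOVUPS families (with their VEX/EVEX variants) on the
// vector bank. Returns the generic opcode unchanged if nothing matches.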
unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                Align Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt :
                       HasAVX    ? X86::VMOVSSrm_alt :
                                   X86::MOVSSrm_alt)
                    : (HasAVX512 ? X86::VMOVSSZmr :
                       HasAVX    ? X86::VMOVSSmr :
                                   X86::MOVSSmr);
  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt :
                       HasAVX    ? X86::VMOVSDrm_alt :
                                   X86::MOVSDrm_alt)
                    : (HasAVX512 ? X86::VMOVSDZmr :
                       HasAVX    ? X86::VMOVSDmr :
                                   X86::MOVSDmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= Align(16))
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVAPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVAPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVUPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVUPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= Align(32))
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                              : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                          : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                              : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                          : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                              : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                          : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                              : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                          : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= Align(64))
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}
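
// Fill an X86AddressMode from the instruction defining the address: a
// G_PTR_ADD with an in-range constant offset becomes base+disp, a
// G_FRAME_INDEX becomes a frame-index base, and anything else is used
// directly as the base register.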
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_PTR_ADD) {
    if (auto COff = getIConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}
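
// Select G_LOAD/G_STORE: reject unsupported atomics, swap in the concrete
// opcode from getLoadStoreOp, then fold the address computation into the
// instruction's memory operands.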
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  auto &MemOp = **I.memoperands_begin();
  if (MemOp.isAtomic()) {
    // Note: for unordered operations, we rely on the fact the appropriate MMO
    // is already on the instruction we're mutating, and thus we don't need to
    // make any changes. So long as we select an opcode which is capable of
    // loading or storing the appropriate size atomically, the rest of the
    // backend is required to respect the MMO state.
    if (!MemOp.isUnordered()) {
      LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
      return false;
    }
    if (MemOp.getAlign() < Ty.getSizeInBits() / 8) {
      LLVM_DEBUG(dbgs() << "Unaligned atomics not supported yet\n");
      return false;
    }
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlign());
  if (NewOpc == Opc)
    return false;

  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    I.removeOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // Unfold the G_STORE: drop both operands, add the address operands, then
    // re-add the stored value as the trailing use.
    I.removeOperand(1);
    I.removeOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
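
// G_FRAME_INDEX and G_PTR_ADD both select to an LEA whose width is chosen by
// getLeaOP from the pointer type; only the addressing-mode operands differ.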
bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_PTR_ADD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to calculate frame index and GEP.
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
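
// Select G_GLOBAL_VALUE as an LEA of the global's address. TLS globals and
// references that need a stub load or the PIC base are not handled yet.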
bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    return false; // TODO: we don't support TLS yet.
  }

  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return false;

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // TODO: The ABI requires an extra load, not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags))
    return false;

  // TODO: This reference is relative to the pic base, not supported yet.
  if (isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.removeOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
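
// Select G_CONSTANT on the GPR bank as a MOVri of the matching width; 64-bit
// values that fit a sign-extended imm32 use the shorter MOV64ri32 encoding.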
bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used.
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
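
// A truncate/anyext between a 128-bit vector register and a 32/64-bit scalar
// FP register only renames the value, so it can be selected as a plain COPY.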
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}

bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
    const TargetRegisterClass *DstRC, const unsigned SrcReg,
    const TargetRegisterClass *SrcRC) const {
  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
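
// G_TRUNC/G_PTRTOINT on the GPR bank become a COPY that reads the source
// through the sub-register index of the destination width.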
bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
                      << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If the value lives on the vector class and is truncated into the
  // floating class, just replace it with a copy; we can select that as a
  // regular move.
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done.
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}
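
// Select G_ZEXT from s1: widen the bit into the destination width (via
// INSERT_SUBREG from an IMPLICIT_DEF when the result is wider than 8 bits)
// and mask it with an AND of 1. Wider zexts are matched by tablegen.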
bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(16)) &&
         "8=>16 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(64)) &&
         "8=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(64)) &&
         "16=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(32) && DstTy == LLT::scalar(64)) &&
         "32=>64 Zext is handled by tablegen");

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri8;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri8;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri8;
  else
    return false;

  Register DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    Register ImpDefReg =
        MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);

    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
        .addReg(ImpDefReg)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
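
// G_ANYEXT on the GPR bank is a plain COPY when the classes already match,
// otherwise a SUBREG_TO_REG reading the narrow source as a low sub-register.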
bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If the value lives on the floating class and is any-extended into the
  // vector class, just replace it with a copy; we can select that as a
  // regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}
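
// Select G_ICMP as a CMPrr of the operand width followed by a SETCCr with
// the mapped X86 condition code.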
bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());

  Register LHS = I.getOperand(2).getReg();
  Register RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                   TII.get(X86::SETCCr),
                                   I.getOperand(0).getReg())
                               .addImm(CC);

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
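
// Select G_FCMP as UCOMISS/UCOMISD plus SETCC. FCMP_OEQ and FCMP_UNE also
// depend on the parity flag, so they combine two SETCCs with AND8rr/OR8rr.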
bool X86InstructionSelector::selectFCmp(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");

  Register LhsReg = I.getOperand(2).getReg();
  Register RhsReg = I.getOperand(3).getReg();
  CmpInst::Predicate Predicate =
      (CmpInst::Predicate)I.getOperand(1).getPredicate();

  // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
  static const uint16_t SETFOpcTable[2][3] = {
      {X86::COND_E, X86::COND_NP, X86::AND8rr},
      {X86::COND_NE, X86::COND_P, X86::OR8rr}};
  const uint16_t *SETFOpc = nullptr;
  switch (Predicate) {
  default:
    break;
  case CmpInst::FCMP_OEQ:
    SETFOpc = &SETFOpcTable[0][0];
    break;
  case CmpInst::FCMP_UNE:
    SETFOpc = &SETFOpcTable[1][0];
    break;
  }

  // Compute the opcode for the CMP instruction.
  unsigned OpCmp;
  LLT Ty = MRI.getType(LhsReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 32:
    OpCmp = X86::UCOMISSrr;
    break;
  case 64:
    OpCmp = X86::UCOMISDrr;
    break;
  }

  Register ResultReg = I.getOperand(0).getReg();
  RBI.constrainGenericRegister(
      ResultReg,
      *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
  if (SETFOpc) {
    MachineInstr &CmpInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
             .addReg(LhsReg)
             .addReg(RhsReg);

    Register FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
    Register FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
    MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg1)
                              .addImm(SETFOpc[0]);
    MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg2)
                              .addImm(SETFOpc[1]);
    MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[2]), ResultReg)
                              .addReg(FlagReg1)
                              .addReg(FlagReg2);
    constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set1, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set2, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set3, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
  assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");

  if (SwapArgs)
    std::swap(LhsReg, RhsReg);

  // Emit a compare of LHS/RHS.
  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LhsReg)
           .addReg(RhsReg);

  MachineInstr &Set = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                               TII.get(X86::SETCCr), ResultReg)
                           .addImm(CC);
  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(Set, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
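
// Select G_UADDE as ADC32rr when the carry-in comes from a previous G_UADDE
// (copied into EFLAGS first), or as ADD32rr when the carry-in is a constant
// zero. Other carry-in sources are rejected.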
bool X86InstructionSelector::selectUadde(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register CarryOutReg = I.getOperand(1).getReg();
  const Register Op0Reg = I.getOperand(2).getReg();
  const Register Op1Reg = I.getOperand(3).getReg();
  Register CarryInReg = I.getOperand(4).getReg();

  const LLT DstTy = MRI.getType(DstReg);

  if (DstTy != LLT::scalar(32))
    return false;

  // Find the def instruction of the carry-in, looking through truncates.
  MachineInstr *Def = MRI.getVRegDef(CarryInReg);
  while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
    CarryInReg = Def->getOperand(1).getReg();
    Def = MRI.getVRegDef(CarryInReg);
  }

  unsigned Opcode;
  if (Def->getOpcode() == TargetOpcode::G_UADDE) {
    // Carry set by the previous ADD.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY),
            X86::EFLAGS)
        .addReg(CarryInReg);

    if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
      return false;

    Opcode = X86::ADC32rr;
  } else if (auto Val = getIConstantVRegVal(CarryInReg, MRI)) {
    // Carry is constant; support only 0.
    if (*Val != 0)
      return false;

    Opcode = X86::ADD32rr;
  } else
    return false;

  MachineInstr &AddInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
    return false;

  I.eraseFromParent();
  return true;
}
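
// Select G_EXTRACT of a subvector: index 0 becomes a sub-register copy,
// otherwise a VEXTRACTF-family instruction with the index rescaled to
// subvector units.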
bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not extract subvector.

  if (Index == 0) {
    // Replace by extract subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
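
// Emit a COPY that reads the low 128/256 bits of a wider vector register
// through sub_xmm/sub_ymm.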
bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}
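
// Emit a COPY that defines only the low 128/256 bits of a wider vector
// register via sub_xmm/sub_ymm.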
bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types.
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}
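
// Select G_INSERT of a subvector: inserting at index 0 over an undef value
// becomes a sub-register copy, otherwise a VINSERTF-family instruction with
// the index rescaled to subvector units.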
bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  const Register InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not insert subvector.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();
  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
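
// Lower G_UNMERGE_VALUES into one G_EXTRACT per result and select each
// extract recursively.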
bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split to extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  Register SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst))
      return false;
  }

  I.eraseFromParent();
  return true;
}
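
// Lower G_MERGE_VALUES/G_CONCAT_VECTORS into a chain of G_INSERTs starting
// from the first source, selecting each new instruction recursively.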
bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
          I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
         "unexpected instruction");

  // Split to inserts.
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first src use insertSubReg.
  Register DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    Register Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                                .addReg(DefReg);

  if (!select(CopyInst))
    return false;

  I.eraseFromParent();
  return true;
}
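
// Select G_BRCOND as TEST8ri of the condition's low bit followed by a JCC_1
// on COND_NE.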
bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

  const Register CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))
      .addMBB(DestMBB)
      .addImm(X86::COND_NE);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
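
// Materialize a G_FCONSTANT by loading it from the constant pool. The large
// code model on x86-64 first moves the pool address into a register with
// MOV64ri; otherwise the pool reference is folded into the load directly.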
bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const Register DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  Align Alignment = Align(DstTy.getSizeInBytes());
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc =
      getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);

  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
    // they cannot be folded into immediate fields.
    Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        MF.getDataLayout().getPointerSize(), Alignment);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);

  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
    // Handle the case when globals fit in our immediate field.
    // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
      // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
      // In DAGISEL the code that initializes it is generated by the CGBR pass.
      return false; // TODO: support this mode.
    } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  } else
    return false;

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
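
// G_IMPLICIT_DEF/G_PHI only need a register class constraint before being
// rewritten to the target-independent IMPLICIT_DEF/PHI.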
bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  Register DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}
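
// Select G_SDIV/G_UDIV/G_SREM/G_UREM via the fixed-register DIV/IDIV scheme.
// The implementation is adapted from X86FastISel; see the table below.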
bool X86InstructionSelector::selectDivRem(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_SDIV ||
          I.getOpcode() == TargetOpcode::G_SREM ||
          I.getOpcode() == TargetOpcode::G_UDIV ||
          I.getOpcode() == TargetOpcode::G_UREM) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register Op1Reg = I.getOperand(1).getReg();
  const Register Op2Reg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
         "Arguments and return value types must match");

  const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);
  if (!RegRB || RegRB->getID() != X86::GPRRegBankID)
    return false;

  const static unsigned NumTypes = 4; // i8, i16, i32, i64
  const static unsigned NumOps = 4;   // SDiv, SRem, UDiv, URem
  const static bool S = true;         // IsSigned
  const static bool U = false;        // !IsSigned
  const static unsigned Copy = TargetOpcode::COPY;

  // For the X86 IDIV instruction, in most cases the dividend
  // (numerator) must be in a specific register pair highreg:lowreg,
  // producing the quotient in lowreg and the remainder in highreg.
  // For most data types, to set up the instruction, the dividend is
  // copied into lowreg, and lowreg is sign-extended into highreg. The
  // exception is i8, where the dividend is defined as a single register rather
  // than a register pair, and we therefore directly sign-extend the dividend
  // into lowreg, instead of copying, and ignore the highreg.
  const static struct DivRemEntry {
    // The following portion depends only on the data type.
    unsigned SizeInBits;
    unsigned LowInReg;  // low part of the register pair
    unsigned HighInReg; // high part of the register pair
    // The following portion depends on both the data type and the operation.
    struct DivRemResult {
      unsigned OpDivRem;        // The specific DIV/IDIV opcode to use.
      unsigned OpSignExtend;    // Opcode for sign-extending the dividend into
                                // highreg, or copying a zero into highreg.
      unsigned OpCopy;          // Opcode for copying the dividend into lowreg,
                                // or zero/sign-extending into lowreg for i8.
      unsigned DivRemResultReg; // Register containing the desired result.
      bool IsOpSigned;          // Whether to use signed or unsigned form.
    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
      {8,
       X86::AX,
       0,
       {
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U},  // UDiv
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U}   // URem
       }},                                                // i8
      {16,
       X86::AX,
       X86::DX,
       {
           {X86::IDIV16r, X86::CWD, Copy, X86::AX, S},    // SDiv
           {X86::IDIV16r, X86::CWD, Copy, X86::DX, S},    // SRem
           {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U}, // UDiv
           {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U}  // URem
       }},                                                // i16
      {32,
       X86::EAX,
       X86::EDX,
       {
           {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S},    // SDiv
           {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S},    // SRem
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U}, // UDiv
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U}  // URem
       }},                                                 // i32
      {64,
       X86::RAX,
       X86::RDX,
       {
           {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S},    // SDiv
           {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S},    // SRem
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U}, // UDiv
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U}  // URem
       }},                                                 // i64
  };

  auto OpEntryIt = llvm::find_if(OpTable, [RegTy](const DivRemEntry &El) {
    return El.SizeInBits == RegTy.getSizeInBits();
  });
  if (OpEntryIt == std::end(OpTable))
    return false;

  unsigned OpIndex;
  switch (I.getOpcode()) {
  default:
    llvm_unreachable("Unexpected div/rem opcode");
  case TargetOpcode::G_SDIV:
    OpIndex = 0;
    break;
  case TargetOpcode::G_SREM:
    OpIndex = 1;
    break;
  case TargetOpcode::G_UDIV:
    OpIndex = 2;
    break;
  case TargetOpcode::G_UREM:
    OpIndex = 3;
    break;
  }

  const DivRemEntry &TypeEntry = *OpEntryIt;
  const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];

  const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);
  if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  // Move op1 into the low-order input register.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
          TypeEntry.LowInReg)
      .addReg(Op1Reg);
  // Zero-extend or sign-extend into the high-order input register.
  if (OpEntry.OpSignExtend) {
    if (OpEntry.IsOpSigned)
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(OpEntry.OpSignExtend));
    else {
      Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
              Zero32);

      // Copy the zero into the appropriate sub/super/identical physical
      // register. Unfortunately the operations needed are not uniform enough
      // to fit neatly into the table above.
      if (RegTy.getSizeInBits() == 16) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32, 0, X86::sub_16bit);
      } else if (RegTy.getSizeInBits() == 32) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32);
      } else if (RegTy.getSizeInBits() == 64) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
            .addImm(0)
            .addReg(Zero32)
            .addImm(X86::sub_32bit);
      }
    }
  }
  // Generate the DIV/IDIV instruction.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpDivRem))
      .addReg(Op2Reg);
  // For i8 remainder, we can't reference ah directly, as we'll end
  // up with bogus copies like %r9b = COPY %ah. Reference ax
  // instead to prevent ah references in a rex instruction.
  //
  // The current assumption of the fast register allocator is that isel
  // won't generate explicit references to the GR8_NOREX registers. If
  // the allocator and/or the backend get enhanced to be more robust in
  // that regard, this can be, and should be, removed.
  if ((I.getOpcode() == TargetOpcode::G_SREM ||
       I.getOpcode() == TargetOpcode::G_UREM) &&
      OpEntry.DivRemResultReg == X86::AH && STI.is64Bit()) {
    Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
        .addReg(X86::AX);

    // Shift AX right by 8 bits instead of using AH.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
            ResultSuperReg)
        .addReg(SourceSuperReg)
        .addImm(8);

    // Now reference the 8-bit subreg of the result.
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::SUBREG_TO_REG))
        .addDef(DstReg)
        .addImm(0)
        .addReg(ResultSuperReg)
        .addImm(X86::sub_8bit);
  } else {
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(OpEntry.DivRemResultReg);
  }
  I.eraseFromParent();
  return true;
}
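
// Only llvm.trap is handled here; it selects to the TRAP (ud2) instruction.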
bool X86InstructionSelector::selectIntrinsicWSideEffects(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) const {
  assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
         "unexpected instruction");

  if (I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
    return false;

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TRAP));

  I.eraseFromParent();
  return true;
}
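
// Entry point used by the X86 target to construct the GlobalISel
// instruction selector.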
InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
                                   X86Subtarget &Subtarget,
                                   X86RegisterBankInfo &RBI) {
  return new X86InstructionSelector(TM, Subtarget, RBI);
}