40#include "llvm/IR/IntrinsicsX86.h"
51#define DEBUG_TYPE "X86-isel"
57#define GET_GLOBALISEL_PREDICATE_BITSET
58#include "X86GenGlobalISel.inc"
59#undef GET_GLOBALISEL_PREDICATE_BITSET
75 unsigned getLoadStoreOp(
const LLT &Ty,
const RegisterBank &RB,
unsigned Opc,
76 Align Alignment)
const;
111 const unsigned DstReg,
113 const unsigned SrcReg,
124 bool emitInsertSubreg(
unsigned DstReg,
unsigned SrcReg,
MachineInstr &
I,
127 bool emitExtractSubreg(
unsigned DstReg,
unsigned SrcReg,
MachineInstr &
I,
140#define GET_GLOBALISEL_PREDICATES_DECL
141#include "X86GenGlobalISel.inc"
142#undef GET_GLOBALISEL_PREDICATES_DECL
144#define GET_GLOBALISEL_TEMPORARIES_DECL
145#include "X86GenGlobalISel.inc"
146#undef GET_GLOBALISEL_TEMPORARIES_DECL
151#define GET_GLOBALISEL_IMPL
152#include "X86GenGlobalISel.inc"
153#undef GET_GLOBALISEL_IMPL
158 :
TM(
TM), STI(STI),
TII(*STI.getInstrInfo()),
TRI(*STI.getRegisterInfo()),
161#include
"X86GenGlobalISel.inc"
164#include
"X86GenGlobalISel.inc"
172X86InstructionSelector::getRegClass(
LLT Ty,
const RegisterBank &RB)
const {
173 if (RB.
getID() == X86::GPRRegBankID) {
175 return &X86::GR8RegClass;
177 return &X86::GR16RegClass;
179 return &X86::GR32RegClass;
181 return &X86::GR64RegClass;
183 if (RB.
getID() == X86::VECRRegBankID) {
185 return STI.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
187 return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
189 return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
191 return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
193 return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
195 return &X86::VR512RegClass;
202X86InstructionSelector::getRegClass(
LLT Ty,
unsigned Reg,
209 unsigned SubIdx = X86::NoSubRegister;
210 if (RC == &X86::GR32RegClass) {
211 SubIdx = X86::sub_32bit;
212 }
else if (RC == &X86::GR16RegClass) {
213 SubIdx = X86::sub_16bit;
214 }
else if (RC == &X86::GR8RegClass) {
215 SubIdx = X86::sub_8bit;
223 if (X86::GR64RegClass.
contains(Reg))
224 return &X86::GR64RegClass;
225 if (X86::GR32RegClass.
contains(Reg))
226 return &X86::GR32RegClass;
227 if (X86::GR16RegClass.
contains(Reg))
228 return &X86::GR16RegClass;
230 return &X86::GR8RegClass;
238bool X86InstructionSelector::selectDebugInstr(
MachineInstr &
I,
246 if (
Reg.isPhysical())
248 LLT Ty =
MRI.getType(Reg);
251 dyn_cast_if_present<const TargetRegisterClass *>(RegClassOrBank);
253 const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
257 dbgs() <<
"Warning: DBG_VALUE operand has unexpected size/bank\n");
261 RBI.constrainGenericRegister(Reg, *RC,
MRI);
270 Register DstReg =
I.getOperand(0).getReg();
271 const unsigned DstSize = RBI.getSizeInBits(DstReg,
MRI,
TRI);
274 Register SrcReg =
I.getOperand(1).getReg();
275 const unsigned SrcSize = RBI.getSizeInBits(SrcReg,
MRI,
TRI);
279 assert(
I.isCopy() &&
"Generic operators do not allow physical registers");
281 if (DstSize > SrcSize && SrcRegBank.
getID() == X86::GPRRegBankID &&
282 DstRegBank.
getID() == X86::GPRRegBankID) {
288 if (SrcRC != DstRC) {
290 Register ExtSrc =
MRI.createVirtualRegister(DstRC);
292 TII.get(TargetOpcode::SUBREG_TO_REG))
296 .
addImm(getSubRegIndex(SrcRC));
298 I.getOperand(1).setReg(ExtSrc);
306 "No phys reg on generic operators");
307 assert((DstSize == SrcSize ||
311 DstSize <= RBI.getSizeInBits(SrcReg,
MRI,
TRI))) &&
312 "Copy with different width?!");
317 if (SrcRegBank.
getID() == X86::GPRRegBankID &&
318 DstRegBank.
getID() == X86::GPRRegBankID && SrcSize > DstSize &&
324 if (DstRC != SrcRC) {
325 I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
326 I.getOperand(1).substPhysReg(SrcReg,
TRI);
335 if (!RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
341 I.setDesc(
TII.get(X86::COPY));
346 assert(
I.getParent() &&
"Instruction should be in a basic block!");
347 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
353 unsigned Opcode =
I.getOpcode();
357 if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
363 if (
I.isDebugInstr())
369 assert(
I.getNumOperands() ==
I.getNumExplicitOperands() &&
370 "Generic instruction has unexpected implicit operands\n");
372 if (selectImpl(
I, *CoverageInfo))
378 switch (
I.getOpcode()) {
381 case TargetOpcode::G_STORE:
382 case TargetOpcode::G_LOAD:
384 case TargetOpcode::G_PTR_ADD:
385 case TargetOpcode::G_FRAME_INDEX:
386 return selectFrameIndexOrGep(
I,
MRI, MF);
387 case TargetOpcode::G_GLOBAL_VALUE:
388 return selectGlobalValue(
I,
MRI, MF);
389 case TargetOpcode::G_CONSTANT:
390 return selectConstant(
I,
MRI, MF);
391 case TargetOpcode::G_FCONSTANT:
392 return materializeFP(
I,
MRI, MF);
393 case TargetOpcode::G_PTRTOINT:
394 case TargetOpcode::G_TRUNC:
395 return selectTruncOrPtrToInt(
I,
MRI, MF);
396 case TargetOpcode::G_INTTOPTR:
398 case TargetOpcode::G_ZEXT:
399 return selectZext(
I,
MRI, MF);
400 case TargetOpcode::G_ANYEXT:
401 return selectAnyext(
I,
MRI, MF);
402 case TargetOpcode::G_ICMP:
403 return selectCmp(
I,
MRI, MF);
404 case TargetOpcode::G_FCMP:
405 return selectFCmp(
I,
MRI, MF);
406 case TargetOpcode::G_UADDE:
407 case TargetOpcode::G_UADDO:
408 case TargetOpcode::G_USUBE:
409 case TargetOpcode::G_USUBO:
410 return selectUAddSub(
I,
MRI, MF);
411 case TargetOpcode::G_UNMERGE_VALUES:
413 case TargetOpcode::G_MERGE_VALUES:
414 case TargetOpcode::G_CONCAT_VECTORS:
416 case TargetOpcode::G_EXTRACT:
417 return selectExtract(
I,
MRI, MF);
418 case TargetOpcode::G_INSERT:
419 return selectInsert(
I,
MRI, MF);
420 case TargetOpcode::G_BRCOND:
421 return selectCondBranch(
I,
MRI, MF);
422 case TargetOpcode::G_IMPLICIT_DEF:
423 case TargetOpcode::G_PHI:
424 return selectImplicitDefOrPHI(
I,
MRI);
425 case TargetOpcode::G_MUL:
426 case TargetOpcode::G_SMULH:
427 case TargetOpcode::G_UMULH:
428 case TargetOpcode::G_SDIV:
429 case TargetOpcode::G_UDIV:
430 case TargetOpcode::G_SREM:
431 case TargetOpcode::G_UREM:
432 return selectMulDivRem(
I,
MRI, MF);
433 case TargetOpcode::G_SELECT:
434 return selectSelect(
I,
MRI, MF);
440unsigned X86InstructionSelector::getLoadStoreOp(
const LLT &Ty,
443 Align Alignment)
const {
444 bool Isload = (Opc == TargetOpcode::G_LOAD);
445 bool HasAVX = STI.hasAVX();
446 bool HasAVX512 = STI.hasAVX512();
447 bool HasVLX = STI.hasVLX();
450 if (X86::GPRRegBankID == RB.
getID())
451 return Isload ? X86::MOV8rm : X86::MOV8mr;
453 if (X86::GPRRegBankID == RB.
getID())
454 return Isload ? X86::MOV16rm : X86::MOV16mr;
456 if (X86::GPRRegBankID == RB.
getID())
457 return Isload ? X86::MOV32rm : X86::MOV32mr;
458 if (X86::VECRRegBankID == RB.
getID())
459 return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt :
460 HasAVX ? X86::VMOVSSrm_alt :
462 : (HasAVX512 ? X86::VMOVSSZmr :
463 HasAVX ? X86::VMOVSSmr :
466 if (X86::GPRRegBankID == RB.
getID())
467 return Isload ? X86::MOV64rm : X86::MOV64mr;
468 if (X86::VECRRegBankID == RB.
getID())
469 return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt :
470 HasAVX ? X86::VMOVSDrm_alt :
472 : (HasAVX512 ? X86::VMOVSDZmr :
473 HasAVX ? X86::VMOVSDmr :
476 if (Alignment >=
Align(16))
477 return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
479 ? X86::VMOVAPSZ128rm_NOVLX
480 : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
481 : (HasVLX ? X86::VMOVAPSZ128mr
483 ? X86::VMOVAPSZ128mr_NOVLX
484 : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
486 return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
488 ? X86::VMOVUPSZ128rm_NOVLX
489 : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
490 : (HasVLX ? X86::VMOVUPSZ128mr
492 ? X86::VMOVUPSZ128mr_NOVLX
493 : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
495 if (Alignment >=
Align(32))
496 return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
497 : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
499 : (HasVLX ? X86::VMOVAPSZ256mr
500 : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
503 return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
504 : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
506 : (HasVLX ? X86::VMOVUPSZ256mr
507 : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
510 if (Alignment >=
Align(64))
511 return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
513 return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
522 assert(
I.getOperand(0).isReg() &&
"unsupported opperand.");
523 assert(
MRI.getType(
I.getOperand(0).getReg()).isPointer() &&
524 "unsupported type.");
526 if (
I.getOpcode() == TargetOpcode::G_PTR_ADD) {
529 if (isInt<32>(Imm)) {
530 AM.
Disp =
static_cast<int32_t
>(Imm);
531 AM.
Base.
Reg =
I.getOperand(1).getReg();
535 }
else if (
I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
542 AM.
Base.
Reg =
I.getOperand(0).getReg();
545bool X86InstructionSelector::selectLoadStoreOp(
MachineInstr &
I,
548 unsigned Opc =
I.getOpcode();
550 assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
551 "unexpected instruction");
553 const Register DefReg =
I.getOperand(0).getReg();
554 LLT Ty =
MRI.getType(DefReg);
558 auto &
MemOp = **
I.memoperands_begin();
559 if (
MemOp.isAtomic()) {
565 if (!
MemOp.isUnordered()) {
575 unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc,
MemOp.getAlign());
582 I.setDesc(
TII.get(NewOpc));
584 if (Opc == TargetOpcode::G_LOAD) {
605bool X86InstructionSelector::selectFrameIndexOrGep(
MachineInstr &
I,
608 unsigned Opc =
I.getOpcode();
610 assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_PTR_ADD) &&
611 "unexpected instruction");
613 const Register DefReg =
I.getOperand(0).getReg();
614 LLT Ty =
MRI.getType(DefReg);
617 unsigned NewOpc =
getLeaOP(Ty, STI);
618 I.setDesc(
TII.get(NewOpc));
621 if (Opc == TargetOpcode::G_FRAME_INDEX) {
627 MIB.addImm(0).addReg(0);
633bool X86InstructionSelector::selectGlobalValue(
MachineInstr &
I,
636 assert((
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
637 "unexpected instruction");
639 auto GV =
I.getOperand(1).getGlobal();
640 if (GV->isThreadLocal()) {
650 AM.
GVOpFlags = STI.classifyGlobalReference(GV);
660 if (STI.isPICStyleRIPRel()) {
666 const Register DefReg =
I.getOperand(0).getReg();
667 LLT Ty =
MRI.getType(DefReg);
668 unsigned NewOpc =
getLeaOP(Ty, STI);
670 I.setDesc(
TII.get(NewOpc));
682 assert((
I.getOpcode() == TargetOpcode::G_CONSTANT) &&
683 "unexpected instruction");
685 const Register DefReg =
I.getOperand(0).getReg();
686 LLT Ty =
MRI.getType(DefReg);
688 if (RBI.getRegBank(DefReg,
MRI,
TRI)->getID() != X86::GPRRegBankID)
692 if (
I.getOperand(1).isCImm()) {
693 Val =
I.getOperand(1).getCImm()->getZExtValue();
694 I.getOperand(1).ChangeToImmediate(Val);
695 }
else if (
I.getOperand(1).isImm()) {
696 Val =
I.getOperand(1).getImm();
703 NewOpc = X86::MOV8ri;
706 NewOpc = X86::MOV16ri;
709 NewOpc = X86::MOV32ri;
714 NewOpc = X86::MOV64ri32;
716 NewOpc = X86::MOV64ri;
722 I.setDesc(
TII.get(NewOpc));
731 return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
732 DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
733 (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
736bool X86InstructionSelector::selectTurnIntoCOPY(
741 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC,
MRI) ||
742 !RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
747 I.setDesc(
TII.get(X86::COPY));
751bool X86InstructionSelector::selectTruncOrPtrToInt(
MachineInstr &
I,
754 assert((
I.getOpcode() == TargetOpcode::G_TRUNC ||
755 I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
756 "unexpected instruction");
758 const Register DstReg =
I.getOperand(0).getReg();
759 const Register SrcReg =
I.getOperand(1).getReg();
761 const LLT DstTy =
MRI.getType(DstReg);
762 const LLT SrcTy =
MRI.getType(SrcReg);
769 <<
" input/output on different banks\n");
776 if (!DstRC || !SrcRC)
783 return selectTurnIntoCOPY(
I,
MRI, DstReg, DstRC, SrcReg, SrcRC);
785 if (DstRB.
getID() != X86::GPRRegBankID)
789 if (DstRC == SrcRC) {
791 SubIdx = X86::NoSubRegister;
792 }
else if (DstRC == &X86::GR32RegClass) {
793 SubIdx = X86::sub_32bit;
794 }
else if (DstRC == &X86::GR16RegClass) {
795 SubIdx = X86::sub_16bit;
796 }
else if (DstRC == &X86::GR8RegClass) {
797 SubIdx = X86::sub_8bit;
802 SrcRC =
TRI.getSubClassWithSubReg(SrcRC, SubIdx);
804 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC,
MRI) ||
805 !RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
811 I.getOperand(1).setSubReg(SubIdx);
813 I.setDesc(
TII.get(X86::COPY));
820 assert((
I.getOpcode() == TargetOpcode::G_ZEXT) &&
"unexpected instruction");
822 const Register DstReg =
I.getOperand(0).getReg();
823 const Register SrcReg =
I.getOperand(1).getReg();
825 const LLT DstTy =
MRI.getType(DstReg);
826 const LLT SrcTy =
MRI.getType(SrcReg);
829 "8=>16 Zext is handled by tablegen");
831 "8=>32 Zext is handled by tablegen");
833 "16=>32 Zext is handled by tablegen");
835 "8=>64 Zext is handled by tablegen");
837 "16=>64 Zext is handled by tablegen");
839 "32=>64 Zext is handled by tablegen");
846 AndOpc = X86::AND8ri;
848 AndOpc = X86::AND16ri;
850 AndOpc = X86::AND32ri;
852 AndOpc = X86::AND64ri32;
861 TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);
865 TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
872 *
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(AndOpc), DstReg)
885 assert((
I.getOpcode() == TargetOpcode::G_ANYEXT) &&
"unexpected instruction");
887 const Register DstReg =
I.getOperand(0).getReg();
888 const Register SrcReg =
I.getOperand(1).getReg();
890 const LLT DstTy =
MRI.getType(DstReg);
891 const LLT SrcTy =
MRI.getType(SrcReg);
897 "G_ANYEXT input/output on different banks\n");
900 "G_ANYEXT incorrect operand size");
909 return selectTurnIntoCOPY(
I,
MRI, SrcReg, SrcRC, DstReg, DstRC);
911 if (DstRB.
getID() != X86::GPRRegBankID)
914 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC,
MRI) ||
915 !RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
921 if (SrcRC == DstRC) {
922 I.setDesc(
TII.get(X86::COPY));
927 TII.get(TargetOpcode::SUBREG_TO_REG))
931 .
addImm(getSubRegIndex(SrcRC));
940 assert((
I.getOpcode() == TargetOpcode::G_ICMP) &&
"unexpected instruction");
954 LLT Ty =
MRI.getType(LHS);
963 OpCmp = X86::CMP16rr;
966 OpCmp = X86::CMP32rr;
969 OpCmp = X86::CMP64rr;
979 TII.get(X86::SETCCr),
I.getOperand(0).getReg()).
addImm(
CC);
991 assert((
I.getOpcode() == TargetOpcode::G_FCMP) &&
"unexpected instruction");
993 Register LhsReg =
I.getOperand(2).getReg();
994 Register RhsReg =
I.getOperand(3).getReg();
999 static const uint16_t SETFOpcTable[2][3] = {
1003 switch (Predicate) {
1007 SETFOpc = &SETFOpcTable[0][0];
1010 SETFOpc = &SETFOpcTable[1][0];
1016 LLT Ty =
MRI.getType(LhsReg);
1021 OpCmp = X86::UCOMISSrr;
1024 OpCmp = X86::UCOMISDrr;
1028 Register ResultReg =
I.getOperand(0).getReg();
1029 RBI.constrainGenericRegister(
1034 *
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(OpCmp))
1038 Register FlagReg1 =
MRI.createVirtualRegister(&X86::GR8RegClass);
1039 Register FlagReg2 =
MRI.createVirtualRegister(&X86::GR8RegClass);
1041 TII.get(X86::SETCCr), FlagReg1).
addImm(SETFOpc[0]);
1043 TII.get(X86::SETCCr), FlagReg2).
addImm(SETFOpc[1]);
1045 TII.get(SETFOpc[2]), ResultReg)
1053 I.eraseFromParent();
1067 *
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(OpCmp))
1075 I.eraseFromParent();
1082 assert((
I.getOpcode() == TargetOpcode::G_UADDE ||
1083 I.getOpcode() == TargetOpcode::G_UADDO ||
1084 I.getOpcode() == TargetOpcode::G_USUBE ||
1085 I.getOpcode() == TargetOpcode::G_USUBO) &&
1086 "unexpected instruction");
1088 const Register DstReg =
I.getOperand(0).getReg();
1089 const Register CarryOutReg =
I.getOperand(1).getReg();
1090 const Register Op0Reg =
I.getOperand(2).getReg();
1091 const Register Op1Reg =
I.getOperand(3).getReg();
1092 bool IsSub =
I.getOpcode() == TargetOpcode::G_USUBE ||
1093 I.getOpcode() == TargetOpcode::G_USUBO;
1094 bool HasCarryIn =
I.getOpcode() == TargetOpcode::G_UADDE ||
1095 I.getOpcode() == TargetOpcode::G_USUBE;
1097 const LLT DstTy =
MRI.getType(DstReg);
1098 assert(DstTy.
isScalar() &&
"selectUAddSub only supported for scalar types");
1101 unsigned OpADC, OpADD, OpSBB, OpSUB;
1104 OpADC = X86::ADC8rr;
1105 OpADD = X86::ADD8rr;
1106 OpSBB = X86::SBB8rr;
1107 OpSUB = X86::SUB8rr;
1110 OpADC = X86::ADC16rr;
1111 OpADD = X86::ADD16rr;
1112 OpSBB = X86::SBB16rr;
1113 OpSUB = X86::SUB16rr;
1116 OpADC = X86::ADC32rr;
1117 OpADD = X86::ADD32rr;
1118 OpSBB = X86::SBB32rr;
1119 OpSUB = X86::SUB32rr;
1122 OpADC = X86::ADC64rr;
1123 OpADD = X86::ADD64rr;
1124 OpSBB = X86::SBB64rr;
1125 OpSUB = X86::SUB64rr;
1134 unsigned Opcode = IsSub ? OpSUB : OpADD;
1138 Register CarryInReg =
I.getOperand(4).getReg();
1140 while (
Def->getOpcode() == TargetOpcode::G_TRUNC) {
1141 CarryInReg =
Def->getOperand(1).getReg();
1142 Def =
MRI.getVRegDef(CarryInReg);
1146 if (
Def->getOpcode() == TargetOpcode::G_UADDE ||
1147 Def->getOpcode() == TargetOpcode::G_UADDO ||
1148 Def->getOpcode() == TargetOpcode::G_USUBE ||
1149 Def->getOpcode() == TargetOpcode::G_USUBO) {
1151 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::COPY),
1155 if (!RBI.constrainGenericRegister(CarryInReg, *DstRC,
MRI))
1158 Opcode = IsSub ? OpSBB : OpADC;
1164 Opcode = IsSub ? OpSUB : OpADD;
1170 *
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode), DstReg)
1174 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::COPY), CarryOutReg)
1178 !RBI.constrainGenericRegister(CarryOutReg, *DstRC,
MRI))
1181 I.eraseFromParent();
1188 assert((
I.getOpcode() == TargetOpcode::G_EXTRACT) &&
1189 "unexpected instruction");
1191 const Register DstReg =
I.getOperand(0).getReg();
1192 const Register SrcReg =
I.getOperand(1).getReg();
1193 int64_t
Index =
I.getOperand(2).getImm();
1195 const LLT DstTy =
MRI.getType(DstReg);
1196 const LLT SrcTy =
MRI.getType(SrcReg);
1207 if (!emitExtractSubreg(DstReg, SrcReg,
I,
MRI, MF))
1210 I.eraseFromParent();
1214 bool HasAVX = STI.hasAVX();
1215 bool HasAVX512 = STI.hasAVX512();
1216 bool HasVLX = STI.hasVLX();
1220 I.setDesc(
TII.get(X86::VEXTRACTF32x4Z256rr));
1222 I.setDesc(
TII.get(X86::VEXTRACTF128rr));
1227 I.setDesc(
TII.get(X86::VEXTRACTF32x4Zrr));
1229 I.setDesc(
TII.get(X86::VEXTRACTF64x4Zrr));
1237 I.getOperand(2).setImm(
Index);
1242bool X86InstructionSelector::emitExtractSubreg(
unsigned DstReg,
unsigned SrcReg,
1246 const LLT DstTy =
MRI.getType(DstReg);
1247 const LLT SrcTy =
MRI.getType(SrcReg);
1248 unsigned SubIdx = X86::NoSubRegister;
1254 "Incorrect Src/Dst register size");
1257 SubIdx = X86::sub_xmm;
1259 SubIdx = X86::sub_ymm;
1266 SrcRC =
TRI.getSubClassWithSubReg(SrcRC, SubIdx);
1268 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC,
MRI) ||
1269 !RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
1274 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::COPY), DstReg)
1275 .
addReg(SrcReg, 0, SubIdx);
1280bool X86InstructionSelector::emitInsertSubreg(
unsigned DstReg,
unsigned SrcReg,
1284 const LLT DstTy =
MRI.getType(DstReg);
1285 const LLT SrcTy =
MRI.getType(SrcReg);
1286 unsigned SubIdx = X86::NoSubRegister;
1293 "Incorrect Src/Dst register size");
1296 SubIdx = X86::sub_xmm;
1298 SubIdx = X86::sub_ymm;
1305 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC,
MRI) ||
1306 !RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
1311 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::COPY))
1321 assert((
I.getOpcode() == TargetOpcode::G_INSERT) &&
"unexpected instruction");
1323 const Register DstReg =
I.getOperand(0).getReg();
1324 const Register SrcReg =
I.getOperand(1).getReg();
1325 const Register InsertReg =
I.getOperand(2).getReg();
1326 int64_t
Index =
I.getOperand(3).getImm();
1328 const LLT DstTy =
MRI.getType(DstReg);
1329 const LLT InsertRegTy =
MRI.getType(InsertReg);
1338 if (
Index == 0 &&
MRI.getVRegDef(SrcReg)->isImplicitDef()) {
1340 if (!emitInsertSubreg(DstReg, InsertReg,
I,
MRI, MF))
1343 I.eraseFromParent();
1347 bool HasAVX = STI.hasAVX();
1348 bool HasAVX512 = STI.hasAVX512();
1349 bool HasVLX = STI.hasVLX();
1353 I.setDesc(
TII.get(X86::VINSERTF32x4Z256rr));
1355 I.setDesc(
TII.get(X86::VINSERTF128rr));
1360 I.setDesc(
TII.get(X86::VINSERTF32x4Zrr));
1362 I.setDesc(
TII.get(X86::VINSERTF64x4Zrr));
1371 I.getOperand(3).setImm(
Index);
1376bool X86InstructionSelector::selectUnmergeValues(
1378 assert((
I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
1379 "unexpected instruction");
1382 unsigned NumDefs =
I.getNumOperands() - 1;
1383 Register SrcReg =
I.getOperand(NumDefs).getReg();
1384 unsigned DefSize =
MRI.getType(
I.getOperand(0).getReg()).getSizeInBits();
1386 for (
unsigned Idx = 0;
Idx < NumDefs; ++
Idx) {
1389 TII.get(TargetOpcode::G_EXTRACT),
I.getOperand(
Idx).getReg())
1393 if (!select(ExtrInst))
1397 I.eraseFromParent();
1401bool X86InstructionSelector::selectMergeValues(
1403 assert((
I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
1404 I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
1405 "unexpected instruction");
1408 Register DstReg =
I.getOperand(0).getReg();
1409 Register SrcReg0 =
I.getOperand(1).getReg();
1411 const LLT DstTy =
MRI.getType(DstReg);
1412 const LLT SrcTy =
MRI.getType(SrcReg0);
1418 Register DefReg =
MRI.createGenericVirtualRegister(DstTy);
1419 MRI.setRegBank(DefReg, RegBank);
1420 if (!emitInsertSubreg(DefReg,
I.getOperand(1).getReg(),
I,
MRI, MF))
1423 for (
unsigned Idx = 2;
Idx <
I.getNumOperands(); ++
Idx) {
1424 Register Tmp =
MRI.createGenericVirtualRegister(DstTy);
1425 MRI.setRegBank(Tmp, RegBank);
1428 TII.get(TargetOpcode::G_INSERT), Tmp)
1435 if (!select(InsertInst))
1440 TII.get(TargetOpcode::COPY), DstReg)
1443 if (!select(CopyInst))
1446 I.eraseFromParent();
1450bool X86InstructionSelector::selectCondBranch(
MachineInstr &
I,
1453 assert((
I.getOpcode() == TargetOpcode::G_BRCOND) &&
"unexpected instruction");
1455 const Register CondReg =
I.getOperand(0).getReg();
1459 *
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::TEST8ri))
1462 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::JCC_1))
1467 I.eraseFromParent();
1474 assert((
I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
1475 "unexpected instruction");
1482 const Register DstReg =
I.getOperand(0).getReg();
1483 const LLT DstTy =
MRI.getType(DstReg);
1486 const DebugLoc &DbgLoc =
I.getDebugLoc();
1489 getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);
1492 const ConstantFP *CFP =
I.getOperand(1).getFPImm();
1495 unsigned char OpFlag = STI.classifyLocalReference(
nullptr);
1501 Register AddrReg =
MRI.createVirtualRegister(&X86::GR64RegClass);
1502 BuildMI(*
I.getParent(),
I, DbgLoc,
TII.get(X86::MOV64ri), AddrReg)
1519 unsigned PICBase = 0;
1528 BuildMI(*
I.getParent(),
I, DbgLoc,
TII.get(Opc), DstReg), CPI, PICBase,
1534 I.eraseFromParent();
1538bool X86InstructionSelector::selectImplicitDefOrPHI(
1540 assert((
I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
1541 I.getOpcode() == TargetOpcode::G_PHI) &&
1542 "unexpected instruction");
1544 Register DstReg =
I.getOperand(0).getReg();
1546 if (!
MRI.getRegClassOrNull(DstReg)) {
1547 const LLT DstTy =
MRI.getType(DstReg);
1550 if (!RBI.constrainGenericRegister(DstReg, *RC,
MRI)) {
1557 if (
I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1558 I.setDesc(
TII.get(X86::IMPLICIT_DEF));
1560 I.setDesc(
TII.get(X86::PHI));
1565bool X86InstructionSelector::selectMulDivRem(
MachineInstr &
I,
1569 assert((
I.getOpcode() == TargetOpcode::G_MUL ||
1570 I.getOpcode() == TargetOpcode::G_SMULH ||
1571 I.getOpcode() == TargetOpcode::G_UMULH ||
1572 I.getOpcode() == TargetOpcode::G_SDIV ||
1573 I.getOpcode() == TargetOpcode::G_SREM ||
1574 I.getOpcode() == TargetOpcode::G_UDIV ||
1575 I.getOpcode() == TargetOpcode::G_UREM) &&
1576 "unexpected instruction");
1578 const Register DstReg =
I.getOperand(0).getReg();
1579 const Register Op1Reg =
I.getOperand(1).getReg();
1580 const Register Op2Reg =
I.getOperand(2).getReg();
1582 const LLT RegTy =
MRI.getType(DstReg);
1583 assert(RegTy ==
MRI.getType(Op1Reg) && RegTy ==
MRI.getType(Op2Reg) &&
1584 "Arguments and return value types must match");
1587 if (!RegRB || RegRB->
getID() != X86::GPRRegBankID)
1590 const static unsigned NumTypes = 4;
1591 const static unsigned NumOps = 7;
1592 const static bool S =
true;
1593 const static bool U =
false;
1594 const static unsigned Copy = TargetOpcode::COPY;
1604 const static struct MulDivRemEntry {
1606 unsigned SizeInBits;
1610 struct MulDivRemResult {
1611 unsigned OpMulDivRem;
1612 unsigned OpSignExtend;
1618 } ResultTable[NumOps];
1619 } OpTable[NumTypes] = {
1624 {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S},
1625 {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S},
1626 {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL,
U},
1627 {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH,
U},
1628 {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AL, S},
1629 {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AH, S},
1630 {X86::MUL8r, 0, X86::MOVZX16rr8, X86::AH,
U},
1636 {X86::IDIV16r, X86::CWD,
Copy, X86::AX, S},
1637 {X86::IDIV16r, X86::CWD,
Copy, X86::DX, S},
1638 {X86::DIV16r, X86::MOV32r0,
Copy, X86::AX,
U},
1639 {X86::DIV16r, X86::MOV32r0,
Copy, X86::DX,
U},
1640 {X86::IMUL16r, X86::MOV32r0,
Copy, X86::AX, S},
1641 {X86::IMUL16r, X86::MOV32r0,
Copy, X86::DX, S},
1642 {X86::MUL16r, X86::MOV32r0,
Copy, X86::DX,
U},
1648 {X86::IDIV32r, X86::CDQ,
Copy, X86::EAX, S},
1649 {X86::IDIV32r, X86::CDQ,
Copy, X86::EDX, S},
1650 {X86::DIV32r, X86::MOV32r0,
Copy, X86::EAX,
U},
1651 {X86::DIV32r, X86::MOV32r0,
Copy, X86::EDX,
U},
1652 {X86::IMUL32r, X86::MOV32r0,
Copy, X86::EAX, S},
1653 {X86::IMUL32r, X86::MOV32r0,
Copy, X86::EDX, S},
1654 {X86::MUL32r, X86::MOV32r0,
Copy, X86::EDX,
U},
1660 {X86::IDIV64r, X86::CQO,
Copy, X86::RAX, S},
1661 {X86::IDIV64r, X86::CQO,
Copy, X86::RDX, S},
1662 {X86::DIV64r, X86::MOV32r0,
Copy, X86::RAX,
U},
1663 {X86::DIV64r, X86::MOV32r0,
Copy, X86::RDX,
U},
1664 {X86::IMUL64r, X86::MOV32r0,
Copy, X86::RAX, S},
1665 {X86::IMUL64r, X86::MOV32r0,
Copy, X86::RDX, S},
1666 {X86::MUL64r, X86::MOV32r0,
Copy, X86::RDX,
U},
1670 auto OpEntryIt =
llvm::find_if(OpTable, [RegTy](
const MulDivRemEntry &El) {
1673 if (OpEntryIt == std::end(OpTable))
1677 switch (
I.getOpcode()) {
1680 case TargetOpcode::G_SDIV:
1683 case TargetOpcode::G_SREM:
1686 case TargetOpcode::G_UDIV:
1689 case TargetOpcode::G_UREM:
1692 case TargetOpcode::G_MUL:
1695 case TargetOpcode::G_SMULH:
1698 case TargetOpcode::G_UMULH:
1703 const MulDivRemEntry &
TypeEntry = *OpEntryIt;
1704 const MulDivRemEntry::MulDivRemResult &OpEntry =
1708 if (!RBI.constrainGenericRegister(Op1Reg, *RegRC,
MRI) ||
1709 !RBI.constrainGenericRegister(Op2Reg, *RegRC,
MRI) ||
1710 !RBI.constrainGenericRegister(DstReg, *RegRC,
MRI)) {
1717 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(OpEntry.OpCopy),
1722 if (OpEntry.OpSignExtend) {
1723 if (OpEntry.IsOpSigned)
1725 TII.get(OpEntry.OpSignExtend));
1727 Register Zero32 =
MRI.createVirtualRegister(&X86::GR32RegClass);
1728 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::MOV32r0),
1737 .
addReg(Zero32, 0, X86::sub_16bit);
1744 TII.get(TargetOpcode::SUBREG_TO_REG),
TypeEntry.HighInReg)
1753 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(OpEntry.OpMulDivRem))
1764 if (OpEntry.ResultReg == X86::AH && STI.is64Bit()) {
1765 Register SourceSuperReg =
MRI.createVirtualRegister(&X86::GR16RegClass);
1766 Register ResultSuperReg =
MRI.createVirtualRegister(&X86::GR16RegClass);
1767 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Copy), SourceSuperReg)
1771 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(X86::SHR16ri),
1777 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY),
1779 .
addReg(ResultSuperReg, 0, X86::sub_8bit);
1781 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY),
1783 .
addReg(OpEntry.ResultReg);
1785 I.eraseFromParent();
1794 unsigned DstReg = Sel.
getReg(0);
1800 LLT Ty =
MRI.getType(DstReg);
1805 OpCmp = X86::CMOV_GR8;
1808 OpCmp = STI.canUseCMOV() ? X86::CMOV16rr : X86::CMOV_GR16;
1811 OpCmp = STI.canUseCMOV() ? X86::CMOV32rr : X86::CMOV_GR32;
1814 assert(STI.is64Bit() && STI.canUseCMOV());
1815 OpCmp = X86::CMOV64rr;
1824 if (!RBI.constrainGenericRegister(DstReg, *DstRC,
MRI)) {
1837 return new X86InstructionSelector(
TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
static const TargetRegisterClass * getRegClass(const MachineInstr &MI, Register Reg)
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
static bool selectDebugInstr(MachineInstr &I, MachineRegisterInfo &MRI, const RegisterBankInfo &RBI)
static bool selectMergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Atomic ordering constants.
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
Implement a low-level type suitable for MachineInstr level instruction selection.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
unsigned const TargetRegisterInfo * TRI
static unsigned selectLoadStoreOp(unsigned GenericOpc, unsigned RegBankID, unsigned OpSize)
const char LLVMTargetMachineRef TM
static StringRef getName(Value *V)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC, const TargetRegisterClass *SrcRC)
static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI)
static const TargetRegisterClass * getRegClassFromGRPhysReg(Register Reg)
static void X86SelectAddress(const MachineInstr &I, const MachineRegisterInfo &MRI, X86AddressMode &AM)
This file declares the targeting of the RegisterBankInfo class for X86.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
ConstantFP - Floating Point Values [float, double].
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Register getCondReg() const
Register getFalseReg() const
Register getTrueReg() const
Register getReg(unsigned Idx) const
Access the Idx'th operand as a register and return it.
virtual bool select(MachineInstr &I)=0
Select the (possibly generic) instruction I to only use target-specific opcodes.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
An instruction for reading from memory.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
const MachineBasicBlock * getParent() const
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
MachineOperand class - Representation of each machine instruction operand.
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
This class implements the register bank concept.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
This class provides the information for the target register banks.
bool isTarget64BitILP32() const
Is this x86_64 with the ILP32 programming model (x32 ABI)?
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
Reg
All possible values of the reg field in the ModR/M byte.
@ MO_GOTOFF
MO_GOTOFF - On a symbol operand this indicates that the immediate is the offset to the location of th...
@ MO_PIC_BASE_OFFSET
MO_PIC_BASE_OFFSET - On a symbol operand this indicates that the immediate should get the value of th...
std::pair< CondCode, bool > getX86ConditionCode(CmpInst::Predicate Predicate)
Return a pair of condition code for the given predicate and whether the instruction operands should b...
StringMapEntry< std::atomic< TypeEntryBody * > > TypeEntry
NodeAddr< DefNode * > Def
This is an optimization pass for GlobalISel generic memory operations.
static bool isGlobalStubReference(unsigned char TargetFlag)
isGlobalStubReference - Return true if the specified TargetFlag operand is a reference to a stub for ...
static bool isGlobalRelativeToPICBase(unsigned char TargetFlag)
isGlobalRelativeToPICBase - Return true if the specified global value reference is relative to a 32-b...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
static const MachineInstrBuilder & addFullAddress(const MachineInstrBuilder &MIB, const X86AddressMode &AM)
InstructionSelector * createX86InstructionSelector(const X86TargetMachine &TM, X86Subtarget &, X86RegisterBankInfo &)
std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
static const MachineInstrBuilder & addConstantPoolReference(const MachineInstrBuilder &MIB, unsigned CPI, unsigned GlobalBaseReg, unsigned char OpFlags)
addConstantPoolReference - This function is used to add a reference to the base of a constant value s...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static const MachineInstrBuilder & addOffset(const MachineInstrBuilder &MIB, int Offset)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
static const MachineInstrBuilder & addDirectMem(const MachineInstrBuilder &MIB, unsigned Reg)
addDirectMem - This function is used to add a direct memory reference to the current instruction -- th...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
X86AddressMode - This struct holds a generalized full x86 address mode.
enum llvm::X86AddressMode::@614 BaseType
union llvm::X86AddressMode::@615 Base