52#define DEBUG_TYPE "x86-instr-info"
54#define GET_INSTRINFO_CTOR_DTOR
55#include "X86GenInstrInfo.inc"
61 cl::desc(
"Disable fusing of spill code into instructions"),
65 cl::desc(
"Print instructions that the allocator wants to"
66 " fuse, but the X86 backend currently can't"),
70 cl::desc(
"Re-materialize load from stub in PIC mode"),
74 cl::desc(
"Clearance between two register writes "
75 "for inserting XOR to avoid partial "
79 "undef-reg-clearance",
80 cl::desc(
"How many idle instructions we would like before "
81 "certain undef register reads"),
// Pin the vtable: this out-of-line definition of the first virtual-like hook
// anchors X86InstrInfo's vtable/type info to this translation unit
// (standard LLVM anchor idiom; see -Wweak-vtables).
void X86InstrInfo::anchor() {}
89 (STI.isTarget64BitLP64() ?
X86::ADJCALLSTACKDOWN64
90 :
X86::ADJCALLSTACKDOWN32),
91 (STI.isTarget64BitLP64() ?
X86::ADJCALLSTACKUP64
92 :
X86::ADJCALLSTACKUP32),
94 Subtarget(STI), RI(STI.getTargetTriple()) {}
97 unsigned OpNum)
const {
101 if (!RC || !Subtarget.hasEGPR())
113 unsigned &SubIdx)
const {
114 switch (
MI.getOpcode()) {
117 case X86::MOVSX16rr8:
118 case X86::MOVZX16rr8:
119 case X86::MOVSX32rr8:
120 case X86::MOVZX32rr8:
121 case X86::MOVSX64rr8:
122 if (!Subtarget.is64Bit())
127 case X86::MOVSX32rr16:
128 case X86::MOVZX32rr16:
129 case X86::MOVSX64rr16:
130 case X86::MOVSX64rr32: {
131 if (
MI.getOperand(0).getSubReg() ||
MI.getOperand(1).getSubReg())
134 SrcReg =
MI.getOperand(1).getReg();
135 DstReg =
MI.getOperand(0).getReg();
136 switch (
MI.getOpcode()) {
139 case X86::MOVSX16rr8:
140 case X86::MOVZX16rr8:
141 case X86::MOVSX32rr8:
142 case X86::MOVZX32rr8:
143 case X86::MOVSX64rr8:
144 SubIdx = X86::sub_8bit;
146 case X86::MOVSX32rr16:
147 case X86::MOVZX32rr16:
148 case X86::MOVSX64rr16:
149 SubIdx = X86::sub_16bit;
151 case X86::MOVSX64rr32:
152 SubIdx = X86::sub_32bit;
162 if (
MI.mayLoad() ||
MI.mayStore())
167 if (
MI.isCopyLike() ||
MI.isInsertSubreg())
170 unsigned Opcode =
MI.getOpcode();
181 if (isBSF(Opcode) || isBSR(Opcode) || isLZCNT(Opcode) || isPOPCNT(Opcode) ||
187 if (isBLCFILL(Opcode) || isBLCI(Opcode) || isBLCIC(Opcode) ||
188 isBLCMSK(Opcode) || isBLCS(Opcode) || isBLSFILL(Opcode) ||
189 isBLSI(Opcode) || isBLSIC(Opcode) || isBLSMSK(Opcode) || isBLSR(Opcode) ||
194 if (isBEXTR(Opcode) || isBZHI(Opcode))
197 if (isROL(Opcode) || isROR(Opcode) || isSAR(Opcode) || isSHL(Opcode) ||
198 isSHR(Opcode) || isSHLD(Opcode) || isSHRD(Opcode))
201 if (isADC(Opcode) || isADD(Opcode) || isAND(Opcode) || isOR(Opcode) ||
202 isSBB(Opcode) || isSUB(Opcode) || isXOR(Opcode))
208 if (isDEC(Opcode) || isINC(Opcode) || isNEG(Opcode))
216 if (isMOVSX(Opcode) || isMOVZX(Opcode) || isMOVSXD(Opcode) || isMOV(Opcode))
219 if (isRORX(Opcode) || isSARX(Opcode) || isSHLX(Opcode) || isSHRX(Opcode))
229 switch (
MI.getOpcode()) {
242 case X86::IMUL64rmi32:
257 case X86::POPCNT16rm:
258 case X86::POPCNT32rm:
259 case X86::POPCNT64rm:
267 case X86::BLCFILL32rm:
268 case X86::BLCFILL64rm:
273 case X86::BLCMSK32rm:
274 case X86::BLCMSK64rm:
277 case X86::BLSFILL32rm:
278 case X86::BLSFILL64rm:
283 case X86::BLSMSK32rm:
284 case X86::BLSMSK64rm:
294 case X86::BEXTRI32mi:
295 case X86::BEXTRI64mi:
348 case X86::CVTTSD2SI64rm:
349 case X86::VCVTTSD2SI64rm:
350 case X86::VCVTTSD2SI64Zrm:
351 case X86::CVTTSD2SIrm:
352 case X86::VCVTTSD2SIrm:
353 case X86::VCVTTSD2SIZrm:
354 case X86::CVTTSS2SI64rm:
355 case X86::VCVTTSS2SI64rm:
356 case X86::VCVTTSS2SI64Zrm:
357 case X86::CVTTSS2SIrm:
358 case X86::VCVTTSS2SIrm:
359 case X86::VCVTTSS2SIZrm:
360 case X86::CVTSI2SDrm:
361 case X86::VCVTSI2SDrm:
362 case X86::VCVTSI2SDZrm:
363 case X86::CVTSI2SSrm:
364 case X86::VCVTSI2SSrm:
365 case X86::VCVTSI2SSZrm:
366 case X86::CVTSI642SDrm:
367 case X86::VCVTSI642SDrm:
368 case X86::VCVTSI642SDZrm:
369 case X86::CVTSI642SSrm:
370 case X86::VCVTSI642SSrm:
371 case X86::VCVTSI642SSZrm:
372 case X86::CVTSS2SDrm:
373 case X86::VCVTSS2SDrm:
374 case X86::VCVTSS2SDZrm:
375 case X86::CVTSD2SSrm:
376 case X86::VCVTSD2SSrm:
377 case X86::VCVTSD2SSZrm:
379 case X86::VCVTTSD2USI64Zrm:
380 case X86::VCVTTSD2USIZrm:
381 case X86::VCVTTSS2USI64Zrm:
382 case X86::VCVTTSS2USIZrm:
383 case X86::VCVTUSI2SDZrm:
384 case X86::VCVTUSI642SDZrm:
385 case X86::VCVTUSI2SSZrm:
386 case X86::VCVTUSI642SSZrm:
390 case X86::MOV8rm_NOREX:
394 case X86::MOVSX16rm8:
395 case X86::MOVSX32rm16:
396 case X86::MOVSX32rm8:
397 case X86::MOVSX32rm8_NOREX:
398 case X86::MOVSX64rm16:
399 case X86::MOVSX64rm32:
400 case X86::MOVSX64rm8:
401 case X86::MOVZX16rm8:
402 case X86::MOVZX32rm16:
403 case X86::MOVZX32rm8:
404 case X86::MOVZX32rm8_NOREX:
405 case X86::MOVZX64rm16:
406 case X86::MOVZX64rm8:
415 if (isFrameInstr(
MI)) {
418 if (!isFrameSetup(
MI))
429 for (
auto E =
MBB->end();
I != E; ++
I) {
430 if (
I->getOpcode() == getCallFrameDestroyOpcode() ||
I->isCall())
436 if (
I->getOpcode() != getCallFrameDestroyOpcode())
439 return -(
I->getOperand(1).
getImm());
444 switch (
MI.getOpcode()) {
463 int &FrameIndex)
const {
483 case X86::KMOVBkm_EVEX:
488 case X86::KMOVWkm_EVEX:
490 case X86::VMOVSHZrm_alt:
495 case X86::MOVSSrm_alt:
497 case X86::VMOVSSrm_alt:
499 case X86::VMOVSSZrm_alt:
501 case X86::KMOVDkm_EVEX:
507 case X86::MOVSDrm_alt:
509 case X86::VMOVSDrm_alt:
511 case X86::VMOVSDZrm_alt:
512 case X86::MMX_MOVD64rm:
513 case X86::MMX_MOVQ64rm:
515 case X86::KMOVQkm_EVEX:
530 case X86::VMOVAPSZ128rm:
531 case X86::VMOVUPSZ128rm:
532 case X86::VMOVAPSZ128rm_NOVLX:
533 case X86::VMOVUPSZ128rm_NOVLX:
534 case X86::VMOVAPDZ128rm:
535 case X86::VMOVUPDZ128rm:
536 case X86::VMOVDQU8Z128rm:
537 case X86::VMOVDQU16Z128rm:
538 case X86::VMOVDQA32Z128rm:
539 case X86::VMOVDQU32Z128rm:
540 case X86::VMOVDQA64Z128rm:
541 case X86::VMOVDQU64Z128rm:
544 case X86::VMOVAPSYrm:
545 case X86::VMOVUPSYrm:
546 case X86::VMOVAPDYrm:
547 case X86::VMOVUPDYrm:
548 case X86::VMOVDQAYrm:
549 case X86::VMOVDQUYrm:
550 case X86::VMOVAPSZ256rm:
551 case X86::VMOVUPSZ256rm:
552 case X86::VMOVAPSZ256rm_NOVLX:
553 case X86::VMOVUPSZ256rm_NOVLX:
554 case X86::VMOVAPDZ256rm:
555 case X86::VMOVUPDZ256rm:
556 case X86::VMOVDQU8Z256rm:
557 case X86::VMOVDQU16Z256rm:
558 case X86::VMOVDQA32Z256rm:
559 case X86::VMOVDQU32Z256rm:
560 case X86::VMOVDQA64Z256rm:
561 case X86::VMOVDQU64Z256rm:
564 case X86::VMOVAPSZrm:
565 case X86::VMOVUPSZrm:
566 case X86::VMOVAPDZrm:
567 case X86::VMOVUPDZrm:
568 case X86::VMOVDQU8Zrm:
569 case X86::VMOVDQU16Zrm:
570 case X86::VMOVDQA32Zrm:
571 case X86::VMOVDQU32Zrm:
572 case X86::VMOVDQA64Zrm:
573 case X86::VMOVDQU64Zrm:
585 case X86::KMOVBmk_EVEX:
590 case X86::KMOVWmk_EVEX:
599 case X86::KMOVDmk_EVEX:
607 case X86::MMX_MOVD64mr:
608 case X86::MMX_MOVQ64mr:
609 case X86::MMX_MOVNTQmr:
611 case X86::KMOVQmk_EVEX:
626 case X86::VMOVUPSZ128mr:
627 case X86::VMOVAPSZ128mr:
628 case X86::VMOVUPSZ128mr_NOVLX:
629 case X86::VMOVAPSZ128mr_NOVLX:
630 case X86::VMOVUPDZ128mr:
631 case X86::VMOVAPDZ128mr:
632 case X86::VMOVDQA32Z128mr:
633 case X86::VMOVDQU32Z128mr:
634 case X86::VMOVDQA64Z128mr:
635 case X86::VMOVDQU64Z128mr:
636 case X86::VMOVDQU8Z128mr:
637 case X86::VMOVDQU16Z128mr:
640 case X86::VMOVUPSYmr:
641 case X86::VMOVAPSYmr:
642 case X86::VMOVUPDYmr:
643 case X86::VMOVAPDYmr:
644 case X86::VMOVDQUYmr:
645 case X86::VMOVDQAYmr:
646 case X86::VMOVUPSZ256mr:
647 case X86::VMOVAPSZ256mr:
648 case X86::VMOVUPSZ256mr_NOVLX:
649 case X86::VMOVAPSZ256mr_NOVLX:
650 case X86::VMOVUPDZ256mr:
651 case X86::VMOVAPDZ256mr:
652 case X86::VMOVDQU8Z256mr:
653 case X86::VMOVDQU16Z256mr:
654 case X86::VMOVDQA32Z256mr:
655 case X86::VMOVDQU32Z256mr:
656 case X86::VMOVDQA64Z256mr:
657 case X86::VMOVDQU64Z256mr:
660 case X86::VMOVUPSZmr:
661 case X86::VMOVAPSZmr:
662 case X86::VMOVUPDZmr:
663 case X86::VMOVAPDZmr:
664 case X86::VMOVDQU8Zmr:
665 case X86::VMOVDQU16Zmr:
666 case X86::VMOVDQA32Zmr:
667 case X86::VMOVDQU32Zmr:
668 case X86::VMOVDQA64Zmr:
669 case X86::VMOVDQU64Zmr:
677 int &FrameIndex)
const {
686 if (
MI.getOperand(0).getSubReg() == 0 && isFrameOperand(
MI, 1, FrameIndex))
687 return MI.getOperand(0).getReg();
692 int &FrameIndex)
const {
703 return MI.getOperand(0).getReg();
710 int &FrameIndex)
const {
720 isFrameOperand(
MI, 0, FrameIndex))
726 int &FrameIndex)
const {
746 if (!BaseReg.isVirtual())
748 bool isPICBase =
false;
750 if (
DefMI.getOpcode() != X86::MOVPC32r)
752 assert(!isPICBase &&
"More than one PIC base?");
760 switch (
MI.getOpcode()) {
766 case X86::IMPLICIT_DEF:
769 case X86::LOAD_STACK_GUARD:
776 case X86::AVX1_SETALLONES:
777 case X86::AVX2_SETALLONES:
778 case X86::AVX512_128_SET0:
779 case X86::AVX512_256_SET0:
780 case X86::AVX512_512_SET0:
781 case X86::AVX512_128_SETALLONES:
782 case X86::AVX512_256_SETALLONES:
783 case X86::AVX512_512_SETALLONES:
784 case X86::AVX512_FsFLD0SD:
785 case X86::AVX512_FsFLD0SH:
786 case X86::AVX512_FsFLD0SS:
787 case X86::AVX512_FsFLD0F128:
792 case X86::FsFLD0F128:
802 case X86::MOV32ImmSExti8:
807 case X86::MOV64ImmSExti8:
809 case X86::V_SETALLONES:
815 case X86::PTILEZEROV:
819 case X86::MOV8rm_NOREX:
824 case X86::MOVSSrm_alt:
826 case X86::MOVSDrm_alt:
834 case X86::VMOVSSrm_alt:
836 case X86::VMOVSDrm_alt:
843 case X86::VMOVAPSYrm:
844 case X86::VMOVUPSYrm:
845 case X86::VMOVAPDYrm:
846 case X86::VMOVUPDYrm:
847 case X86::VMOVDQAYrm:
848 case X86::VMOVDQUYrm:
849 case X86::MMX_MOVD64rm:
850 case X86::MMX_MOVQ64rm:
851 case X86::VBROADCASTSSrm:
852 case X86::VBROADCASTSSYrm:
853 case X86::VBROADCASTSDYrm:
855 case X86::VPBROADCASTBZ128rm:
856 case X86::VPBROADCASTBZ256rm:
857 case X86::VPBROADCASTBZrm:
858 case X86::VBROADCASTF32X2Z256rm:
859 case X86::VBROADCASTF32X2Zrm:
860 case X86::VBROADCASTI32X2Z128rm:
861 case X86::VBROADCASTI32X2Z256rm:
862 case X86::VBROADCASTI32X2Zrm:
863 case X86::VPBROADCASTWZ128rm:
864 case X86::VPBROADCASTWZ256rm:
865 case X86::VPBROADCASTWZrm:
866 case X86::VPBROADCASTDZ128rm:
867 case X86::VPBROADCASTDZ256rm:
868 case X86::VPBROADCASTDZrm:
869 case X86::VBROADCASTSSZ128rm:
870 case X86::VBROADCASTSSZ256rm:
871 case X86::VBROADCASTSSZrm:
872 case X86::VPBROADCASTQZ128rm:
873 case X86::VPBROADCASTQZ256rm:
874 case X86::VPBROADCASTQZrm:
875 case X86::VBROADCASTSDZ256rm:
876 case X86::VBROADCASTSDZrm:
878 case X86::VMOVSSZrm_alt:
880 case X86::VMOVSDZrm_alt:
882 case X86::VMOVSHZrm_alt:
883 case X86::VMOVAPDZ128rm:
884 case X86::VMOVAPDZ256rm:
885 case X86::VMOVAPDZrm:
886 case X86::VMOVAPSZ128rm:
887 case X86::VMOVAPSZ256rm:
888 case X86::VMOVAPSZ128rm_NOVLX:
889 case X86::VMOVAPSZ256rm_NOVLX:
890 case X86::VMOVAPSZrm:
891 case X86::VMOVDQA32Z128rm:
892 case X86::VMOVDQA32Z256rm:
893 case X86::VMOVDQA32Zrm:
894 case X86::VMOVDQA64Z128rm:
895 case X86::VMOVDQA64Z256rm:
896 case X86::VMOVDQA64Zrm:
897 case X86::VMOVDQU16Z128rm:
898 case X86::VMOVDQU16Z256rm:
899 case X86::VMOVDQU16Zrm:
900 case X86::VMOVDQU32Z128rm:
901 case X86::VMOVDQU32Z256rm:
902 case X86::VMOVDQU32Zrm:
903 case X86::VMOVDQU64Z128rm:
904 case X86::VMOVDQU64Z256rm:
905 case X86::VMOVDQU64Zrm:
906 case X86::VMOVDQU8Z128rm:
907 case X86::VMOVDQU8Z256rm:
908 case X86::VMOVDQU8Zrm:
909 case X86::VMOVUPDZ128rm:
910 case X86::VMOVUPDZ256rm:
911 case X86::VMOVUPDZrm:
912 case X86::VMOVUPSZ128rm:
913 case X86::VMOVUPSZ256rm:
914 case X86::VMOVUPSZ128rm_NOVLX:
915 case X86::VMOVUPSZ256rm_NOVLX:
916 case X86::VMOVUPSZrm: {
922 MI.isDereferenceableInvariantLoad()) {
924 if (BaseReg == 0 || BaseReg == X86::RIP)
966 if (ClobbersEFLAGS &&
MBB.computeRegisterLiveness(&
TRI, X86::EFLAGS,
I) !=
1001 if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS &&
1011 unsigned ShiftAmtOperandIdx) {
1013 unsigned ShiftCountMask = (
MI.getDesc().TSFlags &
X86II::REX_W) ? 63 : 31;
1014 unsigned Imm =
MI.getOperand(ShiftAmtOperandIdx).getImm();
1015 return Imm & ShiftCountMask;
1026 return ShAmt < 4 && ShAmt > 0;
1033 bool &NoSignFlag,
bool &ClearsOverflowFlag) {
1034 if (!(CmpValDefInstr.
getOpcode() == X86::SUBREG_TO_REG &&
1035 CmpInstr.
getOpcode() == X86::TEST64rr) &&
1036 !(CmpValDefInstr.
getOpcode() == X86::COPY &&
1044 "CmpInstr is an analyzable TEST16rr/TEST64rr, and "
1045 "`X86InstrInfo::analyzeCompare` requires two reg operands are the"
1054 "Caller guarantees that TEST64rr is a user of SUBREG_TO_REG or TEST16rr "
1055 "is a user of COPY sub16bit.");
1057 if (CmpInstr.
getOpcode() == X86::TEST16rr) {
1066 if (!((VregDefInstr->
getOpcode() == X86::AND32ri ||
1067 VregDefInstr->
getOpcode() == X86::AND64ri32) &&
1072 if (CmpInstr.
getOpcode() == X86::TEST64rr) {
1086 assert(VregDefInstr &&
"Must have a definition (SSA)");
1096 if (X86::isAND(VregDefInstr->
getOpcode()) &&
1117 if (Instr.modifiesRegister(X86::EFLAGS,
TRI))
1121 *AndInstr = VregDefInstr;
1142 ClearsOverflowFlag =
true;
1150 unsigned &NewSrcSubReg,
bool &isKill,
1156 RC =
Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
1158 RC =
Opc != X86::LEA32r ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
1161 unsigned SubReg = Src.getSubReg();
1162 isKill =
MI.killsRegister(SrcReg,
nullptr);
1164 NewSrcSubReg = X86::NoSubRegister;
1168 if (
Opc != X86::LEA64_32r) {
1171 assert(!Src.isUndef() &&
"Undef op doesn't need optimization");
1188 assert(!Src.isUndef() &&
"Undef op doesn't need optimization");
1193 NewSrcSubReg = X86::NoSubRegister;
1219MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(
unsigned MIOpc,
1223 bool Is8BitOp)
const {
1228 RegInfo.getTargetRegisterInfo()->getRegSizeInBits(
1229 *RegInfo.getRegClass(
MI.getOperand(0).getReg())) == 16) &&
1230 "Unexpected type for LEA transform");
1239 if (!Subtarget.is64Bit())
1242 unsigned Opcode = X86::LEA64_32r;
1243 Register InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
1244 Register OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass);
1257 unsigned SrcSubReg =
MI.getOperand(1).getSubReg();
1259 unsigned Src2SubReg;
1260 bool IsDead =
MI.getOperand(0).isDead();
1261 bool IsKill =
MI.getOperand(1).isKill();
1262 unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit;
1263 assert(!
MI.getOperand(1).isUndef() &&
"Undef op doesn't need optimization");
1275#define CASE_NF(OP) \
1283 unsigned ShAmt =
MI.getOperand(2).getImm();
1301 case X86::ADD8ri_DB:
1302 case X86::ADD16ri_DB:
1307 case X86::ADD8rr_DB:
1308 case X86::ADD16rr_DB: {
1309 Src2 =
MI.getOperand(2).getReg();
1310 Src2SubReg =
MI.getOperand(2).getSubReg();
1311 bool IsKill2 =
MI.getOperand(2).isKill();
1312 assert(!
MI.getOperand(2).isUndef() &&
"Undef op doesn't need optimization");
1316 addRegReg(MIB, InRegLEA,
true, X86::NoSubRegister, InRegLEA,
false,
1317 X86::NoSubRegister);
1319 if (Subtarget.is64Bit())
1325 ImpDef2 =
BuildMI(
MBB, &*MIB,
MI.getDebugLoc(),
get(X86::IMPLICIT_DEF),
1327 InsMI2 =
BuildMI(
MBB, &*MIB,
MI.getDebugLoc(),
get(TargetOpcode::COPY))
1330 addRegReg(MIB, InRegLEA,
true, X86::NoSubRegister, InRegLEA2,
true,
1331 X86::NoSubRegister);
1333 if (LV && IsKill2 && InsMI2)
1339 MachineInstr *NewMI = MIB;
1340 MachineInstr *ExtMI =
1388 LiveRange::Segment *DestSeg =
1429 if (
MI.getNumOperands() > 2)
1430 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).isUndef())
1435 unsigned SrcSubReg, SrcSubReg2;
1436 bool Is64Bit = Subtarget.is64Bit();
1438 bool Is8BitOp =
false;
1439 unsigned NumRegOperands = 2;
1440 unsigned MIOpc =
MI.getOpcode();
1445 assert(
MI.getNumOperands() >= 3 &&
"Unknown shift instruction!");
1452 Src.getReg(), &X86::GR64_NOSPRegClass))
1455 NewMI =
BuildMI(MF,
MI.getDebugLoc(),
get(X86::LEA64r))
1465 assert(
MI.getNumOperands() >= 3 &&
"Unknown shift instruction!");
1470 unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1476 isKill, ImplicitOp, LV, LIS))
1487 if (ImplicitOp.
getReg() != 0)
1488 MIB.
add(ImplicitOp);
1492 if (LV && SrcReg != Src.getReg())
1500 assert(
MI.getNumOperands() >= 3 &&
"Unknown shift instruction!");
1504 return convertToThreeAddressWithLEA(MIOpc,
MI, LV, LIS, Is8BitOp);
1508 assert(
MI.getNumOperands() >= 2 &&
"Unknown inc instruction!");
1509 unsigned Opc = (MIOpc == X86::INC64r || MIOpc == X86::INC64r_NF)
1511 : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
1515 isKill, ImplicitOp, LV, LIS))
1521 if (ImplicitOp.
getReg() != 0)
1522 MIB.
add(ImplicitOp);
1527 if (LV && SrcReg != Src.getReg())
1533 assert(
MI.getNumOperands() >= 2 &&
"Unknown dec instruction!");
1534 unsigned Opc = (MIOpc == X86::DEC64r || MIOpc == X86::DEC64r_NF)
1536 : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
1541 isKill, ImplicitOp, LV, LIS))
1547 if (ImplicitOp.
getReg() != 0)
1548 MIB.
add(ImplicitOp);
1553 if (LV && SrcReg != Src.getReg())
1563 return convertToThreeAddressWithLEA(MIOpc,
MI, LV, LIS, Is8BitOp);
1566 case X86::ADD64rr_DB:
1567 case X86::ADD32rr_DB: {
1568 assert(
MI.getNumOperands() >= 3 &&
"Unknown add instruction!");
1570 if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_NF ||
1571 MIOpc == X86::ADD64rr_DB)
1574 Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1580 isKill2, ImplicitOp2, LV, LIS))
1585 if (Src.getReg() == Src2.
getReg()) {
1590 SrcSubReg = SrcSubReg2;
1593 isKill, ImplicitOp, LV, LIS))
1598 if (ImplicitOp.
getReg() != 0)
1599 MIB.
add(ImplicitOp);
1600 if (ImplicitOp2.
getReg() != 0)
1601 MIB.
add(ImplicitOp2);
1604 addRegReg(MIB, SrcReg, isKill, SrcSubReg, SrcReg2, isKill2, SrcSubReg2);
1608 if (SrcReg2 != Src2.
getReg())
1610 if (SrcReg != SrcReg2 && SrcReg != Src.getReg())
1617 case X86::ADD8rr_DB:
1621 case X86::ADD16rr_DB:
1622 return convertToThreeAddressWithLEA(MIOpc,
MI, LV, LIS, Is8BitOp);
1624 case X86::ADD64ri32_DB:
1625 assert(
MI.getNumOperands() >= 3 &&
"Unknown add instruction!");
1627 BuildMI(MF,
MI.getDebugLoc(),
get(X86::LEA64r)).add(Dest).add(Src),
1631 case X86::ADD32ri_DB: {
1632 assert(
MI.getNumOperands() >= 3 &&
"Unknown add instruction!");
1633 unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1638 isKill, ImplicitOp, LV, LIS))
1645 if (ImplicitOp.
getReg() != 0)
1646 MIB.
add(ImplicitOp);
1651 if (LV && SrcReg != Src.getReg())
1656 case X86::ADD8ri_DB:
1660 case X86::ADD16ri_DB:
1661 return convertToThreeAddressWithLEA(MIOpc,
MI, LV, LIS, Is8BitOp);
1667 if (!
MI.getOperand(2).isImm())
1669 int64_t Imm =
MI.getOperand(2).getImm();
1673 assert(
MI.getNumOperands() >= 3 &&
"Unknown add instruction!");
1674 unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1679 isKill, ImplicitOp, LV, LIS))
1686 if (ImplicitOp.
getReg() != 0)
1687 MIB.
add(ImplicitOp);
1692 if (LV && SrcReg != Src.getReg())
1698 if (!
MI.getOperand(2).isImm())
1700 int64_t Imm =
MI.getOperand(2).getImm();
1704 assert(
MI.getNumOperands() >= 3 &&
"Unknown sub instruction!");
1712 case X86::VMOVDQU8Z128rmk:
1713 case X86::VMOVDQU8Z256rmk:
1714 case X86::VMOVDQU8Zrmk:
1715 case X86::VMOVDQU16Z128rmk:
1716 case X86::VMOVDQU16Z256rmk:
1717 case X86::VMOVDQU16Zrmk:
1718 case X86::VMOVDQU32Z128rmk:
1719 case X86::VMOVDQA32Z128rmk:
1720 case X86::VMOVDQU32Z256rmk:
1721 case X86::VMOVDQA32Z256rmk:
1722 case X86::VMOVDQU32Zrmk:
1723 case X86::VMOVDQA32Zrmk:
1724 case X86::VMOVDQU64Z128rmk:
1725 case X86::VMOVDQA64Z128rmk:
1726 case X86::VMOVDQU64Z256rmk:
1727 case X86::VMOVDQA64Z256rmk:
1728 case X86::VMOVDQU64Zrmk:
1729 case X86::VMOVDQA64Zrmk:
1730 case X86::VMOVUPDZ128rmk:
1731 case X86::VMOVAPDZ128rmk:
1732 case X86::VMOVUPDZ256rmk:
1733 case X86::VMOVAPDZ256rmk:
1734 case X86::VMOVUPDZrmk:
1735 case X86::VMOVAPDZrmk:
1736 case X86::VMOVUPSZ128rmk:
1737 case X86::VMOVAPSZ128rmk:
1738 case X86::VMOVUPSZ256rmk:
1739 case X86::VMOVAPSZ256rmk:
1740 case X86::VMOVUPSZrmk:
1741 case X86::VMOVAPSZrmk:
1742 case X86::VBROADCASTSDZ256rmk:
1743 case X86::VBROADCASTSDZrmk:
1744 case X86::VBROADCASTSSZ128rmk:
1745 case X86::VBROADCASTSSZ256rmk:
1746 case X86::VBROADCASTSSZrmk:
1747 case X86::VPBROADCASTDZ128rmk:
1748 case X86::VPBROADCASTDZ256rmk:
1749 case X86::VPBROADCASTDZrmk:
1750 case X86::VPBROADCASTQZ128rmk:
1751 case X86::VPBROADCASTQZ256rmk:
1752 case X86::VPBROADCASTQZrmk: {
1757 case X86::VMOVDQU8Z128rmk:
1758 Opc = X86::VPBLENDMBZ128rmk;
1760 case X86::VMOVDQU8Z256rmk:
1761 Opc = X86::VPBLENDMBZ256rmk;
1763 case X86::VMOVDQU8Zrmk:
1764 Opc = X86::VPBLENDMBZrmk;
1766 case X86::VMOVDQU16Z128rmk:
1767 Opc = X86::VPBLENDMWZ128rmk;
1769 case X86::VMOVDQU16Z256rmk:
1770 Opc = X86::VPBLENDMWZ256rmk;
1772 case X86::VMOVDQU16Zrmk:
1773 Opc = X86::VPBLENDMWZrmk;
1775 case X86::VMOVDQU32Z128rmk:
1776 Opc = X86::VPBLENDMDZ128rmk;
1778 case X86::VMOVDQU32Z256rmk:
1779 Opc = X86::VPBLENDMDZ256rmk;
1781 case X86::VMOVDQU32Zrmk:
1782 Opc = X86::VPBLENDMDZrmk;
1784 case X86::VMOVDQU64Z128rmk:
1785 Opc = X86::VPBLENDMQZ128rmk;
1787 case X86::VMOVDQU64Z256rmk:
1788 Opc = X86::VPBLENDMQZ256rmk;
1790 case X86::VMOVDQU64Zrmk:
1791 Opc = X86::VPBLENDMQZrmk;
1793 case X86::VMOVUPDZ128rmk:
1794 Opc = X86::VBLENDMPDZ128rmk;
1796 case X86::VMOVUPDZ256rmk:
1797 Opc = X86::VBLENDMPDZ256rmk;
1799 case X86::VMOVUPDZrmk:
1800 Opc = X86::VBLENDMPDZrmk;
1802 case X86::VMOVUPSZ128rmk:
1803 Opc = X86::VBLENDMPSZ128rmk;
1805 case X86::VMOVUPSZ256rmk:
1806 Opc = X86::VBLENDMPSZ256rmk;
1808 case X86::VMOVUPSZrmk:
1809 Opc = X86::VBLENDMPSZrmk;
1811 case X86::VMOVDQA32Z128rmk:
1812 Opc = X86::VPBLENDMDZ128rmk;
1814 case X86::VMOVDQA32Z256rmk:
1815 Opc = X86::VPBLENDMDZ256rmk;
1817 case X86::VMOVDQA32Zrmk:
1818 Opc = X86::VPBLENDMDZrmk;
1820 case X86::VMOVDQA64Z128rmk:
1821 Opc = X86::VPBLENDMQZ128rmk;
1823 case X86::VMOVDQA64Z256rmk:
1824 Opc = X86::VPBLENDMQZ256rmk;
1826 case X86::VMOVDQA64Zrmk:
1827 Opc = X86::VPBLENDMQZrmk;
1829 case X86::VMOVAPDZ128rmk:
1830 Opc = X86::VBLENDMPDZ128rmk;
1832 case X86::VMOVAPDZ256rmk:
1833 Opc = X86::VBLENDMPDZ256rmk;
1835 case X86::VMOVAPDZrmk:
1836 Opc = X86::VBLENDMPDZrmk;
1838 case X86::VMOVAPSZ128rmk:
1839 Opc = X86::VBLENDMPSZ128rmk;
1841 case X86::VMOVAPSZ256rmk:
1842 Opc = X86::VBLENDMPSZ256rmk;
1844 case X86::VMOVAPSZrmk:
1845 Opc = X86::VBLENDMPSZrmk;
1847 case X86::VBROADCASTSDZ256rmk:
1848 Opc = X86::VBLENDMPDZ256rmbk;
1850 case X86::VBROADCASTSDZrmk:
1851 Opc = X86::VBLENDMPDZrmbk;
1853 case X86::VBROADCASTSSZ128rmk:
1854 Opc = X86::VBLENDMPSZ128rmbk;
1856 case X86::VBROADCASTSSZ256rmk:
1857 Opc = X86::VBLENDMPSZ256rmbk;
1859 case X86::VBROADCASTSSZrmk:
1860 Opc = X86::VBLENDMPSZrmbk;
1862 case X86::VPBROADCASTDZ128rmk:
1863 Opc = X86::VPBLENDMDZ128rmbk;
1865 case X86::VPBROADCASTDZ256rmk:
1866 Opc = X86::VPBLENDMDZ256rmbk;
1868 case X86::VPBROADCASTDZrmk:
1869 Opc = X86::VPBLENDMDZrmbk;
1871 case X86::VPBROADCASTQZ128rmk:
1872 Opc = X86::VPBLENDMQZ128rmbk;
1874 case X86::VPBROADCASTQZ256rmk:
1875 Opc = X86::VPBLENDMQZ256rmbk;
1877 case X86::VPBROADCASTQZrmk:
1878 Opc = X86::VPBLENDMQZrmbk;
1884 .
add(
MI.getOperand(2))
1886 .
add(
MI.getOperand(3))
1887 .
add(
MI.getOperand(4))
1888 .
add(
MI.getOperand(5))
1889 .
add(
MI.getOperand(6))
1890 .
add(
MI.getOperand(7));
1895 case X86::VMOVDQU8Z128rrk:
1896 case X86::VMOVDQU8Z256rrk:
1897 case X86::VMOVDQU8Zrrk:
1898 case X86::VMOVDQU16Z128rrk:
1899 case X86::VMOVDQU16Z256rrk:
1900 case X86::VMOVDQU16Zrrk:
1901 case X86::VMOVDQU32Z128rrk:
1902 case X86::VMOVDQA32Z128rrk:
1903 case X86::VMOVDQU32Z256rrk:
1904 case X86::VMOVDQA32Z256rrk:
1905 case X86::VMOVDQU32Zrrk:
1906 case X86::VMOVDQA32Zrrk:
1907 case X86::VMOVDQU64Z128rrk:
1908 case X86::VMOVDQA64Z128rrk:
1909 case X86::VMOVDQU64Z256rrk:
1910 case X86::VMOVDQA64Z256rrk:
1911 case X86::VMOVDQU64Zrrk:
1912 case X86::VMOVDQA64Zrrk:
1913 case X86::VMOVUPDZ128rrk:
1914 case X86::VMOVAPDZ128rrk:
1915 case X86::VMOVUPDZ256rrk:
1916 case X86::VMOVAPDZ256rrk:
1917 case X86::VMOVUPDZrrk:
1918 case X86::VMOVAPDZrrk:
1919 case X86::VMOVUPSZ128rrk:
1920 case X86::VMOVAPSZ128rrk:
1921 case X86::VMOVUPSZ256rrk:
1922 case X86::VMOVAPSZ256rrk:
1923 case X86::VMOVUPSZrrk:
1924 case X86::VMOVAPSZrrk: {
1929 case X86::VMOVDQU8Z128rrk:
1930 Opc = X86::VPBLENDMBZ128rrk;
1932 case X86::VMOVDQU8Z256rrk:
1933 Opc = X86::VPBLENDMBZ256rrk;
1935 case X86::VMOVDQU8Zrrk:
1936 Opc = X86::VPBLENDMBZrrk;
1938 case X86::VMOVDQU16Z128rrk:
1939 Opc = X86::VPBLENDMWZ128rrk;
1941 case X86::VMOVDQU16Z256rrk:
1942 Opc = X86::VPBLENDMWZ256rrk;
1944 case X86::VMOVDQU16Zrrk:
1945 Opc = X86::VPBLENDMWZrrk;
1947 case X86::VMOVDQU32Z128rrk:
1948 Opc = X86::VPBLENDMDZ128rrk;
1950 case X86::VMOVDQU32Z256rrk:
1951 Opc = X86::VPBLENDMDZ256rrk;
1953 case X86::VMOVDQU32Zrrk:
1954 Opc = X86::VPBLENDMDZrrk;
1956 case X86::VMOVDQU64Z128rrk:
1957 Opc = X86::VPBLENDMQZ128rrk;
1959 case X86::VMOVDQU64Z256rrk:
1960 Opc = X86::VPBLENDMQZ256rrk;
1962 case X86::VMOVDQU64Zrrk:
1963 Opc = X86::VPBLENDMQZrrk;
1965 case X86::VMOVUPDZ128rrk:
1966 Opc = X86::VBLENDMPDZ128rrk;
1968 case X86::VMOVUPDZ256rrk:
1969 Opc = X86::VBLENDMPDZ256rrk;
1971 case X86::VMOVUPDZrrk:
1972 Opc = X86::VBLENDMPDZrrk;
1974 case X86::VMOVUPSZ128rrk:
1975 Opc = X86::VBLENDMPSZ128rrk;
1977 case X86::VMOVUPSZ256rrk:
1978 Opc = X86::VBLENDMPSZ256rrk;
1980 case X86::VMOVUPSZrrk:
1981 Opc = X86::VBLENDMPSZrrk;
1983 case X86::VMOVDQA32Z128rrk:
1984 Opc = X86::VPBLENDMDZ128rrk;
1986 case X86::VMOVDQA32Z256rrk:
1987 Opc = X86::VPBLENDMDZ256rrk;
1989 case X86::VMOVDQA32Zrrk:
1990 Opc = X86::VPBLENDMDZrrk;
1992 case X86::VMOVDQA64Z128rrk:
1993 Opc = X86::VPBLENDMQZ128rrk;
1995 case X86::VMOVDQA64Z256rrk:
1996 Opc = X86::VPBLENDMQZ256rrk;
1998 case X86::VMOVDQA64Zrrk:
1999 Opc = X86::VPBLENDMQZrrk;
2001 case X86::VMOVAPDZ128rrk:
2002 Opc = X86::VBLENDMPDZ128rrk;
2004 case X86::VMOVAPDZ256rrk:
2005 Opc = X86::VBLENDMPDZ256rrk;
2007 case X86::VMOVAPDZrrk:
2008 Opc = X86::VBLENDMPDZrrk;
2010 case X86::VMOVAPSZ128rrk:
2011 Opc = X86::VBLENDMPSZ128rrk;
2013 case X86::VMOVAPSZ256rrk:
2014 Opc = X86::VBLENDMPSZ256rrk;
2016 case X86::VMOVAPSZrrk:
2017 Opc = X86::VBLENDMPSZrrk;
2023 .
add(
MI.getOperand(2))
2025 .
add(
MI.getOperand(3));
2036 for (
unsigned I = 0;
I < NumRegOperands; ++
I) {
2038 if (
Op.isReg() && (
Op.isDead() ||
Op.isKill()))
2044 MBB.insert(
MI.getIterator(), NewMI);
2065 unsigned SrcOpIdx2) {
2067 if (SrcOpIdx1 > SrcOpIdx2)
2070 unsigned Op1 = 1, Op2 = 2, Op3 = 3;
2076 if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2)
2078 if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3)
2080 if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3)
2089 unsigned Opc =
MI.getOpcode();
2098 "Intrinsic instructions can't commute operand 1");
2103 assert(Case < 3 &&
"Unexpected case number!");
2108 const unsigned Form132Index = 0;
2109 const unsigned Form213Index = 1;
2110 const unsigned Form231Index = 2;
2111 static const unsigned FormMapping[][3] = {
2116 {Form231Index, Form213Index, Form132Index},
2121 {Form132Index, Form231Index, Form213Index},
2126 {Form213Index, Form132Index, Form231Index}};
2128 unsigned FMAForms[3];
2134 for (
unsigned FormIndex = 0; FormIndex < 3; FormIndex++)
2135 if (
Opc == FMAForms[FormIndex])
2136 return FMAForms[FormMapping[Case][FormIndex]];
2142 unsigned SrcOpIdx2) {
2146 assert(Case < 3 &&
"Unexpected case value!");
2149 static const uint8_t SwapMasks[3][4] = {
2150 {0x04, 0x10, 0x08, 0x20},
2151 {0x02, 0x10, 0x08, 0x40},
2152 {0x02, 0x04, 0x20, 0x40},
2155 uint8_t Imm =
MI.getOperand(
MI.getNumOperands() - 1).getImm();
2157 uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] |
2158 SwapMasks[Case][2] | SwapMasks[Case][3]);
2160 if (Imm & SwapMasks[Case][0])
2161 NewImm |= SwapMasks[Case][1];
2162 if (Imm & SwapMasks[Case][1])
2163 NewImm |= SwapMasks[Case][0];
2164 if (Imm & SwapMasks[Case][2])
2165 NewImm |= SwapMasks[Case][3];
2166 if (Imm & SwapMasks[Case][3])
2167 NewImm |= SwapMasks[Case][2];
2168 MI.getOperand(
MI.getNumOperands() - 1).setImm(NewImm);
2174#define VPERM_CASES(Suffix) \
2175 case X86::VPERMI2##Suffix##Z128rr: \
2176 case X86::VPERMT2##Suffix##Z128rr: \
2177 case X86::VPERMI2##Suffix##Z256rr: \
2178 case X86::VPERMT2##Suffix##Z256rr: \
2179 case X86::VPERMI2##Suffix##Zrr: \
2180 case X86::VPERMT2##Suffix##Zrr: \
2181 case X86::VPERMI2##Suffix##Z128rm: \
2182 case X86::VPERMT2##Suffix##Z128rm: \
2183 case X86::VPERMI2##Suffix##Z256rm: \
2184 case X86::VPERMT2##Suffix##Z256rm: \
2185 case X86::VPERMI2##Suffix##Zrm: \
2186 case X86::VPERMT2##Suffix##Zrm: \
2187 case X86::VPERMI2##Suffix##Z128rrkz: \
2188 case X86::VPERMT2##Suffix##Z128rrkz: \
2189 case X86::VPERMI2##Suffix##Z256rrkz: \
2190 case X86::VPERMT2##Suffix##Z256rrkz: \
2191 case X86::VPERMI2##Suffix##Zrrkz: \
2192 case X86::VPERMT2##Suffix##Zrrkz: \
2193 case X86::VPERMI2##Suffix##Z128rmkz: \
2194 case X86::VPERMT2##Suffix##Z128rmkz: \
2195 case X86::VPERMI2##Suffix##Z256rmkz: \
2196 case X86::VPERMT2##Suffix##Z256rmkz: \
2197 case X86::VPERMI2##Suffix##Zrmkz: \
2198 case X86::VPERMT2##Suffix##Zrmkz:
2200#define VPERM_CASES_BROADCAST(Suffix) \
2201 VPERM_CASES(Suffix) \
2202 case X86::VPERMI2##Suffix##Z128rmb: \
2203 case X86::VPERMT2##Suffix##Z128rmb: \
2204 case X86::VPERMI2##Suffix##Z256rmb: \
2205 case X86::VPERMT2##Suffix##Z256rmb: \
2206 case X86::VPERMI2##Suffix##Zrmb: \
2207 case X86::VPERMT2##Suffix##Zrmb: \
2208 case X86::VPERMI2##Suffix##Z128rmbkz: \
2209 case X86::VPERMT2##Suffix##Z128rmbkz: \
2210 case X86::VPERMI2##Suffix##Z256rmbkz: \
2211 case X86::VPERMT2##Suffix##Z256rmbkz: \
2212 case X86::VPERMI2##Suffix##Zrmbkz: \
2213 case X86::VPERMT2##Suffix##Zrmbkz:
2226#undef VPERM_CASES_BROADCAST
2233#define VPERM_CASES(Orig, New) \
2234 case X86::Orig##Z128rr: \
2235 return X86::New##Z128rr; \
2236 case X86::Orig##Z128rrkz: \
2237 return X86::New##Z128rrkz; \
2238 case X86::Orig##Z128rm: \
2239 return X86::New##Z128rm; \
2240 case X86::Orig##Z128rmkz: \
2241 return X86::New##Z128rmkz; \
2242 case X86::Orig##Z256rr: \
2243 return X86::New##Z256rr; \
2244 case X86::Orig##Z256rrkz: \
2245 return X86::New##Z256rrkz; \
2246 case X86::Orig##Z256rm: \
2247 return X86::New##Z256rm; \
2248 case X86::Orig##Z256rmkz: \
2249 return X86::New##Z256rmkz; \
2250 case X86::Orig##Zrr: \
2251 return X86::New##Zrr; \
2252 case X86::Orig##Zrrkz: \
2253 return X86::New##Zrrkz; \
2254 case X86::Orig##Zrm: \
2255 return X86::New##Zrm; \
2256 case X86::Orig##Zrmkz: \
2257 return X86::New##Zrmkz;
2259#define VPERM_CASES_BROADCAST(Orig, New) \
2260 VPERM_CASES(Orig, New) \
2261 case X86::Orig##Z128rmb: \
2262 return X86::New##Z128rmb; \
2263 case X86::Orig##Z128rmbkz: \
2264 return X86::New##Z128rmbkz; \
2265 case X86::Orig##Z256rmb: \
2266 return X86::New##Z256rmb; \
2267 case X86::Orig##Z256rmbkz: \
2268 return X86::New##Z256rmbkz; \
2269 case X86::Orig##Zrmb: \
2270 return X86::New##Zrmb; \
2271 case X86::Orig##Zrmbkz: \
2272 return X86::New##Zrmbkz;
2290#undef VPERM_CASES_BROADCAST
2296 unsigned OpIdx2)
const {
2298 return std::exchange(NewMI,
false)
2299 ?
MI.getParent()->getParent()->CloneMachineInstr(&
MI)
2303 unsigned Opc =
MI.getOpcode();
2305#define CASE_ND(OP) \
2321#define FROM_TO_SIZE(A, B, S) \
2327 Opc = X86::B##_ND; \
2335 Opc = X86::A##_ND; \
2344 WorkingMI = CloneIfNew(
MI);
2353 WorkingMI = CloneIfNew(
MI);
2355 get(X86::PFSUBRrr ==
Opc ? X86::PFSUBrr : X86::PFSUBRrr));
2357 case X86::BLENDPDrri:
2358 case X86::BLENDPSrri:
2359 case X86::PBLENDWrri:
2360 case X86::VBLENDPDrri:
2361 case X86::VBLENDPSrri:
2362 case X86::VBLENDPDYrri:
2363 case X86::VBLENDPSYrri:
2364 case X86::VPBLENDDrri:
2365 case X86::VPBLENDWrri:
2366 case X86::VPBLENDDYrri:
2367 case X86::VPBLENDWYrri: {
2372 case X86::BLENDPDrri:
2373 Mask = (int8_t)0x03;
2375 case X86::BLENDPSrri:
2376 Mask = (int8_t)0x0F;
2378 case X86::PBLENDWrri:
2379 Mask = (int8_t)0xFF;
2381 case X86::VBLENDPDrri:
2382 Mask = (int8_t)0x03;
2384 case X86::VBLENDPSrri:
2385 Mask = (int8_t)0x0F;
2387 case X86::VBLENDPDYrri:
2388 Mask = (int8_t)0x0F;
2390 case X86::VBLENDPSYrri:
2391 Mask = (int8_t)0xFF;
2393 case X86::VPBLENDDrri:
2394 Mask = (int8_t)0x0F;
2396 case X86::VPBLENDWrri:
2397 Mask = (int8_t)0xFF;
2399 case X86::VPBLENDDYrri:
2400 Mask = (int8_t)0xFF;
2402 case X86::VPBLENDWYrri:
2403 Mask = (int8_t)0xFF;
2409 int8_t Imm =
MI.getOperand(3).getImm() & Mask;
2410 WorkingMI = CloneIfNew(
MI);
2414 case X86::INSERTPSrri:
2415 case X86::VINSERTPSrri:
2416 case X86::VINSERTPSZrri: {
2417 unsigned Imm =
MI.getOperand(
MI.getNumOperands() - 1).getImm();
2418 unsigned ZMask = Imm & 15;
2419 unsigned DstIdx = (Imm >> 4) & 3;
2420 unsigned SrcIdx = (Imm >> 6) & 3;
2424 if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&
2427 assert(AltIdx < 4 &&
"Illegal insertion index");
2428 unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask;
2429 WorkingMI = CloneIfNew(
MI);
2438 case X86::VMOVSSrr: {
2440 if (Subtarget.hasSSE41()) {
2446 Opc = X86::BLENDPDrri;
2450 Opc = X86::BLENDPSrri;
2454 Opc = X86::VBLENDPDrri;
2458 Opc = X86::VBLENDPSrri;
2463 WorkingMI = CloneIfNew(
MI);
2469 assert(
Opc == X86::MOVSDrr &&
"Only MOVSD can commute to SHUFPD");
2470 WorkingMI = CloneIfNew(
MI);
2475 case X86::SHUFPDrri: {
2477 assert(
MI.getOperand(3).getImm() == 0x02 &&
"Unexpected immediate!");
2478 WorkingMI = CloneIfNew(
MI);
2483 case X86::PCLMULQDQrri:
2484 case X86::VPCLMULQDQrri:
2485 case X86::VPCLMULQDQYrri:
2486 case X86::VPCLMULQDQZrri:
2487 case X86::VPCLMULQDQZ128rri:
2488 case X86::VPCLMULQDQZ256rri: {
2491 unsigned Imm =
MI.getOperand(3).getImm();
2492 unsigned Src1Hi = Imm & 0x01;
2493 unsigned Src2Hi = Imm & 0x10;
2494 WorkingMI = CloneIfNew(
MI);
2498 case X86::VPCMPBZ128rri:
2499 case X86::VPCMPUBZ128rri:
2500 case X86::VPCMPBZ256rri:
2501 case X86::VPCMPUBZ256rri:
2502 case X86::VPCMPBZrri:
2503 case X86::VPCMPUBZrri:
2504 case X86::VPCMPDZ128rri:
2505 case X86::VPCMPUDZ128rri:
2506 case X86::VPCMPDZ256rri:
2507 case X86::VPCMPUDZ256rri:
2508 case X86::VPCMPDZrri:
2509 case X86::VPCMPUDZrri:
2510 case X86::VPCMPQZ128rri:
2511 case X86::VPCMPUQZ128rri:
2512 case X86::VPCMPQZ256rri:
2513 case X86::VPCMPUQZ256rri:
2514 case X86::VPCMPQZrri:
2515 case X86::VPCMPUQZrri:
2516 case X86::VPCMPWZ128rri:
2517 case X86::VPCMPUWZ128rri:
2518 case X86::VPCMPWZ256rri:
2519 case X86::VPCMPUWZ256rri:
2520 case X86::VPCMPWZrri:
2521 case X86::VPCMPUWZrri:
2522 case X86::VPCMPBZ128rrik:
2523 case X86::VPCMPUBZ128rrik:
2524 case X86::VPCMPBZ256rrik:
2525 case X86::VPCMPUBZ256rrik:
2526 case X86::VPCMPBZrrik:
2527 case X86::VPCMPUBZrrik:
2528 case X86::VPCMPDZ128rrik:
2529 case X86::VPCMPUDZ128rrik:
2530 case X86::VPCMPDZ256rrik:
2531 case X86::VPCMPUDZ256rrik:
2532 case X86::VPCMPDZrrik:
2533 case X86::VPCMPUDZrrik:
2534 case X86::VPCMPQZ128rrik:
2535 case X86::VPCMPUQZ128rrik:
2536 case X86::VPCMPQZ256rrik:
2537 case X86::VPCMPUQZ256rrik:
2538 case X86::VPCMPQZrrik:
2539 case X86::VPCMPUQZrrik:
2540 case X86::VPCMPWZ128rrik:
2541 case X86::VPCMPUWZ128rrik:
2542 case X86::VPCMPWZ256rrik:
2543 case X86::VPCMPUWZ256rrik:
2544 case X86::VPCMPWZrrik:
2545 case X86::VPCMPUWZrrik:
2546 WorkingMI = CloneIfNew(
MI);
2550 MI.getOperand(
MI.getNumOperands() - 1).getImm() & 0x7));
2553 case X86::VPCOMUBri:
2555 case X86::VPCOMUDri:
2557 case X86::VPCOMUQri:
2559 case X86::VPCOMUWri:
2560 WorkingMI = CloneIfNew(
MI);
2565 case X86::VCMPSDZrri:
2566 case X86::VCMPSSZrri:
2567 case X86::VCMPPDZrri:
2568 case X86::VCMPPSZrri:
2569 case X86::VCMPSHZrri:
2570 case X86::VCMPPHZrri:
2571 case X86::VCMPPHZ128rri:
2572 case X86::VCMPPHZ256rri:
2573 case X86::VCMPPDZ128rri:
2574 case X86::VCMPPSZ128rri:
2575 case X86::VCMPPDZ256rri:
2576 case X86::VCMPPSZ256rri:
2577 case X86::VCMPPDZrrik:
2578 case X86::VCMPPSZrrik:
2579 case X86::VCMPPHZrrik:
2580 case X86::VCMPPDZ128rrik:
2581 case X86::VCMPPSZ128rrik:
2582 case X86::VCMPPHZ128rrik:
2583 case X86::VCMPPDZ256rrik:
2584 case X86::VCMPPSZ256rrik:
2585 case X86::VCMPPHZ256rrik:
2586 WorkingMI = CloneIfNew(
MI);
2589 MI.getOperand(
MI.getNumExplicitOperands() - 1).getImm() & 0x1f));
2591 case X86::VPERM2F128rri:
2592 case X86::VPERM2I128rri:
2596 WorkingMI = CloneIfNew(
MI);
2599 case X86::MOVHLPSrr:
2600 case X86::UNPCKHPDrr:
2601 case X86::VMOVHLPSrr:
2602 case X86::VUNPCKHPDrr:
2603 case X86::VMOVHLPSZrr:
2604 case X86::VUNPCKHPDZ128rr:
2605 assert(Subtarget.hasSSE2() &&
"Commuting MOVHLP/UNPCKHPD requires SSE2!");
2610 case X86::MOVHLPSrr:
2611 Opc = X86::UNPCKHPDrr;
2613 case X86::UNPCKHPDrr:
2614 Opc = X86::MOVHLPSrr;
2616 case X86::VMOVHLPSrr:
2617 Opc = X86::VUNPCKHPDrr;
2619 case X86::VUNPCKHPDrr:
2620 Opc = X86::VMOVHLPSrr;
2622 case X86::VMOVHLPSZrr:
2623 Opc = X86::VUNPCKHPDZ128rr;
2625 case X86::VUNPCKHPDZ128rr:
2626 Opc = X86::VMOVHLPSZrr;
2629 WorkingMI = CloneIfNew(
MI);
2635 WorkingMI = CloneIfNew(
MI);
2636 unsigned OpNo =
MI.getDesc().getNumOperands() - 1;
2641 case X86::VPTERNLOGDZrri:
2642 case X86::VPTERNLOGDZrmi:
2643 case X86::VPTERNLOGDZ128rri:
2644 case X86::VPTERNLOGDZ128rmi:
2645 case X86::VPTERNLOGDZ256rri:
2646 case X86::VPTERNLOGDZ256rmi:
2647 case X86::VPTERNLOGQZrri:
2648 case X86::VPTERNLOGQZrmi:
2649 case X86::VPTERNLOGQZ128rri:
2650 case X86::VPTERNLOGQZ128rmi:
2651 case X86::VPTERNLOGQZ256rri:
2652 case X86::VPTERNLOGQZ256rmi:
2653 case X86::VPTERNLOGDZrrik:
2654 case X86::VPTERNLOGDZ128rrik:
2655 case X86::VPTERNLOGDZ256rrik:
2656 case X86::VPTERNLOGQZrrik:
2657 case X86::VPTERNLOGQZ128rrik:
2658 case X86::VPTERNLOGQZ256rrik:
2659 case X86::VPTERNLOGDZrrikz:
2660 case X86::VPTERNLOGDZrmikz:
2661 case X86::VPTERNLOGDZ128rrikz:
2662 case X86::VPTERNLOGDZ128rmikz:
2663 case X86::VPTERNLOGDZ256rrikz:
2664 case X86::VPTERNLOGDZ256rmikz:
2665 case X86::VPTERNLOGQZrrikz:
2666 case X86::VPTERNLOGQZrmikz:
2667 case X86::VPTERNLOGQZ128rrikz:
2668 case X86::VPTERNLOGQZ128rmikz:
2669 case X86::VPTERNLOGQZ256rrikz:
2670 case X86::VPTERNLOGQZ256rmikz:
2671 case X86::VPTERNLOGDZ128rmbi:
2672 case X86::VPTERNLOGDZ256rmbi:
2673 case X86::VPTERNLOGDZrmbi:
2674 case X86::VPTERNLOGQZ128rmbi:
2675 case X86::VPTERNLOGQZ256rmbi:
2676 case X86::VPTERNLOGQZrmbi:
2677 case X86::VPTERNLOGDZ128rmbikz:
2678 case X86::VPTERNLOGDZ256rmbikz:
2679 case X86::VPTERNLOGDZrmbikz:
2680 case X86::VPTERNLOGQZ128rmbikz:
2681 case X86::VPTERNLOGQZ256rmbikz:
2682 case X86::VPTERNLOGQZrmbikz: {
2683 WorkingMI = CloneIfNew(
MI);
2689 WorkingMI = CloneIfNew(
MI);
2695 WorkingMI = CloneIfNew(
MI);
2704bool X86InstrInfo::findThreeSrcCommutedOpIndices(
const MachineInstr &
MI,
2705 unsigned &SrcOpIdx1,
2706 unsigned &SrcOpIdx2,
2707 bool IsIntrinsic)
const {
2710 unsigned FirstCommutableVecOp = 1;
2711 unsigned LastCommutableVecOp = 3;
2712 unsigned KMaskOp = -1U;
2735 FirstCommutableVecOp = 3;
2737 LastCommutableVecOp++;
2738 }
else if (IsIntrinsic) {
2741 FirstCommutableVecOp = 2;
2744 if (
isMem(
MI, LastCommutableVecOp))
2745 LastCommutableVecOp--;
2750 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
2751 (SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp ||
2752 SrcOpIdx1 == KMaskOp))
2754 if (SrcOpIdx2 != CommuteAnyOperandIndex &&
2755 (SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp ||
2756 SrcOpIdx2 == KMaskOp))
2761 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
2762 SrcOpIdx2 == CommuteAnyOperandIndex) {
2763 unsigned CommutableOpIdx2 = SrcOpIdx2;
2767 if (SrcOpIdx1 == SrcOpIdx2)
2770 CommutableOpIdx2 = LastCommutableVecOp;
2771 else if (SrcOpIdx2 == CommuteAnyOperandIndex)
2773 CommutableOpIdx2 = SrcOpIdx1;
2777 Register Op2Reg =
MI.getOperand(CommutableOpIdx2).getReg();
2779 unsigned CommutableOpIdx1;
2780 for (CommutableOpIdx1 = LastCommutableVecOp;
2781 CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) {
2783 if (CommutableOpIdx1 == KMaskOp)
2789 if (Op2Reg !=
MI.getOperand(CommutableOpIdx1).getReg())
2794 if (CommutableOpIdx1 < FirstCommutableVecOp)
2799 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
2808 unsigned &SrcOpIdx1,
2809 unsigned &SrcOpIdx2)
const {
2811 if (!
Desc.isCommutable())
2814 switch (
MI.getOpcode()) {
2819 case X86::VCMPSDrri:
2820 case X86::VCMPSSrri:
2821 case X86::VCMPPDrri:
2822 case X86::VCMPPSrri:
2823 case X86::VCMPPDYrri:
2824 case X86::VCMPPSYrri:
2825 case X86::VCMPSDZrri:
2826 case X86::VCMPSSZrri:
2827 case X86::VCMPPDZrri:
2828 case X86::VCMPPSZrri:
2829 case X86::VCMPSHZrri:
2830 case X86::VCMPPHZrri:
2831 case X86::VCMPPHZ128rri:
2832 case X86::VCMPPHZ256rri:
2833 case X86::VCMPPDZ128rri:
2834 case X86::VCMPPSZ128rri:
2835 case X86::VCMPPDZ256rri:
2836 case X86::VCMPPSZ256rri:
2837 case X86::VCMPPDZrrik:
2838 case X86::VCMPPSZrrik:
2839 case X86::VCMPPHZrrik:
2840 case X86::VCMPPDZ128rrik:
2841 case X86::VCMPPSZ128rrik:
2842 case X86::VCMPPHZ128rrik:
2843 case X86::VCMPPDZ256rrik:
2844 case X86::VCMPPSZ256rrik:
2845 case X86::VCMPPHZ256rrik: {
2850 unsigned Imm =
MI.getOperand(3 + OpOffset).getImm() & 0x7;
2867 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1 + OpOffset,
2874 if (Subtarget.hasSSE41())
2877 case X86::SHUFPDrri:
2879 if (
MI.getOperand(3).getImm() == 0x02)
2882 case X86::MOVHLPSrr:
2883 case X86::UNPCKHPDrr:
2884 case X86::VMOVHLPSrr:
2885 case X86::VUNPCKHPDrr:
2886 case X86::VMOVHLPSZrr:
2887 case X86::VUNPCKHPDZ128rr:
2888 if (Subtarget.hasSSE2())
2891 case X86::VPTERNLOGDZrri:
2892 case X86::VPTERNLOGDZrmi:
2893 case X86::VPTERNLOGDZ128rri:
2894 case X86::VPTERNLOGDZ128rmi:
2895 case X86::VPTERNLOGDZ256rri:
2896 case X86::VPTERNLOGDZ256rmi:
2897 case X86::VPTERNLOGQZrri:
2898 case X86::VPTERNLOGQZrmi:
2899 case X86::VPTERNLOGQZ128rri:
2900 case X86::VPTERNLOGQZ128rmi:
2901 case X86::VPTERNLOGQZ256rri:
2902 case X86::VPTERNLOGQZ256rmi:
2903 case X86::VPTERNLOGDZrrik:
2904 case X86::VPTERNLOGDZ128rrik:
2905 case X86::VPTERNLOGDZ256rrik:
2906 case X86::VPTERNLOGQZrrik:
2907 case X86::VPTERNLOGQZ128rrik:
2908 case X86::VPTERNLOGQZ256rrik:
2909 case X86::VPTERNLOGDZrrikz:
2910 case X86::VPTERNLOGDZrmikz:
2911 case X86::VPTERNLOGDZ128rrikz:
2912 case X86::VPTERNLOGDZ128rmikz:
2913 case X86::VPTERNLOGDZ256rrikz:
2914 case X86::VPTERNLOGDZ256rmikz:
2915 case X86::VPTERNLOGQZrrikz:
2916 case X86::VPTERNLOGQZrmikz:
2917 case X86::VPTERNLOGQZ128rrikz:
2918 case X86::VPTERNLOGQZ128rmikz:
2919 case X86::VPTERNLOGQZ256rrikz:
2920 case X86::VPTERNLOGQZ256rmikz:
2921 case X86::VPTERNLOGDZ128rmbi:
2922 case X86::VPTERNLOGDZ256rmbi:
2923 case X86::VPTERNLOGDZrmbi:
2924 case X86::VPTERNLOGQZ128rmbi:
2925 case X86::VPTERNLOGQZ256rmbi:
2926 case X86::VPTERNLOGQZrmbi:
2927 case X86::VPTERNLOGDZ128rmbikz:
2928 case X86::VPTERNLOGDZ256rmbikz:
2929 case X86::VPTERNLOGDZrmbikz:
2930 case X86::VPTERNLOGQZ128rmbikz:
2931 case X86::VPTERNLOGQZ256rmbikz:
2932 case X86::VPTERNLOGQZrmbikz:
2933 return findThreeSrcCommutedOpIndices(
MI, SrcOpIdx1, SrcOpIdx2);
2934 case X86::VPDPWSSDYrr:
2935 case X86::VPDPWSSDrr:
2936 case X86::VPDPWSSDSYrr:
2937 case X86::VPDPWSSDSrr:
2938 case X86::VPDPWUUDrr:
2939 case X86::VPDPWUUDYrr:
2940 case X86::VPDPWUUDSrr:
2941 case X86::VPDPWUUDSYrr:
2942 case X86::VPDPBSSDSrr:
2943 case X86::VPDPBSSDSYrr:
2944 case X86::VPDPBSSDrr:
2945 case X86::VPDPBSSDYrr:
2946 case X86::VPDPBUUDSrr:
2947 case X86::VPDPBUUDSYrr:
2948 case X86::VPDPBUUDrr:
2949 case X86::VPDPBUUDYrr:
2950 case X86::VPDPBSSDSZ128rr:
2951 case X86::VPDPBSSDSZ128rrk:
2952 case X86::VPDPBSSDSZ128rrkz:
2953 case X86::VPDPBSSDSZ256rr:
2954 case X86::VPDPBSSDSZ256rrk:
2955 case X86::VPDPBSSDSZ256rrkz:
2956 case X86::VPDPBSSDSZrr:
2957 case X86::VPDPBSSDSZrrk:
2958 case X86::VPDPBSSDSZrrkz:
2959 case X86::VPDPBSSDZ128rr:
2960 case X86::VPDPBSSDZ128rrk:
2961 case X86::VPDPBSSDZ128rrkz:
2962 case X86::VPDPBSSDZ256rr:
2963 case X86::VPDPBSSDZ256rrk:
2964 case X86::VPDPBSSDZ256rrkz:
2965 case X86::VPDPBSSDZrr:
2966 case X86::VPDPBSSDZrrk:
2967 case X86::VPDPBSSDZrrkz:
2968 case X86::VPDPBUUDSZ128rr:
2969 case X86::VPDPBUUDSZ128rrk:
2970 case X86::VPDPBUUDSZ128rrkz:
2971 case X86::VPDPBUUDSZ256rr:
2972 case X86::VPDPBUUDSZ256rrk:
2973 case X86::VPDPBUUDSZ256rrkz:
2974 case X86::VPDPBUUDSZrr:
2975 case X86::VPDPBUUDSZrrk:
2976 case X86::VPDPBUUDSZrrkz:
2977 case X86::VPDPBUUDZ128rr:
2978 case X86::VPDPBUUDZ128rrk:
2979 case X86::VPDPBUUDZ128rrkz:
2980 case X86::VPDPBUUDZ256rr:
2981 case X86::VPDPBUUDZ256rrk:
2982 case X86::VPDPBUUDZ256rrkz:
2983 case X86::VPDPBUUDZrr:
2984 case X86::VPDPBUUDZrrk:
2985 case X86::VPDPBUUDZrrkz:
2986 case X86::VPDPWSSDZ128rr:
2987 case X86::VPDPWSSDZ128rrk:
2988 case X86::VPDPWSSDZ128rrkz:
2989 case X86::VPDPWSSDZ256rr:
2990 case X86::VPDPWSSDZ256rrk:
2991 case X86::VPDPWSSDZ256rrkz:
2992 case X86::VPDPWSSDZrr:
2993 case X86::VPDPWSSDZrrk:
2994 case X86::VPDPWSSDZrrkz:
2995 case X86::VPDPWSSDSZ128rr:
2996 case X86::VPDPWSSDSZ128rrk:
2997 case X86::VPDPWSSDSZ128rrkz:
2998 case X86::VPDPWSSDSZ256rr:
2999 case X86::VPDPWSSDSZ256rrk:
3000 case X86::VPDPWSSDSZ256rrkz:
3001 case X86::VPDPWSSDSZrr:
3002 case X86::VPDPWSSDSZrrk:
3003 case X86::VPDPWSSDSZrrkz:
3004 case X86::VPDPWUUDZ128rr:
3005 case X86::VPDPWUUDZ128rrk:
3006 case X86::VPDPWUUDZ128rrkz:
3007 case X86::VPDPWUUDZ256rr:
3008 case X86::VPDPWUUDZ256rrk:
3009 case X86::VPDPWUUDZ256rrkz:
3010 case X86::VPDPWUUDZrr:
3011 case X86::VPDPWUUDZrrk:
3012 case X86::VPDPWUUDZrrkz:
3013 case X86::VPDPWUUDSZ128rr:
3014 case X86::VPDPWUUDSZ128rrk:
3015 case X86::VPDPWUUDSZ128rrkz:
3016 case X86::VPDPWUUDSZ256rr:
3017 case X86::VPDPWUUDSZ256rrk:
3018 case X86::VPDPWUUDSZ256rrkz:
3019 case X86::VPDPWUUDSZrr:
3020 case X86::VPDPWUUDSZrrk:
3021 case X86::VPDPWUUDSZrrkz:
3022 case X86::VPMADD52HUQrr:
3023 case X86::VPMADD52HUQYrr:
3024 case X86::VPMADD52HUQZ128r:
3025 case X86::VPMADD52HUQZ128rk:
3026 case X86::VPMADD52HUQZ128rkz:
3027 case X86::VPMADD52HUQZ256r:
3028 case X86::VPMADD52HUQZ256rk:
3029 case X86::VPMADD52HUQZ256rkz:
3030 case X86::VPMADD52HUQZr:
3031 case X86::VPMADD52HUQZrk:
3032 case X86::VPMADD52HUQZrkz:
3033 case X86::VPMADD52LUQrr:
3034 case X86::VPMADD52LUQYrr:
3035 case X86::VPMADD52LUQZ128r:
3036 case X86::VPMADD52LUQZ128rk:
3037 case X86::VPMADD52LUQZ128rkz:
3038 case X86::VPMADD52LUQZ256r:
3039 case X86::VPMADD52LUQZ256rk:
3040 case X86::VPMADD52LUQZ256rkz:
3041 case X86::VPMADD52LUQZr:
3042 case X86::VPMADD52LUQZrk:
3043 case X86::VPMADD52LUQZrkz:
3044 case X86::VFMADDCPHZr:
3045 case X86::VFMADDCPHZrk:
3046 case X86::VFMADDCPHZrkz:
3047 case X86::VFMADDCPHZ128r:
3048 case X86::VFMADDCPHZ128rk:
3049 case X86::VFMADDCPHZ128rkz:
3050 case X86::VFMADDCPHZ256r:
3051 case X86::VFMADDCPHZ256rk:
3052 case X86::VFMADDCPHZ256rkz:
3053 case X86::VFMADDCSHZr:
3054 case X86::VFMADDCSHZrk:
3055 case X86::VFMADDCSHZrkz: {
3056 unsigned CommutableOpIdx1 = 2;
3057 unsigned CommutableOpIdx2 = 3;
3063 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3066 if (!
MI.getOperand(SrcOpIdx1).isReg() || !
MI.getOperand(SrcOpIdx2).isReg())
3076 return findThreeSrcCommutedOpIndices(
MI, SrcOpIdx1, SrcOpIdx2,
3083 unsigned CommutableOpIdx1 =
Desc.getNumDefs() + 1;
3084 unsigned CommutableOpIdx2 =
Desc.getNumDefs() + 2;
3087 if ((
MI.getDesc().getOperandConstraint(
Desc.getNumDefs(),
3102 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3106 if (!
MI.getOperand(SrcOpIdx1).isReg() ||
3107 !
MI.getOperand(SrcOpIdx2).isReg())
3119 unsigned Opcode =
MI->getOpcode();
3120 if (Opcode != X86::LEA32r && Opcode != X86::LEA64r &&
3121 Opcode != X86::LEA64_32r)
3143 unsigned Opcode =
MI.getOpcode();
3144 if (Opcode != X86::ADD32rr && Opcode != X86::ADD64rr)
3171 unsigned Opcode =
MCID.getOpcode();
3172 if (!(X86::isJCC(Opcode) || X86::isSETCC(Opcode) || X86::isSETZUCC(Opcode) ||
3173 X86::isCMOVCC(Opcode) || X86::isCFCMOVCC(Opcode) ||
3174 X86::isCCMPCC(Opcode) || X86::isCTESTCC(Opcode)))
3177 unsigned NumUses =
MCID.getNumOperands() -
MCID.getNumDefs();
3186 CondNo +=
MCID.getNumDefs();
3196 return X86::isSETCC(
MI.getOpcode()) || X86::isSETZUCC(
MI.getOpcode())
3212 return X86::isCCMPCC(
MI.getOpcode()) || X86::isCTESTCC(
MI.getOpcode())
3243 enum { CF = 1, ZF = 2, SF = 4, OF = 8, PF = CF };
3274#define GET_X86_NF_TRANSFORM_TABLE
3275#define GET_X86_ND2NONND_TABLE
3276#include "X86GenInstrMapping.inc"
3281 return (
I == Table.
end() ||
I->OldOpc !=
Opc) ? 0U :
I->NewOpc;
3284#if defined(EXPENSIVE_CHECKS) && !defined(NDEBUG)
3286 static std::atomic<bool> NFTableChecked(
false);
3287 if (!NFTableChecked.load(std::memory_order_relaxed)) {
3289 "X86NFTransformTable is not sorted!");
3290 NFTableChecked.store(
true, std::memory_order_relaxed);
3297#if defined(EXPENSIVE_CHECKS) && !defined(NDEBUG)
3299 static std::atomic<bool> NDTableChecked(
false);
3300 if (!NDTableChecked.load(std::memory_order_relaxed)) {
3302 "X86ND2NonNDTableis not sorted!");
3303 NDTableChecked.store(
true, std::memory_order_relaxed);
3383std::pair<X86::CondCode, bool>
3386 bool NeedSwap =
false;
3387 switch (Predicate) {
3466 return std::make_pair(CC, NeedSwap);
3475#define GET_ND_IF_ENABLED(OPC) (HasNDD ? OPC##_ND : OPC)
3569 switch (Imm & 0x3) {
3587 if (Info.RegClass == X86::VR128RegClassID ||
3588 Info.RegClass == X86::VR128XRegClassID)
3590 if (Info.RegClass == X86::VR256RegClassID ||
3591 Info.RegClass == X86::VR256XRegClassID)
3593 if (Info.RegClass == X86::VR512RegClassID)
3600 return (
Reg == X86::FPCW ||
Reg == X86::FPSW ||
3601 (
Reg >= X86::ST0 &&
Reg <= X86::ST7));
3609 if (
MI.isCall() ||
MI.isInlineAsm())
3633#ifdef EXPENSIVE_CHECKS
3635 "Got false negative from X86II::getMemoryOperandNo()!");
3645#ifdef EXPENSIVE_CHECKS
3647 "Expected no operands to have OPERAND_MEMORY type!");
3656 if (IsMemOp(
Desc.operands()[
I])) {
3657#ifdef EXPENSIVE_CHECKS
3661 "Expected all five operands in the memory reference to have "
3662 "OPERAND_MEMORY type!");
3674 "Unexpected number of operands!");
3677 if (!Index.isReg() || Index.getReg() != X86::NoRegister)
3685 MI.getParent()->getParent()->getConstantPool()->getConstants();
3697 switch (
MI.getOpcode()) {
3698 case X86::TCRETURNdi:
3699 case X86::TCRETURNri:
3700 case X86::TCRETURNmi:
3701 case X86::TCRETURNdi64:
3702 case X86::TCRETURNri64:
3703 case X86::TCRETURNri64_ImpCall:
3704 case X86::TCRETURNmi64:
3723 if (Symbol ==
"__x86_indirect_thunk_r11")
3728 if (TailCall.getOpcode() != X86::TCRETURNdi &&
3729 TailCall.getOpcode() != X86::TCRETURNdi64) {
3734 if (Subtarget.isTargetWin64() && MF->
hasWinCFI()) {
3747 TailCall.getOperand(1).getImm() != 0) {
3761 while (
I !=
MBB.begin()) {
3763 if (
I->isDebugInstr())
3766 assert(0 &&
"Can't find the branch to replace!");
3770 if (CC != BranchCond[0].
getImm())
3776 unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc
3777 : X86::TCRETURNdi64cc;
3790 LiveRegs.stepForward(*MIB, Clobbers);
3791 for (
const auto &
C : Clobbers) {
3796 I->eraseFromParent();
3810 if (Succ->isEHPad() || (Succ ==
TBB && FallthroughBB))
3813 if (FallthroughBB && FallthroughBB !=
TBB)
3815 FallthroughBB = Succ;
3817 return FallthroughBB;
3820bool X86InstrInfo::analyzeBranchImpl(
3831 if (
I->isDebugInstr())
3836 if (!isUnpredicatedTerminator(*
I))
3845 if (
I->getOpcode() == X86::JMP_1) {
3849 TBB =
I->getOperand(0).getMBB();
3864 UnCondBrIter =
MBB.
end();
3869 TBB =
I->getOperand(0).getMBB();
3880 if (
I->findRegisterUseOperand(X86::EFLAGS,
nullptr)->isUndef())
3886 TBB =
I->getOperand(0).getMBB();
3901 if (OldBranchCode == BranchCode &&
TBB == NewTBB)
3907 if (
TBB == NewTBB &&
3940 Cond[0].setImm(BranchCode);
3951 bool AllowModify)
const {
3953 return analyzeBranchImpl(
MBB,
TBB, FBB,
Cond, CondBranches, AllowModify);
3959 assert(MemRefBegin >= 0 &&
"instr should have memory operand");
3971 if (!
Reg.isVirtual())
3976 unsigned Opcode =
MI->getOpcode();
3977 if (Opcode != X86::LEA64r && Opcode != X86::LEA32r)
3983 unsigned Opcode =
MI.getOpcode();
3986 if (Opcode == X86::JMP64m || Opcode == X86::JMP32m) {
3994 if (Opcode == X86::JMP64r || Opcode == X86::JMP32r) {
3996 if (!Reg.isVirtual())
4003 if (
Add->getOpcode() != X86::ADD64rr &&
Add->getOpcode() != X86::ADD32rr)
4016 MachineBranchPredicate &MBP,
4017 bool AllowModify)
const {
4018 using namespace std::placeholders;
4022 if (analyzeBranchImpl(
MBB, MBP.TrueDest, MBP.FalseDest,
Cond, CondBranches,
4026 if (
Cond.size() != 1)
4029 assert(MBP.TrueDest &&
"expected!");
4032 MBP.FalseDest =
MBB.getNextNode();
4037 bool SingleUseCondition =
true;
4040 if (
MI.modifiesRegister(X86::EFLAGS,
TRI)) {
4045 if (
MI.readsRegister(X86::EFLAGS,
TRI))
4046 SingleUseCondition =
false;
4052 if (SingleUseCondition) {
4053 for (
auto *Succ :
MBB.successors())
4054 if (Succ->isLiveIn(X86::EFLAGS))
4055 SingleUseCondition =
false;
4058 MBP.ConditionDef = ConditionDef;
4059 MBP.SingleUseCondition = SingleUseCondition;
4066 const unsigned TestOpcode =
4067 Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr;
4069 if (ConditionDef->
getOpcode() == TestOpcode &&
4076 ? MachineBranchPredicate::PRED_NE
4077 : MachineBranchPredicate::PRED_EQ;
4085 int *BytesRemoved)
const {
4086 assert(!BytesRemoved &&
"code size not handled");
4091 while (
I !=
MBB.begin()) {
4093 if (
I->isDebugInstr())
4095 if (
I->getOpcode() != X86::JMP_1 &&
4099 I->eraseFromParent();
4113 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
4115 "X86 branch conditions have one component!");
4116 assert(!BytesAdded &&
"code size not handled");
4120 assert(!FBB &&
"Unconditional branch with multiple successors!");
4126 bool FallThru = FBB ==
nullptr;
4141 if (FBB ==
nullptr) {
4143 assert(FBB &&
"MBB cannot be the last block in function when the false "
4144 "body is a fall-through.");
4168 Register FalseReg,
int &CondCycles,
4169 int &TrueCycles,
int &FalseCycles)
const {
4171 if (!Subtarget.canUseCMOV())
4173 if (
Cond.size() != 1)
4182 RI.getCommonSubClass(
MRI.getRegClass(TrueReg),
MRI.getRegClass(FalseReg));
4187 if (X86::GR16RegClass.hasSubClassEq(RC) ||
4188 X86::GR32RegClass.hasSubClassEq(RC) ||
4189 X86::GR64RegClass.hasSubClassEq(RC)) {
4210 assert(
Cond.size() == 1 &&
"Invalid Cond array");
4213 false , Subtarget.hasNDD());
4222 return X86::GR8_ABCD_HRegClass.contains(
Reg);
4228 bool HasAVX = Subtarget.
hasAVX();
4230 bool HasEGPR = Subtarget.hasEGPR();
4237 if (X86::VK16RegClass.
contains(SrcReg)) {
4238 if (X86::GR64RegClass.
contains(DestReg)) {
4239 assert(Subtarget.hasBWI());
4240 return HasEGPR ? X86::KMOVQrk_EVEX : X86::KMOVQrk;
4242 if (X86::GR32RegClass.
contains(DestReg))
4243 return Subtarget.hasBWI() ? (HasEGPR ? X86::KMOVDrk_EVEX : X86::KMOVDrk)
4244 : (HasEGPR ? X86::KMOVWrk_EVEX : X86::KMOVWrk);
4252 if (X86::VK16RegClass.
contains(DestReg)) {
4253 if (X86::GR64RegClass.
contains(SrcReg)) {
4254 assert(Subtarget.hasBWI());
4255 return HasEGPR ? X86::KMOVQkr_EVEX : X86::KMOVQkr;
4257 if (X86::GR32RegClass.
contains(SrcReg))
4258 return Subtarget.hasBWI() ? (HasEGPR ? X86::KMOVDkr_EVEX : X86::KMOVDkr)
4259 : (HasEGPR ? X86::KMOVWkr_EVEX : X86::KMOVWkr);
4267 if (X86::GR64RegClass.
contains(DestReg)) {
4268 if (X86::VR128XRegClass.
contains(SrcReg))
4270 return HasAVX512 ? X86::VMOVPQIto64Zrr
4271 : HasAVX ? X86::VMOVPQIto64rr
4272 : X86::MOVPQIto64rr;
4273 if (X86::VR64RegClass.
contains(SrcReg))
4275 return X86::MMX_MOVD64from64rr;
4276 }
else if (X86::GR64RegClass.
contains(SrcReg)) {
4278 if (X86::VR128XRegClass.
contains(DestReg))
4279 return HasAVX512 ? X86::VMOV64toPQIZrr
4280 : HasAVX ? X86::VMOV64toPQIrr
4281 : X86::MOV64toPQIrr;
4283 if (X86::VR64RegClass.
contains(DestReg))
4284 return X86::MMX_MOVD64to64rr;
4290 if (X86::GR32RegClass.
contains(DestReg) &&
4291 X86::VR128XRegClass.
contains(SrcReg))
4293 return HasAVX512 ? X86::VMOVPDI2DIZrr
4294 : HasAVX ? X86::VMOVPDI2DIrr
4297 if (X86::VR128XRegClass.
contains(DestReg) &&
4298 X86::GR32RegClass.
contains(SrcReg))
4300 return HasAVX512 ? X86::VMOVDI2PDIZrr
4301 : HasAVX ? X86::VMOVDI2PDIrr
4311 bool RenamableDest,
bool RenamableSrc)
const {
4313 bool HasAVX = Subtarget.hasAVX();
4314 bool HasVLX = Subtarget.hasVLX();
4315 bool HasEGPR = Subtarget.hasEGPR();
4317 if (X86::GR64RegClass.
contains(DestReg, SrcReg))
4319 else if (X86::GR32RegClass.
contains(DestReg, SrcReg))
4321 else if (X86::GR16RegClass.
contains(DestReg, SrcReg))
4323 else if (X86::GR8RegClass.
contains(DestReg, SrcReg)) {
4326 if ((
isHReg(DestReg) ||
isHReg(SrcReg)) && Subtarget.is64Bit()) {
4327 Opc = X86::MOV8rr_NOREX;
4330 "8-bit H register can not be copied outside GR8_NOREX");
4333 }
else if (X86::VR64RegClass.
contains(DestReg, SrcReg))
4334 Opc = X86::MMX_MOVQ64rr;
4335 else if (X86::VR128XRegClass.
contains(DestReg, SrcReg)) {
4337 Opc = X86::VMOVAPSZ128rr;
4338 else if (X86::VR128RegClass.
contains(DestReg, SrcReg))
4339 Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
4343 Opc = X86::VMOVAPSZrr;
4346 TRI->getMatchingSuperReg(DestReg, X86::sub_xmm, &X86::VR512RegClass);
4348 TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
4350 }
else if (X86::VR256XRegClass.
contains(DestReg, SrcReg)) {
4352 Opc = X86::VMOVAPSZ256rr;
4353 else if (X86::VR256RegClass.
contains(DestReg, SrcReg))
4354 Opc = X86::VMOVAPSYrr;
4358 Opc = X86::VMOVAPSZrr;
4361 TRI->getMatchingSuperReg(DestReg, X86::sub_ymm, &X86::VR512RegClass);
4363 TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
4365 }
else if (X86::VR512RegClass.
contains(DestReg, SrcReg))
4366 Opc = X86::VMOVAPSZrr;
4369 else if (X86::VK16RegClass.
contains(DestReg, SrcReg))
4370 Opc = Subtarget.hasBWI() ? (HasEGPR ? X86::KMOVQkk_EVEX : X86::KMOVQkk)
4371 : (HasEGPR ? X86::KMOVQkk_EVEX : X86::KMOVWkk);
4382 if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) {
4390 LLVM_DEBUG(
dbgs() <<
"Cannot copy " << RI.getName(SrcReg) <<
" to "
4391 << RI.getName(DestReg) <<
'\n');
4395std::optional<DestSourcePair>
4397 if (
MI.isMoveReg()) {
4401 if (
MI.getOperand(0).isUndef() &&
MI.getOperand(0).getSubReg())
4402 return std::nullopt;
4406 return std::nullopt;
4411 return Load ? X86::VMOVSHZrm_alt : X86::VMOVSHZmr;
4413 return X86::MOVSHPrm;
4414 return X86::MOVSHPmr;
4419 bool IsStackAligned,
4421 bool HasAVX = STI.
hasAVX();
4423 bool HasVLX = STI.hasVLX();
4424 bool HasEGPR = STI.hasEGPR();
4426 assert(RC !=
nullptr &&
"Invalid target register class");
4431 assert(X86::GR8RegClass.hasSubClassEq(RC) &&
"Unknown 1-byte regclass");
4435 if (
isHReg(
Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC))
4436 return Load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
4437 return Load ? X86::MOV8rm : X86::MOV8mr;
4439 if (X86::VK16RegClass.hasSubClassEq(RC))
4440 return Load ? (HasEGPR ? X86::KMOVWkm_EVEX : X86::KMOVWkm)
4441 : (HasEGPR ? X86::KMOVWmk_EVEX : X86::KMOVWmk);
4442 assert(X86::GR16RegClass.hasSubClassEq(RC) &&
"Unknown 2-byte regclass");
4443 return Load ? X86::MOV16rm : X86::MOV16mr;
4445 if (X86::GR32RegClass.hasSubClassEq(RC))
4446 return Load ? X86::MOV32rm : X86::MOV32mr;
4447 if (X86::FR32XRegClass.hasSubClassEq(RC))
4448 return Load ? (HasAVX512 ? X86::VMOVSSZrm_alt
4449 : HasAVX ? X86::VMOVSSrm_alt
4451 : (HasAVX512 ? X86::VMOVSSZmr
4452 : HasAVX ? X86::VMOVSSmr
4454 if (X86::RFP32RegClass.hasSubClassEq(RC))
4455 return Load ? X86::LD_Fp32m : X86::ST_Fp32m;
4456 if (X86::VK32RegClass.hasSubClassEq(RC)) {
4457 assert(STI.hasBWI() &&
"KMOVD requires BWI");
4458 return Load ? (HasEGPR ? X86::KMOVDkm_EVEX : X86::KMOVDkm)
4459 : (HasEGPR ? X86::KMOVDmk_EVEX : X86::KMOVDmk);
4463 if (X86::VK1PAIRRegClass.hasSubClassEq(RC) ||
4464 X86::VK2PAIRRegClass.hasSubClassEq(RC) ||
4465 X86::VK4PAIRRegClass.hasSubClassEq(RC) ||
4466 X86::VK8PAIRRegClass.hasSubClassEq(RC) ||
4467 X86::VK16PAIRRegClass.hasSubClassEq(RC))
4468 return Load ? X86::MASKPAIR16LOAD : X86::MASKPAIR16STORE;
4469 if (X86::FR16RegClass.hasSubClassEq(RC) ||
4470 X86::FR16XRegClass.hasSubClassEq(RC))
4474 if (X86::GR64RegClass.hasSubClassEq(RC))
4475 return Load ? X86::MOV64rm : X86::MOV64mr;
4476 if (X86::FR64XRegClass.hasSubClassEq(RC))
4477 return Load ? (HasAVX512 ? X86::VMOVSDZrm_alt
4478 : HasAVX ? X86::VMOVSDrm_alt
4480 : (HasAVX512 ? X86::VMOVSDZmr
4481 : HasAVX ? X86::VMOVSDmr
4483 if (X86::VR64RegClass.hasSubClassEq(RC))
4484 return Load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
4485 if (X86::RFP64RegClass.hasSubClassEq(RC))
4486 return Load ? X86::LD_Fp64m : X86::ST_Fp64m;
4487 if (X86::VK64RegClass.hasSubClassEq(RC)) {
4488 assert(STI.hasBWI() &&
"KMOVQ requires BWI");
4489 return Load ? (HasEGPR ? X86::KMOVQkm_EVEX : X86::KMOVQkm)
4490 : (HasEGPR ? X86::KMOVQmk_EVEX : X86::KMOVQmk);
4494 assert(X86::RFP80RegClass.hasSubClassEq(RC) &&
"Unknown 10-byte regclass");
4495 return Load ? X86::LD_Fp80m : X86::ST_FpP80m;
4497 if (X86::VR128XRegClass.hasSubClassEq(RC)) {
4500 return Load ? (HasVLX ? X86::VMOVAPSZ128rm
4501 : HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX
4502 : HasAVX ? X86::VMOVAPSrm
4504 : (HasVLX ? X86::VMOVAPSZ128mr
4505 : HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX
4506 : HasAVX ? X86::VMOVAPSmr
4509 return Load ? (HasVLX ? X86::VMOVUPSZ128rm
4510 : HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX
4511 : HasAVX ? X86::VMOVUPSrm
4513 : (HasVLX ? X86::VMOVUPSZ128mr
4514 : HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX
4515 : HasAVX ? X86::VMOVUPSmr
4521 assert(X86::VR256XRegClass.hasSubClassEq(RC) &&
"Unknown 32-byte regclass");
4524 return Load ? (HasVLX ? X86::VMOVAPSZ256rm
4525 : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
4527 : (HasVLX ? X86::VMOVAPSZ256mr
4528 : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
4531 return Load ? (HasVLX ? X86::VMOVUPSZ256rm
4532 : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
4534 : (HasVLX ? X86::VMOVUPSZ256mr
4535 : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
4538 assert(X86::VR512RegClass.hasSubClassEq(RC) &&
"Unknown 64-byte regclass");
4541 return Load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
4543 return Load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
4545 assert(X86::TILERegClass.hasSubClassEq(RC) &&
"Unknown 1024-byte regclass");
4546 assert(STI.hasAMXTILE() &&
"Using 8*1024-bit register requires AMX-TILE");
4547#define GET_EGPR_IF_ENABLED(OPC) (STI.hasEGPR() ? OPC##_EVEX : OPC)
4550#undef GET_EGPR_IF_ENABLED
4554std::optional<ExtAddrMode>
4559 if (MemRefBegin < 0)
4560 return std::nullopt;
4565 if (!BaseOp.isReg())
4566 return std::nullopt;
4570 if (!DispMO.
isImm())
4571 return std::nullopt;
4597 ErrInfo =
"Scale factor in address must be 1, 2, 4 or 8";
4602 ErrInfo =
"Displacement in address must fit into 32-bit signed "
4612 int64_t &ImmVal)
const {
4618 if (
MI.isSubregToReg()) {
4622 if (!
MI.getOperand(1).isImm())
4624 unsigned FillBits =
MI.getOperand(1).getImm();
4625 unsigned SubIdx =
MI.getOperand(3).getImm();
4626 MovReg =
MI.getOperand(2).getReg();
4627 if (SubIdx != X86::sub_32bit || FillBits != 0)
4630 MovMI =
MRI.getUniqueVRegDef(MovReg);
4635 if (MovMI->
getOpcode() == X86::MOV32r0 &&
4641 if (MovMI->
getOpcode() != X86::MOV32ri &&
4655 if (!
MI->modifiesRegister(NullValueReg,
TRI))
4657 switch (
MI->getOpcode()) {
4664 assert(
MI->getOperand(0).isDef() &&
MI->getOperand(1).isUse() &&
4665 "expected for shift opcode!");
4666 return MI->getOperand(0).getReg() == NullValueReg &&
4667 MI->getOperand(1).getReg() == NullValueReg;
4672 return TRI->isSubRegisterEq(NullValueReg, MO.getReg());
4686 if (MemRefBegin < 0)
4693 if (!BaseOp->
isReg())
4706 if (!DispMO.
isImm())
4711 if (!BaseOp->
isReg())
4714 OffsetIsScalable =
false;
4718 Width = !
MemOp.memoperands_empty() ?
MemOp.memoperands().front()->getSize()
4726 bool IsStackAligned,
4741 case X86::TILELOADD:
4742 case X86::TILESTORED:
4743 case X86::TILELOADD_EVEX:
4744 case X86::TILESTORED_EVEX:
4752 bool isKill)
const {
4756 case X86::TILESTORED:
4757 case X86::TILESTORED_EVEX: {
4760 Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
4770 case X86::TILELOADD:
4771 case X86::TILELOADD_EVEX: {
4774 Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
4794 "Stack slot too small for store");
4796 unsigned Alignment = std::max<uint32_t>(RI.getSpillSize(*RC), 16);
4798 (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
4819 "Load size exceeds stack slot");
4820 unsigned Alignment = std::max<uint32_t>(RI.getSpillSize(*RC), 16);
4822 (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
4834 Register &SrcReg2, int64_t &CmpMask,
4835 int64_t &CmpValue)
const {
4836 switch (
MI.getOpcode()) {
4839 case X86::CMP64ri32:
4843 SrcReg =
MI.getOperand(0).getReg();
4845 if (
MI.getOperand(1).isImm()) {
4847 CmpValue =
MI.getOperand(1).getImm();
4849 CmpMask = CmpValue = 0;
4857 SrcReg =
MI.getOperand(1).getReg();
4866 SrcReg =
MI.getOperand(1).getReg();
4867 SrcReg2 =
MI.getOperand(2).getReg();
4875 SrcReg =
MI.getOperand(1).getReg();
4877 if (
MI.getOperand(2).isImm()) {
4879 CmpValue =
MI.getOperand(2).getImm();
4881 CmpMask = CmpValue = 0;
4888 SrcReg =
MI.getOperand(0).getReg();
4889 SrcReg2 =
MI.getOperand(1).getReg();
4897 SrcReg =
MI.getOperand(0).getReg();
4898 if (
MI.getOperand(1).getReg() != SrcReg)
4905 case X86::TEST64ri32:
4909 SrcReg =
MI.getOperand(0).getReg();
4919bool X86InstrInfo::isRedundantFlagInstr(
const MachineInstr &FlagI,
4921 int64_t ImmMask, int64_t ImmValue,
4923 int64_t *ImmDelta)
const {
4938 OIMask != ImmMask || OIValue != ImmValue)
4940 if (SrcReg == OISrcReg && SrcReg2 == OISrcReg2) {
4944 if (SrcReg == OISrcReg2 && SrcReg2 == OISrcReg) {
4950 case X86::CMP64ri32:
4954 case X86::TEST64ri32:
4965 case X86::TEST8rr: {
4972 SrcReg == OISrcReg && ImmMask == OIMask) {
4973 if (OIValue == ImmValue) {
4976 }
else if (
static_cast<uint64_t
>(ImmValue) ==
4977 static_cast<uint64_t
>(OIValue) - 1) {
4980 }
else if (
static_cast<uint64_t
>(ImmValue) ==
4981 static_cast<uint64_t
>(OIValue) + 1) {
4999 bool &ClearsOverflowFlag) {
5001 ClearsOverflowFlag =
false;
5007 if (
MI.getOpcode() == X86::ADD64rm ||
MI.getOpcode() == X86::ADD32rm) {
5008 unsigned Flags =
MI.getOperand(5).getTargetFlags();
5014 switch (
MI.getOpcode()) {
5110 case X86::LZCNT16rr:
5111 case X86::LZCNT16rm:
5112 case X86::LZCNT32rr:
5113 case X86::LZCNT32rm:
5114 case X86::LZCNT64rr:
5115 case X86::LZCNT64rm:
5116 case X86::POPCNT16rr:
5117 case X86::POPCNT16rm:
5118 case X86::POPCNT32rr:
5119 case X86::POPCNT32rm:
5120 case X86::POPCNT64rr:
5121 case X86::POPCNT64rm:
5122 case X86::TZCNT16rr:
5123 case X86::TZCNT16rm:
5124 case X86::TZCNT32rr:
5125 case X86::TZCNT32rm:
5126 case X86::TZCNT64rr:
5127 case X86::TZCNT64rm:
5173 case X86::BLSMSK32rr:
5174 case X86::BLSMSK32rm:
5175 case X86::BLSMSK64rr:
5176 case X86::BLSMSK64rm:
5181 case X86::BLCFILL32rr:
5182 case X86::BLCFILL32rm:
5183 case X86::BLCFILL64rr:
5184 case X86::BLCFILL64rm:
5189 case X86::BLCIC32rr:
5190 case X86::BLCIC32rm:
5191 case X86::BLCIC64rr:
5192 case X86::BLCIC64rm:
5193 case X86::BLCMSK32rr:
5194 case X86::BLCMSK32rm:
5195 case X86::BLCMSK64rr:
5196 case X86::BLCMSK64rm:
5201 case X86::BLSFILL32rr:
5202 case X86::BLSFILL32rm:
5203 case X86::BLSFILL64rr:
5204 case X86::BLSFILL64rm:
5205 case X86::BLSIC32rr:
5206 case X86::BLSIC32rm:
5207 case X86::BLSIC64rr:
5208 case X86::BLSIC64rm:
5213 case X86::T1MSKC32rr:
5214 case X86::T1MSKC32rm:
5215 case X86::T1MSKC64rr:
5216 case X86::T1MSKC64rm:
5217 case X86::TZMSK32rr:
5218 case X86::TZMSK32rm:
5219 case X86::TZMSK64rr:
5220 case X86::TZMSK64rm:
5224 ClearsOverflowFlag =
true;
5226 case X86::BEXTR32rr:
5227 case X86::BEXTR64rr:
5228 case X86::BEXTR32rm:
5229 case X86::BEXTR64rm:
5230 case X86::BEXTRI32ri:
5231 case X86::BEXTRI32mi:
5232 case X86::BEXTRI64ri:
5233 case X86::BEXTRI64mi:
5244 switch (
MI.getOpcode()) {
5252 case X86::LZCNT16rr:
5253 case X86::LZCNT32rr:
5254 case X86::LZCNT64rr:
5256 case X86::POPCNT16rr:
5257 case X86::POPCNT32rr:
5258 case X86::POPCNT64rr:
5260 case X86::TZCNT16rr:
5261 case X86::TZCNT32rr:
5262 case X86::TZCNT64rr:
5276 case X86::BLSMSK32rr:
5277 case X86::BLSMSK64rr:
5309 unsigned NewOpcode = 0;
5310#define FROM_TO(A, B) \
5311 CASE_ND(A) NewOpcode = X86::B; \
5335 if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
5336 NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
5344 bool IsCmpZero = (CmpMask != 0 && CmpValue == 0);
5350 assert(SrcRegDef &&
"Must have a definition (SSA)");
5356 bool NoSignFlag =
false;
5357 bool ClearsOverflowFlag =
false;
5358 bool ShouldUpdateCC =
false;
5359 bool IsSwapped =
false;
5360 bool HasNF = Subtarget.hasNF();
5363 int64_t ImmDelta = 0;
5376 if (&Inst == SrcRegDef) {
5399 Subtarget, NoSignFlag, ClearsOverflowFlag)) {
5408 if (Inst.modifiesRegister(X86::EFLAGS,
TRI)) {
5419 Inst.getOperand(OpNo).getReg() == SrcReg) {
5420 ShouldUpdateCC =
true;
5431 if (isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask, CmpValue,
5432 Inst, &IsSwapped, &ImmDelta)) {
5440 if (!Movr0Inst && Inst.
getOpcode() == X86::MOV32r0 &&
5441 Inst.registerDefIsDead(X86::EFLAGS,
TRI)) {
5455 if (HasNF && Inst.registerDefIsDead(X86::EFLAGS,
TRI) && !IsWithReloc) {
5460 InstsToUpdate.
push_back(std::make_pair(&Inst, NewOp));
5474 if (
MBB->pred_size() != 1)
5476 MBB = *
MBB->pred_begin();
5477 From =
MBB->rbegin();
5484 bool FlagsMayLiveOut =
true;
5489 bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS,
TRI);
5490 bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS,
TRI);
5492 if (!UseEFLAGS && ModifyEFLAGS) {
5494 FlagsMayLiveOut =
false;
5497 if (!UseEFLAGS && !ModifyEFLAGS)
5528 if (!ClearsOverflowFlag)
5547 ReplacementCC = NewCC;
5553 }
else if (IsSwapped) {
5560 ShouldUpdateCC =
true;
5561 }
else if (ImmDelta != 0) {
5562 unsigned BitWidth = RI.getRegSizeInBits(*
MRI->getRegClass(SrcReg));
5572 if (ImmDelta != 1 || CmpValue == 0)
5582 if (ImmDelta != 1 || CmpValue == 0)
5609 ShouldUpdateCC =
true;
5612 if (ShouldUpdateCC && ReplacementCC != OldCC) {
5616 OpsToUpdate.
push_back(std::make_pair(&Instr, ReplacementCC));
5618 if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS,
TRI)) {
5620 FlagsMayLiveOut =
false;
5627 if ((
MI !=
nullptr || ShouldUpdateCC) && FlagsMayLiveOut) {
5634 assert((
MI ==
nullptr ||
Sub ==
nullptr) &&
"Should not have Sub and MI set");
5641 if (&CmpMBB != SubBB)
5645 InsertE =
Sub->getParent()->rend();
5646 for (; InsertI != InsertE; ++InsertI) {
5648 if (!Instr->readsRegister(X86::EFLAGS,
TRI) &&
5649 Instr->modifiesRegister(X86::EFLAGS,
TRI)) {
5656 if (InsertI == InsertE)
5661 for (
auto &Inst : InstsToUpdate) {
5662 Inst.first->setDesc(
get(Inst.second));
5663 Inst.first->removeOperand(
5664 Inst.first->findRegisterDefOperandIdx(X86::EFLAGS,
nullptr));
5669 Sub->findRegisterDefOperand(X86::EFLAGS,
nullptr);
5670 assert(FlagDef &&
"Unable to locate a def EFLAGS operand");
5676 for (
auto &
Op : OpsToUpdate) {
5677 Op.first->getOperand(
Op.first->getDesc().getNumOperands() - 1)
5682 MBB = *
MBB->pred_begin()) {
5683 assert(
MBB->pred_size() == 1 &&
"Expected exactly one predecessor");
5684 if (!
MBB->isLiveIn(X86::EFLAGS))
5685 MBB->addLiveIn(X86::EFLAGS);
5713#define FROM_TO(FROM, TO) \
5716 case X86::FROM##_ND: \
5717 return X86::TO##_ND;
5747#define FROM_TO(FROM, TO) \
5751 FROM_TO(CTEST64rr, CTEST64ri32)
5770 bool MakeChange)
const {
5776 const TargetRegisterClass *RC =
nullptr;
5778 RC =
MRI->getRegClass(
Reg);
5780 (
Reg.
isVirtual() && X86::GR64RegClass.hasSubClassEq(RC))) {
5785 if (
UseMI.findRegisterUseOperand(
Reg,
nullptr)->getSubReg())
5790 !
MRI->hasOneNonDBGUse(
Reg))
5795 if (
Opc == TargetOpcode::COPY) {
5797 const TargetRegisterClass *RC =
nullptr;
5799 RC =
MRI->getRegClass(ToReg);
5800 bool GR32Reg = (ToReg.
isVirtual() && X86::GR32RegClass.hasSubClassEq(RC)) ||
5802 bool GR64Reg = (ToReg.
isVirtual() && X86::GR64RegClass.hasSubClassEq(RC)) ||
5804 bool GR8Reg = (ToReg.
isVirtual() && X86::GR8RegClass.hasSubClassEq(RC)) ||
5815 NewOpc = X86::MOV32ri64;
5817 NewOpc = X86::MOV64ri;
5818 }
else if (GR32Reg) {
5819 NewOpc = X86::MOV32ri;
5823 if (
UseMI.getParent()->computeRegisterLiveness(
5832 UseMI.removeOperand(
5833 UseMI.findRegisterUseOperandIdx(
Reg,
nullptr));
5841 NewOpc = X86::MOV8ri;
5851 if ((NewOpc == X86::SUB64ri32 || NewOpc == X86::SUB32ri ||
5852 NewOpc == X86::SBB64ri32 || NewOpc == X86::SBB32ri ||
5853 NewOpc == X86::SUB64ri32_ND || NewOpc == X86::SUB32ri_ND ||
5854 NewOpc == X86::SBB64ri32_ND || NewOpc == X86::SBB32ri_ND) &&
5855 UseMI.findRegisterUseOperandIdx(
Reg,
nullptr) != 2)
5858 if (((NewOpc == X86::CMP64ri32 || NewOpc == X86::CMP32ri) ||
5859 (NewOpc == X86::CCMP64ri32 || NewOpc == X86::CCMP32ri)) &&
5860 UseMI.findRegisterUseOperandIdx(
Reg,
nullptr) != 1)
5863 using namespace X86;
5864 if (isSHL(
Opc) || isSHR(
Opc) || isSAR(
Opc) || isROL(
Opc) || isROR(
Opc) ||
5865 isRCL(
Opc) || isRCR(
Opc)) {
5866 unsigned RegIdx =
UseMI.findRegisterUseOperandIdx(
Reg,
nullptr);
5876 UseMI.removeOperand(RegIdx);
5890 UseMI.registerDefIsDead(X86::EFLAGS,
nullptr)) {
5894 UseMI.setDesc(
get(TargetOpcode::COPY));
5895 UseMI.removeOperand(
5896 UseMI.findRegisterUseOperandIdx(
Reg,
nullptr));
5897 UseMI.removeOperand(
5898 UseMI.findRegisterDefOperandIdx(X86::EFLAGS,
nullptr));
5899 UseMI.untieRegOperand(0);
5903 unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
5904 unsigned ImmOpNum = 2;
5905 if (!
UseMI.getOperand(0).isDef()) {
5909 if (
Opc == TargetOpcode::COPY)
5913 commuteInstruction(
UseMI);
5917 UseMI.getOperand(ImmOpNum).ChangeToImmediate(ImmVal);
5935 return foldImmediateImpl(
UseMI, &
DefMI, Reg, ImmVal,
MRI,
true);
5947 assert(
Desc.getNumOperands() == 3 &&
"Expected two-addr instruction.");
5967 assert(
Desc.getNumOperands() == 3 &&
"Expected two-addr instruction.");
5985 MIB->
setDesc(
TII.get(MinusOne ? X86::DEC32r : X86::INC32r));
5997 assert(Imm != 0 &&
"Using push/pop for 0 is not efficient.");
6000 int StackAdjustment;
6002 if (Subtarget.is64Bit()) {
6004 MIB->
getOpcode() == X86::MOV32ImmSExti8);
6018 StackAdjustment = 8;
6024 StackAdjustment = 4;
6036 bool EmitCFI = !TFL->
hasFP(MF) && NeedsDwarfCFI;
6083 MIB->
getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr;
6095 const MCInstrDesc &BroadcastDesc,
unsigned SubIdx) {
6098 if (
TRI->getEncodingValue(DestReg) < 16) {
6105 DestReg =
TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass);
6117 const MCInstrDesc &ExtractDesc,
unsigned SubIdx) {
6120 if (
TRI->getEncodingValue(SrcReg) < 16) {
6127 SrcReg =
TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass);
6150 if (
MI.getOpcode() == X86::MOVSHPrm) {
6151 NewOpc = HasAVX ? X86::VMOVSSrm : X86::MOVSSrm;
6153 if (
Reg > X86::XMM15)
6154 NewOpc = X86::VMOVSSZrm;
6156 NewOpc = HasAVX ? X86::VMOVSSmr : X86::MOVSSmr;
6158 if (
Reg > X86::XMM15)
6159 NewOpc = X86::VMOVSSZmr;
6167 bool HasAVX = Subtarget.hasAVX();
6169 switch (
MI.getOpcode()) {
6176 case X86::MOV32ImmSExti8:
6177 case X86::MOV64ImmSExti8:
6179 case X86::SETB_C32r:
6181 case X86::SETB_C64r:
6189 case X86::FsFLD0F128:
6191 case X86::AVX_SET0: {
6192 assert(HasAVX &&
"AVX not supported");
6195 Register XReg =
TRI->getSubReg(SrcReg, X86::sub_xmm);
6201 case X86::AVX512_128_SET0:
6202 case X86::AVX512_FsFLD0SH:
6203 case X86::AVX512_FsFLD0SS:
6204 case X86::AVX512_FsFLD0SD:
6205 case X86::AVX512_FsFLD0F128: {
6206 bool HasVLX = Subtarget.hasVLX();
6209 if (HasVLX ||
TRI->getEncodingValue(SrcReg) < 16)
6211 get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
6214 TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
6218 case X86::AVX512_256_SET0:
6219 case X86::AVX512_512_SET0: {
6220 bool HasVLX = Subtarget.hasVLX();
6223 if (HasVLX ||
TRI->getEncodingValue(SrcReg) < 16) {
6224 Register XReg =
TRI->getSubReg(SrcReg, X86::sub_xmm);
6230 if (
MI.getOpcode() == X86::AVX512_256_SET0) {
6233 TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
6241 case X86::V_SETALLONES:
6243 get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
6244 case X86::AVX2_SETALLONES:
6246 case X86::AVX1_SETALLONES: {
6253 case X86::AVX512_128_SETALLONES:
6254 case X86::AVX512_256_SETALLONES:
6255 case X86::AVX512_512_SETALLONES: {
6258 switch (
MI.getOpcode()) {
6259 case X86::AVX512_128_SETALLONES: {
6260 if (X86::VR128RegClass.
contains(Reg))
6263 Opc = X86::VPTERNLOGDZ128rri;
6266 case X86::AVX512_256_SETALLONES: {
6267 if (X86::VR256RegClass.
contains(Reg))
6270 Opc = X86::VPTERNLOGDZ256rri;
6273 case X86::AVX512_512_SETALLONES:
6274 Opc = X86::VPTERNLOGDZrri;
6286 case X86::AVX512_512_SEXT_MASK_32:
6287 case X86::AVX512_512_SEXT_MASK_64: {
6291 unsigned Opc = (
MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64)
6292 ? X86::VPTERNLOGQZrrikz
6293 : X86::VPTERNLOGDZrrikz;
6294 MI.removeOperand(1);
6299 .
addReg(MaskReg, MaskState)
6305 case X86::VMOVAPSZ128rm_NOVLX:
6307 get(X86::VBROADCASTF32X4Zrm), X86::sub_xmm);
6308 case X86::VMOVUPSZ128rm_NOVLX:
6310 get(X86::VBROADCASTF32X4Zrm), X86::sub_xmm);
6311 case X86::VMOVAPSZ256rm_NOVLX:
6313 get(X86::VBROADCASTF64X4Zrm), X86::sub_ymm);
6314 case X86::VMOVUPSZ256rm_NOVLX:
6316 get(X86::VBROADCASTF64X4Zrm), X86::sub_ymm);
6317 case X86::VMOVAPSZ128mr_NOVLX:
6319 get(X86::VEXTRACTF32X4Zmri), X86::sub_xmm);
6320 case X86::VMOVUPSZ128mr_NOVLX:
6322 get(X86::VEXTRACTF32X4Zmri), X86::sub_xmm);
6323 case X86::VMOVAPSZ256mr_NOVLX:
6325 get(X86::VEXTRACTF64X4Zmri), X86::sub_ymm);
6326 case X86::VMOVUPSZ256mr_NOVLX:
6328 get(X86::VEXTRACTF64X4Zmri), X86::sub_ymm);
6329 case X86::MOV32ri64: {
6331 Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit);
6332 MI.setDesc(
get(X86::MOV32ri));
6338 case X86::RDFLAGS32:
6339 case X86::RDFLAGS64: {
6340 unsigned Is64Bit =
MI.getOpcode() == X86::RDFLAGS64;
6344 get(Is64Bit ? X86::PUSHF64 : X86::PUSHF32))
6352 "Unexpected register in operand! Should be EFLAGS.");
6355 "Unexpected register in operand! Should be DF.");
6358 MIB->
setDesc(
get(Is64Bit ? X86::POP64r : X86::POP32r));
6362 case X86::WRFLAGS32:
6363 case X86::WRFLAGS64: {
6364 unsigned Is64Bit =
MI.getOpcode() == X86::WRFLAGS64;
6368 get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
6369 .
addReg(
MI.getOperand(0).getReg());
6371 get(Is64Bit ? X86::POPF64 : X86::POPF32));
6372 MI.eraseFromParent();
6399 case TargetOpcode::LOAD_STACK_GUARD:
6405 case X86::SHLDROT32ri:
6407 case X86::SHLDROT64ri:
6409 case X86::SHRDROT32ri:
6411 case X86::SHRDROT64ri:
6413 case X86::ADD8rr_DB:
6416 case X86::ADD16rr_DB:
6419 case X86::ADD32rr_DB:
6422 case X86::ADD64rr_DB:
6425 case X86::ADD8ri_DB:
6428 case X86::ADD16ri_DB:
6431 case X86::ADD32ri_DB:
6434 case X86::ADD64ri32_DB:
6458 bool ForLoadFold =
false) {
6460 case X86::CVTSI2SSrr:
6461 case X86::CVTSI2SSrm:
6462 case X86::CVTSI642SSrr:
6463 case X86::CVTSI642SSrm:
6464 case X86::CVTSI2SDrr:
6465 case X86::CVTSI2SDrm:
6466 case X86::CVTSI642SDrr:
6467 case X86::CVTSI642SDrm:
6470 return !ForLoadFold;
6471 case X86::CVTSD2SSrr:
6472 case X86::CVTSD2SSrm:
6473 case X86::CVTSS2SDrr:
6474 case X86::CVTSS2SDrm:
6481 case X86::RCPSSr_Int:
6482 case X86::RCPSSm_Int:
6483 case X86::ROUNDSDri:
6484 case X86::ROUNDSDmi:
6485 case X86::ROUNDSSri:
6486 case X86::ROUNDSSmi:
6489 case X86::RSQRTSSr_Int:
6490 case X86::RSQRTSSm_Int:
6493 case X86::SQRTSSr_Int:
6494 case X86::SQRTSSm_Int:
6497 case X86::SQRTSDr_Int:
6498 case X86::SQRTSDm_Int:
6500 case X86::VFCMULCPHZ128rm:
6501 case X86::VFCMULCPHZ128rmb:
6502 case X86::VFCMULCPHZ128rmbkz:
6503 case X86::VFCMULCPHZ128rmkz:
6504 case X86::VFCMULCPHZ128rr:
6505 case X86::VFCMULCPHZ128rrkz:
6506 case X86::VFCMULCPHZ256rm:
6507 case X86::VFCMULCPHZ256rmb:
6508 case X86::VFCMULCPHZ256rmbkz:
6509 case X86::VFCMULCPHZ256rmkz:
6510 case X86::VFCMULCPHZ256rr:
6511 case X86::VFCMULCPHZ256rrkz:
6512 case X86::VFCMULCPHZrm:
6513 case X86::VFCMULCPHZrmb:
6514 case X86::VFCMULCPHZrmbkz:
6515 case X86::VFCMULCPHZrmkz:
6516 case X86::VFCMULCPHZrr:
6517 case X86::VFCMULCPHZrrb:
6518 case X86::VFCMULCPHZrrbkz:
6519 case X86::VFCMULCPHZrrkz:
6520 case X86::VFMULCPHZ128rm:
6521 case X86::VFMULCPHZ128rmb:
6522 case X86::VFMULCPHZ128rmbkz:
6523 case X86::VFMULCPHZ128rmkz:
6524 case X86::VFMULCPHZ128rr:
6525 case X86::VFMULCPHZ128rrkz:
6526 case X86::VFMULCPHZ256rm:
6527 case X86::VFMULCPHZ256rmb:
6528 case X86::VFMULCPHZ256rmbkz:
6529 case X86::VFMULCPHZ256rmkz:
6530 case X86::VFMULCPHZ256rr:
6531 case X86::VFMULCPHZ256rrkz:
6532 case X86::VFMULCPHZrm:
6533 case X86::VFMULCPHZrmb:
6534 case X86::VFMULCPHZrmbkz:
6535 case X86::VFMULCPHZrmkz:
6536 case X86::VFMULCPHZrr:
6537 case X86::VFMULCPHZrrb:
6538 case X86::VFMULCPHZrrbkz:
6539 case X86::VFMULCPHZrrkz:
6540 case X86::VFCMULCSHZrm:
6541 case X86::VFCMULCSHZrmkz:
6542 case X86::VFCMULCSHZrr:
6543 case X86::VFCMULCSHZrrb:
6544 case X86::VFCMULCSHZrrbkz:
6545 case X86::VFCMULCSHZrrkz:
6546 case X86::VFMULCSHZrm:
6547 case X86::VFMULCSHZrmkz:
6548 case X86::VFMULCSHZrr:
6549 case X86::VFMULCSHZrrb:
6550 case X86::VFMULCSHZrrbkz:
6551 case X86::VFMULCSHZrrkz:
6552 return Subtarget.hasMULCFalseDeps();
6553 case X86::VPERMDYrm:
6554 case X86::VPERMDYrr:
6555 case X86::VPERMQYmi:
6556 case X86::VPERMQYri:
6557 case X86::VPERMPSYrm:
6558 case X86::VPERMPSYrr:
6559 case X86::VPERMPDYmi:
6560 case X86::VPERMPDYri:
6561 case X86::VPERMDZ256rm:
6562 case X86::VPERMDZ256rmb:
6563 case X86::VPERMDZ256rmbkz:
6564 case X86::VPERMDZ256rmkz:
6565 case X86::VPERMDZ256rr:
6566 case X86::VPERMDZ256rrkz:
6567 case X86::VPERMDZrm:
6568 case X86::VPERMDZrmb:
6569 case X86::VPERMDZrmbkz:
6570 case X86::VPERMDZrmkz:
6571 case X86::VPERMDZrr:
6572 case X86::VPERMDZrrkz:
6573 case X86::VPERMQZ256mbi:
6574 case X86::VPERMQZ256mbikz:
6575 case X86::VPERMQZ256mi:
6576 case X86::VPERMQZ256mikz:
6577 case X86::VPERMQZ256ri:
6578 case X86::VPERMQZ256rikz:
6579 case X86::VPERMQZ256rm:
6580 case X86::VPERMQZ256rmb:
6581 case X86::VPERMQZ256rmbkz:
6582 case X86::VPERMQZ256rmkz:
6583 case X86::VPERMQZ256rr:
6584 case X86::VPERMQZ256rrkz:
6585 case X86::VPERMQZmbi:
6586 case X86::VPERMQZmbikz:
6587 case X86::VPERMQZmi:
6588 case X86::VPERMQZmikz:
6589 case X86::VPERMQZri:
6590 case X86::VPERMQZrikz:
6591 case X86::VPERMQZrm:
6592 case X86::VPERMQZrmb:
6593 case X86::VPERMQZrmbkz:
6594 case X86::VPERMQZrmkz:
6595 case X86::VPERMQZrr:
6596 case X86::VPERMQZrrkz:
6597 case X86::VPERMPSZ256rm:
6598 case X86::VPERMPSZ256rmb:
6599 case X86::VPERMPSZ256rmbkz:
6600 case X86::VPERMPSZ256rmkz:
6601 case X86::VPERMPSZ256rr:
6602 case X86::VPERMPSZ256rrkz:
6603 case X86::VPERMPSZrm:
6604 case X86::VPERMPSZrmb:
6605 case X86::VPERMPSZrmbkz:
6606 case X86::VPERMPSZrmkz:
6607 case X86::VPERMPSZrr:
6608 case X86::VPERMPSZrrkz:
6609 case X86::VPERMPDZ256mbi:
6610 case X86::VPERMPDZ256mbikz:
6611 case X86::VPERMPDZ256mi:
6612 case X86::VPERMPDZ256mikz:
6613 case X86::VPERMPDZ256ri:
6614 case X86::VPERMPDZ256rikz:
6615 case X86::VPERMPDZ256rm:
6616 case X86::VPERMPDZ256rmb:
6617 case X86::VPERMPDZ256rmbkz:
6618 case X86::VPERMPDZ256rmkz:
6619 case X86::VPERMPDZ256rr:
6620 case X86::VPERMPDZ256rrkz:
6621 case X86::VPERMPDZmbi:
6622 case X86::VPERMPDZmbikz:
6623 case X86::VPERMPDZmi:
6624 case X86::VPERMPDZmikz:
6625 case X86::VPERMPDZri:
6626 case X86::VPERMPDZrikz:
6627 case X86::VPERMPDZrm:
6628 case X86::VPERMPDZrmb:
6629 case X86::VPERMPDZrmbkz:
6630 case X86::VPERMPDZrmkz:
6631 case X86::VPERMPDZrr:
6632 case X86::VPERMPDZrrkz:
6633 return Subtarget.hasPERMFalseDeps();
6634 case X86::VRANGEPDZ128rmbi:
6635 case X86::VRANGEPDZ128rmbikz:
6636 case X86::VRANGEPDZ128rmi:
6637 case X86::VRANGEPDZ128rmikz:
6638 case X86::VRANGEPDZ128rri:
6639 case X86::VRANGEPDZ128rrikz:
6640 case X86::VRANGEPDZ256rmbi:
6641 case X86::VRANGEPDZ256rmbikz:
6642 case X86::VRANGEPDZ256rmi:
6643 case X86::VRANGEPDZ256rmikz:
6644 case X86::VRANGEPDZ256rri:
6645 case X86::VRANGEPDZ256rrikz:
6646 case X86::VRANGEPDZrmbi:
6647 case X86::VRANGEPDZrmbikz:
6648 case X86::VRANGEPDZrmi:
6649 case X86::VRANGEPDZrmikz:
6650 case X86::VRANGEPDZrri:
6651 case X86::VRANGEPDZrrib:
6652 case X86::VRANGEPDZrribkz:
6653 case X86::VRANGEPDZrrikz:
6654 case X86::VRANGEPSZ128rmbi:
6655 case X86::VRANGEPSZ128rmbikz:
6656 case X86::VRANGEPSZ128rmi:
6657 case X86::VRANGEPSZ128rmikz:
6658 case X86::VRANGEPSZ128rri:
6659 case X86::VRANGEPSZ128rrikz:
6660 case X86::VRANGEPSZ256rmbi:
6661 case X86::VRANGEPSZ256rmbikz:
6662 case X86::VRANGEPSZ256rmi:
6663 case X86::VRANGEPSZ256rmikz:
6664 case X86::VRANGEPSZ256rri:
6665 case X86::VRANGEPSZ256rrikz:
6666 case X86::VRANGEPSZrmbi:
6667 case X86::VRANGEPSZrmbikz:
6668 case X86::VRANGEPSZrmi:
6669 case X86::VRANGEPSZrmikz:
6670 case X86::VRANGEPSZrri:
6671 case X86::VRANGEPSZrrib:
6672 case X86::VRANGEPSZrribkz:
6673 case X86::VRANGEPSZrrikz:
6674 case X86::VRANGESDZrmi:
6675 case X86::VRANGESDZrmikz:
6676 case X86::VRANGESDZrri:
6677 case X86::VRANGESDZrrib:
6678 case X86::VRANGESDZrribkz:
6679 case X86::VRANGESDZrrikz:
6680 case X86::VRANGESSZrmi:
6681 case X86::VRANGESSZrmikz:
6682 case X86::VRANGESSZrri:
6683 case X86::VRANGESSZrrib:
6684 case X86::VRANGESSZrribkz:
6685 case X86::VRANGESSZrrikz:
6686 return Subtarget.hasRANGEFalseDeps();
6687 case X86::VGETMANTSSZrmi:
6688 case X86::VGETMANTSSZrmikz:
6689 case X86::VGETMANTSSZrri:
6690 case X86::VGETMANTSSZrrib:
6691 case X86::VGETMANTSSZrribkz:
6692 case X86::VGETMANTSSZrrikz:
6693 case X86::VGETMANTSDZrmi:
6694 case X86::VGETMANTSDZrmikz:
6695 case X86::VGETMANTSDZrri:
6696 case X86::VGETMANTSDZrrib:
6697 case X86::VGETMANTSDZrribkz:
6698 case X86::VGETMANTSDZrrikz:
6699 case X86::VGETMANTSHZrmi:
6700 case X86::VGETMANTSHZrmikz:
6701 case X86::VGETMANTSHZrri:
6702 case X86::VGETMANTSHZrrib:
6703 case X86::VGETMANTSHZrribkz:
6704 case X86::VGETMANTSHZrrikz:
6705 case X86::VGETMANTPSZ128rmbi:
6706 case X86::VGETMANTPSZ128rmbikz:
6707 case X86::VGETMANTPSZ128rmi:
6708 case X86::VGETMANTPSZ128rmikz:
6709 case X86::VGETMANTPSZ256rmbi:
6710 case X86::VGETMANTPSZ256rmbikz:
6711 case X86::VGETMANTPSZ256rmi:
6712 case X86::VGETMANTPSZ256rmikz:
6713 case X86::VGETMANTPSZrmbi:
6714 case X86::VGETMANTPSZrmbikz:
6715 case X86::VGETMANTPSZrmi:
6716 case X86::VGETMANTPSZrmikz:
6717 case X86::VGETMANTPDZ128rmbi:
6718 case X86::VGETMANTPDZ128rmbikz:
6719 case X86::VGETMANTPDZ128rmi:
6720 case X86::VGETMANTPDZ128rmikz:
6721 case X86::VGETMANTPDZ256rmbi:
6722 case X86::VGETMANTPDZ256rmbikz:
6723 case X86::VGETMANTPDZ256rmi:
6724 case X86::VGETMANTPDZ256rmikz:
6725 case X86::VGETMANTPDZrmbi:
6726 case X86::VGETMANTPDZrmbikz:
6727 case X86::VGETMANTPDZrmi:
6728 case X86::VGETMANTPDZrmikz:
6729 return Subtarget.hasGETMANTFalseDeps();
6730 case X86::VPMULLQZ128rm:
6731 case X86::VPMULLQZ128rmb:
6732 case X86::VPMULLQZ128rmbkz:
6733 case X86::VPMULLQZ128rmkz:
6734 case X86::VPMULLQZ128rr:
6735 case X86::VPMULLQZ128rrkz:
6736 case X86::VPMULLQZ256rm:
6737 case X86::VPMULLQZ256rmb:
6738 case X86::VPMULLQZ256rmbkz:
6739 case X86::VPMULLQZ256rmkz:
6740 case X86::VPMULLQZ256rr:
6741 case X86::VPMULLQZ256rrkz:
6742 case X86::VPMULLQZrm:
6743 case X86::VPMULLQZrmb:
6744 case X86::VPMULLQZrmbkz:
6745 case X86::VPMULLQZrmkz:
6746 case X86::VPMULLQZrr:
6747 case X86::VPMULLQZrrkz:
6748 return Subtarget.hasMULLQFalseDeps();
6750 case X86::POPCNT32rm:
6751 case X86::POPCNT32rr:
6752 case X86::POPCNT64rm:
6753 case X86::POPCNT64rr:
6754 return Subtarget.hasPOPCNTFalseDeps();
6755 case X86::LZCNT32rm:
6756 case X86::LZCNT32rr:
6757 case X86::LZCNT64rm:
6758 case X86::LZCNT64rr:
6759 case X86::TZCNT32rm:
6760 case X86::TZCNT32rr:
6761 case X86::TZCNT64rm:
6762 case X86::TZCNT64rr:
6763 return Subtarget.hasLZCNTFalseDeps();
6780 bool HasNDDPartialWrite =
false;
6783 if (!Reg.isVirtual())
6784 HasNDDPartialWrite =
6785 X86::GR8RegClass.contains(Reg) || X86::GR16RegClass.contains(Reg);
6798 bool ReadsReg =
false;
6799 if (Reg.isVirtual())
6800 ReadsReg = (MO.
readsReg() ||
MI.readsVirtualRegister(Reg));
6802 ReadsReg =
MI.readsRegister(Reg,
TRI);
6803 if (ReadsReg != HasNDDPartialWrite)
6817 bool ForLoadFold =
false) {
6820 case X86::MMX_PUNPCKHBWrr:
6821 case X86::MMX_PUNPCKHWDrr:
6822 case X86::MMX_PUNPCKHDQrr:
6823 case X86::MMX_PUNPCKLBWrr:
6824 case X86::MMX_PUNPCKLWDrr:
6825 case X86::MMX_PUNPCKLDQrr:
6826 case X86::MOVHLPSrr:
6827 case X86::PACKSSWBrr:
6828 case X86::PACKUSWBrr:
6829 case X86::PACKSSDWrr:
6830 case X86::PACKUSDWrr:
6831 case X86::PUNPCKHBWrr:
6832 case X86::PUNPCKLBWrr:
6833 case X86::PUNPCKHWDrr:
6834 case X86::PUNPCKLWDrr:
6835 case X86::PUNPCKHDQrr:
6836 case X86::PUNPCKLDQrr:
6837 case X86::PUNPCKHQDQrr:
6838 case X86::PUNPCKLQDQrr:
6839 case X86::SHUFPDrri:
6840 case X86::SHUFPSrri:
6846 return OpNum == 2 && !ForLoadFold;
6848 case X86::VMOVLHPSrr:
6849 case X86::VMOVLHPSZrr:
6850 case X86::VPACKSSWBrr:
6851 case X86::VPACKUSWBrr:
6852 case X86::VPACKSSDWrr:
6853 case X86::VPACKUSDWrr:
6854 case X86::VPACKSSWBZ128rr:
6855 case X86::VPACKUSWBZ128rr:
6856 case X86::VPACKSSDWZ128rr:
6857 case X86::VPACKUSDWZ128rr:
6858 case X86::VPERM2F128rri:
6859 case X86::VPERM2I128rri:
6860 case X86::VSHUFF32X4Z256rri:
6861 case X86::VSHUFF32X4Zrri:
6862 case X86::VSHUFF64X2Z256rri:
6863 case X86::VSHUFF64X2Zrri:
6864 case X86::VSHUFI32X4Z256rri:
6865 case X86::VSHUFI32X4Zrri:
6866 case X86::VSHUFI64X2Z256rri:
6867 case X86::VSHUFI64X2Zrri:
6868 case X86::VPUNPCKHBWrr:
6869 case X86::VPUNPCKLBWrr:
6870 case X86::VPUNPCKHBWYrr:
6871 case X86::VPUNPCKLBWYrr:
6872 case X86::VPUNPCKHBWZ128rr:
6873 case X86::VPUNPCKLBWZ128rr:
6874 case X86::VPUNPCKHBWZ256rr:
6875 case X86::VPUNPCKLBWZ256rr:
6876 case X86::VPUNPCKHBWZrr:
6877 case X86::VPUNPCKLBWZrr:
6878 case X86::VPUNPCKHWDrr:
6879 case X86::VPUNPCKLWDrr:
6880 case X86::VPUNPCKHWDYrr:
6881 case X86::VPUNPCKLWDYrr:
6882 case X86::VPUNPCKHWDZ128rr:
6883 case X86::VPUNPCKLWDZ128rr:
6884 case X86::VPUNPCKHWDZ256rr:
6885 case X86::VPUNPCKLWDZ256rr:
6886 case X86::VPUNPCKHWDZrr:
6887 case X86::VPUNPCKLWDZrr:
6888 case X86::VPUNPCKHDQrr:
6889 case X86::VPUNPCKLDQrr:
6890 case X86::VPUNPCKHDQYrr:
6891 case X86::VPUNPCKLDQYrr:
6892 case X86::VPUNPCKHDQZ128rr:
6893 case X86::VPUNPCKLDQZ128rr:
6894 case X86::VPUNPCKHDQZ256rr:
6895 case X86::VPUNPCKLDQZ256rr:
6896 case X86::VPUNPCKHDQZrr:
6897 case X86::VPUNPCKLDQZrr:
6898 case X86::VPUNPCKHQDQrr:
6899 case X86::VPUNPCKLQDQrr:
6900 case X86::VPUNPCKHQDQYrr:
6901 case X86::VPUNPCKLQDQYrr:
6902 case X86::VPUNPCKHQDQZ128rr:
6903 case X86::VPUNPCKLQDQZ128rr:
6904 case X86::VPUNPCKHQDQZ256rr:
6905 case X86::VPUNPCKLQDQZ256rr:
6906 case X86::VPUNPCKHQDQZrr:
6907 case X86::VPUNPCKLQDQZrr:
6911 return (OpNum == 1 || OpNum == 2) && !ForLoadFold;
6913 case X86::VCVTSI2SSrr:
6914 case X86::VCVTSI2SSrm:
6915 case X86::VCVTSI2SSrr_Int:
6916 case X86::VCVTSI2SSrm_Int:
6917 case X86::VCVTSI642SSrr:
6918 case X86::VCVTSI642SSrm:
6919 case X86::VCVTSI642SSrr_Int:
6920 case X86::VCVTSI642SSrm_Int:
6921 case X86::VCVTSI2SDrr:
6922 case X86::VCVTSI2SDrm:
6923 case X86::VCVTSI2SDrr_Int:
6924 case X86::VCVTSI2SDrm_Int:
6925 case X86::VCVTSI642SDrr:
6926 case X86::VCVTSI642SDrm:
6927 case X86::VCVTSI642SDrr_Int:
6928 case X86::VCVTSI642SDrm_Int:
6930 case X86::VCVTSI2SSZrr:
6931 case X86::VCVTSI2SSZrm:
6932 case X86::VCVTSI2SSZrr_Int:
6933 case X86::VCVTSI2SSZrrb_Int:
6934 case X86::VCVTSI2SSZrm_Int:
6935 case X86::VCVTSI642SSZrr:
6936 case X86::VCVTSI642SSZrm:
6937 case X86::VCVTSI642SSZrr_Int:
6938 case X86::VCVTSI642SSZrrb_Int:
6939 case X86::VCVTSI642SSZrm_Int:
6940 case X86::VCVTSI2SDZrr:
6941 case X86::VCVTSI2SDZrm:
6942 case X86::VCVTSI2SDZrr_Int:
6943 case X86::VCVTSI2SDZrm_Int:
6944 case X86::VCVTSI642SDZrr:
6945 case X86::VCVTSI642SDZrm:
6946 case X86::VCVTSI642SDZrr_Int:
6947 case X86::VCVTSI642SDZrrb_Int:
6948 case X86::VCVTSI642SDZrm_Int:
6949 case X86::VCVTUSI2SSZrr:
6950 case X86::VCVTUSI2SSZrm:
6951 case X86::VCVTUSI2SSZrr_Int:
6952 case X86::VCVTUSI2SSZrrb_Int:
6953 case X86::VCVTUSI2SSZrm_Int:
6954 case X86::VCVTUSI642SSZrr:
6955 case X86::VCVTUSI642SSZrm:
6956 case X86::VCVTUSI642SSZrr_Int:
6957 case X86::VCVTUSI642SSZrrb_Int:
6958 case X86::VCVTUSI642SSZrm_Int:
6959 case X86::VCVTUSI2SDZrr:
6960 case X86::VCVTUSI2SDZrm:
6961 case X86::VCVTUSI2SDZrr_Int:
6962 case X86::VCVTUSI2SDZrm_Int:
6963 case X86::VCVTUSI642SDZrr:
6964 case X86::VCVTUSI642SDZrm:
6965 case X86::VCVTUSI642SDZrr_Int:
6966 case X86::VCVTUSI642SDZrrb_Int:
6967 case X86::VCVTUSI642SDZrm_Int:
6968 case X86::VCVTSI2SHZrr:
6969 case X86::VCVTSI2SHZrm:
6970 case X86::VCVTSI2SHZrr_Int:
6971 case X86::VCVTSI2SHZrrb_Int:
6972 case X86::VCVTSI2SHZrm_Int:
6973 case X86::VCVTSI642SHZrr:
6974 case X86::VCVTSI642SHZrm:
6975 case X86::VCVTSI642SHZrr_Int:
6976 case X86::VCVTSI642SHZrrb_Int:
6977 case X86::VCVTSI642SHZrm_Int:
6978 case X86::VCVTUSI2SHZrr:
6979 case X86::VCVTUSI2SHZrm:
6980 case X86::VCVTUSI2SHZrr_Int:
6981 case X86::VCVTUSI2SHZrrb_Int:
6982 case X86::VCVTUSI2SHZrm_Int:
6983 case X86::VCVTUSI642SHZrr:
6984 case X86::VCVTUSI642SHZrm:
6985 case X86::VCVTUSI642SHZrr_Int:
6986 case X86::VCVTUSI642SHZrrb_Int:
6987 case X86::VCVTUSI642SHZrm_Int:
6990 return OpNum == 1 && !ForLoadFold;
6991 case X86::VCVTSD2SSrr:
6992 case X86::VCVTSD2SSrm:
6993 case X86::VCVTSD2SSrr_Int:
6994 case X86::VCVTSD2SSrm_Int:
6995 case X86::VCVTSS2SDrr:
6996 case X86::VCVTSS2SDrm:
6997 case X86::VCVTSS2SDrr_Int:
6998 case X86::VCVTSS2SDrm_Int:
7000 case X86::VRCPSSr_Int:
7002 case X86::VRCPSSm_Int:
7003 case X86::VROUNDSDri:
7004 case X86::VROUNDSDmi:
7005 case X86::VROUNDSDri_Int:
7006 case X86::VROUNDSDmi_Int:
7007 case X86::VROUNDSSri:
7008 case X86::VROUNDSSmi:
7009 case X86::VROUNDSSri_Int:
7010 case X86::VROUNDSSmi_Int:
7011 case X86::VRSQRTSSr:
7012 case X86::VRSQRTSSr_Int:
7013 case X86::VRSQRTSSm:
7014 case X86::VRSQRTSSm_Int:
7016 case X86::VSQRTSSr_Int:
7018 case X86::VSQRTSSm_Int:
7020 case X86::VSQRTSDr_Int:
7022 case X86::VSQRTSDm_Int:
7024 case X86::VCVTSD2SSZrr:
7025 case X86::VCVTSD2SSZrr_Int:
7026 case X86::VCVTSD2SSZrrb_Int:
7027 case X86::VCVTSD2SSZrm:
7028 case X86::VCVTSD2SSZrm_Int:
7029 case X86::VCVTSS2SDZrr:
7030 case X86::VCVTSS2SDZrr_Int:
7031 case X86::VCVTSS2SDZrrb_Int:
7032 case X86::VCVTSS2SDZrm:
7033 case X86::VCVTSS2SDZrm_Int:
7034 case X86::VGETEXPSDZr:
7035 case X86::VGETEXPSDZrb:
7036 case X86::VGETEXPSDZm:
7037 case X86::VGETEXPSSZr:
7038 case X86::VGETEXPSSZrb:
7039 case X86::VGETEXPSSZm:
7040 case X86::VGETMANTSDZrri:
7041 case X86::VGETMANTSDZrrib:
7042 case X86::VGETMANTSDZrmi:
7043 case X86::VGETMANTSSZrri:
7044 case X86::VGETMANTSSZrrib:
7045 case X86::VGETMANTSSZrmi:
7046 case X86::VRNDSCALESDZrri:
7047 case X86::VRNDSCALESDZrri_Int:
7048 case X86::VRNDSCALESDZrrib_Int:
7049 case X86::VRNDSCALESDZrmi:
7050 case X86::VRNDSCALESDZrmi_Int:
7051 case X86::VRNDSCALESSZrri:
7052 case X86::VRNDSCALESSZrri_Int:
7053 case X86::VRNDSCALESSZrrib_Int:
7054 case X86::VRNDSCALESSZrmi:
7055 case X86::VRNDSCALESSZrmi_Int:
7056 case X86::VRCP14SDZrr:
7057 case X86::VRCP14SDZrm:
7058 case X86::VRCP14SSZrr:
7059 case X86::VRCP14SSZrm:
7060 case X86::VRCPSHZrr:
7061 case X86::VRCPSHZrm:
7062 case X86::VRSQRTSHZrr:
7063 case X86::VRSQRTSHZrm:
7064 case X86::VREDUCESHZrmi:
7065 case X86::VREDUCESHZrri:
7066 case X86::VREDUCESHZrrib:
7067 case X86::VGETEXPSHZr:
7068 case X86::VGETEXPSHZrb:
7069 case X86::VGETEXPSHZm:
7070 case X86::VGETMANTSHZrri:
7071 case X86::VGETMANTSHZrrib:
7072 case X86::VGETMANTSHZrmi:
7073 case X86::VRNDSCALESHZrri:
7074 case X86::VRNDSCALESHZrri_Int:
7075 case X86::VRNDSCALESHZrrib_Int:
7076 case X86::VRNDSCALESHZrmi:
7077 case X86::VRNDSCALESHZrmi_Int:
7078 case X86::VSQRTSHZr:
7079 case X86::VSQRTSHZr_Int:
7080 case X86::VSQRTSHZrb_Int:
7081 case X86::VSQRTSHZm:
7082 case X86::VSQRTSHZm_Int:
7083 case X86::VRCP28SDZr:
7084 case X86::VRCP28SDZrb:
7085 case X86::VRCP28SDZm:
7086 case X86::VRCP28SSZr:
7087 case X86::VRCP28SSZrb:
7088 case X86::VRCP28SSZm:
7089 case X86::VREDUCESSZrmi:
7090 case X86::VREDUCESSZrri:
7091 case X86::VREDUCESSZrrib:
7092 case X86::VRSQRT14SDZrr:
7093 case X86::VRSQRT14SDZrm:
7094 case X86::VRSQRT14SSZrr:
7095 case X86::VRSQRT14SSZrm:
7096 case X86::VRSQRT28SDZr:
7097 case X86::VRSQRT28SDZrb:
7098 case X86::VRSQRT28SDZm:
7099 case X86::VRSQRT28SSZr:
7100 case X86::VRSQRT28SSZrb:
7101 case X86::VRSQRT28SSZm:
7102 case X86::VSQRTSSZr:
7103 case X86::VSQRTSSZr_Int:
7104 case X86::VSQRTSSZrb_Int:
7105 case X86::VSQRTSSZm:
7106 case X86::VSQRTSSZm_Int:
7107 case X86::VSQRTSDZr:
7108 case X86::VSQRTSDZr_Int:
7109 case X86::VSQRTSDZrb_Int:
7110 case X86::VSQRTSDZm:
7111 case X86::VSQRTSDZm_Int:
7112 case X86::VCVTSD2SHZrr:
7113 case X86::VCVTSD2SHZrr_Int:
7114 case X86::VCVTSD2SHZrrb_Int:
7115 case X86::VCVTSD2SHZrm:
7116 case X86::VCVTSD2SHZrm_Int:
7117 case X86::VCVTSS2SHZrr:
7118 case X86::VCVTSS2SHZrr_Int:
7119 case X86::VCVTSS2SHZrrb_Int:
7120 case X86::VCVTSS2SHZrm:
7121 case X86::VCVTSS2SHZrm_Int:
7122 case X86::VCVTSH2SDZrr:
7123 case X86::VCVTSH2SDZrr_Int:
7124 case X86::VCVTSH2SDZrrb_Int:
7125 case X86::VCVTSH2SDZrm:
7126 case X86::VCVTSH2SDZrm_Int:
7127 case X86::VCVTSH2SSZrr:
7128 case X86::VCVTSH2SSZrr_Int:
7129 case X86::VCVTSH2SSZrrb_Int:
7130 case X86::VCVTSH2SSZrm:
7131 case X86::VCVTSH2SSZrm_Int:
7133 case X86::VMOVSSZrrk:
7134 case X86::VMOVSDZrrk:
7135 return OpNum == 3 && !ForLoadFold;
7136 case X86::VMOVSSZrrkz:
7137 case X86::VMOVSDZrrkz:
7138 return OpNum == 2 && !ForLoadFold;
7170 Register Reg =
MI.getOperand(OpNum).getReg();
7172 if (
MI.killsRegister(Reg,
TRI))
7175 if (X86::VR128RegClass.
contains(Reg)) {
7178 unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
7182 MI.addRegisterKilled(Reg,
TRI,
true);
7183 }
else if (X86::VR256RegClass.
contains(Reg)) {
7186 Register XReg =
TRI->getSubReg(Reg, X86::sub_xmm);
7191 MI.addRegisterKilled(Reg,
TRI,
true);
7192 }
else if (X86::VR128XRegClass.
contains(Reg)) {
7194 if (!Subtarget.hasVLX())
7197 BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
get(X86::VPXORDZ128rr), Reg)
7200 MI.addRegisterKilled(Reg,
TRI,
true);
7201 }
else if (X86::VR256XRegClass.
contains(Reg) ||
7202 X86::VR512RegClass.
contains(Reg)) {
7204 if (!Subtarget.hasVLX())
7208 Register XReg =
TRI->getSubReg(Reg, X86::sub_xmm);
7209 BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
get(X86::VPXORDZ128rr), XReg)
7213 MI.addRegisterKilled(Reg,
TRI,
true);
7214 }
else if (X86::GR64RegClass.
contains(Reg)) {
7217 Register XReg =
TRI->getSubReg(Reg, X86::sub_32bit);
7222 MI.addRegisterKilled(Reg,
TRI,
true);
7223 }
else if (X86::GR32RegClass.
contains(Reg)) {
7227 MI.addRegisterKilled(Reg,
TRI,
true);
7228 }
else if ((X86::GR16RegClass.
contains(Reg) ||
7237 if (!
MI.definesRegister(SuperReg,
nullptr))
7243 int PtrOffset = 0) {
7244 unsigned NumAddrOps = MOs.
size();
7246 if (NumAddrOps < 4) {
7248 for (
unsigned i = 0; i != NumAddrOps; ++i)
7254 assert(MOs.
size() == 5 &&
"Unexpected memory operand list length");
7255 for (
unsigned i = 0; i != NumAddrOps; ++i) {
7257 if (i == 3 && PtrOffset != 0) {
7277 if (!
Reg.isVirtual())
7284 dbgs() <<
"WARNING: Unable to update register constraint for operand "
7285 << Idx <<
" of instruction:\n";
7299 MF.CreateMachineInstr(
TII.get(Opcode),
MI.getDebugLoc(),
true);
7304 unsigned NumOps =
MI.getDesc().getNumOperands() - 2;
7305 for (
unsigned i = 0; i !=
NumOps; ++i) {
7315 MBB->insert(InsertPt, NewMI);
7324 int PtrOffset = 0) {
7327 MF.CreateMachineInstr(
TII.get(Opcode),
MI.getDebugLoc(),
true);
7330 for (
unsigned i = 0, e =
MI.getNumOperands(); i != e; ++i) {
7333 assert(MO.
isReg() &&
"Expected to fold into reg operand!");
7347 MBB->insert(InsertPt, NewMI);
7357 MI.getDebugLoc(),
TII.get(Opcode));
7366 switch (
MI.getOpcode()) {
7367 case X86::INSERTPSrri:
7368 case X86::VINSERTPSrri:
7369 case X86::VINSERTPSZrri:
7373 unsigned Imm =
MI.getOperand(
MI.getNumOperands() - 1).getImm();
7374 unsigned ZMask =
Imm & 15;
7375 unsigned DstIdx = (
Imm >> 4) & 3;
7376 unsigned SrcIdx = (
Imm >> 6) & 3;
7379 const TargetRegisterClass *RC =
getRegClass(
MI.getDesc(), OpNum);
7380 unsigned RCSize =
TRI.getRegSizeInBits(*RC) / 8;
7381 if ((
Size == 0 ||
Size >= 16) && RCSize >= 16 &&
7382 (
MI.getOpcode() != X86::INSERTPSrri || Alignment >=
Align(4))) {
7383 int PtrOffset = SrcIdx * 4;
7384 unsigned NewImm = (DstIdx << 4) | ZMask;
7385 unsigned NewOpCode =
7386 (
MI.getOpcode() == X86::VINSERTPSZrri) ? X86::VINSERTPSZrmi
7387 : (
MI.getOpcode() == X86::VINSERTPSrri) ? X86::VINSERTPSrmi
7389 MachineInstr *NewMI =
7390 fuseInst(MF, NewOpCode, OpNum, MOs, InsertPt,
MI, *
this, PtrOffset);
7396 case X86::MOVHLPSrr:
7397 case X86::VMOVHLPSrr:
7398 case X86::VMOVHLPSZrr:
7404 const TargetRegisterClass *RC =
getRegClass(
MI.getDesc(), OpNum);
7405 unsigned RCSize =
TRI.getRegSizeInBits(*RC) / 8;
7406 if ((
Size == 0 ||
Size >= 16) && RCSize >= 16 && Alignment >=
Align(8)) {
7407 unsigned NewOpCode =
7408 (
MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm
7409 : (
MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm
7411 MachineInstr *NewMI =
7412 fuseInst(MF, NewOpCode, OpNum, MOs, InsertPt,
MI, *
this, 8);
7417 case X86::UNPCKLPDrr:
7423 const TargetRegisterClass *RC =
getRegClass(
MI.getDesc(), OpNum);
7424 unsigned RCSize =
TRI.getRegSizeInBits(*RC) / 8;
7425 if ((
Size == 0 ||
Size >= 16) && RCSize >= 16 && Alignment <
Align(16)) {
7426 MachineInstr *NewMI =
7427 fuseInst(MF, X86::MOVHPDrm, OpNum, MOs, InsertPt,
MI, *
this);
7434 makeM0Inst(*
this, (
Size == 4) ? X86::MOV32mi : X86::MOV64mi32, MOs,
7446 !
MI.getOperand(1).isReg())
7454 if (
MI.getOperand(1).isUndef())
7463 unsigned Idx1)
const {
7464 unsigned Idx2 = CommuteAnyOperandIndex;
7468 bool HasDef =
MI.getDesc().getNumDefs();
7470 Register Reg1 =
MI.getOperand(Idx1).getReg();
7471 Register Reg2 =
MI.getOperand(Idx2).getReg();
7472 bool Tied1 = 0 ==
MI.getDesc().getOperandConstraint(Idx1,
MCOI::TIED_TO);
7473 bool Tied2 = 0 ==
MI.getDesc().getOperandConstraint(Idx2,
MCOI::TIED_TO);
7477 if ((HasDef && Reg0 == Reg1 && Tied1) || (HasDef && Reg0 == Reg2 && Tied2))
7480 return commuteInstruction(
MI,
false, Idx1, Idx2) ? Idx2 : Idx1;
7485 dbgs() <<
"We failed to fuse operand " << Idx <<
" in " <<
MI;
7491 unsigned Size,
Align Alignment,
bool AllowCommute)
const {
7492 bool isSlowTwoMemOps = Subtarget.slowTwoMemOps();
7493 unsigned Opc =
MI.getOpcode();
7499 (
Opc == X86::CALL32r ||
Opc == X86::CALL64r ||
7500 Opc == X86::CALL64r_ImpCall ||
Opc == X86::PUSH16r ||
7501 Opc == X86::PUSH32r ||
Opc == X86::PUSH64r))
7510 unsigned NumOps =
MI.getDesc().getNumOperands();
7511 bool IsTwoAddr =
NumOps > 1 && OpNum < 2 &&
MI.getOperand(0).isReg() &&
7512 MI.getOperand(1).isReg() &&
7513 MI.getOperand(0).getReg() ==
MI.getOperand(1).getReg();
7517 if (
Opc == X86::ADD32ri &&
7526 Opc != X86::ADD64rr)
7531 if (
MI.isCall() &&
MI.getCFIType())
7535 if (
auto *CustomMI = foldMemoryOperandCustom(MF,
MI, OpNum, MOs, InsertPt,
7551 unsigned Opcode =
I->DstOp;
7555 bool NarrowToMOV32rm =
false;
7559 unsigned RCSize =
TRI.getRegSizeInBits(*RC) / 8;
7567 if (Opcode != X86::MOV64rm || RCSize != 8 ||
Size != 4)
7569 if (
MI.getOperand(0).getSubReg() ||
MI.getOperand(1).getSubReg())
7571 Opcode = X86::MOV32rm;
7572 NarrowToMOV32rm =
true;
7582 :
fuseInst(MF, Opcode, OpNum, MOs, InsertPt,
MI, *
this);
7584 if (NarrowToMOV32rm) {
7600 unsigned CommuteOpIdx2 = commuteOperandsForFold(
MI, OpNum);
7601 if (CommuteOpIdx2 == OpNum) {
7611 commuteInstruction(
MI,
false, OpNum, CommuteOpIdx2);
7633 for (
auto Op :
Ops) {
7638 if (
MI.getOpcode() == X86::MOV32r0 &&
SubReg == X86::sub_32bit)
7649 if (!RI.hasStackRealignment(MF))
7651 std::min(Alignment, Subtarget.getFrameLowering()->getStackAlign());
7656 Size, Alignment,
true);
7658 if (
Ops.size() == 2 &&
Ops[0] == 0 &&
Ops[1] == 1) {
7659 unsigned NewOpc = 0;
7660 unsigned RCSize = 0;
7661 unsigned Opc =
MI.getOpcode();
7668 NewOpc = X86::CMP8ri;
7672 NewOpc = X86::CMP16ri;
7676 NewOpc = X86::CMP32ri;
7680 NewOpc = X86::CMP64ri32;
7689 MI.setDesc(
get(NewOpc));
7690 MI.getOperand(1).ChangeToImmediate(0);
7691 }
else if (
Ops.size() != 1)
7719 unsigned RegSize =
TRI.getRegSizeInBits(*RC);
7721 if ((
Opc == X86::MOVSSrm ||
Opc == X86::VMOVSSrm ||
Opc == X86::VMOVSSZrm ||
7722 Opc == X86::MOVSSrm_alt ||
Opc == X86::VMOVSSrm_alt ||
7723 Opc == X86::VMOVSSZrm_alt) &&
7729 case X86::CVTSS2SDrr_Int:
7730 case X86::VCVTSS2SDrr_Int:
7731 case X86::VCVTSS2SDZrr_Int:
7732 case X86::VCVTSS2SDZrrk_Int:
7733 case X86::VCVTSS2SDZrrkz_Int:
7734 case X86::CVTSS2SIrr_Int:
7735 case X86::CVTSS2SI64rr_Int:
7736 case X86::VCVTSS2SIrr_Int:
7737 case X86::VCVTSS2SI64rr_Int:
7738 case X86::VCVTSS2SIZrr_Int:
7739 case X86::VCVTSS2SI64Zrr_Int:
7740 case X86::CVTTSS2SIrr_Int:
7741 case X86::CVTTSS2SI64rr_Int:
7742 case X86::VCVTTSS2SIrr_Int:
7743 case X86::VCVTTSS2SI64rr_Int:
7744 case X86::VCVTTSS2SIZrr_Int:
7745 case X86::VCVTTSS2SI64Zrr_Int:
7746 case X86::VCVTSS2USIZrr_Int:
7747 case X86::VCVTSS2USI64Zrr_Int:
7748 case X86::VCVTTSS2USIZrr_Int:
7749 case X86::VCVTTSS2USI64Zrr_Int:
7750 case X86::RCPSSr_Int:
7751 case X86::VRCPSSr_Int:
7752 case X86::RSQRTSSr_Int:
7753 case X86::VRSQRTSSr_Int:
7754 case X86::ROUNDSSri_Int:
7755 case X86::VROUNDSSri_Int:
7756 case X86::COMISSrr_Int:
7757 case X86::VCOMISSrr_Int:
7758 case X86::VCOMISSZrr_Int:
7759 case X86::UCOMISSrr_Int:
7760 case X86::VUCOMISSrr_Int:
7761 case X86::VUCOMISSZrr_Int:
7762 case X86::ADDSSrr_Int:
7763 case X86::VADDSSrr_Int:
7764 case X86::VADDSSZrr_Int:
7765 case X86::CMPSSrri_Int:
7766 case X86::VCMPSSrri_Int:
7767 case X86::VCMPSSZrri_Int:
7768 case X86::DIVSSrr_Int:
7769 case X86::VDIVSSrr_Int:
7770 case X86::VDIVSSZrr_Int:
7771 case X86::MAXSSrr_Int:
7772 case X86::VMAXSSrr_Int:
7773 case X86::VMAXSSZrr_Int:
7774 case X86::MINSSrr_Int:
7775 case X86::VMINSSrr_Int:
7776 case X86::VMINSSZrr_Int:
7777 case X86::MULSSrr_Int:
7778 case X86::VMULSSrr_Int:
7779 case X86::VMULSSZrr_Int:
7780 case X86::SQRTSSr_Int:
7781 case X86::VSQRTSSr_Int:
7782 case X86::VSQRTSSZr_Int:
7783 case X86::SUBSSrr_Int:
7784 case X86::VSUBSSrr_Int:
7785 case X86::VSUBSSZrr_Int:
7786 case X86::VADDSSZrrk_Int:
7787 case X86::VADDSSZrrkz_Int:
7788 case X86::VCMPSSZrrik_Int:
7789 case X86::VDIVSSZrrk_Int:
7790 case X86::VDIVSSZrrkz_Int:
7791 case X86::VMAXSSZrrk_Int:
7792 case X86::VMAXSSZrrkz_Int:
7793 case X86::VMINSSZrrk_Int:
7794 case X86::VMINSSZrrkz_Int:
7795 case X86::VMULSSZrrk_Int:
7796 case X86::VMULSSZrrkz_Int:
7797 case X86::VSQRTSSZrk_Int:
7798 case X86::VSQRTSSZrkz_Int:
7799 case X86::VSUBSSZrrk_Int:
7800 case X86::VSUBSSZrrkz_Int:
7801 case X86::VFMADDSS4rr_Int:
7802 case X86::VFNMADDSS4rr_Int:
7803 case X86::VFMSUBSS4rr_Int:
7804 case X86::VFNMSUBSS4rr_Int:
7805 case X86::VFMADD132SSr_Int:
7806 case X86::VFNMADD132SSr_Int:
7807 case X86::VFMADD213SSr_Int:
7808 case X86::VFNMADD213SSr_Int:
7809 case X86::VFMADD231SSr_Int:
7810 case X86::VFNMADD231SSr_Int:
7811 case X86::VFMSUB132SSr_Int:
7812 case X86::VFNMSUB132SSr_Int:
7813 case X86::VFMSUB213SSr_Int:
7814 case X86::VFNMSUB213SSr_Int:
7815 case X86::VFMSUB231SSr_Int:
7816 case X86::VFNMSUB231SSr_Int:
7817 case X86::VFMADD132SSZr_Int:
7818 case X86::VFNMADD132SSZr_Int:
7819 case X86::VFMADD213SSZr_Int:
7820 case X86::VFNMADD213SSZr_Int:
7821 case X86::VFMADD231SSZr_Int:
7822 case X86::VFNMADD231SSZr_Int:
7823 case X86::VFMSUB132SSZr_Int:
7824 case X86::VFNMSUB132SSZr_Int:
7825 case X86::VFMSUB213SSZr_Int:
7826 case X86::VFNMSUB213SSZr_Int:
7827 case X86::VFMSUB231SSZr_Int:
7828 case X86::VFNMSUB231SSZr_Int:
7829 case X86::VFMADD132SSZrk_Int:
7830 case X86::VFNMADD132SSZrk_Int:
7831 case X86::VFMADD213SSZrk_Int:
7832 case X86::VFNMADD213SSZrk_Int:
7833 case X86::VFMADD231SSZrk_Int:
7834 case X86::VFNMADD231SSZrk_Int:
7835 case X86::VFMSUB132SSZrk_Int:
7836 case X86::VFNMSUB132SSZrk_Int:
7837 case X86::VFMSUB213SSZrk_Int:
7838 case X86::VFNMSUB213SSZrk_Int:
7839 case X86::VFMSUB231SSZrk_Int:
7840 case X86::VFNMSUB231SSZrk_Int:
7841 case X86::VFMADD132SSZrkz_Int:
7842 case X86::VFNMADD132SSZrkz_Int:
7843 case X86::VFMADD213SSZrkz_Int:
7844 case X86::VFNMADD213SSZrkz_Int:
7845 case X86::VFMADD231SSZrkz_Int:
7846 case X86::VFNMADD231SSZrkz_Int:
7847 case X86::VFMSUB132SSZrkz_Int:
7848 case X86::VFNMSUB132SSZrkz_Int:
7849 case X86::VFMSUB213SSZrkz_Int:
7850 case X86::VFNMSUB213SSZrkz_Int:
7851 case X86::VFMSUB231SSZrkz_Int:
7852 case X86::VFNMSUB231SSZrkz_Int:
7853 case X86::VFIXUPIMMSSZrri:
7854 case X86::VFIXUPIMMSSZrrik:
7855 case X86::VFIXUPIMMSSZrrikz:
7856 case X86::VFPCLASSSSZri:
7857 case X86::VFPCLASSSSZrik:
7858 case X86::VGETEXPSSZr:
7859 case X86::VGETEXPSSZrk:
7860 case X86::VGETEXPSSZrkz:
7861 case X86::VGETMANTSSZrri:
7862 case X86::VGETMANTSSZrrik:
7863 case X86::VGETMANTSSZrrikz:
7864 case X86::VRANGESSZrri:
7865 case X86::VRANGESSZrrik:
7866 case X86::VRANGESSZrrikz:
7867 case X86::VRCP14SSZrr:
7868 case X86::VRCP14SSZrrk:
7869 case X86::VRCP14SSZrrkz:
7870 case X86::VRCP28SSZr:
7871 case X86::VRCP28SSZrk:
7872 case X86::VRCP28SSZrkz:
7873 case X86::VREDUCESSZrri:
7874 case X86::VREDUCESSZrrik:
7875 case X86::VREDUCESSZrrikz:
7876 case X86::VRNDSCALESSZrri_Int:
7877 case X86::VRNDSCALESSZrrik_Int:
7878 case X86::VRNDSCALESSZrrikz_Int:
7879 case X86::VRSQRT14SSZrr:
7880 case X86::VRSQRT14SSZrrk:
7881 case X86::VRSQRT14SSZrrkz:
7882 case X86::VRSQRT28SSZr:
7883 case X86::VRSQRT28SSZrk:
7884 case X86::VRSQRT28SSZrkz:
7885 case X86::VSCALEFSSZrr:
7886 case X86::VSCALEFSSZrrk:
7887 case X86::VSCALEFSSZrrkz:
7894 if ((
Opc == X86::MOVSDrm ||
Opc == X86::VMOVSDrm ||
Opc == X86::VMOVSDZrm ||
7895 Opc == X86::MOVSDrm_alt ||
Opc == X86::VMOVSDrm_alt ||
7896 Opc == X86::VMOVSDZrm_alt) &&
7902 case X86::CVTSD2SSrr_Int:
7903 case X86::VCVTSD2SSrr_Int:
7904 case X86::VCVTSD2SSZrr_Int:
7905 case X86::VCVTSD2SSZrrk_Int:
7906 case X86::VCVTSD2SSZrrkz_Int:
7907 case X86::CVTSD2SIrr_Int:
7908 case X86::CVTSD2SI64rr_Int:
7909 case X86::VCVTSD2SIrr_Int:
7910 case X86::VCVTSD2SI64rr_Int:
7911 case X86::VCVTSD2SIZrr_Int:
7912 case X86::VCVTSD2SI64Zrr_Int:
7913 case X86::CVTTSD2SIrr_Int:
7914 case X86::CVTTSD2SI64rr_Int:
7915 case X86::VCVTTSD2SIrr_Int:
7916 case X86::VCVTTSD2SI64rr_Int:
7917 case X86::VCVTTSD2SIZrr_Int:
7918 case X86::VCVTTSD2SI64Zrr_Int:
7919 case X86::VCVTSD2USIZrr_Int:
7920 case X86::VCVTSD2USI64Zrr_Int:
7921 case X86::VCVTTSD2USIZrr_Int:
7922 case X86::VCVTTSD2USI64Zrr_Int:
7923 case X86::ROUNDSDri_Int:
7924 case X86::VROUNDSDri_Int:
7925 case X86::COMISDrr_Int:
7926 case X86::VCOMISDrr_Int:
7927 case X86::VCOMISDZrr_Int:
7928 case X86::UCOMISDrr_Int:
7929 case X86::VUCOMISDrr_Int:
7930 case X86::VUCOMISDZrr_Int:
7931 case X86::ADDSDrr_Int:
7932 case X86::VADDSDrr_Int:
7933 case X86::VADDSDZrr_Int:
7934 case X86::CMPSDrri_Int:
7935 case X86::VCMPSDrri_Int:
7936 case X86::VCMPSDZrri_Int:
7937 case X86::DIVSDrr_Int:
7938 case X86::VDIVSDrr_Int:
7939 case X86::VDIVSDZrr_Int:
7940 case X86::MAXSDrr_Int:
7941 case X86::VMAXSDrr_Int:
7942 case X86::VMAXSDZrr_Int:
7943 case X86::MINSDrr_Int:
7944 case X86::VMINSDrr_Int:
7945 case X86::VMINSDZrr_Int:
7946 case X86::MULSDrr_Int:
7947 case X86::VMULSDrr_Int:
7948 case X86::VMULSDZrr_Int:
7949 case X86::SQRTSDr_Int:
7950 case X86::VSQRTSDr_Int:
7951 case X86::VSQRTSDZr_Int:
7952 case X86::SUBSDrr_Int:
7953 case X86::VSUBSDrr_Int:
7954 case X86::VSUBSDZrr_Int:
7955 case X86::VADDSDZrrk_Int:
7956 case X86::VADDSDZrrkz_Int:
7957 case X86::VCMPSDZrrik_Int:
7958 case X86::VDIVSDZrrk_Int:
7959 case X86::VDIVSDZrrkz_Int:
7960 case X86::VMAXSDZrrk_Int:
7961 case X86::VMAXSDZrrkz_Int:
7962 case X86::VMINSDZrrk_Int:
7963 case X86::VMINSDZrrkz_Int:
7964 case X86::VMULSDZrrk_Int:
7965 case X86::VMULSDZrrkz_Int:
7966 case X86::VSQRTSDZrk_Int:
7967 case X86::VSQRTSDZrkz_Int:
7968 case X86::VSUBSDZrrk_Int:
7969 case X86::VSUBSDZrrkz_Int:
7970 case X86::VFMADDSD4rr_Int:
7971 case X86::VFNMADDSD4rr_Int:
7972 case X86::VFMSUBSD4rr_Int:
7973 case X86::VFNMSUBSD4rr_Int:
7974 case X86::VFMADD132SDr_Int:
7975 case X86::VFNMADD132SDr_Int:
7976 case X86::VFMADD213SDr_Int:
7977 case X86::VFNMADD213SDr_Int:
7978 case X86::VFMADD231SDr_Int:
7979 case X86::VFNMADD231SDr_Int:
7980 case X86::VFMSUB132SDr_Int:
7981 case X86::VFNMSUB132SDr_Int:
7982 case X86::VFMSUB213SDr_Int:
7983 case X86::VFNMSUB213SDr_Int:
7984 case X86::VFMSUB231SDr_Int:
7985 case X86::VFNMSUB231SDr_Int:
7986 case X86::VFMADD132SDZr_Int:
7987 case X86::VFNMADD132SDZr_Int:
7988 case X86::VFMADD213SDZr_Int:
7989 case X86::VFNMADD213SDZr_Int:
7990 case X86::VFMADD231SDZr_Int:
7991 case X86::VFNMADD231SDZr_Int:
7992 case X86::VFMSUB132SDZr_Int:
7993 case X86::VFNMSUB132SDZr_Int:
7994 case X86::VFMSUB213SDZr_Int:
7995 case X86::VFNMSUB213SDZr_Int:
7996 case X86::VFMSUB231SDZr_Int:
7997 case X86::VFNMSUB231SDZr_Int:
7998 case X86::VFMADD132SDZrk_Int:
7999 case X86::VFNMADD132SDZrk_Int:
8000 case X86::VFMADD213SDZrk_Int:
8001 case X86::VFNMADD213SDZrk_Int:
8002 case X86::VFMADD231SDZrk_Int:
8003 case X86::VFNMADD231SDZrk_Int:
8004 case X86::VFMSUB132SDZrk_Int:
8005 case X86::VFNMSUB132SDZrk_Int:
8006 case X86::VFMSUB213SDZrk_Int:
8007 case X86::VFNMSUB213SDZrk_Int:
8008 case X86::VFMSUB231SDZrk_Int:
8009 case X86::VFNMSUB231SDZrk_Int:
8010 case X86::VFMADD132SDZrkz_Int:
8011 case X86::VFNMADD132SDZrkz_Int:
8012 case X86::VFMADD213SDZrkz_Int:
8013 case X86::VFNMADD213SDZrkz_Int:
8014 case X86::VFMADD231SDZrkz_Int:
8015 case X86::VFNMADD231SDZrkz_Int:
8016 case X86::VFMSUB132SDZrkz_Int:
8017 case X86::VFNMSUB132SDZrkz_Int:
8018 case X86::VFMSUB213SDZrkz_Int:
8019 case X86::VFNMSUB213SDZrkz_Int:
8020 case X86::VFMSUB231SDZrkz_Int:
8021 case X86::VFNMSUB231SDZrkz_Int:
8022 case X86::VFIXUPIMMSDZrri:
8023 case X86::VFIXUPIMMSDZrrik:
8024 case X86::VFIXUPIMMSDZrrikz:
8025 case X86::VFPCLASSSDZri:
8026 case X86::VFPCLASSSDZrik:
8027 case X86::VGETEXPSDZr:
8028 case X86::VGETEXPSDZrk:
8029 case X86::VGETEXPSDZrkz:
8030 case X86::VGETMANTSDZrri:
8031 case X86::VGETMANTSDZrrik:
8032 case X86::VGETMANTSDZrrikz:
8033 case X86::VRANGESDZrri:
8034 case X86::VRANGESDZrrik:
8035 case X86::VRANGESDZrrikz:
8036 case X86::VRCP14SDZrr:
8037 case X86::VRCP14SDZrrk:
8038 case X86::VRCP14SDZrrkz:
8039 case X86::VRCP28SDZr:
8040 case X86::VRCP28SDZrk:
8041 case X86::VRCP28SDZrkz:
8042 case X86::VREDUCESDZrri:
8043 case X86::VREDUCESDZrrik:
8044 case X86::VREDUCESDZrrikz:
8045 case X86::VRNDSCALESDZrri_Int:
8046 case X86::VRNDSCALESDZrrik_Int:
8047 case X86::VRNDSCALESDZrrikz_Int:
8048 case X86::VRSQRT14SDZrr:
8049 case X86::VRSQRT14SDZrrk:
8050 case X86::VRSQRT14SDZrrkz:
8051 case X86::VRSQRT28SDZr:
8052 case X86::VRSQRT28SDZrk:
8053 case X86::VRSQRT28SDZrkz:
8054 case X86::VSCALEFSDZrr:
8055 case X86::VSCALEFSDZrrk:
8056 case X86::VSCALEFSDZrrkz:
8063 if ((
Opc == X86::VMOVSHZrm ||
Opc == X86::VMOVSHZrm_alt) &&
RegSize > 16) {
8068 case X86::VADDSHZrr_Int:
8069 case X86::VCMPSHZrri_Int:
8070 case X86::VDIVSHZrr_Int:
8071 case X86::VMAXSHZrr_Int:
8072 case X86::VMINSHZrr_Int:
8073 case X86::VMULSHZrr_Int:
8074 case X86::VSUBSHZrr_Int:
8075 case X86::VADDSHZrrk_Int:
8076 case X86::VADDSHZrrkz_Int:
8077 case X86::VCMPSHZrrik_Int:
8078 case X86::VDIVSHZrrk_Int:
8079 case X86::VDIVSHZrrkz_Int:
8080 case X86::VMAXSHZrrk_Int:
8081 case X86::VMAXSHZrrkz_Int:
8082 case X86::VMINSHZrrk_Int:
8083 case X86::VMINSHZrrkz_Int:
8084 case X86::VMULSHZrrk_Int:
8085 case X86::VMULSHZrrkz_Int:
8086 case X86::VSUBSHZrrk_Int:
8087 case X86::VSUBSHZrrkz_Int:
8088 case X86::VFMADD132SHZr_Int:
8089 case X86::VFNMADD132SHZr_Int:
8090 case X86::VFMADD213SHZr_Int:
8091 case X86::VFNMADD213SHZr_Int:
8092 case X86::VFMADD231SHZr_Int:
8093 case X86::VFNMADD231SHZr_Int:
8094 case X86::VFMSUB132SHZr_Int:
8095 case X86::VFNMSUB132SHZr_Int:
8096 case X86::VFMSUB213SHZr_Int:
8097 case X86::VFNMSUB213SHZr_Int:
8098 case X86::VFMSUB231SHZr_Int:
8099 case X86::VFNMSUB231SHZr_Int:
8100 case X86::VFMADD132SHZrk_Int:
8101 case X86::VFNMADD132SHZrk_Int:
8102 case X86::VFMADD213SHZrk_Int:
8103 case X86::VFNMADD213SHZrk_Int:
8104 case X86::VFMADD231SHZrk_Int:
8105 case X86::VFNMADD231SHZrk_Int:
8106 case X86::VFMSUB132SHZrk_Int:
8107 case X86::VFNMSUB132SHZrk_Int:
8108 case X86::VFMSUB213SHZrk_Int:
8109 case X86::VFNMSUB213SHZrk_Int:
8110 case X86::VFMSUB231SHZrk_Int:
8111 case X86::VFNMSUB231SHZrk_Int:
8112 case X86::VFMADD132SHZrkz_Int:
8113 case X86::VFNMADD132SHZrkz_Int:
8114 case X86::VFMADD213SHZrkz_Int:
8115 case X86::VFNMADD213SHZrkz_Int:
8116 case X86::VFMADD231SHZrkz_Int:
8117 case X86::VFNMADD231SHZrkz_Int:
8118 case X86::VFMSUB132SHZrkz_Int:
8119 case X86::VFNMSUB132SHZrkz_Int:
8120 case X86::VFMSUB213SHZrkz_Int:
8121 case X86::VFNMSUB213SHZrkz_Int:
8122 case X86::VFMSUB231SHZrkz_Int:
8123 case X86::VFNMSUB231SHZrkz_Int:
8147 return RC == &X86::VK2WMRegClass || RC == &X86::VK4WMRegClass ||
8148 RC == &X86::VK8WMRegClass || RC == &X86::VK16WMRegClass ||
8149 RC == &X86::VK32WMRegClass || RC == &X86::VK64WMRegClass;
8158 bool HasSameMask =
false;
8159 for (
unsigned I = 1, E =
MI.getDesc().getNumOperands();
I < E; ++
I) {
8161 if (
Op.isReg() &&
Op.getReg() == MaskReg) {
8173 for (
auto Op :
Ops) {
8174 if (
MI.getOperand(
Op).getSubReg())
8211 case X86::AVX512_512_SET0:
8212 case X86::AVX512_512_SETALLONES:
8213 Alignment =
Align(64);
8215 case X86::AVX2_SETALLONES:
8216 case X86::AVX1_SETALLONES:
8218 case X86::AVX512_256_SET0:
8219 case X86::AVX512_256_SETALLONES:
8220 Alignment =
Align(32);
8223 case X86::V_SETALLONES:
8224 case X86::AVX512_128_SET0:
8225 case X86::FsFLD0F128:
8226 case X86::AVX512_FsFLD0F128:
8227 case X86::AVX512_128_SETALLONES:
8228 Alignment =
Align(16);
8232 case X86::AVX512_FsFLD0SD:
8233 Alignment =
Align(8);
8236 case X86::AVX512_FsFLD0SS:
8237 Alignment =
Align(4);
8240 case X86::AVX512_FsFLD0SH:
8241 Alignment =
Align(2);
8246 if (
Ops.size() == 2 &&
Ops[0] == 0 &&
Ops[1] == 1) {
8247 unsigned NewOpc = 0;
8248 switch (
MI.getOpcode()) {
8252 NewOpc = X86::CMP8ri;
8255 NewOpc = X86::CMP16ri;
8258 NewOpc = X86::CMP32ri;
8261 NewOpc = X86::CMP64ri32;
8265 MI.setDesc(
get(NewOpc));
8266 MI.getOperand(1).ChangeToImmediate(0);
8267 }
else if (
Ops.size() != 1)
8279 case X86::V_SETALLONES:
8280 case X86::AVX2_SETALLONES:
8281 case X86::AVX1_SETALLONES:
8283 case X86::AVX512_128_SET0:
8284 case X86::AVX512_256_SET0:
8285 case X86::AVX512_512_SET0:
8286 case X86::AVX512_128_SETALLONES:
8287 case X86::AVX512_256_SETALLONES:
8288 case X86::AVX512_512_SETALLONES:
8290 case X86::AVX512_FsFLD0SH:
8292 case X86::AVX512_FsFLD0SD:
8294 case X86::AVX512_FsFLD0SS:
8295 case X86::FsFLD0F128:
8296 case X86::AVX512_FsFLD0F128: {
8305 unsigned PICBase = 0;
8308 if (Subtarget.is64Bit()) {
8321 bool IsAllOnes =
false;
8324 case X86::AVX512_FsFLD0SS:
8328 case X86::AVX512_FsFLD0SD:
8331 case X86::FsFLD0F128:
8332 case X86::AVX512_FsFLD0F128:
8336 case X86::AVX512_FsFLD0SH:
8339 case X86::AVX512_512_SETALLONES:
8342 case X86::AVX512_512_SET0:
8346 case X86::AVX1_SETALLONES:
8347 case X86::AVX2_SETALLONES:
8348 case X86::AVX512_256_SETALLONES:
8351 case X86::AVX512_256_SET0:
8361 case X86::V_SETALLONES:
8362 case X86::AVX512_128_SETALLONES:
8366 case X86::AVX512_128_SET0:
8384 case X86::VPBROADCASTBZ128rm:
8385 case X86::VPBROADCASTBZ256rm:
8386 case X86::VPBROADCASTBZrm:
8387 case X86::VBROADCASTF32X2Z256rm:
8388 case X86::VBROADCASTF32X2Zrm:
8389 case X86::VBROADCASTI32X2Z128rm:
8390 case X86::VBROADCASTI32X2Z256rm:
8391 case X86::VBROADCASTI32X2Zrm:
8395#define FOLD_BROADCAST(SIZE) \
8396 MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands, \
8397 LoadMI.operands_begin() + NumOps); \
8398 return foldMemoryBroadcast(MF, MI, Ops[0], MOs, InsertPt, SIZE, \
8400 case X86::VPBROADCASTWZ128rm:
8401 case X86::VPBROADCASTWZ256rm:
8402 case X86::VPBROADCASTWZrm:
8404 case X86::VPBROADCASTDZ128rm:
8405 case X86::VPBROADCASTDZ256rm:
8406 case X86::VPBROADCASTDZrm:
8407 case X86::VBROADCASTSSZ128rm:
8408 case X86::VBROADCASTSSZ256rm:
8409 case X86::VBROADCASTSSZrm:
8411 case X86::VPBROADCASTQZ128rm:
8412 case X86::VPBROADCASTQZ256rm:
8413 case X86::VPBROADCASTQZrm:
8414 case X86::VBROADCASTSDZ256rm:
8415 case X86::VBROADCASTSDZrm:
8428 0, Alignment,
true);
8435 unsigned BitsSize,
bool AllowCommute)
const {
8439 ?
fuseInst(MF,
I->DstOp, OpNum, MOs, InsertPt,
MI, *
this)
8445 unsigned CommuteOpIdx2 = commuteOperandsForFold(
MI, OpNum);
8446 if (CommuteOpIdx2 == OpNum) {
8451 foldMemoryBroadcast(MF,
MI, CommuteOpIdx2, MOs, InsertPt, BitsSize,
8456 commuteInstruction(
MI,
false, OpNum, CommuteOpIdx2);
8471 if (!MMO->isStore()) {
8489 if (!MMO->isStore())
8492 if (!MMO->isLoad()) {
8510 assert((SpillSize == 64 || STI.hasVLX()) &&
8511 "Can't broadcast less than 64 bytes without AVX512VL!");
8513#define CASE_BCAST_TYPE_OPC(TYPE, OP16, OP32, OP64) \
8515 switch (SpillSize) { \
8517 llvm_unreachable("Unknown spill size"); \
8551 unsigned Opc =
I->DstOp;
8555 if (UnfoldLoad && !FoldedLoad)
8557 UnfoldLoad &= FoldedLoad;
8558 if (UnfoldStore && !FoldedStore)
8560 UnfoldStore &= FoldedStore;
8567 if (!
MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
8568 Subtarget.isUnalignedMem16Slow())
8577 for (
unsigned i = 0, e =
MI.getNumOperands(); i != e; ++i) {
8581 else if (
Op.isReg() &&
Op.isImplicit())
8597 unsigned Alignment = std::max<uint32_t>(
TRI.getSpillSize(*RC), 16);
8598 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8642 case X86::CMP64ri32:
8653 case X86::CMP64ri32:
8654 NewOpc = X86::TEST64rr;
8657 NewOpc = X86::TEST32rr;
8660 NewOpc = X86::TEST16rr;
8663 NewOpc = X86::TEST8rr;
8677 unsigned Alignment = std::max<uint32_t>(
TRI.getSpillSize(*DstRC), 16);
8678 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8694 if (!
N->isMachineOpcode())
8700 unsigned Opc =
I->DstOp;
8708 unsigned NumDefs =
MCID.NumDefs;
8709 std::vector<SDValue> AddrOps;
8710 std::vector<SDValue> BeforeOps;
8711 std::vector<SDValue> AfterOps;
8713 unsigned NumOps =
N->getNumOperands();
8714 for (
unsigned i = 0; i !=
NumOps - 1; ++i) {
8717 AddrOps.push_back(
Op);
8718 else if (i < Index - NumDefs)
8719 BeforeOps.push_back(
Op);
8720 else if (i > Index - NumDefs)
8721 AfterOps.push_back(
Op);
8724 AddrOps.push_back(Chain);
8729 EVT VT = *
TRI.legalclasstypes_begin(*RC);
8731 if (MMOs.empty() && RC == &X86::VR128RegClass &&
8732 Subtarget.isUnalignedMem16Slow())
8742 unsigned Alignment = std::max<uint32_t>(
TRI.getSpillSize(*RC), 16);
8743 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8755 std::vector<EVT> VTs;
8757 if (
MCID.getNumDefs() > 0) {
8759 VTs.push_back(*
TRI.legalclasstypes_begin(*DstRC));
8761 for (
unsigned i = 0, e =
N->getNumValues(); i != e; ++i) {
8762 EVT VT =
N->getValueType(i);
8763 if (VT != MVT::Other && i >= (
unsigned)
MCID.getNumDefs())
8767 BeforeOps.push_back(
SDValue(Load, 0));
8773 case X86::CMP64ri32:
8781 case X86::CMP64ri32:
8782 Opc = X86::TEST64rr;
8785 Opc = X86::TEST32rr;
8788 Opc = X86::TEST16rr;
8794 BeforeOps[1] = BeforeOps[0];
8803 AddrOps.push_back(
SDValue(NewNode, 0));
8804 AddrOps.push_back(Chain);
8806 if (MMOs.empty() && RC == &X86::VR128RegClass &&
8807 Subtarget.isUnalignedMem16Slow())
8812 unsigned Alignment = std::max<uint32_t>(
TRI.getSpillSize(*RC), 16);
8813 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8816 dl, MVT::Other, AddrOps);
8829 unsigned *LoadRegIndex)
const {
8835 if (UnfoldLoad && !FoldedLoad)
8837 if (UnfoldStore && !FoldedStore)
8846 int64_t &Offset2)
const {
8850 auto IsLoadOpcode = [&](
unsigned Opcode) {
8862 case X86::MOVSSrm_alt:
8864 case X86::MOVSDrm_alt:
8865 case X86::MMX_MOVD64rm:
8866 case X86::MMX_MOVQ64rm:
8875 case X86::VMOVSSrm_alt:
8877 case X86::VMOVSDrm_alt:
8878 case X86::VMOVAPSrm:
8879 case X86::VMOVUPSrm:
8880 case X86::VMOVAPDrm:
8881 case X86::VMOVUPDrm:
8882 case X86::VMOVDQArm:
8883 case X86::VMOVDQUrm:
8884 case X86::VMOVAPSYrm:
8885 case X86::VMOVUPSYrm:
8886 case X86::VMOVAPDYrm:
8887 case X86::VMOVUPDYrm:
8888 case X86::VMOVDQAYrm:
8889 case X86::VMOVDQUYrm:
8891 case X86::VMOVSSZrm:
8892 case X86::VMOVSSZrm_alt:
8893 case X86::VMOVSDZrm:
8894 case X86::VMOVSDZrm_alt:
8895 case X86::VMOVAPSZ128rm:
8896 case X86::VMOVUPSZ128rm:
8897 case X86::VMOVAPSZ128rm_NOVLX:
8898 case X86::VMOVUPSZ128rm_NOVLX:
8899 case X86::VMOVAPDZ128rm:
8900 case X86::VMOVUPDZ128rm:
8901 case X86::VMOVDQU8Z128rm:
8902 case X86::VMOVDQU16Z128rm:
8903 case X86::VMOVDQA32Z128rm:
8904 case X86::VMOVDQU32Z128rm:
8905 case X86::VMOVDQA64Z128rm:
8906 case X86::VMOVDQU64Z128rm:
8907 case X86::VMOVAPSZ256rm:
8908 case X86::VMOVUPSZ256rm:
8909 case X86::VMOVAPSZ256rm_NOVLX:
8910 case X86::VMOVUPSZ256rm_NOVLX:
8911 case X86::VMOVAPDZ256rm:
8912 case X86::VMOVUPDZ256rm:
8913 case X86::VMOVDQU8Z256rm:
8914 case X86::VMOVDQU16Z256rm:
8915 case X86::VMOVDQA32Z256rm:
8916 case X86::VMOVDQU32Z256rm:
8917 case X86::VMOVDQA64Z256rm:
8918 case X86::VMOVDQU64Z256rm:
8919 case X86::VMOVAPSZrm:
8920 case X86::VMOVUPSZrm:
8921 case X86::VMOVAPDZrm:
8922 case X86::VMOVUPDZrm:
8923 case X86::VMOVDQU8Zrm:
8924 case X86::VMOVDQU16Zrm:
8925 case X86::VMOVDQA32Zrm:
8926 case X86::VMOVDQU32Zrm:
8927 case X86::VMOVDQA64Zrm:
8928 case X86::VMOVDQU64Zrm:
8930 case X86::KMOVBkm_EVEX:
8932 case X86::KMOVWkm_EVEX:
8934 case X86::KMOVDkm_EVEX:
8936 case X86::KMOVQkm_EVEX:
8946 auto HasSameOp = [&](
int I) {
8962 if (!Disp1 || !Disp2)
8965 Offset1 = Disp1->getSExtValue();
8966 Offset2 = Disp2->getSExtValue();
8971 int64_t Offset1, int64_t Offset2,
8972 unsigned NumLoads)
const {
8973 assert(Offset2 > Offset1);
8974 if ((Offset2 - Offset1) / 8 > 64)
8988 case X86::MMX_MOVD64rm:
8989 case X86::MMX_MOVQ64rm:
8998 if (Subtarget.is64Bit()) {
9001 }
else if (NumLoads) {
9024 unsigned Opcode =
MI.getOpcode();
9025 if (Opcode == X86::ENDBR64 || Opcode == X86::ENDBR32 ||
9026 Opcode == X86::PLDTILECFGV)
9039 assert(
Cond.size() == 1 &&
"Invalid X86 branch condition!");
9049 return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass ||
9050 RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass ||
9051 RC == &X86::RFP80RegClass);
9064 return GlobalBaseReg;
9069 GlobalBaseReg = RegInfo.createVirtualRegister(
9070 Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass);
9072 return GlobalBaseReg;
9080 for (
const uint16_t(&Row)[3] : Table)
9081 if (Row[domain - 1] == opcode)
9089 for (
const uint16_t(&Row)[4] : Table)
9090 if (Row[domain - 1] == opcode || (domain == 3 && Row[3] == opcode))
9097 unsigned NewWidth,
unsigned *pNewMask =
nullptr) {
9098 assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) &&
9099 "Illegal blend mask scale");
9100 unsigned NewMask = 0;
9102 if ((OldWidth % NewWidth) == 0) {
9103 unsigned Scale = OldWidth / NewWidth;
9104 unsigned SubMask = (1u << Scale) - 1;
9105 for (
unsigned i = 0; i != NewWidth; ++i) {
9106 unsigned Sub = (OldMask >> (i * Scale)) & SubMask;
9108 NewMask |= (1u << i);
9109 else if (
Sub != 0x0)
9113 unsigned Scale = NewWidth / OldWidth;
9114 unsigned SubMask = (1u << Scale) - 1;
9115 for (
unsigned i = 0; i != OldWidth; ++i) {
9116 if (OldMask & (1 << i)) {
9117 NewMask |= (SubMask << (i * Scale));
9123 *pNewMask = NewMask;
9128 unsigned Opcode =
MI.getOpcode();
9129 unsigned NumOperands =
MI.getDesc().getNumOperands();
9131 auto GetBlendDomains = [&](
unsigned ImmWidth,
bool Is256) {
9133 if (
MI.getOperand(NumOperands - 1).isImm()) {
9134 unsigned Imm =
MI.getOperand(NumOperands - 1).getImm();
9136 validDomains |= 0x2;
9138 validDomains |= 0x4;
9139 if (!Is256 || Subtarget.hasAVX2())
9140 validDomains |= 0x8;
9142 return validDomains;
9146 case X86::BLENDPDrmi:
9147 case X86::BLENDPDrri:
9148 case X86::VBLENDPDrmi:
9149 case X86::VBLENDPDrri:
9150 return GetBlendDomains(2,
false);
9151 case X86::VBLENDPDYrmi:
9152 case X86::VBLENDPDYrri:
9153 return GetBlendDomains(4,
true);
9154 case X86::BLENDPSrmi:
9155 case X86::BLENDPSrri:
9156 case X86::VBLENDPSrmi:
9157 case X86::VBLENDPSrri:
9158 case X86::VPBLENDDrmi:
9159 case X86::VPBLENDDrri:
9160 return GetBlendDomains(4,
false);
9161 case X86::VBLENDPSYrmi:
9162 case X86::VBLENDPSYrri:
9163 case X86::VPBLENDDYrmi:
9164 case X86::VPBLENDDYrri:
9165 return GetBlendDomains(8,
true);
9166 case X86::PBLENDWrmi:
9167 case X86::PBLENDWrri:
9168 case X86::VPBLENDWrmi:
9169 case X86::VPBLENDWrri:
9171 case X86::VPBLENDWYrmi:
9172 case X86::VPBLENDWYrri:
9173 return GetBlendDomains(8,
false);
9174 case X86::VPANDDZ128rr:
9175 case X86::VPANDDZ128rm:
9176 case X86::VPANDDZ256rr:
9177 case X86::VPANDDZ256rm:
9178 case X86::VPANDQZ128rr:
9179 case X86::VPANDQZ128rm:
9180 case X86::VPANDQZ256rr:
9181 case X86::VPANDQZ256rm:
9182 case X86::VPANDNDZ128rr:
9183 case X86::VPANDNDZ128rm:
9184 case X86::VPANDNDZ256rr:
9185 case X86::VPANDNDZ256rm:
9186 case X86::VPANDNQZ128rr:
9187 case X86::VPANDNQZ128rm:
9188 case X86::VPANDNQZ256rr:
9189 case X86::VPANDNQZ256rm:
9190 case X86::VPORDZ128rr:
9191 case X86::VPORDZ128rm:
9192 case X86::VPORDZ256rr:
9193 case X86::VPORDZ256rm:
9194 case X86::VPORQZ128rr:
9195 case X86::VPORQZ128rm:
9196 case X86::VPORQZ256rr:
9197 case X86::VPORQZ256rm:
9198 case X86::VPXORDZ128rr:
9199 case X86::VPXORDZ128rm:
9200 case X86::VPXORDZ256rr:
9201 case X86::VPXORDZ256rm:
9202 case X86::VPXORQZ128rr:
9203 case X86::VPXORQZ128rm:
9204 case X86::VPXORQZ256rr:
9205 case X86::VPXORQZ256rm:
9208 if (Subtarget.hasDQI())
9211 if (RI.getEncodingValue(
MI.getOperand(0).getReg()) >= 16)
9213 if (RI.getEncodingValue(
MI.getOperand(1).getReg()) >= 16)
9216 if (NumOperands == 3 &&
9217 RI.getEncodingValue(
MI.getOperand(2).getReg()) >= 16)
9222 case X86::MOVHLPSrr:
9229 if (
MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg() &&
9230 MI.getOperand(0).getSubReg() == 0 &&
9231 MI.getOperand(1).getSubReg() == 0 &&
MI.getOperand(2).getSubReg() == 0)
9234 case X86::SHUFPDrri:
9240#include "X86ReplaceableInstrs.def"
9246 assert(dom &&
"Not an SSE instruction");
9248 unsigned Opcode =
MI.getOpcode();
9249 unsigned NumOperands =
MI.getDesc().getNumOperands();
9251 auto SetBlendDomain = [&](
unsigned ImmWidth,
bool Is256) {
9252 if (
MI.getOperand(NumOperands - 1).isImm()) {
9253 unsigned Imm =
MI.getOperand(NumOperands - 1).getImm() & 255;
9254 Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm);
9255 unsigned NewImm = Imm;
9257 const uint16_t *table =
lookup(Opcode, dom, ReplaceableBlendInstrs);
9259 table =
lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
9263 }
else if (
Domain == 2) {
9265 }
else if (
Domain == 3) {
9266 if (Subtarget.hasAVX2()) {
9268 if ((ImmWidth / (Is256 ? 2 : 1)) != 8) {
9269 table =
lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
9273 assert(!Is256 &&
"128-bit vector expected");
9278 assert(table && table[
Domain - 1] &&
"Unknown domain op");
9280 MI.getOperand(NumOperands - 1).setImm(NewImm & 255);
9286 case X86::BLENDPDrmi:
9287 case X86::BLENDPDrri:
9288 case X86::VBLENDPDrmi:
9289 case X86::VBLENDPDrri:
9290 return SetBlendDomain(2,
false);
9291 case X86::VBLENDPDYrmi:
9292 case X86::VBLENDPDYrri:
9293 return SetBlendDomain(4,
true);
9294 case X86::BLENDPSrmi:
9295 case X86::BLENDPSrri:
9296 case X86::VBLENDPSrmi:
9297 case X86::VBLENDPSrri:
9298 case X86::VPBLENDDrmi:
9299 case X86::VPBLENDDrri:
9300 return SetBlendDomain(4,
false);
9301 case X86::VBLENDPSYrmi:
9302 case X86::VBLENDPSYrri:
9303 case X86::VPBLENDDYrmi:
9304 case X86::VPBLENDDYrri:
9305 return SetBlendDomain(8,
true);
9306 case X86::PBLENDWrmi:
9307 case X86::PBLENDWrri:
9308 case X86::VPBLENDWrmi:
9309 case X86::VPBLENDWrri:
9310 return SetBlendDomain(8,
false);
9311 case X86::VPBLENDWYrmi:
9312 case X86::VPBLENDWYrri:
9313 return SetBlendDomain(16,
true);
9314 case X86::VPANDDZ128rr:
9315 case X86::VPANDDZ128rm:
9316 case X86::VPANDDZ256rr:
9317 case X86::VPANDDZ256rm:
9318 case X86::VPANDQZ128rr:
9319 case X86::VPANDQZ128rm:
9320 case X86::VPANDQZ256rr:
9321 case X86::VPANDQZ256rm:
9322 case X86::VPANDNDZ128rr:
9323 case X86::VPANDNDZ128rm:
9324 case X86::VPANDNDZ256rr:
9325 case X86::VPANDNDZ256rm:
9326 case X86::VPANDNQZ128rr:
9327 case X86::VPANDNQZ128rm:
9328 case X86::VPANDNQZ256rr:
9329 case X86::VPANDNQZ256rm:
9330 case X86::VPORDZ128rr:
9331 case X86::VPORDZ128rm:
9332 case X86::VPORDZ256rr:
9333 case X86::VPORDZ256rm:
9334 case X86::VPORQZ128rr:
9335 case X86::VPORQZ128rm:
9336 case X86::VPORQZ256rr:
9337 case X86::VPORQZ256rm:
9338 case X86::VPXORDZ128rr:
9339 case X86::VPXORDZ128rm:
9340 case X86::VPXORDZ256rr:
9341 case X86::VPXORDZ256rm:
9342 case X86::VPXORQZ128rr:
9343 case X86::VPXORQZ128rm:
9344 case X86::VPXORQZ256rr:
9345 case X86::VPXORQZ256rm: {
9347 if (Subtarget.hasDQI())
9351 lookupAVX512(
MI.getOpcode(), dom, ReplaceableCustomAVX512LogicInstrs);
9352 assert(table &&
"Instruction not found in table?");
9355 if (
Domain == 3 && (dom == 1 || table[3] ==
MI.getOpcode()))
9360 case X86::UNPCKHPDrr:
9361 case X86::MOVHLPSrr:
9364 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg() &&
9365 MI.getOperand(0).getSubReg() == 0 &&
9366 MI.getOperand(1).getSubReg() == 0 &&
9367 MI.getOperand(2).getSubReg() == 0) {
9368 commuteInstruction(
MI,
false);
9372 if (Opcode == X86::MOVHLPSrr)
9375 case X86::SHUFPDrri: {
9377 unsigned Imm =
MI.getOperand(3).getImm();
9378 unsigned NewImm = 0x44;
9383 MI.getOperand(3).setImm(NewImm);
9384 MI.setDesc(
get(X86::SHUFPSrri));
9392std::pair<uint16_t, uint16_t>
9395 unsigned opcode =
MI.getOpcode();
9401 return std::make_pair(domain, validDomains);
9403 if (
lookup(opcode, domain, ReplaceableInstrs)) {
9405 }
else if (
lookup(opcode, domain, ReplaceableInstrsAVX2)) {
9406 validDomains = Subtarget.hasAVX2() ? 0xe : 0x6;
9407 }
else if (
lookup(opcode, domain, ReplaceableInstrsFP)) {
9409 }
else if (
lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) {
9412 if (!Subtarget.hasAVX2())
9413 return std::make_pair(0, 0);
9415 }
else if (
lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) {
9417 }
else if (Subtarget.hasDQI() &&
9418 lookupAVX512(opcode, domain, ReplaceableInstrsAVX512DQ)) {
9420 }
else if (Subtarget.hasDQI()) {
9422 lookupAVX512(opcode, domain, ReplaceableInstrsAVX512DQMasked)) {
9423 if (domain == 1 || (domain == 3 && table[3] == opcode))
9430 return std::make_pair(domain, validDomains);
9436 assert(dom &&
"Not an SSE instruction");
9445 "256-bit vector operations only available in AVX2");
9446 table =
lookup(
MI.getOpcode(), dom, ReplaceableInstrsAVX2);
9449 table =
lookup(
MI.getOpcode(), dom, ReplaceableInstrsFP);
9451 "Can only select PackedSingle or PackedDouble");
9454 assert(Subtarget.hasAVX2() &&
9455 "256-bit insert/extract only available in AVX2");
9456 table =
lookup(
MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract);
9459 assert(Subtarget.hasAVX512() &&
"Requires AVX-512");
9460 table =
lookupAVX512(
MI.getOpcode(), dom, ReplaceableInstrsAVX512);
9462 if (table &&
Domain == 3 && table[3] ==
MI.getOpcode())
9466 assert((Subtarget.hasDQI() ||
Domain >= 3) &&
"Requires AVX-512DQ");
9467 table =
lookupAVX512(
MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ);
9470 if (table &&
Domain == 3 && (dom == 1 || table[3] ==
MI.getOpcode()))
9474 assert((Subtarget.hasDQI() ||
Domain >= 3) &&
"Requires AVX-512DQ");
9475 table =
lookupAVX512(
MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked);
9476 if (table &&
Domain == 3 && (dom == 1 || table[3] ==
MI.getOpcode()))
9479 assert(table &&
"Cannot change domain");
9505 case X86::DIVSDrm_Int:
9507 case X86::DIVSDrr_Int:
9509 case X86::DIVSSrm_Int:
9511 case X86::DIVSSrr_Int:
9517 case X86::SQRTSDm_Int:
9519 case X86::SQRTSDr_Int:
9521 case X86::SQRTSSm_Int:
9523 case X86::SQRTSSr_Int:
9527 case X86::VDIVPDYrm:
9528 case X86::VDIVPDYrr:
9531 case X86::VDIVPSYrm:
9532 case X86::VDIVPSYrr:
9534 case X86::VDIVSDrm_Int:
9536 case X86::VDIVSDrr_Int:
9538 case X86::VDIVSSrm_Int:
9540 case X86::VDIVSSrr_Int:
9543 case X86::VSQRTPDYm:
9544 case X86::VSQRTPDYr:
9547 case X86::VSQRTPSYm:
9548 case X86::VSQRTPSYr:
9550 case X86::VSQRTSDm_Int:
9552 case X86::VSQRTSDr_Int:
9554 case X86::VSQRTSSm_Int:
9556 case X86::VSQRTSSr_Int:
9558 case X86::VDIVPDZ128rm:
9559 case X86::VDIVPDZ128rmb:
9560 case X86::VDIVPDZ128rmbk:
9561 case X86::VDIVPDZ128rmbkz:
9562 case X86::VDIVPDZ128rmk:
9563 case X86::VDIVPDZ128rmkz:
9564 case X86::VDIVPDZ128rr:
9565 case X86::VDIVPDZ128rrk:
9566 case X86::VDIVPDZ128rrkz:
9567 case X86::VDIVPDZ256rm:
9568 case X86::VDIVPDZ256rmb:
9569 case X86::VDIVPDZ256rmbk:
9570 case X86::VDIVPDZ256rmbkz:
9571 case X86::VDIVPDZ256rmk:
9572 case X86::VDIVPDZ256rmkz:
9573 case X86::VDIVPDZ256rr:
9574 case X86::VDIVPDZ256rrk:
9575 case X86::VDIVPDZ256rrkz:
9576 case X86::VDIVPDZrrb:
9577 case X86::VDIVPDZrrbk:
9578 case X86::VDIVPDZrrbkz:
9579 case X86::VDIVPDZrm:
9580 case X86::VDIVPDZrmb:
9581 case X86::VDIVPDZrmbk:
9582 case X86::VDIVPDZrmbkz:
9583 case X86::VDIVPDZrmk:
9584 case X86::VDIVPDZrmkz:
9585 case X86::VDIVPDZrr:
9586 case X86::VDIVPDZrrk:
9587 case X86::VDIVPDZrrkz:
9588 case X86::VDIVPSZ128rm:
9589 case X86::VDIVPSZ128rmb:
9590 case X86::VDIVPSZ128rmbk:
9591 case X86::VDIVPSZ128rmbkz:
9592 case X86::VDIVPSZ128rmk:
9593 case X86::VDIVPSZ128rmkz:
9594 case X86::VDIVPSZ128rr:
9595 case X86::VDIVPSZ128rrk:
9596 case X86::VDIVPSZ128rrkz:
9597 case X86::VDIVPSZ256rm:
9598 case X86::VDIVPSZ256rmb:
9599 case X86::VDIVPSZ256rmbk:
9600 case X86::VDIVPSZ256rmbkz:
9601 case X86::VDIVPSZ256rmk:
9602 case X86::VDIVPSZ256rmkz:
9603 case X86::VDIVPSZ256rr:
9604 case X86::VDIVPSZ256rrk:
9605 case X86::VDIVPSZ256rrkz:
9606 case X86::VDIVPSZrrb:
9607 case X86::VDIVPSZrrbk:
9608 case X86::VDIVPSZrrbkz:
9609 case X86::VDIVPSZrm:
9610 case X86::VDIVPSZrmb:
9611 case X86::VDIVPSZrmbk:
9612 case X86::VDIVPSZrmbkz:
9613 case X86::VDIVPSZrmk:
9614 case X86::VDIVPSZrmkz:
9615 case X86::VDIVPSZrr:
9616 case X86::VDIVPSZrrk:
9617 case X86::VDIVPSZrrkz:
9618 case X86::VDIVSDZrm:
9619 case X86::VDIVSDZrr:
9620 case X86::VDIVSDZrm_Int:
9621 case X86::VDIVSDZrmk_Int:
9622 case X86::VDIVSDZrmkz_Int:
9623 case X86::VDIVSDZrr_Int:
9624 case X86::VDIVSDZrrk_Int:
9625 case X86::VDIVSDZrrkz_Int:
9626 case X86::VDIVSDZrrb_Int:
9627 case X86::VDIVSDZrrbk_Int:
9628 case X86::VDIVSDZrrbkz_Int:
9629 case X86::VDIVSSZrm:
9630 case X86::VDIVSSZrr:
9631 case X86::VDIVSSZrm_Int:
9632 case X86::VDIVSSZrmk_Int:
9633 case X86::VDIVSSZrmkz_Int:
9634 case X86::VDIVSSZrr_Int:
9635 case X86::VDIVSSZrrk_Int:
9636 case X86::VDIVSSZrrkz_Int:
9637 case X86::VDIVSSZrrb_Int:
9638 case X86::VDIVSSZrrbk_Int:
9639 case X86::VDIVSSZrrbkz_Int:
9640 case X86::VSQRTPDZ128m:
9641 case X86::VSQRTPDZ128mb:
9642 case X86::VSQRTPDZ128mbk:
9643 case X86::VSQRTPDZ128mbkz:
9644 case X86::VSQRTPDZ128mk:
9645 case X86::VSQRTPDZ128mkz:
9646 case X86::VSQRTPDZ128r:
9647 case X86::VSQRTPDZ128rk:
9648 case X86::VSQRTPDZ128rkz:
9649 case X86::VSQRTPDZ256m:
9650 case X86::VSQRTPDZ256mb:
9651 case X86::VSQRTPDZ256mbk:
9652 case X86::VSQRTPDZ256mbkz:
9653 case X86::VSQRTPDZ256mk:
9654 case X86::VSQRTPDZ256mkz:
9655 case X86::VSQRTPDZ256r:
9656 case X86::VSQRTPDZ256rk:
9657 case X86::VSQRTPDZ256rkz:
9658 case X86::VSQRTPDZm:
9659 case X86::VSQRTPDZmb:
9660 case X86::VSQRTPDZmbk:
9661 case X86::VSQRTPDZmbkz:
9662 case X86::VSQRTPDZmk:
9663 case X86::VSQRTPDZmkz:
9664 case X86::VSQRTPDZr:
9665 case X86::VSQRTPDZrb:
9666 case X86::VSQRTPDZrbk:
9667 case X86::VSQRTPDZrbkz:
9668 case X86::VSQRTPDZrk:
9669 case X86::VSQRTPDZrkz:
9670 case X86::VSQRTPSZ128m:
9671 case X86::VSQRTPSZ128mb:
9672 case X86::VSQRTPSZ128mbk:
9673 case X86::VSQRTPSZ128mbkz:
9674 case X86::VSQRTPSZ128mk:
9675 case X86::VSQRTPSZ128mkz:
9676 case X86::VSQRTPSZ128r:
9677 case X86::VSQRTPSZ128rk:
9678 case X86::VSQRTPSZ128rkz:
9679 case X86::VSQRTPSZ256m:
9680 case X86::VSQRTPSZ256mb:
9681 case X86::VSQRTPSZ256mbk:
9682 case X86::VSQRTPSZ256mbkz:
9683 case X86::VSQRTPSZ256mk:
9684 case X86::VSQRTPSZ256mkz:
9685 case X86::VSQRTPSZ256r:
9686 case X86::VSQRTPSZ256rk:
9687 case X86::VSQRTPSZ256rkz:
9688 case X86::VSQRTPSZm:
9689 case X86::VSQRTPSZmb:
9690 case X86::VSQRTPSZmbk:
9691 case X86::VSQRTPSZmbkz:
9692 case X86::VSQRTPSZmk:
9693 case X86::VSQRTPSZmkz:
9694 case X86::VSQRTPSZr:
9695 case X86::VSQRTPSZrb:
9696 case X86::VSQRTPSZrbk:
9697 case X86::VSQRTPSZrbkz:
9698 case X86::VSQRTPSZrk:
9699 case X86::VSQRTPSZrkz:
9700 case X86::VSQRTSDZm:
9701 case X86::VSQRTSDZm_Int:
9702 case X86::VSQRTSDZmk_Int:
9703 case X86::VSQRTSDZmkz_Int:
9704 case X86::VSQRTSDZr:
9705 case X86::VSQRTSDZr_Int:
9706 case X86::VSQRTSDZrk_Int:
9707 case X86::VSQRTSDZrkz_Int:
9708 case X86::VSQRTSDZrb_Int:
9709 case X86::VSQRTSDZrbk_Int:
9710 case X86::VSQRTSDZrbkz_Int:
9711 case X86::VSQRTSSZm:
9712 case X86::VSQRTSSZm_Int:
9713 case X86::VSQRTSSZmk_Int:
9714 case X86::VSQRTSSZmkz_Int:
9715 case X86::VSQRTSSZr:
9716 case X86::VSQRTSSZr_Int:
9717 case X86::VSQRTSSZrk_Int:
9718 case X86::VSQRTSSZrkz_Int:
9719 case X86::VSQRTSSZrb_Int:
9720 case X86::VSQRTSSZrbk_Int:
9721 case X86::VSQRTSSZrbkz_Int:
9723 case X86::VGATHERDPDYrm:
9724 case X86::VGATHERDPDZ128rm:
9725 case X86::VGATHERDPDZ256rm:
9726 case X86::VGATHERDPDZrm:
9727 case X86::VGATHERDPDrm:
9728 case X86::VGATHERDPSYrm:
9729 case X86::VGATHERDPSZ128rm:
9730 case X86::VGATHERDPSZ256rm:
9731 case X86::VGATHERDPSZrm:
9732 case X86::VGATHERDPSrm:
9733 case X86::VGATHERPF0DPDm:
9734 case X86::VGATHERPF0DPSm:
9735 case X86::VGATHERPF0QPDm:
9736 case X86::VGATHERPF0QPSm:
9737 case X86::VGATHERPF1DPDm:
9738 case X86::VGATHERPF1DPSm:
9739 case X86::VGATHERPF1QPDm:
9740 case X86::VGATHERPF1QPSm:
9741 case X86::VGATHERQPDYrm:
9742 case X86::VGATHERQPDZ128rm:
9743 case X86::VGATHERQPDZ256rm:
9744 case X86::VGATHERQPDZrm:
9745 case X86::VGATHERQPDrm:
9746 case X86::VGATHERQPSYrm:
9747 case X86::VGATHERQPSZ128rm:
9748 case X86::VGATHERQPSZ256rm:
9749 case X86::VGATHERQPSZrm:
9750 case X86::VGATHERQPSrm:
9751 case X86::VPGATHERDDYrm:
9752 case X86::VPGATHERDDZ128rm:
9753 case X86::VPGATHERDDZ256rm:
9754 case X86::VPGATHERDDZrm:
9755 case X86::VPGATHERDDrm:
9756 case X86::VPGATHERDQYrm:
9757 case X86::VPGATHERDQZ128rm:
9758 case X86::VPGATHERDQZ256rm:
9759 case X86::VPGATHERDQZrm:
9760 case X86::VPGATHERDQrm:
9761 case X86::VPGATHERQDYrm:
9762 case X86::VPGATHERQDZ128rm:
9763 case X86::VPGATHERQDZ256rm:
9764 case X86::VPGATHERQDZrm:
9765 case X86::VPGATHERQDrm:
9766 case X86::VPGATHERQQYrm:
9767 case X86::VPGATHERQQZ128rm:
9768 case X86::VPGATHERQQZ256rm:
9769 case X86::VPGATHERQQZrm:
9770 case X86::VPGATHERQQrm:
9771 case X86::VSCATTERDPDZ128mr:
9772 case X86::VSCATTERDPDZ256mr:
9773 case X86::VSCATTERDPDZmr:
9774 case X86::VSCATTERDPSZ128mr:
9775 case X86::VSCATTERDPSZ256mr:
9776 case X86::VSCATTERDPSZmr:
9777 case X86::VSCATTERPF0DPDm:
9778 case X86::VSCATTERPF0DPSm:
9779 case X86::VSCATTERPF0QPDm:
9780 case X86::VSCATTERPF0QPSm:
9781 case X86::VSCATTERPF1DPDm:
9782 case X86::VSCATTERPF1DPSm:
9783 case X86::VSCATTERPF1QPDm:
9784 case X86::VSCATTERPF1QPSm:
9785 case X86::VSCATTERQPDZ128mr:
9786 case X86::VSCATTERQPDZ256mr:
9787 case X86::VSCATTERQPDZmr:
9788 case X86::VSCATTERQPSZ128mr:
9789 case X86::VSCATTERQPSZ256mr:
9790 case X86::VSCATTERQPSZmr:
9791 case X86::VPSCATTERDDZ128mr:
9792 case X86::VPSCATTERDDZ256mr:
9793 case X86::VPSCATTERDDZmr:
9794 case X86::VPSCATTERDQZ128mr:
9795 case X86::VPSCATTERDQZ256mr:
9796 case X86::VPSCATTERDQZmr:
9797 case X86::VPSCATTERQDZ128mr:
9798 case X86::VPSCATTERQDZ256mr:
9799 case X86::VPSCATTERQDZmr:
9800 case X86::VPSCATTERQQZ128mr:
9801 case X86::VPSCATTERQQZ256mr:
9802 case X86::VPSCATTERQQZmr:
9812 unsigned UseIdx)
const {
9819 Inst.
getNumDefs() <= 2 &&
"Reassociation needs binary operators");
9829 assert((Inst.
getNumDefs() == 1 || FlagDef) &&
"Implicit def isn't flags?");
9830 if (FlagDef && !FlagDef->
isDead())
9841 bool Invert)
const {
9893 case X86::VPANDDZ128rr:
9894 case X86::VPANDDZ256rr:
9895 case X86::VPANDDZrr:
9896 case X86::VPANDQZ128rr:
9897 case X86::VPANDQZ256rr:
9898 case X86::VPANDQZrr:
9901 case X86::VPORDZ128rr:
9902 case X86::VPORDZ256rr:
9904 case X86::VPORQZ128rr:
9905 case X86::VPORQZ256rr:
9909 case X86::VPXORDZ128rr:
9910 case X86::VPXORDZ256rr:
9911 case X86::VPXORDZrr:
9912 case X86::VPXORQZ128rr:
9913 case X86::VPXORQZ256rr:
9914 case X86::VPXORQZrr:
9917 case X86::VANDPDYrr:
9918 case X86::VANDPSYrr:
9919 case X86::VANDPDZ128rr:
9920 case X86::VANDPSZ128rr:
9921 case X86::VANDPDZ256rr:
9922 case X86::VANDPSZ256rr:
9923 case X86::VANDPDZrr:
9924 case X86::VANDPSZrr:
9929 case X86::VORPDZ128rr:
9930 case X86::VORPSZ128rr:
9931 case X86::VORPDZ256rr:
9932 case X86::VORPSZ256rr:
9937 case X86::VXORPDYrr:
9938 case X86::VXORPSYrr:
9939 case X86::VXORPDZ128rr:
9940 case X86::VXORPSZ128rr:
9941 case X86::VXORPDZ256rr:
9942 case X86::VXORPSZ256rr:
9943 case X86::VXORPDZrr:
9944 case X86::VXORPSZrr:
9965 case X86::VPADDBYrr:
9966 case X86::VPADDWYrr:
9967 case X86::VPADDDYrr:
9968 case X86::VPADDQYrr:
9969 case X86::VPADDBZ128rr:
9970 case X86::VPADDWZ128rr:
9971 case X86::VPADDDZ128rr:
9972 case X86::VPADDQZ128rr:
9973 case X86::VPADDBZ256rr:
9974 case X86::VPADDWZ256rr:
9975 case X86::VPADDDZ256rr:
9976 case X86::VPADDQZ256rr:
9977 case X86::VPADDBZrr:
9978 case X86::VPADDWZrr:
9979 case X86::VPADDDZrr:
9980 case X86::VPADDQZrr:
9981 case X86::VPMULLWrr:
9982 case X86::VPMULLWYrr:
9983 case X86::VPMULLWZ128rr:
9984 case X86::VPMULLWZ256rr:
9985 case X86::VPMULLWZrr:
9986 case X86::VPMULLDrr:
9987 case X86::VPMULLDYrr:
9988 case X86::VPMULLDZ128rr:
9989 case X86::VPMULLDZ256rr:
9990 case X86::VPMULLDZrr:
9991 case X86::VPMULLQZ128rr:
9992 case X86::VPMULLQZ256rr:
9993 case X86::VPMULLQZrr:
9994 case X86::VPMAXSBrr:
9995 case X86::VPMAXSBYrr:
9996 case X86::VPMAXSBZ128rr:
9997 case X86::VPMAXSBZ256rr:
9998 case X86::VPMAXSBZrr:
9999 case X86::VPMAXSDrr:
10000 case X86::VPMAXSDYrr:
10001 case X86::VPMAXSDZ128rr:
10002 case X86::VPMAXSDZ256rr:
10003 case X86::VPMAXSDZrr:
10004 case X86::VPMAXSQZ128rr:
10005 case X86::VPMAXSQZ256rr:
10006 case X86::VPMAXSQZrr:
10007 case X86::VPMAXSWrr:
10008 case X86::VPMAXSWYrr:
10009 case X86::VPMAXSWZ128rr:
10010 case X86::VPMAXSWZ256rr:
10011 case X86::VPMAXSWZrr:
10012 case X86::VPMAXUBrr:
10013 case X86::VPMAXUBYrr:
10014 case X86::VPMAXUBZ128rr:
10015 case X86::VPMAXUBZ256rr:
10016 case X86::VPMAXUBZrr:
10017 case X86::VPMAXUDrr:
10018 case X86::VPMAXUDYrr:
10019 case X86::VPMAXUDZ128rr:
10020 case X86::VPMAXUDZ256rr:
10021 case X86::VPMAXUDZrr:
10022 case X86::VPMAXUQZ128rr:
10023 case X86::VPMAXUQZ256rr:
10024 case X86::VPMAXUQZrr:
10025 case X86::VPMAXUWrr:
10026 case X86::VPMAXUWYrr:
10027 case X86::VPMAXUWZ128rr:
10028 case X86::VPMAXUWZ256rr:
10029 case X86::VPMAXUWZrr:
10030 case X86::VPMINSBrr:
10031 case X86::VPMINSBYrr:
10032 case X86::VPMINSBZ128rr:
10033 case X86::VPMINSBZ256rr:
10034 case X86::VPMINSBZrr:
10035 case X86::VPMINSDrr:
10036 case X86::VPMINSDYrr:
10037 case X86::VPMINSDZ128rr:
10038 case X86::VPMINSDZ256rr:
10039 case X86::VPMINSDZrr:
10040 case X86::VPMINSQZ128rr:
10041 case X86::VPMINSQZ256rr:
10042 case X86::VPMINSQZrr:
10043 case X86::VPMINSWrr:
10044 case X86::VPMINSWYrr:
10045 case X86::VPMINSWZ128rr:
10046 case X86::VPMINSWZ256rr:
10047 case X86::VPMINSWZrr:
10048 case X86::VPMINUBrr:
10049 case X86::VPMINUBYrr:
10050 case X86::VPMINUBZ128rr:
10051 case X86::VPMINUBZ256rr:
10052 case X86::VPMINUBZrr:
10053 case X86::VPMINUDrr:
10054 case X86::VPMINUDYrr:
10055 case X86::VPMINUDZ128rr:
10056 case X86::VPMINUDZ256rr:
10057 case X86::VPMINUDZrr:
10058 case X86::VPMINUQZ128rr:
10059 case X86::VPMINUQZ256rr:
10060 case X86::VPMINUQZrr:
10061 case X86::VPMINUWrr:
10062 case X86::VPMINUWYrr:
10063 case X86::VPMINUWZ128rr:
10064 case X86::VPMINUWZ256rr:
10065 case X86::VPMINUWZrr:
10069 case X86::MAXCPDrr:
10070 case X86::MAXCPSrr:
10071 case X86::MAXCSDrr:
10072 case X86::MAXCSSrr:
10073 case X86::MINCPDrr:
10074 case X86::MINCPSrr:
10075 case X86::MINCSDrr:
10076 case X86::MINCSSrr:
10077 case X86::VMAXCPDrr:
10078 case X86::VMAXCPSrr:
10079 case X86::VMAXCPDYrr:
10080 case X86::VMAXCPSYrr:
10081 case X86::VMAXCPDZ128rr:
10082 case X86::VMAXCPSZ128rr:
10083 case X86::VMAXCPDZ256rr:
10084 case X86::VMAXCPSZ256rr:
10085 case X86::VMAXCPDZrr:
10086 case X86::VMAXCPSZrr:
10087 case X86::VMAXCSDrr:
10088 case X86::VMAXCSSrr:
10089 case X86::VMAXCSDZrr:
10090 case X86::VMAXCSSZrr:
10091 case X86::VMINCPDrr:
10092 case X86::VMINCPSrr:
10093 case X86::VMINCPDYrr:
10094 case X86::VMINCPSYrr:
10095 case X86::VMINCPDZ128rr:
10096 case X86::VMINCPSZ128rr:
10097 case X86::VMINCPDZ256rr:
10098 case X86::VMINCPSZ256rr:
10099 case X86::VMINCPDZrr:
10100 case X86::VMINCPSZrr:
10101 case X86::VMINCSDrr:
10102 case X86::VMINCSSrr:
10103 case X86::VMINCSDZrr:
10104 case X86::VMINCSSZrr:
10105 case X86::VMAXCPHZ128rr:
10106 case X86::VMAXCPHZ256rr:
10107 case X86::VMAXCPHZrr:
10108 case X86::VMAXCSHZrr:
10109 case X86::VMINCPHZ128rr:
10110 case X86::VMINCPHZ256rr:
10111 case X86::VMINCPHZrr:
10112 case X86::VMINCSHZrr:
10122 case X86::VADDPDrr:
10123 case X86::VADDPSrr:
10124 case X86::VADDPDYrr:
10125 case X86::VADDPSYrr:
10126 case X86::VADDPDZ128rr:
10127 case X86::VADDPSZ128rr:
10128 case X86::VADDPDZ256rr:
10129 case X86::VADDPSZ256rr:
10130 case X86::VADDPDZrr:
10131 case X86::VADDPSZrr:
10132 case X86::VADDSDrr:
10133 case X86::VADDSSrr:
10134 case X86::VADDSDZrr:
10135 case X86::VADDSSZrr:
10136 case X86::VMULPDrr:
10137 case X86::VMULPSrr:
10138 case X86::VMULPDYrr:
10139 case X86::VMULPSYrr:
10140 case X86::VMULPDZ128rr:
10141 case X86::VMULPSZ128rr:
10142 case X86::VMULPDZ256rr:
10143 case X86::VMULPSZ256rr:
10144 case X86::VMULPDZrr:
10145 case X86::VMULPSZrr:
10146 case X86::VMULSDrr:
10147 case X86::VMULSSrr:
10148 case X86::VMULSDZrr:
10149 case X86::VMULSSZrr:
10150 case X86::VADDPHZ128rr:
10151 case X86::VADDPHZ256rr:
10152 case X86::VADDPHZrr:
10153 case X86::VADDSHZrr:
10154 case X86::VMULPHZ128rr:
10155 case X86::VMULPHZ256rr:
10156 case X86::VMULPHZrr:
10157 case X86::VMULSHZrr:
10168static std::optional<ParamLoadedValue>
10171 Register DestReg =
MI.getOperand(0).getReg();
10172 Register SrcReg =
MI.getOperand(1).getReg();
10177 if (DestReg == DescribedReg)
10182 if (
unsigned SubRegIdx =
TRI->getSubRegIndex(DestReg, DescribedReg)) {
10183 Register SrcSubReg =
TRI->getSubReg(SrcReg, SubRegIdx);
10193 if (
MI.getOpcode() == X86::MOV8rr ||
MI.getOpcode() == X86::MOV16rr ||
10194 !
TRI->isSuperRegister(DestReg, DescribedReg))
10195 return std::nullopt;
10197 assert(
MI.getOpcode() == X86::MOV32rr &&
"Unexpected super-register case");
10201std::optional<ParamLoadedValue>
10208 switch (
MI.getOpcode()) {
10211 case X86::LEA64_32r: {
10213 if (!
TRI->isSuperRegisterEq(
MI.getOperand(0).getReg(), Reg))
10214 return std::nullopt;
10218 if (!
MI.getOperand(4).isImm() || !
MI.getOperand(2).isImm())
10219 return std::nullopt;
10228 if ((Op1.
isReg() && Op1.
getReg() ==
MI.getOperand(0).getReg()) ||
10229 Op2.
getReg() ==
MI.getOperand(0).getReg())
10230 return std::nullopt;
10231 else if ((Op1.
isReg() && Op1.
getReg() != X86::NoRegister &&
10232 TRI->regsOverlap(Op1.
getReg(),
MI.getOperand(0).getReg())) ||
10233 (Op2.
getReg() != X86::NoRegister &&
10234 TRI->regsOverlap(Op2.
getReg(),
MI.getOperand(0).getReg())))
10235 return std::nullopt;
10237 int64_t Coef =
MI.getOperand(2).getImm();
10238 int64_t
Offset =
MI.getOperand(4).getImm();
10241 if ((Op1.
isReg() && Op1.
getReg() != X86::NoRegister)) {
10243 }
else if (Op1.
isFI())
10246 if (
Op &&
Op->isReg() &&
Op->getReg() == Op2.
getReg() && Coef > 0) {
10247 Ops.push_back(dwarf::DW_OP_constu);
10248 Ops.push_back(Coef + 1);
10249 Ops.push_back(dwarf::DW_OP_mul);
10251 if (
Op && Op2.
getReg() != X86::NoRegister) {
10252 int dwarfReg =
TRI->getDwarfRegNum(Op2.
getReg(),
false);
10254 return std::nullopt;
10255 else if (dwarfReg < 32) {
10256 Ops.push_back(dwarf::DW_OP_breg0 + dwarfReg);
10259 Ops.push_back(dwarf::DW_OP_bregx);
10260 Ops.push_back(dwarfReg);
10270 Ops.push_back(dwarf::DW_OP_constu);
10271 Ops.push_back(Coef);
10272 Ops.push_back(dwarf::DW_OP_mul);
10275 if (((Op1.
isReg() && Op1.
getReg() != X86::NoRegister) || Op1.
isFI()) &&
10276 Op2.
getReg() != X86::NoRegister) {
10277 Ops.push_back(dwarf::DW_OP_plus);
10289 return std::nullopt;
10292 case X86::MOV64ri32:
10295 if (!
TRI->isSuperRegisterEq(
MI.getOperand(0).getReg(), Reg))
10296 return std::nullopt;
10303 case X86::XOR32rr: {
10306 if (!
TRI->isSuperRegisterEq(
MI.getOperand(0).getReg(), Reg))
10307 return std::nullopt;
10308 if (
MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
10310 return std::nullopt;
10312 case X86::MOVSX64rr32: {
10319 if (!
TRI->isSubRegisterEq(
MI.getOperand(0).getReg(), Reg))
10320 return std::nullopt;
10329 if (Reg ==
MI.getOperand(0).getReg())
10332 assert(X86MCRegisterClasses[X86::GR32RegClassID].
contains(Reg) &&
10333 "Unhandled sub-register case for MOVSX64rr32");
10338 assert(!
MI.isMoveImmediate() &&
"Unexpected MoveImm instruction");
10355 assert(!OldFlagDef1 == !OldFlagDef2 &&
10356 "Unexpected instruction type for reassociation");
10358 if (!OldFlagDef1 || !OldFlagDef2)
10362 "Must have dead EFLAGS operand in reassociable instruction");
10369 assert(NewFlagDef1 && NewFlagDef2 &&
10370 "Unexpected operand in reassociable instruction");
10380std::pair<unsigned, unsigned>
10382 return std::make_pair(TF, 0u);
10387 using namespace X86II;
10388 static const std::pair<unsigned, const char *> TargetFlags[] = {
10389 {MO_GOT_ABSOLUTE_ADDRESS,
"x86-got-absolute-address"},
10390 {MO_PIC_BASE_OFFSET,
"x86-pic-base-offset"},
10391 {MO_GOT,
"x86-got"},
10392 {MO_GOTOFF,
"x86-gotoff"},
10393 {MO_GOTPCREL,
"x86-gotpcrel"},
10394 {MO_GOTPCREL_NORELAX,
"x86-gotpcrel-norelax"},
10395 {MO_PLT,
"x86-plt"},
10396 {MO_TLSGD,
"x86-tlsgd"},
10397 {MO_TLSLD,
"x86-tlsld"},
10398 {MO_TLSLDM,
"x86-tlsldm"},
10399 {MO_GOTTPOFF,
"x86-gottpoff"},
10400 {MO_INDNTPOFF,
"x86-indntpoff"},
10401 {MO_TPOFF,
"x86-tpoff"},
10402 {MO_DTPOFF,
"x86-dtpoff"},
10403 {MO_NTPOFF,
"x86-ntpoff"},
10404 {MO_GOTNTPOFF,
"x86-gotntpoff"},
10405 {MO_DLLIMPORT,
"x86-dllimport"},
10406 {MO_DARWIN_NONLAZY,
"x86-darwin-nonlazy"},
10407 {MO_DARWIN_NONLAZY_PIC_BASE,
"x86-darwin-nonlazy-pic-base"},
10408 {MO_TLVP,
"x86-tlvp"},
10409 {MO_TLVP_PIC_BASE,
"x86-tlvp-pic-base"},
10410 {MO_SECREL,
"x86-secrel"},
10411 {MO_COFFSTUB,
"x86-coffstub"}};
10435 if (GlobalBaseReg == 0)
10447 PC =
RegInfo.createVirtualRegister(&X86::GR32RegClass);
10449 PC = GlobalBaseReg;
10451 if (STI.is64Bit()) {
10504 StringRef getPassName()
const override {
10505 return "X86 PIC Global Base Reg Initialization";
10508 void getAnalysisUsage(AnalysisUsage &AU)
const override {
10523 bool runOnMachineFunction(MachineFunction &MF)
override {
10527 X86MachineFunctionInfo *MFI = MF.
getInfo<X86MachineFunctionInfo>();
10533 MachineDominatorTree *DT =
10534 &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
10544 MachineBasicBlock *BB =
Node->getBlock();
10550 switch (
I->getOpcode()) {
10551 case X86::TLS_base_addr32:
10552 case X86::TLS_base_addr64:
10553 if (TLSBaseAddrReg)
10554 I = ReplaceTLSBaseAddrCall(*
I, TLSBaseAddrReg);
10556 I = SetRegister(*
I, &TLSBaseAddrReg);
10565 for (
auto &
I : *Node) {
10566 Changed |= VisitNode(
I, TLSBaseAddrReg);
10574 MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &
I,
10576 MachineFunction *MF =
I.getParent()->getParent();
10577 const X86Subtarget &STI = MF->
getSubtarget<X86Subtarget>();
10578 const bool is64Bit = STI.is64Bit();
10582 MachineInstr *
Copy =
10584 TII->get(TargetOpcode::COPY),
is64Bit ? X86::RAX : X86::EAX)
10585 .
addReg(TLSBaseAddrReg);
10588 I.eraseFromParent();
10595 MachineInstr *SetRegister(MachineInstr &
I,
Register *TLSBaseAddrReg) {
10596 MachineFunction *MF =
I.getParent()->getParent();
10597 const X86Subtarget &STI = MF->
getSubtarget<X86Subtarget>();
10598 const bool is64Bit = STI.is64Bit();
10602 MachineRegisterInfo &RegInfo = MF->
getRegInfo();
10604 is64Bit ? &X86::GR64RegClass : &X86::GR32RegClass);
10607 MachineInstr *
Next =
I.getNextNode();
10609 TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
10615 StringRef getPassName()
const override {
10616 return "Local Dynamic TLS Access Clean-up";
10619 void getAnalysisUsage(AnalysisUsage &AU)
const override {
10621 AU.
addRequired<MachineDominatorTreeWrapperPass>();
10627char LDTLSCleanup::ID = 0;
10629 return new LDTLSCleanup();
10662std::optional<std::unique_ptr<outliner::OutlinedFunction>>
10665 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
10666 unsigned MinRepeats)
const {
10667 unsigned SequenceSize = 0;
10668 for (
auto &
MI : RepeatedSequenceLocs[0]) {
10672 if (
MI.isDebugInstr() ||
MI.isKill())
10679 unsigned CFICount = 0;
10680 for (
auto &
I : RepeatedSequenceLocs[0]) {
10681 if (
I.isCFIInstruction())
10691 std::vector<MCCFIInstruction> CFIInstructions =
10692 C.getMF()->getFrameInstructions();
10694 if (CFICount > 0 && CFICount != CFIInstructions.size())
10695 return std::nullopt;
10699 if (RepeatedSequenceLocs[0].back().isTerminator()) {
10703 return std::make_unique<outliner::OutlinedFunction>(
10704 RepeatedSequenceLocs, SequenceSize,
10711 return std::nullopt;
10716 return std::make_unique<outliner::OutlinedFunction>(
10726 if (Subtarget.getFrameLowering()->has128ByteRedZone(MF)) {
10735 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
10745 unsigned Flags)
const {
10749 if (
MI.isTerminator())
10763 if (
MI.modifiesRegister(X86::RSP, &RI) ||
MI.readsRegister(X86::RSP, &RI) ||
10764 MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) ||
10765 MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP))
10769 if (
MI.readsRegister(X86::RIP, &RI) ||
10770 MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) ||
10771 MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP))
10775 if (
MI.isCFIInstruction())
10791 MBB.insert(
MBB.end(), retq);
10801 .addGlobalAddress(M.getNamedValue(MF.
getName())));
10805 .addGlobalAddress(M.getNamedValue(MF.
getName())));
10814 bool AllowSideEffects)
const {
10819 if (ST.hasMMX() && X86::VR64RegClass.contains(Reg))
10823 if (
TRI.isGeneralPurposeRegister(MF, Reg)) {
10828 if (!AllowSideEffects)
10835 }
else if (X86::VR128RegClass.
contains(Reg)) {
10841 }
else if (X86::VR256RegClass.
contains(Reg)) {
10847 }
else if (X86::VR512RegClass.
contains(Reg)) {
10849 if (!ST.hasAVX512())
10853 }
else if (X86::VK1RegClass.
contains(Reg) || X86::VK2RegClass.
contains(Reg) ||
10855 X86::VK16RegClass.
contains(Reg)) {
10859 unsigned Op = ST.hasBWI() ? X86::KSET0Q : X86::KSET0W;
10866 bool DoRegPressureReduce)
const {
10869 case X86::VPDPWSSDrr:
10870 case X86::VPDPWSSDrm:
10871 case X86::VPDPWSSDYrr:
10872 case X86::VPDPWSSDYrm: {
10873 if (!Subtarget.hasFastDPWSSD()) {
10879 case X86::VPDPWSSDZ128rr:
10880 case X86::VPDPWSSDZ128rm:
10881 case X86::VPDPWSSDZ256rr:
10882 case X86::VPDPWSSDZ256rm:
10883 case X86::VPDPWSSDZrr:
10884 case X86::VPDPWSSDZrm: {
10885 if (Subtarget.hasBWI() && !Subtarget.hasFastDPWSSD()) {
10893 Patterns, DoRegPressureReduce);
10905 unsigned AddOpc = 0;
10906 unsigned MaddOpc = 0;
10909 assert(
false &&
"It should not reach here");
10915 case X86::VPDPWSSDrr:
10916 MaddOpc = X86::VPMADDWDrr;
10917 AddOpc = X86::VPADDDrr;
10919 case X86::VPDPWSSDrm:
10920 MaddOpc = X86::VPMADDWDrm;
10921 AddOpc = X86::VPADDDrr;
10923 case X86::VPDPWSSDZ128rr:
10924 MaddOpc = X86::VPMADDWDZ128rr;
10925 AddOpc = X86::VPADDDZ128rr;
10927 case X86::VPDPWSSDZ128rm:
10928 MaddOpc = X86::VPMADDWDZ128rm;
10929 AddOpc = X86::VPADDDZ128rr;
10935 case X86::VPDPWSSDYrr:
10936 MaddOpc = X86::VPMADDWDYrr;
10937 AddOpc = X86::VPADDDYrr;
10939 case X86::VPDPWSSDYrm:
10940 MaddOpc = X86::VPMADDWDYrm;
10941 AddOpc = X86::VPADDDYrr;
10943 case X86::VPDPWSSDZ256rr:
10944 MaddOpc = X86::VPMADDWDZ256rr;
10945 AddOpc = X86::VPADDDZ256rr;
10947 case X86::VPDPWSSDZ256rm:
10948 MaddOpc = X86::VPMADDWDZ256rm;
10949 AddOpc = X86::VPADDDZ256rr;
10955 case X86::VPDPWSSDZrr:
10956 MaddOpc = X86::VPMADDWDZrr;
10957 AddOpc = X86::VPADDDZrr;
10959 case X86::VPDPWSSDZrm:
10960 MaddOpc = X86::VPMADDWDZrm;
10961 AddOpc = X86::VPADDDZrr;
10973 InstrIdxForVirtReg.
insert(std::make_pair(NewReg, 0));
10995 DelInstrs, InstrIdxForVirtReg);
10999 InstrIdxForVirtReg);
11009 M.Base.FrameIndex = FI;
11010 M.getFullAddress(
Ops);
11013#define GET_INSTRINFO_HELPERS
11014#include "X86GenInstrInfo.inc"
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool isFrameStoreOpcode(int Opcode)
static bool isFrameLoadOpcode(int Opcode)
MachineOutlinerClass
Constants defining how certain sequences should be outlined.
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
DXIL Forward Handle Accesses
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
static bool lookup(const GsymReader &GR, DataExtractor &Data, uint64_t &Offset, uint64_t BaseAddr, uint64_t Addr, SourceLocations &SrcLocs, llvm::Error &Err)
A Lookup helper functions.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
static SDValue isNOT(SDValue V, SelectionDAG &DAG)
static bool Expand2AddrUndef(MachineInstrBuilder &MIB, const MCInstrDesc &Desc)
Expand a single-def pseudo instruction to a two-addr instruction with two undef reads of the register...
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register const TargetRegisterInfo * TRI
Promote Memory to Register
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Provides some synthesis utilities to produce sequences of values.
static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
#define FROM_TO(FROM, TO)
cl::opt< bool > X86EnableAPXForRelocation
static bool is64Bit(const char *name)
#define GET_EGPR_IF_ENABLED(OPC)
static bool isLEA(unsigned Opcode)
static void addOperands(MachineInstrBuilder &MIB, ArrayRef< MachineOperand > MOs, int PtrOffset=0)
static std::optional< ParamLoadedValue > describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg, const TargetRegisterInfo *TRI)
If DescribedReg overlaps with the MOVrr instruction's destination register then, if possible,...
static cl::opt< unsigned > PartialRegUpdateClearance("partial-reg-update-clearance", cl::desc("Clearance between two register writes " "for inserting XOR to avoid partial " "register update"), cl::init(64), cl::Hidden)
static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF, MachineInstr &MI)
static unsigned CopyToFromAsymmetricReg(Register DestReg, Register SrcReg, const X86Subtarget &Subtarget)
static bool isConvertibleLEA(MachineInstr *MI)
static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB, const TargetInstrInfo &TII, const X86Subtarget &Subtarget)
static bool isAMXOpcode(unsigned Opc)
static int getJumpTableIndexFromReg(const MachineRegisterInfo &MRI, Register Reg)
static void updateOperandRegConstraints(MachineFunction &MF, MachineInstr &NewMI, const TargetInstrInfo &TII)
static int getJumpTableIndexFromAddr(const MachineInstr &MI)
static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth, unsigned NewWidth, unsigned *pNewMask=nullptr)
static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII, bool MinusOne)
static unsigned getNewOpcFromTable(ArrayRef< X86TableEntry > Table, unsigned Opc)
static unsigned getStoreRegOpcode(Register SrcReg, const TargetRegisterClass *RC, bool IsStackAligned, const X86Subtarget &STI)
#define FOLD_BROADCAST(SIZE)
static cl::opt< unsigned > UndefRegClearance("undef-reg-clearance", cl::desc("How many idle instructions we would like before " "certain undef register reads"), cl::init(128), cl::Hidden)
#define CASE_BCAST_TYPE_OPC(TYPE, OP16, OP32, OP64)
static bool isTruncatedShiftCountForLEA(unsigned ShAmt)
Check whether the given shift count is appropriate, i.e. whether it can be represented by a LEA instruction.
static cl::opt< bool > ReMatPICStubLoad("remat-pic-stub-load", cl::desc("Re-materialize load from stub in PIC mode"), cl::init(false), cl::Hidden)
static SmallVector< MachineMemOperand *, 2 > extractLoadMMOs(ArrayRef< MachineMemOperand * > MMOs, MachineFunction &MF)
static MachineInstr * fuseTwoAddrInst(MachineFunction &MF, unsigned Opcode, ArrayRef< MachineOperand > MOs, MachineBasicBlock::iterator InsertPt, MachineInstr &MI, const TargetInstrInfo &TII)
static void printFailMsgforFold(const MachineInstr &MI, unsigned Idx)
static bool canConvert2Copy(unsigned Opc)
static cl::opt< bool > NoFusing("disable-spill-fusing", cl::desc("Disable fusing of spill code into instructions"), cl::Hidden)
static bool expandNOVLXStore(MachineInstrBuilder &MIB, const TargetRegisterInfo *TRI, const MCInstrDesc &StoreDesc, const MCInstrDesc &ExtractDesc, unsigned SubIdx)
static bool isX87Reg(Register Reg)
Return true if the Reg is X87 register.
static bool Expand2AddrKreg(MachineInstrBuilder &MIB, const MCInstrDesc &Desc, Register Reg)
Expand a single-def pseudo instruction to a two-addr instruction with two k0 reads.
#define VPERM_CASES_BROADCAST(Suffix)
static std::pair< X86::CondCode, unsigned > isUseDefConvertible(const MachineInstr &MI)
Check whether the use can be converted to remove a comparison against zero.
static bool findRedundantFlagInstr(MachineInstr &CmpInstr, MachineInstr &CmpValDefInstr, const MachineRegisterInfo *MRI, MachineInstr **AndInstr, const TargetRegisterInfo *TRI, const X86Subtarget &ST, bool &NoSignFlag, bool &ClearsOverflowFlag)
static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc)
static unsigned getLoadRegOpcode(Register DestReg, const TargetRegisterClass *RC, bool IsStackAligned, const X86Subtarget &STI)
static void expandLoadStackGuard(MachineInstrBuilder &MIB, const TargetInstrInfo &TII)
static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum, bool ForLoadFold=false)
static MachineInstr * makeM0Inst(const TargetInstrInfo &TII, unsigned Opcode, ArrayRef< MachineOperand > MOs, MachineBasicBlock::iterator InsertPt, MachineInstr &MI)
#define GET_ND_IF_ENABLED(OPC)
static bool expandMOVSHP(MachineInstrBuilder &MIB, MachineInstr &MI, const TargetInstrInfo &TII, bool HasAVX)
static bool hasPartialRegUpdate(unsigned Opcode, const X86Subtarget &Subtarget, bool ForLoadFold=false)
Return true for all instructions that only update the first 32 or 64-bits of the destination register...
static const uint16_t * lookupAVX512(unsigned opcode, unsigned domain, ArrayRef< uint16_t[4]> Table)
static unsigned getLoadStoreRegOpcode(Register Reg, const TargetRegisterClass *RC, bool IsStackAligned, const X86Subtarget &STI, bool Load)
#define VPERM_CASES(Suffix)
#define FROM_TO_SIZE(A, B, S)
static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2)
static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag, bool &ClearsOverflowFlag)
Check whether the definition can be converted to remove a comparison against zero.
static MachineInstr * fuseInst(MachineFunction &MF, unsigned Opcode, unsigned OpNo, ArrayRef< MachineOperand > MOs, MachineBasicBlock::iterator InsertPt, MachineInstr &MI, const TargetInstrInfo &TII, int PtrOffset=0)
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
static unsigned getCommutedVPERMV3Opcode(unsigned Opcode)
static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII)
static MachineBasicBlock * getFallThroughMBB(MachineBasicBlock *MBB, MachineBasicBlock *TBB)
static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI, const MachineInstr &UserMI, const MachineFunction &MF)
Check if LoadMI is a partial register load that we can't fold into MI because the latter uses content...
static unsigned getLoadStoreOpcodeForFP16(bool Load, const X86Subtarget &STI)
static bool isHReg(Register Reg)
Test if the given register is a physical h register.
static cl::opt< bool > PrintFailedFusing("print-failed-fuse-candidates", cl::desc("Print instructions that the allocator wants to" " fuse, but the X86 backend currently can't"), cl::Hidden)
static bool expandNOVLXLoad(MachineInstrBuilder &MIB, const TargetRegisterInfo *TRI, const MCInstrDesc &LoadDesc, const MCInstrDesc &BroadcastDesc, unsigned SubIdx)
static void genAlternativeDpCodeSequence(MachineInstr &Root, const TargetInstrInfo &TII, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg)
static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1, unsigned SrcOpIdx2)
This determines which of three possible cases of a three source commute the source indexes correspond...
static unsigned getTruncatedShiftCount(const MachineInstr &MI, unsigned ShiftAmtOperandIdx)
Check whether the shift count for a machine operand is non-zero.
static SmallVector< MachineMemOperand *, 2 > extractStoreMMOs(ArrayRef< MachineMemOperand * > MMOs, MachineFunction &MF)
static unsigned getBroadcastOpcode(const X86FoldTableEntry *I, const TargetRegisterClass *RC, const X86Subtarget &STI)
static unsigned convertALUrr2ALUri(unsigned Opc)
Convert an ALUrr opcode to corresponding ALUri opcode.
static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI)
Return true if the register is the PIC base, i.e. defined by X86::MOVPC32r.
static bool isCommutableVPERMV3Instruction(unsigned Opcode)
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
AnalysisUsage & addRequired()
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
This is an important base class in LLVM.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * appendExt(const DIExpression *Expr, unsigned FromSize, unsigned ToSize, bool Signed)
Append a zero- or sign-extension to Expr.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
DomTreeNodeBase< NodeT > * getRootNode()
getRootNode - This returns the entry node for the CFG of the function.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
FunctionPass class - This class is used to implement most global optimizations.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
LiveInterval - This class represents the liveness of a register, or stack slot.
SlotIndex InsertMachineInstrInMaps(MachineInstr &MI)
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
A set of physical registers with utility functions to track liveness when walking backward/forward th...
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
LLVM_ABI VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
static LocationSize precise(uint64_t Value)
bool usesWindowsCFI() const
static MCCFIInstruction createAdjustCfaOffset(MCSymbol *L, int64_t Adjustment, SMLoc Loc={})
.cfi_adjust_cfa_offset Same as .cfi_def_cfa_offset, but Offset is a relative value that is added/subt...
Instances of this class represent a single low-level machine instruction.
void setOpcode(unsigned Op)
Describe properties that are true of each instruction in the target description file.
This holds information about one operand of a machine instruction, indicating the register class for ...
Wrapper class representing physical registers. Should be passed by value.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
MachineInstrBundleIterator< const MachineInstr > const_iterator
void push_back(MachineInstr *MI)
MachineInstr * remove(MachineInstr *I)
Remove the unbundled instruction from the instruction list without deleting it.
LLVM_ABI DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
LLVM_ABI bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
LLVM_ABI void eraseFromParent()
This method unlinks 'this' from the containing function and deletes it.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< succ_iterator > successors()
MachineInstrBundleIterator< MachineInstr > iterator
@ LQR_Dead
Register is known to be fully dead.
This class is a data container for one entry in a MachineConstantPool.
union llvm::MachineConstantPoolEntry::(anonymous union) Val
The constant itself.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
const Constant * ConstVal
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const MachineBasicBlock & front() const
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDisp(const MachineOperand &Disp, int64_t off, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
mop_iterator operands_begin()
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
void dropDebugNumber()
Drop any variable location debugging information associated with this instruction.
LLVM_ABI void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just prior to the instruction itself.
LLVM_ABI void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
unsigned getNumOperands() const
Returns the total number of operands.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI void eraseFromBundle()
Unlink 'this' from its basic block and delete it.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
LLVM_ABI void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
void setFlag(MIFlag Flag)
Set a MI flag.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
LLVM_ABI void dump() const
const MachineOperand & getOperand(unsigned i) const
unsigned getNumDefs() const
Returns the total number of definitions.
void setDebugLoc(DebugLoc DL)
Replace current source information with new such.
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
void setImplicit(bool Val=true)
void setImm(int64_t immVal)
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
void setIsDead(bool Val=true)
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void setIsKill(bool Val=true)
bool isJTI() const
isJTI - Tests if this is a MO_JumpTableIndex operand.
LLVM_ABI void ChangeToRegister(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isDebug=false)
ChangeToRegister - Replace this operand with a new register operand of the specified value.
static MachineOperand CreateImm(int64_t Val)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateCPI(unsigned Idx, int Offset, unsigned TargetFlags=0)
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
A Module instance is used to store all the information related to an LLVM module.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
MachineFunction & getMachineFunction() const
SlotIndex - An opaque wrapper around machine indexes.
SlotIndex getBaseIndex() const
Returns the base index for associated with this index.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Information about stack frame layout on the target.
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
bool isPositionIndependent() const
CodeModel::Model getCodeModel() const
Returns the code model.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Target - Wrapper for Target specific information.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getZero()
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI Type * getFP128Ty(LLVMContext &C)
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
SlotIndex def
The index of the defining instruction.
LLVM Value Representation.
void BuildCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, const MCCFIInstruction &CFIInst, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Wraps up getting a CFI index and building a MachineInstr for it.
void getFrameIndexOperands(SmallVectorImpl< MachineOperand > &Ops, int FI) const override
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
Check if there exists an earlier instruction that operates on the same source operands and sets eflag...
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
Overrides the isSchedulingBoundary from Codegen/TargetInstrInfo.cpp to make it capable of identifying...
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
void replaceBranchWithTailCall(MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
bool canInsertSelect(const MachineBasicBlock &, ArrayRef< MachineOperand > Cond, Register, Register, Register, int &, int &, int &) const override
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, unsigned *LoadRegIndex=nullptr) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
Returns true iff the routine could find two commutable operands in the given machine instruction.
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const override
X86InstrInfo(const X86Subtarget &STI)
static bool isDataInvariantLoad(MachineInstr &MI)
Returns true if the instruction has no behavior (specified or otherwise) that is based on the value l...
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned CommuteOpIdx1, unsigned CommuteOpIdx2) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
const X86RegisterInfo & getRegisterInfo() const
getRegisterInfo - TargetInstrInfo is a superset of MRegister info.
bool hasCommutePreference(MachineInstr &MI, bool &Commute) const override
Returns true if we have preference on the operands order in MI, the commute decision is returned in C...
bool hasLiveCondCodeDef(MachineInstr &MI) const
True if MI has a condition code def, e.g.
std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const override
bool canMakeTailCallConditional(SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &LdSt, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, Register Reg, bool UnfoldLoad, bool UnfoldStore, SmallVectorImpl< MachineInstr * > &NewMIs) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
convertToThreeAddress - This method must be implemented by targets that set the M_CONVERTIBLE_TO_3_AD...
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool expandPostRAPseudo(MachineInstr &MI) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
MCInst getNop() const override
Return the noop instruction to use for a noop.
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig) const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const override
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const override
This is a used by the pre-regalloc scheduler to determine (in conjunction with areLoadsFromSameBasePt...
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
Fold a load or store of the specified stack slot into the specified machine instruction for the speci...
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg, int64_t &ImmVal) const override
std::optional< ExtAddrMode > getAddrModeFromMemoryOp(const MachineInstr &MemI, const TargetRegisterInfo *TRI) const override
Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
isStoreToStackSlotPostFE - Check for post-frame ptr elimination stack locations as well.
const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum) const override
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
bool isUnconditionalTailCall(const MachineInstr &MI) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
bool classifyLEAReg(MachineInstr &MI, const MachineOperand &Src, unsigned LEAOpcode, bool AllowSP, Register &NewSrc, unsigned &NewSrcSubReg, bool &isKill, MachineOperand &ImplicitOp, LiveVariables *LV, LiveIntervals *LIS) const
Given an operand within a MachineInstr, insert preceding code to put it into the right format for a p...
Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
isLoadFromStackSlotPostFE - Check for post-frame ptr elimination stack locations as well.
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
bool setExecutionDomainCustom(MachineInstr &MI, unsigned Domain) const
int getSPAdjust(const MachineInstr &MI) const override
getSPAdjust - This returns the stack pointer adjustment made by this instruction.
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool isReMaterializableImpl(const MachineInstr &MI) const override
Register getGlobalBaseReg(MachineFunction *MF) const
getGlobalBaseReg - Return a virtual register initialized with the the global base register value.
int getJumpTableIndex(const MachineInstr &MI) const override
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const override
This is an architecture-specific helper function of reassociateOps.
std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const override
bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const override
isCoalescableExtInstr - Return true if the instruction is a "coalescable" extension instruction.
void loadStoreTileReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned Opc, Register Reg, int FrameIdx, bool isKill=false) const
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const override
When getMachineCombinerPatterns() finds potential patterns, this function generates the instructions ...
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
bool analyzeBranchPredicate(MachineBasicBlock &MBB, TargetInstrInfo::MachineBranchPredicate &MBP, bool AllowModify=false) const override
static bool isDataInvariant(MachineInstr &MI)
Returns true if the instruction has no behavior (specified or otherwise) that is based on the value o...
unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const override
Inform the BreakFalseDeps pass how many idle instructions we would like before certain undef register...
void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const override
void buildClearRegister(Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator Iter, DebugLoc &DL, bool AllowSideEffects=true) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
int64_t getFrameAdjustment(const MachineInstr &I) const
Returns the stack pointer adjustment that happens inside the frame setup..destroy sequence (e....
bool hasHighOperandLatency(const TargetSchedModel &SchedModel, const MachineRegisterInfo *MRI, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override
uint16_t getExecutionDomainCustom(const MachineInstr &MI) const
bool isHighLatencyDef(int opc) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
foldImmediate - 'Reg' is known to be defined by a move immediate instruction, try to fold the immedia...
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
unsigned getFMA3OpcodeToCommuteOperands(const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2, const X86InstrFMA3Group &FMA3Group) const
Returns an adjusted FMA opcode that must be used in FMA instruction that performs the same computatio...
bool preservesZeroValueInReg(const MachineInstr *MI, const Register NullValueReg, const TargetRegisterInfo *TRI) const override
unsigned getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const override
Inform the BreakFalseDeps pass how many idle instructions we would like before a partial register upd...
X86MachineFunctionInfo - This class is derived from MachineFunction and contains private X86 target-s...
Register getGlobalBaseReg() const
int getTCReturnAddrDelta() const
void setGlobalBaseReg(Register Reg)
unsigned getNumLocalDynamicTLSAccesses() const
bool getUsesRedZone() const
const TargetRegisterClass * constrainRegClassToNonRex2(const TargetRegisterClass *RC) const
bool isPICStyleGOT() const
const X86InstrInfo * getInstrInfo() const override
const X86RegisterInfo * getRegisterInfo() const override
const X86FrameLowering * getFrameLowering() const override
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ X86
Windows x64, Windows Itanium (IA-64)
X86II - This namespace holds all of the target specific flags that instruction info tracks.
bool isKMergeMasked(uint64_t TSFlags)
bool hasNewDataDest(uint64_t TSFlags)
@ MO_GOT_ABSOLUTE_ADDRESS
MO_GOT_ABSOLUTE_ADDRESS - On a symbol operand, this represents a relocation of: SYMBOL_LABEL + [.
@ MO_INDNTPOFF
MO_INDNTPOFF - On a symbol operand this indicates that the immediate is the absolute address of the G...
@ MO_GOTNTPOFF
MO_GOTNTPOFF - On a symbol operand this indicates that the immediate is the offset of the GOT entry w...
@ MO_GOTTPOFF
MO_GOTTPOFF - On a symbol operand this indicates that the immediate is the offset of the GOT entry wi...
@ MO_PIC_BASE_OFFSET
MO_PIC_BASE_OFFSET - On a symbol operand this indicates that the immediate should get the value of th...
@ MO_GOTPCREL
MO_GOTPCREL - On a symbol operand this indicates that the immediate is offset to the GOT entry for th...
@ EVEX
EVEX - Specifies that this instruction use EVEX form which provides syntax support up to 32 512-bit r...
@ SSEDomainShift
Execution domain for SSE instructions.
bool canUseApxExtendedReg(const MCInstrDesc &Desc)
bool isPseudo(uint64_t TSFlags)
bool isKMasked(uint64_t TSFlags)
int getMemoryOperandNo(uint64_t TSFlags)
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
Define some predicates that are used for node matching.
CondCode getCondFromBranch(const MachineInstr &MI)
CondCode getCondFromCFCMov(const MachineInstr &MI)
CondCode getCondFromMI(const MachineInstr &MI)
Return the condition code of the instruction.
int getFirstAddrOperandIdx(const MachineInstr &MI)
Return the index of the instruction's first address operand, if it has a memory reference,...
unsigned getSwappedVCMPImm(unsigned Imm)
Get the VCMP immediate if the opcodes are swapped.
CondCode GetOppositeBranchCondition(CondCode CC)
GetOppositeBranchCondition - Return the inverse of the specified cond, e.g.
unsigned getSwappedVPCOMImm(unsigned Imm)
Get the VPCOM immediate if the opcodes are swapped.
bool isX87Instruction(MachineInstr &MI)
Check if the instruction is X87 instruction.
unsigned getNonNDVariant(unsigned Opc)
unsigned getVPCMPImmForCond(ISD::CondCode CC)
Get the VPCMP immediate for the given condition.
std::pair< CondCode, bool > getX86ConditionCode(CmpInst::Predicate Predicate)
Return a pair of condition code for the given predicate and whether the instruction operands should b...
CondCode getCondFromSETCC(const MachineInstr &MI)
unsigned getSwappedVPCMPImm(unsigned Imm)
Get the VPCMP immediate if the opcodes are swapped.
CondCode getCondFromCCMP(const MachineInstr &MI)
int getCCMPCondFlagsFromCondCode(CondCode CC)
int getCondSrcNoFromDesc(const MCInstrDesc &MCID)
Return the source operand # for condition code by MCID.
const Constant * getConstantFromPool(const MachineInstr &MI, unsigned OpNo)
Find any constant pool entry associated with a specific instruction operand.
unsigned getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand=false, bool HasNDD=false)
Return a cmov opcode for the given register size in bytes, and operand type.
unsigned getNFVariant(unsigned Opc)
unsigned getVectorRegisterWidth(const MCOperandInfo &Info)
Get the width of the vector register operand.
CondCode getCondFromCMov(const MachineInstr &MI)
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
NodeAddr< NodeBase * > Node
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static bool isAddMemInstrWithRelocation(const MachineInstr &MI)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
static bool isMem(const MachineInstr &MI, unsigned Op)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
FunctionPass * createX86GlobalBaseRegPass()
This pass initializes a global base register for PIC on x86-32.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
static const MachineInstrBuilder & addRegReg(const MachineInstrBuilder &MIB, Register Reg1, bool isKill1, unsigned SubReg1, Register Reg2, bool isKill2, unsigned SubReg2)
addRegReg - This function is used to add a memory reference of the form: [Reg + Reg].
unsigned getDeadRegState(bool B)
static const MachineInstrBuilder & addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset=0, bool mem=true)
addFrameReference - This function is used to add a reference to the base of an abstract object on the...
FunctionPass * createCleanupLocalDynamicTLSPass()
This pass combines multiple accesses to local-dynamic TLS variables so that the TLS base address for ...
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
const X86FoldTableEntry * lookupBroadcastFoldTable(unsigned RegOp, unsigned OpNum)
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
const X86InstrFMA3Group * getFMA3Group(unsigned Opcode, uint64_t TSFlags)
Returns a reference to a group of FMA3 opcodes to where the given Opcode is included.
auto reverse(ContainerTy &&C)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
const X86FoldTableEntry * lookupTwoAddrFoldTable(unsigned RegOp)
FunctionAddr VTableAddr Count
bool is_sorted(R &&Range, Compare C)
Wrapper function around std::is_sorted to check if elements in a range R are sorted with respect to a...
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
DomTreeNodeBase< MachineBasicBlock > MachineDomTreeNode
static bool isMemInstrWithGOTPCREL(const MachineInstr &MI)
static const MachineInstrBuilder & addOffset(const MachineInstrBuilder &MIB, int Offset)
unsigned getUndefRegState(bool B)
unsigned getRegState(const MachineOperand &RegOp)
Get all register state flags from machine operand RegOp.
unsigned getDefRegState(bool B)
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
@ Sub
Subtraction of integers.
unsigned getKillRegState(bool B)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
FunctionAddr VTableAddr Next
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
const X86FoldTableEntry * lookupUnfoldTable(unsigned MemOp)
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool matchBroadcastSize(const X86FoldTableEntry &Entry, unsigned BroadcastBits)
std::pair< MachineOperand, DIExpression * > ParamLoadedValue
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
const X86FoldTableEntry * lookupFoldTable(unsigned RegOp, unsigned OpNum)
static const MachineInstrBuilder & addRegOffset(const MachineInstrBuilder &MIB, Register Reg, bool isKill, int Offset)
addRegOffset - This function is used to add a memory reference of the form [Reg + Offset],...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
std::vector< MachineInstr * > Kills
Kills - List of MachineInstruction's which are the last use of this virtual register (kill it) in the...
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
X86AddressMode - This struct holds a generalized full x86 address mode.
enum llvm::X86AddressMode::@202116273335065351270200035056227005202106004277 BaseType
This class is used to group {132, 213, 231} forms of FMA opcodes together.
unsigned get213Opcode() const
Returns the 213 form of FMA opcode.
unsigned get231Opcode() const
Returns the 231 form of FMA opcode.
bool isIntrinsic() const
Returns true iff the group of FMA opcodes holds intrinsic opcodes.
unsigned get132Opcode() const
Returns the 132 form of FMA opcode.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.