#define DEBUG_TYPE "x86-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "X86GenInstrInfo.inc"
static cl::opt<bool>
    NoFusing("disable-spill-fusing",
             cl::desc("Disable fusing of spill code into instructions"),
             cl::Hidden);
static cl::opt<bool>
    PrintFailedFusing("print-failed-fuse-candidates",
                      cl::desc("Print instructions that the allocator wants to"
                               " fuse, but the X86 backend currently can't"),
                      cl::Hidden);
static cl::opt<bool>
    ReMatPICStubLoad("remat-pic-stub-load",
                     cl::desc("Re-materialize load from stub in PIC mode"),
                     cl::Hidden);
static cl::opt<unsigned>
    PartialRegUpdateClearance("partial-reg-update-clearance",
                              cl::desc("Clearance between two register writes "
                                       "for inserting XOR to avoid partial "
                                       "register update"),
                              cl::init(64), cl::Hidden);
static cl::opt<unsigned> UndefRegClearance(
    "undef-reg-clearance",
    cl::desc("How many idle instructions we would like before "
             "certain undef register reads"),
    cl::init(128), cl::Hidden);
void X86InstrInfo::anchor() {}
X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
    : X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
                                               : X86::ADJCALLSTACKDOWN32),
                      (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
                                               : X86::ADJCALLSTACKUP32),
                      X86::CATCHRET, (STI.is64Bit() ? X86::RET64 : X86::RET32)),
      Subtarget(STI), RI(STI.getTargetTriple()) {}
  if (!RC || !Subtarget.hasEGPR())
bool X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                         Register &SrcReg, Register &DstReg,
                                         unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case X86::MOVSX16rr8:
  case X86::MOVZX16rr8:
  case X86::MOVSX32rr8:
  case X86::MOVZX32rr8:
  case X86::MOVSX64rr8:
    if (!Subtarget.is64Bit())
      // It's not always legal to reference the low 8-bit of the larger
      // register in 32-bit mode.
      return false;
    [[fallthrough]];
  case X86::MOVSX32rr16:
  case X86::MOVZX32rr16:
  case X86::MOVSX64rr16:
  case X86::MOVSX64rr32: {
    if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
      // Be conservative.
      return false;
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unreachable!");
    case X86::MOVSX16rr8:
    case X86::MOVZX16rr8:
    case X86::MOVSX32rr8:
    case X86::MOVZX32rr8:
    case X86::MOVSX64rr8:
      SubIdx = X86::sub_8bit;
      break;
    case X86::MOVSX32rr16:
    case X86::MOVZX32rr16:
    case X86::MOVSX64rr16:
      SubIdx = X86::sub_16bit;
      break;
    case X86::MOVSX64rr32:
      SubIdx = X86::sub_32bit;
      break;
    }
    return true;
  }
  }
  return false;
}
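// A "coalescable" extension lets the register coalescer treat the source as
// the SubIdx piece of the destination: e.g. %rax = MOVSX64rr32 %eax allows
// %eax to be allocated as the sub_32bit half of %rax with no extra copy.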
  // Anything with a load or store cannot be handled here; memory operations
  // are never data invariant from this analysis's point of view.
  if (MI.mayLoad() || MI.mayStore())
    return false;

  // Copies and subregister inserts lower to plain register moves.
  if (MI.isCopyLike() || MI.isInsertSubreg())
    return true;

  unsigned Opcode = MI.getOpcode();
  // Bit scanning and counting instructions.
  if (isBSF(Opcode) || isBSR(Opcode) || isLZCNT(Opcode) || isPOPCNT(Opcode) ||
      isTZCNT(Opcode))
    return true;
  // Bit manipulation instructions are effectively combinations of basic
  // arithmetic ops, and should still execute in constant time.
  if (isBLCFILL(Opcode) || isBLCI(Opcode) || isBLCIC(Opcode) ||
      isBLCMSK(Opcode) || isBLCS(Opcode) || isBLSFILL(Opcode) ||
      isBLSI(Opcode) || isBLSIC(Opcode) || isBLSMSK(Opcode) || isBLSR(Opcode) ||
      isTZMSK(Opcode))
    return true;
  // Bit extracting and clearing instructions.
  if (isBEXTR(Opcode) || isBZHI(Opcode))
    return true;
  // Shifts and rotates.
  if (isROL(Opcode) || isROR(Opcode) || isSAR(Opcode) || isSHL(Opcode) ||
      isSHR(Opcode) || isSHLD(Opcode) || isSHRD(Opcode))
    return true;
  // Basic arithmetic is constant time on the input but does set flags.
  if (isADC(Opcode) || isADD(Opcode) || isAND(Opcode) || isOR(Opcode) ||
      isSBB(Opcode) || isSUB(Opcode) || isXOR(Opcode))
    return true;
  // Unary arithmetic operations.
  if (isDEC(Opcode) || isINC(Opcode) || isNEG(Opcode))
    return true;
  // Various move instructions used to zero or sign extend things.
  if (isMOVSX(Opcode) || isMOVZX(Opcode) || isMOVSXD(Opcode) || isMOV(Opcode))
    return true;
  // Arithmetic instructions that are both constant time and don't set flags.
  if (isRORX(Opcode) || isSARX(Opcode) || isSHLX(Opcode) || isSHRX(Opcode))
    return true;
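// Data-invariant instructions have latency and port usage that do not depend
// on their operand *values*. Speculative load hardening relies on this
// classification so that masking a loaded value does not reintroduce a
// value-dependent timing channel.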
  switch (MI.getOpcode()) {
  case X86::IMUL64rmi32:
  case X86::POPCNT16rm:
  case X86::POPCNT32rm:
  case X86::POPCNT64rm:
  case X86::BLCFILL32rm:
  case X86::BLCFILL64rm:
  case X86::BLCMSK32rm:
  case X86::BLCMSK64rm:
  case X86::BLSFILL32rm:
  case X86::BLSFILL64rm:
  case X86::BLSMSK32rm:
  case X86::BLSMSK64rm:
  case X86::BEXTRI32mi:
  case X86::BEXTRI64mi:
  case X86::CVTTSD2SI64rm:
  case X86::VCVTTSD2SI64rm:
  case X86::VCVTTSD2SI64Zrm:
  case X86::CVTTSD2SIrm:
  case X86::VCVTTSD2SIrm:
  case X86::VCVTTSD2SIZrm:
  case X86::CVTTSS2SI64rm:
  case X86::VCVTTSS2SI64rm:
  case X86::VCVTTSS2SI64Zrm:
  case X86::CVTTSS2SIrm:
  case X86::VCVTTSS2SIrm:
  case X86::VCVTTSS2SIZrm:
  case X86::CVTSI2SDrm:
  case X86::VCVTSI2SDrm:
  case X86::VCVTSI2SDZrm:
  case X86::CVTSI2SSrm:
  case X86::VCVTSI2SSrm:
  case X86::VCVTSI2SSZrm:
  case X86::CVTSI642SDrm:
  case X86::VCVTSI642SDrm:
  case X86::VCVTSI642SDZrm:
  case X86::CVTSI642SSrm:
  case X86::VCVTSI642SSrm:
  case X86::VCVTSI642SSZrm:
  case X86::CVTSS2SDrm:
  case X86::VCVTSS2SDrm:
  case X86::VCVTSS2SDZrm:
  case X86::CVTSD2SSrm:
  case X86::VCVTSD2SSrm:
  case X86::VCVTSD2SSZrm:
  case X86::VCVTTSD2USI64Zrm:
  case X86::VCVTTSD2USIZrm:
  case X86::VCVTTSS2USI64Zrm:
  case X86::VCVTTSS2USIZrm:
  case X86::VCVTUSI2SDZrm:
  case X86::VCVTUSI642SDZrm:
  case X86::VCVTUSI2SSZrm:
  case X86::VCVTUSI642SSZrm:
  case X86::MOV8rm_NOREX:
  case X86::MOVSX16rm8:
  case X86::MOVSX32rm16:
  case X86::MOVSX32rm8:
  case X86::MOVSX32rm8_NOREX:
  case X86::MOVSX64rm16:
  case X86::MOVSX64rm32:
  case X86::MOVSX64rm8:
  case X86::MOVZX16rm8:
  case X86::MOVZX32rm16:
  case X86::MOVZX32rm8:
  case X86::MOVZX32rm8_NOREX:
  case X86::MOVZX64rm16:
  case X86::MOVZX64rm8:
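// These folded-load forms have no behavior that depends on the loaded value
// (only on the address), which is the property isDataInvariantLoad must
// certify for speculative load hardening.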
int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();

  if (isFrameInstr(MI)) {
    int SPAdj = alignTo(getFrameSize(MI), TFI->getStackAlign());
    SPAdj -= getFrameAdjustment(MI);
    if (!isFrameSetup(MI))
      SPAdj = -SPAdj;
    return SPAdj;
  }

  // To know whether a call adjusts the stack, we need information that is
  // bound to the following ADJCALLSTACKUP pseudo: look for it.
  if (MI.isCall()) {
    const MachineBasicBlock *MBB = MI.getParent();
    auto I = ++MachineBasicBlock::const_iterator(MI);
    for (auto E = MBB->end(); I != E; ++I) {
      if (I->getOpcode() == getCallFrameDestroyOpcode() || I->isCall())
        break;
    }

    // If we could not find a frame destroy opcode, then it has already
    // been simplified, so we don't care.
    if (I->getOpcode() != getCallFrameDestroyOpcode())
      return 0;

    return -(I->getOperand(1).getImm());
  }

  // Currently handle only PUSHes we can reasonably expect to see
  // in call sequences.
  switch (MI.getOpcode()) {
                                           int &FrameIndex) const {

  case X86::KMOVBkm_EVEX:
  case X86::KMOVWkm_EVEX:
  case X86::VMOVSHZrm_alt:
  case X86::MOVSSrm_alt:
  case X86::VMOVSSrm_alt:
  case X86::VMOVSSZrm_alt:
  case X86::KMOVDkm_EVEX:
  case X86::MOVSDrm_alt:
  case X86::VMOVSDrm_alt:
  case X86::VMOVSDZrm_alt:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::KMOVQkm_EVEX:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVUPSZ128rm:
  case X86::VMOVAPSZ128rm_NOVLX:
  case X86::VMOVUPSZ128rm_NOVLX:
  case X86::VMOVAPDZ128rm:
  case X86::VMOVUPDZ128rm:
  case X86::VMOVDQU8Z128rm:
  case X86::VMOVDQU16Z128rm:
  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQU64Z128rm:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm_NOVLX:
  case X86::VMOVUPSZ256rm_NOVLX:
  case X86::VMOVAPDZ256rm:
  case X86::VMOVUPDZ256rm:
  case X86::VMOVDQU8Z256rm:
  case X86::VMOVDQU16Z256rm:
  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVAPSZrm:
  case X86::VMOVUPSZrm:
  case X86::VMOVAPDZrm:
  case X86::VMOVUPDZrm:
  case X86::VMOVDQU8Zrm:
  case X86::VMOVDQU16Zrm:
  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU64Zrm:
  case X86::KMOVBmk_EVEX:
  case X86::KMOVWmk_EVEX:
  case X86::KMOVDmk_EVEX:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
  case X86::KMOVQmk_EVEX:
  case X86::VMOVUPSZ128mr:
  case X86::VMOVAPSZ128mr:
  case X86::VMOVUPSZ128mr_NOVLX:
  case X86::VMOVAPSZ128mr_NOVLX:
  case X86::VMOVUPDZ128mr:
  case X86::VMOVAPDZ128mr:
  case X86::VMOVDQA32Z128mr:
  case X86::VMOVDQU32Z128mr:
  case X86::VMOVDQA64Z128mr:
  case X86::VMOVDQU64Z128mr:
  case X86::VMOVDQU8Z128mr:
  case X86::VMOVDQU16Z128mr:
  case X86::VMOVUPSYmr:
  case X86::VMOVAPSYmr:
  case X86::VMOVUPDYmr:
  case X86::VMOVAPDYmr:
  case X86::VMOVDQUYmr:
  case X86::VMOVDQAYmr:
  case X86::VMOVUPSZ256mr:
  case X86::VMOVAPSZ256mr:
  case X86::VMOVUPSZ256mr_NOVLX:
  case X86::VMOVAPSZ256mr_NOVLX:
  case X86::VMOVUPDZ256mr:
  case X86::VMOVAPDZ256mr:
  case X86::VMOVDQU8Z256mr:
  case X86::VMOVDQU16Z256mr:
  case X86::VMOVDQA32Z256mr:
  case X86::VMOVDQU32Z256mr:
  case X86::VMOVDQA64Z256mr:
  case X86::VMOVDQU64Z256mr:
  case X86::VMOVUPSZmr:
  case X86::VMOVAPSZmr:
  case X86::VMOVUPDZmr:
  case X86::VMOVAPDZmr:
  case X86::VMOVDQU8Zmr:
  case X86::VMOVDQU16Zmr:
  case X86::VMOVDQA32Zmr:
  case X86::VMOVDQU32Zmr:
  case X86::VMOVDQA64Zmr:
  case X86::VMOVDQU64Zmr:
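// The two opcode lists above drive isLoadFromStackSlot/isStoreToStackSlot:
// spills and reloads are recognized purely by opcode plus a frame-index
// memory operand, grouped by access size from 1 byte up to 64-byte ZMM.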
                                           int &FrameIndex) const {
  if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
    return MI.getOperand(0).getReg();

                                                 int &FrameIndex) const {
    return MI.getOperand(0).getReg();

                                          int &FrameIndex) const {
      isFrameOperand(MI, 0, FrameIndex))

                                                int &FrameIndex) const {
/// Return true if register is PIC base, i.e. defined by X86::MOVPC32r.
static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI) {
  // Don't waste compile time scanning use-def chains of physregs.
  if (!BaseReg.isVirtual())
    return false;
  bool isPICBase = false;
  for (const MachineInstr &DefMI : MRI.def_instructions(BaseReg)) {
    if (DefMI.getOpcode() != X86::MOVPC32r)
      return false;
    assert(!isPICBase && "More than one PIC base?");
    isPICBase = true;
  }
  return isPICBase;
}
  switch (MI.getOpcode()) {
  case X86::IMPLICIT_DEF:
  case X86::LOAD_STACK_GUARD:
  case X86::AVX1_SETALLONES:
  case X86::AVX2_SETALLONES:
  case X86::AVX512_128_SET0:
  case X86::AVX512_256_SET0:
  case X86::AVX512_512_SET0:
  case X86::AVX512_512_SETALLONES:
  case X86::AVX512_FsFLD0SD:
  case X86::AVX512_FsFLD0SH:
  case X86::AVX512_FsFLD0SS:
  case X86::AVX512_FsFLD0F128:
  case X86::FsFLD0F128:
  case X86::MOV32ImmSExti8:
  case X86::MOV64ImmSExti8:
  case X86::V_SETALLONES:
  case X86::PTILEZEROV:
  case X86::MOV8rm_NOREX:
  case X86::MOVSSrm_alt:
  case X86::MOVSDrm_alt:
  case X86::VMOVSSrm_alt:
  case X86::VMOVSDrm_alt:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::VBROADCASTSSrm:
  case X86::VBROADCASTSSYrm:
  case X86::VBROADCASTSDYrm:
  case X86::VPBROADCASTBZ128rm:
  case X86::VPBROADCASTBZ256rm:
  case X86::VPBROADCASTBZrm:
  case X86::VBROADCASTF32X2Z256rm:
  case X86::VBROADCASTF32X2Zrm:
  case X86::VBROADCASTI32X2Z128rm:
  case X86::VBROADCASTI32X2Z256rm:
  case X86::VBROADCASTI32X2Zrm:
  case X86::VPBROADCASTWZ128rm:
  case X86::VPBROADCASTWZ256rm:
  case X86::VPBROADCASTWZrm:
  case X86::VPBROADCASTDZ128rm:
  case X86::VPBROADCASTDZ256rm:
  case X86::VPBROADCASTDZrm:
  case X86::VBROADCASTSSZ128rm:
  case X86::VBROADCASTSSZ256rm:
  case X86::VBROADCASTSSZrm:
  case X86::VPBROADCASTQZ128rm:
  case X86::VPBROADCASTQZ256rm:
  case X86::VPBROADCASTQZrm:
  case X86::VBROADCASTSDZ256rm:
  case X86::VBROADCASTSDZrm:
  case X86::VMOVSSZrm_alt:
  case X86::VMOVSDZrm_alt:
  case X86::VMOVSHZrm_alt:
  case X86::VMOVAPDZ128rm:
  case X86::VMOVAPDZ256rm:
  case X86::VMOVAPDZrm:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVAPSZ128rm_NOVLX:
  case X86::VMOVAPSZ256rm_NOVLX:
  case X86::VMOVAPSZrm:
  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU16Z128rm:
  case X86::VMOVDQU16Z256rm:
  case X86::VMOVDQU16Zrm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQU64Z128rm:
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQU64Zrm:
  case X86::VMOVDQU8Z128rm:
  case X86::VMOVDQU8Z256rm:
  case X86::VMOVDQU8Zrm:
  case X86::VMOVUPDZ128rm:
  case X86::VMOVUPDZ256rm:
  case X86::VMOVUPDZrm:
  case X86::VMOVUPSZ128rm:
  case X86::VMOVUPSZ256rm:
  case X86::VMOVUPSZ128rm_NOVLX:
  case X86::VMOVUPSZ256rm_NOVLX:
  case X86::VMOVUPSZrm: {
    // Loads from constant pools are trivially rematerializable.
    if (MI.getOperand(1 + X86::AddrBaseReg).isReg() &&
        MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
        MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
        MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
        MI.isDereferenceableInvariantLoad()) {
      Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
      if (BaseReg == 0 || BaseReg == X86::RIP)
        return true;
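      // Rematerialization is safe when re-executing the load cannot yield a
      // different value: constant materializations (V_SET0 and friends) and
      // dereferenceable invariant loads off a constant base (RIP or the PIC
      // base) qualify; a load from a mutable stack slot does not.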
  if (ClobbersEFLAGS && MBB.computeRegisterLiveness(&TRI, X86::EFLAGS, I) !=
                            MachineBasicBlock::LQR_Dead) {

    if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS &&
        !MO.isDead())
static unsigned getTruncatedShiftCount(const MachineInstr &MI,
                                       unsigned ShiftAmtOperandIdx) {
  // The shift count is six bits with REX.W and five bits without.
  unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
  unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm();
  return Imm & ShiftCountMask;
}

static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
  // Left shift instructions can be transformed into load-effective-address
  // instructions if we can encode them appropriately.
  // A LEA instruction utilizes a SIB byte to encode its scale factor.
  // The SIB.scale field is two bits wide, which means that we can encode any
  // shift amount of 1, 2, or 3.
  return ShAmt < 4 && ShAmt > 0;
}
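// Example: a 64-bit shift-immediate of 67 truncates to 67 & 63 == 3, so
// "x << 67" behaves as "x << 3" and can be encoded as an LEA with SIB scale
// 8; counts of 0 or >= 4 have no LEA scale encoding.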
                                   bool &NoSignFlag, bool &ClearsOverflowFlag) {
  if (!(CmpValDefInstr.getOpcode() == X86::SUBREG_TO_REG &&
        CmpInstr.getOpcode() == X86::TEST64rr) &&
      !(CmpValDefInstr.getOpcode() == X86::COPY &&
        CmpInstr.getOpcode() == X86::TEST16rr))
    return false;

         "CmpInstr is an analyzable TEST16rr/TEST64rr, and "
         "`X86InstrInfo::analyzeCompare` requires two reg operands are the"
         " same.");

         "Caller guarantees that TEST64rr is a user of SUBREG_TO_REG or TEST16rr "
         "is a user of COPY sub16bit.");

  if (CmpInstr.getOpcode() == X86::TEST16rr) {

    if (!((VregDefInstr->getOpcode() == X86::AND32ri ||
           VregDefInstr->getOpcode() == X86::AND64ri32) &&

  if (CmpInstr.getOpcode() == X86::TEST64rr) {

  assert(VregDefInstr && "Must have a definition (SSA)");

  if (X86::isAND(VregDefInstr->getOpcode()) &&

      if (Instr.modifiesRegister(X86::EFLAGS, TRI))

    *AndInstr = VregDefInstr;

    ClearsOverflowFlag = true;
                                      unsigned &NewSrcSubReg, bool &isKill,

    RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;

    RC = Opc != X86::LEA32r ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;

  unsigned SubReg = Src.getSubReg();
  isKill = MI.killsRegister(SrcReg, nullptr);
  NewSrcSubReg = X86::NoSubRegister;

  if (Opc != X86::LEA64_32r) {

    assert(!Src.isUndef() && "Undef op doesn't need optimization");

    assert(!Src.isUndef() && "Undef op doesn't need optimization");

  NewSrcSubReg = X86::NoSubRegister;
MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
                                                         MachineInstr &MI,
                                                         LiveVariables *LV,
                                                         LiveIntervals *LIS,
                                                         bool Is8BitOp) const {
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  assert((Is8BitOp ||
          RegInfo.getTargetRegisterInfo()->getRegSizeInBits(
              *RegInfo.getRegClass(MI.getOperand(0).getReg())) == 16) &&
         "Unexpected type for LEA transform");

  if (!Subtarget.is64Bit())
    return nullptr;

  unsigned Opcode = X86::LEA64_32r;
  Register InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
  Register OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass);

  unsigned SrcSubReg = MI.getOperand(1).getSubReg();
  unsigned Src2SubReg;
  bool IsDead = MI.getOperand(0).isDead();
  bool IsKill = MI.getOperand(1).isKill();
  unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit;
  assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization");

#define CASE_NF(OP)                                                            \

    unsigned ShAmt = MI.getOperand(2).getImm();

  case X86::ADD8ri_DB:
  case X86::ADD16ri_DB:

  case X86::ADD8rr_DB:
  case X86::ADD16rr_DB: {
    Src2 = MI.getOperand(2).getReg();
    Src2SubReg = MI.getOperand(2).getSubReg();
    bool IsKill2 = MI.getOperand(2).isKill();
    assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization");

      addRegReg(MIB, InRegLEA, true, X86::NoSubRegister, InRegLEA, false,
                X86::NoSubRegister);

      if (Subtarget.is64Bit())

      ImpDef2 = BuildMI(MBB, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF),
                        InRegLEA2);
      InsMI2 = BuildMI(MBB, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))

      addRegReg(MIB, InRegLEA, true, X86::NoSubRegister, InRegLEA2, true,
                X86::NoSubRegister);

    if (LV && IsKill2 && InsMI2)

  MachineInstr *NewMI = MIB;
  MachineInstr *ExtMI =

  LiveRange::Segment *DestSeg =
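// The 8/16-bit LEA trick used above: x86 has no 8- or 16-bit LEA, so the
// narrow operands are IMPLICIT_DEF'd into fresh 32/64-bit registers, copied
// in as sub_8bit/sub_16bit, combined with a single LEA64_32r, and the result
// is copied back out of the wide destination register.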
  if (MI.getNumOperands() > 2)
    if (MI.getOperand(2).isReg() && MI.getOperand(2).isUndef())

  unsigned SrcSubReg, SrcSubReg2;
  bool Is64Bit = Subtarget.is64Bit();
  bool Is8BitOp = false;
  unsigned NumRegOperands = 2;
  unsigned MIOpc = MI.getOpcode();

    assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");

                                  Src.getReg(), &X86::GR64_NOSPRegClass))

    NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))

    assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");

    unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;

                        isKill, ImplicitOp, LV, LIS))

    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    if (LV && SrcReg != Src.getReg())

    assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");

    return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);

    assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
    unsigned Opc = (MIOpc == X86::INC64r || MIOpc == X86::INC64r_NF)
                       ? X86::LEA64r
                       : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);

                        isKill, ImplicitOp, LV, LIS))

    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    if (LV && SrcReg != Src.getReg())

    assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
    unsigned Opc = (MIOpc == X86::DEC64r || MIOpc == X86::DEC64r_NF)
                       ? X86::LEA64r
                       : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);

                        isKill, ImplicitOp, LV, LIS))

    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    if (LV && SrcReg != Src.getReg())

    return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);
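// Typical conversions driven here: "INC32r %x" becomes "LEA32r %dst, [%x + 1]"
// and "ADD32rr %x, %y" becomes "LEA32r %dst, [%x + %y]"; the LEA form is
// three-address (destination not tied to a source) and does not touch EFLAGS.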
  case X86::ADD64rr_DB:
  case X86::ADD32rr_DB: {
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    unsigned Opc;
    if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_NF ||
        MIOpc == X86::ADD64rr_DB)
      Opc = X86::LEA64r;
    else
      Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;

                        isKill2, ImplicitOp2, LV, LIS))

    if (Src.getReg() == Src2.getReg()) {

      SrcSubReg = SrcSubReg2;

                          isKill, ImplicitOp, LV, LIS))

    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);
    if (ImplicitOp2.getReg() != 0)
      MIB.add(ImplicitOp2);

    addRegReg(MIB, SrcReg, isKill, SrcSubReg, SrcReg2, isKill2, SrcSubReg2);

      if (SrcReg2 != Src2.getReg())

      if (SrcReg != SrcReg2 && SrcReg != Src.getReg())

  case X86::ADD8rr_DB:

  case X86::ADD16rr_DB:
    return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);

  case X86::ADD64ri32_DB:
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    NewMI = addOffset(
        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
        MI.getOperand(2));
    break;

  case X86::ADD32ri_DB: {
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;

                        isKill, ImplicitOp, LV, LIS))

    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    if (LV && SrcReg != Src.getReg())

  case X86::ADD8ri_DB:

  case X86::ADD16ri_DB:
    return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);

    if (!MI.getOperand(2).isImm())
    int64_t Imm = MI.getOperand(2).getImm();

    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;

                        isKill, ImplicitOp, LV, LIS))

    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    if (LV && SrcReg != Src.getReg())

    if (!MI.getOperand(2).isImm())
    int64_t Imm = MI.getOperand(2).getImm();

    assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!");
  case X86::VMOVDQU8Z128rmk:
  case X86::VMOVDQU8Z256rmk:
  case X86::VMOVDQU8Zrmk:
  case X86::VMOVDQU16Z128rmk:
  case X86::VMOVDQU16Z256rmk:
  case X86::VMOVDQU16Zrmk:
  case X86::VMOVDQU32Z128rmk:
  case X86::VMOVDQA32Z128rmk:
  case X86::VMOVDQU32Z256rmk:
  case X86::VMOVDQA32Z256rmk:
  case X86::VMOVDQU32Zrmk:
  case X86::VMOVDQA32Zrmk:
  case X86::VMOVDQU64Z128rmk:
  case X86::VMOVDQA64Z128rmk:
  case X86::VMOVDQU64Z256rmk:
  case X86::VMOVDQA64Z256rmk:
  case X86::VMOVDQU64Zrmk:
  case X86::VMOVDQA64Zrmk:
  case X86::VMOVUPDZ128rmk:
  case X86::VMOVAPDZ128rmk:
  case X86::VMOVUPDZ256rmk:
  case X86::VMOVAPDZ256rmk:
  case X86::VMOVUPDZrmk:
  case X86::VMOVAPDZrmk:
  case X86::VMOVUPSZ128rmk:
  case X86::VMOVAPSZ128rmk:
  case X86::VMOVUPSZ256rmk:
  case X86::VMOVAPSZ256rmk:
  case X86::VMOVUPSZrmk:
  case X86::VMOVAPSZrmk:
  case X86::VBROADCASTSDZ256rmk:
  case X86::VBROADCASTSDZrmk:
  case X86::VBROADCASTSSZ128rmk:
  case X86::VBROADCASTSSZ256rmk:
  case X86::VBROADCASTSSZrmk:
  case X86::VPBROADCASTDZ128rmk:
  case X86::VPBROADCASTDZ256rmk:
  case X86::VPBROADCASTDZrmk:
  case X86::VPBROADCASTQZ128rmk:
  case X86::VPBROADCASTQZ256rmk:
  case X86::VPBROADCASTQZrmk: {
    unsigned Opc;
    switch (MIOpc) {
    default:
      llvm_unreachable("Unreachable!");
    case X86::VMOVDQU8Z128rmk:
      Opc = X86::VPBLENDMBZ128rmk;
      break;
    case X86::VMOVDQU8Z256rmk:
      Opc = X86::VPBLENDMBZ256rmk;
      break;
    case X86::VMOVDQU8Zrmk:
      Opc = X86::VPBLENDMBZrmk;
      break;
    case X86::VMOVDQU16Z128rmk:
      Opc = X86::VPBLENDMWZ128rmk;
      break;
    case X86::VMOVDQU16Z256rmk:
      Opc = X86::VPBLENDMWZ256rmk;
      break;
    case X86::VMOVDQU16Zrmk:
      Opc = X86::VPBLENDMWZrmk;
      break;
    case X86::VMOVDQU32Z128rmk:
      Opc = X86::VPBLENDMDZ128rmk;
      break;
    case X86::VMOVDQU32Z256rmk:
      Opc = X86::VPBLENDMDZ256rmk;
      break;
    case X86::VMOVDQU32Zrmk:
      Opc = X86::VPBLENDMDZrmk;
      break;
    case X86::VMOVDQU64Z128rmk:
      Opc = X86::VPBLENDMQZ128rmk;
      break;
    case X86::VMOVDQU64Z256rmk:
      Opc = X86::VPBLENDMQZ256rmk;
      break;
    case X86::VMOVDQU64Zrmk:
      Opc = X86::VPBLENDMQZrmk;
      break;
    case X86::VMOVUPDZ128rmk:
      Opc = X86::VBLENDMPDZ128rmk;
      break;
    case X86::VMOVUPDZ256rmk:
      Opc = X86::VBLENDMPDZ256rmk;
      break;
    case X86::VMOVUPDZrmk:
      Opc = X86::VBLENDMPDZrmk;
      break;
    case X86::VMOVUPSZ128rmk:
      Opc = X86::VBLENDMPSZ128rmk;
      break;
    case X86::VMOVUPSZ256rmk:
      Opc = X86::VBLENDMPSZ256rmk;
      break;
    case X86::VMOVUPSZrmk:
      Opc = X86::VBLENDMPSZrmk;
      break;
    case X86::VMOVDQA32Z128rmk:
      Opc = X86::VPBLENDMDZ128rmk;
      break;
    case X86::VMOVDQA32Z256rmk:
      Opc = X86::VPBLENDMDZ256rmk;
      break;
    case X86::VMOVDQA32Zrmk:
      Opc = X86::VPBLENDMDZrmk;
      break;
    case X86::VMOVDQA64Z128rmk:
      Opc = X86::VPBLENDMQZ128rmk;
      break;
    case X86::VMOVDQA64Z256rmk:
      Opc = X86::VPBLENDMQZ256rmk;
      break;
    case X86::VMOVDQA64Zrmk:
      Opc = X86::VPBLENDMQZrmk;
      break;
    case X86::VMOVAPDZ128rmk:
      Opc = X86::VBLENDMPDZ128rmk;
      break;
    case X86::VMOVAPDZ256rmk:
      Opc = X86::VBLENDMPDZ256rmk;
      break;
    case X86::VMOVAPDZrmk:
      Opc = X86::VBLENDMPDZrmk;
      break;
    case X86::VMOVAPSZ128rmk:
      Opc = X86::VBLENDMPSZ128rmk;
      break;
    case X86::VMOVAPSZ256rmk:
      Opc = X86::VBLENDMPSZ256rmk;
      break;
    case X86::VMOVAPSZrmk:
      Opc = X86::VBLENDMPSZrmk;
      break;
    case X86::VBROADCASTSDZ256rmk:
      Opc = X86::VBLENDMPDZ256rmbk;
      break;
    case X86::VBROADCASTSDZrmk:
      Opc = X86::VBLENDMPDZrmbk;
      break;
    case X86::VBROADCASTSSZ128rmk:
      Opc = X86::VBLENDMPSZ128rmbk;
      break;
    case X86::VBROADCASTSSZ256rmk:
      Opc = X86::VBLENDMPSZ256rmbk;
      break;
    case X86::VBROADCASTSSZrmk:
      Opc = X86::VBLENDMPSZrmbk;
      break;
    case X86::VPBROADCASTDZ128rmk:
      Opc = X86::VPBLENDMDZ128rmbk;
      break;
    case X86::VPBROADCASTDZ256rmk:
      Opc = X86::VPBLENDMDZ256rmbk;
      break;
    case X86::VPBROADCASTDZrmk:
      Opc = X86::VPBLENDMDZrmbk;
      break;
    case X86::VPBROADCASTQZ128rmk:
      Opc = X86::VPBLENDMQZ128rmbk;
      break;
    case X86::VPBROADCASTQZ256rmk:
      Opc = X86::VPBLENDMQZ256rmbk;
      break;
    case X86::VPBROADCASTQZrmk:
      Opc = X86::VPBLENDMQZrmbk;
      break;
    }
    NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                .add(Dest)
                .add(MI.getOperand(2))
                .add(Src)
                .add(MI.getOperand(3))
                .add(MI.getOperand(4))
                .add(MI.getOperand(5))
                .add(MI.getOperand(6))
                .add(MI.getOperand(7));
  case X86::VMOVDQU8Z128rrk:
  case X86::VMOVDQU8Z256rrk:
  case X86::VMOVDQU8Zrrk:
  case X86::VMOVDQU16Z128rrk:
  case X86::VMOVDQU16Z256rrk:
  case X86::VMOVDQU16Zrrk:
  case X86::VMOVDQU32Z128rrk:
  case X86::VMOVDQA32Z128rrk:
  case X86::VMOVDQU32Z256rrk:
  case X86::VMOVDQA32Z256rrk:
  case X86::VMOVDQU32Zrrk:
  case X86::VMOVDQA32Zrrk:
  case X86::VMOVDQU64Z128rrk:
  case X86::VMOVDQA64Z128rrk:
  case X86::VMOVDQU64Z256rrk:
  case X86::VMOVDQA64Z256rrk:
  case X86::VMOVDQU64Zrrk:
  case X86::VMOVDQA64Zrrk:
  case X86::VMOVUPDZ128rrk:
  case X86::VMOVAPDZ128rrk:
  case X86::VMOVUPDZ256rrk:
  case X86::VMOVAPDZ256rrk:
  case X86::VMOVUPDZrrk:
  case X86::VMOVAPDZrrk:
  case X86::VMOVUPSZ128rrk:
  case X86::VMOVAPSZ128rrk:
  case X86::VMOVUPSZ256rrk:
  case X86::VMOVAPSZ256rrk:
  case X86::VMOVUPSZrrk:
  case X86::VMOVAPSZrrk: {
    unsigned Opc;
    switch (MIOpc) {
    default:
      llvm_unreachable("Unreachable!");
    case X86::VMOVDQU8Z128rrk:
      Opc = X86::VPBLENDMBZ128rrk;
      break;
    case X86::VMOVDQU8Z256rrk:
      Opc = X86::VPBLENDMBZ256rrk;
      break;
    case X86::VMOVDQU8Zrrk:
      Opc = X86::VPBLENDMBZrrk;
      break;
    case X86::VMOVDQU16Z128rrk:
      Opc = X86::VPBLENDMWZ128rrk;
      break;
    case X86::VMOVDQU16Z256rrk:
      Opc = X86::VPBLENDMWZ256rrk;
      break;
    case X86::VMOVDQU16Zrrk:
      Opc = X86::VPBLENDMWZrrk;
      break;
    case X86::VMOVDQU32Z128rrk:
      Opc = X86::VPBLENDMDZ128rrk;
      break;
    case X86::VMOVDQU32Z256rrk:
      Opc = X86::VPBLENDMDZ256rrk;
      break;
    case X86::VMOVDQU32Zrrk:
      Opc = X86::VPBLENDMDZrrk;
      break;
    case X86::VMOVDQU64Z128rrk:
      Opc = X86::VPBLENDMQZ128rrk;
      break;
    case X86::VMOVDQU64Z256rrk:
      Opc = X86::VPBLENDMQZ256rrk;
      break;
    case X86::VMOVDQU64Zrrk:
      Opc = X86::VPBLENDMQZrrk;
      break;
    case X86::VMOVUPDZ128rrk:
      Opc = X86::VBLENDMPDZ128rrk;
      break;
    case X86::VMOVUPDZ256rrk:
      Opc = X86::VBLENDMPDZ256rrk;
      break;
    case X86::VMOVUPDZrrk:
      Opc = X86::VBLENDMPDZrrk;
      break;
    case X86::VMOVUPSZ128rrk:
      Opc = X86::VBLENDMPSZ128rrk;
      break;
    case X86::VMOVUPSZ256rrk:
      Opc = X86::VBLENDMPSZ256rrk;
      break;
    case X86::VMOVUPSZrrk:
      Opc = X86::VBLENDMPSZrrk;
      break;
    case X86::VMOVDQA32Z128rrk:
      Opc = X86::VPBLENDMDZ128rrk;
      break;
    case X86::VMOVDQA32Z256rrk:
      Opc = X86::VPBLENDMDZ256rrk;
      break;
    case X86::VMOVDQA32Zrrk:
      Opc = X86::VPBLENDMDZrrk;
      break;
    case X86::VMOVDQA64Z128rrk:
      Opc = X86::VPBLENDMQZ128rrk;
      break;
    case X86::VMOVDQA64Z256rrk:
      Opc = X86::VPBLENDMQZ256rrk;
      break;
    case X86::VMOVDQA64Zrrk:
      Opc = X86::VPBLENDMQZrrk;
      break;
    case X86::VMOVAPDZ128rrk:
      Opc = X86::VBLENDMPDZ128rrk;
      break;
    case X86::VMOVAPDZ256rrk:
      Opc = X86::VBLENDMPDZ256rrk;
      break;
    case X86::VMOVAPDZrrk:
      Opc = X86::VBLENDMPDZrrk;
      break;
    case X86::VMOVAPSZ128rrk:
      Opc = X86::VBLENDMPSZ128rrk;
      break;
    case X86::VMOVAPSZ256rrk:
      Opc = X86::VBLENDMPSZ256rrk;
      break;
    case X86::VMOVAPSZrrk:
      Opc = X86::VBLENDMPSZrrk;
      break;
    }
    NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                .add(Dest)
                .add(MI.getOperand(2))
                .add(Src)
                .add(MI.getOperand(3));
  for (unsigned I = 0; I < NumRegOperands; ++I) {
    MachineOperand &Op = MI.getOperand(I);
    if (Op.isReg() && (Op.isDead() || Op.isKill()))
      LV->replaceKillInstruction(Op.getReg(), MI, *NewMI);
  }

  MBB.insert(MI.getIterator(), NewMI); // Insert the new inst
static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1,
                                       unsigned SrcOpIdx2) {
  // Put the lowest index to SrcOpIdx1 to simplify the checks below.
  if (SrcOpIdx1 > SrcOpIdx2)
    std::swap(SrcOpIdx1, SrcOpIdx2);

  unsigned Op1 = 1, Op2 = 2, Op3 = 3;
  if (X86II::isKMasked(TSFlags)) {
    Op2++;
    Op3++;
  }

  if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2)
    return 0;
  if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3)
    return 1;
  if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3)
    return 2;
  llvm_unreachable("Commuted involving non-commutable operands?");
}
  unsigned Opc = MI.getOpcode();

         "Intrinsic instructions can't commute operand 1");

  // Determine which case this commute is or if it cannot be done.
  unsigned Case =
      getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1, SrcOpIdx2);
  assert(Case < 3 && "Unexpected case number!");

  // Define the FMA forms mapping array that helps to map input FMA form
  // to output FMA form to preserve the operation semantics after
  // commuting the operands.
  const unsigned Form132Index = 0;
  const unsigned Form213Index = 1;
  const unsigned Form231Index = 2;
  static const unsigned FormMapping[][3] = {
      // 0: SrcOpIdx1 == 1 && SrcOpIdx2 == 2
      {Form231Index, Form213Index, Form132Index},
      // 1: SrcOpIdx1 == 1 && SrcOpIdx2 == 3
      {Form132Index, Form231Index, Form213Index},
      // 2: SrcOpIdx1 == 2 && SrcOpIdx2 == 3
      {Form213Index, Form132Index, Form231Index}};

  unsigned FMAForms[3];
  FMAForms[0] = FMA3Group.get132Opcode();
  FMAForms[1] = FMA3Group.get213Opcode();
  FMAForms[2] = FMA3Group.get231Opcode();

  // Everything is ready, just adjust the FMA opcode and return it.
  for (unsigned FormIndex = 0; FormIndex < 3; FormIndex++)
    if (Opc == FMAForms[FormIndex])
      return FMAForms[FormMapping[Case][FormIndex]];
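// FMA form bookkeeping: in the 132/213/231 suffix the digits name which
// sources are multiplied and which is added (e.g. the 132 form computes
// op1 * op3 + op2). Swapping two of the three sources therefore maps each
// form onto another form rather than reordering operands in place;
// FormMapping encodes that permutation for each commute case.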
static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1,
                             unsigned SrcOpIdx2) {
  // Determine which case this commute is or if it cannot be done.
  unsigned Case =
      getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1, SrcOpIdx2);
  assert(Case < 3 && "Unexpected case value!");

  // For each case, SwapMasks names the truth-table immediate bit pairs that
  // trade places when the two operands are swapped.
  static const uint8_t SwapMasks[3][4] = {
      {0x04, 0x10, 0x08, 0x20}, // Swap bits 2/4 and 3/5.
      {0x02, 0x10, 0x08, 0x40}, // Swap bits 1/4 and 3/6.
      {0x02, 0x04, 0x20, 0x40}, // Swap bits 1/2 and 5/6.
  };

  uint8_t Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
  // Clear out the bits being swapped, then put each surviving bit in its
  // partner's position.
  uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] |
                           SwapMasks[Case][2] | SwapMasks[Case][3]);
  if (Imm & SwapMasks[Case][0])
    NewImm |= SwapMasks[Case][1];
  if (Imm & SwapMasks[Case][1])
    NewImm |= SwapMasks[Case][0];
  if (Imm & SwapMasks[Case][2])
    NewImm |= SwapMasks[Case][3];
  if (Imm & SwapMasks[Case][3])
    NewImm |= SwapMasks[Case][2];
  MI.getOperand(MI.getNumOperands() - 1).setImm(NewImm);
}
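// Why the bit swap works: VPTERNLOG's imm8 is a truth table indexed by
// (A << 2) | (B << 1) | C over the three source bits. Swapping A and B
// exchanges table entries 0b100 (bit 0x10) with 0b010 (bit 0x04) and 0b101
// (0x20) with 0b011 (0x08), which is exactly the first SwapMasks row; the
// other rows handle the A/C and B/C swaps.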
#define VPERM_CASES(Suffix)                                                    \
  case X86::VPERMI2##Suffix##Z128rr:                                           \
  case X86::VPERMT2##Suffix##Z128rr:                                           \
  case X86::VPERMI2##Suffix##Z256rr:                                           \
  case X86::VPERMT2##Suffix##Z256rr:                                           \
  case X86::VPERMI2##Suffix##Zrr:                                              \
  case X86::VPERMT2##Suffix##Zrr:                                              \
  case X86::VPERMI2##Suffix##Z128rm:                                           \
  case X86::VPERMT2##Suffix##Z128rm:                                           \
  case X86::VPERMI2##Suffix##Z256rm:                                           \
  case X86::VPERMT2##Suffix##Z256rm:                                           \
  case X86::VPERMI2##Suffix##Zrm:                                              \
  case X86::VPERMT2##Suffix##Zrm:                                              \
  case X86::VPERMI2##Suffix##Z128rrkz:                                         \
  case X86::VPERMT2##Suffix##Z128rrkz:                                         \
  case X86::VPERMI2##Suffix##Z256rrkz:                                         \
  case X86::VPERMT2##Suffix##Z256rrkz:                                         \
  case X86::VPERMI2##Suffix##Zrrkz:                                            \
  case X86::VPERMT2##Suffix##Zrrkz:                                            \
  case X86::VPERMI2##Suffix##Z128rmkz:                                         \
  case X86::VPERMT2##Suffix##Z128rmkz:                                         \
  case X86::VPERMI2##Suffix##Z256rmkz:                                         \
  case X86::VPERMT2##Suffix##Z256rmkz:                                         \
  case X86::VPERMI2##Suffix##Zrmkz:                                            \
  case X86::VPERMT2##Suffix##Zrmkz:

#define VPERM_CASES_BROADCAST(Suffix)                                          \
  VPERM_CASES(Suffix)                                                          \
  case X86::VPERMI2##Suffix##Z128rmb:                                          \
  case X86::VPERMT2##Suffix##Z128rmb:                                          \
  case X86::VPERMI2##Suffix##Z256rmb:                                          \
  case X86::VPERMT2##Suffix##Z256rmb:                                          \
  case X86::VPERMI2##Suffix##Zrmb:                                             \
  case X86::VPERMT2##Suffix##Zrmb:                                             \
  case X86::VPERMI2##Suffix##Z128rmbkz:                                        \
  case X86::VPERMT2##Suffix##Z128rmbkz:                                        \
  case X86::VPERMI2##Suffix##Z256rmbkz:                                        \
  case X86::VPERMT2##Suffix##Z256rmbkz:                                        \
  case X86::VPERMI2##Suffix##Zrmbkz:                                           \
  case X86::VPERMT2##Suffix##Zrmbkz:

#undef VPERM_CASES_BROADCAST
#define VPERM_CASES(Orig, New)                                                 \
  case X86::Orig##Z128rr:                                                      \
    return X86::New##Z128rr;                                                   \
  case X86::Orig##Z128rrkz:                                                    \
    return X86::New##Z128rrkz;                                                 \
  case X86::Orig##Z128rm:                                                      \
    return X86::New##Z128rm;                                                   \
  case X86::Orig##Z128rmkz:                                                    \
    return X86::New##Z128rmkz;                                                 \
  case X86::Orig##Z256rr:                                                      \
    return X86::New##Z256rr;                                                   \
  case X86::Orig##Z256rrkz:                                                    \
    return X86::New##Z256rrkz;                                                 \
  case X86::Orig##Z256rm:                                                      \
    return X86::New##Z256rm;                                                   \
  case X86::Orig##Z256rmkz:                                                    \
    return X86::New##Z256rmkz;                                                 \
  case X86::Orig##Zrr:                                                         \
    return X86::New##Zrr;                                                      \
  case X86::Orig##Zrrkz:                                                       \
    return X86::New##Zrrkz;                                                    \
  case X86::Orig##Zrm:                                                         \
    return X86::New##Zrm;                                                      \
  case X86::Orig##Zrmkz:                                                       \
    return X86::New##Zrmkz;

#define VPERM_CASES_BROADCAST(Orig, New)                                       \
  VPERM_CASES(Orig, New)                                                       \
  case X86::Orig##Z128rmb:                                                     \
    return X86::New##Z128rmb;                                                  \
  case X86::Orig##Z128rmbkz:                                                   \
    return X86::New##Z128rmbkz;                                                \
  case X86::Orig##Z256rmb:                                                     \
    return X86::New##Z256rmb;                                                  \
  case X86::Orig##Z256rmbkz:                                                   \
    return X86::New##Z256rmbkz;                                                \
  case X86::Orig##Zrmb:                                                        \
    return X86::New##Zrmb;                                                     \
  case X86::Orig##Zrmbkz:                                                      \
    return X86::New##Zrmbkz;

#undef VPERM_CASES_BROADCAST
                                                   unsigned OpIdx2) const {
  auto CloneIfNew = [&](MachineInstr &MI) {
    return std::exchange(NewMI, false)
               ? MI.getParent()->getParent()->CloneMachineInstr(&MI)
               : &MI;
  };
  unsigned Opc = MI.getOpcode();

#define CASE_ND(OP)                                                            \
  case X86::OP:                                                                \
  case X86::OP##_ND:

#define FROM_TO_SIZE(A, B, S)                                                  \
      Opc = X86::B##_ND;                                                       \
      Opc = X86::A##_ND;                                                       \

    WorkingMI = CloneIfNew(MI);

    WorkingMI = CloneIfNew(MI);
    WorkingMI->setDesc(
        get(X86::PFSUBRrr == Opc ? X86::PFSUBrr : X86::PFSUBRrr));
    break;
  case X86::BLENDPDrri:
  case X86::BLENDPSrri:
  case X86::PBLENDWrri:
  case X86::VBLENDPDrri:
  case X86::VBLENDPSrri:
  case X86::VBLENDPDYrri:
  case X86::VBLENDPSYrri:
  case X86::VPBLENDDrri:
  case X86::VPBLENDWrri:
  case X86::VPBLENDDYrri:
  case X86::VPBLENDWYrri: {
    int8_t Mask;
    switch (Opc) {
    default:
      llvm_unreachable("Unreachable!");
    case X86::BLENDPDrri:
      Mask = (int8_t)0x03;
      break;
    case X86::BLENDPSrri:
      Mask = (int8_t)0x0F;
      break;
    case X86::PBLENDWrri:
      Mask = (int8_t)0xFF;
      break;
    case X86::VBLENDPDrri:
      Mask = (int8_t)0x03;
      break;
    case X86::VBLENDPSrri:
      Mask = (int8_t)0x0F;
      break;
    case X86::VBLENDPDYrri:
      Mask = (int8_t)0x0F;
      break;
    case X86::VBLENDPSYrri:
      Mask = (int8_t)0xFF;
      break;
    case X86::VPBLENDDrri:
      Mask = (int8_t)0x0F;
      break;
    case X86::VPBLENDWrri:
      Mask = (int8_t)0xFF;
      break;
    case X86::VPBLENDDYrri:
      Mask = (int8_t)0xFF;
      break;
    case X86::VPBLENDWYrri:
      Mask = (int8_t)0xFF;
      break;
    }
    // Only the least significant bits of Imm are used; int8_t ensures the
    // value sign-extends into the int64_t that setImm takes.
    int8_t Imm = MI.getOperand(3).getImm() & Mask;
    WorkingMI = CloneIfNew(MI);
    WorkingMI->getOperand(3).setImm(Mask ^ Imm);
    break;
  }
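// Each bit of a BLEND immediate selects the corresponding element from the
// second source (bit set) or the first (bit clear); commuting the sources
// therefore complements every element-select bit, i.e. NewImm = Mask ^ Imm.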
  case X86::INSERTPSrri:
  case X86::VINSERTPSrri:
  case X86::VINSERTPSZrri: {
    unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
    unsigned ZMask = Imm & 15;
    unsigned DstIdx = (Imm >> 4) & 3;
    unsigned SrcIdx = (Imm >> 6) & 3;

    // We can commute insertps if we zero 2 of the elements, the insertion is
    // "inline" and we don't override the insertion with a zero.
    if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&
        llvm::popcount(ZMask) == 2) {
      unsigned AltIdx = llvm::countr_zero((ZMask | (1 << DstIdx)) ^ 15);
      assert(AltIdx < 4 && "Illegal insertion index");
      unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask;
      WorkingMI = CloneIfNew(MI);
      WorkingMI->getOperand(MI.getNumOperands() - 1).setImm(AltImm);
      break;
    }
    return nullptr;
  }
  case X86::VMOVSSrr: {
    // On SSE41 or later we can commute a MOVSS/MOVSD to a BLENDPS/BLENDPD.
    if (Subtarget.hasSSE41()) {
      unsigned Opc;
      switch (MI.getOpcode()) {
      default:
        llvm_unreachable("Unreachable!");
      case X86::MOVSDrr:
        Opc = X86::BLENDPDrri;
        break;
      case X86::MOVSSrr:
        Opc = X86::BLENDPSrri;
        break;
      case X86::VMOVSDrr:
        Opc = X86::VBLENDPDrri;
        break;
      case X86::VMOVSSrr:
        Opc = X86::VBLENDPSrri;
        break;
      }
      WorkingMI = CloneIfNew(MI);
      break;
    }

    // Without SSE41, only MOVSD can be commuted, by converting to SHUFPD.
    assert(Opc == X86::MOVSDrr && "Only MOVSD can commute to SHUFPD");
    WorkingMI = CloneIfNew(MI);
    break;
  }
  case X86::SHUFPDrri: {
    // Commuting a SHUFPD with the 0x02 immediate yields a plain MOVSD.
    assert(MI.getOperand(3).getImm() == 0x02 && "Unexpected immediate!");
    WorkingMI = CloneIfNew(MI);
    WorkingMI->setDesc(get(X86::MOVSDrr));
    WorkingMI->removeOperand(3);
    break;
  }
  case X86::PCLMULQDQrri:
  case X86::VPCLMULQDQrri:
  case X86::VPCLMULQDQYrri:
  case X86::VPCLMULQDQZrri:
  case X86::VPCLMULQDQZ128rri:
  case X86::VPCLMULQDQZ256rri: {
    // SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0]
    // SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0]
    // Commuting the sources means the half-select bits trade places.
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned Src1Hi = Imm & 0x01;
    unsigned Src2Hi = Imm & 0x10;
    WorkingMI = CloneIfNew(MI);
    WorkingMI->getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
    break;
  }
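// Example: imm 0x01 ("high half of src1 times low half of src2") becomes
// 0x10 after commuting, while 0x00 and 0x11 are fixed points; the carry-less
// product itself is unchanged in every case.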
  case X86::VPCMPBZ128rri:
  case X86::VPCMPUBZ128rri:
  case X86::VPCMPBZ256rri:
  case X86::VPCMPUBZ256rri:
  case X86::VPCMPBZrri:
  case X86::VPCMPUBZrri:
  case X86::VPCMPDZ128rri:
  case X86::VPCMPUDZ128rri:
  case X86::VPCMPDZ256rri:
  case X86::VPCMPUDZ256rri:
  case X86::VPCMPDZrri:
  case X86::VPCMPUDZrri:
  case X86::VPCMPQZ128rri:
  case X86::VPCMPUQZ128rri:
  case X86::VPCMPQZ256rri:
  case X86::VPCMPUQZ256rri:
  case X86::VPCMPQZrri:
  case X86::VPCMPUQZrri:
  case X86::VPCMPWZ128rri:
  case X86::VPCMPUWZ128rri:
  case X86::VPCMPWZ256rri:
  case X86::VPCMPUWZ256rri:
  case X86::VPCMPWZrri:
  case X86::VPCMPUWZrri:
  case X86::VPCMPBZ128rrik:
  case X86::VPCMPUBZ128rrik:
  case X86::VPCMPBZ256rrik:
  case X86::VPCMPUBZ256rrik:
  case X86::VPCMPBZrrik:
  case X86::VPCMPUBZrrik:
  case X86::VPCMPDZ128rrik:
  case X86::VPCMPUDZ128rrik:
  case X86::VPCMPDZ256rrik:
  case X86::VPCMPUDZ256rrik:
  case X86::VPCMPDZrrik:
  case X86::VPCMPUDZrrik:
  case X86::VPCMPQZ128rrik:
  case X86::VPCMPUQZ128rrik:
  case X86::VPCMPQZ256rrik:
  case X86::VPCMPUQZ256rrik:
  case X86::VPCMPQZrrik:
  case X86::VPCMPUQZrrik:
  case X86::VPCMPWZ128rrik:
  case X86::VPCMPUWZ128rrik:
  case X86::VPCMPWZ256rrik:
  case X86::VPCMPUWZ256rrik:
  case X86::VPCMPWZrrik:
  case X86::VPCMPUWZrrik:
    WorkingMI = CloneIfNew(MI);
    // Flip the comparison predicate immediate (if necessary).
    WorkingMI->getOperand(MI.getNumOperands() - 1)
        .setImm(X86::getSwappedVPCMPImm(
            MI.getOperand(MI.getNumOperands() - 1).getImm() & 0x7));
    break;
  case X86::VPCOMBri:
  case X86::VPCOMUBri:
  case X86::VPCOMDri:
  case X86::VPCOMUDri:
  case X86::VPCOMQri:
  case X86::VPCOMUQri:
  case X86::VPCOMWri:
  case X86::VPCOMUWri:
    WorkingMI = CloneIfNew(MI);
    // Flip the comparison predicate immediate (if necessary).
    WorkingMI->getOperand(3).setImm(
        X86::getSwappedVPCOMImm(MI.getOperand(3).getImm() & 0x7));
    break;
  case X86::VCMPSDZrri:
  case X86::VCMPSSZrri:
  case X86::VCMPPDZrri:
  case X86::VCMPPSZrri:
  case X86::VCMPSHZrri:
  case X86::VCMPPHZrri:
  case X86::VCMPPHZ128rri:
  case X86::VCMPPHZ256rri:
  case X86::VCMPPDZ128rri:
  case X86::VCMPPSZ128rri:
  case X86::VCMPPDZ256rri:
  case X86::VCMPPSZ256rri:
  case X86::VCMPPDZrrik:
  case X86::VCMPPSZrrik:
  case X86::VCMPPHZrrik:
  case X86::VCMPPDZ128rrik:
  case X86::VCMPPSZ128rrik:
  case X86::VCMPPHZ128rrik:
  case X86::VCMPPDZ256rrik:
  case X86::VCMPPSZ256rrik:
  case X86::VCMPPHZ256rrik:
    WorkingMI = CloneIfNew(MI);
    WorkingMI->getOperand(MI.getNumExplicitOperands() - 1)
        .setImm(X86::getSwappedVCMPImm(
            MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 0x1f));
    break;
  case X86::VPERM2F128rri:
  case X86::VPERM2I128rri:
    // Flip permute source immediate.
    // Imm & 0x02: lo = if set, select Op1, else Op0
    // Imm & 0x20: hi = if set, select Op1, else Op0
    WorkingMI = CloneIfNew(MI);
    WorkingMI->getOperand(3).setImm(MI.getOperand(3).getImm() ^ 0x22);
    break;
  case X86::MOVHLPSrr:
  case X86::UNPCKHPDrr:
  case X86::VMOVHLPSrr:
  case X86::VUNPCKHPDrr:
  case X86::VMOVHLPSZrr:
  case X86::VUNPCKHPDZ128rr:
    assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!");

    switch (Opc) {
    default:
      llvm_unreachable("Unreachable!");
    case X86::MOVHLPSrr:
      Opc = X86::UNPCKHPDrr;
      break;
    case X86::UNPCKHPDrr:
      Opc = X86::MOVHLPSrr;
      break;
    case X86::VMOVHLPSrr:
      Opc = X86::VUNPCKHPDrr;
      break;
    case X86::VUNPCKHPDrr:
      Opc = X86::VMOVHLPSrr;
      break;
    case X86::VMOVHLPSZrr:
      Opc = X86::VUNPCKHPDZ128rr;
      break;
    case X86::VUNPCKHPDZ128rr:
      Opc = X86::VMOVHLPSZrr;
      break;
    }
    WorkingMI = CloneIfNew(MI);
    WorkingMI->setDesc(get(Opc));
    break;

    WorkingMI = CloneIfNew(MI);
    unsigned OpNo = MI.getDesc().getNumOperands() - 1;
  case X86::VPTERNLOGDZrri:
  case X86::VPTERNLOGDZrmi:
  case X86::VPTERNLOGDZ128rri:
  case X86::VPTERNLOGDZ128rmi:
  case X86::VPTERNLOGDZ256rri:
  case X86::VPTERNLOGDZ256rmi:
  case X86::VPTERNLOGQZrri:
  case X86::VPTERNLOGQZrmi:
  case X86::VPTERNLOGQZ128rri:
  case X86::VPTERNLOGQZ128rmi:
  case X86::VPTERNLOGQZ256rri:
  case X86::VPTERNLOGQZ256rmi:
  case X86::VPTERNLOGDZrrik:
  case X86::VPTERNLOGDZ128rrik:
  case X86::VPTERNLOGDZ256rrik:
  case X86::VPTERNLOGQZrrik:
  case X86::VPTERNLOGQZ128rrik:
  case X86::VPTERNLOGQZ256rrik:
  case X86::VPTERNLOGDZrrikz:
  case X86::VPTERNLOGDZrmikz:
  case X86::VPTERNLOGDZ128rrikz:
  case X86::VPTERNLOGDZ128rmikz:
  case X86::VPTERNLOGDZ256rrikz:
  case X86::VPTERNLOGDZ256rmikz:
  case X86::VPTERNLOGQZrrikz:
  case X86::VPTERNLOGQZrmikz:
  case X86::VPTERNLOGQZ128rrikz:
  case X86::VPTERNLOGQZ128rmikz:
  case X86::VPTERNLOGQZ256rrikz:
  case X86::VPTERNLOGQZ256rmikz:
  case X86::VPTERNLOGDZ128rmbi:
  case X86::VPTERNLOGDZ256rmbi:
  case X86::VPTERNLOGDZrmbi:
  case X86::VPTERNLOGQZ128rmbi:
  case X86::VPTERNLOGQZ256rmbi:
  case X86::VPTERNLOGQZrmbi:
  case X86::VPTERNLOGDZ128rmbikz:
  case X86::VPTERNLOGDZ256rmbikz:
  case X86::VPTERNLOGDZrmbikz:
  case X86::VPTERNLOGQZ128rmbikz:
  case X86::VPTERNLOGQZ256rmbikz:
  case X86::VPTERNLOGQZrmbikz: {
    WorkingMI = CloneIfNew(MI);
    commuteVPTERNLOG(*WorkingMI, OpIdx1, OpIdx2);
    break;
  }

      WorkingMI = CloneIfNew(MI);

      WorkingMI = CloneIfNew(MI);
bool X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI,
                                                 unsigned &SrcOpIdx1,
                                                 unsigned &SrcOpIdx2,
                                                 bool IsIntrinsic) const {
  uint64_t TSFlags = MI.getDesc().TSFlags;

  unsigned FirstCommutableVecOp = 1;
  unsigned LastCommutableVecOp = 3;
  unsigned KMaskOp = -1U;
  if (X86II::isKMasked(TSFlags)) {
    // The k-mask operand has index = 2 for masked and zero-masked operations.
    KMaskOp = 2;

    // The operand with index = 1 is used as a source for those elements for
    // which the corresponding bit in the k-mask is set to 0.
    if (X86II::isKMergeMasked(TSFlags))
      FirstCommutableVecOp = 3;

    LastCommutableVecOp++;
  } else if (IsIntrinsic) {
    // Commuting the first operand of an intrinsic instruction isn't possible
    // unless we can prove that only the lowest element of the result is used.
    FirstCommutableVecOp = 2;
  }

  if (isMem(MI, LastCommutableVecOp))
    LastCommutableVecOp--;

  // Only the first RegOpsNum operands are commutable.
  // Also, the value 'CommuteAnyOperandIndex' is valid here as it means
  // that the operand is not fixed yet.
  if (SrcOpIdx1 != CommuteAnyOperandIndex &&
      (SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp ||
       SrcOpIdx1 == KMaskOp))
    return false;
  if (SrcOpIdx2 != CommuteAnyOperandIndex &&
      (SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp ||
       SrcOpIdx2 == KMaskOp))
    return false;

  // Look for two different register operands assumed to be commutable
  // regardless of the FMA opcode; the FMA opcode is adjusted later.
  if (SrcOpIdx1 == CommuteAnyOperandIndex ||
      SrcOpIdx2 == CommuteAnyOperandIndex) {
    unsigned CommutableOpIdx2 = SrcOpIdx2;

    if (SrcOpIdx1 == SrcOpIdx2)
      // Both operands are unfixed: default one of them to the last register
      // operand of the instruction.
      CommutableOpIdx2 = LastCommutableVecOp;
    else if (SrcOpIdx2 == CommuteAnyOperandIndex)
      // Only one operand is unfixed.
      CommutableOpIdx2 = SrcOpIdx1;

    // CommutableOpIdx2 is well defined now. Let's choose another commutable
    // operand and assign its index to CommutableOpIdx1.
    Register Op2Reg = MI.getOperand(CommutableOpIdx2).getReg();

    unsigned CommutableOpIdx1;
    for (CommutableOpIdx1 = LastCommutableVecOp;
         CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) {
      // Just ignore and skip the k-mask operand.
      if (CommutableOpIdx1 == KMaskOp)
        continue;

      // The commuted operands must have different registers; otherwise the
      // commute changes nothing and is useless.
      if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg())
        break;
    }

    // No appropriate commutable operands were found.
    if (CommutableOpIdx1 < FirstCommutableVecOp)
      return false;

    // Assign the found pair of commutable indices to SrcOpIdx1 and SrcOpIdx2.
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                              CommutableOpIdx2))
      return false;
  }

  return true;
}
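// The KMaskOp exclusion above matters: for merge-masked AVX-512 operations
// the mask operand is never commutable, and operand 1 is also pinned because
// lanes with a zero mask bit take their result from it (the pass-through).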
bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                         unsigned &SrcOpIdx1,
                                         unsigned &SrcOpIdx2) const {
  const MCInstrDesc &Desc = MI.getDesc();
  if (!Desc.isCommutable())
    return false;

  switch (MI.getOpcode()) {
  case X86::VCMPSDrri:
  case X86::VCMPSSrri:
  case X86::VCMPPDrri:
  case X86::VCMPPSrri:
  case X86::VCMPPDYrri:
  case X86::VCMPPSYrri:
  case X86::VCMPSDZrri:
  case X86::VCMPSSZrri:
  case X86::VCMPPDZrri:
  case X86::VCMPPSZrri:
  case X86::VCMPSHZrri:
  case X86::VCMPPHZrri:
  case X86::VCMPPHZ128rri:
  case X86::VCMPPHZ256rri:
  case X86::VCMPPDZ128rri:
  case X86::VCMPPSZ128rri:
  case X86::VCMPPDZ256rri:
  case X86::VCMPPSZ256rri:
  case X86::VCMPPDZrrik:
  case X86::VCMPPSZrrik:
  case X86::VCMPPHZrrik:
  case X86::VCMPPDZ128rrik:
  case X86::VCMPPSZ128rrik:
  case X86::VCMPPHZ128rrik:
  case X86::VCMPPDZ256rrik:
  case X86::VCMPPSZ256rrik:
  case X86::VCMPPHZ256rrik: {
    unsigned OpOffset = X86II::isKMasked(Desc.TSFlags) ? 1 : 0;

    // Float comparisons can be safely commuted for
    // Ordered/Unordered/Equal/NotEqual tests.
    unsigned Imm = MI.getOperand(3 + OpOffset).getImm() & 0x7;

    // The indices of the commutable operands are 1 and 2 (or 2 and 3 when
    // masked); assign them to the returned operand indices here.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1 + OpOffset,
                                2 + OpOffset);
  }
    if (Subtarget.hasSSE41())
      return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
    return false;
  case X86::SHUFPDrri:
    // We can commute this to MOVSD.
    if (MI.getOperand(3).getImm() == 0x02)
      return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
    return false;
  case X86::MOVHLPSrr:
  case X86::UNPCKHPDrr:
  case X86::VMOVHLPSrr:
  case X86::VUNPCKHPDrr:
  case X86::VMOVHLPSZrr:
  case X86::VUNPCKHPDZ128rr:
    if (Subtarget.hasSSE2())
      return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
    return false;
  case X86::VPTERNLOGDZrri:
  case X86::VPTERNLOGDZrmi:
  case X86::VPTERNLOGDZ128rri:
  case X86::VPTERNLOGDZ128rmi:
  case X86::VPTERNLOGDZ256rri:
  case X86::VPTERNLOGDZ256rmi:
  case X86::VPTERNLOGQZrri:
  case X86::VPTERNLOGQZrmi:
  case X86::VPTERNLOGQZ128rri:
  case X86::VPTERNLOGQZ128rmi:
  case X86::VPTERNLOGQZ256rri:
  case X86::VPTERNLOGQZ256rmi:
  case X86::VPTERNLOGDZrrik:
  case X86::VPTERNLOGDZ128rrik:
  case X86::VPTERNLOGDZ256rrik:
  case X86::VPTERNLOGQZrrik:
  case X86::VPTERNLOGQZ128rrik:
  case X86::VPTERNLOGQZ256rrik:
  case X86::VPTERNLOGDZrrikz:
  case X86::VPTERNLOGDZrmikz:
  case X86::VPTERNLOGDZ128rrikz:
  case X86::VPTERNLOGDZ128rmikz:
  case X86::VPTERNLOGDZ256rrikz:
  case X86::VPTERNLOGDZ256rmikz:
  case X86::VPTERNLOGQZrrikz:
  case X86::VPTERNLOGQZrmikz:
  case X86::VPTERNLOGQZ128rrikz:
  case X86::VPTERNLOGQZ128rmikz:
  case X86::VPTERNLOGQZ256rrikz:
  case X86::VPTERNLOGQZ256rmikz:
  case X86::VPTERNLOGDZ128rmbi:
  case X86::VPTERNLOGDZ256rmbi:
  case X86::VPTERNLOGDZrmbi:
  case X86::VPTERNLOGQZ128rmbi:
  case X86::VPTERNLOGQZ256rmbi:
  case X86::VPTERNLOGQZrmbi:
  case X86::VPTERNLOGDZ128rmbikz:
  case X86::VPTERNLOGDZ256rmbikz:
  case X86::VPTERNLOGDZrmbikz:
  case X86::VPTERNLOGQZ128rmbikz:
  case X86::VPTERNLOGQZ256rmbikz:
  case X86::VPTERNLOGQZrmbikz:
    return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
  case X86::VPDPWSSDYrr:
  case X86::VPDPWSSDrr:
  case X86::VPDPWSSDSYrr:
  case X86::VPDPWSSDSrr:
  case X86::VPDPWUUDrr:
  case X86::VPDPWUUDYrr:
  case X86::VPDPWUUDSrr:
  case X86::VPDPWUUDSYrr:
  case X86::VPDPBSSDSrr:
  case X86::VPDPBSSDSYrr:
  case X86::VPDPBSSDrr:
  case X86::VPDPBSSDYrr:
  case X86::VPDPBUUDSrr:
  case X86::VPDPBUUDSYrr:
  case X86::VPDPBUUDrr:
  case X86::VPDPBUUDYrr:
  case X86::VPDPBSSDSZ128rr:
  case X86::VPDPBSSDSZ128rrk:
  case X86::VPDPBSSDSZ128rrkz:
  case X86::VPDPBSSDSZ256rr:
  case X86::VPDPBSSDSZ256rrk:
  case X86::VPDPBSSDSZ256rrkz:
  case X86::VPDPBSSDSZrr:
  case X86::VPDPBSSDSZrrk:
  case X86::VPDPBSSDSZrrkz:
  case X86::VPDPBSSDZ128rr:
  case X86::VPDPBSSDZ128rrk:
  case X86::VPDPBSSDZ128rrkz:
  case X86::VPDPBSSDZ256rr:
  case X86::VPDPBSSDZ256rrk:
  case X86::VPDPBSSDZ256rrkz:
  case X86::VPDPBSSDZrr:
  case X86::VPDPBSSDZrrk:
  case X86::VPDPBSSDZrrkz:
  case X86::VPDPBUUDSZ128rr:
  case X86::VPDPBUUDSZ128rrk:
  case X86::VPDPBUUDSZ128rrkz:
  case X86::VPDPBUUDSZ256rr:
  case X86::VPDPBUUDSZ256rrk:
  case X86::VPDPBUUDSZ256rrkz:
  case X86::VPDPBUUDSZrr:
  case X86::VPDPBUUDSZrrk:
  case X86::VPDPBUUDSZrrkz:
  case X86::VPDPBUUDZ128rr:
  case X86::VPDPBUUDZ128rrk:
  case X86::VPDPBUUDZ128rrkz:
  case X86::VPDPBUUDZ256rr:
  case X86::VPDPBUUDZ256rrk:
  case X86::VPDPBUUDZ256rrkz:
  case X86::VPDPBUUDZrr:
  case X86::VPDPBUUDZrrk:
  case X86::VPDPBUUDZrrkz:
  case X86::VPDPWSSDZ128rr:
  case X86::VPDPWSSDZ128rrk:
  case X86::VPDPWSSDZ128rrkz:
  case X86::VPDPWSSDZ256rr:
  case X86::VPDPWSSDZ256rrk:
  case X86::VPDPWSSDZ256rrkz:
  case X86::VPDPWSSDZrr:
  case X86::VPDPWSSDZrrk:
  case X86::VPDPWSSDZrrkz:
  case X86::VPDPWSSDSZ128rr:
  case X86::VPDPWSSDSZ128rrk:
  case X86::VPDPWSSDSZ128rrkz:
  case X86::VPDPWSSDSZ256rr:
  case X86::VPDPWSSDSZ256rrk:
  case X86::VPDPWSSDSZ256rrkz:
  case X86::VPDPWSSDSZrr:
  case X86::VPDPWSSDSZrrk:
  case X86::VPDPWSSDSZrrkz:
  case X86::VPDPWUUDZ128rr:
  case X86::VPDPWUUDZ128rrk:
  case X86::VPDPWUUDZ128rrkz:
  case X86::VPDPWUUDZ256rr:
  case X86::VPDPWUUDZ256rrk:
  case X86::VPDPWUUDZ256rrkz:
  case X86::VPDPWUUDZrr:
  case X86::VPDPWUUDZrrk:
  case X86::VPDPWUUDZrrkz:
  case X86::VPDPWUUDSZ128rr:
  case X86::VPDPWUUDSZ128rrk:
  case X86::VPDPWUUDSZ128rrkz:
  case X86::VPDPWUUDSZ256rr:
  case X86::VPDPWUUDSZ256rrk:
  case X86::VPDPWUUDSZ256rrkz:
  case X86::VPDPWUUDSZrr:
  case X86::VPDPWUUDSZrrk:
  case X86::VPDPWUUDSZrrkz:
  case X86::VPMADD52HUQrr:
  case X86::VPMADD52HUQYrr:
  case X86::VPMADD52HUQZ128r:
  case X86::VPMADD52HUQZ128rk:
  case X86::VPMADD52HUQZ128rkz:
  case X86::VPMADD52HUQZ256r:
  case X86::VPMADD52HUQZ256rk:
  case X86::VPMADD52HUQZ256rkz:
  case X86::VPMADD52HUQZr:
  case X86::VPMADD52HUQZrk:
  case X86::VPMADD52HUQZrkz:
  case X86::VPMADD52LUQrr:
  case X86::VPMADD52LUQYrr:
  case X86::VPMADD52LUQZ128r:
  case X86::VPMADD52LUQZ128rk:
  case X86::VPMADD52LUQZ128rkz:
  case X86::VPMADD52LUQZ256r:
  case X86::VPMADD52LUQZ256rk:
  case X86::VPMADD52LUQZ256rkz:
  case X86::VPMADD52LUQZr:
  case X86::VPMADD52LUQZrk:
  case X86::VPMADD52LUQZrkz:
  case X86::VFMADDCPHZr:
  case X86::VFMADDCPHZrk:
  case X86::VFMADDCPHZrkz:
  case X86::VFMADDCPHZ128r:
  case X86::VFMADDCPHZ128rk:
  case X86::VFMADDCPHZ128rkz:
  case X86::VFMADDCPHZ256r:
  case X86::VFMADDCPHZ256rk:
  case X86::VFMADDCPHZ256rkz:
  case X86::VFMADDCSHZr:
  case X86::VFMADDCSHZrk:
  case X86::VFMADDCSHZrkz: {
    unsigned CommutableOpIdx1 = 2;
    unsigned CommutableOpIdx2 = 3;

    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                              CommutableOpIdx2))
      return false;
    if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
      return false;
    return true;
  }
      return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2,
                                           /*IsIntrinsic=*/true);

    unsigned CommutableOpIdx1 = Desc.getNumDefs() + 1;
    unsigned CommutableOpIdx2 = Desc.getNumDefs() + 2;

    if ((MI.getDesc().getOperandConstraint(Desc.getNumDefs(),

    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                              CommutableOpIdx2))
      return false;

    if (!MI.getOperand(SrcOpIdx1).isReg() ||
        !MI.getOperand(SrcOpIdx2).isReg())
      return false;
  unsigned Opcode = MI->getOpcode();
  if (Opcode != X86::LEA32r && Opcode != X86::LEA64r &&
      Opcode != X86::LEA64_32r)
    return false;

  unsigned Opcode = MI.getOpcode();
  if (Opcode != X86::ADD32rr && Opcode != X86::ADD64rr)
    return false;
  unsigned Opcode = MCID.getOpcode();
  if (!(X86::isJCC(Opcode) || X86::isSETCC(Opcode) || X86::isSETZUCC(Opcode) ||
        X86::isCMOVCC(Opcode) || X86::isCFCMOVCC(Opcode) ||
        X86::isCCMPCC(Opcode) || X86::isCTESTCC(Opcode)))
    return X86::COND_INVALID;

  // The condition code is the last use operand of the instruction.
  unsigned NumUses = MCID.getNumOperands() - MCID.getNumDefs();

    CondNo += MCID.getNumDefs();
  return X86::isSETCC(MI.getOpcode()) || X86::isSETZUCC(MI.getOpcode())
             ? getCondFromMI(MI)
             : X86::COND_INVALID;

  return X86::isCCMPCC(MI.getOpcode()) || X86::isCTESTCC(MI.getOpcode())
             ? getCondFromMI(MI)
             : X86::COND_INVALID;
  enum { CF = 1, ZF = 2, SF = 4, OF = 8, PF = CF };
#define GET_X86_NF_TRANSFORM_TABLE
#define GET_X86_ND2NONND_TABLE
#include "X86GenInstrMapping.inc"

  return (I == Table.end() || I->OldOpc != Opc) ? 0U : I->NewOpc;

#if defined(EXPENSIVE_CHECKS) && !defined(NDEBUG)
  // Make sure the table is sorted.
  static std::atomic<bool> NFTableChecked(false);
  if (!NFTableChecked.load(std::memory_order_relaxed)) {
    assert(llvm::is_sorted(X86NFTransformTable) &&
           "X86NFTransformTable is not sorted!");
    NFTableChecked.store(true, std::memory_order_relaxed);
  }
#endif

#if defined(EXPENSIVE_CHECKS) && !defined(NDEBUG)
  // Make sure the table is sorted.
  static std::atomic<bool> NDTableChecked(false);
  if (!NDTableChecked.load(std::memory_order_relaxed)) {
    assert(llvm::is_sorted(X86ND2NonNDTable) &&
           "X86ND2NonNDTable is not sorted!");
    NDTableChecked.store(true, std::memory_order_relaxed);
  }
#endif
std::pair<X86::CondCode, bool>
X86::getX86ConditionCode(CmpInst::Predicate Predicate) {
  X86::CondCode CC = X86::COND_INVALID;
  bool NeedSwap = false;
  switch (Predicate) {

  return std::make_pair(CC, NeedSwap);
}
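// NeedSwap asks the caller to swap the compare's operands before using the
// returned condition: e.g. FCMP_OLT has no direct encoding after UCOMISS
// (which sets CF/ZF like an unsigned compare), so it is answered by swapping
// the operands and testing COND_A as for FCMP_OGT.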
#define GET_ND_IF_ENABLED(OPC) (HasNDD ? OPC##_ND : OPC)

  switch (Imm & 0x3) {

  if (Info.RegClass == X86::VR128RegClassID ||
      Info.RegClass == X86::VR128XRegClassID)
    return 128;
  if (Info.RegClass == X86::VR256RegClassID ||
      Info.RegClass == X86::VR256XRegClassID)
    return 256;
  if (Info.RegClass == X86::VR512RegClassID)
    return 512;
static bool isX87Reg(unsigned Reg) {
  return (Reg == X86::FPCW || Reg == X86::FPSW ||
          (Reg >= X86::ST0 && Reg <= X86::ST7));
}

/// Check if the instruction is an X87 instruction.
bool X86::isX87Instruction(MachineInstr &MI) {
  // Calls and inline asm may def the X87 registers, so special-case them here
  // to avoid incorrectly flagging them as X87 instructions.
  if (MI.isCall() || MI.isInlineAsm())
    return false;
#ifdef EXPENSIVE_CHECKS
         "Got false negative from X86II::getMemoryOperandNo()!");
#endif

#ifdef EXPENSIVE_CHECKS
         "Expected no operands to have OPERAND_MEMORY type!");

    if (IsMemOp(Desc.operands()[I])) {
#ifdef EXPENSIVE_CHECKS
             "Expected all five operands in the memory reference to have "
             "OPERAND_MEMORY type!");

         "Unexpected number of operands!");

  if (!Index.isReg() || Index.getReg() != X86::NoRegister)

      MI.getParent()->getParent()->getConstantPool()->getConstants();
  switch (MI.getOpcode()) {
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNri64_ImpCall:
  case X86::TCRETURNmi64:
    return true;
  default:
    return false;
  }
  if (Symbol == "__x86_indirect_thunk_r11")
    return true;

  if (TailCall.getOpcode() != X86::TCRETURNdi &&
      TailCall.getOpcode() != X86::TCRETURNdi64) {
    // Only direct calls can be done with a conditional branch.
    return false;
  }

  if (Subtarget.isTargetWin64() && MF->hasWinCFI()) {
    // Conditional tail calls confuse the Win64 unwinder.
    return false;
  }

  if (TailCall.getOperand(1).getImm() != 0) {
    // A conditional tail call cannot do any stack adjustment.
    return false;
  }

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;
    if (!I->isBranch())
      assert(0 && "Can't find the branch to replace!");

    if (CC != BranchCond[0].getImm())
      continue;

  unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc
                                                         : X86::TCRETURNdi64cc;

    LiveRegs.stepForward(*MIB, Clobbers);
    for (const auto &C : Clobbers) {

  I->eraseFromParent();
    if (Succ->isEHPad() || (Succ == TBB && FallthroughBB))
      continue;
    if (FallthroughBB && FallthroughBB != TBB)
      return nullptr;
    FallthroughBB = Succ;
  }
  return FallthroughBB;
bool X86InstrInfo::analyzeBranchImpl(
    MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
    SmallVectorImpl<MachineOperand> &Cond,
    SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const {

    if (I->isDebugInstr())
      continue;

    // Working from the bottom, when we see a non-terminator instruction,
    // we're done.
    if (!isUnpredicatedTerminator(*I))
      break;

    // Handle unconditional branches.
    if (I->getOpcode() == X86::JMP_1) {

      TBB = I->getOperand(0).getMBB();

      UnCondBrIter = MBB.end();

      TBB = I->getOperand(0).getMBB();

    if (I->findRegisterUseOperand(X86::EFLAGS, nullptr)->isUndef())

      TBB = I->getOperand(0).getMBB();

    if (OldBranchCode == BranchCode && TBB == NewTBB)

    if (TBB == NewTBB &&

    Cond[0].setImm(BranchCode);

bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 SmallVectorImpl<MachineOperand> &Cond,
                                 bool AllowModify) const {
  SmallVector<MachineInstr *, 4> CondBranches;
  return analyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify);
}
  assert(MemRefBegin >= 0 && "instr should have memory operand");

  if (!Reg.isVirtual())

  unsigned Opcode = MI->getOpcode();
  if (Opcode != X86::LEA64r && Opcode != X86::LEA32r)

  unsigned Opcode = MI.getOpcode();

  if (Opcode == X86::JMP64m || Opcode == X86::JMP32m) {

  if (Opcode == X86::JMP64r || Opcode == X86::JMP32r) {

    if (!Reg.isVirtual())

    if (Add->getOpcode() != X86::ADD64rr && Add->getOpcode() != X86::ADD32rr)
bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
                                          MachineBranchPredicate &MBP,
                                          bool AllowModify) const {
  using namespace std::placeholders;

  SmallVector<MachineOperand, 4> Cond;
  SmallVector<MachineInstr *, 4> CondBranches;
  if (analyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches,
                        AllowModify))
    return true;

  if (Cond.size() != 1)
    return true;

  assert(MBP.TrueDest && "expected!");

  if (!MBP.FalseDest)
    MBP.FalseDest = MBB.getNextNode();

  MachineInstr *ConditionDef = nullptr;
  bool SingleUseCondition = true;

  for (MachineInstr &MI : llvm::drop_begin(llvm::reverse(MBB))) {
    if (MI.modifiesRegister(X86::EFLAGS, TRI)) {
      ConditionDef = &MI;
      break;
    }

    if (MI.readsRegister(X86::EFLAGS, TRI))
      SingleUseCondition = false;
  }

  if (!ConditionDef)
    return true;

  if (SingleUseCondition) {
    for (auto *Succ : MBB.successors())
      if (Succ->isLiveIn(X86::EFLAGS))
        SingleUseCondition = false;
  }

  MBP.ConditionDef = ConditionDef;
  MBP.SingleUseCondition = SingleUseCondition;

  // Currently we only recognize the simple pattern:
  //
  //   test %reg, %reg
  //   je %label
  //
  const unsigned TestOpcode =
      Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr;

  if (ConditionDef->getOpcode() == TestOpcode &&
      ConditionDef->getNumOperands() == 3 &&
      ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) &&
      (CC == X86::COND_NE || CC == X86::COND_E)) {
    MBP.LHS = MachineOperand::CreateReg(ConditionDef->getOperand(0).getReg(),
                                        /*isDef=*/false);
    MBP.RHS = MachineOperand::CreateImm(0);
    MBP.Predicate = CC == X86::COND_NE ? MachineBranchPredicate::PRED_NE
                                       : MachineBranchPredicate::PRED_EQ;
    return false;
  }

  return true;
}
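// analyzeBranchPredicate only recognizes "TEST %reg, %reg; je/jne", i.e. a
// branch on a register being zero or non-zero, which is the shape the
// implicit-null-check optimization needs to reason about.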
unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                    int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;
    if (I->getOpcode() != X86::JMP_1 &&
        X86::getCondFromBranch(*I) == X86::COND_INVALID)
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}

  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "X86 branch conditions have one component!");
  assert(!BytesAdded && "code size not handled");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");

  bool FallThru = FBB == nullptr;

  if (FBB == nullptr) {
    assert(FBB && "MBB cannot be the last block in function when the false "
                  "body is a fall-through.");
                                   Register TrueReg, Register FalseReg,
                                   int &CondCycles, int &TrueCycles,
                                   int &FalseCycles) const {
  // Not all subtargets have cmov instructions.
  if (!Subtarget.canUseCMOV())
    return false;
  if (Cond.size() != 1)
    return false;
  // Check register classes.
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // We have cmov instructions for 16, 32, and 64 bit general purpose
  // registers.
  if (X86::GR16RegClass.hasSubClassEq(RC) ||
      X86::GR32RegClass.hasSubClassEq(RC) ||
      X86::GR64RegClass.hasSubClassEq(RC)) {

  assert(Cond.size() == 1 && "Invalid Cond array");
                          false /*HasMemoryOperand*/, Subtarget.hasNDD());
static bool isHReg(unsigned Reg) {
  return X86::GR8_ABCD_HRegClass.contains(Reg);
}
static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
                                        const X86Subtarget &Subtarget) {
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();
  bool HasEGPR = Subtarget.hasEGPR();

  // SrcReg(MaskReg) -> DestReg(GR64)
  // SrcReg(MaskReg) -> DestReg(GR32)

  // All KMASK RegClasses hold the same k registers, can be tested against
  // anyone.
  if (X86::VK16RegClass.contains(SrcReg)) {
    if (X86::GR64RegClass.contains(DestReg)) {
      assert(Subtarget.hasBWI());
      return HasEGPR ? X86::KMOVQrk_EVEX : X86::KMOVQrk;
    }
    if (X86::GR32RegClass.contains(DestReg))
      return Subtarget.hasBWI() ? (HasEGPR ? X86::KMOVDrk_EVEX : X86::KMOVDrk)
                                : (HasEGPR ? X86::KMOVWrk_EVEX : X86::KMOVWrk);
  }

  // SrcReg(GR64) -> DestReg(MaskReg)
  // SrcReg(GR32) -> DestReg(MaskReg)
  if (X86::VK16RegClass.contains(DestReg)) {
    if (X86::GR64RegClass.contains(SrcReg)) {
      assert(Subtarget.hasBWI());
      return HasEGPR ? X86::KMOVQkr_EVEX : X86::KMOVQkr;
    }
    if (X86::GR32RegClass.contains(SrcReg))
      return Subtarget.hasBWI() ? (HasEGPR ? X86::KMOVDkr_EVEX : X86::KMOVDkr)
                                : (HasEGPR ? X86::KMOVWkr_EVEX : X86::KMOVWkr);
  }

  // SSE/AVX moves between general purpose and vector registers.
  if (X86::GR64RegClass.contains(DestReg)) {
    if (X86::VR128XRegClass.contains(SrcReg))
      // Copy from a VR128 register to a GR64 register.
      return HasAVX512 ? X86::VMOVPQIto64Zrr
             : HasAVX  ? X86::VMOVPQIto64rr
                       : X86::MOVPQIto64rr;
    if (X86::VR64RegClass.contains(SrcReg))
      // Copy from a VR64 register to a GR64 register.
      return X86::MMX_MOVD64from64rr;
  } else if (X86::GR64RegClass.contains(SrcReg)) {
    // Copy from a GR64 register to a VR128 register.
    if (X86::VR128XRegClass.contains(DestReg))
      return HasAVX512 ? X86::VMOV64toPQIZrr
             : HasAVX  ? X86::VMOV64toPQIrr
                       : X86::MOV64toPQIrr;
    // Copy from a GR64 register to a VR64 register.
    if (X86::VR64RegClass.contains(DestReg))
      return X86::MMX_MOVD64to64rr;
  }

  // SrcReg(VR128) -> DestReg(GR32)
  // SrcReg(GR32)  -> DestReg(VR128)
  if (X86::GR32RegClass.contains(DestReg) &&
      X86::VR128XRegClass.contains(SrcReg))
    // Copy from a VR128 register to a GR32 register.
    return HasAVX512 ? X86::VMOVPDI2DIZrr
           : HasAVX  ? X86::VMOVPDI2DIrr
                     : X86::MOVPDI2DIrr;

  if (X86::VR128XRegClass.contains(DestReg) &&
      X86::GR32RegClass.contains(SrcReg))
    // Copy from a GR32 register to a VR128 register.
    return HasAVX512 ? X86::VMOVDI2PDIZrr
           : HasAVX  ? X86::VMOVDI2PDIrr
                     : X86::MOVDI2PDIrr;
  return 0;
}
void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI,
                               const DebugLoc &DL, Register DestReg,
                               Register SrcReg, bool KillSrc,
                               bool RenamableDest, bool RenamableSrc) const {
  // First deal with the normal symmetric copies.
  bool HasAVX = Subtarget.hasAVX();
  bool HasVLX = Subtarget.hasVLX();
  bool HasEGPR = Subtarget.hasEGPR();
  unsigned Opc = 0;
  if (X86::GR64RegClass.contains(DestReg, SrcReg))
    Opc = X86::MOV64rr;
  else if (X86::GR32RegClass.contains(DestReg, SrcReg))
    Opc = X86::MOV32rr;
  else if (X86::GR16RegClass.contains(DestReg, SrcReg))
    Opc = X86::MOV16rr;
  else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
    // Copying to or from a physical H register on x86-64 requires a NOREX
    // move.  Otherwise use a normal move.
    if ((isHReg(DestReg) || isHReg(SrcReg)) && Subtarget.is64Bit()) {
      Opc = X86::MOV8rr_NOREX;
      // Both operands must be encodable without an REX prefix.
      assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
             "8-bit H register can not be copied outside GR8_NOREX");
    } else
      Opc = X86::MOV8rr;
  } else if (X86::VR64RegClass.contains(DestReg, SrcReg))
    Opc = X86::MMX_MOVQ64rr;
  else if (X86::VR128XRegClass.contains(DestReg, SrcReg)) {
    if (HasVLX)
      Opc = X86::VMOVAPSZ128rr;
    else if (X86::VR128RegClass.contains(DestReg, SrcReg))
      Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
    else {
      // If this is an extended register and we don't have VLX we need to use
      // a 512-bit move.
      Opc = X86::VMOVAPSZrr;
      const TargetRegisterInfo *TRI = &getRegisterInfo();
      DestReg =
          TRI->getMatchingSuperReg(DestReg, X86::sub_xmm, &X86::VR512RegClass);
      SrcReg =
          TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
    }
  } else if (X86::VR256XRegClass.contains(DestReg, SrcReg)) {
    if (HasVLX)
      Opc = X86::VMOVAPSZ256rr;
    else if (X86::VR256RegClass.contains(DestReg, SrcReg))
      Opc = X86::VMOVAPSYrr;
    else {
      // If this is an extended register and we don't have VLX we need to use
      // a 512-bit move.
      Opc = X86::VMOVAPSZrr;
      const TargetRegisterInfo *TRI = &getRegisterInfo();
      DestReg =
          TRI->getMatchingSuperReg(DestReg, X86::sub_ymm, &X86::VR512RegClass);
      SrcReg =
          TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
    }
  } else if (X86::VR512RegClass.contains(DestReg, SrcReg))
    Opc = X86::VMOVAPSZrr;
  // All KMASK RegClasses hold the same k registers, can be tested against
  // anyone.
  else if (X86::VK16RegClass.contains(DestReg, SrcReg))
    Opc = Subtarget.hasBWI() ? (HasEGPR ? X86::KMOVQkk_EVEX : X86::KMOVQkk)
                             : (HasEGPR ? X86::KMOVQkk_EVEX : X86::KMOVWkk);

  if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) {

  LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to "
                    << RI.getName(DestReg) << '\n');
  llvm_unreachable("Cannot emit physreg copy instruction");
}
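// Note the VLX workaround above: without AVX512VL there are no 128/256-bit
// EVEX moves for the extended XMM16-31/YMM16-31 registers, so such copies
// are widened to the containing ZMM registers and done with VMOVAPSZrr.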
std::optional<DestSourcePair>
X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  if (MI.isMoveReg()) {
    // FIXME: Dirty hack for an apparent invariant that doesn't hold when a
    // subreg is involved.
    if (MI.getOperand(0).isUndef() && MI.getOperand(0).getSubReg())
      return std::nullopt;
    return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
  }
  return std::nullopt;
}

static unsigned getLoadStoreOpcodeForFP16(bool Load, const X86Subtarget &STI) {
  if (STI.hasFP16())
    return Load ? X86::VMOVSHZrm_alt : X86::VMOVSHZmr;
  if (Load)
    return X86::MOVSHPrm;
  return X86::MOVSHPmr;
}
static unsigned getLoadStoreRegOpcode(Register Reg,
                                      const TargetRegisterClass *RC,
                                      bool IsStackAligned,
                                      const X86Subtarget &STI, bool Load) {
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();
  bool HasEGPR = STI.hasEGPR();

  assert(RC != nullptr && "Invalid target register class");
  switch (STI.getRegisterInfo()->getSpillSize(*RC)) {
  default:
    llvm_unreachable("Unknown spill size");
  case 1:
    assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass");
    if (STI.is64Bit())
      // Copying to or from a physical H register on x86-64 requires a NOREX
      // move.  Otherwise use a normal move.
      if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC))
        return Load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
    return Load ? X86::MOV8rm : X86::MOV8mr;
  case 2:
    if (X86::VK16RegClass.hasSubClassEq(RC))
      return Load ? (HasEGPR ? X86::KMOVWkm_EVEX : X86::KMOVWkm)
                  : (HasEGPR ? X86::KMOVWmk_EVEX : X86::KMOVWmk);
    assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass");
    return Load ? X86::MOV16rm : X86::MOV16mr;
  case 4:
    if (X86::GR32RegClass.hasSubClassEq(RC))
      return Load ? X86::MOV32rm : X86::MOV32mr;
    if (X86::FR32XRegClass.hasSubClassEq(RC))
      return Load ? (HasAVX512 ? X86::VMOVSSZrm_alt
                     : HasAVX  ? X86::VMOVSSrm_alt
                               : X86::MOVSSrm_alt)
                  : (HasAVX512 ? X86::VMOVSSZmr
                     : HasAVX  ? X86::VMOVSSmr
                               : X86::MOVSSmr);
    if (X86::RFP32RegClass.hasSubClassEq(RC))
      return Load ? X86::LD_Fp32m : X86::ST_Fp32m;
    if (X86::VK32RegClass.hasSubClassEq(RC)) {
      assert(STI.hasBWI() && "KMOVD requires BWI");
      return Load ? (HasEGPR ? X86::KMOVDkm_EVEX : X86::KMOVDkm)
                  : (HasEGPR ? X86::KMOVDmk_EVEX : X86::KMOVDmk);
    }
    // All of these mask pair classes have the same spill size, so the same
    // kind of kmov instructions can be used with all of them.
    if (X86::VK1PAIRRegClass.hasSubClassEq(RC) ||
        X86::VK2PAIRRegClass.hasSubClassEq(RC) ||
        X86::VK4PAIRRegClass.hasSubClassEq(RC) ||
        X86::VK8PAIRRegClass.hasSubClassEq(RC) ||
        X86::VK16PAIRRegClass.hasSubClassEq(RC))
      return Load ? X86::MASKPAIR16LOAD : X86::MASKPAIR16STORE;
    if (X86::FR16RegClass.hasSubClassEq(RC) ||
        X86::FR16XRegClass.hasSubClassEq(RC))
      return getLoadStoreOpcodeForFP16(Load, STI);
    llvm_unreachable("Unknown 4-byte regclass");
  case 8:
    if (X86::GR64RegClass.hasSubClassEq(RC))
      return Load ? X86::MOV64rm : X86::MOV64mr;
    if (X86::FR64XRegClass.hasSubClassEq(RC))
      return Load ? (HasAVX512 ? X86::VMOVSDZrm_alt
                     : HasAVX  ? X86::VMOVSDrm_alt
                               : X86::MOVSDrm_alt)
                  : (HasAVX512 ? X86::VMOVSDZmr
                     : HasAVX  ? X86::VMOVSDmr
                               : X86::MOVSDmr);
    if (X86::VR64RegClass.hasSubClassEq(RC))
      return Load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
    if (X86::RFP64RegClass.hasSubClassEq(RC))
      return Load ? X86::LD_Fp64m : X86::ST_Fp64m;
    if (X86::VK64RegClass.hasSubClassEq(RC)) {
      assert(STI.hasBWI() && "KMOVQ requires BWI");
      return Load ? (HasEGPR ? X86::KMOVQkm_EVEX : X86::KMOVQkm)
                  : (HasEGPR ? X86::KMOVQmk_EVEX : X86::KMOVQmk);
    }
    llvm_unreachable("Unknown 8-byte regclass");
  case 10:
    assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
    return Load ? X86::LD_Fp80m : X86::ST_FpP80m;
  case 16:
    if (X86::VR128XRegClass.hasSubClassEq(RC)) {
      // If the stack is realigned we can use aligned stores.
      if (IsStackAligned)
        return Load ? (HasVLX      ? X86::VMOVAPSZ128rm
                       : HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX
                       : HasAVX    ? X86::VMOVAPSrm
                                   : X86::MOVAPSrm)
                    : (HasVLX      ? X86::VMOVAPSZ128mr
                       : HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX
                       : HasAVX    ? X86::VMOVAPSmr
                                   : X86::MOVAPSmr);
      else
        return Load ? (HasVLX      ? X86::VMOVUPSZ128rm
                       : HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX
                       : HasAVX    ? X86::VMOVUPSrm
                                   : X86::MOVUPSrm)
                    : (HasVLX      ? X86::VMOVUPSZ128mr
                       : HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX
                       : HasAVX    ? X86::VMOVUPSmr
                                   : X86::MOVUPSmr);
    }
    llvm_unreachable("Unknown 16-byte regclass");
  case 32:
    assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
    // If the stack is realigned we can use aligned stores.
    if (IsStackAligned)
      return Load ? (HasVLX      ? X86::VMOVAPSZ256rm
                     : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                 : X86::VMOVAPSYrm)
                  : (HasVLX      ? X86::VMOVAPSZ256mr
                     : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                 : X86::VMOVAPSYmr);
    else
      return Load ? (HasVLX      ? X86::VMOVUPSZ256rm
                     : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                 : X86::VMOVUPSYrm)
                  : (HasVLX      ? X86::VMOVUPSZ256mr
                     : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                 : X86::VMOVUPSYmr);
  case 64:
    assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
"Unknown 64-byte regclass");
4537 return Load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
4539 return Load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
4541 assert(X86::TILERegClass.hasSubClassEq(RC) &&
"Unknown 1024-byte regclass");
4542 assert(STI.hasAMXTILE() &&
"Using 8*1024-bit register requires AMX-TILE");
4543#define GET_EGPR_IF_ENABLED(OPC) (STI.hasEGPR() ? OPC##_EVEX : OPC)
4546#undef GET_EGPR_IF_ENABLED
4548 assert(X86::TILEPAIRRegClass.hasSubClassEq(RC) &&
4549 "Unknown 2048-byte regclass");
4550 assert(STI.hasAMXTILE() &&
"Using 2048-bit register requires AMX-TILE");
4551 return Load ? X86::PTILEPAIRLOAD : X86::PTILEPAIRSTORE;
4555std::optional<ExtAddrMode>
4560 if (MemRefBegin < 0)
4561 return std::nullopt;
4566 if (!BaseOp.isReg())
4567 return std::nullopt;
4571 if (!DispMO.
isImm())
4572 return std::nullopt;
4598 ErrInfo =
"Scale factor in address must be 1, 2, 4 or 8";
4603 ErrInfo =
"Displacement in address must fit into 32-bit signed "
4613 int64_t &ImmVal)
const {
4619 if (
MI.isSubregToReg()) {
4623 if (!
MI.getOperand(1).isImm())
4625 unsigned FillBits =
MI.getOperand(1).getImm();
4626 unsigned SubIdx =
MI.getOperand(3).getImm();
4627 MovReg =
MI.getOperand(2).getReg();
4628 if (SubIdx != X86::sub_32bit || FillBits != 0)
4631 MovMI =
MRI.getUniqueVRegDef(MovReg);
4636 if (MovMI->
getOpcode() == X86::MOV32r0 &&
4642 if (MovMI->
getOpcode() != X86::MOV32ri &&
4656 if (!
MI->modifiesRegister(NullValueReg,
TRI))
4658 switch (
MI->getOpcode()) {
4665 assert(
MI->getOperand(0).isDef() &&
MI->getOperand(1).isUse() &&
4666 "expected for shift opcode!");
4667 return MI->getOperand(0).getReg() == NullValueReg &&
4668 MI->getOperand(1).getReg() == NullValueReg;
4673 return TRI->isSubRegisterEq(NullValueReg, MO.getReg());
4687 if (MemRefBegin < 0)
4694 if (!BaseOp->
isReg())
4707 if (!DispMO.
isImm())
4712 if (!BaseOp->
isReg())
4715 OffsetIsScalable =
false;
4719 Width = !
MemOp.memoperands_empty() ?
MemOp.memoperands().front()->getSize()
4727 bool IsStackAligned,
4742 case X86::TILELOADD:
4743 case X86::TILESTORED:
4744 case X86::TILELOADD_EVEX:
4745 case X86::TILESTORED_EVEX:
4746 case X86::PTILEPAIRLOAD:
4747 case X86::PTILEPAIRSTORE:
4755 bool isKill)
const {
4759 case X86::TILESTORED:
4760 case X86::TILESTORED_EVEX:
4761 case X86::PTILEPAIRSTORE: {
4764 Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
4774 case X86::TILELOADD:
4775 case X86::TILELOADD_EVEX:
4776 case X86::PTILEPAIRLOAD: {
4779 Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
4799 "Stack slot too small for store");
4801 unsigned Alignment = std::max<uint32_t>(
TRI->getSpillSize(*RC), 16);
4803 (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
4822 "Load size exceeds stack slot");
4823 unsigned Alignment = std::max<uint32_t>(
TRI->getSpillSize(*RC), 16);
4825 (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
4837 Register &SrcReg2, int64_t &CmpMask,
4838 int64_t &CmpValue)
const {
4839 switch (
MI.getOpcode()) {
4842 case X86::CMP64ri32:
4846 SrcReg =
MI.getOperand(0).getReg();
4848 if (
MI.getOperand(1).isImm()) {
4850 CmpValue =
MI.getOperand(1).getImm();
4852 CmpMask = CmpValue = 0;
4860 SrcReg =
MI.getOperand(1).getReg();
4869 SrcReg =
MI.getOperand(1).getReg();
4870 SrcReg2 =
MI.getOperand(2).getReg();
4878 SrcReg =
MI.getOperand(1).getReg();
4880 if (
MI.getOperand(2).isImm()) {
4882 CmpValue =
MI.getOperand(2).getImm();
4884 CmpMask = CmpValue = 0;
4891 SrcReg =
MI.getOperand(0).getReg();
4892 SrcReg2 =
MI.getOperand(1).getReg();
4900 SrcReg =
MI.getOperand(0).getReg();
4901 if (
MI.getOperand(1).getReg() != SrcReg)
4908 case X86::TEST64ri32:
4912 SrcReg =
MI.getOperand(0).getReg();
4922bool X86InstrInfo::isRedundantFlagInstr(
const MachineInstr &FlagI,
4924 int64_t ImmMask, int64_t ImmValue,
4926 int64_t *ImmDelta)
const {
4941 OIMask != ImmMask || OIValue != ImmValue)
4943 if (SrcReg == OISrcReg && SrcReg2 == OISrcReg2) {
4947 if (SrcReg == OISrcReg2 && SrcReg2 == OISrcReg) {
4953 case X86::CMP64ri32:
4957 case X86::TEST64ri32:
4968 case X86::TEST8rr: {
4975 SrcReg == OISrcReg && ImmMask == OIMask) {
4976 if (OIValue == ImmValue) {
4979 }
else if (
static_cast<uint64_t
>(ImmValue) ==
4980 static_cast<uint64_t
>(OIValue) - 1) {
4983 }
else if (
static_cast<uint64_t
>(ImmValue) ==
4984 static_cast<uint64_t
>(OIValue) + 1) {
5002 bool &ClearsOverflowFlag) {
5004 ClearsOverflowFlag =
false;
5010 if (
MI.getOpcode() == X86::ADD64rm ||
MI.getOpcode() == X86::ADD32rm) {
5011 unsigned Flags =
MI.getOperand(5).getTargetFlags();
5017 switch (
MI.getOpcode()) {
5113 case X86::LZCNT16rr:
5114 case X86::LZCNT16rm:
5115 case X86::LZCNT32rr:
5116 case X86::LZCNT32rm:
5117 case X86::LZCNT64rr:
5118 case X86::LZCNT64rm:
5119 case X86::POPCNT16rr:
5120 case X86::POPCNT16rm:
5121 case X86::POPCNT32rr:
5122 case X86::POPCNT32rm:
5123 case X86::POPCNT64rr:
5124 case X86::POPCNT64rm:
5125 case X86::TZCNT16rr:
5126 case X86::TZCNT16rm:
5127 case X86::TZCNT32rr:
5128 case X86::TZCNT32rm:
5129 case X86::TZCNT64rr:
5130 case X86::TZCNT64rm:
5176 case X86::BLSMSK32rr:
5177 case X86::BLSMSK32rm:
5178 case X86::BLSMSK64rr:
5179 case X86::BLSMSK64rm:
5184 case X86::BLCFILL32rr:
5185 case X86::BLCFILL32rm:
5186 case X86::BLCFILL64rr:
5187 case X86::BLCFILL64rm:
5192 case X86::BLCIC32rr:
5193 case X86::BLCIC32rm:
5194 case X86::BLCIC64rr:
5195 case X86::BLCIC64rm:
5196 case X86::BLCMSK32rr:
5197 case X86::BLCMSK32rm:
5198 case X86::BLCMSK64rr:
5199 case X86::BLCMSK64rm:
5204 case X86::BLSFILL32rr:
5205 case X86::BLSFILL32rm:
5206 case X86::BLSFILL64rr:
5207 case X86::BLSFILL64rm:
5208 case X86::BLSIC32rr:
5209 case X86::BLSIC32rm:
5210 case X86::BLSIC64rr:
5211 case X86::BLSIC64rm:
5216 case X86::T1MSKC32rr:
5217 case X86::T1MSKC32rm:
5218 case X86::T1MSKC64rr:
5219 case X86::T1MSKC64rm:
5220 case X86::TZMSK32rr:
5221 case X86::TZMSK32rm:
5222 case X86::TZMSK64rr:
5223 case X86::TZMSK64rm:
5227 ClearsOverflowFlag =
true;
5229 case X86::BEXTR32rr:
5230 case X86::BEXTR64rr:
5231 case X86::BEXTR32rm:
5232 case X86::BEXTR64rm:
5233 case X86::BEXTRI32ri:
5234 case X86::BEXTRI32mi:
5235 case X86::BEXTRI64ri:
5236 case X86::BEXTRI64mi:
5247 switch (
MI.getOpcode()) {
5255 case X86::LZCNT16rr:
5256 case X86::LZCNT32rr:
5257 case X86::LZCNT64rr:
5259 case X86::POPCNT16rr:
5260 case X86::POPCNT32rr:
5261 case X86::POPCNT64rr:
5263 case X86::TZCNT16rr:
5264 case X86::TZCNT32rr:
5265 case X86::TZCNT64rr:
5279 case X86::BLSMSK32rr:
5280 case X86::BLSMSK64rr:
5312 unsigned NewOpcode = 0;
5313#define FROM_TO(A, B) \
5314 CASE_ND(A) NewOpcode = X86::B; \
5338 if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
5339 NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
5347 bool IsCmpZero = (CmpMask != 0 && CmpValue == 0);
5353 assert(SrcRegDef &&
"Must have a definition (SSA)");
5359 bool NoSignFlag =
false;
5360 bool ClearsOverflowFlag =
false;
5361 bool ShouldUpdateCC =
false;
5362 bool IsSwapped =
false;
5363 bool HasNF = Subtarget.hasNF();
5366 int64_t ImmDelta = 0;
5379 if (&Inst == SrcRegDef) {
5402 Subtarget, NoSignFlag, ClearsOverflowFlag)) {
5411 if (Inst.modifiesRegister(X86::EFLAGS,
TRI)) {
5422 Inst.getOperand(OpNo).getReg() == SrcReg) {
5423 ShouldUpdateCC =
true;
5434 if (isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask, CmpValue,
5435 Inst, &IsSwapped, &ImmDelta)) {
5443 if (!Movr0Inst && Inst.
getOpcode() == X86::MOV32r0 &&
5444 Inst.registerDefIsDead(X86::EFLAGS,
TRI)) {
5458 if (HasNF && Inst.registerDefIsDead(X86::EFLAGS,
TRI) && !IsWithReloc) {
5463 InstsToUpdate.
push_back(std::make_pair(&Inst, NewOp));
5477 if (
MBB->pred_size() != 1)
5479 MBB = *
MBB->pred_begin();
5480 From =
MBB->rbegin();
5487 bool FlagsMayLiveOut =
true;
5492 bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS,
TRI);
5493 bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS,
TRI);
5495 if (!UseEFLAGS && ModifyEFLAGS) {
5497 FlagsMayLiveOut =
false;
5500 if (!UseEFLAGS && !ModifyEFLAGS)
5531 if (!ClearsOverflowFlag)
5550 ReplacementCC = NewCC;
5556 }
else if (IsSwapped) {
5563 ShouldUpdateCC =
true;
5564 }
else if (ImmDelta != 0) {
5565 unsigned BitWidth =
TRI->getRegSizeInBits(*
MRI->getRegClass(SrcReg));
5575 if (ImmDelta != 1 || CmpValue == 0)
5585 if (ImmDelta != 1 || CmpValue == 0)
5612 ShouldUpdateCC =
true;
5615 if (ShouldUpdateCC && ReplacementCC != OldCC) {
5619 OpsToUpdate.
push_back(std::make_pair(&Instr, ReplacementCC));
5621 if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS,
TRI)) {
5623 FlagsMayLiveOut =
false;
5630 if ((
MI !=
nullptr || ShouldUpdateCC) && FlagsMayLiveOut) {
5637 assert((
MI ==
nullptr ||
Sub ==
nullptr) &&
"Should not have Sub and MI set");
5644 if (&CmpMBB != SubBB)
5648 InsertE =
Sub->getParent()->rend();
5649 for (; InsertI != InsertE; ++InsertI) {
5651 if (!Instr->readsRegister(X86::EFLAGS,
TRI) &&
5652 Instr->modifiesRegister(X86::EFLAGS,
TRI)) {
5659 if (InsertI == InsertE)
5664 for (
auto &Inst : InstsToUpdate) {
5665 Inst.first->setDesc(
get(Inst.second));
5666 Inst.first->removeOperand(
5667 Inst.first->findRegisterDefOperandIdx(X86::EFLAGS,
nullptr));
5672 Sub->findRegisterDefOperand(X86::EFLAGS,
nullptr);
5673 assert(FlagDef &&
"Unable to locate a def EFLAGS operand");
5679 for (
auto &
Op : OpsToUpdate) {
5680 Op.first->getOperand(
Op.first->getDesc().getNumOperands() - 1)
5685 MBB = *
MBB->pred_begin()) {
5686 assert(
MBB->pred_size() == 1 &&
"Expected exactly one predecessor");
5687 if (!
MBB->isLiveIn(X86::EFLAGS))
5688 MBB->addLiveIn(X86::EFLAGS);
5716#define FROM_TO(FROM, TO) \
5719 case X86::FROM##_ND: \
5720 return X86::TO##_ND;
5750#define FROM_TO(FROM, TO) \
5754 FROM_TO(CTEST64rr, CTEST64ri32)
5773 bool MakeChange)
const {
5779 const TargetRegisterClass *RC =
nullptr;
5781 RC =
MRI->getRegClass(
Reg);
5783 (
Reg.
isVirtual() && X86::GR64RegClass.hasSubClassEq(RC))) {
5788 if (
UseMI.findRegisterUseOperand(
Reg,
nullptr)->getSubReg())
5793 !
MRI->hasOneNonDBGUse(
Reg))
5798 if (
Opc == TargetOpcode::COPY) {
5800 const TargetRegisterClass *RC =
nullptr;
5802 RC =
MRI->getRegClass(ToReg);
5803 bool GR32Reg = (ToReg.
isVirtual() && X86::GR32RegClass.hasSubClassEq(RC)) ||
5805 bool GR64Reg = (ToReg.
isVirtual() && X86::GR64RegClass.hasSubClassEq(RC)) ||
5807 bool GR8Reg = (ToReg.
isVirtual() && X86::GR8RegClass.hasSubClassEq(RC)) ||
5818 NewOpc = X86::MOV32ri64;
5820 NewOpc = X86::MOV64ri;
5821 }
else if (GR32Reg) {
5822 NewOpc = X86::MOV32ri;
5826 if (
UseMI.getParent()->computeRegisterLiveness(
5835 UseMI.removeOperand(
5836 UseMI.findRegisterUseOperandIdx(
Reg,
nullptr));
5844 NewOpc = X86::MOV8ri;
5854 if ((NewOpc == X86::SUB64ri32 || NewOpc == X86::SUB32ri ||
5855 NewOpc == X86::SBB64ri32 || NewOpc == X86::SBB32ri ||
5856 NewOpc == X86::SUB64ri32_ND || NewOpc == X86::SUB32ri_ND ||
5857 NewOpc == X86::SBB64ri32_ND || NewOpc == X86::SBB32ri_ND) &&
5858 UseMI.findRegisterUseOperandIdx(
Reg,
nullptr) != 2)
5861 if (((NewOpc == X86::CMP64ri32 || NewOpc == X86::CMP32ri) ||
5862 (NewOpc == X86::CCMP64ri32 || NewOpc == X86::CCMP32ri)) &&
5863 UseMI.findRegisterUseOperandIdx(
Reg,
nullptr) != 1)
5866 using namespace X86;
5867 if (isSHL(
Opc) || isSHR(
Opc) || isSAR(
Opc) || isROL(
Opc) || isROR(
Opc) ||
5868 isRCL(
Opc) || isRCR(
Opc)) {
5869 unsigned RegIdx =
UseMI.findRegisterUseOperandIdx(
Reg,
nullptr);
5879 UseMI.removeOperand(RegIdx);
5893 UseMI.registerDefIsDead(X86::EFLAGS,
nullptr)) {
5897 UseMI.setDesc(
get(TargetOpcode::COPY));
5898 UseMI.removeOperand(
5899 UseMI.findRegisterUseOperandIdx(
Reg,
nullptr));
5900 UseMI.removeOperand(
5901 UseMI.findRegisterDefOperandIdx(X86::EFLAGS,
nullptr));
5902 UseMI.untieRegOperand(0);
5906 unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
5907 unsigned ImmOpNum = 2;
5908 if (!
UseMI.getOperand(0).isDef()) {
5912 if (
Opc == TargetOpcode::COPY)
5916 commuteInstruction(
UseMI);
5920 UseMI.getOperand(ImmOpNum).ChangeToImmediate(ImmVal);
5938 return foldImmediateImpl(
UseMI, &
DefMI, Reg, ImmVal,
MRI,
true);
5950 assert(
Desc.getNumOperands() == 3 &&
"Expected two-addr instruction.");
5970 assert(
Desc.getNumOperands() == 3 &&
"Expected two-addr instruction.");
5988 MIB->
setDesc(
TII.get(MinusOne ? X86::DEC32r : X86::INC32r));
6000 assert(Imm != 0 &&
"Using push/pop for 0 is not efficient.");
6003 int StackAdjustment;
6005 if (Subtarget.is64Bit()) {
6007 MIB->
getOpcode() == X86::MOV32ImmSExti8);
6021 StackAdjustment = 8;
6027 StackAdjustment = 4;
6039 bool EmitCFI = !TFL->
hasFP(MF) && NeedsDwarfCFI;
6086 MIB->
getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr;
6098 const MCInstrDesc &BroadcastDesc,
unsigned SubIdx) {
6101 if (
TRI->getEncodingValue(DestReg) < 16) {
6108 DestReg =
TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass);
6120 const MCInstrDesc &ExtractDesc,
unsigned SubIdx) {
6123 if (
TRI->getEncodingValue(SrcReg) < 16) {
6130 SrcReg =
TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass);
6153 if (
MI.getOpcode() == X86::MOVSHPrm) {
6154 NewOpc = HasAVX ? X86::VMOVSSrm : X86::MOVSSrm;
6156 if (
Reg > X86::XMM15)
6157 NewOpc = X86::VMOVSSZrm;
6159 NewOpc = HasAVX ? X86::VMOVSSmr : X86::MOVSSmr;
6161 if (
Reg > X86::XMM15)
6162 NewOpc = X86::VMOVSSZmr;
6170 bool HasAVX = Subtarget.hasAVX();
6172 switch (
MI.getOpcode()) {
6179 case X86::MOV32ImmSExti8:
6180 case X86::MOV64ImmSExti8:
6182 case X86::SETB_C32r:
6184 case X86::SETB_C64r:
6192 case X86::FsFLD0F128:
6194 case X86::AVX_SET0: {
6195 assert(HasAVX &&
"AVX not supported");
6198 Register XReg =
TRI->getSubReg(SrcReg, X86::sub_xmm);
6204 case X86::AVX512_128_SET0:
6205 case X86::AVX512_FsFLD0SH:
6206 case X86::AVX512_FsFLD0SS:
6207 case X86::AVX512_FsFLD0SD:
6208 case X86::AVX512_FsFLD0F128: {
6209 bool HasVLX = Subtarget.hasVLX();
6212 if (HasVLX ||
TRI->getEncodingValue(SrcReg) < 16)
6214 get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
6217 TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
6221 case X86::AVX512_256_SET0:
6222 case X86::AVX512_512_SET0: {
6223 bool HasVLX = Subtarget.hasVLX();
6226 if (HasVLX ||
TRI->getEncodingValue(SrcReg) < 16) {
6227 Register XReg =
TRI->getSubReg(SrcReg, X86::sub_xmm);
6233 if (
MI.getOpcode() == X86::AVX512_256_SET0) {
6236 TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
6244 case X86::V_SETALLONES:
6246 get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
6247 case X86::AVX2_SETALLONES:
6249 case X86::AVX1_SETALLONES: {
6256 case X86::AVX512_512_SETALLONES: {
6267 case X86::AVX512_512_SEXT_MASK_32:
6268 case X86::AVX512_512_SEXT_MASK_64: {
6272 unsigned Opc = (
MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64)
6273 ? X86::VPTERNLOGQZrrikz
6274 : X86::VPTERNLOGDZrrikz;
6275 MI.removeOperand(1);
6280 .
addReg(MaskReg, MaskState)
6286 case X86::VMOVAPSZ128rm_NOVLX:
6288 get(X86::VBROADCASTF32X4Zrm), X86::sub_xmm);
6289 case X86::VMOVUPSZ128rm_NOVLX:
6291 get(X86::VBROADCASTF32X4Zrm), X86::sub_xmm);
6292 case X86::VMOVAPSZ256rm_NOVLX:
6294 get(X86::VBROADCASTF64X4Zrm), X86::sub_ymm);
6295 case X86::VMOVUPSZ256rm_NOVLX:
6297 get(X86::VBROADCASTF64X4Zrm), X86::sub_ymm);
6298 case X86::VMOVAPSZ128mr_NOVLX:
6300 get(X86::VEXTRACTF32X4Zmri), X86::sub_xmm);
6301 case X86::VMOVUPSZ128mr_NOVLX:
6303 get(X86::VEXTRACTF32X4Zmri), X86::sub_xmm);
6304 case X86::VMOVAPSZ256mr_NOVLX:
6306 get(X86::VEXTRACTF64X4Zmri), X86::sub_ymm);
6307 case X86::VMOVUPSZ256mr_NOVLX:
6309 get(X86::VEXTRACTF64X4Zmri), X86::sub_ymm);
6310 case X86::MOV32ri64: {
6312 Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit);
6313 MI.setDesc(
get(X86::MOV32ri));
6319 case X86::RDFLAGS32:
6320 case X86::RDFLAGS64: {
6321 unsigned Is64Bit =
MI.getOpcode() == X86::RDFLAGS64;
6325 get(Is64Bit ? X86::PUSHF64 : X86::PUSHF32))
6333 "Unexpected register in operand! Should be EFLAGS.");
6336 "Unexpected register in operand! Should be DF.");
6339 MIB->
setDesc(
get(Is64Bit ? X86::POP64r : X86::POP32r));
6343 case X86::WRFLAGS32:
6344 case X86::WRFLAGS64: {
6345 unsigned Is64Bit =
MI.getOpcode() == X86::WRFLAGS64;
6349 get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
6350 .
addReg(
MI.getOperand(0).getReg());
6352 get(Is64Bit ? X86::POPF64 : X86::POPF32));
6353 MI.eraseFromParent();
6376 case TargetOpcode::LOAD_STACK_GUARD:
6382 case X86::SHLDROT32ri:
6384 case X86::SHLDROT64ri:
6386 case X86::SHRDROT32ri:
6388 case X86::SHRDROT64ri:
6390 case X86::ADD8rr_DB:
6393 case X86::ADD16rr_DB:
6396 case X86::ADD32rr_DB:
6399 case X86::ADD64rr_DB:
6402 case X86::ADD8ri_DB:
6405 case X86::ADD16ri_DB:
6408 case X86::ADD32ri_DB:
6411 case X86::ADD64ri32_DB:
6435 bool ForLoadFold =
false) {
6437 case X86::CVTSI2SSrr:
6438 case X86::CVTSI2SSrm:
6439 case X86::CVTSI642SSrr:
6440 case X86::CVTSI642SSrm:
6441 case X86::CVTSI2SDrr:
6442 case X86::CVTSI2SDrm:
6443 case X86::CVTSI642SDrr:
6444 case X86::CVTSI642SDrm:
6447 return !ForLoadFold;
6448 case X86::CVTSD2SSrr:
6449 case X86::CVTSD2SSrm:
6450 case X86::CVTSS2SDrr:
6451 case X86::CVTSS2SDrm:
6458 case X86::RCPSSr_Int:
6459 case X86::RCPSSm_Int:
6460 case X86::ROUNDSDri:
6461 case X86::ROUNDSDmi:
6462 case X86::ROUNDSSri:
6463 case X86::ROUNDSSmi:
6466 case X86::RSQRTSSr_Int:
6467 case X86::RSQRTSSm_Int:
6470 case X86::SQRTSSr_Int:
6471 case X86::SQRTSSm_Int:
6474 case X86::SQRTSDr_Int:
6475 case X86::SQRTSDm_Int:
6477 case X86::VFCMULCPHZ128rm:
6478 case X86::VFCMULCPHZ128rmb:
6479 case X86::VFCMULCPHZ128rmbkz:
6480 case X86::VFCMULCPHZ128rmkz:
6481 case X86::VFCMULCPHZ128rr:
6482 case X86::VFCMULCPHZ128rrkz:
6483 case X86::VFCMULCPHZ256rm:
6484 case X86::VFCMULCPHZ256rmb:
6485 case X86::VFCMULCPHZ256rmbkz:
6486 case X86::VFCMULCPHZ256rmkz:
6487 case X86::VFCMULCPHZ256rr:
6488 case X86::VFCMULCPHZ256rrkz:
6489 case X86::VFCMULCPHZrm:
6490 case X86::VFCMULCPHZrmb:
6491 case X86::VFCMULCPHZrmbkz:
6492 case X86::VFCMULCPHZrmkz:
6493 case X86::VFCMULCPHZrr:
6494 case X86::VFCMULCPHZrrb:
6495 case X86::VFCMULCPHZrrbkz:
6496 case X86::VFCMULCPHZrrkz:
6497 case X86::VFMULCPHZ128rm:
6498 case X86::VFMULCPHZ128rmb:
6499 case X86::VFMULCPHZ128rmbkz:
6500 case X86::VFMULCPHZ128rmkz:
6501 case X86::VFMULCPHZ128rr:
6502 case X86::VFMULCPHZ128rrkz:
6503 case X86::VFMULCPHZ256rm:
6504 case X86::VFMULCPHZ256rmb:
6505 case X86::VFMULCPHZ256rmbkz:
6506 case X86::VFMULCPHZ256rmkz:
6507 case X86::VFMULCPHZ256rr:
6508 case X86::VFMULCPHZ256rrkz:
6509 case X86::VFMULCPHZrm:
6510 case X86::VFMULCPHZrmb:
6511 case X86::VFMULCPHZrmbkz:
6512 case X86::VFMULCPHZrmkz:
6513 case X86::VFMULCPHZrr:
6514 case X86::VFMULCPHZrrb:
6515 case X86::VFMULCPHZrrbkz:
6516 case X86::VFMULCPHZrrkz:
6517 case X86::VFCMULCSHZrm:
6518 case X86::VFCMULCSHZrmkz:
6519 case X86::VFCMULCSHZrr:
6520 case X86::VFCMULCSHZrrb:
6521 case X86::VFCMULCSHZrrbkz:
6522 case X86::VFCMULCSHZrrkz:
6523 case X86::VFMULCSHZrm:
6524 case X86::VFMULCSHZrmkz:
6525 case X86::VFMULCSHZrr:
6526 case X86::VFMULCSHZrrb:
6527 case X86::VFMULCSHZrrbkz:
6528 case X86::VFMULCSHZrrkz:
6529 return Subtarget.hasMULCFalseDeps();
6530 case X86::VPERMDYrm:
6531 case X86::VPERMDYrr:
6532 case X86::VPERMQYmi:
6533 case X86::VPERMQYri:
6534 case X86::VPERMPSYrm:
6535 case X86::VPERMPSYrr:
6536 case X86::VPERMPDYmi:
6537 case X86::VPERMPDYri:
6538 case X86::VPERMDZ256rm:
6539 case X86::VPERMDZ256rmb:
6540 case X86::VPERMDZ256rmbkz:
6541 case X86::VPERMDZ256rmkz:
6542 case X86::VPERMDZ256rr:
6543 case X86::VPERMDZ256rrkz:
6544 case X86::VPERMDZrm:
6545 case X86::VPERMDZrmb:
6546 case X86::VPERMDZrmbkz:
6547 case X86::VPERMDZrmkz:
6548 case X86::VPERMDZrr:
6549 case X86::VPERMDZrrkz:
6550 case X86::VPERMQZ256mbi:
6551 case X86::VPERMQZ256mbikz:
6552 case X86::VPERMQZ256mi:
6553 case X86::VPERMQZ256mikz:
6554 case X86::VPERMQZ256ri:
6555 case X86::VPERMQZ256rikz:
6556 case X86::VPERMQZ256rm:
6557 case X86::VPERMQZ256rmb:
6558 case X86::VPERMQZ256rmbkz:
6559 case X86::VPERMQZ256rmkz:
6560 case X86::VPERMQZ256rr:
6561 case X86::VPERMQZ256rrkz:
6562 case X86::VPERMQZmbi:
6563 case X86::VPERMQZmbikz:
6564 case X86::VPERMQZmi:
6565 case X86::VPERMQZmikz:
6566 case X86::VPERMQZri:
6567 case X86::VPERMQZrikz:
6568 case X86::VPERMQZrm:
6569 case X86::VPERMQZrmb:
6570 case X86::VPERMQZrmbkz:
6571 case X86::VPERMQZrmkz:
6572 case X86::VPERMQZrr:
6573 case X86::VPERMQZrrkz:
6574 case X86::VPERMPSZ256rm:
6575 case X86::VPERMPSZ256rmb:
6576 case X86::VPERMPSZ256rmbkz:
6577 case X86::VPERMPSZ256rmkz:
6578 case X86::VPERMPSZ256rr:
6579 case X86::VPERMPSZ256rrkz:
6580 case X86::VPERMPSZrm:
6581 case X86::VPERMPSZrmb:
6582 case X86::VPERMPSZrmbkz:
6583 case X86::VPERMPSZrmkz:
6584 case X86::VPERMPSZrr:
6585 case X86::VPERMPSZrrkz:
6586 case X86::VPERMPDZ256mbi:
6587 case X86::VPERMPDZ256mbikz:
6588 case X86::VPERMPDZ256mi:
6589 case X86::VPERMPDZ256mikz:
6590 case X86::VPERMPDZ256ri:
6591 case X86::VPERMPDZ256rikz:
6592 case X86::VPERMPDZ256rm:
6593 case X86::VPERMPDZ256rmb:
6594 case X86::VPERMPDZ256rmbkz:
6595 case X86::VPERMPDZ256rmkz:
6596 case X86::VPERMPDZ256rr:
6597 case X86::VPERMPDZ256rrkz:
6598 case X86::VPERMPDZmbi:
6599 case X86::VPERMPDZmbikz:
6600 case X86::VPERMPDZmi:
6601 case X86::VPERMPDZmikz:
6602 case X86::VPERMPDZri:
6603 case X86::VPERMPDZrikz:
6604 case X86::VPERMPDZrm:
6605 case X86::VPERMPDZrmb:
6606 case X86::VPERMPDZrmbkz:
6607 case X86::VPERMPDZrmkz:
6608 case X86::VPERMPDZrr:
6609 case X86::VPERMPDZrrkz:
6610 return Subtarget.hasPERMFalseDeps();
6611 case X86::VRANGEPDZ128rmbi:
6612 case X86::VRANGEPDZ128rmbikz:
6613 case X86::VRANGEPDZ128rmi:
6614 case X86::VRANGEPDZ128rmikz:
6615 case X86::VRANGEPDZ128rri:
6616 case X86::VRANGEPDZ128rrikz:
6617 case X86::VRANGEPDZ256rmbi:
6618 case X86::VRANGEPDZ256rmbikz:
6619 case X86::VRANGEPDZ256rmi:
6620 case X86::VRANGEPDZ256rmikz:
6621 case X86::VRANGEPDZ256rri:
6622 case X86::VRANGEPDZ256rrikz:
6623 case X86::VRANGEPDZrmbi:
6624 case X86::VRANGEPDZrmbikz:
6625 case X86::VRANGEPDZrmi:
6626 case X86::VRANGEPDZrmikz:
6627 case X86::VRANGEPDZrri:
6628 case X86::VRANGEPDZrrib:
6629 case X86::VRANGEPDZrribkz:
6630 case X86::VRANGEPDZrrikz:
6631 case X86::VRANGEPSZ128rmbi:
6632 case X86::VRANGEPSZ128rmbikz:
6633 case X86::VRANGEPSZ128rmi:
6634 case X86::VRANGEPSZ128rmikz:
6635 case X86::VRANGEPSZ128rri:
6636 case X86::VRANGEPSZ128rrikz:
6637 case X86::VRANGEPSZ256rmbi:
6638 case X86::VRANGEPSZ256rmbikz:
6639 case X86::VRANGEPSZ256rmi:
6640 case X86::VRANGEPSZ256rmikz:
6641 case X86::VRANGEPSZ256rri:
6642 case X86::VRANGEPSZ256rrikz:
6643 case X86::VRANGEPSZrmbi:
6644 case X86::VRANGEPSZrmbikz:
6645 case X86::VRANGEPSZrmi:
6646 case X86::VRANGEPSZrmikz:
6647 case X86::VRANGEPSZrri:
6648 case X86::VRANGEPSZrrib:
6649 case X86::VRANGEPSZrribkz:
6650 case X86::VRANGEPSZrrikz:
6651 case X86::VRANGESDZrmi:
6652 case X86::VRANGESDZrmikz:
6653 case X86::VRANGESDZrri:
6654 case X86::VRANGESDZrrib:
6655 case X86::VRANGESDZrribkz:
6656 case X86::VRANGESDZrrikz:
6657 case X86::VRANGESSZrmi:
6658 case X86::VRANGESSZrmikz:
6659 case X86::VRANGESSZrri:
6660 case X86::VRANGESSZrrib:
6661 case X86::VRANGESSZrribkz:
6662 case X86::VRANGESSZrrikz:
6663 return Subtarget.hasRANGEFalseDeps();
6664 case X86::VGETMANTSSZrmi:
6665 case X86::VGETMANTSSZrmikz:
6666 case X86::VGETMANTSSZrri:
6667 case X86::VGETMANTSSZrrib:
6668 case X86::VGETMANTSSZrribkz:
6669 case X86::VGETMANTSSZrrikz:
6670 case X86::VGETMANTSDZrmi:
6671 case X86::VGETMANTSDZrmikz:
6672 case X86::VGETMANTSDZrri:
6673 case X86::VGETMANTSDZrrib:
6674 case X86::VGETMANTSDZrribkz:
6675 case X86::VGETMANTSDZrrikz:
6676 case X86::VGETMANTSHZrmi:
6677 case X86::VGETMANTSHZrmikz:
6678 case X86::VGETMANTSHZrri:
6679 case X86::VGETMANTSHZrrib:
6680 case X86::VGETMANTSHZrribkz:
6681 case X86::VGETMANTSHZrrikz:
6682 case X86::VGETMANTPSZ128rmbi:
6683 case X86::VGETMANTPSZ128rmbikz:
6684 case X86::VGETMANTPSZ128rmi:
6685 case X86::VGETMANTPSZ128rmikz:
6686 case X86::VGETMANTPSZ256rmbi:
6687 case X86::VGETMANTPSZ256rmbikz:
6688 case X86::VGETMANTPSZ256rmi:
6689 case X86::VGETMANTPSZ256rmikz:
6690 case X86::VGETMANTPSZrmbi:
6691 case X86::VGETMANTPSZrmbikz:
6692 case X86::VGETMANTPSZrmi:
6693 case X86::VGETMANTPSZrmikz:
6694 case X86::VGETMANTPDZ128rmbi:
6695 case X86::VGETMANTPDZ128rmbikz:
6696 case X86::VGETMANTPDZ128rmi:
6697 case X86::VGETMANTPDZ128rmikz:
6698 case X86::VGETMANTPDZ256rmbi:
6699 case X86::VGETMANTPDZ256rmbikz:
6700 case X86::VGETMANTPDZ256rmi:
6701 case X86::VGETMANTPDZ256rmikz:
6702 case X86::VGETMANTPDZrmbi:
6703 case X86::VGETMANTPDZrmbikz:
6704 case X86::VGETMANTPDZrmi:
6705 case X86::VGETMANTPDZrmikz:
6706 return Subtarget.hasGETMANTFalseDeps();
6707 case X86::VPMULLQZ128rm:
6708 case X86::VPMULLQZ128rmb:
6709 case X86::VPMULLQZ128rmbkz:
6710 case X86::VPMULLQZ128rmkz:
6711 case X86::VPMULLQZ128rr:
6712 case X86::VPMULLQZ128rrkz:
6713 case X86::VPMULLQZ256rm:
6714 case X86::VPMULLQZ256rmb:
6715 case X86::VPMULLQZ256rmbkz:
6716 case X86::VPMULLQZ256rmkz:
6717 case X86::VPMULLQZ256rr:
6718 case X86::VPMULLQZ256rrkz:
6719 case X86::VPMULLQZrm:
6720 case X86::VPMULLQZrmb:
6721 case X86::VPMULLQZrmbkz:
6722 case X86::VPMULLQZrmkz:
6723 case X86::VPMULLQZrr:
6724 case X86::VPMULLQZrrkz:
6725 return Subtarget.hasMULLQFalseDeps();
6727 case X86::POPCNT32rm:
6728 case X86::POPCNT32rr:
6729 case X86::POPCNT64rm:
6730 case X86::POPCNT64rr:
6731 return Subtarget.hasPOPCNTFalseDeps();
6732 case X86::LZCNT32rm:
6733 case X86::LZCNT32rr:
6734 case X86::LZCNT64rm:
6735 case X86::LZCNT64rr:
6736 case X86::TZCNT32rm:
6737 case X86::TZCNT32rr:
6738 case X86::TZCNT64rm:
6739 case X86::TZCNT64rr:
6740 return Subtarget.hasLZCNTFalseDeps();
6757 bool HasNDDPartialWrite =
false;
6760 if (!Reg.isVirtual())
6761 HasNDDPartialWrite =
6762 X86::GR8RegClass.contains(Reg) || X86::GR16RegClass.contains(Reg);
6775 bool ReadsReg =
false;
6776 if (Reg.isVirtual())
6777 ReadsReg = (MO.
readsReg() ||
MI.readsVirtualRegister(Reg));
6779 ReadsReg =
MI.readsRegister(Reg,
TRI);
6780 if (ReadsReg != HasNDDPartialWrite)
6794 bool ForLoadFold =
false) {
6797 case X86::MMX_PUNPCKHBWrr:
6798 case X86::MMX_PUNPCKHWDrr:
6799 case X86::MMX_PUNPCKHDQrr:
6800 case X86::MMX_PUNPCKLBWrr:
6801 case X86::MMX_PUNPCKLWDrr:
6802 case X86::MMX_PUNPCKLDQrr:
6803 case X86::MOVHLPSrr:
6804 case X86::PACKSSWBrr:
6805 case X86::PACKUSWBrr:
6806 case X86::PACKSSDWrr:
6807 case X86::PACKUSDWrr:
6808 case X86::PUNPCKHBWrr:
6809 case X86::PUNPCKLBWrr:
6810 case X86::PUNPCKHWDrr:
6811 case X86::PUNPCKLWDrr:
6812 case X86::PUNPCKHDQrr:
6813 case X86::PUNPCKLDQrr:
6814 case X86::PUNPCKHQDQrr:
6815 case X86::PUNPCKLQDQrr:
6816 case X86::SHUFPDrri:
6817 case X86::SHUFPSrri:
6823 return OpNum == 2 && !ForLoadFold;
6825 case X86::VMOVLHPSrr:
6826 case X86::VMOVLHPSZrr:
6827 case X86::VPACKSSWBrr:
6828 case X86::VPACKUSWBrr:
6829 case X86::VPACKSSDWrr:
6830 case X86::VPACKUSDWrr:
6831 case X86::VPACKSSWBZ128rr:
6832 case X86::VPACKUSWBZ128rr:
6833 case X86::VPACKSSDWZ128rr:
6834 case X86::VPACKUSDWZ128rr:
6835 case X86::VPERM2F128rri:
6836 case X86::VPERM2I128rri:
6837 case X86::VSHUFF32X4Z256rri:
6838 case X86::VSHUFF32X4Zrri:
6839 case X86::VSHUFF64X2Z256rri:
6840 case X86::VSHUFF64X2Zrri:
6841 case X86::VSHUFI32X4Z256rri:
6842 case X86::VSHUFI32X4Zrri:
6843 case X86::VSHUFI64X2Z256rri:
6844 case X86::VSHUFI64X2Zrri:
6845 case X86::VPUNPCKHBWrr:
6846 case X86::VPUNPCKLBWrr:
6847 case X86::VPUNPCKHBWYrr:
6848 case X86::VPUNPCKLBWYrr:
6849 case X86::VPUNPCKHBWZ128rr:
6850 case X86::VPUNPCKLBWZ128rr:
6851 case X86::VPUNPCKHBWZ256rr:
6852 case X86::VPUNPCKLBWZ256rr:
6853 case X86::VPUNPCKHBWZrr:
6854 case X86::VPUNPCKLBWZrr:
6855 case X86::VPUNPCKHWDrr:
6856 case X86::VPUNPCKLWDrr:
6857 case X86::VPUNPCKHWDYrr:
6858 case X86::VPUNPCKLWDYrr:
6859 case X86::VPUNPCKHWDZ128rr:
6860 case X86::VPUNPCKLWDZ128rr:
6861 case X86::VPUNPCKHWDZ256rr:
6862 case X86::VPUNPCKLWDZ256rr:
6863 case X86::VPUNPCKHWDZrr:
6864 case X86::VPUNPCKLWDZrr:
6865 case X86::VPUNPCKHDQrr:
6866 case X86::VPUNPCKLDQrr:
6867 case X86::VPUNPCKHDQYrr:
6868 case X86::VPUNPCKLDQYrr:
6869 case X86::VPUNPCKHDQZ128rr:
6870 case X86::VPUNPCKLDQZ128rr:
6871 case X86::VPUNPCKHDQZ256rr:
6872 case X86::VPUNPCKLDQZ256rr:
6873 case X86::VPUNPCKHDQZrr:
6874 case X86::VPUNPCKLDQZrr:
6875 case X86::VPUNPCKHQDQrr:
6876 case X86::VPUNPCKLQDQrr:
6877 case X86::VPUNPCKHQDQYrr:
6878 case X86::VPUNPCKLQDQYrr:
6879 case X86::VPUNPCKHQDQZ128rr:
6880 case X86::VPUNPCKLQDQZ128rr:
6881 case X86::VPUNPCKHQDQZ256rr:
6882 case X86::VPUNPCKLQDQZ256rr:
6883 case X86::VPUNPCKHQDQZrr:
6884 case X86::VPUNPCKLQDQZrr:
6888 return (OpNum == 1 || OpNum == 2) && !ForLoadFold;
6890 case X86::VCVTSI2SSrr:
6891 case X86::VCVTSI2SSrm:
6892 case X86::VCVTSI2SSrr_Int:
6893 case X86::VCVTSI2SSrm_Int:
6894 case X86::VCVTSI642SSrr:
6895 case X86::VCVTSI642SSrm:
6896 case X86::VCVTSI642SSrr_Int:
6897 case X86::VCVTSI642SSrm_Int:
6898 case X86::VCVTSI2SDrr:
6899 case X86::VCVTSI2SDrm:
6900 case X86::VCVTSI2SDrr_Int:
6901 case X86::VCVTSI2SDrm_Int:
6902 case X86::VCVTSI642SDrr:
6903 case X86::VCVTSI642SDrm:
6904 case X86::VCVTSI642SDrr_Int:
6905 case X86::VCVTSI642SDrm_Int:
6907 case X86::VCVTSI2SSZrr:
6908 case X86::VCVTSI2SSZrm:
6909 case X86::VCVTSI2SSZrr_Int:
6910 case X86::VCVTSI2SSZrrb_Int:
6911 case X86::VCVTSI2SSZrm_Int:
6912 case X86::VCVTSI642SSZrr:
6913 case X86::VCVTSI642SSZrm:
6914 case X86::VCVTSI642SSZrr_Int:
6915 case X86::VCVTSI642SSZrrb_Int:
6916 case X86::VCVTSI642SSZrm_Int:
6917 case X86::VCVTSI2SDZrr:
6918 case X86::VCVTSI2SDZrm:
6919 case X86::VCVTSI2SDZrr_Int:
6920 case X86::VCVTSI2SDZrm_Int:
6921 case X86::VCVTSI642SDZrr:
6922 case X86::VCVTSI642SDZrm:
6923 case X86::VCVTSI642SDZrr_Int:
6924 case X86::VCVTSI642SDZrrb_Int:
6925 case X86::VCVTSI642SDZrm_Int:
6926 case X86::VCVTUSI2SSZrr:
6927 case X86::VCVTUSI2SSZrm:
6928 case X86::VCVTUSI2SSZrr_Int:
6929 case X86::VCVTUSI2SSZrrb_Int:
6930 case X86::VCVTUSI2SSZrm_Int:
6931 case X86::VCVTUSI642SSZrr:
6932 case X86::VCVTUSI642SSZrm:
6933 case X86::VCVTUSI642SSZrr_Int:
6934 case X86::VCVTUSI642SSZrrb_Int:
6935 case X86::VCVTUSI642SSZrm_Int:
6936 case X86::VCVTUSI2SDZrr:
6937 case X86::VCVTUSI2SDZrm:
6938 case X86::VCVTUSI2SDZrr_Int:
6939 case X86::VCVTUSI2SDZrm_Int:
6940 case X86::VCVTUSI642SDZrr:
6941 case X86::VCVTUSI642SDZrm:
6942 case X86::VCVTUSI642SDZrr_Int:
6943 case X86::VCVTUSI642SDZrrb_Int:
6944 case X86::VCVTUSI642SDZrm_Int:
6945 case X86::VCVTSI2SHZrr:
6946 case X86::VCVTSI2SHZrm:
6947 case X86::VCVTSI2SHZrr_Int:
6948 case X86::VCVTSI2SHZrrb_Int:
6949 case X86::VCVTSI2SHZrm_Int:
6950 case X86::VCVTSI642SHZrr:
6951 case X86::VCVTSI642SHZrm:
6952 case X86::VCVTSI642SHZrr_Int:
6953 case X86::VCVTSI642SHZrrb_Int:
6954 case X86::VCVTSI642SHZrm_Int:
6955 case X86::VCVTUSI2SHZrr:
6956 case X86::VCVTUSI2SHZrm:
6957 case X86::VCVTUSI2SHZrr_Int:
6958 case X86::VCVTUSI2SHZrrb_Int:
6959 case X86::VCVTUSI2SHZrm_Int:
6960 case X86::VCVTUSI642SHZrr:
6961 case X86::VCVTUSI642SHZrm:
6962 case X86::VCVTUSI642SHZrr_Int:
6963 case X86::VCVTUSI642SHZrrb_Int:
6964 case X86::VCVTUSI642SHZrm_Int:
6967 return OpNum == 1 && !ForLoadFold;
6968 case X86::VCVTSD2SSrr:
6969 case X86::VCVTSD2SSrm:
6970 case X86::VCVTSD2SSrr_Int:
6971 case X86::VCVTSD2SSrm_Int:
6972 case X86::VCVTSS2SDrr:
6973 case X86::VCVTSS2SDrm:
6974 case X86::VCVTSS2SDrr_Int:
6975 case X86::VCVTSS2SDrm_Int:
6977 case X86::VRCPSSr_Int:
6979 case X86::VRCPSSm_Int:
6980 case X86::VROUNDSDri:
6981 case X86::VROUNDSDmi:
6982 case X86::VROUNDSDri_Int:
6983 case X86::VROUNDSDmi_Int:
6984 case X86::VROUNDSSri:
6985 case X86::VROUNDSSmi:
6986 case X86::VROUNDSSri_Int:
6987 case X86::VROUNDSSmi_Int:
6988 case X86::VRSQRTSSr:
6989 case X86::VRSQRTSSr_Int:
6990 case X86::VRSQRTSSm:
6991 case X86::VRSQRTSSm_Int:
6993 case X86::VSQRTSSr_Int:
6995 case X86::VSQRTSSm_Int:
6997 case X86::VSQRTSDr_Int:
6999 case X86::VSQRTSDm_Int:
7001 case X86::VCVTSD2SSZrr:
7002 case X86::VCVTSD2SSZrr_Int:
7003 case X86::VCVTSD2SSZrrb_Int:
7004 case X86::VCVTSD2SSZrm:
7005 case X86::VCVTSD2SSZrm_Int:
7006 case X86::VCVTSS2SDZrr:
7007 case X86::VCVTSS2SDZrr_Int:
7008 case X86::VCVTSS2SDZrrb_Int:
7009 case X86::VCVTSS2SDZrm:
7010 case X86::VCVTSS2SDZrm_Int:
7011 case X86::VGETEXPSDZr:
7012 case X86::VGETEXPSDZrb:
7013 case X86::VGETEXPSDZm:
7014 case X86::VGETEXPSSZr:
7015 case X86::VGETEXPSSZrb:
7016 case X86::VGETEXPSSZm:
7017 case X86::VGETMANTSDZrri:
7018 case X86::VGETMANTSDZrrib:
7019 case X86::VGETMANTSDZrmi:
7020 case X86::VGETMANTSSZrri:
7021 case X86::VGETMANTSSZrrib:
7022 case X86::VGETMANTSSZrmi:
7023 case X86::VRNDSCALESDZrri:
7024 case X86::VRNDSCALESDZrri_Int:
7025 case X86::VRNDSCALESDZrrib_Int:
7026 case X86::VRNDSCALESDZrmi:
7027 case X86::VRNDSCALESDZrmi_Int:
7028 case X86::VRNDSCALESSZrri:
7029 case X86::VRNDSCALESSZrri_Int:
7030 case X86::VRNDSCALESSZrrib_Int:
7031 case X86::VRNDSCALESSZrmi:
7032 case X86::VRNDSCALESSZrmi_Int:
7033 case X86::VRCP14SDZrr:
7034 case X86::VRCP14SDZrm:
7035 case X86::VRCP14SSZrr:
7036 case X86::VRCP14SSZrm:
7037 case X86::VRCPSHZrr:
7038 case X86::VRCPSHZrm:
7039 case X86::VRSQRTSHZrr:
7040 case X86::VRSQRTSHZrm:
7041 case X86::VREDUCESHZrmi:
7042 case X86::VREDUCESHZrri:
7043 case X86::VREDUCESHZrrib:
7044 case X86::VGETEXPSHZr:
7045 case X86::VGETEXPSHZrb:
7046 case X86::VGETEXPSHZm:
7047 case X86::VGETMANTSHZrri:
7048 case X86::VGETMANTSHZrrib:
7049 case X86::VGETMANTSHZrmi:
7050 case X86::VRNDSCALESHZrri:
7051 case X86::VRNDSCALESHZrri_Int:
7052 case X86::VRNDSCALESHZrrib_Int:
7053 case X86::VRNDSCALESHZrmi:
7054 case X86::VRNDSCALESHZrmi_Int:
7055 case X86::VSQRTSHZr:
7056 case X86::VSQRTSHZr_Int:
7057 case X86::VSQRTSHZrb_Int:
7058 case X86::VSQRTSHZm:
7059 case X86::VSQRTSHZm_Int:
7060 case X86::VRCP28SDZr:
7061 case X86::VRCP28SDZrb:
7062 case X86::VRCP28SDZm:
7063 case X86::VRCP28SSZr:
7064 case X86::VRCP28SSZrb:
7065 case X86::VRCP28SSZm:
7066 case X86::VREDUCESSZrmi:
7067 case X86::VREDUCESSZrri:
7068 case X86::VREDUCESSZrrib:
7069 case X86::VRSQRT14SDZrr:
7070 case X86::VRSQRT14SDZrm:
7071 case X86::VRSQRT14SSZrr:
7072 case X86::VRSQRT14SSZrm:
7073 case X86::VRSQRT28SDZr:
7074 case X86::VRSQRT28SDZrb:
7075 case X86::VRSQRT28SDZm:
7076 case X86::VRSQRT28SSZr:
7077 case X86::VRSQRT28SSZrb:
7078 case X86::VRSQRT28SSZm:
7079 case X86::VSQRTSSZr:
7080 case X86::VSQRTSSZr_Int:
7081 case X86::VSQRTSSZrb_Int:
7082 case X86::VSQRTSSZm:
7083 case X86::VSQRTSSZm_Int:
7084 case X86::VSQRTSDZr:
7085 case X86::VSQRTSDZr_Int:
7086 case X86::VSQRTSDZrb_Int:
7087 case X86::VSQRTSDZm:
7088 case X86::VSQRTSDZm_Int:
7089 case X86::VCVTSD2SHZrr:
7090 case X86::VCVTSD2SHZrr_Int:
7091 case X86::VCVTSD2SHZrrb_Int:
7092 case X86::VCVTSD2SHZrm:
7093 case X86::VCVTSD2SHZrm_Int:
7094 case X86::VCVTSS2SHZrr:
7095 case X86::VCVTSS2SHZrr_Int:
7096 case X86::VCVTSS2SHZrrb_Int:
7097 case X86::VCVTSS2SHZrm:
7098 case X86::VCVTSS2SHZrm_Int:
7099 case X86::VCVTSH2SDZrr:
7100 case X86::VCVTSH2SDZrr_Int:
7101 case X86::VCVTSH2SDZrrb_Int:
7102 case X86::VCVTSH2SDZrm:
7103 case X86::VCVTSH2SDZrm_Int:
7104 case X86::VCVTSH2SSZrr:
7105 case X86::VCVTSH2SSZrr_Int:
7106 case X86::VCVTSH2SSZrrb_Int:
7107 case X86::VCVTSH2SSZrm:
7108 case X86::VCVTSH2SSZrm_Int:
7110 case X86::VMOVSSZrrk:
7111 case X86::VMOVSDZrrk:
7112 return OpNum == 3 && !ForLoadFold;
7113 case X86::VMOVSSZrrkz:
7114 case X86::VMOVSDZrrkz:
7115 return OpNum == 2 && !ForLoadFold;
7147 Register Reg =
MI.getOperand(OpNum).getReg();
7149 if (
MI.killsRegister(Reg,
TRI))
7152 if (X86::VR128RegClass.
contains(Reg)) {
7155 unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
7159 MI.addRegisterKilled(Reg,
TRI,
true);
7160 }
else if (X86::VR256RegClass.
contains(Reg)) {
7163 Register XReg =
TRI->getSubReg(Reg, X86::sub_xmm);
7168 MI.addRegisterKilled(Reg,
TRI,
true);
7169 }
else if (X86::VR128XRegClass.
contains(Reg)) {
7171 if (!Subtarget.hasVLX())
7174 BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
get(X86::VPXORDZ128rr), Reg)
7177 MI.addRegisterKilled(Reg,
TRI,
true);
7178 }
else if (X86::VR256XRegClass.
contains(Reg) ||
7179 X86::VR512RegClass.
contains(Reg)) {
7181 if (!Subtarget.hasVLX())
7185 Register XReg =
TRI->getSubReg(Reg, X86::sub_xmm);
7186 BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
get(X86::VPXORDZ128rr), XReg)
7190 MI.addRegisterKilled(Reg,
TRI,
true);
7191 }
else if (X86::GR64RegClass.
contains(Reg)) {
7194 Register XReg =
TRI->getSubReg(Reg, X86::sub_32bit);
7199 MI.addRegisterKilled(Reg,
TRI,
true);
7200 }
else if (X86::GR32RegClass.
contains(Reg)) {
7204 MI.addRegisterKilled(Reg,
TRI,
true);
7205 }
else if ((X86::GR16RegClass.
contains(Reg) ||
7214 if (!
MI.definesRegister(SuperReg,
nullptr))
7220 int PtrOffset = 0) {
7221 unsigned NumAddrOps = MOs.
size();
7223 if (NumAddrOps < 4) {
7225 for (
unsigned i = 0; i != NumAddrOps; ++i)
7231 assert(MOs.
size() == 5 &&
"Unexpected memory operand list length");
7232 for (
unsigned i = 0; i != NumAddrOps; ++i) {
7234 if (i == 3 && PtrOffset != 0) {
7255 if (!
Reg.isVirtual())
7262 dbgs() <<
"WARNING: Unable to update register constraint for operand "
7263 << Idx <<
" of instruction:\n";
7277 MF.CreateMachineInstr(
TII.get(Opcode),
MI.getDebugLoc(),
true);
7282 unsigned NumOps =
MI.getDesc().getNumOperands() - 2;
7283 for (
unsigned i = 0; i !=
NumOps; ++i) {
7293 MBB->insert(InsertPt, NewMI);
7302 int PtrOffset = 0) {
7305 MF.CreateMachineInstr(
TII.get(Opcode),
MI.getDebugLoc(),
true);
7308 for (
unsigned i = 0, e =
MI.getNumOperands(); i != e; ++i) {
7311 assert(MO.
isReg() &&
"Expected to fold into reg operand!");
7325 MBB->insert(InsertPt, NewMI);
7335 MI.getDebugLoc(),
TII.get(Opcode));
7344 switch (
MI.getOpcode()) {
7345 case X86::INSERTPSrri:
7346 case X86::VINSERTPSrri:
7347 case X86::VINSERTPSZrri:
7351 unsigned Imm =
MI.getOperand(
MI.getNumOperands() - 1).getImm();
7352 unsigned ZMask =
Imm & 15;
7353 unsigned DstIdx = (
Imm >> 4) & 3;
7354 unsigned SrcIdx = (
Imm >> 6) & 3;
7357 const TargetRegisterClass *RC =
getRegClass(
MI.getDesc(), OpNum, &RI);
7358 unsigned RCSize =
TRI.getRegSizeInBits(*RC) / 8;
7359 if ((
Size == 0 ||
Size >= 16) && RCSize >= 16 &&
7360 (
MI.getOpcode() != X86::INSERTPSrri || Alignment >=
Align(4))) {
7361 int PtrOffset = SrcIdx * 4;
7362 unsigned NewImm = (DstIdx << 4) | ZMask;
7363 unsigned NewOpCode =
7364 (
MI.getOpcode() == X86::VINSERTPSZrri) ? X86::VINSERTPSZrmi
7365 : (
MI.getOpcode() == X86::VINSERTPSrri) ? X86::VINSERTPSrmi
7367 MachineInstr *NewMI =
7368 fuseInst(MF, NewOpCode, OpNum, MOs, InsertPt,
MI, *
this, PtrOffset);
7374 case X86::MOVHLPSrr:
7375 case X86::VMOVHLPSrr:
7376 case X86::VMOVHLPSZrr:
7382 const TargetRegisterClass *RC =
getRegClass(
MI.getDesc(), OpNum, &RI);
7383 unsigned RCSize =
TRI.getRegSizeInBits(*RC) / 8;
7384 if ((
Size == 0 ||
Size >= 16) && RCSize >= 16 && Alignment >=
Align(8)) {
7385 unsigned NewOpCode =
7386 (
MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm
7387 : (
MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm
7389 MachineInstr *NewMI =
7390 fuseInst(MF, NewOpCode, OpNum, MOs, InsertPt,
MI, *
this, 8);
7395 case X86::UNPCKLPDrr:
7401 const TargetRegisterClass *RC =
getRegClass(
MI.getDesc(), OpNum, &RI);
7402 unsigned RCSize =
TRI.getRegSizeInBits(*RC) / 8;
7403 if ((
Size == 0 ||
Size >= 16) && RCSize >= 16 && Alignment <
Align(16)) {
7404 MachineInstr *NewMI =
7405 fuseInst(MF, X86::MOVHPDrm, OpNum, MOs, InsertPt,
MI, *
this);
7412 makeM0Inst(*
this, (
Size == 4) ? X86::MOV32mi : X86::MOV64mi32, MOs,
7424 !
MI.getOperand(1).isReg())
7432 if (
MI.getOperand(1).isUndef())
7441 unsigned Idx1)
const {
7442 unsigned Idx2 = CommuteAnyOperandIndex;
7446 bool HasDef =
MI.getDesc().getNumDefs();
7448 Register Reg1 =
MI.getOperand(Idx1).getReg();
7449 Register Reg2 =
MI.getOperand(Idx2).getReg();
7450 bool Tied1 = 0 ==
MI.getDesc().getOperandConstraint(Idx1,
MCOI::TIED_TO);
7451 bool Tied2 = 0 ==
MI.getDesc().getOperandConstraint(Idx2,
MCOI::TIED_TO);
7455 if ((HasDef && Reg0 == Reg1 && Tied1) || (HasDef && Reg0 == Reg2 && Tied2))
7458 return commuteInstruction(
MI,
false, Idx1, Idx2) ? Idx2 : Idx1;
7463 dbgs() <<
"We failed to fuse operand " << Idx <<
" in " <<
MI;
7469 unsigned Size,
Align Alignment,
bool AllowCommute)
const {
7470 bool isSlowTwoMemOps = Subtarget.slowTwoMemOps();
7471 unsigned Opc =
MI.getOpcode();
7477 (
Opc == X86::CALL32r ||
Opc == X86::CALL64r ||
7478 Opc == X86::CALL64r_ImpCall ||
Opc == X86::PUSH16r ||
7479 Opc == X86::PUSH32r ||
Opc == X86::PUSH64r))
7488 unsigned NumOps =
MI.getDesc().getNumOperands();
7489 bool IsTwoAddr =
NumOps > 1 && OpNum < 2 &&
MI.getOperand(0).isReg() &&
7490 MI.getOperand(1).isReg() &&
7491 MI.getOperand(0).getReg() ==
MI.getOperand(1).getReg();
7495 if (
Opc == X86::ADD32ri &&
7504 Opc != X86::ADD64rr)
7509 if (
MI.isCall() &&
MI.getCFIType())
7513 if (
auto *CustomMI = foldMemoryOperandCustom(MF,
MI, OpNum, MOs, InsertPt,
7529 unsigned Opcode =
I->DstOp;
7533 bool NarrowToMOV32rm =
false;
7537 unsigned RCSize =
TRI.getRegSizeInBits(*RC) / 8;
7545 if (Opcode != X86::MOV64rm || RCSize != 8 ||
Size != 4)
7547 if (
MI.getOperand(0).getSubReg() ||
MI.getOperand(1).getSubReg())
7549 Opcode = X86::MOV32rm;
7550 NarrowToMOV32rm =
true;
7560 :
fuseInst(MF, Opcode, OpNum, MOs, InsertPt,
MI, *
this);
7562 if (NarrowToMOV32rm) {
7578 unsigned CommuteOpIdx2 = commuteOperandsForFold(
MI, OpNum);
7579 if (CommuteOpIdx2 == OpNum) {
7589 commuteInstruction(
MI,
false, OpNum, CommuteOpIdx2);
7611 for (
auto Op :
Ops) {
7616 if (
MI.getOpcode() == X86::MOV32r0 &&
SubReg == X86::sub_32bit)
7627 if (!RI.hasStackRealignment(MF))
7629 std::min(Alignment, Subtarget.getFrameLowering()->getStackAlign());
7634 Size, Alignment,
true);
7636 if (
Ops.size() == 2 &&
Ops[0] == 0 &&
Ops[1] == 1) {
7637 unsigned NewOpc = 0;
7638 unsigned RCSize = 0;
7639 unsigned Opc =
MI.getOpcode();
7646 NewOpc = X86::CMP8ri;
7650 NewOpc = X86::CMP16ri;
7654 NewOpc = X86::CMP32ri;
7658 NewOpc = X86::CMP64ri32;
7667 MI.setDesc(
get(NewOpc));
7668 MI.getOperand(1).ChangeToImmediate(0);
7669 }
else if (
Ops.size() != 1)
7697 unsigned RegSize =
TRI.getRegSizeInBits(*RC);
7699 if ((
Opc == X86::MOVSSrm ||
Opc == X86::VMOVSSrm ||
Opc == X86::VMOVSSZrm ||
7700 Opc == X86::MOVSSrm_alt ||
Opc == X86::VMOVSSrm_alt ||
7701 Opc == X86::VMOVSSZrm_alt) &&
7707 case X86::CVTSS2SDrr_Int:
7708 case X86::VCVTSS2SDrr_Int:
7709 case X86::VCVTSS2SDZrr_Int:
7710 case X86::VCVTSS2SDZrrk_Int:
7711 case X86::VCVTSS2SDZrrkz_Int:
7712 case X86::CVTSS2SIrr_Int:
7713 case X86::CVTSS2SI64rr_Int:
7714 case X86::VCVTSS2SIrr_Int:
7715 case X86::VCVTSS2SI64rr_Int:
7716 case X86::VCVTSS2SIZrr_Int:
7717 case X86::VCVTSS2SI64Zrr_Int:
7718 case X86::CVTTSS2SIrr_Int:
7719 case X86::CVTTSS2SI64rr_Int:
7720 case X86::VCVTTSS2SIrr_Int:
7721 case X86::VCVTTSS2SI64rr_Int:
7722 case X86::VCVTTSS2SIZrr_Int:
7723 case X86::VCVTTSS2SI64Zrr_Int:
7724 case X86::VCVTSS2USIZrr_Int:
7725 case X86::VCVTSS2USI64Zrr_Int:
7726 case X86::VCVTTSS2USIZrr_Int:
7727 case X86::VCVTTSS2USI64Zrr_Int:
7728 case X86::RCPSSr_Int:
7729 case X86::VRCPSSr_Int:
7730 case X86::RSQRTSSr_Int:
7731 case X86::VRSQRTSSr_Int:
7732 case X86::ROUNDSSri_Int:
7733 case X86::VROUNDSSri_Int:
7734 case X86::COMISSrr_Int:
7735 case X86::VCOMISSrr_Int:
7736 case X86::VCOMISSZrr_Int:
7737 case X86::UCOMISSrr_Int:
7738 case X86::VUCOMISSrr_Int:
7739 case X86::VUCOMISSZrr_Int:
7740 case X86::ADDSSrr_Int:
7741 case X86::VADDSSrr_Int:
7742 case X86::VADDSSZrr_Int:
7743 case X86::CMPSSrri_Int:
7744 case X86::VCMPSSrri_Int:
7745 case X86::VCMPSSZrri_Int:
7746 case X86::DIVSSrr_Int:
7747 case X86::VDIVSSrr_Int:
7748 case X86::VDIVSSZrr_Int:
7749 case X86::MAXSSrr_Int:
7750 case X86::VMAXSSrr_Int:
7751 case X86::VMAXSSZrr_Int:
7752 case X86::MINSSrr_Int:
7753 case X86::VMINSSrr_Int:
7754 case X86::VMINSSZrr_Int:
7755 case X86::MULSSrr_Int:
7756 case X86::VMULSSrr_Int:
7757 case X86::VMULSSZrr_Int:
7758 case X86::SQRTSSr_Int:
7759 case X86::VSQRTSSr_Int:
7760 case X86::VSQRTSSZr_Int:
7761 case X86::SUBSSrr_Int:
7762 case X86::VSUBSSrr_Int:
7763 case X86::VSUBSSZrr_Int:
7764 case X86::VADDSSZrrk_Int:
7765 case X86::VADDSSZrrkz_Int:
7766 case X86::VCMPSSZrrik_Int:
7767 case X86::VDIVSSZrrk_Int:
7768 case X86::VDIVSSZrrkz_Int:
7769 case X86::VMAXSSZrrk_Int:
7770 case X86::VMAXSSZrrkz_Int:
7771 case X86::VMINSSZrrk_Int:
7772 case X86::VMINSSZrrkz_Int:
7773 case X86::VMULSSZrrk_Int:
7774 case X86::VMULSSZrrkz_Int:
7775 case X86::VSQRTSSZrk_Int:
7776 case X86::VSQRTSSZrkz_Int:
7777 case X86::VSUBSSZrrk_Int:
7778 case X86::VSUBSSZrrkz_Int:
7779 case X86::VFMADDSS4rr_Int:
7780 case X86::VFNMADDSS4rr_Int:
7781 case X86::VFMSUBSS4rr_Int:
7782 case X86::VFNMSUBSS4rr_Int:
7783 case X86::VFMADD132SSr_Int:
7784 case X86::VFNMADD132SSr_Int:
7785 case X86::VFMADD213SSr_Int:
7786 case X86::VFNMADD213SSr_Int:
7787 case X86::VFMADD231SSr_Int:
7788 case X86::VFNMADD231SSr_Int:
7789 case X86::VFMSUB132SSr_Int:
7790 case X86::VFNMSUB132SSr_Int:
7791 case X86::VFMSUB213SSr_Int:
7792 case X86::VFNMSUB213SSr_Int:
7793 case X86::VFMSUB231SSr_Int:
7794 case X86::VFNMSUB231SSr_Int:
7795 case X86::VFMADD132SSZr_Int:
7796 case X86::VFNMADD132SSZr_Int:
7797 case X86::VFMADD213SSZr_Int:
7798 case X86::VFNMADD213SSZr_Int:
7799 case X86::VFMADD231SSZr_Int:
7800 case X86::VFNMADD231SSZr_Int:
7801 case X86::VFMSUB132SSZr_Int:
7802 case X86::VFNMSUB132SSZr_Int:
7803 case X86::VFMSUB213SSZr_Int:
7804 case X86::VFNMSUB213SSZr_Int:
7805 case X86::VFMSUB231SSZr_Int:
7806 case X86::VFNMSUB231SSZr_Int:
7807 case X86::VFMADD132SSZrk_Int:
7808 case X86::VFNMADD132SSZrk_Int:
7809 case X86::VFMADD213SSZrk_Int:
7810 case X86::VFNMADD213SSZrk_Int:
7811 case X86::VFMADD231SSZrk_Int:
7812 case X86::VFNMADD231SSZrk_Int:
7813 case X86::VFMSUB132SSZrk_Int:
7814 case X86::VFNMSUB132SSZrk_Int:
7815 case X86::VFMSUB213SSZrk_Int:
7816 case X86::VFNMSUB213SSZrk_Int:
7817 case X86::VFMSUB231SSZrk_Int:
7818 case X86::VFNMSUB231SSZrk_Int:
7819 case X86::VFMADD132SSZrkz_Int:
7820 case X86::VFNMADD132SSZrkz_Int:
7821 case X86::VFMADD213SSZrkz_Int:
7822 case X86::VFNMADD213SSZrkz_Int:
7823 case X86::VFMADD231SSZrkz_Int:
7824 case X86::VFNMADD231SSZrkz_Int:
7825 case X86::VFMSUB132SSZrkz_Int:
7826 case X86::VFNMSUB132SSZrkz_Int:
7827 case X86::VFMSUB213SSZrkz_Int:
7828 case X86::VFNMSUB213SSZrkz_Int:
7829 case X86::VFMSUB231SSZrkz_Int:
7830 case X86::VFNMSUB231SSZrkz_Int:
7831 case X86::VFIXUPIMMSSZrri:
7832 case X86::VFIXUPIMMSSZrrik:
7833 case X86::VFIXUPIMMSSZrrikz:
7834 case X86::VFPCLASSSSZri:
7835 case X86::VFPCLASSSSZrik:
7836 case X86::VGETEXPSSZr:
7837 case X86::VGETEXPSSZrk:
7838 case X86::VGETEXPSSZrkz:
7839 case X86::VGETMANTSSZrri:
7840 case X86::VGETMANTSSZrrik:
7841 case X86::VGETMANTSSZrrikz:
7842 case X86::VRANGESSZrri:
7843 case X86::VRANGESSZrrik:
7844 case X86::VRANGESSZrrikz:
7845 case X86::VRCP14SSZrr:
7846 case X86::VRCP14SSZrrk:
7847 case X86::VRCP14SSZrrkz:
7848 case X86::VRCP28SSZr:
7849 case X86::VRCP28SSZrk:
7850 case X86::VRCP28SSZrkz:
7851 case X86::VREDUCESSZrri:
7852 case X86::VREDUCESSZrrik:
7853 case X86::VREDUCESSZrrikz:
7854 case X86::VRNDSCALESSZrri_Int:
7855 case X86::VRNDSCALESSZrrik_Int:
7856 case X86::VRNDSCALESSZrrikz_Int:
7857 case X86::VRSQRT14SSZrr:
7858 case X86::VRSQRT14SSZrrk:
7859 case X86::VRSQRT14SSZrrkz:
7860 case X86::VRSQRT28SSZr:
7861 case X86::VRSQRT28SSZrk:
7862 case X86::VRSQRT28SSZrkz:
7863 case X86::VSCALEFSSZrr:
7864 case X86::VSCALEFSSZrrk:
7865 case X86::VSCALEFSSZrrkz:
7872 if ((
Opc == X86::MOVSDrm ||
Opc == X86::VMOVSDrm ||
Opc == X86::VMOVSDZrm ||
7873 Opc == X86::MOVSDrm_alt ||
Opc == X86::VMOVSDrm_alt ||
7874 Opc == X86::VMOVSDZrm_alt) &&
7880 case X86::CVTSD2SSrr_Int:
7881 case X86::VCVTSD2SSrr_Int:
7882 case X86::VCVTSD2SSZrr_Int:
7883 case X86::VCVTSD2SSZrrk_Int:
7884 case X86::VCVTSD2SSZrrkz_Int:
7885 case X86::CVTSD2SIrr_Int:
7886 case X86::CVTSD2SI64rr_Int:
7887 case X86::VCVTSD2SIrr_Int:
7888 case X86::VCVTSD2SI64rr_Int:
7889 case X86::VCVTSD2SIZrr_Int:
7890 case X86::VCVTSD2SI64Zrr_Int:
7891 case X86::CVTTSD2SIrr_Int:
7892 case X86::CVTTSD2SI64rr_Int:
7893 case X86::VCVTTSD2SIrr_Int:
7894 case X86::VCVTTSD2SI64rr_Int:
7895 case X86::VCVTTSD2SIZrr_Int:
7896 case X86::VCVTTSD2SI64Zrr_Int:
7897 case X86::VCVTSD2USIZrr_Int:
7898 case X86::VCVTSD2USI64Zrr_Int:
7899 case X86::VCVTTSD2USIZrr_Int:
7900 case X86::VCVTTSD2USI64Zrr_Int:
7901 case X86::ROUNDSDri_Int:
7902 case X86::VROUNDSDri_Int:
7903 case X86::COMISDrr_Int:
7904 case X86::VCOMISDrr_Int:
7905 case X86::VCOMISDZrr_Int:
7906 case X86::UCOMISDrr_Int:
7907 case X86::VUCOMISDrr_Int:
7908 case X86::VUCOMISDZrr_Int:
7909 case X86::ADDSDrr_Int:
7910 case X86::VADDSDrr_Int:
7911 case X86::VADDSDZrr_Int:
7912 case X86::CMPSDrri_Int:
7913 case X86::VCMPSDrri_Int:
7914 case X86::VCMPSDZrri_Int:
7915 case X86::DIVSDrr_Int:
7916 case X86::VDIVSDrr_Int:
7917 case X86::VDIVSDZrr_Int:
7918 case X86::MAXSDrr_Int:
7919 case X86::VMAXSDrr_Int:
7920 case X86::VMAXSDZrr_Int:
7921 case X86::MINSDrr_Int:
7922 case X86::VMINSDrr_Int:
7923 case X86::VMINSDZrr_Int:
7924 case X86::MULSDrr_Int:
7925 case X86::VMULSDrr_Int:
7926 case X86::VMULSDZrr_Int:
7927 case X86::SQRTSDr_Int:
7928 case X86::VSQRTSDr_Int:
7929 case X86::VSQRTSDZr_Int:
7930 case X86::SUBSDrr_Int:
7931 case X86::VSUBSDrr_Int:
7932 case X86::VSUBSDZrr_Int:
7933 case X86::VADDSDZrrk_Int:
7934 case X86::VADDSDZrrkz_Int:
7935 case X86::VCMPSDZrrik_Int:
7936 case X86::VDIVSDZrrk_Int:
7937 case X86::VDIVSDZrrkz_Int:
7938 case X86::VMAXSDZrrk_Int:
7939 case X86::VMAXSDZrrkz_Int:
7940 case X86::VMINSDZrrk_Int:
7941 case X86::VMINSDZrrkz_Int:
7942 case X86::VMULSDZrrk_Int:
7943 case X86::VMULSDZrrkz_Int:
7944 case X86::VSQRTSDZrk_Int:
7945 case X86::VSQRTSDZrkz_Int:
7946 case X86::VSUBSDZrrk_Int:
7947 case X86::VSUBSDZrrkz_Int:
7948 case X86::VFMADDSD4rr_Int:
7949 case X86::VFNMADDSD4rr_Int:
7950 case X86::VFMSUBSD4rr_Int:
7951 case X86::VFNMSUBSD4rr_Int:
7952 case X86::VFMADD132SDr_Int:
7953 case X86::VFNMADD132SDr_Int:
7954 case X86::VFMADD213SDr_Int:
7955 case X86::VFNMADD213SDr_Int:
7956 case X86::VFMADD231SDr_Int:
7957 case X86::VFNMADD231SDr_Int:
7958 case X86::VFMSUB132SDr_Int:
7959 case X86::VFNMSUB132SDr_Int:
7960 case X86::VFMSUB213SDr_Int:
7961 case X86::VFNMSUB213SDr_Int:
7962 case X86::VFMSUB231SDr_Int:
7963 case X86::VFNMSUB231SDr_Int:
7964 case X86::VFMADD132SDZr_Int:
7965 case X86::VFNMADD132SDZr_Int:
7966 case X86::VFMADD213SDZr_Int:
7967 case X86::VFNMADD213SDZr_Int:
7968 case X86::VFMADD231SDZr_Int:
7969 case X86::VFNMADD231SDZr_Int:
7970 case X86::VFMSUB132SDZr_Int:
7971 case X86::VFNMSUB132SDZr_Int:
7972 case X86::VFMSUB213SDZr_Int:
7973 case X86::VFNMSUB213SDZr_Int:
7974 case X86::VFMSUB231SDZr_Int:
7975 case X86::VFNMSUB231SDZr_Int:
7976 case X86::VFMADD132SDZrk_Int:
7977 case X86::VFNMADD132SDZrk_Int:
7978 case X86::VFMADD213SDZrk_Int:
7979 case X86::VFNMADD213SDZrk_Int:
7980 case X86::VFMADD231SDZrk_Int:
7981 case X86::VFNMADD231SDZrk_Int:
7982 case X86::VFMSUB132SDZrk_Int:
7983 case X86::VFNMSUB132SDZrk_Int:
7984 case X86::VFMSUB213SDZrk_Int:
7985 case X86::VFNMSUB213SDZrk_Int:
7986 case X86::VFMSUB231SDZrk_Int:
7987 case X86::VFNMSUB231SDZrk_Int:
7988 case X86::VFMADD132SDZrkz_Int:
7989 case X86::VFNMADD132SDZrkz_Int:
7990 case X86::VFMADD213SDZrkz_Int:
7991 case X86::VFNMADD213SDZrkz_Int:
7992 case X86::VFMADD231SDZrkz_Int:
7993 case X86::VFNMADD231SDZrkz_Int:
7994 case X86::VFMSUB132SDZrkz_Int:
7995 case X86::VFNMSUB132SDZrkz_Int:
7996 case X86::VFMSUB213SDZrkz_Int:
7997 case X86::VFNMSUB213SDZrkz_Int:
7998 case X86::VFMSUB231SDZrkz_Int:
7999 case X86::VFNMSUB231SDZrkz_Int:
8000 case X86::VFIXUPIMMSDZrri:
8001 case X86::VFIXUPIMMSDZrrik:
8002 case X86::VFIXUPIMMSDZrrikz:
8003 case X86::VFPCLASSSDZri:
8004 case X86::VFPCLASSSDZrik:
8005 case X86::VGETEXPSDZr:
8006 case X86::VGETEXPSDZrk:
8007 case X86::VGETEXPSDZrkz:
8008 case X86::VGETMANTSDZrri:
8009 case X86::VGETMANTSDZrrik:
8010 case X86::VGETMANTSDZrrikz:
8011 case X86::VRANGESDZrri:
8012 case X86::VRANGESDZrrik:
8013 case X86::VRANGESDZrrikz:
8014 case X86::VRCP14SDZrr:
8015 case X86::VRCP14SDZrrk:
8016 case X86::VRCP14SDZrrkz:
8017 case X86::VRCP28SDZr:
8018 case X86::VRCP28SDZrk:
8019 case X86::VRCP28SDZrkz:
8020 case X86::VREDUCESDZrri:
8021 case X86::VREDUCESDZrrik:
8022 case X86::VREDUCESDZrrikz:
8023 case X86::VRNDSCALESDZrri_Int:
8024 case X86::VRNDSCALESDZrrik_Int:
8025 case X86::VRNDSCALESDZrrikz_Int:
8026 case X86::VRSQRT14SDZrr:
8027 case X86::VRSQRT14SDZrrk:
8028 case X86::VRSQRT14SDZrrkz:
8029 case X86::VRSQRT28SDZr:
8030 case X86::VRSQRT28SDZrk:
8031 case X86::VRSQRT28SDZrkz:
8032 case X86::VSCALEFSDZrr:
8033 case X86::VSCALEFSDZrrk:
8034 case X86::VSCALEFSDZrrkz:
8041 if ((
Opc == X86::VMOVSHZrm ||
Opc == X86::VMOVSHZrm_alt) &&
RegSize > 16) {
8046 case X86::VADDSHZrr_Int:
8047 case X86::VCMPSHZrri_Int:
8048 case X86::VDIVSHZrr_Int:
8049 case X86::VMAXSHZrr_Int:
8050 case X86::VMINSHZrr_Int:
8051 case X86::VMULSHZrr_Int:
8052 case X86::VSUBSHZrr_Int:
8053 case X86::VADDSHZrrk_Int:
8054 case X86::VADDSHZrrkz_Int:
8055 case X86::VCMPSHZrrik_Int:
8056 case X86::VDIVSHZrrk_Int:
8057 case X86::VDIVSHZrrkz_Int:
8058 case X86::VMAXSHZrrk_Int:
8059 case X86::VMAXSHZrrkz_Int:
8060 case X86::VMINSHZrrk_Int:
8061 case X86::VMINSHZrrkz_Int:
8062 case X86::VMULSHZrrk_Int:
8063 case X86::VMULSHZrrkz_Int:
8064 case X86::VSUBSHZrrk_Int:
8065 case X86::VSUBSHZrrkz_Int:
8066 case X86::VFMADD132SHZr_Int:
8067 case X86::VFNMADD132SHZr_Int:
8068 case X86::VFMADD213SHZr_Int:
8069 case X86::VFNMADD213SHZr_Int:
8070 case X86::VFMADD231SHZr_Int:
8071 case X86::VFNMADD231SHZr_Int:
8072 case X86::VFMSUB132SHZr_Int:
8073 case X86::VFNMSUB132SHZr_Int:
8074 case X86::VFMSUB213SHZr_Int:
8075 case X86::VFNMSUB213SHZr_Int:
8076 case X86::VFMSUB231SHZr_Int:
8077 case X86::VFNMSUB231SHZr_Int:
8078 case X86::VFMADD132SHZrk_Int:
8079 case X86::VFNMADD132SHZrk_Int:
8080 case X86::VFMADD213SHZrk_Int:
8081 case X86::VFNMADD213SHZrk_Int:
8082 case X86::VFMADD231SHZrk_Int:
8083 case X86::VFNMADD231SHZrk_Int:
8084 case X86::VFMSUB132SHZrk_Int:
8085 case X86::VFNMSUB132SHZrk_Int:
8086 case X86::VFMSUB213SHZrk_Int:
8087 case X86::VFNMSUB213SHZrk_Int:
8088 case X86::VFMSUB231SHZrk_Int:
8089 case X86::VFNMSUB231SHZrk_Int:
8090 case X86::VFMADD132SHZrkz_Int:
8091 case X86::VFNMADD132SHZrkz_Int:
8092 case X86::VFMADD213SHZrkz_Int:
8093 case X86::VFNMADD213SHZrkz_Int:
8094 case X86::VFMADD231SHZrkz_Int:
8095 case X86::VFNMADD231SHZrkz_Int:
8096 case X86::VFMSUB132SHZrkz_Int:
8097 case X86::VFNMSUB132SHZrkz_Int:
8098 case X86::VFMSUB213SHZrkz_Int:
8099 case X86::VFNMSUB213SHZrkz_Int:
8100 case X86::VFMSUB231SHZrkz_Int:
8101 case X86::VFNMSUB231SHZrkz_Int:
8125 return RC == &X86::VK2WMRegClass || RC == &X86::VK4WMRegClass ||
8126 RC == &X86::VK8WMRegClass || RC == &X86::VK16WMRegClass ||
8127 RC == &X86::VK32WMRegClass || RC == &X86::VK64WMRegClass;
8136 bool HasSameMask = false;
8137 for (unsigned I = 1, E = MI.getDesc().getNumOperands(); I < E; ++I) {
8139 if (Op.isReg() && Op.getReg() == MaskReg) {
8151 for (auto Op : Ops) {
8152 if (MI.getOperand(Op).getSubReg())
8189 case X86::AVX512_512_SET0:
8190 case X86::AVX512_512_SETALLONES:
8191 Alignment = Align(64);
8193 case X86::AVX2_SETALLONES:
8194 case X86::AVX1_SETALLONES:
8196 case X86::AVX512_256_SET0:
8197 Alignment = Align(32);
8200 case X86::V_SETALLONES:
8201 case X86::AVX512_128_SET0:
8202 case X86::FsFLD0F128:
8203 case X86::AVX512_FsFLD0F128:
8204 Alignment = Align(16);
8208 case X86::AVX512_FsFLD0SD:
8209 Alignment = Align(8);
8212 case X86::AVX512_FsFLD0SS:
8213 Alignment = Align(4);
8216 case X86::AVX512_FsFLD0SH:
8217 Alignment = Align(2);
8222 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
8223 unsigned NewOpc = 0;
8224 switch (MI.getOpcode()) {
8228 NewOpc = X86::CMP8ri;
8231 NewOpc = X86::CMP16ri;
8234 NewOpc = X86::CMP32ri;
8237 NewOpc = X86::CMP64ri32;
8241 MI.setDesc(get(NewOpc));
8242 MI.getOperand(1).ChangeToImmediate(0);
8243 } else if (Ops.size() != 1)
8255 case X86::V_SETALLONES:
8256 case X86::AVX2_SETALLONES:
8257 case X86::AVX1_SETALLONES:
8259 case X86::AVX512_128_SET0:
8260 case X86::AVX512_256_SET0:
8261 case X86::AVX512_512_SET0:
8262 case X86::AVX512_512_SETALLONES:
8264 case X86::AVX512_FsFLD0SH:
8266 case X86::AVX512_FsFLD0SD:
8268 case X86::AVX512_FsFLD0SS:
8269 case X86::FsFLD0F128:
8270 case X86::AVX512_FsFLD0F128: {
8279 unsigned PICBase = 0;
8282 if (Subtarget.is64Bit()) {
8295 bool IsAllOnes = false;
8298 case X86::AVX512_FsFLD0SS:
8302 case X86::AVX512_FsFLD0SD:
8305 case X86::FsFLD0F128:
8306 case X86::AVX512_FsFLD0F128:
8310 case X86::AVX512_FsFLD0SH:
8313 case X86::AVX512_512_SETALLONES:
8316 case X86::AVX512_512_SET0:
8320 case X86::AVX1_SETALLONES:
8321 case X86::AVX2_SETALLONES:
8324 case X86::AVX512_256_SET0:
8334 case X86::V_SETALLONES:
8338 case X86::AVX512_128_SET0:
8356 case X86::VPBROADCASTBZ128rm:
8357 case X86::VPBROADCASTBZ256rm:
8358 case X86::VPBROADCASTBZrm:
8359 case X86::VBROADCASTF32X2Z256rm:
8360 case X86::VBROADCASTF32X2Zrm:
8361 case X86::VBROADCASTI32X2Z128rm:
8362 case X86::VBROADCASTI32X2Z256rm:
8363 case X86::VBROADCASTI32X2Zrm:
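// Sketch of intent (not a full contract): the FOLD_BROADCAST macro below
// appends the five memory-address operands (X86::AddrNumOperands) of the
// broadcast load and retries the fold through foldMemoryBroadcast with the
// broadcast element width, so an instruction consuming a splatted load can
// be rewritten into its {1toN} broadcast-memory form.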
8367#define FOLD_BROADCAST(SIZE) \
8368 MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands, \
8369 LoadMI.operands_begin() + NumOps); \
8370 return foldMemoryBroadcast(MF, MI, Ops[0], MOs, InsertPt, SIZE, \
8372 case X86::VPBROADCASTWZ128rm:
8373 case X86::VPBROADCASTWZ256rm:
8374 case X86::VPBROADCASTWZrm:
8376 case X86::VPBROADCASTDZ128rm:
8377 case X86::VPBROADCASTDZ256rm:
8378 case X86::VPBROADCASTDZrm:
8379 case X86::VBROADCASTSSZ128rm:
8380 case X86::VBROADCASTSSZ256rm:
8381 case X86::VBROADCASTSSZrm:
8383 case X86::VPBROADCASTQZ128rm:
8384 case X86::VPBROADCASTQZ256rm:
8385 case X86::VPBROADCASTQZrm:
8386 case X86::VBROADCASTSDZ256rm:
8387 case X86::VBROADCASTSDZrm:
8400 0, Alignment, true);
8407 unsigned BitsSize, bool AllowCommute) const {
8411 ? fuseInst(MF, I->DstOp, OpNum, MOs, InsertPt, MI, *this)
8417 unsigned CommuteOpIdx2 = commuteOperandsForFold(MI, OpNum);
8418 if (CommuteOpIdx2 == OpNum) {
8423 foldMemoryBroadcast(MF, MI, CommuteOpIdx2, MOs, InsertPt, BitsSize,
8428 commuteInstruction(MI, false, OpNum, CommuteOpIdx2);
8443 if (!MMO->isStore()) {
8461 if (!MMO->isStore())
8464 if (!MMO->isLoad()) {
8482 assert((SpillSize == 64 || STI.hasVLX()) &&
8483 "Can't broadcast less than 64 bytes without AVX512VL!");
8485#define CASE_BCAST_TYPE_OPC(TYPE, OP16, OP32, OP64) \
8487 switch (SpillSize) { \
8489 llvm_unreachable("Unknown spill size"); \
8523 unsigned Opc = I->DstOp;
8527 if (UnfoldLoad && !FoldedLoad)
8529 UnfoldLoad &= FoldedLoad;
8530 if (UnfoldStore && !FoldedStore)
8532 UnfoldStore &= FoldedStore;
8539 if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
8540 Subtarget.isUnalignedMem16Slow())
8549 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
8553 else if (Op.isReg() && Op.isImplicit())
8569 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
8570 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
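// A reload counts as aligned only when its memory operand guarantees at
// least the spill alignment (minimum 16 bytes); this flag picks between the
// aligned and unaligned load opcode for the register class.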
8614 case X86::CMP64ri32:
8625 case X86::CMP64ri32:
8626 NewOpc = X86::TEST64rr;
8629 NewOpc = X86::TEST32rr;
8632 NewOpc = X86::TEST16rr;
8635 NewOpc = X86::TEST8rr;
8649 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16);
8650 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8666 if (!N->isMachineOpcode())
8672 unsigned Opc = I->DstOp;
8680 unsigned NumDefs = MCID.NumDefs;
8681 std::vector<SDValue> AddrOps;
8682 std::vector<SDValue> BeforeOps;
8683 std::vector<SDValue> AfterOps;
8685 unsigned NumOps = N->getNumOperands();
8686 for (unsigned i = 0; i != NumOps - 1; ++i) {
8689 AddrOps.push_back(Op);
8690 else if (i < Index - NumDefs)
8691 BeforeOps.push_back(Op);
8692 else if (i > Index - NumDefs)
8693 AfterOps.push_back(Op);
8696 AddrOps.push_back(Chain);
8701 EVT VT = *TRI.legalclasstypes_begin(*RC);
8703 if (MMOs.empty() && RC == &X86::VR128RegClass &&
8704 Subtarget.isUnalignedMem16Slow())
8714 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
8715 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8727 std::vector<EVT> VTs;
8729 if (MCID.getNumDefs() > 0) {
8731 VTs.push_back(*TRI.legalclasstypes_begin(*DstRC));
8733 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
8734 EVT VT = N->getValueType(i);
8735 if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs())
8739 BeforeOps.push_back(SDValue(Load, 0));
8745 case X86::CMP64ri32:
8753 case X86::CMP64ri32:
8754 Opc = X86::TEST64rr;
8757 Opc = X86::TEST32rr;
8760 Opc = X86::TEST16rr;
8766 BeforeOps[1] = BeforeOps[0];
8775 AddrOps.push_back(SDValue(NewNode, 0));
8776 AddrOps.push_back(Chain);
8778 if (MMOs.empty() && RC == &X86::VR128RegClass &&
8779 Subtarget.isUnalignedMem16Slow())
8784 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
8785 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8788 dl, MVT::Other, AddrOps);
8801 unsigned *LoadRegIndex) const {
8807 if (UnfoldLoad && !FoldedLoad)
8809 if (UnfoldStore && !FoldedStore)
8818 int64_t &Offset2) const {
8822 auto IsLoadOpcode = [&](unsigned Opcode) {
8834 case X86::MOVSSrm_alt:
8836 case X86::MOVSDrm_alt:
8837 case X86::MMX_MOVD64rm:
8838 case X86::MMX_MOVQ64rm:
8847 case X86::VMOVSSrm_alt:
8849 case X86::VMOVSDrm_alt:
8850 case X86::VMOVAPSrm:
8851 case X86::VMOVUPSrm:
8852 case X86::VMOVAPDrm:
8853 case X86::VMOVUPDrm:
8854 case X86::VMOVDQArm:
8855 case X86::VMOVDQUrm:
8856 case X86::VMOVAPSYrm:
8857 case X86::VMOVUPSYrm:
8858 case X86::VMOVAPDYrm:
8859 case X86::VMOVUPDYrm:
8860 case X86::VMOVDQAYrm:
8861 case X86::VMOVDQUYrm:
8863 case X86::VMOVSSZrm:
8864 case X86::VMOVSSZrm_alt:
8865 case X86::VMOVSDZrm:
8866 case X86::VMOVSDZrm_alt:
8867 case X86::VMOVAPSZ128rm:
8868 case X86::VMOVUPSZ128rm:
8869 case X86::VMOVAPSZ128rm_NOVLX:
8870 case X86::VMOVUPSZ128rm_NOVLX:
8871 case X86::VMOVAPDZ128rm:
8872 case X86::VMOVUPDZ128rm:
8873 case X86::VMOVDQU8Z128rm:
8874 case X86::VMOVDQU16Z128rm:
8875 case X86::VMOVDQA32Z128rm:
8876 case X86::VMOVDQU32Z128rm:
8877 case X86::VMOVDQA64Z128rm:
8878 case X86::VMOVDQU64Z128rm:
8879 case X86::VMOVAPSZ256rm:
8880 case X86::VMOVUPSZ256rm:
8881 case X86::VMOVAPSZ256rm_NOVLX:
8882 case X86::VMOVUPSZ256rm_NOVLX:
8883 case X86::VMOVAPDZ256rm:
8884 case X86::VMOVUPDZ256rm:
8885 case X86::VMOVDQU8Z256rm:
8886 case X86::VMOVDQU16Z256rm:
8887 case X86::VMOVDQA32Z256rm:
8888 case X86::VMOVDQU32Z256rm:
8889 case X86::VMOVDQA64Z256rm:
8890 case X86::VMOVDQU64Z256rm:
8891 case X86::VMOVAPSZrm:
8892 case X86::VMOVUPSZrm:
8893 case X86::VMOVAPDZrm:
8894 case X86::VMOVUPDZrm:
8895 case X86::VMOVDQU8Zrm:
8896 case X86::VMOVDQU16Zrm:
8897 case X86::VMOVDQA32Zrm:
8898 case X86::VMOVDQU32Zrm:
8899 case X86::VMOVDQA64Zrm:
8900 case X86::VMOVDQU64Zrm:
8902 case X86::KMOVBkm_EVEX:
8904 case X86::KMOVWkm_EVEX:
8906 case X86::KMOVDkm_EVEX:
8908 case X86::KMOVQkm_EVEX:
8918 auto HasSameOp = [&](int I) {
8934 if (!Disp1 || !Disp2)
8937 Offset1 = Disp1->getSExtValue();
8938 Offset2 = Disp2->getSExtValue();
8943 int64_t Offset1, int64_t Offset2,
8944 unsigned NumLoads) const {
8945 assert(Offset2 > Offset1);
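// Heuristic sketch: loads whose displacements differ by more than
// 64 * 8 = 512 bytes are unlikely to be close enough in memory to profit
// from being scheduled together.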
8946 if ((Offset2 - Offset1) / 8 > 64)
8960 case X86::MMX_MOVD64rm:
8961 case X86::MMX_MOVQ64rm:
8970 if (Subtarget.is64Bit()) {
8973 } else if (NumLoads) {
8996 unsigned Opcode = MI.getOpcode();
8997 if (Opcode == X86::ENDBR64 || Opcode == X86::ENDBR32 ||
8998 Opcode == X86::PLDTILECFGV)
9011 assert(Cond.size() == 1 && "Invalid X86 branch condition!");
9021 return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass ||
9022 RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass ||
9023 RC == &X86::RFP80RegClass);
9036 return GlobalBaseReg;
9041 GlobalBaseReg = RegInfo.createVirtualRegister(
9042 Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass);
9044 return GlobalBaseReg;
9052 for (const uint16_t(&Row)[3] : Table)
9053 if (Row[domain - 1] == opcode)
9061 for (const uint16_t(&Row)[4] : Table)
9062 if (Row[domain - 1] == opcode || (domain == 3 && Row[3] == opcode))
9069 unsigned NewWidth, unsigned *pNewMask = nullptr) {
9070 assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) &&
9071 "Illegal blend mask scale");
9072 unsigned NewMask = 0;
9074 if ((OldWidth % NewWidth) == 0) {
9075 unsigned Scale = OldWidth / NewWidth;
9076 unsigned SubMask = (1u << Scale) - 1;
9077 for (unsigned i = 0; i != NewWidth; ++i) {
9078 unsigned Sub = (OldMask >> (i * Scale)) & SubMask;
9080 NewMask |= (1u << i);
9081 else if (Sub != 0x0)
9085 unsigned Scale = NewWidth / OldWidth;
9086 unsigned SubMask = (1u << Scale) - 1;
9087 for (unsigned i = 0; i != OldWidth; ++i) {
9088 if (OldMask & (1 << i)) {
9089 NewMask |= (SubMask << (i * Scale));
9095 *pNewMask = NewMask;
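// Worked example (illustrative): scaling the 8-lane mask 0b00110011 down to
// 4 lanes (Scale = 2, SubMask = 0b11) reads the lane groups 11,00,11,00 and
// produces 0b0101; a partially-set group such as 0b01 has no narrower
// equivalent, so the adjustment fails. Scaling 0b0101 back up replicates
// each set bit across its group, restoring 0b00110011.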
9100 unsigned Opcode = MI.getOpcode();
9101 unsigned NumOperands = MI.getDesc().getNumOperands();
9103 auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) {
9105 if (MI.getOperand(NumOperands - 1).isImm()) {
9106 unsigned Imm = MI.getOperand(NumOperands - 1).getImm();
9108 validDomains |= 0x2;
9110 validDomains |= 0x4;
9111 if (!Is256 || Subtarget.hasAVX2())
9112 validDomains |= 0x8;
9114 return validDomains;
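// By this file's convention the valid-domains mask uses bit n for domain n
// (1 = PackedSingle, 2 = PackedDouble, 3 = PackedInt), so the 0x2/0x4/0x8
// bits above mark which execution domains the blend immediate can be
// rescaled into.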
9118 case X86::BLENDPDrmi:
9119 case X86::BLENDPDrri:
9120 case X86::VBLENDPDrmi:
9121 case X86::VBLENDPDrri:
9122 return GetBlendDomains(2, false);
9123 case X86::VBLENDPDYrmi:
9124 case X86::VBLENDPDYrri:
9125 return GetBlendDomains(4, true);
9126 case X86::BLENDPSrmi:
9127 case X86::BLENDPSrri:
9128 case X86::VBLENDPSrmi:
9129 case X86::VBLENDPSrri:
9130 case X86::VPBLENDDrmi:
9131 case X86::VPBLENDDrri:
9132 return GetBlendDomains(4, false);
9133 case X86::VBLENDPSYrmi:
9134 case X86::VBLENDPSYrri:
9135 case X86::VPBLENDDYrmi:
9136 case X86::VPBLENDDYrri:
9137 return GetBlendDomains(8, true);
9138 case X86::PBLENDWrmi:
9139 case X86::PBLENDWrri:
9140 case X86::VPBLENDWrmi:
9141 case X86::VPBLENDWrri:
9143 case X86::VPBLENDWYrmi:
9144 case X86::VPBLENDWYrri:
9145 return GetBlendDomains(8, false);
9146 case X86::VPANDDZ128rr:
9147 case X86::VPANDDZ128rm:
9148 case X86::VPANDDZ256rr:
9149 case X86::VPANDDZ256rm:
9150 case X86::VPANDQZ128rr:
9151 case X86::VPANDQZ128rm:
9152 case X86::VPANDQZ256rr:
9153 case X86::VPANDQZ256rm:
9154 case X86::VPANDNDZ128rr:
9155 case X86::VPANDNDZ128rm:
9156 case X86::VPANDNDZ256rr:
9157 case X86::VPANDNDZ256rm:
9158 case X86::VPANDNQZ128rr:
9159 case X86::VPANDNQZ128rm:
9160 case X86::VPANDNQZ256rr:
9161 case X86::VPANDNQZ256rm:
9162 case X86::VPORDZ128rr:
9163 case X86::VPORDZ128rm:
9164 case X86::VPORDZ256rr:
9165 case X86::VPORDZ256rm:
9166 case X86::VPORQZ128rr:
9167 case X86::VPORQZ128rm:
9168 case X86::VPORQZ256rr:
9169 case X86::VPORQZ256rm:
9170 case X86::VPXORDZ128rr:
9171 case X86::VPXORDZ128rm:
9172 case X86::VPXORDZ256rr:
9173 case X86::VPXORDZ256rm:
9174 case X86::VPXORQZ128rr:
9175 case X86::VPXORQZ128rm:
9176 case X86::VPXORQZ256rr:
9177 case X86::VPXORQZ256rm:
9180 if (Subtarget.hasDQI())
9183 if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16)
9185 if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16)
9188 if (NumOperands == 3 &&
9189 RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16)
9194 case X86::MOVHLPSrr:
9201 if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
9202 MI.getOperand(0).getSubReg() == 0 &&
9203 MI.getOperand(1).getSubReg() == 0 && MI.getOperand(2).getSubReg() == 0)
9206 case X86::SHUFPDrri:
9212#include "X86ReplaceableInstrs.def"
9218 assert(dom && "Not an SSE instruction");
9220 unsigned Opcode = MI.getOpcode();
9221 unsigned NumOperands = MI.getDesc().getNumOperands();
9223 auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) {
9224 if (MI.getOperand(NumOperands - 1).isImm()) {
9225 unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255;
9226 Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm);
9227 unsigned NewImm = Imm;
9229 const uint16_t *table = lookup(Opcode, dom, ReplaceableBlendInstrs);
9231 table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
9235 } else if (Domain == 2) {
9237 } else if (Domain == 3) {
9238 if (Subtarget.hasAVX2()) {
9240 if ((ImmWidth / (Is256 ? 2 : 1)) != 8) {
9241 table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
9245 assert(!Is256 && "128-bit vector expected");
9250 assert(table && table[Domain - 1] && "Unknown domain op");
9252 MI.getOperand(NumOperands - 1).setImm(NewImm & 255);
9258 case X86::BLENDPDrmi:
9259 case X86::BLENDPDrri:
9260 case X86::VBLENDPDrmi:
9261 case X86::VBLENDPDrri:
9262 return SetBlendDomain(2, false);
9263 case X86::VBLENDPDYrmi:
9264 case X86::VBLENDPDYrri:
9265 return SetBlendDomain(4, true);
9266 case X86::BLENDPSrmi:
9267 case X86::BLENDPSrri:
9268 case X86::VBLENDPSrmi:
9269 case X86::VBLENDPSrri:
9270 case X86::VPBLENDDrmi:
9271 case X86::VPBLENDDrri:
9272 return SetBlendDomain(4, false);
9273 case X86::VBLENDPSYrmi:
9274 case X86::VBLENDPSYrri:
9275 case X86::VPBLENDDYrmi:
9276 case X86::VPBLENDDYrri:
9277 return SetBlendDomain(8, true);
9278 case X86::PBLENDWrmi:
9279 case X86::PBLENDWrri:
9280 case X86::VPBLENDWrmi:
9281 case X86::VPBLENDWrri:
9282 return SetBlendDomain(8, false);
9283 case X86::VPBLENDWYrmi:
9284 case X86::VPBLENDWYrri:
9285 return SetBlendDomain(16, true);
9286 case X86::VPANDDZ128rr:
9287 case X86::VPANDDZ128rm:
9288 case X86::VPANDDZ256rr:
9289 case X86::VPANDDZ256rm:
9290 case X86::VPANDQZ128rr:
9291 case X86::VPANDQZ128rm:
9292 case X86::VPANDQZ256rr:
9293 case X86::VPANDQZ256rm:
9294 case X86::VPANDNDZ128rr:
9295 case X86::VPANDNDZ128rm:
9296 case X86::VPANDNDZ256rr:
9297 case X86::VPANDNDZ256rm:
9298 case X86::VPANDNQZ128rr:
9299 case X86::VPANDNQZ128rm:
9300 case X86::VPANDNQZ256rr:
9301 case X86::VPANDNQZ256rm:
9302 case X86::VPORDZ128rr:
9303 case X86::VPORDZ128rm:
9304 case X86::VPORDZ256rr:
9305 case X86::VPORDZ256rm:
9306 case X86::VPORQZ128rr:
9307 case X86::VPORQZ128rm:
9308 case X86::VPORQZ256rr:
9309 case X86::VPORQZ256rm:
9310 case X86::VPXORDZ128rr:
9311 case X86::VPXORDZ128rm:
9312 case X86::VPXORDZ256rr:
9313 case X86::VPXORDZ256rm:
9314 case X86::VPXORQZ128rr:
9315 case X86::VPXORQZ128rm:
9316 case X86::VPXORQZ256rr:
9317 case X86::VPXORQZ256rm: {
9319 if (Subtarget.hasDQI())
9323 lookupAVX512(MI.getOpcode(), dom, ReplaceableCustomAVX512LogicInstrs);
9324 assert(table && "Instruction not found in table?");
9327 if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
9332 case X86::UNPCKHPDrr:
9333 case X86::MOVHLPSrr:
9336 MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
9337 MI.getOperand(0).getSubReg() == 0 &&
9338 MI.getOperand(1).getSubReg() == 0 &&
9339 MI.getOperand(2).getSubReg() == 0) {
9340 commuteInstruction(MI, false);
9344 if (Opcode == X86::MOVHLPSrr)
9347 case X86::SHUFPDrri: {
9349 unsigned Imm = MI.getOperand(3).getImm();
9350 unsigned NewImm = 0x44;
9355 MI.getOperand(3).setImm(NewImm);
9356 MI.setDesc(get(X86::SHUFPSrri));
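// Illustrative reading of the rewrite: a SHUFPD immediate selects one
// double per source, and each selection expands to a pair of 32-bit
// selections in the SHUFPS immediate. The base value 0x44 encodes the
// two-bit fields 0,1,0,1, i.e. floats 0-1 from each source, which matches
// SHUFPD imm 0; set bits in the old immediate promote the corresponding
// field pair to the high two floats.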
9364std::pair<uint16_t, uint16_t>
9367 unsigned opcode = MI.getOpcode();
9373 return std::make_pair(domain, validDomains);
9375 if (lookup(opcode, domain, ReplaceableInstrs)) {
9377 } else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) {
9378 validDomains = Subtarget.hasAVX2() ? 0xe : 0x6;
9379 } else if (lookup(opcode, domain, ReplaceableInstrsFP)) {
9381 } else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) {
9384 if (!Subtarget.hasAVX2())
9385 return std::make_pair(0, 0);
9387 } else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) {
9389 } else if (Subtarget.hasDQI() &&
9390 lookupAVX512(opcode, domain, ReplaceableInstrsAVX512DQ)) {
9392 } else if (Subtarget.hasDQI()) {
9394 lookupAVX512(opcode, domain, ReplaceableInstrsAVX512DQMasked)) {
9395 if (domain == 1 || (domain == 3 && table[3] == opcode))
9402 return std::make_pair(domain, validDomains);
9408 assert(dom && "Not an SSE instruction");
9417 "256-bit vector operations only available in AVX2");
9418 table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2);
9421 table = lookup(MI.getOpcode(), dom, ReplaceableInstrsFP);
9423 "Can only select PackedSingle or PackedDouble");
9426 assert(Subtarget.hasAVX2() &&
9427 "256-bit insert/extract only available in AVX2");
9428 table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract);
9431 assert(Subtarget.hasAVX512() && "Requires AVX-512");
9432 table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512);
9434 if (table && Domain == 3 && table[3] == MI.getOpcode())
9438 assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
9439 table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ);
9442 if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
9446 assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
9447 table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked);
9448 if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
9451 assert(table && "Cannot change domain");
9477 case X86::DIVSDrm_Int:
9479 case X86::DIVSDrr_Int:
9481 case X86::DIVSSrm_Int:
9483 case X86::DIVSSrr_Int:
9489 case X86::SQRTSDm_Int:
9491 case X86::SQRTSDr_Int:
9493 case X86::SQRTSSm_Int:
9495 case X86::SQRTSSr_Int:
9499 case X86::VDIVPDYrm:
9500 case X86::VDIVPDYrr:
9503 case X86::VDIVPSYrm:
9504 case X86::VDIVPSYrr:
9506 case X86::VDIVSDrm_Int:
9508 case X86::VDIVSDrr_Int:
9510 case X86::VDIVSSrm_Int:
9512 case X86::VDIVSSrr_Int:
9515 case X86::VSQRTPDYm:
9516 case X86::VSQRTPDYr:
9519 case X86::VSQRTPSYm:
9520 case X86::VSQRTPSYr:
9522 case X86::VSQRTSDm_Int:
9524 case X86::VSQRTSDr_Int:
9526 case X86::VSQRTSSm_Int:
9528 case X86::VSQRTSSr_Int:
9530 case X86::VDIVPDZ128rm:
9531 case X86::VDIVPDZ128rmb:
9532 case X86::VDIVPDZ128rmbk:
9533 case X86::VDIVPDZ128rmbkz:
9534 case X86::VDIVPDZ128rmk:
9535 case X86::VDIVPDZ128rmkz:
9536 case X86::VDIVPDZ128rr:
9537 case X86::VDIVPDZ128rrk:
9538 case X86::VDIVPDZ128rrkz:
9539 case X86::VDIVPDZ256rm:
9540 case X86::VDIVPDZ256rmb:
9541 case X86::VDIVPDZ256rmbk:
9542 case X86::VDIVPDZ256rmbkz:
9543 case X86::VDIVPDZ256rmk:
9544 case X86::VDIVPDZ256rmkz:
9545 case X86::VDIVPDZ256rr:
9546 case X86::VDIVPDZ256rrk:
9547 case X86::VDIVPDZ256rrkz:
9548 case X86::VDIVPDZrrb:
9549 case X86::VDIVPDZrrbk:
9550 case X86::VDIVPDZrrbkz:
9551 case X86::VDIVPDZrm:
9552 case X86::VDIVPDZrmb:
9553 case X86::VDIVPDZrmbk:
9554 case X86::VDIVPDZrmbkz:
9555 case X86::VDIVPDZrmk:
9556 case X86::VDIVPDZrmkz:
9557 case X86::VDIVPDZrr:
9558 case X86::VDIVPDZrrk:
9559 case X86::VDIVPDZrrkz:
9560 case X86::VDIVPSZ128rm:
9561 case X86::VDIVPSZ128rmb:
9562 case X86::VDIVPSZ128rmbk:
9563 case X86::VDIVPSZ128rmbkz:
9564 case X86::VDIVPSZ128rmk:
9565 case X86::VDIVPSZ128rmkz:
9566 case X86::VDIVPSZ128rr:
9567 case X86::VDIVPSZ128rrk:
9568 case X86::VDIVPSZ128rrkz:
9569 case X86::VDIVPSZ256rm:
9570 case X86::VDIVPSZ256rmb:
9571 case X86::VDIVPSZ256rmbk:
9572 case X86::VDIVPSZ256rmbkz:
9573 case X86::VDIVPSZ256rmk:
9574 case X86::VDIVPSZ256rmkz:
9575 case X86::VDIVPSZ256rr:
9576 case X86::VDIVPSZ256rrk:
9577 case X86::VDIVPSZ256rrkz:
9578 case X86::VDIVPSZrrb:
9579 case X86::VDIVPSZrrbk:
9580 case X86::VDIVPSZrrbkz:
9581 case X86::VDIVPSZrm:
9582 case X86::VDIVPSZrmb:
9583 case X86::VDIVPSZrmbk:
9584 case X86::VDIVPSZrmbkz:
9585 case X86::VDIVPSZrmk:
9586 case X86::VDIVPSZrmkz:
9587 case X86::VDIVPSZrr:
9588 case X86::VDIVPSZrrk:
9589 case X86::VDIVPSZrrkz:
9590 case X86::VDIVSDZrm:
9591 case X86::VDIVSDZrr:
9592 case X86::VDIVSDZrm_Int:
9593 case X86::VDIVSDZrmk_Int:
9594 case X86::VDIVSDZrmkz_Int:
9595 case X86::VDIVSDZrr_Int:
9596 case X86::VDIVSDZrrk_Int:
9597 case X86::VDIVSDZrrkz_Int:
9598 case X86::VDIVSDZrrb_Int:
9599 case X86::VDIVSDZrrbk_Int:
9600 case X86::VDIVSDZrrbkz_Int:
9601 case X86::VDIVSSZrm:
9602 case X86::VDIVSSZrr:
9603 case X86::VDIVSSZrm_Int:
9604 case X86::VDIVSSZrmk_Int:
9605 case X86::VDIVSSZrmkz_Int:
9606 case X86::VDIVSSZrr_Int:
9607 case X86::VDIVSSZrrk_Int:
9608 case X86::VDIVSSZrrkz_Int:
9609 case X86::VDIVSSZrrb_Int:
9610 case X86::VDIVSSZrrbk_Int:
9611 case X86::VDIVSSZrrbkz_Int:
9612 case X86::VSQRTPDZ128m:
9613 case X86::VSQRTPDZ128mb:
9614 case X86::VSQRTPDZ128mbk:
9615 case X86::VSQRTPDZ128mbkz:
9616 case X86::VSQRTPDZ128mk:
9617 case X86::VSQRTPDZ128mkz:
9618 case X86::VSQRTPDZ128r:
9619 case X86::VSQRTPDZ128rk:
9620 case X86::VSQRTPDZ128rkz:
9621 case X86::VSQRTPDZ256m:
9622 case X86::VSQRTPDZ256mb:
9623 case X86::VSQRTPDZ256mbk:
9624 case X86::VSQRTPDZ256mbkz:
9625 case X86::VSQRTPDZ256mk:
9626 case X86::VSQRTPDZ256mkz:
9627 case X86::VSQRTPDZ256r:
9628 case X86::VSQRTPDZ256rk:
9629 case X86::VSQRTPDZ256rkz:
9630 case X86::VSQRTPDZm:
9631 case X86::VSQRTPDZmb:
9632 case X86::VSQRTPDZmbk:
9633 case X86::VSQRTPDZmbkz:
9634 case X86::VSQRTPDZmk:
9635 case X86::VSQRTPDZmkz:
9636 case X86::VSQRTPDZr:
9637 case X86::VSQRTPDZrb:
9638 case X86::VSQRTPDZrbk:
9639 case X86::VSQRTPDZrbkz:
9640 case X86::VSQRTPDZrk:
9641 case X86::VSQRTPDZrkz:
9642 case X86::VSQRTPSZ128m:
9643 case X86::VSQRTPSZ128mb:
9644 case X86::VSQRTPSZ128mbk:
9645 case X86::VSQRTPSZ128mbkz:
9646 case X86::VSQRTPSZ128mk:
9647 case X86::VSQRTPSZ128mkz:
9648 case X86::VSQRTPSZ128r:
9649 case X86::VSQRTPSZ128rk:
9650 case X86::VSQRTPSZ128rkz:
9651 case X86::VSQRTPSZ256m:
9652 case X86::VSQRTPSZ256mb:
9653 case X86::VSQRTPSZ256mbk:
9654 case X86::VSQRTPSZ256mbkz:
9655 case X86::VSQRTPSZ256mk:
9656 case X86::VSQRTPSZ256mkz:
9657 case X86::VSQRTPSZ256r:
9658 case X86::VSQRTPSZ256rk:
9659 case X86::VSQRTPSZ256rkz:
9660 case X86::VSQRTPSZm:
9661 case X86::VSQRTPSZmb:
9662 case X86::VSQRTPSZmbk:
9663 case X86::VSQRTPSZmbkz:
9664 case X86::VSQRTPSZmk:
9665 case X86::VSQRTPSZmkz:
9666 case X86::VSQRTPSZr:
9667 case X86::VSQRTPSZrb:
9668 case X86::VSQRTPSZrbk:
9669 case X86::VSQRTPSZrbkz:
9670 case X86::VSQRTPSZrk:
9671 case X86::VSQRTPSZrkz:
9672 case X86::VSQRTSDZm:
9673 case X86::VSQRTSDZm_Int:
9674 case X86::VSQRTSDZmk_Int:
9675 case X86::VSQRTSDZmkz_Int:
9676 case X86::VSQRTSDZr:
9677 case X86::VSQRTSDZr_Int:
9678 case X86::VSQRTSDZrk_Int:
9679 case X86::VSQRTSDZrkz_Int:
9680 case X86::VSQRTSDZrb_Int:
9681 case X86::VSQRTSDZrbk_Int:
9682 case X86::VSQRTSDZrbkz_Int:
9683 case X86::VSQRTSSZm:
9684 case X86::VSQRTSSZm_Int:
9685 case X86::VSQRTSSZmk_Int:
9686 case X86::VSQRTSSZmkz_Int:
9687 case X86::VSQRTSSZr:
9688 case X86::VSQRTSSZr_Int:
9689 case X86::VSQRTSSZrk_Int:
9690 case X86::VSQRTSSZrkz_Int:
9691 case X86::VSQRTSSZrb_Int:
9692 case X86::VSQRTSSZrbk_Int:
9693 case X86::VSQRTSSZrbkz_Int:
9695 case X86::VGATHERDPDYrm:
9696 case X86::VGATHERDPDZ128rm:
9697 case X86::VGATHERDPDZ256rm:
9698 case X86::VGATHERDPDZrm:
9699 case X86::VGATHERDPDrm:
9700 case X86::VGATHERDPSYrm:
9701 case X86::VGATHERDPSZ128rm:
9702 case X86::VGATHERDPSZ256rm:
9703 case X86::VGATHERDPSZrm:
9704 case X86::VGATHERDPSrm:
9705 case X86::VGATHERPF0DPDm:
9706 case X86::VGATHERPF0DPSm:
9707 case X86::VGATHERPF0QPDm:
9708 case X86::VGATHERPF0QPSm:
9709 case X86::VGATHERPF1DPDm:
9710 case X86::VGATHERPF1DPSm:
9711 case X86::VGATHERPF1QPDm:
9712 case X86::VGATHERPF1QPSm:
9713 case X86::VGATHERQPDYrm:
9714 case X86::VGATHERQPDZ128rm:
9715 case X86::VGATHERQPDZ256rm:
9716 case X86::VGATHERQPDZrm:
9717 case X86::VGATHERQPDrm:
9718 case X86::VGATHERQPSYrm:
9719 case X86::VGATHERQPSZ128rm:
9720 case X86::VGATHERQPSZ256rm:
9721 case X86::VGATHERQPSZrm:
9722 case X86::VGATHERQPSrm:
9723 case X86::VPGATHERDDYrm:
9724 case X86::VPGATHERDDZ128rm:
9725 case X86::VPGATHERDDZ256rm:
9726 case X86::VPGATHERDDZrm:
9727 case X86::VPGATHERDDrm:
9728 case X86::VPGATHERDQYrm:
9729 case X86::VPGATHERDQZ128rm:
9730 case X86::VPGATHERDQZ256rm:
9731 case X86::VPGATHERDQZrm:
9732 case X86::VPGATHERDQrm:
9733 case X86::VPGATHERQDYrm:
9734 case X86::VPGATHERQDZ128rm:
9735 case X86::VPGATHERQDZ256rm:
9736 case X86::VPGATHERQDZrm:
9737 case X86::VPGATHERQDrm:
9738 case X86::VPGATHERQQYrm:
9739 case X86::VPGATHERQQZ128rm:
9740 case X86::VPGATHERQQZ256rm:
9741 case X86::VPGATHERQQZrm:
9742 case X86::VPGATHERQQrm:
9743 case X86::VSCATTERDPDZ128mr:
9744 case X86::VSCATTERDPDZ256mr:
9745 case X86::VSCATTERDPDZmr:
9746 case X86::VSCATTERDPSZ128mr:
9747 case X86::VSCATTERDPSZ256mr:
9748 case X86::VSCATTERDPSZmr:
9749 case X86::VSCATTERPF0DPDm:
9750 case X86::VSCATTERPF0DPSm:
9751 case X86::VSCATTERPF0QPDm:
9752 case X86::VSCATTERPF0QPSm:
9753 case X86::VSCATTERPF1DPDm:
9754 case X86::VSCATTERPF1DPSm:
9755 case X86::VSCATTERPF1QPDm:
9756 case X86::VSCATTERPF1QPSm:
9757 case X86::VSCATTERQPDZ128mr:
9758 case X86::VSCATTERQPDZ256mr:
9759 case X86::VSCATTERQPDZmr:
9760 case X86::VSCATTERQPSZ128mr:
9761 case X86::VSCATTERQPSZ256mr:
9762 case X86::VSCATTERQPSZmr:
9763 case X86::VPSCATTERDDZ128mr:
9764 case X86::VPSCATTERDDZ256mr:
9765 case X86::VPSCATTERDDZmr:
9766 case X86::VPSCATTERDQZ128mr:
9767 case X86::VPSCATTERDQZ256mr:
9768 case X86::VPSCATTERDQZmr:
9769 case X86::VPSCATTERQDZ128mr:
9770 case X86::VPSCATTERQDZ256mr:
9771 case X86::VPSCATTERQDZmr:
9772 case X86::VPSCATTERQQZ128mr:
9773 case X86::VPSCATTERQQZ256mr:
9774 case X86::VPSCATTERQQZmr:
9784 unsigned UseIdx) const {
9791 Inst.getNumDefs() <= 2 && "Reassociation needs binary operators");
9801 assert((Inst.getNumDefs() == 1 || FlagDef) && "Implicit def isn't flags?");
9802 if (FlagDef && !FlagDef->isDead())
9813 bool Invert) const {
9865 case X86::VPANDDZ128rr:
9866 case X86::VPANDDZ256rr:
9867 case X86::VPANDDZrr:
9868 case X86::VPANDQZ128rr:
9869 case X86::VPANDQZ256rr:
9870 case X86::VPANDQZrr:
9873 case X86::VPORDZ128rr:
9874 case X86::VPORDZ256rr:
9876 case X86::VPORQZ128rr:
9877 case X86::VPORQZ256rr:
9881 case X86::VPXORDZ128rr:
9882 case X86::VPXORDZ256rr:
9883 case X86::VPXORDZrr:
9884 case X86::VPXORQZ128rr:
9885 case X86::VPXORQZ256rr:
9886 case X86::VPXORQZrr:
9889 case X86::VANDPDYrr:
9890 case X86::VANDPSYrr:
9891 case X86::VANDPDZ128rr:
9892 case X86::VANDPSZ128rr:
9893 case X86::VANDPDZ256rr:
9894 case X86::VANDPSZ256rr:
9895 case X86::VANDPDZrr:
9896 case X86::VANDPSZrr:
9901 case X86::VORPDZ128rr:
9902 case X86::VORPSZ128rr:
9903 case X86::VORPDZ256rr:
9904 case X86::VORPSZ256rr:
9909 case X86::VXORPDYrr:
9910 case X86::VXORPSYrr:
9911 case X86::VXORPDZ128rr:
9912 case X86::VXORPSZ128rr:
9913 case X86::VXORPDZ256rr:
9914 case X86::VXORPSZ256rr:
9915 case X86::VXORPDZrr:
9916 case X86::VXORPSZrr:
9937 case X86::VPADDBYrr:
9938 case X86::VPADDWYrr:
9939 case X86::VPADDDYrr:
9940 case X86::VPADDQYrr:
9941 case X86::VPADDBZ128rr:
9942 case X86::VPADDWZ128rr:
9943 case X86::VPADDDZ128rr:
9944 case X86::VPADDQZ128rr:
9945 case X86::VPADDBZ256rr:
9946 case X86::VPADDWZ256rr:
9947 case X86::VPADDDZ256rr:
9948 case X86::VPADDQZ256rr:
9949 case X86::VPADDBZrr:
9950 case X86::VPADDWZrr:
9951 case X86::VPADDDZrr:
9952 case X86::VPADDQZrr:
9953 case X86::VPMULLWrr:
9954 case X86::VPMULLWYrr:
9955 case X86::VPMULLWZ128rr:
9956 case X86::VPMULLWZ256rr:
9957 case X86::VPMULLWZrr:
9958 case X86::VPMULLDrr:
9959 case X86::VPMULLDYrr:
9960 case X86::VPMULLDZ128rr:
9961 case X86::VPMULLDZ256rr:
9962 case X86::VPMULLDZrr:
9963 case X86::VPMULLQZ128rr:
9964 case X86::VPMULLQZ256rr:
9965 case X86::VPMULLQZrr:
9966 case X86::VPMAXSBrr:
9967 case X86::VPMAXSBYrr:
9968 case X86::VPMAXSBZ128rr:
9969 case X86::VPMAXSBZ256rr:
9970 case X86::VPMAXSBZrr:
9971 case X86::VPMAXSDrr:
9972 case X86::VPMAXSDYrr:
9973 case X86::VPMAXSDZ128rr:
9974 case X86::VPMAXSDZ256rr:
9975 case X86::VPMAXSDZrr:
9976 case X86::VPMAXSQZ128rr:
9977 case X86::VPMAXSQZ256rr:
9978 case X86::VPMAXSQZrr:
9979 case X86::VPMAXSWrr:
9980 case X86::VPMAXSWYrr:
9981 case X86::VPMAXSWZ128rr:
9982 case X86::VPMAXSWZ256rr:
9983 case X86::VPMAXSWZrr:
9984 case X86::VPMAXUBrr:
9985 case X86::VPMAXUBYrr:
9986 case X86::VPMAXUBZ128rr:
9987 case X86::VPMAXUBZ256rr:
9988 case X86::VPMAXUBZrr:
9989 case X86::VPMAXUDrr:
9990 case X86::VPMAXUDYrr:
9991 case X86::VPMAXUDZ128rr:
9992 case X86::VPMAXUDZ256rr:
9993 case X86::VPMAXUDZrr:
9994 case X86::VPMAXUQZ128rr:
9995 case X86::VPMAXUQZ256rr:
9996 case X86::VPMAXUQZrr:
9997 case X86::VPMAXUWrr:
9998 case X86::VPMAXUWYrr:
9999 case X86::VPMAXUWZ128rr:
10000 case X86::VPMAXUWZ256rr:
10001 case X86::VPMAXUWZrr:
10002 case X86::VPMINSBrr:
10003 case X86::VPMINSBYrr:
10004 case X86::VPMINSBZ128rr:
10005 case X86::VPMINSBZ256rr:
10006 case X86::VPMINSBZrr:
10007 case X86::VPMINSDrr:
10008 case X86::VPMINSDYrr:
10009 case X86::VPMINSDZ128rr:
10010 case X86::VPMINSDZ256rr:
10011 case X86::VPMINSDZrr:
10012 case X86::VPMINSQZ128rr:
10013 case X86::VPMINSQZ256rr:
10014 case X86::VPMINSQZrr:
10015 case X86::VPMINSWrr:
10016 case X86::VPMINSWYrr:
10017 case X86::VPMINSWZ128rr:
10018 case X86::VPMINSWZ256rr:
10019 case X86::VPMINSWZrr:
10020 case X86::VPMINUBrr:
10021 case X86::VPMINUBYrr:
10022 case X86::VPMINUBZ128rr:
10023 case X86::VPMINUBZ256rr:
10024 case X86::VPMINUBZrr:
10025 case X86::VPMINUDrr:
10026 case X86::VPMINUDYrr:
10027 case X86::VPMINUDZ128rr:
10028 case X86::VPMINUDZ256rr:
10029 case X86::VPMINUDZrr:
10030 case X86::VPMINUQZ128rr:
10031 case X86::VPMINUQZ256rr:
10032 case X86::VPMINUQZrr:
10033 case X86::VPMINUWrr:
10034 case X86::VPMINUWYrr:
10035 case X86::VPMINUWZ128rr:
10036 case X86::VPMINUWZ256rr:
10037 case X86::VPMINUWZrr:
10041 case X86::MAXCPDrr:
10042 case X86::MAXCPSrr:
10043 case X86::MAXCSDrr:
10044 case X86::MAXCSSrr:
10045 case X86::MINCPDrr:
10046 case X86::MINCPSrr:
10047 case X86::MINCSDrr:
10048 case X86::MINCSSrr:
10049 case X86::VMAXCPDrr:
10050 case X86::VMAXCPSrr:
10051 case X86::VMAXCPDYrr:
10052 case X86::VMAXCPSYrr:
10053 case X86::VMAXCPDZ128rr:
10054 case X86::VMAXCPSZ128rr:
10055 case X86::VMAXCPDZ256rr:
10056 case X86::VMAXCPSZ256rr:
10057 case X86::VMAXCPDZrr:
10058 case X86::VMAXCPSZrr:
10059 case X86::VMAXCSDrr:
10060 case X86::VMAXCSSrr:
10061 case X86::VMAXCSDZrr:
10062 case X86::VMAXCSSZrr:
10063 case X86::VMINCPDrr:
10064 case X86::VMINCPSrr:
10065 case X86::VMINCPDYrr:
10066 case X86::VMINCPSYrr:
10067 case X86::VMINCPDZ128rr:
10068 case X86::VMINCPSZ128rr:
10069 case X86::VMINCPDZ256rr:
10070 case X86::VMINCPSZ256rr:
10071 case X86::VMINCPDZrr:
10072 case X86::VMINCPSZrr:
10073 case X86::VMINCSDrr:
10074 case X86::VMINCSSrr:
10075 case X86::VMINCSDZrr:
10076 case X86::VMINCSSZrr:
10077 case X86::VMAXCPHZ128rr:
10078 case X86::VMAXCPHZ256rr:
10079 case X86::VMAXCPHZrr:
10080 case X86::VMAXCSHZrr:
10081 case X86::VMINCPHZ128rr:
10082 case X86::VMINCPHZ256rr:
10083 case X86::VMINCPHZrr:
10084 case X86::VMINCSHZrr:
10094 case X86::VADDPDrr:
10095 case X86::VADDPSrr:
10096 case X86::VADDPDYrr:
10097 case X86::VADDPSYrr:
10098 case X86::VADDPDZ128rr:
10099 case X86::VADDPSZ128rr:
10100 case X86::VADDPDZ256rr:
10101 case X86::VADDPSZ256rr:
10102 case X86::VADDPDZrr:
10103 case X86::VADDPSZrr:
10104 case X86::VADDSDrr:
10105 case X86::VADDSSrr:
10106 case X86::VADDSDZrr:
10107 case X86::VADDSSZrr:
10108 case X86::VMULPDrr:
10109 case X86::VMULPSrr:
10110 case X86::VMULPDYrr:
10111 case X86::VMULPSYrr:
10112 case X86::VMULPDZ128rr:
10113 case X86::VMULPSZ128rr:
10114 case X86::VMULPDZ256rr:
10115 case X86::VMULPSZ256rr:
10116 case X86::VMULPDZrr:
10117 case X86::VMULPSZrr:
10118 case X86::VMULSDrr:
10119 case X86::VMULSSrr:
10120 case X86::VMULSDZrr:
10121 case X86::VMULSSZrr:
10122 case X86::VADDPHZ128rr:
10123 case X86::VADDPHZ256rr:
10124 case X86::VADDPHZrr:
10125 case X86::VADDSHZrr:
10126 case X86::VMULPHZ128rr:
10127 case X86::VMULPHZ256rr:
10128 case X86::VMULPHZrr:
10129 case X86::VMULSHZrr:
10140static std::optional<ParamLoadedValue>
10143 Register DestReg = MI.getOperand(0).getReg();
10144 Register SrcReg = MI.getOperand(1).getReg();
10149 if (DestReg == DescribedReg)
10154 if (unsigned SubRegIdx = TRI->getSubRegIndex(DestReg, DescribedReg)) {
10155 Register SrcSubReg = TRI->getSubReg(SrcReg, SubRegIdx);
10165 if (MI.getOpcode() == X86::MOV8rr || MI.getOpcode() == X86::MOV16rr ||
10166 !TRI->isSuperRegister(DestReg, DescribedReg))
10167 return std::nullopt;
10169 assert(MI.getOpcode() == X86::MOV32rr && "Unexpected super-register case");
10173std::optional<ParamLoadedValue>
10180 switch (MI.getOpcode()) {
10183 case X86::LEA64_32r: {
10185 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
10186 return std::nullopt;
10190 if (!MI.getOperand(4).isImm() || !MI.getOperand(2).isImm())
10191 return std::nullopt;
10200 if ((Op1.isReg() && Op1.getReg() == MI.getOperand(0).getReg()) ||
10201 Op2.getReg() == MI.getOperand(0).getReg())
10202 return std::nullopt;
10203 else if ((Op1.isReg() && Op1.getReg() != X86::NoRegister &&
10204 TRI->regsOverlap(Op1.getReg(), MI.getOperand(0).getReg())) ||
10205 (Op2.getReg() != X86::NoRegister &&
10206 TRI->regsOverlap(Op2.getReg(), MI.getOperand(0).getReg())))
10207 return std::nullopt;
10209 int64_t Coef = MI.getOperand(2).getImm();
10210 int64_t Offset = MI.getOperand(4).getImm();
10213 if ((Op1.isReg() && Op1.getReg() != X86::NoRegister)) {
10215 } else if (Op1.isFI())
10218 if (Op && Op->isReg() && Op->getReg() == Op2.getReg() && Coef > 0) {
10219 Ops.push_back(dwarf::DW_OP_constu);
10220 Ops.push_back(Coef + 1);
10221 Ops.push_back(dwarf::DW_OP_mul);
10223 if (Op && Op2.getReg() != X86::NoRegister) {
10224 int dwarfReg = TRI->getDwarfRegNum(Op2.getReg(), false);
10226 return std::nullopt;
10227 else if (dwarfReg < 32) {
10228 Ops.push_back(dwarf::DW_OP_breg0 + dwarfReg);
10231 Ops.push_back(dwarf::DW_OP_bregx);
10232 Ops.push_back(dwarfReg);
10242 Ops.push_back(dwarf::DW_OP_constu);
10243 Ops.push_back(Coef);
10244 Ops.push_back(dwarf::DW_OP_mul);
10247 if (((Op1.isReg() && Op1.getReg() != X86::NoRegister) || Op1.isFI()) &&
10248 Op2.getReg() != X86::NoRegister) {
10249 Ops.push_back(dwarf::DW_OP_plus);
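// Illustrative example: for "lea 8(%rdi,%rsi,4), %rax" the loaded value of
// RAX is described as rdi + rsi * 4 + 8. The expression pushes the index
// register (DW_OP_breg0+n or DW_OP_bregx), scales it with DW_OP_constu /
// DW_OP_mul, and adds the base via DW_OP_plus; when base and index are the
// same register the two terms fold into a single multiply by Coef + 1.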
10261 return std::nullopt;
10264 case X86::MOV64ri32:
10267 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
10268 return std::nullopt;
10275 case X86::XOR32rr: {
10278 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
10279 return std::nullopt;
10280 if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
10282 return std::nullopt;
10284 case X86::MOVSX64rr32: {
10291 if (!TRI->isSubRegisterEq(MI.getOperand(0).getReg(), Reg))
10292 return std::nullopt;
10301 if (Reg == MI.getOperand(0).getReg())
10304 assert(X86MCRegisterClasses[X86::GR32RegClassID].contains(Reg) &&
10305 "Unhandled sub-register case for MOVSX64rr32");
10310 assert(!MI.isMoveImmediate() && "Unexpected MoveImm instruction");
10327 assert(!OldFlagDef1 == !OldFlagDef2 &&
10328 "Unexpected instruction type for reassociation");
10330 if (!OldFlagDef1 || !OldFlagDef2)
10334 "Must have dead EFLAGS operand in reassociable instruction");
10341 assert(NewFlagDef1 && NewFlagDef2 &&
10342 "Unexpected operand in reassociable instruction");
10352std::pair<unsigned, unsigned>
10354 return std::make_pair(TF, 0u);
10359 using namespace X86II;
10360 static const std::pair<unsigned, const char *> TargetFlags[] = {
10361 {MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"},
10362 {MO_PIC_BASE_OFFSET, "x86-pic-base-offset"},
10363 {MO_GOT, "x86-got"},
10364 {MO_GOTOFF, "x86-gotoff"},
10365 {MO_GOTPCREL, "x86-gotpcrel"},
10366 {MO_GOTPCREL_NORELAX, "x86-gotpcrel-norelax"},
10367 {MO_PLT, "x86-plt"},
10368 {MO_TLSGD, "x86-tlsgd"},
10369 {MO_TLSLD, "x86-tlsld"},
10370 {MO_TLSLDM, "x86-tlsldm"},
10371 {MO_GOTTPOFF, "x86-gottpoff"},
10372 {MO_INDNTPOFF, "x86-indntpoff"},
10373 {MO_TPOFF, "x86-tpoff"},
10374 {MO_DTPOFF, "x86-dtpoff"},
10375 {MO_NTPOFF, "x86-ntpoff"},
10376 {MO_GOTNTPOFF, "x86-gotntpoff"},
10377 {MO_DLLIMPORT, "x86-dllimport"},
10378 {MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"},
10379 {MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"},
10380 {MO_TLVP, "x86-tlvp"},
10381 {MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"},
10382 {MO_SECREL, "x86-secrel"},
10383 {MO_COFFSTUB, "x86-coffstub"}};
10400 if (!TM->isPositionIndependent())
10407 if (GlobalBaseReg == 0)
10419 PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
10421 PC = GlobalBaseReg;
10423 if (STI.is64Bit()) {
10476 StringRef getPassName() const override {
10477 return "X86 PIC Global Base Reg Initialization";
10480 void getAnalysisUsage(AnalysisUsage &AU) const override {
10495 bool runOnMachineFunction(MachineFunction &MF) override {
10499 X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
10505 MachineDominatorTree *DT =
10506 &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
10516 MachineBasicBlock *BB = Node->getBlock();
10522 switch (I->getOpcode()) {
10523 case X86::TLS_base_addr32:
10524 case X86::TLS_base_addr64:
10525 if (TLSBaseAddrReg)
10526 I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
10528 I = SetRegister(*I, &TLSBaseAddrReg);
10537 for (auto &I : *Node) {
10538 Changed |= VisitNode(I, TLSBaseAddrReg);
10546 MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I,
10548 MachineFunction *MF = I.getParent()->getParent();
10549 const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
10550 const bool is64Bit = STI.is64Bit();
10554 MachineInstr *Copy =
10556 TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX)
10557 .addReg(TLSBaseAddrReg);
10560 I.eraseFromParent();
10567 MachineInstr *SetRegister(MachineInstr &I, Register *TLSBaseAddrReg) {
10568 MachineFunction *MF = I.getParent()->getParent();
10569 const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
10570 const bool is64Bit = STI.is64Bit();
10574 MachineRegisterInfo &RegInfo = MF->getRegInfo();
10576 is64Bit ? &X86::GR64RegClass : &X86::GR32RegClass);
10579 MachineInstr *Next = I.getNextNode();
10581 TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
10587 StringRef getPassName() const override {
10588 return "Local Dynamic TLS Access Clean-up";
10591 void getAnalysisUsage(AnalysisUsage &AU) const override {
10593 AU.addRequired<MachineDominatorTreeWrapperPass>();
10599char LDTLSCleanup::ID = 0;
10601 return new LDTLSCleanup();
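// The cleanup pass walks the machine dominator tree: the first
// TLS_base_addr32/64 call on a path stores its result (EAX/RAX) into a
// fresh virtual register via SetRegister, and every dominated call is
// rewritten by ReplaceTLSBaseAddrCall into a plain COPY from that register,
// removing redundant __tls_get_addr calls for local-dynamic TLS.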
10634std::optional<std::unique_ptr<outliner::OutlinedFunction>>
10637 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
10638 unsigned MinRepeats) const {
10639 unsigned SequenceSize = 0;
10640 for (auto &MI : RepeatedSequenceLocs[0]) {
10644 if (MI.isDebugInstr() || MI.isKill())
10651 unsigned CFICount = 0;
10652 for (auto &I : RepeatedSequenceLocs[0]) {
10653 if (I.isCFIInstruction())
10663 std::vector<MCCFIInstruction> CFIInstructions =
10664 C.getMF()->getFrameInstructions();
10666 if (CFICount > 0 && CFICount != CFIInstructions.size())
10667 return std::nullopt;
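// A candidate containing CFI instructions is only outlined when it carries
// every CFI instruction of its function; outlining a partial subset would
// split the unwind information between the caller and the outlined body.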
10671 if (RepeatedSequenceLocs[0].back().isTerminator()) {
10675 return std::make_unique<outliner::OutlinedFunction>(
10676 RepeatedSequenceLocs, SequenceSize,
10683 return std::nullopt;
10688 return std::make_unique<outliner::OutlinedFunction>(
10698 if (Subtarget.getFrameLowering()->has128ByteRedZone(MF)) {
10707 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
10717 unsigned Flags) const {
10721 if (MI.isTerminator())
10735 if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) ||
10736 MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) ||
10737 MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP))
10741 if (MI.readsRegister(X86::RIP, &RI) ||
10742 MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) ||
10743 MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP))
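// Instructions touching RSP or RIP cannot be outlined: the outlined body
// executes behind a call, so stack offsets shift by one slot and
// RIP-relative addresses would resolve relative to the wrong location.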
10747 if (MI.isCFIInstruction())
10763 MBB.insert(MBB.end(), retq);
10773 .addGlobalAddress(M.getNamedValue(MF.getName())));
10777 .addGlobalAddress(M.getNamedValue(MF.getName())));
10786 bool AllowSideEffects) const {
10791 if (ST.hasMMX() && X86::VR64RegClass.contains(Reg))
10795 if (TRI.isGeneralPurposeRegister(MF, Reg)) {
10800 if (!AllowSideEffects)
10807 } else if (X86::VR128RegClass.contains(Reg)) {
10816 } else if (X86::VR256RegClass.contains(Reg)) {
10825 } else if (X86::VR512RegClass.contains(Reg)) {
10827 if (!ST.hasAVX512())
10834 } else if (X86::VK1RegClass.contains(Reg) || X86::VK2RegClass.contains(Reg) ||
10836 X86::VK16RegClass.contains(Reg)) {
10841 unsigned Op = ST.hasBWI() ? X86::KXORQkk : X86::KXORWkk;
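// Register clearing is class-specific: general-purpose and vector registers
// use the self-XOR idiom (which is also dependency-breaking on x86), while
// AVX-512 mask registers are cleared with KXOR; the 64-bit KXORQ form needs
// BWI, otherwise the 16-bit KXORW suffices.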
10850 bool DoRegPressureReduce) const {
10853 case X86::VPDPWSSDrr:
10854 case X86::VPDPWSSDrm:
10855 case X86::VPDPWSSDYrr:
10856 case X86::VPDPWSSDYrm: {
10857 if (!Subtarget.hasFastDPWSSD()) {
10863 case X86::VPDPWSSDZ128rr:
10864 case X86::VPDPWSSDZ128rm:
10865 case X86::VPDPWSSDZ256rr:
10866 case X86::VPDPWSSDZ256rm:
10867 case X86::VPDPWSSDZrr:
10868 case X86::VPDPWSSDZrm: {
10869 if (Subtarget.hasBWI() && !Subtarget.hasFastDPWSSD()) {
10877 Patterns, DoRegPressureReduce);
10889 unsigned AddOpc = 0;
10890 unsigned MaddOpc = 0;
10893 assert(false && "It should not reach here");
10899 case X86::VPDPWSSDrr:
10900 MaddOpc = X86::VPMADDWDrr;
10901 AddOpc = X86::VPADDDrr;
10903 case X86::VPDPWSSDrm:
10904 MaddOpc = X86::VPMADDWDrm;
10905 AddOpc = X86::VPADDDrr;
10907 case X86::VPDPWSSDZ128rr:
10908 MaddOpc = X86::VPMADDWDZ128rr;
10909 AddOpc = X86::VPADDDZ128rr;
10911 case X86::VPDPWSSDZ128rm:
10912 MaddOpc = X86::VPMADDWDZ128rm;
10913 AddOpc = X86::VPADDDZ128rr;
10919 case X86::VPDPWSSDYrr:
10920 MaddOpc = X86::VPMADDWDYrr;
10921 AddOpc = X86::VPADDDYrr;
10923 case X86::VPDPWSSDYrm:
10924 MaddOpc = X86::VPMADDWDYrm;
10925 AddOpc = X86::VPADDDYrr;
10927 case X86::VPDPWSSDZ256rr:
10928 MaddOpc = X86::VPMADDWDZ256rr;
10929 AddOpc = X86::VPADDDZ256rr;
10931 case X86::VPDPWSSDZ256rm:
10932 MaddOpc = X86::VPMADDWDZ256rm;
10933 AddOpc = X86::VPADDDZ256rr;
10939 case X86::VPDPWSSDZrr:
10940 MaddOpc = X86::VPMADDWDZrr;
10941 AddOpc = X86::VPADDDZrr;
10943 case X86::VPDPWSSDZrm:
10944 MaddOpc = X86::VPMADDWDZrm;
10945 AddOpc = X86::VPADDDZrr;
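// On targets without fast VNNI, "vpdpwssd dst, a, b" is decomposed into the
// pair it fuses (sketch): tmp = vpmaddwd(a, b) computes each dword's sum of
// adjacent signed 16-bit products, and dst = vpaddd(dst, tmp) performs the
// accumulation.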
10957 InstrIdxForVirtReg.insert(std::make_pair(NewReg, 0));
10979 DelInstrs, InstrIdxForVirtReg);
10983 InstrIdxForVirtReg);
10993 M.Base.FrameIndex = FI;
10994 M.getFullAddress(Ops);
10997#define GET_INSTRINFO_HELPERS
10998#include "X86GenInstrInfo.inc"
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineOutlinerClass
Constants defining how certain sequences should be outlined.
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
DXIL Forward Handle Accesses
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
static bool lookup(const GsymReader &GR, DataExtractor &Data, uint64_t &Offset, uint64_t BaseAddr, uint64_t Addr, SourceLocations &SrcLocs, llvm::Error &Err)
A Lookup helper functions.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
static bool Expand2AddrUndef(MachineInstrBuilder &MIB, const MCInstrDesc &Desc)
Expand a single-def pseudo instruction to a two-addr instruction with two undef reads of the register...
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register const TargetRegisterInfo * TRI
Promote Memory to Register
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Provides some synthesis utilities to produce sequences of values.
static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
#define FROM_TO(FROM, TO)
cl::opt< bool > X86EnableAPXForRelocation
static bool is64Bit(const char *name)
#define GET_EGPR_IF_ENABLED(OPC)
static bool isLEA(unsigned Opcode)
static void addOperands(MachineInstrBuilder &MIB, ArrayRef< MachineOperand > MOs, int PtrOffset=0)
static std::optional< ParamLoadedValue > describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg, const TargetRegisterInfo *TRI)
If DescribedReg overlaps with the MOVrr instruction's destination register then, if possible,...
static cl::opt< unsigned > PartialRegUpdateClearance("partial-reg-update-clearance", cl::desc("Clearance between two register writes " "for inserting XOR to avoid partial " "register update"), cl::init(64), cl::Hidden)
static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF, MachineInstr &MI)
static unsigned CopyToFromAsymmetricReg(Register DestReg, Register SrcReg, const X86Subtarget &Subtarget)
static bool isConvertibleLEA(MachineInstr *MI)
static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB, const TargetInstrInfo &TII, const X86Subtarget &Subtarget)
static bool isAMXOpcode(unsigned Opc)
static int getJumpTableIndexFromReg(const MachineRegisterInfo &MRI, Register Reg)
static void updateOperandRegConstraints(MachineFunction &MF, MachineInstr &NewMI, const TargetInstrInfo &TII)
static int getJumpTableIndexFromAddr(const MachineInstr &MI)
static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth, unsigned NewWidth, unsigned *pNewMask=nullptr)
static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII, bool MinusOne)
static unsigned getNewOpcFromTable(ArrayRef< X86TableEntry > Table, unsigned Opc)
static unsigned getStoreRegOpcode(Register SrcReg, const TargetRegisterClass *RC, bool IsStackAligned, const X86Subtarget &STI)
#define FOLD_BROADCAST(SIZE)
static cl::opt< unsigned > UndefRegClearance("undef-reg-clearance", cl::desc("How many idle instructions we would like before " "certain undef register reads"), cl::init(128), cl::Hidden)
#define CASE_BCAST_TYPE_OPC(TYPE, OP16, OP32, OP64)
static bool isTruncatedShiftCountForLEA(unsigned ShAmt)
Check whether the given shift count is appropriate can be represented by a LEA instruction.
static cl::opt< bool > ReMatPICStubLoad("remat-pic-stub-load", cl::desc("Re-materialize load from stub in PIC mode"), cl::init(false), cl::Hidden)
static SmallVector< MachineMemOperand *, 2 > extractLoadMMOs(ArrayRef< MachineMemOperand * > MMOs, MachineFunction &MF)
static MachineInstr * fuseTwoAddrInst(MachineFunction &MF, unsigned Opcode, ArrayRef< MachineOperand > MOs, MachineBasicBlock::iterator InsertPt, MachineInstr &MI, const TargetInstrInfo &TII)
static void printFailMsgforFold(const MachineInstr &MI, unsigned Idx)
static bool canConvert2Copy(unsigned Opc)
static cl::opt< bool > NoFusing("disable-spill-fusing", cl::desc("Disable fusing of spill code into instructions"), cl::Hidden)
static bool expandNOVLXStore(MachineInstrBuilder &MIB, const TargetRegisterInfo *TRI, const MCInstrDesc &StoreDesc, const MCInstrDesc &ExtractDesc, unsigned SubIdx)
static bool isX87Reg(Register Reg)
Return true if the Reg is X87 register.
static bool Expand2AddrKreg(MachineInstrBuilder &MIB, const MCInstrDesc &Desc, Register Reg)
Expand a single-def pseudo instruction to a two-addr instruction with two k0 reads.
static bool isFrameLoadOpcode(int Opcode, TypeSize &MemBytes)
#define VPERM_CASES_BROADCAST(Suffix)
static std::pair< X86::CondCode, unsigned > isUseDefConvertible(const MachineInstr &MI)
Check whether the use can be converted to remove a comparison against zero.
static bool findRedundantFlagInstr(MachineInstr &CmpInstr, MachineInstr &CmpValDefInstr, const MachineRegisterInfo *MRI, MachineInstr **AndInstr, const TargetRegisterInfo *TRI, const X86Subtarget &ST, bool &NoSignFlag, bool &ClearsOverflowFlag)
static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc)
static unsigned getLoadRegOpcode(Register DestReg, const TargetRegisterClass *RC, bool IsStackAligned, const X86Subtarget &STI)
static void expandLoadStackGuard(MachineInstrBuilder &MIB, const TargetInstrInfo &TII)
static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum, bool ForLoadFold=false)
static MachineInstr * makeM0Inst(const TargetInstrInfo &TII, unsigned Opcode, ArrayRef< MachineOperand > MOs, MachineBasicBlock::iterator InsertPt, MachineInstr &MI)
#define GET_ND_IF_ENABLED(OPC)
static bool expandMOVSHP(MachineInstrBuilder &MIB, MachineInstr &MI, const TargetInstrInfo &TII, bool HasAVX)
static bool hasPartialRegUpdate(unsigned Opcode, const X86Subtarget &Subtarget, bool ForLoadFold=false)
Return true for all instructions that only update the first 32 or 64-bits of the destination register...
static const uint16_t * lookupAVX512(unsigned opcode, unsigned domain, ArrayRef< uint16_t[4]> Table)
static unsigned getLoadStoreRegOpcode(Register Reg, const TargetRegisterClass *RC, bool IsStackAligned, const X86Subtarget &STI, bool Load)
#define VPERM_CASES(Suffix)
#define FROM_TO_SIZE(A, B, S)
static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2)
static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag, bool &ClearsOverflowFlag)
Check whether the definition can be converted to remove a comparison against zero.
static MachineInstr * fuseInst(MachineFunction &MF, unsigned Opcode, unsigned OpNo, ArrayRef< MachineOperand > MOs, MachineBasicBlock::iterator InsertPt, MachineInstr &MI, const TargetInstrInfo &TII, int PtrOffset=0)
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
static unsigned getCommutedVPERMV3Opcode(unsigned Opcode)
static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII)
static MachineBasicBlock * getFallThroughMBB(MachineBasicBlock *MBB, MachineBasicBlock *TBB)
static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI, const MachineInstr &UserMI, const MachineFunction &MF)
Check if LoadMI is a partial register load that we can't fold into MI because the latter uses content...
static unsigned getLoadStoreOpcodeForFP16(bool Load, const X86Subtarget &STI)
static bool isHReg(Register Reg)
Test if the given register is a physical h register.
static cl::opt< bool > PrintFailedFusing("print-failed-fuse-candidates", cl::desc("Print instructions that the allocator wants to" " fuse, but the X86 backend currently can't"), cl::Hidden)
static bool expandNOVLXLoad(MachineInstrBuilder &MIB, const TargetRegisterInfo *TRI, const MCInstrDesc &LoadDesc, const MCInstrDesc &BroadcastDesc, unsigned SubIdx)
static void genAlternativeDpCodeSequence(MachineInstr &Root, const TargetInstrInfo &TII, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg)
static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1, unsigned SrcOpIdx2)
This determines which of three possible cases of a three source commute the source indexes correspond...
static bool isFrameStoreOpcode(int Opcode, TypeSize &MemBytes)
static unsigned getTruncatedShiftCount(const MachineInstr &MI, unsigned ShiftAmtOperandIdx)
Check whether the shift count for a machine operand is non-zero.
static SmallVector< MachineMemOperand *, 2 > extractStoreMMOs(ArrayRef< MachineMemOperand * > MMOs, MachineFunction &MF)
static unsigned getBroadcastOpcode(const X86FoldTableEntry *I, const TargetRegisterClass *RC, const X86Subtarget &STI)
static unsigned convertALUrr2ALUri(unsigned Opc)
Convert an ALUrr opcode to corresponding ALUri opcode.
static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI)
Return true if register is PIC base; i.e.g defined by X86::MOVPC32r.
static bool isCommutableVPERMV3Instruction(unsigned Opcode)
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
AnalysisUsage & addRequired()
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
This is an important base class in LLVM.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * appendExt(const DIExpression *Expr, unsigned FromSize, unsigned ToSize, bool Signed)
Append a zero- or sign-extension to Expr.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
DomTreeNodeBase< NodeT > * getRootNode()
getRootNode - This returns the entry node for the CFG of the function.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
FunctionPass class - This class is used to implement most global optimizations.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
LiveInterval - This class represents the liveness of a register, or stack slot.
SlotIndex InsertMachineInstrInMaps(MachineInstr &MI)
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
A set of physical registers with utility functions to track liveness when walking backward/forward th...
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
LLVM_ABI VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
static LocationSize precise(uint64_t Value)
bool usesWindowsCFI() const
static MCCFIInstruction createAdjustCfaOffset(MCSymbol *L, int64_t Adjustment, SMLoc Loc={})
.cfi_adjust_cfa_offset Same as .cfi_def_cfa_offset, but Offset is a relative value that is added/subt...
Instances of this class represent a single low-level machine instruction.
void setOpcode(unsigned Op)
Describe properties that are true of each instruction in the target description file.
This holds information about one operand of a machine instruction, indicating the register class for ...
Wrapper class representing physical registers. Should be passed by value.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
MachineInstrBundleIterator< const MachineInstr > const_iterator
void push_back(MachineInstr *MI)
MachineInstr * remove(MachineInstr *I)
Remove the unbundled instruction from the instruction list without deleting it.
LLVM_ABI DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
LLVM_ABI bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
LLVM_ABI void eraseFromParent()
This method unlinks 'this' from the containing function and deletes it.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< succ_iterator > successors()
MachineInstrBundleIterator< MachineInstr > iterator
@ LQR_Dead
Register is known to be fully dead.
This class is a data container for one entry in a MachineConstantPool.
union llvm::MachineConstantPoolEntry::(anonymous) Val
The constant itself.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
const Constant * ConstVal
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
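A hedged sketch of the typical call sequence; MF in scope is assumed, and the f32 zero constant and alignment are illustrative only:
MachineConstantPool *MCP = MF.getConstantPool();
const Constant *Zero =
    Constant::getNullValue(Type::getFloatTy(MF.getFunction().getContext()));
unsigned CPI = MCP->getConstantPoolIndex(Zero, Align(4));
// The index can then be referenced as an operand:
//   MachineOperand::CreateCPI(CPI, /*Offset=*/0)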
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
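A sketch of allocating an MMO for a 4-byte dereferenceable load from a fixed stack slot, assuming MF and FI are in scope (overload choice follows the signature above; treat it as an assumption):
MachineMemOperand *MMO = MF.getMachineMemOperand(
    MachinePointerInfo::getFixedStack(MF, FI),
    MachineMemOperand::MOLoad, LLT::scalar(32), Align(4));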
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const MachineBasicBlock & front() const
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDisp(const MachineOperand &Disp, int64_t off, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
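These builder methods chain off BuildMI; a minimal sketch of emitting an add-immediate, assuming TII, MBB, MI, DL, DstReg and SrcReg are in scope:
BuildMI(MBB, MI, DL, TII->get(X86::ADD32ri), DstReg)
    .addReg(SrcReg, getKillRegState(true))  // source operand, killed here
    .addImm(1);                             // immediate addend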
Representation of each machine instruction.
mop_iterator operands_begin()
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
void dropDebugNumber()
Drop any variable location debugging information associated with this instruction.
LLVM_ABI void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just prior to the instruction itself.
LLVM_ABI void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
unsigned getNumOperands() const
Returns the total number of operands.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully defines or partially defines) the specified register.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI void eraseFromBundle()
Unlink 'this' from its basic block and delete it.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
LLVM_ABI void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
void setFlag(MIFlag Flag)
Set a MI flag.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
LLVM_ABI void dump() const
const MachineOperand & getOperand(unsigned i) const
unsigned getNumDefs() const
Returns the total number of definitions.
void setDebugLoc(DebugLoc DL)
Replace the current source information with the given DebugLoc.
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
void setImplicit(bool Val=true)
void setImm(int64_t immVal)
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
void setIsDead(bool Val=true)
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void setIsKill(bool Val=true)
bool isJTI() const
isJTI - Tests if this is a MO_JumpTableIndex operand.
LLVM_ABI void ChangeToRegister(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isDebug=false)
ChangeToRegister - Replace this operand with a new register operand of the specified value.
static MachineOperand CreateImm(int64_t Val)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateCPI(unsigned Idx, int Offset, unsigned TargetFlags=0)
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
static MachineOperand CreateFI(int Idx)
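The static Create* helpers pair with MachineInstr::addOperand; a brief sketch with illustrative names (MF, MI, SomeReg and SomeFI are assumed in scope):
MI.addOperand(MF, MachineOperand::CreateImm(42));       // immediate operand
MI.addOperand(MF, MachineOperand::CreateFI(SomeFI));    // frame-index operand
MI.addOperand(MF, MachineOperand::CreateReg(SomeReg, /*isDef=*/false,
                                            /*isImp=*/false, /*isKill=*/true));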
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
A Module instance is used to store all the information related to an LLVM module.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
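A sketch of the usual virtual/physical split when inspecting a register operand (MI and MRI assumed in scope):
Register R = MI.getOperand(0).getReg();
if (R.isVirtual()) {
  // Virtual registers get their class from MachineRegisterInfo.
  const TargetRegisterClass *RC = MRI.getRegClass(R);
  (void)RC;
} else if (R.isPhysical()) {
  // Physical registers are queried through TargetRegisterInfo instead.
}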
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
MachineFunction & getMachineFunction() const
SlotIndex - An opaque wrapper around machine indexes.
SlotIndex getBaseIndex() const
Returns the base index associated with this index.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Information about stack frame layout on the target.
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when Inst has reassociable operands in the same MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
bool isPositionIndependent() const
CodeModel::Model getCodeModel() const
Returns the code model.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Target - Wrapper for Target specific information.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getZero()
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI Type * getFP128Ty(LLVMContext &C)
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
SlotIndex def
The index of the defining instruction.
LLVM Value Representation.
void BuildCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, const MCCFIInstruction &CFIInst, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Wraps up getting a CFI index and building a MachineInstr for it.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
void getFrameIndexOperands(SmallVectorImpl< MachineOperand > &Ops, int FI) const override
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
Check if there exists an earlier instruction that operates on the same source operands and sets eflag...
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
Overrides the isSchedulingBoundary from Codegen/TargetInstrInfo.cpp to make it capable of identifying...
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
void replaceBranchWithTailCall(MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const override
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
bool canInsertSelect(const MachineBasicBlock &, ArrayRef< MachineOperand > Cond, Register, Register, Register, int &, int &, int &) const override
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, unsigned *LoadRegIndex=nullptr) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
Returns true iff the routine could find two commutable operands in the given machine instruction.
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const override
X86InstrInfo(const X86Subtarget &STI)
static bool isDataInvariantLoad(MachineInstr &MI)
Returns true if the instruction has no behavior (specified or otherwise) that is based on the value l...
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned CommuteOpIdx1, unsigned CommuteOpIdx2) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
const X86RegisterInfo & getRegisterInfo() const
getRegisterInfo - TargetInstrInfo is a superset of MRegister info.
bool hasCommutePreference(MachineInstr &MI, bool &Commute) const override
Returns true if we have preference on the operands order in MI, the commute decision is returned in C...
bool hasLiveCondCodeDef(MachineInstr &MI) const
True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const override
bool canMakeTailCallConditional(SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &LdSt, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, Register Reg, bool UnfoldLoad, bool UnfoldStore, SmallVectorImpl< MachineInstr * > &NewMIs) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
convertToThreeAddress - This method must be implemented by targets that set the M_CONVERTIBLE_TO_3_AD...
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool expandPostRAPseudo(MachineInstr &MI) const override
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
MCInst getNop() const override
Return the noop instruction to use for a noop.
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const override
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const override
This is used by the pre-regalloc scheduler to determine (in conjunction with areLoadsFromSameBasePt...
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
Fold a load or store of the specified stack slot into the specified machine instruction for the speci...
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg, int64_t &ImmVal) const override
std::optional< ExtAddrMode > getAddrModeFromMemoryOp(const MachineInstr &MemI, const TargetRegisterInfo *TRI) const override
Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
isStoreToStackSlotPostFE - Check for post-frame ptr elimination stack locations as well.
bool isUnconditionalTailCall(const MachineInstr &MI) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
bool classifyLEAReg(MachineInstr &MI, const MachineOperand &Src, unsigned LEAOpcode, bool AllowSP, Register &NewSrc, unsigned &NewSrcSubReg, bool &isKill, MachineOperand &ImplicitOp, LiveVariables *LV, LiveIntervals *LIS) const
Given an operand within a MachineInstr, insert preceding code to put it into the right format for a p...
Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
isLoadFromStackSlotPostFE - Check for post-frame ptr elimination stack locations as well.
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI) const override
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
bool setExecutionDomainCustom(MachineInstr &MI, unsigned Domain) const
int getSPAdjust(const MachineInstr &MI) const override
getSPAdjust - This returns the stack pointer adjustment made by this instruction.
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool isReMaterializableImpl(const MachineInstr &MI) const override
Register getGlobalBaseReg(MachineFunction *MF) const
getGlobalBaseReg - Return a virtual register initialized with the global base register value.
int getJumpTableIndex(const MachineInstr &MI) const override
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const override
This is an architecture-specific helper function of reassociateOps.
std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const override
bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const override
isCoalescableExtInstr - Return true if the instruction is a "coalescable" extension instruction.
void loadStoreTileReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned Opc, Register Reg, int FrameIdx, bool isKill=false) const
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const override
When getMachineCombinerPatterns() finds potential patterns, this function generates the instructions ...
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
bool analyzeBranchPredicate(MachineBasicBlock &MBB, TargetInstrInfo::MachineBranchPredicate &MBP, bool AllowModify=false) const override
static bool isDataInvariant(MachineInstr &MI)
Returns true if the instruction has no behavior (specified or otherwise) that is based on the value o...
unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const override
Inform the BreakFalseDeps pass how many idle instructions we would like before certain undef register...
void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const override
void buildClearRegister(Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator Iter, DebugLoc &DL, bool AllowSideEffects=true) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
int64_t getFrameAdjustment(const MachineInstr &I) const
Returns the stack pointer adjustment that happens inside the frame setup..destroy sequence (e....
bool hasHighOperandLatency(const TargetSchedModel &SchedModel, const MachineRegisterInfo *MRI, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override
uint16_t getExecutionDomainCustom(const MachineInstr &MI) const
bool isHighLatencyDef(int opc) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
foldImmediate - 'Reg' is known to be defined by a move immediate instruction, try to fold the immedia...
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
unsigned getFMA3OpcodeToCommuteOperands(const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2, const X86InstrFMA3Group &FMA3Group) const
Returns an adjusted FMA opcode that must be used in an FMA instruction that performs the same computatio...
bool preservesZeroValueInReg(const MachineInstr *MI, const Register NullValueReg, const TargetRegisterInfo *TRI) const override
unsigned getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const override
Inform the BreakFalseDeps pass how many idle instructions we would like before a partial register upd...
X86MachineFunctionInfo - This class is derived from MachineFunction and contains private X86 target-s...
Register getGlobalBaseReg() const
int getTCReturnAddrDelta() const
void setGlobalBaseReg(Register Reg)
unsigned getNumLocalDynamicTLSAccesses() const
bool getUsesRedZone() const
const TargetRegisterClass * constrainRegClassToNonRex2(const TargetRegisterClass *RC) const
bool isPICStyleGOT() const
const X86InstrInfo * getInstrInfo() const override
const X86RegisterInfo * getRegisterInfo() const override
const X86FrameLowering * getFrameLowering() const override
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ X86
Windows x64, Windows Itanium (IA-64)
X86II - This namespace holds all of the target specific flags that instruction info tracks.
bool isKMergeMasked(uint64_t TSFlags)
bool hasNewDataDest(uint64_t TSFlags)
@ MO_GOT_ABSOLUTE_ADDRESS
MO_GOT_ABSOLUTE_ADDRESS - On a symbol operand, this represents a relocation of: SYMBOL_LABEL + [.
@ MO_INDNTPOFF
MO_INDNTPOFF - On a symbol operand this indicates that the immediate is the absolute address of the G...
@ MO_GOTNTPOFF
MO_GOTNTPOFF - On a symbol operand this indicates that the immediate is the offset of the GOT entry w...
@ MO_GOTTPOFF
MO_GOTTPOFF - On a symbol operand this indicates that the immediate is the offset of the GOT entry wi...
@ MO_PIC_BASE_OFFSET
MO_PIC_BASE_OFFSET - On a symbol operand this indicates that the immediate should get the value of th...
@ MO_GOTPCREL
MO_GOTPCREL - On a symbol operand this indicates that the immediate is offset to the GOT entry for th...
@ EVEX
EVEX - Specifies that this instruction uses the EVEX form, which provides syntax support for up to 32 512-bit r...
@ SSEDomainShift
Execution domain for SSE instructions.
bool canUseApxExtendedReg(const MCInstrDesc &Desc)
bool isPseudo(uint64_t TSFlags)
bool isKMasked(uint64_t TSFlags)
int getMemoryOperandNo(uint64_t TSFlags)
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
Define some predicates that are used for node matching.
CondCode getCondFromBranch(const MachineInstr &MI)
CondCode getCondFromCFCMov(const MachineInstr &MI)
CondCode getCondFromMI(const MachineInstr &MI)
Return the condition code of the instruction.
int getFirstAddrOperandIdx(const MachineInstr &MI)
Return the index of the instruction's first address operand, if it has a memory reference,...
unsigned getSwappedVCMPImm(unsigned Imm)
Get the VCMP immediate if the opcodes are swapped.
CondCode GetOppositeBranchCondition(CondCode CC)
GetOppositeBranchCondition - Return the inverse of the specified cond, e.g. turning COND_E to COND_NE.
unsigned getSwappedVPCOMImm(unsigned Imm)
Get the VPCOM immediate if the opcodes are swapped.
bool isX87Instruction(MachineInstr &MI)
Check if the instruction is an X87 instruction.
unsigned getNonNDVariant(unsigned Opc)
unsigned getVPCMPImmForCond(ISD::CondCode CC)
Get the VPCMP immediate for the given condition.
std::pair< CondCode, bool > getX86ConditionCode(CmpInst::Predicate Predicate)
Return a pair of condition code for the given predicate and whether the instruction operands should b...
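A sketch tying the condition-code helpers together; the exact CondCode returned for ICMP_SLT is an assumption:
// Map an IR predicate to an x86 condition code; the bool reports whether
// the compare operands must be swapped to match it.
auto [CC, NeedSwap] = X86::getX86ConditionCode(CmpInst::ICMP_SLT);
// Presumably X86::COND_L with NeedSwap == false; the inverse branch
// condition is then:
X86::CondCode Inv = X86::GetOppositeBranchCondition(CC); // X86::COND_GE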
CondCode getCondFromSETCC(const MachineInstr &MI)
unsigned getSwappedVPCMPImm(unsigned Imm)
Get the VPCMP immediate if the opcodes are swapped.
CondCode getCondFromCCMP(const MachineInstr &MI)
int getCCMPCondFlagsFromCondCode(CondCode CC)
int getCondSrcNoFromDesc(const MCInstrDesc &MCID)
Return the source operand # for condition code by MCID.
const Constant * getConstantFromPool(const MachineInstr &MI, unsigned OpNo)
Find any constant pool entry associated with a specific instruction operand.
unsigned getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand=false, bool HasNDD=false)
Return a cmov opcode for the given register size in bytes, and operand type.
unsigned getNFVariant(unsigned Opc)
unsigned getVectorRegisterWidth(const MCOperandInfo &Info)
Get the width of the vector register operand.
CondCode getCondFromCMov(const MachineInstr &MI)
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
NodeAddr< NodeBase * > Node
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static bool isAddMemInstrWithRelocation(const MachineInstr &MI)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
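Both bit-width checks are constexpr, so the immediate-range tests can be illustrated directly (isUInt appears further down this list):
static_assert(isInt<8>(127) && !isInt<8>(128), "signed 8-bit range");
static_assert(isUInt<8>(255) && !isUInt<8>(256), "unsigned 8-bit range");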
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
static bool isMem(const MachineInstr &MI, unsigned Op)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)
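A sketch of moving between register widths with this helper (Size is in bits; the expected results are stated as assumptions):
MCRegister R64 = getX86SubSuperRegister(X86::EAX, 64);      // X86::RAX
MCRegister AH8 = getX86SubSuperRegister(X86::RAX, 8, true); // X86::AH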
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
FunctionPass * createX86GlobalBaseRegPass()
This pass initializes a global base register for PIC on x86-32.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
static const MachineInstrBuilder & addRegReg(const MachineInstrBuilder &MIB, Register Reg1, bool isKill1, unsigned SubReg1, Register Reg2, bool isKill2, unsigned SubReg2)
addRegReg - This function is used to add a memory reference of the form: [Reg + Reg].
unsigned getDeadRegState(bool B)
static const MachineInstrBuilder & addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset=0, bool mem=true)
addFrameReference - This function is used to add a reference to the base of an abstract object on the...
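A sketch of a stack-slot load built with this helper, assuming TII, MBB, MI, DL, DestReg and FI are in scope; addFrameReference appends the five x86 memory operands for the slot:
addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::MOV32rm), DestReg), FI);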
FunctionPass * createCleanupLocalDynamicTLSPass()
This pass combines multiple accesses to local-dynamic TLS variables so that the TLS base address for ...
const X86FoldTableEntry * lookupBroadcastFoldTable(unsigned RegOp, unsigned OpNum)
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
const X86InstrFMA3Group * getFMA3Group(unsigned Opcode, uint64_t TSFlags)
Returns a reference to a group of FMA3 opcodes to where the given Opcode is included.
auto reverse(ContainerTy &&C)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
const X86FoldTableEntry * lookupTwoAddrFoldTable(unsigned RegOp)
bool is_sorted(R &&Range, Compare C)
Wrapper function around std::is_sorted to check if elements in a range R are sorted with respect to a...
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
DomTreeNodeBase< MachineBasicBlock > MachineDomTreeNode
static bool isMemInstrWithGOTPCREL(const MachineInstr &MI)
static const MachineInstrBuilder & addOffset(const MachineInstrBuilder &MIB, int Offset)
unsigned getUndefRegState(bool B)
unsigned getRegState(const MachineOperand &RegOp)
Get all register state flags from machine operand RegOp.
unsigned getDefRegState(bool B)
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
@ Sub
Subtraction of integers.
unsigned getKillRegState(bool B)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
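A quick numeric sketch of the rounding behavior:
uint64_t Padded = alignTo(13, Align(8)); // 16: next multiple of 8
bool OK = isAligned(Align(8), Padded);   // true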
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
const X86FoldTableEntry * lookupUnfoldTable(unsigned MemOp)
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool matchBroadcastSize(const X86FoldTableEntry &Entry, unsigned BroadcastBits)
std::pair< MachineOperand, DIExpression * > ParamLoadedValue
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
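A one-line sketch of the half-open range it produces:
for (int I : seq(0, 4)) { /* visits 0, 1, 2, 3 */ }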
const X86FoldTableEntry * lookupFoldTable(unsigned RegOp, unsigned OpNum)
static const MachineInstrBuilder & addRegOffset(const MachineInstrBuilder &MIB, Register Reg, bool isKill, int Offset)
addRegOffset - This function is used to add a memory reference of the form [Reg + Offset],...
int popcount(T Value) noexcept
Count the number of set bits in a value.
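A brief sketch of the two bit queries (countr_zero is listed above):
int TZ = countr_zero(0x8u); // 3 trailing zero bits
int Ones = popcount(0xF0u); // 4 set bits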
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Used to describe an addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
std::vector< MachineInstr * > Kills
Kills - List of MachineInstrs which are the last use of this virtual register (kill it) in the...
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
X86AddressMode - This struct holds a generalized full x86 address mode.
enum llvm::X86AddressMode::(anonymous) BaseType
This class is used to group {132, 213, 231} forms of FMA opcodes together.
unsigned get213Opcode() const
Returns the 213 form of FMA opcode.
unsigned get231Opcode() const
Returns the 231 form of FMA opcode.
bool isIntrinsic() const
Returns true iff the group of FMA opcodes holds intrinsic opcodes.
unsigned get132Opcode() const
Returns the 132 form of FMA opcode.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.