#define DEBUG_TYPE "x86-isel"
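// Developer-facing tuning flags, parsed by LLVM's cl::opt command-line
// machinery; all three are file-local to the X86 ISel lowering code.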
76 "x86-experimental-pref-innermost-loop-alignment",
cl::init(4),
78 "Sets the preferable loop alignment for experiments (as log2 bytes) "
79 "for innermost loops only. If specified, this option overrides "
80 "alignment set by x86-experimental-pref-loop-alignment."),
84 "mul-constant-optimization",
cl::init(
true),
85 cl::desc(
"Replace 'mul x, Const' with more effective instructions like "
90 "x86-experimental-unordered-atomic-isel",
cl::init(
false),
91 cl::desc(
"Use LoadSDNode and StoreSDNode instead of "
92 "AtomicSDNode for unordered atomic loads and "
93 "stores respectively."),
bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
if (Subtarget.isAtom())
else if (Subtarget.is64Bit())
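// On subtargets with slow hardware division, the full source registers a
// runtime bypass to a narrower divide (addBypassSlowDiv) for 32- and
// 64-bit operands.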
if (Subtarget.hasSlowDivide32())
if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
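// Table-driven libcall setup: each LibraryCalls entry pairs a libcall
// opcode with a symbol name and calling convention; the loop below
// registers them (in the full source via setLibcallName and
// setLibcallCallingConv).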
static const struct {
  const char *const Name;

for (const auto &LC : LibraryCalls) {
if (Subtarget.is64Bit())
if (Subtarget.is64Bit())
if (Subtarget.is64Bit())
if (Subtarget.is64Bit())
if (!Subtarget.useSoftFloat()) {
if (!Subtarget.is64Bit()) {
if (Subtarget.is64Bit()) {
if (Subtarget.is64Bit()) {
} else if (!Subtarget.is64Bit())
if (Subtarget.is64Bit())
if (!Subtarget.useSoftFloat() && Subtarget.hasX87()) {
if (!Subtarget.hasBMI()) {
if (Subtarget.is64Bit()) {
if (Subtarget.hasLZCNT()) {
if (VT == MVT::i64 && !Subtarget.is64Bit())
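// f16 conversions are Custom-lowered when F16C is available and
// otherwise Expanded to a libcall, as the ternary below selects.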
(!Subtarget.useSoftFloat() && Subtarget.hasF16C()) ? Custom : Expand);
if (Subtarget.is64Bit())
if (Subtarget.hasPOPCNT()) {
if (Subtarget.is64Bit())
if (!Subtarget.hasMOVBE())
if (VT == MVT::i64 && !Subtarget.is64Bit())
if (VT == MVT::i64 && !Subtarget.is64Bit())
if (VT == MVT::i64 && !Subtarget.is64Bit())
if (!Subtarget.is64Bit())
bool Is64Bit = Subtarget.is64Bit();
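// Scalar floating point: with SSE2, f16/f32/f64 live in XMM register
// classes (the EVEX-encodable FR*X variants when AVX-512 is present);
// otherwise the x87 or SSE1 paths below apply.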
if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
  addRegisterClass(MVT::f16, Subtarget.hasAVX512() ? &X86::FR16XRegClass
                                                   : &X86::FR16RegClass);
  addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
                                                   : &X86::FR32RegClass);
  addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
                                                   : &X86::FR64RegClass);
} else if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1() &&
           (UseX87 || Is64Bit)) {
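// Constants the target can materialize cheaply are registered as legal
// FP immediates: +/-0.0 and +/-1.0 for f32 on the SSE1 path, then the
// same set for f64 on the x87 path.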
addLegalFPImmediate(APFloat(+0.0f));
addLegalFPImmediate(APFloat(+1.0f));
addLegalFPImmediate(APFloat(-0.0f));
addLegalFPImmediate(APFloat(-1.0f));

addLegalFPImmediate(APFloat(+0.0f));

addLegalFPImmediate(APFloat(+0.0));
addLegalFPImmediate(APFloat(+1.0));
addLegalFPImmediate(APFloat(-0.0));
addLegalFPImmediate(APFloat(-1.0));

addLegalFPImmediate(APFloat(+0.0));
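// For x87 80-bit extended precision, +/-0.0 and +/-1.0 are built as
// APFloat values (TmpFlt/TmpFlt2, construction elided here); FLD0/FLD1
// plus FCHS can materialize each of them.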
addLegalFPImmediate(TmpFlt);
addLegalFPImmediate(TmpFlt);
addLegalFPImmediate(TmpFlt2);
addLegalFPImmediate(TmpFlt2);
if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
  addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                 : &X86::VR128RegClass);
if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
  addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                  : &X86::VR128RegClass);
if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
  addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                  : &X86::VR128RegClass);
  addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                  : &X86::VR128RegClass);
  addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                  : &X86::VR128RegClass);
  addRegisterClass(MVT::v8f16, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                  : &X86::VR128RegClass);
  addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                  : &X86::VR128RegClass);
  addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                  : &X86::VR128RegClass);
if (VT == MVT::v2i64 && !Subtarget.is64Bit())
if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
if (Subtarget.is64Bit() && !Subtarget.hasAVX512()) {
if (!Subtarget.useSoftFloat() && Subtarget.hasSSE42()) {
if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
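// AVX brings the 256-bit YMM register classes online for every 256-bit
// vector type (EVEX-encodable VR256X variants when VLX is present).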
if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
  addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                  : &X86::VR256RegClass);
  addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                   : &X86::VR256RegClass);
  addRegisterClass(MVT::v16f16, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                   : &X86::VR256RegClass);
  addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                  : &X86::VR256RegClass);
  addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                  : &X86::VR256RegClass);
  addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                  : &X86::VR256RegClass);
  addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                  : &X86::VR256RegClass);
if (!Subtarget.useSoftFloat() && !Subtarget.hasFP16() &&
    Subtarget.hasF16C()) {
if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
if (!Subtarget.hasDQI()) {
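// 512-bit ZMM section: useAVX512Regs() gates actual 512-bit vector use,
// and byte/word element types additionally require BWI.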
if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
  bool HasBWI = Subtarget.hasBWI();
if (!Subtarget.hasVLX()) {
if (Subtarget.hasDQI()) {
if (Subtarget.hasCDI()) {
if (Subtarget.hasVPOPCNTDQ()) {
if (Subtarget.hasVBMI2()) {
if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
if (Subtarget.hasDQI()) {
       "Unexpected operation action!");
if (Subtarget.hasDQI()) {
if (Subtarget.hasCDI()) {
if (Subtarget.hasVPOPCNTDQ()) {
if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
if (Subtarget.hasBITALG()) {
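// FP16 section: a setGroup lambda applies one common batch of operation
// actions to each half-precision vector type (lambda body elided here).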
if (!Subtarget.useSoftFloat() && Subtarget.hasFP16()) {
  auto setGroup = [&](MVT VT) {
if (Subtarget.hasVLX()) {
if (!Subtarget.useSoftFloat() &&
    (Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16())) {
setF16Action(VT, Expand);
if (!Subtarget.useSoftFloat() && Subtarget.hasBF16()) {
if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
if (Subtarget.hasBWI()) {
if (Subtarget.hasFP16()) {
if (Subtarget.hasAMXTILE()) {
if (!Subtarget.is64Bit()) {
if (VT == MVT::i64 && !Subtarget.is64Bit())
if (!Subtarget.is64Bit()) {
if (Subtarget.is32Bit() &&
unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
!Subtarget.hasBWI())
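// Helper (handleMaskRegisterForCallingConv in the full source): picks
// the MVT and register count used to pass a vXi1 mask argument;
// non-power-of-two element counts, or v64i1 without BWI, take a
// fallback path.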
static std::pair<MVT, unsigned>
if (!isPowerOf2_32(NumElts) || (NumElts == 64 && !Subtarget.hasBWI()) ||
unsigned NumRegisters;
std::tie(RegisterVT, NumRegisters) =
!Subtarget.hasX87())