//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <optional>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
    cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

static cl::opt<bool> DisableInnermostLoopAlign32(
    "disable-ppc-innermost-loop-align32",
    cl::desc("don't always align innermost loop to 32 bytes on ppc"),
    cl::Hidden);

static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
    cl::desc("use absolute jump tables on ppc"), cl::Hidden);

static cl::opt<bool>
    DisablePerfectShuffle("ppc-disable-perfect-shuffle",
                          cl::desc("disable vector permute decomposition"),
                          cl::init(true), cl::Hidden);

static cl::opt<bool> DisableAutoPairedVecSt(
    "disable-auto-paired-vec-st",
    cl::desc("disable automatically generated 32byte paired vector stores"),
    cl::init(true), cl::Hidden);

static cl::opt<unsigned> PPCMinimumJumpTableEntries(
    "ppc-min-jump-table-entries", cl::init(64), cl::Hidden,
    cl::desc("Set minimum number of entries to use a jump table on PPC"));

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");
STATISTIC(ShufflesHandledWithVPERM,
          "Number of shuffles lowered to a VPERM or XXPERM");
STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocation probed");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);

static const char AIXSSPCanaryWordName[] = "__ssp_canary_word";

// A faster local-exec TLS access sequence (enabled with the
// -maix-small-local-exec-tls option) can be produced for TLS variables;
// consistent with the IBM XL compiler, we apply a max size of slightly under
// 32KB.
constexpr uint64_t AIXSmallTlsPolicySizeLimit = 32751;

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Initialize map that relates the PPC addressing modes to the computed flags
  // of a load/store instruction. The map is used to determine the optimal
  // addressing mode when selecting loads and stores.
  initializeAddrModeMap();
  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    if (hasSPE()) {
      addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
      // EFPU2 APU only supports f32
      if (!Subtarget.hasEFPU2())
        addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
    } else {
      addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
    }
  }

  // Match BITREVERSE to customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // Custom lower inline assembly to check for special registers.
  setOperationAction(ISD::INLINEASM, MVT::Other, Custom);
  setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Promote);
  }

  if (Subtarget.isISA3_0()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
    setTruncStoreAction(MVT::f64, MVT::f16, Legal);
    setTruncStoreAction(MVT::f32, MVT::f16, Legal);
  } else {
    // No extending loads from f16 or HW conversions back and forth.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Custom);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
      AddPromotedToType(ISD::FP_TO_SINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
      AddPromotedToType(ISD::FP_TO_UINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9.
  // On P9 we may use a hardware instruction to compute the remainder.
  // When the result of both the remainder and the division is required it is
  // more efficient to compute the remainder from the result of the division
  // rather than use the remainder instruction. The instructions are legalized
  // directly because the DivRemPairsPass performs the transformation at the IR
  // level.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Legal);
    setOperationAction(ISD::UREM, MVT::i32, Legal);
    setOperationAction(ISD::SREM, MVT::i64, Legal);
    setOperationAction(ISD::UREM, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // Handle constrained floating-point operations of scalar.
  // TODO: Handle SPE specific operation.
  setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);

  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);

  if (!Subtarget.hasSPE()) {
    setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
  }

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal);
  }

  if (Subtarget.hasFSQRT()) {
    setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);

    setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
  }

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  // MASS transformation for LLVM intrinsics with replicating fast-math flag
  // to be consistent with the PPCGenScalarMASSEntries pass.
  if (TM.getOptLevel() == CodeGenOptLevel::Aggressive) {
    setOperationAction(ISD::FSIN, MVT::f64, Custom);
    setOperationAction(ISD::FCOS, MVT::f64, Custom);
    setOperationAction(ISD::FPOW, MVT::f64, Custom);
    setOperationAction(ISD::FLOG, MVT::f64, Custom);
    setOperationAction(ISD::FLOG10, MVT::f64, Custom);
    setOperationAction(ISD::FEXP, MVT::f64, Custom);
    setOperationAction(ISD::FSIN, MVT::f32, Custom);
    setOperationAction(ISD::FCOS, MVT::f32, Custom);
    setOperationAction(ISD::FPOW, MVT::f32, Custom);
    setOperationAction(ISD::FLOG, MVT::f32, Custom);
    setOperationAction(ISD::FLOG10, MVT::f32, Custom);
    setOperationAction(ISD::FEXP, MVT::f32, Custom);
  }

  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA, MVT::f64, Legal);
    setOperationAction(ISD::FMA, MVT::f32, Legal);
  }

  if (Subtarget.hasSPE())
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // Prior to P10, PowerPC does not have BSWAP, but we can use vector BSWAP
  // instruction xxbrd to speed up scalar BSWAP64.
  if (Subtarget.isISA3_1()) {
    setOperationAction(ISD::BSWAP, MVT::i32, Legal);
    setOperationAction(ISD::BSWAP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);
    setOperationAction(
        ISD::BSWAP, MVT::i64,
        (Subtarget.hasP9Vector() && Subtarget.isPPC64()) ? Custom : Expand);
  }

  // CTPOP or CTTZ were introduced in P8/P9 respectively.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  if (Subtarget.hasFPU()) {
    setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);

    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
  }

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);

    // SPE supports signaling compare of f32/f64.
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, etc. As a result, no
  // other SjLj exception interfaces are implemented and please don't build
  // your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f64, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::ppcf128, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f64, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE()) {
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
    } else {
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand);
    }
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  // PowerPC has better expansions for funnel shifts than the generic
  // TargetLowering::expandFunnelShift.
  if (Subtarget.has64BitSupport()) {
    setOperationAction(ISD::FSHL, MVT::i64, Custom);
    setOperationAction(ISD::FSHR, MVT::i64, Custom);
  }
  setOperationAction(ISD::FSHL, MVT::i32, Custom);
  setOperationAction(ISD::FSHR, MVT::i32, Custom);

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected after
      // the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      } else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integer that fit in an Altivec/VSX register
    // are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);

    // Custom lowering ROTL v1i128 to VECTOR_SHUFFLE v16i8.
    setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    if (Subtarget.isISA3_1()) {
      setOperationAction(ISD::MUL, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHU, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v4i32, Legal);
      setOperationAction(ISD::MULHU, MVT::v4i32, Legal);
      setOperationAction(ISD::UDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::SDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::UDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::SDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::UREM, MVT::v2i64, Legal);
      setOperationAction(ISD::SREM, MVT::v2i64, Legal);
      setOperationAction(ISD::UREM, MVT::v4i32, Legal);
      setOperationAction(ISD::SREM, MVT::v4i32, Legal);
      setOperationAction(ISD::UREM, MVT::v1i128, Legal);
      setOperationAction(ISD::SREM, MVT::v1i128, Legal);
      setOperationAction(ISD::UDIV, MVT::v1i128, Legal);
      setOperationAction(ISD::SDIV, MVT::v1i128, Legal);
      setOperationAction(ISD::ROTL, MVT::v1i128, Legal);
    }

    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }

      // The nearbyint variants are not allowed to raise the inexact exception
      // so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      if (Subtarget.isISA3_1())
        setOperationAction(ISD::SETCC, MVT::v1i128, Legal);
      else
        setOperationAction(ISD::SETCC, MVT::v1i128, Expand);

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      // Handle constrained floating-point operations of vector.
      // The predicate is `hasVSX` because Altivec instructions do not raise
      // floating-point exceptions but VSX vector instructions do.

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
      addRegisterClass(MVT::f128, &PPC::VRRCRegClass);

      for (MVT FPT : MVT::fp_valuetypes())
        setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);

      // Expand the SELECT to SELECT_CC.
      setOperationAction(ISD::SELECT, MVT::f128, Expand);

      setTruncStoreAction(MVT::f128, MVT::f64, Expand);
      setTruncStoreAction(MVT::f128, MVT::f32, Expand);

      // No implementation for these ops for PowerPC.
      setOperationAction(ISD::FSIN, MVT::f128, Expand);
      setOperationAction(ISD::FCOS, MVT::f128, Expand);
      setOperationAction(ISD::FPOW, MVT::f128, Expand);
      setOperationAction(ISD::FREM, MVT::f128, Expand);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      // Test data class instructions store results in CR bits.
      if (Subtarget.useCRBits()) {
        setOperationAction(ISD::IS_FPCLASS, MVT::f32, Custom);
        setOperationAction(ISD::IS_FPCLASS, MVT::f64, Custom);
        setOperationAction(ISD::IS_FPCLASS, MVT::f128, Custom);
      }

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      setOperationAction(ISD::FADD, MVT::f128, Legal);
      setOperationAction(ISD::FSUB, MVT::f128, Legal);
      setOperationAction(ISD::FDIV, MVT::f128, Legal);
      setOperationAction(ISD::FMUL, MVT::f128, Legal);
      setOperationAction(ISD::FSQRT, MVT::f128, Legal);

      setOperationAction(ISD::FMA, MVT::f128, Legal);

      setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::FRINT, MVT::f128, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::FROUND, MVT::f128, Legal);

      // Handle constrained floating-point operations of fp128.

      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
    } else if (Subtarget.hasVSX()) {
      setOperationAction(ISD::LOAD, MVT::f128, Promote);
      setOperationAction(ISD::STORE, MVT::f128, Promote);

      AddPromotedToType(ISD::LOAD, MVT::f128, MVT::v4i32);
      AddPromotedToType(ISD::STORE, MVT::f128, MVT::v4i32);

      // Set FADD/FSUB as libcall to avoid having the legalizer expand the
      // fp_to_uint and int_to_fp.
      setOperationAction(ISD::FADD, MVT::f128, LibCall);
      setOperationAction(ISD::FSUB, MVT::f128, LibCall);

      setOperationAction(ISD::FMUL, MVT::f128, Expand);
      setOperationAction(ISD::FDIV, MVT::f128, Expand);
      setOperationAction(ISD::FNEG, MVT::f128, Expand);
      setOperationAction(ISD::FABS, MVT::f128, Expand);
      setOperationAction(ISD::FSQRT, MVT::f128, Expand);
      setOperationAction(ISD::FMA, MVT::f128, Expand);
      setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

      // Expand the fp_extend if the target type is fp128.
      setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Expand);

      // Expand the fp_round if the source type is fp128.
      for (MVT VT : {MVT::f32, MVT::f64}) {
        setOperationAction(ISD::FP_ROUND, VT, Custom);
        setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom);
      }

      // Lower following f128 select_cc pattern:
      // select_cc x, y, tv, fv, cc -> select_cc (setcc x, y, cc), 0, tv, fv, NE
      setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);

      // We need to handle f128 SELECT_CC with integer result type.
      setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
      setOperationAction(ISD::SELECT_CC, MVT::i64, isPPC64 ? Custom : Expand);
    }

    if (Subtarget.hasP9Altivec()) {
      if (Subtarget.isISA3_1()) {
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal);
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
      } else {
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
      }

      setOperationAction(ISD::ABDU, MVT::v16i8, Legal);
      setOperationAction(ISD::ABDU, MVT::v8i16, Legal);
      setOperationAction(ISD::ABDU, MVT::v4i32, Legal);
      setOperationAction(ISD::ABDS, MVT::v4i32, Legal);
    }

    if (Subtarget.hasP10Vector()) {
    }
  }

  if (Subtarget.pairedVectorMemops()) {
    addRegisterClass(MVT::v256i1, &PPC::VSRpRCRegClass);
    setOperationAction(ISD::LOAD, MVT::v256i1, Custom);
    setOperationAction(ISD::STORE, MVT::v256i1, Custom);
  }
  if (Subtarget.hasMMA()) {
    if (Subtarget.isISAFuture())
      addRegisterClass(MVT::v512i1, &PPC::WACCRCRegClass);
    else
      addRegisterClass(MVT::v512i1, &PPC::UACCRCRegClass);
    setOperationAction(ISD::LOAD, MVT::v512i1, Custom);
    setOperationAction(ISD::STORE, MVT::v512i1, Custom);
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  if (Subtarget.isISA3_1())
    setOperationAction(ISD::SRA, MVT::v1i128, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  if (shouldInlineQuadwordAtomics()) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);
    setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  setLibcallName(RTLIB::MULO_I128, nullptr);
  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
    setLibcallName(RTLIB::MUL_I128, nullptr);
    setLibcallName(RTLIB::MULO_I64, nullptr);
  }

  if (shouldInlineQuadwordAtomics())
    setMaxAtomicSizeInBitsSupported(128);
  else if (isPPC64)
    setMaxAtomicSizeInBitsSupported(64);
  else
    setMaxAtomicSizeInBitsSupported(32);

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine({ISD::ADD, ISD::SHL, ISD::SRA, ISD::SRL, ISD::MUL,
                       ISD::FMA, ISD::SINT_TO_FP, ISD::BUILD_VECTOR});
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine({ISD::LOAD, ISD::STORE, ISD::BR_CC});
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine({ISD::BSWAP, ISD::INTRINSIC_WO_CHAIN,
                       ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID});

  setTargetDAGCombine({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, ISD::ANY_EXTEND});

  setTargetDAGCombine({ISD::TRUNCATE, ISD::VECTOR_SHUFFLE});

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine({ISD::TRUNCATE, ISD::SETCC, ISD::SELECT_CC});
  }

  setLibcallName(RTLIB::LOG_F128, "logf128");
  setLibcallName(RTLIB::LOG2_F128, "log2f128");
  setLibcallName(RTLIB::LOG10_F128, "log10f128");
  setLibcallName(RTLIB::EXP_F128, "expf128");
  setLibcallName(RTLIB::EXP2_F128, "exp2f128");
  setLibcallName(RTLIB::SIN_F128, "sinf128");
  setLibcallName(RTLIB::COS_F128, "cosf128");
  setLibcallName(RTLIB::SINCOS_F128, "sincosf128");
  setLibcallName(RTLIB::POW_F128, "powf128");
  setLibcallName(RTLIB::FMIN_F128, "fminf128");
  setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
  setLibcallName(RTLIB::REM_F128, "fmodf128");
  setLibcallName(RTLIB::SQRT_F128, "sqrtf128");
  setLibcallName(RTLIB::CEIL_F128, "ceilf128");
  setLibcallName(RTLIB::FLOOR_F128, "floorf128");
  setLibcallName(RTLIB::TRUNC_F128, "truncf128");
  setLibcallName(RTLIB::ROUND_F128, "roundf128");
  setLibcallName(RTLIB::LROUND_F128, "lroundf128");
  setLibcallName(RTLIB::LLROUND_F128, "llroundf128");
  setLibcallName(RTLIB::RINT_F128, "rintf128");
  setLibcallName(RTLIB::LRINT_F128, "lrintf128");
  setLibcallName(RTLIB::LLRINT_F128, "llrintf128");
  setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128");
  setLibcallName(RTLIB::FMA_F128, "fmaf128");
  setLibcallName(RTLIB::FREXP_F128, "frexpf128");

  if (Subtarget.isAIXABI()) {
    setLibcallName(RTLIB::MEMCPY, isPPC64 ? "___memmove64" : "___memmove");
    setLibcallName(RTLIB::MEMMOVE, isPPC64 ? "___memmove64" : "___memmove");
    setLibcallName(RTLIB::MEMSET, isPPC64 ? "___memset64" : "___memset");
    setLibcallName(RTLIB::BZERO, isPPC64 ? "___bzero64" : "___bzero");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  // TODO: The default entry number is set to 64. This stops most jump table
  // generation on PPC. But it is good for current PPC HWs because the indirect
  // branch instruction mtctr to the jump table may lead to bad branch
  // prediction. Re-evaluate this value on future HWs that can do better with
  // mtctr.
  setMinimumJumpTableEntries(PPCMinimumJumpTableEntries);

  setMinFunctionAlignment(Align(4));

  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR10:
    setPrefLoopAlignment(Align(16));
    setPrefFunctionAlignment(Align(16));
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
      Subtarget.getCPUDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }

  IsStrictFPEnabled = true;

  // Let the subtarget (CPU) decide if a predictable select is more expensive
  // than the corresponding branch. This information is used in CGP to decide
  // when to convert selects into branches.
  PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
}

// *********************************** NOTE ************************************
// For selecting load and store instructions, the addressing modes are defined
// as ComplexPatterns in PPCInstrInfo.td, which are then utilized in the TD
// patterns to match the load and store instructions.
//
// The TD definitions for the addressing modes correspond to their respective
// Select<AddrMode>Form() function in PPCISelDAGToDAG.cpp. These functions rely
// on SelectOptimalAddrMode(), which calls computeMOFlags() to compute the
// address mode flags of a particular node. Afterwards, the computed address
// flags are passed into getAddrModeForFlags() in order to retrieve the optimal
// addressing mode. SelectOptimalAddrMode() then sets the Base and Displacement
// accordingly, based on the preferred addressing mode.
//
// Within PPCISelLowering.h, there are two enums: MemOpFlags and AddrMode.
// MemOpFlags contains all the possible flags that can be used to compute the
// optimal addressing mode for load and store instructions.
// AddrMode contains all the possible load and store addressing modes available
// on Power (such as DForm, DSForm, DQForm, XForm, etc.)
//
// When adding new load and store instructions, it is possible that new address
// flags may need to be added into MemOpFlags, and a new addressing mode will
// need to be added to AddrMode. An entry of the new addressing mode (consisting
// of the minimal and main distinguishing address flags for the new load/store
// instructions) will need to be added into initializeAddrModeMap() below.
// Finally, when adding new addressing modes, getAddrModeForFlags() will
// need to be updated to account for selecting the optimal addressing mode.
// *****************************************************************************
/// Initialize the map that relates the different addressing modes of the load
/// and store instructions to a set of flags. This ensures the load/store
/// instruction is correctly matched during instruction selection.
void PPCTargetLowering::initializeAddrModeMap() {
  AddrModesMap[PPC::AM_DForm] = {
      // LWZ, STW
      // LBZ, LHZ, STB, STH
      // LHA
      // LFS, LFD, STFS, STFD
  };
  AddrModesMap[PPC::AM_DSForm] = {
      // LWA
      // LD, STD
      // DFLOADf32, DFLOADf64, DSTOREf32, DSTOREf64
  };
  AddrModesMap[PPC::AM_DQForm] = {
      // LXV, STXV
  };
  AddrModesMap[PPC::AM_PrefixDForm] = {PPC::MOF_RPlusSImm34 |
                                       PPC::MOF_SubtargetP10};
  // TODO: Add mapping for quadword load/store.
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 &&
        VTy->getPrimitiveSizeInBits().getFixedValue() >= 256)
      MaxAlign = Align(32);
    else if (VTy->getPrimitiveSizeInBits().getFixedValue() >= 128 &&
             MaxAlign < 16)
      MaxAlign = Align(16);
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Align EltAlign;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      Align EltAlign;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}
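
// For example (derived from the recursion above): with Altivec enabled,
// getByValTypeAlignment() below calls this with MaxMaxAlign = Align(16), so a
// struct containing a <4 x i32> member (128 bits) raises MaxAlign to
// Align(16), and the struct walk stops early once that maximum is reached; a
// struct of plain scalars leaves the caller's 4/8-byte default untouched.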

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // 16byte and wider vectors are passed on 16byte boundary.
  // The rest is 8 on PPC64 and 4 on PPC32 boundary.
  Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
  if (Subtarget.hasAltivec())
    getMaxByValAlign(Ty, Alignment, Align(16));
  return Alignment.value();
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

bool PPCTargetLowering::hasSPE() const {
  return Subtarget.hasSPE();
}

bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
  return VT.isScalarInteger();
}

bool PPCTargetLowering::shallExtractConstSplatVectorElementToStore(
    Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const {
  if (!Subtarget.isPPC64() || !Subtarget.hasVSX())
    return false;

  if (auto *VTy = dyn_cast<VectorType>(VectorTy)) {
    if (VTy->getScalarType()->isIntegerTy()) {
      // ElemSizeInBits 8/16 can fit in immediate field, not needed here.
      if (ElemSizeInBits == 32) {
        Index = Subtarget.isLittleEndian() ? 2 : 1;
        return true;
      }
      if (ElemSizeInBits == 64) {
        Index = Subtarget.isLittleEndian() ? 1 : 0;
        return true;
      }
    }
  }
  return false;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::XSMAXC: return "PPCISD::XSMAXC";
  case PPCISD::XSMINC: return "PPCISD::XSMINC";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::FTSQRT:
    return "PPCISD::FTSQRT";
  case PPCISD::FSQRT:
    return "PPCISD::FSQRT";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::XXSPLTI_SP_TO_DP:
    return "PPCISD::XXSPLTI_SP_TO_DP";
  case PPCISD::XXSPLTI32DX:
    return "PPCISD::XXSPLTI32DX";
  case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
  case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
  case PPCISD::XXPERM:
    return "PPCISD::XXPERM";
  case PPCISD::VECSHL: return "PPCISD::VECSHL";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
  case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
  case PPCISD::PROBED_ALLOCA: return "PPCISD::PROBED_ALLOCA";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::CALL_NOTOC: return "PPCISD::CALL_NOTOC";
  case PPCISD::CALL_RM:
    return "PPCISD::CALL_RM";
  case PPCISD::CALL_NOP_RM:
    return "PPCISD::CALL_NOP_RM";
  case PPCISD::CALL_NOTOC_RM:
    return "PPCISD::CALL_NOTOC_RM";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::BCTRL_RM:
    return "PPCISD::BCTRL_RM";
  case PPCISD::BCTRL_LOAD_TOC_RM:
    return "PPCISD::BCTRL_LOAD_TOC_RM";
  case PPCISD::RET_GLUE: return "PPCISD::RET_GLUE";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
    return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
  case PPCISD::ANDI_rec_1_EQ_BIT:
    return "PPCISD::ANDI_rec_1_EQ_BIT";
  case PPCISD::ANDI_rec_1_GT_BIT:
    return "PPCISD::ANDI_rec_1_GT_BIT";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMP_rec: return "PPCISD::VCMP_rec";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LXSIZX: return "PPCISD::LXSIZX";
  case PPCISD::STXSIX: return "PPCISD::STXSIX";
  case PPCISD::VEXTS: return "PPCISD::VEXTS";
  case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
  case PPCISD::LOAD_VEC_BE: return "PPCISD::LOAD_VEC_BE";
  case PPCISD::STORE_VEC_BE: return "PPCISD::STORE_VEC_BE";
  case PPCISD::ST_VSR_SCAL_INT:
    return "PPCISD::ST_VSR_SCAL_INT";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
  case PPCISD::GET_TLS_MOD_AIX: return "PPCISD::GET_TLS_MOD_AIX";
  case PPCISD::GET_TPOINTER: return "PPCISD::GET_TPOINTER";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::TLSGD_AIX: return "PPCISD::TLSGD_AIX";
  case PPCISD::TLSLD_AIX: return "PPCISD::TLSLD_AIX";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::PADDI_DTPREL:
    return "PPCISD::PADDI_DTPREL";
  case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
  case PPCISD::SC: return "PPCISD::SC";
  case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB: return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128";
  case PPCISD::BUILD_SPE64: return "PPCISD::BUILD_SPE64";
  case PPCISD::EXTRACT_SPE: return "PPCISD::EXTRACT_SPE";
  case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI";
  case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH";
  case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF";
  case PPCISD::MAT_PCREL_ADDR: return "PPCISD::MAT_PCREL_ADDR";
  case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR:
    return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
  case PPCISD::TLS_LOCAL_EXEC_MAT_ADDR:
    return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";
  case PPCISD::ACC_BUILD: return "PPCISD::ACC_BUILD";
  case PPCISD::PAIR_BUILD: return "PPCISD::PAIR_BUILD";
  case PPCISD::EXTRACT_VSX_REG: return "PPCISD::EXTRACT_VSX_REG";
  case PPCISD::XXMFACC: return "PPCISD::XXMFACC";
  case PPCISD::LD_SPLAT: return "PPCISD::LD_SPLAT";
  case PPCISD::ZEXT_LD_SPLAT: return "PPCISD::ZEXT_LD_SPLAT";
  case PPCISD::SEXT_LD_SPLAT: return "PPCISD::SEXT_LD_SPLAT";
  case PPCISD::FNMSUB: return "PPCISD::FNMSUB";
  case PPCISD::STRICT_FADDRTZ:
    return "PPCISD::STRICT_FADDRTZ";
  case PPCISD::STRICT_FCTIDZ:
    return "PPCISD::STRICT_FCTIDZ";
  case PPCISD::STRICT_FCTIWZ:
    return "PPCISD::STRICT_FCTIWZ";
  case PPCISD::STRICT_FCTIDUZ:
    return "PPCISD::STRICT_FCTIDUZ";
  case PPCISD::STRICT_FCTIWUZ:
    return "PPCISD::STRICT_FCTIWUZ";
  case PPCISD::STRICT_FCFID:
    return "PPCISD::STRICT_FCFID";
  case PPCISD::STRICT_FCFIDU:
    return "PPCISD::STRICT_FCFIDU";
  case PPCISD::STRICT_FCFIDS:
    return "PPCISD::STRICT_FCFIDS";
  case PPCISD::STRICT_FCFIDUS:
    return "PPCISD::STRICT_FCFIDUS";
  case PPCISD::LXVRZX: return "PPCISD::LXVRZX";
  case PPCISD::STORE_COND:
    return "PPCISD::STORE_COND";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                   EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
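/// For example, on a big-endian target with two different inputs
/// (ShuffleKind 0), the accepted v16i8 mask is
/// <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31>, i.e. the odd bytes of the
/// two concatenated inputs (an illustration derived from the checks below).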
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
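/// For example, on a big-endian target with two different inputs
/// (ShuffleKind 0), the accepted v16i8 mask is
/// <2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31>, i.e. the low halfword of
/// each word of the two concatenated inputs (derived from the checks below).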
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
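/// For example, on a big-endian target with two different inputs
/// (ShuffleKind 0), the accepted v16i8 mask is
/// <4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31>, i.e. the low word of each
/// doubleword of the two concatenated inputs (derived from the checks below).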
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget &Subtarget = DAG.getSubtarget<PPCSubtarget>();
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
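/// For example, a big-endian vmrglw with two different inputs (ShuffleKind 0,
/// UnitSize 4) corresponds to the v16i8 mask
/// <8,9,10,11, 24,25,26,27, 12,13,14,15, 28,29,30,31>, interleaving the two
/// low words of each input (derived from isVMerge above).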
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * Common function used to match vmrgew and vmrgow shuffles.
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target
 * machine.
 * - Little Endian:
 *   - Use offset of 0 to check for odd elements
 *   - Use offset of 4 to check for even elements
 * - Big Endian:
 *   - Use offset of 0 to check for even elements
 *   - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found in the article "Targeting your applications - what
 * little endian and big endian IBM XL C/C++ compiler differences mean to you"
 * at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements are
 * numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16 elements
 * of size 8 bits. More info on the shufflevector instruction can be found in
 * the LLVM Language Reference:
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 * - If the instruction uses the same vector for both inputs, the range of the
 *   indices will be 0 to 15. In this case, the RHSStart value passed should
 *   be 0.
 * - If the instruction has two different vectors then the range of the
 *   indices will be 0 to 31. In this case, the RHSStart value passed should
 *   be 16 (indices 0-15 specify elements in the first vector while indices 16
 *   to 31 specify elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand input
 *            vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
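// For example, a big-endian vmrgew with two different inputs (even merge,
// IndexOffset 0, RHSStartValue 16) corresponds to the v16i8 mask
// <0,1,2,3, 16,17,18,19, 8,9,10,11, 24,25,26,27>, i.e. words 0 and 2 of each
// input interleaved (derived from the index checks below).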
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped for
 *     little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow
 *         instruction
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
  return false;
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
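/// For example, on a big-endian target with two different inputs
/// (ShuffleKind 0), the v16i8 mask
/// <3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18> is accepted and yields a shift
/// amount of 3; on little-endian (ShuffleKind 2) the same consecutive mask
/// yields 16 - 3 = 13 (derived from the checks below).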
2163int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
2164 SelectionDAG &DAG) {
2165 if (N->getValueType(0) != MVT::v16i8)
2166 return -1;
2167
2168 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2169
2170 // Find the first non-undef value in the shuffle mask.
2171 unsigned i;
2172 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
2173 /*search*/;
2174
2175 if (i == 16) return -1; // all undef.
2176
2177 // Otherwise, check to see if the rest of the elements are consecutively
2178 // numbered from this value.
2179 unsigned ShiftAmt = SVOp->getMaskElt(i);
2180 if (ShiftAmt < i) return -1;
2181
2182 ShiftAmt -= i;
2183 bool isLE = DAG.getDataLayout().isLittleEndian();
2184
2185 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
2186 // Check the rest of the elements to see if they are consecutive.
2187 for (++i; i != 16; ++i)
2188 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2189 return -1;
2190 } else if (ShuffleKind == 1) {
2191 // Check the rest of the elements to see if they are consecutive.
2192 for (++i; i != 16; ++i)
2193 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
2194 return -1;
2195 } else
2196 return -1;
2197
2198 if (isLE)
2199 ShiftAmt = 16 - ShiftAmt;
2200
2201 return ShiftAmt;
2202}
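// Worked example (illustrative): the v16i8 mask {3,4,5,...,18} selects 16
// consecutive bytes starting at index 3, so for a big-endian two-input
// shuffle (ShuffleKind == 0) this returns a shift amount of 3, i.e. the
// pattern a "vsldoi vD, vA, vB, 3" would produce. On little-endian targets
// (ShuffleKind == 2, inputs swapped) the same mask yields 16 - 3 = 13.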
2203
2204/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
2205/// specifies a splat of a single element that is suitable for input to
2206/// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
2207bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
2208 EVT VT = N->getValueType(0);
2209 if (VT == MVT::v2i64 || VT == MVT::v2f64)
2210 return EltSize == 8 && N->getMaskElt(0) == N->getMaskElt(1);
2211
2212 assert(VT == MVT::v16i8 && isPowerOf2_32(EltSize) &&
2213 EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");
2214
2215 // The consecutive indices need to specify an element, not part of two
2216 // different elements. So abandon ship early if this isn't the case.
2217 if (N->getMaskElt(0) % EltSize != 0)
2218 return false;
2219
2220 // This is a splat operation if each element of the permute is the same, and
2221 // if the value doesn't reference the second vector.
2222 unsigned ElementBase = N->getMaskElt(0);
2223
2224 // FIXME: Handle UNDEF elements too!
2225 if (ElementBase >= 16)
2226 return false;
2227
2228 // Check that the indices are consecutive, in the case of a multi-byte element
2229 // splatted with a v16i8 mask.
2230 for (unsigned i = 1; i != EltSize; ++i)
2231 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
2232 return false;
2233
2234 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
2235 if (N->getMaskElt(i) < 0) continue;
2236 for (unsigned j = 0; j != EltSize; ++j)
2237 if (N->getMaskElt(i+j) != N->getMaskElt(j))
2238 return false;
2239 }
2240 return true;
2241}
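// Worked example (illustrative): with EltSize == 4, the v16i8 mask
//   {4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7}
// replicates the second word of the first input, so this returns true;
// a mask such as {5,6,7,8, ...} would fail the EltSize-multiple check.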
2242
2243/// Check that the mask is shuffling N byte elements. Within each N byte
2244/// element of the mask, the indices could be either in increasing or
2245/// decreasing order as long as they are consecutive.
2246/// \param[in] N the shuffle vector SD Node to analyze
2247/// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
2248/// Word/DoubleWord/QuadWord).
2249/// \param[in] StepLen the stride between consecutive indices within each
2250/// N-byte element: 1 if the mask is increasing, -1 if it is decreasing.
2251/// \return true iff the mask is shuffling N byte elements.
2252static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
2253 int StepLen) {
2254 assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
2255 "Unexpected element width.");
2256 assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");
2257
2258 unsigned NumOfElem = 16 / Width;
2259 unsigned MaskVal[16]; // Width is never greater than 16
2260 for (unsigned i = 0; i < NumOfElem; ++i) {
2261 MaskVal[0] = N->getMaskElt(i * Width);
2262 if ((StepLen == 1) && (MaskVal[0] % Width)) {
2263 return false;
2264 } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
2265 return false;
2266 }
2267
2268 for (unsigned int j = 1; j < Width; ++j) {
2269 MaskVal[j] = N->getMaskElt(i * Width + j);
2270 if (MaskVal[j] != MaskVal[j-1] + StepLen) {
2271 return false;
2272 }
2273 }
2274 }
2275
2276 return true;
2277}
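// Worked example (illustrative): the v16i8 mask
//   {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}
// shuffles four 4-byte elements whose indices decrease within each element,
// so isNByteElemShuffleMask(N, /*Width=*/4, /*StepLen=*/-1) returns true;
// this is the byte-reversal pattern the XXBRW check below looks for.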
2278
2279bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2280 unsigned &InsertAtByte, bool &Swap, bool IsLE) {
2281 if (!isNByteElemShuffleMask(N, 4, 1))
2282 return false;
2283
2284 // Now we look at mask elements 0,4,8,12
2285 unsigned M0 = N->getMaskElt(0) / 4;
2286 unsigned M1 = N->getMaskElt(4) / 4;
2287 unsigned M2 = N->getMaskElt(8) / 4;
2288 unsigned M3 = N->getMaskElt(12) / 4;
2289 unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
2290 unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
2291
2292 // Below, let H and L be arbitrary elements of the shuffle mask
2293 // where H is in the range [4,7] and L is in the range [0,3].
2294 // H, 1, 2, 3 or L, 5, 6, 7
2295 if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
2296 (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
2297 ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
2298 InsertAtByte = IsLE ? 12 : 0;
2299 Swap = M0 < 4;
2300 return true;
2301 }
2302 // 0, H, 2, 3 or 4, L, 6, 7
2303 if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
2304 (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
2305 ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
2306 InsertAtByte = IsLE ? 8 : 4;
2307 Swap = M1 < 4;
2308 return true;
2309 }
2310 // 0, 1, H, 3 or 4, 5, L, 7
2311 if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
2312 (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
2313 ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
2314 InsertAtByte = IsLE ? 4 : 8;
2315 Swap = M2 < 4;
2316 return true;
2317 }
2318 // 0, 1, 2, H or 4, 5, 6, L
2319 if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
2320 (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
2321 ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
2322 InsertAtByte = IsLE ? 0 : 12;
2323 Swap = M3 < 4;
2324 return true;
2325 }
2326
2327 // If both vector operands for the shuffle are the same vector, the mask will
2328 // contain only elements from the first one and the second one will be undef.
2329 if (N->getOperand(1).isUndef()) {
2330 ShiftElts = 0;
2331 Swap = true;
2332 unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
2333 if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
2334 InsertAtByte = IsLE ? 12 : 0;
2335 return true;
2336 }
2337 if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
2338 InsertAtByte = IsLE ? 8 : 4;
2339 return true;
2340 }
2341 if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
2342 InsertAtByte = IsLE ? 4 : 8;
2343 return true;
2344 }
2345 if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
2346 InsertAtByte = IsLE ? 0 : 12;
2347 return true;
2348 }
2349 }
2350
2351 return false;
2352}
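// Worked example (illustrative): the byte mask
//   {16,17,18,19, 4,5,6,7, 8,9,10,11, 12,13,14,15}
// has word elements M0..M3 = {4,1,2,3}, matching the "H, 1, 2, 3" shape, so
// on little-endian this sets ShiftElts = 2, InsertAtByte = 12, Swap = false.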
2353
2354bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2355 bool &Swap, bool IsLE) {
2356 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2357 // Ensure each byte index of the word is consecutive.
2358 if (!isNByteElemShuffleMask(N, 4, 1))
2359 return false;
2360
2361 // Now we look at mask elements 0,4,8,12, which are the beginning of words.
2362 unsigned M0 = N->getMaskElt(0) / 4;
2363 unsigned M1 = N->getMaskElt(4) / 4;
2364 unsigned M2 = N->getMaskElt(8) / 4;
2365 unsigned M3 = N->getMaskElt(12) / 4;
2366
2367 // If both vector operands for the shuffle are the same vector, the mask will
2368 // contain only elements from the first one and the second one will be undef.
2369 if (N->getOperand(1).isUndef()) {
2370 assert(M0 < 4 && "Indexing into an undef vector?");
2371 if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
2372 return false;
2373
2374 ShiftElts = IsLE ? (4 - M0) % 4 : M0;
2375 Swap = false;
2376 return true;
2377 }
2378
2379 // Ensure each word index of the ShuffleVector Mask is consecutive.
2380 if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
2381 return false;
2382
2383 if (IsLE) {
2384 if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
2385 // Input vectors don't need to be swapped if the leading element
2386 // of the result is one of the 3 left elements of the second vector
2387 // (or if there is no shift to be done at all).
2388 Swap = false;
2389 ShiftElts = (8 - M0) % 8;
2390 } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
2391 // Input vectors need to be swapped if the leading element
2392 // of the result is one of the 3 left elements of the first vector
2393 // (or if we're shifting by 4 - thereby simply swapping the vectors).
2394 Swap = true;
2395 ShiftElts = (4 - M0) % 4;
2396 }
2397
2398 return true;
2399 } else { // BE
2400 if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
2401 // Input vectors don't need to be swapped if the leading element
2402 // of the result is one of the 4 elements of the first vector.
2403 Swap = false;
2404 ShiftElts = M0;
2405 } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
2406 // Input vectors need to be swapped if the leading element
2407 // of the result is one of the 4 elements of the right vector.
2408 Swap = true;
2409 ShiftElts = M0 - 4;
2410 }
2411
2412 return true;
2413 }
2414}
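// Worked example (illustrative): the byte mask {4,5,6,...,19} has word
// elements M0..M3 = {1,2,3,4}, which are consecutive modulo 8, so on a
// big-endian target this sets ShiftElts = 1 and Swap = false (an
// "xxsldwi vD, vA, vB, 1" pattern); on little-endian it sets ShiftElts = 3
// and Swap = true instead.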
2415
2416static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
2417 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2418
2419 if (!isNByteElemShuffleMask(N, Width, -1))
2420 return false;
2421
2422 for (int i = 0; i < 16; i += Width)
2423 if (N->getMaskElt(i) != i + Width - 1)
2424 return false;
2425
2426 return true;
2427}
2428
2429bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
2430 return isXXBRShuffleMaskHelper(N, 2);
2431}
2432
2433bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
2434 return isXXBRShuffleMaskHelper(N, 4);
2435}
2436
2437bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
2438 return isXXBRShuffleMaskHelper(N, 8);
2439}
2440
2441bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
2442 return isXXBRShuffleMaskHelper(N, 16);
2443}
2444
2445/// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
2446/// if the inputs to the instruction should be swapped and set \p DM to the
2447/// value for the immediate.
2448/// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2449/// AND element 0 of the result comes from the first input (LE) or second input
2450/// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
2451/// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
2452/// mask.
2453bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2454 bool &Swap, bool IsLE) {
2455 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2456
2457 // Ensure each byte index of the double word is consecutive.
2458 if (!isNByteElemShuffleMask(N, 8, 1))
2459 return false;
2460
2461 unsigned M0 = N->getMaskElt(0) / 8;
2462 unsigned M1 = N->getMaskElt(8) / 8;
2463 assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2464
2465 // If both vector operands for the shuffle are the same vector, the mask will
2466 // contain only elements from the first one and the second one will be undef.
2467 if (N->getOperand(1).isUndef()) {
2468 if ((M0 | M1) < 2) {
2469 DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
2470 Swap = false;
2471 return true;
2472 } else
2473 return false;
2474 }
2475
2476 if (IsLE) {
2477 if (M0 > 1 && M1 < 2) {
2478 Swap = false;
2479 } else if (M0 < 2 && M1 > 1) {
2480 M0 = (M0 + 2) % 4;
2481 M1 = (M1 + 2) % 4;
2482 Swap = true;
2483 } else
2484 return false;
2485
2486 // Note: if control flow comes here that means Swap is already set above
2487 DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2488 return true;
2489 } else { // BE
2490 if (M0 < 2 && M1 > 1) {
2491 Swap = false;
2492 } else if (M0 > 1 && M1 < 2) {
2493 M0 = (M0 + 2) % 4;
2494 M1 = (M1 + 2) % 4;
2495 Swap = true;
2496 } else
2497 return false;
2498
2499 // Note: if control flow comes here that means Swap is already set above
2500 DM = (M0 << 1) + (M1 & 1);
2501 return true;
2502 }
2503}
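// Worked example (illustrative): the byte mask {0..7, 24..31} has doubleword
// elements M0 = 0 and M1 = 3, i.e. doubleword 0 of the first input followed
// by doubleword 1 of the second. On big-endian this yields Swap = false and
// DM = (0 << 1) + (3 & 1) = 1; on little-endian the operands are swapped
// first (Swap = true) and DM still comes out as 1.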
2504
2505
2506/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
2507/// appropriate for PPC mnemonics (which have a big endian bias - namely
2508/// elements are counted from the left of the vector register).
2509unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
2510 SelectionDAG &DAG) {
2511 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2512 assert(isSplatShuffleMask(SVOp, EltSize));
2513 EVT VT = SVOp->getValueType(0);
2514
2515 if (VT == MVT::v2i64 || VT == MVT::v2f64)
2516 return DAG.getDataLayout().isLittleEndian() ? 1 - SVOp->getMaskElt(0)
2517 : SVOp->getMaskElt(0);
2518
2519 if (DAG.getDataLayout().isLittleEndian())
2520 return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2521 else
2522 return SVOp->getMaskElt(0) / EltSize;
2523}
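// Worked example (illustrative): for the word-splat mask {4,5,6,7} repeated
// four times (EltSize == 4), this returns 4/4 = 1 on big-endian, but
// (16/4) - 1 - 1 = 2 on little-endian, because PPC splat mnemonics count
// elements from the left of the vector register.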
2524
2525/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2526/// by using a vspltis[bhw] instruction of the specified element size, return
2527/// the constant being splatted. The ByteSize field indicates the number of
2528/// bytes of each element [124] -> [bhw].
2529SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2530 SDValue OpVal;
2531
2532 // If ByteSize of the splat is bigger than the element size of the
2533 // build_vector, then we have a case where we are checking for a splat where
2534 // multiple elements of the buildvector are folded together into a single
2535 // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
2536 unsigned EltSize = 16/N->getNumOperands();
2537 if (EltSize < ByteSize) {
2538 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval.
2539 SDValue UniquedVals[4];
2540 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2541
2542 // See if all of the elements in the buildvector agree across each chunk.
2543 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2544 if (N->getOperand(i).isUndef()) continue;
2545 // If the element isn't a constant, bail fully out.
2546 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2547
2548 if (!UniquedVals[i&(Multiple-1)].getNode())
2549 UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2550 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2551 return SDValue(); // no match.
2552 }
2553
2554 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2555 // either constant or undef values that are identical for each chunk. See
2556 // if these chunks can be combined into a larger vspltis*.
2557
2558 // Check to see if all of the leading entries are either 0 or -1. If
2559 // neither, then this won't fit into the immediate field.
2560 bool LeadingZero = true;
2561 bool LeadingOnes = true;
2562 for (unsigned i = 0; i != Multiple-1; ++i) {
2563 if (!UniquedVals[i].getNode()) continue; // Must have been undefs.
2564
2565 LeadingZero &= isNullConstant(UniquedVals[i]);
2566 LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2567 }
2568 // Finally, check the least significant entry.
2569 if (LeadingZero) {
2570 if (!UniquedVals[Multiple-1].getNode())
2571 return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
2572 int Val = UniquedVals[Multiple - 1]->getAsZExtVal();
2573 if (Val < 16) // 0,0,0,4 -> vspltisw(4)
2574 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2575 }
2576 if (LeadingOnes) {
2577 if (!UniquedVals[Multiple-1].getNode())
2578 return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
2579 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2580 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
2581 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2582 }
2583
2584 return SDValue();
2585 }
2586
2587 // Check to see if this buildvec has a single non-undef value in its elements.
2588 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2589 if (N->getOperand(i).isUndef()) continue;
2590 if (!OpVal.getNode())
2591 OpVal = N->getOperand(i);
2592 else if (OpVal != N->getOperand(i))
2593 return SDValue();
2594 }
2595
2596 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def.
2597
2598 unsigned ValSizeInBytes = EltSize;
2599 uint64_t Value = 0;
2600 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2601 Value = CN->getZExtValue();
2602 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2603 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2604 Value = llvm::bit_cast<uint32_t>(CN->getValueAPF().convertToFloat());
2605 }
2606
2607 // If the splat value is larger than the element value, then we can never do
2608 // this splat. The only case that we could fit the replicated bits into our
2609 // immediate field for would be zero, and we prefer to use vxor for it.
2610 if (ValSizeInBytes < ByteSize) return SDValue();
2611
2612 // If the element value is larger than the splat value, check if it consists
2613 // of a repeated bit pattern of size ByteSize.
2614 if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2615 return SDValue();
2616
2617 // Properly sign extend the value.
2618 int MaskVal = SignExtend32(Value, ByteSize * 8);
2619
2620 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2621 if (MaskVal == 0) return SDValue();
2622
2623 // Finally, if this value fits in a 5 bit sext field, return it
2624 if (SignExtend32<5>(MaskVal) == MaskVal)
2625 return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2626 return SDValue();
2627}
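// Worked example (illustrative): a v8i16 build_vector of the repeated pair
// {0, 5} (i.e. <0,5,0,5,0,5,0,5>) folds into the 32-bit splat value
// 0x00000005, so get_VSPLTI_elt with ByteSize == 4 returns 5 and the vector
// can be materialized with "vspltisw 5"; a pair like {1, 5} would not match.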
2628
2629//===----------------------------------------------------------------------===//
2630// Addressing Mode Selection
2631//===----------------------------------------------------------------------===//
2632
2633/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2634/// or 64-bit immediate, and if the value can be accurately represented as a
2635/// sign extension from a 16-bit value. If so, this returns true and the
2636/// immediate.
2637bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2638 if (!isa<ConstantSDNode>(N))
2639 return false;
2640
2641 Imm = (int16_t)N->getAsZExtVal();
2642 if (N->getValueType(0) == MVT::i32)
2643 return Imm == (int32_t)N->getAsZExtVal();
2644 else
2645 return Imm == (int64_t)N->getAsZExtVal();
2646}
2647bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2648 return isIntS16Immediate(Op.getNode(), Imm);
2649}
2650
2651/// Used when computing address flags for selecting loads and stores.
2652/// If we have an OR, check if the LHS and RHS are provably disjoint.
2653/// An OR of two provably disjoint values is equivalent to an ADD.
2654/// Most PPC load/store instructions compute the effective address as a sum,
2655/// so doing this conversion is useful.
2656static bool provablyDisjointOr(SelectionDAG &DAG, const SDValue &N) {
2657 if (N.getOpcode() != ISD::OR)
2658 return false;
2659 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2660 if (!LHSKnown.Zero.getBoolValue())
2661 return false;
2662 KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2663 return (~(LHSKnown.Zero | RHSKnown.Zero) == 0);
2664}
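// Worked example (illustrative, assuming 32-bit values): in
// (or (shl %x, 28), 3), the left operand has its low 28 bits known zero and
// the constant 3 has all bits above bit 1 known zero, so every bit position
// is provably zero on at least one side and the OR can be treated as an ADD
// when forming addresses.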
2665
2666/// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2667bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2667/// be represented as an indexed [r+r] operation.
2669 SDValue &Index,
2670 SelectionDAG &DAG) const {
2671 for (SDNode *U : N->uses()) {
2672 if (MemSDNode *Memop = dyn_cast<MemSDNode>(U)) {
2673 if (Memop->getMemoryVT() == MVT::f64) {
2674 Base = N.getOperand(0);
2675 Index = N.getOperand(1);
2676 return true;
2677 }
2678 }
2679 }
2680 return false;
2681}
2682
2683/// isIntS34Immediate - This method tests whether the given node's value can be
2684/// accurately represented as a sign extension from a 34-bit value. If so,
2685/// this returns true and the immediate.
2686bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
2687 if (!isa<ConstantSDNode>(N))
2688 return false;
2689
2690 Imm = (int64_t)N->getAsZExtVal();
2691 return isInt<34>(Imm);
2692}
2693bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
2694 return isIntS34Immediate(Op.getNode(), Imm);
2695}
2696
2697/// SelectAddressRegReg - Given the specified address, check to see if it
2698/// can be represented as an indexed [r+r] operation. Returns false if it
2699/// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2700/// non-zero and N can be represented by a base register plus a signed 16-bit
2701/// displacement, make a more precise judgement by checking (displacement % \p
2702/// EncodingAlignment).
2703bool PPCTargetLowering::SelectAddressRegReg(
2704 SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
2705 MaybeAlign EncodingAlignment) const {
2706 // If we have a PC Relative target flag don't select as [reg+reg]. It will be
2707 // a [pc+imm].
2708 if (SelectAddressPCRel(N, Base))
2709 return false;
2710
2711 int16_t Imm = 0;
2712 if (N.getOpcode() == ISD::ADD) {
2713 // SPE f64 load/store cannot handle the usual 16-bit offset; it only
2714 // supports 8-bit offsets, so prefer the EVX [r+r] form for those.
2715 if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
2716 return true;
2717 if (isIntS16Immediate(N.getOperand(1), Imm) &&
2718 (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2719 return false; // r+i
2720 if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2721 return false; // r+i
2722
2723 Base = N.getOperand(0);
2724 Index = N.getOperand(1);
2725 return true;
2726 } else if (N.getOpcode() == ISD::OR) {
2727 if (isIntS16Immediate(N.getOperand(1), Imm) &&
2728 (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2729 return false; // r+i can fold it if we can.
2730
2731 // If this is an or of disjoint bitfields, we can codegen this as an add
2732 // (for better address arithmetic) if the LHS and RHS of the OR are provably
2733 // disjoint.
2734 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2735
2736 if (LHSKnown.Zero.getBoolValue()) {
2737 KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2738 // If all of the bits are known zero on the LHS or RHS, the add won't
2739 // carry.
2740 if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2741 Base = N.getOperand(0);
2742 Index = N.getOperand(1);
2743 return true;
2744 }
2745 }
2746 }
2747
2748 return false;
2749}
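// Worked example (illustrative): for (add %r, 8) with EncodingAlignment ==
// Align(4), the constant fits in a signed 16-bit field and is a multiple of
// 4, so this returns false and the caller uses the [r+imm] form; for
// (add %r, 2) under the same alignment the displacement is misaligned, so
// the node is selected as [r+r] instead.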
2750
2751// If we happen to be doing an i64 load or store into a stack slot that has
2752// less than a 4-byte alignment, then the frame-index elimination may need to
2753// use an indexed load or store instruction (because the offset may not be a
2754// multiple of 4). The extra register needed to hold the offset comes from the
2755// register scavenger, and it is possible that the scavenger will need to use
2756// an emergency spill slot. As a result, we need to make sure that a spill slot
2757// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2758// stack slot.
2759static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2760 // FIXME: This does not handle the LWA case.
2761 if (VT != MVT::i64)
2762 return;
2763
2764 // NOTE: We'll exclude negative FIs here, which come from argument
2765 // lowering, because there are no known test cases triggering this problem
2766 // using packed structures (or similar). We can remove this exclusion if
2767 // we find such a test case. The reason why this is so test-case driven is
2768 // because this entire 'fixup' is only to prevent crashes (from the
2769 // register scavenger) on not-really-valid inputs. For example, if we have:
2770 // %a = alloca i1
2771 // %b = bitcast i1* %a to i64*
2772 // store i64 %val, i64* %b
2773 // then the store should really be marked as 'align 1', but is not. If it
2774 // were marked as 'align 1' then the indexed form would have been
2775 // instruction-selected initially, and the problem this 'fixup' is preventing
2776 // won't happen regardless.
2777 if (FrameIdx < 0)
2778 return;
2779
2780 MachineFunction &MF = DAG.getMachineFunction();
2781 MachineFrameInfo &MFI = MF.getFrameInfo();
2782
2783 if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2784 return;
2785
2786 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2787 FuncInfo->setHasNonRISpills();
2788}
2789
2790/// Returns true if the address N can be represented by a base register plus
2791/// a signed 16-bit displacement [r+imm], and if it is not better
2792/// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept
2793/// displacements that are multiples of that value.
2795 SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
2796 MaybeAlign EncodingAlignment) const {
2797 // FIXME dl should come from parent load or store, not from address
2798 SDLoc dl(N);
2799
2800 // If we have a PC Relative target flag don't select as [reg+imm]. It will be
2801 // a [pc+imm].
2802 if (SelectAddressPCRel(N, Base))
2803 return false;
2804
2805 // If this can be more profitably realized as r+r, fail.
2806 if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
2807 return false;
2808
2809 if (N.getOpcode() == ISD::ADD) {
2810 int16_t imm = 0;
2811 if (isIntS16Immediate(N.getOperand(1), imm) &&
2812 (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2813 Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2814 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2815 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2816 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2817 } else {
2818 Base = N.getOperand(0);
2819 }
2820 return true; // [r+i]
2821 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2822 // Match LOAD (ADD (X, Lo(G))).
2823 assert(!N.getOperand(1).getConstantOperandVal(1) &&
2824 "Cannot handle constant offsets yet!");
2825 Disp = N.getOperand(1).getOperand(0); // The global address.
2826 assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2827 Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2828 Disp.getOpcode() == ISD::TargetConstantPool ||
2829 Disp.getOpcode() == ISD::TargetJumpTable);
2830 Base = N.getOperand(0);
2831 return true; // [&g+r]
2832 }
2833 } else if (N.getOpcode() == ISD::OR) {
2834 int16_t imm = 0;
2835 if (isIntS16Immediate(N.getOperand(1), imm) &&
2836 (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2837 // If this is an or of disjoint bitfields, we can codegen this as an add
2838 // (for better address arithmetic) if the LHS and RHS of the OR are
2839 // provably disjoint.
2840 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2841
2842 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2843 // If all of the bits are known zero on the LHS or RHS, the add won't
2844 // carry.
2845 if (FrameIndexSDNode *FI =
2846 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2847 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2848 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2849 } else {
2850 Base = N.getOperand(0);
2851 }
2852 Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2853 return true;
2854 }
2855 }
2856 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2857 // Loading from a constant address.
2858
2859 // If this address fits entirely in a 16-bit sext immediate field, codegen
2860 // this as "d, 0"
2861 int16_t Imm;
2862 if (isIntS16Immediate(CN, Imm) &&
2863 (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
2864 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2865 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2866 CN->getValueType(0));
2867 return true;
2868 }
2869
2870 // Handle 32-bit sext immediates with LIS + addr mode.
2871 if ((CN->getValueType(0) == MVT::i32 ||
2872 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2873 (!EncodingAlignment ||
2874 isAligned(*EncodingAlignment, CN->getZExtValue()))) {
2875 int Addr = (int)CN->getZExtValue();
2876
2877 // Otherwise, break this down into an LIS + disp.
2878 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2879
2880 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2881 MVT::i32);
2882 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2883 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2884 return true;
2885 }
2886 }
2887
2888 Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2889 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2890 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2891 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2892 } else
2893 Base = N;
2894 return true; // [r+0]
2895}
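// Worked example (illustrative): for the constant address 0x12348000, the
// low 16 bits sign-extend to -32768, so the address is rebuilt as
// "lis rB, 0x1235" plus a displacement of -32768, since
// 0x12350000 - 0x8000 == 0x12348000.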
2896
2897/// Similar to the 16-bit case but for instructions that take a 34-bit
2898/// displacement field (prefixed loads/stores).
2899bool PPCTargetLowering::SelectAddressRegImm34(SDValue N, SDValue &Disp,
2900 SDValue &Base,
2901 SelectionDAG &DAG) const {
2902 // Only on 64-bit targets.
2903 if (N.getValueType() != MVT::i64)
2904 return false;
2905
2906 SDLoc dl(N);
2907 int64_t Imm = 0;
2908
2909 if (N.getOpcode() == ISD::ADD) {
2910 if (!isIntS34Immediate(N.getOperand(1), Imm))
2911 return false;
2912 Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2913 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2914 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2915 else
2916 Base = N.getOperand(0);
2917 return true;
2918 }
2919
2920 if (N.getOpcode() == ISD::OR) {
2921 if (!isIntS34Immediate(N.getOperand(1), Imm))
2922 return false;
2923 // If this is an or of disjoint bitfields, we can codegen this as an add
2924 // (for better address arithmetic) if the LHS and RHS of the OR are
2925 // provably disjoint.
2926 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2927 if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)Imm) != ~0ULL)
2928 return false;
2929 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2930 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2931 else
2932 Base = N.getOperand(0);
2933 Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2934 return true;
2935 }
2936
2937 if (isIntS34Immediate(N, Imm)) { // If the address is a 34-bit const.
2938 Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2939 Base = DAG.getRegister(PPC::ZERO8, N.getValueType());
2940 return true;
2941 }
2942
2943 return false;
2944}
2945
2946/// SelectAddressRegRegOnly - Given the specified address, force it to be
2947/// represented as an indexed [r+r] operation.
2948bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2949 SDValue &Index,
2950 SelectionDAG &DAG) const {
2951 // Check to see if we can easily represent this as an [r+r] address. This
2952 // will fail if it thinks that the address is more profitably represented as
2953 // reg+imm, e.g. where imm = 0.
2954 if (SelectAddressRegReg(N, Base, Index, DAG))
2955 return true;
2956
2957 // If the address is the result of an add, we will utilize the fact that the
2958 // address calculation includes an implicit add. However, we can reduce
2959 // register pressure if we do not materialize a constant just for use as the
2960 // index register. We therefore split the add unless it adds a 16-bit
2961 // signed constant and both of its operands have a single use.
2962 int16_t imm = 0;
2963 if (N.getOpcode() == ISD::ADD &&
2964 (!isIntS16Immediate(N.getOperand(1), imm) ||
2965 !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2966 Base = N.getOperand(0);
2967 Index = N.getOperand(1);
2968 return true;
2969 }
2970
2971 // Otherwise, do it the hard way, using R0 as the base register.
2972 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2973 N.getValueType());
2974 Index = N;
2975 return true;
2976}
2977
2978template <typename Ty> static bool isValidPCRelNode(SDValue N) {
2979 Ty *PCRelCand = dyn_cast<Ty>(N);
2980 return PCRelCand && (PPCInstrInfo::hasPCRelFlag(PCRelCand->getTargetFlags()));
2981}
2982
2983/// Returns true if this address is a PC Relative address.
2984/// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
2985/// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
2986bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
2987 // This is a materialize PC Relative node. Always select this as PC Relative.
2988 Base = N;
2989 if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
2990 return true;
2991 if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
2992 isValidPCRelNode<GlobalAddressSDNode>(N) ||
2993 isValidPCRelNode<JumpTableSDNode>(N) ||
2994 isValidPCRelNode<BlockAddressSDNode>(N))
2995 return true;
2996 return false;
2997}
2998
2999/// Returns true if we should use a direct load into vector instruction
3000/// (such as lxsd or lfd) instead of a GPR load + direct-move sequence.
3001static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
3002
3003 // If there are any uses other than scalar-to-vector, then we should
3004 // keep it as a scalar load -> direct move pattern to prevent multiple
3005 // loads.
3006 LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
3007 if (!LD)
3008 return false;
3009
3010 EVT MemVT = LD->getMemoryVT();
3011 if (!MemVT.isSimple())
3012 return false;
3013 switch(MemVT.getSimpleVT().SimpleTy) {
3014 case MVT::i64:
3015 break;
3016 case MVT::i32:
3017 if (!ST.hasP8Vector())
3018 return false;
3019 break;
3020 case MVT::i16:
3021 case MVT::i8:
3022 if (!ST.hasP9Vector())
3023 return false;
3024 break;
3025 default:
3026 return false;
3027 }
3028
3029 SDValue LoadedVal(N, 0);
3030 if (!LoadedVal.hasOneUse())
3031 return false;
3032
3033 for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
3034 UI != UE; ++UI)
3035 if (UI.getUse().get().getResNo() == 0 &&
3036 UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
3037 UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
3038 return false;
3039
3040 return true;
3041}
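// Worked example (illustrative IR): a load whose only user moves the value
// into a vector, e.g.
//   %s = load i64, ptr %p
//   %v = insertelement <2 x i64> poison, i64 %s, i64 0
// is better served by a direct load-to-vector such as lxsd than by a GPR
// load followed by a direct move.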
3042
3043/// getPreIndexedAddressParts - returns true by value, base pointer and
3044/// offset pointer and addressing mode by reference if the node's address
3045/// can be legally represented as pre-indexed load / store address.
3046bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
3047 SDValue &Offset,
3048 ISD::MemIndexedMode &AM,
3049 SelectionDAG &DAG) const {
3050 if (DisablePPCPreinc) return false;
3051
3052 bool isLoad = true;
3053 SDValue Ptr;
3054 EVT VT;
3055 Align Alignment;
3056 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
3057 Ptr = LD->getBasePtr();
3058 VT = LD->getMemoryVT();
3059 Alignment = LD->getAlign();
3060 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
3061 Ptr = ST->getBasePtr();
3062 VT = ST->getMemoryVT();
3063 Alignment = ST->getAlign();
3064 isLoad = false;
3065 } else
3066 return false;
3067
3068 // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
3069 // instructions because we can fold these into a more efficient instruction
3070 // instead (such as LXSD).
3071 if (isLoad && usePartialVectorLoads(N, Subtarget)) {
3072 return false;
3073 }
3074
3075 // PowerPC doesn't have preinc load/store instructions for vectors
3076 if (VT.isVector())
3077 return false;
3078
3079 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
3080 // Common code will reject creating a pre-inc form if the base pointer
3081 // is a frame index, or if N is a store and the base pointer is either
3082 // the same as or a predecessor of the value being stored. Check for
3083 // those situations here, and try with swapped Base/Offset instead.
3084 bool Swap = false;
3085
3086 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
3087 Swap = true;
3088 else if (!isLoad) {
3089 SDValue Val = cast<StoreSDNode>(N)->getValue();
3090 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
3091 Swap = true;
3092 }
3093
3094 if (Swap)
3095 std::swap(Base, Offset);
3096
3097 AM = ISD::PRE_INC;
3098 return true;
3099 }
3100
3101 // LDU/STU can only handle immediates that are a multiple of 4.
3102 if (VT != MVT::i64) {
3103 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, std::nullopt))
3104 return false;
3105 } else {
3106 // LDU/STU need an address with at least 4-byte alignment.
3107 if (Alignment < Align(4))
3108 return false;
3109
3110 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
3111 return false;
3112 }
3113
3114 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
3115 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
3116 // sext i32 to i64 when addr mode is r+i.
3117 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
3118 LD->getExtensionType() == ISD::SEXTLOAD &&
3119 isa<ConstantSDNode>(Offset))
3120 return false;
3121 }
3122
3123 AM = ISD::PRE_INC;
3124 return true;
3125}
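// Worked example (illustrative): for a load of i32 from (add %r, 16), the
// address splits into Base = %r and Offset = 16 with AM = ISD::PRE_INC,
// which can later be selected as "lwzu rD, 16(rBase)", updating the base
// register as a side effect.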
3126
3127//===----------------------------------------------------------------------===//
3128// LowerOperation implementation
3129//===----------------------------------------------------------------------===//
3130
3131/// Return true if we should reference labels using a PICBase, set the HiOpFlags
3132/// and LoOpFlags to the target MO flags.
3133static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
3134 unsigned &HiOpFlags, unsigned &LoOpFlags,
3135 const GlobalValue *GV = nullptr) {
3136 HiOpFlags = PPCII::MO_HA;
3137 LoOpFlags = PPCII::MO_LO;
3138
3139 // Don't use the pic base if not in PIC relocation model.
3140 if (IsPIC) {
3141 HiOpFlags = PPCII::MO_PIC_HA_FLAG;
3142 LoOpFlags = PPCII::MO_PIC_LO_FLAG;
3143 }
3144}
3145
3146static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
3147 SelectionDAG &DAG) {
3148 SDLoc DL(HiPart);
3149 EVT PtrVT = HiPart.getValueType();
3150 SDValue Zero = DAG.getConstant(0, DL, PtrVT);
3151
3152 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
3153 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
3154
3155 // With PIC, the first instruction is actually "GR+hi(&G)".
3156 if (isPIC)
3157 Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
3158 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
3159
3160 // Generate non-pic code that has direct accesses to the constant pool.
3161 // The address of the global is just (hi(&g)+lo(&g)).
3162 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
3163}
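// Worked example (illustrative): in the non-PIC case the Hi/Lo pair above is
// typically selected into something like
//   lis  r3, sym@ha
//   addi r3, r3, sym@l
// with the PIC base register folded into the high part when isPIC is set.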
3164
3165static void setUsesTOCBasePtr(MachineFunction &MF) {
3166 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3167 FuncInfo->setUsesTOCBasePtr();
3168}
3169
3170static void setUsesTOCBasePtr(SelectionDAG &DAG) {
3171 setUsesTOCBasePtr(DAG.getMachineFunction());
3172}
3173
3174SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
3175 SDValue GA) const {
3176 const bool Is64Bit = Subtarget.isPPC64();
3177 EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
3178 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
3179 : Subtarget.isAIXABI()
3180 ? DAG.getRegister(PPC::R2, VT)
3181 : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
3182 SDValue Ops[] = { GA, Reg };
3183 return DAG.getMemIntrinsicNode(
3184 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
3185 MachinePointerInfo::getGOT(DAG.getMachineFunction()), std::nullopt,
3186 MachineMemOperand::MOLoad);
3187}
3188
3189SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
3190 SelectionDAG &DAG) const {
3191 EVT PtrVT = Op.getValueType();
3192 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
3193 const Constant *C = CP->getConstVal();
3194
3195 // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3196 // The actual address of the GlobalValue is stored in the TOC.
3197 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3198 if (Subtarget.isUsingPCRelativeCalls()) {
3199 SDLoc DL(CP);
3200 EVT Ty = getPointerTy(DAG.getDataLayout());
3201 SDValue ConstPool = DAG.getTargetConstantPool(
3202 C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
3203 return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
3204 }
3205 setUsesTOCBasePtr(DAG);
3206 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
3207 return getTOCEntry(DAG, SDLoc(CP), GA);
3208 }
3209
3210 unsigned MOHiFlag, MOLoFlag;
3211 bool IsPIC = isPositionIndependent();
3212 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3213
3214 if (IsPIC && Subtarget.isSVR4ABI()) {
3215 SDValue GA =
3216 DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
3217 return getTOCEntry(DAG, SDLoc(CP), GA);
3218 }
3219
3220 SDValue CPIHi =
3221 DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
3222 SDValue CPILo =
3223 DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
3224 return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
3225}
3226
3227// For 64-bit PowerPC, prefer the more compact relative encodings.
3228// This trades 32 bits per jump table entry for one or two instructions
3229// at the jump site.
3230unsigned PPCTargetLowering::getJumpTableEncoding() const {
3231 if (isJumpTableRelative())
3232 return MachineJumpTableInfo::EK_LabelDifference32;
3233
3234 return TargetLowering::getJumpTableEncoding();
3235}
3236
3237bool PPCTargetLowering::isJumpTableRelative() const {
3238 if (UseAbsoluteJumpTables)
3239 return false;
3240 if (Subtarget.isPPC64() || Subtarget.isAIXABI())
3241 return true;
3242 return TargetLowering::isJumpTableRelative();
3243}
3244
3245SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
3246 SelectionDAG &DAG) const {
3247 if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3248 return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3249
3250 switch (getTargetMachine().getCodeModel()) {
3251 case CodeModel::Small:
3252 case CodeModel::Medium:
3253 return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3254 default:
3255 return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
3256 getPointerTy(DAG.getDataLayout()));
3257 }
3258}
3259
3260const MCExpr *
3261PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3262 unsigned JTI,
3263 MCContext &Ctx) const {
3264 if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3265 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3266
3267 switch (getTargetMachine().getCodeModel()) {
3268 case CodeModel::Small:
3269 case CodeModel::Medium:
3270 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3271 default:
3272 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
3273 }
3274}
3275
3276SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
3277 EVT PtrVT = Op.getValueType();
3278 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
3279
3280 // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3281 if (Subtarget.isUsingPCRelativeCalls()) {
3282 SDLoc DL(JT);
3283 EVT Ty = getPointerTy(DAG.getDataLayout());
3284 SDValue GA =
3285 DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
3286 SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3287 return MatAddr;
3288 }
3289
3290 // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3291 // The actual address of the GlobalValue is stored in the TOC.
3292 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3293 setUsesTOCBasePtr(DAG);
3294 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
3295 return getTOCEntry(DAG, SDLoc(JT), GA);
3296 }
3297
3298 unsigned MOHiFlag, MOLoFlag;
3299 bool IsPIC = isPositionIndependent();
3300 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3301
3302 if (IsPIC && Subtarget.isSVR4ABI()) {
3303 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
3304 PPCII::MO_PIC_FLAG);
3305 return getTOCEntry(DAG, SDLoc(GA), GA);
3306 }
3307
3308 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
3309 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
3310 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
3311}
3312
3313SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
3314 SelectionDAG &DAG) const {
3315 EVT PtrVT = Op.getValueType();
3316 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
3317 const BlockAddress *BA = BASDN->getBlockAddress();
3318
3319 // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3320 if (Subtarget.isUsingPCRelativeCalls()) {
3321 SDLoc DL(BASDN);
3322 EVT Ty = getPointerTy(DAG.getDataLayout());
3323 SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
3324 PPCII::MO_PCREL_FLAG);
3325 SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3326 return MatAddr;
3327 }
3328
3329 // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3330 // The actual BlockAddress is stored in the TOC.
3331 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3332 setUsesTOCBasePtr(DAG);
3333 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
3334 return getTOCEntry(DAG, SDLoc(BASDN), GA);
3335 }
3336
3337 // 32-bit position-independent ELF stores the BlockAddress in the .got.
3338 if (Subtarget.is32BitELFABI() && isPositionIndependent())
3339 return getTOCEntry(
3340 DAG, SDLoc(BASDN),
3341 DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
3342
3343 unsigned MOHiFlag, MOLoFlag;
3344 bool IsPIC = isPositionIndependent();
3345 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3346 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
3347 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
3348 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
3349}
3350
3351SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
3352 SelectionDAG &DAG) const {
3353 if (Subtarget.isAIXABI())
3354 return LowerGlobalTLSAddressAIX(Op, DAG);
3355
3356 return LowerGlobalTLSAddressLinux(Op, DAG);
3357}
3358
3359SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
3360 SelectionDAG &DAG) const {
3361 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3362
3363 if (DAG.getTarget().useEmulatedTLS())
3364 report_fatal_error("Emulated TLS is not yet supported on AIX");
3365
3366 SDLoc dl(GA);
3367 const GlobalValue *GV = GA->getGlobal();
3368 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3369 bool Is64Bit = Subtarget.isPPC64();
3370 bool HasAIXSmallLocalExecTLS = Subtarget.hasAIXSmallLocalExecTLS();
3371 TLSModel::Model Model = getTargetMachine().getTLSModel(GV);
3372 bool IsTLSLocalExecModel = Model == TLSModel::LocalExec;
3373
3374 if (IsTLSLocalExecModel || Model == TLSModel::InitialExec) {
3375 SDValue VariableOffsetTGA =
3376 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TPREL_FLAG);
3377 SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
3378 SDValue TLSReg;
3379 if (Is64Bit) {
3380 // For local-exec and initial-exec on AIX (64-bit), the sequence generated
3381 // involves a load of the variable offset (from the TOC), followed by an
3382 // add of the loaded variable offset to R13 (the thread pointer).
3383 // This code sequence looks like:
3384 // ld reg1,var[TC](2)
3385 // add reg2, reg1, r13 // r13 contains the thread pointer
3386 TLSReg = DAG.getRegister(PPC::X13, MVT::i64);
3387
3388 // With the -maix-small-local-exec-tls option, produce a faster access
3389 // sequence for local-exec TLS variables where the offset from the TLS
3390 // base is encoded as an immediate operand.
3391 //
3392 // We only utilize the faster local-exec access sequence when the TLS
3393 // variable has a size within the policy limit. We treat types that are
3394 // not sized or are empty as being over the policy size limit.
3395 if (HasAIXSmallLocalExecTLS && IsTLSLocalExecModel) {
3396 Type *GVType = GV->getValueType();
3397 if (GVType->isSized() && !GVType->isEmptyTy() &&
3398 GV->getParent()->getDataLayout().getTypeAllocSize(GVType) <=
3399 AIXSmallTlsPolicySizeLimit)
3400 return DAG.getNode(PPCISD::Lo, dl, PtrVT, VariableOffsetTGA, TLSReg);
3401 }
3402 } else {
3403 // For local-exec and initial-exec on AIX (32-bit), the sequence generated
3404 // involves loading the variable offset from the TOC, generating a call to
3405 // .__get_tpointer to get the thread pointer (which will be in R3), and
3406 // adding the two together:
3407 // lwz reg1,var[TC](2)
3408 // bla .__get_tpointer
3409 // add reg2, reg1, r3
3410 TLSReg = DAG.getNode(PPCISD::GET_TPOINTER, dl, PtrVT);
3411
3412 // We do not implement the 32-bit version of the faster access sequence
3413 // for local-exec that is controlled by -maix-small-local-exec-tls.
3414 if (HasAIXSmallLocalExecTLS)
3415 report_fatal_error("The small-local-exec TLS access sequence is "
3416 "currently only supported on AIX (64-bit mode).");
3417 }
3418 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, VariableOffset);
3419 }
3420
3421 if (Model == TLSModel::LocalDynamic) {
3422 // For local-dynamic on AIX, we need to generate one TOC entry for each
3423 // variable offset, and a single module-handle TOC entry for the entire
3424 // file.
3425
3426 SDValue VariableOffsetTGA =
3427 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSLD_FLAG);
3428 SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
3429
3430 Module *M = DAG.getMachineFunction().getFunction().getParent();
3431 GlobalVariable *TLSGV =
3432 dyn_cast_or_null<GlobalVariable>(M->getOrInsertGlobal(
3433 StringRef("_$TLSML"), PointerType::getUnqual(*DAG.getContext())));
3434 TLSGV->setThreadLocalMode(GlobalVariable::LocalDynamicTLSModel);
3435 assert(TLSGV && "Not able to create GV for _$TLSML.");
3436 SDValue ModuleHandleTGA =
3437 DAG.getTargetGlobalAddress(TLSGV, dl, PtrVT, 0, PPCII::MO_TLSLDM_FLAG);
3438 SDValue ModuleHandleTOC = getTOCEntry(DAG, dl, ModuleHandleTGA);
3439 SDValue ModuleHandle =
3440 DAG.getNode(PPCISD::TLSLD_AIX, dl, PtrVT, ModuleHandleTOC);
3441
3442 return DAG.getNode(ISD::ADD, dl, PtrVT, ModuleHandle, VariableOffset);
3443 }
3444
3445 // If Local- or Initial-exec or Local-dynamic is not possible or specified,
3446 // all GlobalTLSAddress nodes are lowered using the general-dynamic model. We
3447 // need to generate two TOC entries, one for the variable offset, one for the
3448 // region handle. The global address for the TOC entry of the region handle is
3449 // created with the MO_TLSGDM_FLAG flag and the global address for the TOC
3450 // entry of the variable offset is created with MO_TLSGD_FLAG.
3451 SDValue VariableOffsetTGA =
3452 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGD_FLAG);
3453 SDValue RegionHandleTGA =
3454 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGDM_FLAG);
3455 SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
3456 SDValue RegionHandle = getTOCEntry(DAG, dl, RegionHandleTGA);
3457 return DAG.getNode(PPCISD::TLSGD_AIX, dl, PtrVT, VariableOffset,
3458 RegionHandle);
3459}
3460
3461SDValue PPCTargetLowering::LowerGlobalTLSAddressLinux(SDValue Op,
3462 SelectionDAG &DAG) const {
3463 // FIXME: TLS addresses currently use medium model code sequences,
3464 // which is the most useful form. Eventually support for small and
3465 // large models could be added if users need it, at the cost of
3466 // additional complexity.
3467 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3468 if (DAG.getTarget().useEmulatedTLS())
3469 return LowerToTLSEmulatedModel(GA, DAG);
3470
3471 SDLoc dl(GA);
3472 const GlobalValue *GV = GA->getGlobal();
3473 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3474 bool is64bit = Subtarget.isPPC64();
3475 const Module *M = DAG.getMachineFunction().getFunction().getParent();
3476 PICLevel::Level picLevel = M->getPICLevel();
3477
3478 const TargetMachine &TM = getTargetMachine();
3479 TLSModel::Model Model = TM.getTLSModel(GV);
3480
3481 if (Model == TLSModel::LocalExec) {
3482 if (Subtarget.isUsingPCRelativeCalls()) {
3483 SDValue TLSReg = DAG.getRegister(PPC::X13, MVT::i64);
3484 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3485 PPCII::MO_TPREL_PCREL_FLAG);
3486 SDValue MatAddr =
3487 DAG.getNode(PPCISD::TLS_LOCAL_EXEC_MAT_ADDR, dl, PtrVT, TGA);
3488 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, MatAddr);
3489 }
3490
3491 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3492 PPCII::MO_TPREL_HA);
3493 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3494 PPCII::MO_TPREL_LO);
3495 SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
3496 : DAG.getRegister(PPC::R2, MVT::i32);
3497
3498 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
3499 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
3500 }
3501
3502 if (Model == TLSModel::InitialExec) {
3503 bool IsPCRel = Subtarget.isUsingPCRelativeCalls();
3504 SDValue TGA = DAG.getTargetGlobalAddress(
3505 GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0);
3506 SDValue TGATLS = DAG.getTargetGlobalAddress(
3507 GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_TLS_PCREL_FLAG : PPCII::MO_TLS);
3508 SDValue TPOffset;
3509 if (IsPCRel) {
3510 SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA);
3511 TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel,
3512 MachinePointerInfo());
3513 } else {
3514 SDValue GOTPtr;
3515 if (is64bit) {
3516 setUsesTOCBasePtr(DAG);
3517 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3518 GOTPtr =
3519 DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA);
3520 } else {
3521 if (!TM.isPositionIndependent())
3522 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
3523 else if (picLevel == PICLevel::SmallPIC)
3524 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3525 else
3526 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3527 }
3528 TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr);
3529 }
3530 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
3531 }
3532
3533 if (Model == TLSModel::GeneralDynamic) {
3534 if (Subtarget.isUsingPCRelativeCalls()) {
3535 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3536 PPCII::MO_GOT_TLSGD_PCREL_FLAG);
3537 return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3538 }
3539
3540 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3541 SDValue GOTPtr;
3542 if (is64bit) {
3543 setUsesTOCBasePtr(DAG);
3544 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3545 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
3546 GOTReg, TGA);
3547 } else {
3548 if (picLevel == PICLevel::SmallPIC)
3549 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3550 else
3551 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3552 }
3553 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
3554 GOTPtr, TGA, TGA);
3555 }
3556
3557 if (Model == TLSModel::LocalDynamic) {
3558 if (Subtarget.isUsingPCRelativeCalls()) {
3559 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3560 PPCII::MO_GOT_TLSLD_PCREL_FLAG);
3561 SDValue MatPCRel =
3562 DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3563 return DAG.getNode(PPCISD::PADDI_DTPREL, dl, PtrVT, MatPCRel, TGA);
3564 }
3565
3566 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3567 SDValue GOTPtr;
3568 if (is64bit) {
3569 setUsesTOCBasePtr(DAG);
3570 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3571 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
3572 GOTReg, TGA);
3573 } else {
3574 if (picLevel == PICLevel::SmallPIC)
3575 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3576 else
3577 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3578 }
3579 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
3580 PtrVT, GOTPtr, TGA, TGA);
3581 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
3582 PtrVT, TLSAddr, TGA);
3583 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
3584 }
3585
3586 llvm_unreachable("Unknown TLS model!");
3587}
3588
3589SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
3590 SelectionDAG &DAG) const {
3591 EVT PtrVT = Op.getValueType();
3592 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
3593 SDLoc DL(GSDN);
3594 const GlobalValue *GV = GSDN->getGlobal();
3595
3596 // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
3597 // The actual address of the GlobalValue is stored in the TOC.
3598 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3599 if (Subtarget.isUsingPCRelativeCalls()) {
3600 EVT Ty = getPointerTy(DAG.getDataLayout());
3601 if (isAccessedAsGotIndirect(Op)) {
3602 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3603 PPCII::MO_GOT_PCREL_FLAG);
3604 SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3605 SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
3606 MachinePointerInfo());
3607 return Load;
3608 } else {
3609 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3610 PPCII::MO_PCREL_FLAG);
3611 return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3612 }
3613 }
3614 setUsesTOCBasePtr(DAG);
3615 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
3616 return getTOCEntry(DAG, DL, GA);
3617 }
3618
3619 unsigned MOHiFlag, MOLoFlag;
3620 bool IsPIC = isPositionIndependent();
3621 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3622
3623 if (IsPIC && Subtarget.isSVR4ABI()) {
3624 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3625 GSDN->getOffset(),
3626 PPCII::MO_PIC_FLAG);
3627 return getTOCEntry(DAG, DL, GA);
3628 }
3629
3630 SDValue GAHi =
3631 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3632 SDValue GALo =
3633 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3634
3635 return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3636}
3637
3638SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3639 bool IsStrict = Op->isStrictFPOpcode();
3640 ISD::CondCode CC =
3641 cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
3642 SDValue LHS = Op.getOperand(IsStrict ? 1 : 0);
3643 SDValue RHS = Op.getOperand(IsStrict ? 2 : 1);
3644 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
3645 EVT LHSVT = LHS.getValueType();
3646 SDLoc dl(Op);
3647
3648 // Soften the setcc with libcall if it is fp128.
3649 if (LHSVT == MVT::f128) {
3650 assert(!Subtarget.hasP9Vector() &&
3651 "SETCC for f128 is already legal under Power9!");
3652 softenSetCCOperands(DAG, LHSVT, LHS, RHS, CC, dl, LHS, RHS, Chain,
3653 Op->getOpcode() == ISD::STRICT_FSETCCS);
3654 if (RHS.getNode())
3655 LHS = DAG.getNode(ISD::SETCC, dl, Op.getValueType(), LHS, RHS,
3656 DAG.getCondCode(CC));
3657 if (IsStrict)
3658 return DAG.getMergeValues({LHS, Chain}, dl);
3659 return LHS;
3660 }
3661
3662 assert(!IsStrict && "Don't know how to handle STRICT_FSETCC!");
3663
3664 if (Op.getValueType() == MVT::v2i64) {
3665 // When the operands themselves are v2i64 values, we need to do something
3666 // special because VSX has no underlying comparison operations for these.
3667 if (LHS.getValueType() == MVT::v2i64) {
3668 // Equality can be handled by casting to the legal type for Altivec
3669 // comparisons; everything else needs to be expanded.
3670 if (CC != ISD::SETEQ && CC != ISD::SETNE)
3671 return SDValue();
3672 SDValue SetCC32 = DAG.getSetCC(
3673 dl, MVT::v4i32, DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, LHS),
3674 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, RHS), CC);
3675 int ShuffV[] = {1, 0, 3, 2};
3676 SDValue Shuff =
3677 DAG.getVectorShuffle(MVT::v4i32, dl, SetCC32, SetCC32, ShuffV);
3678 return DAG.getBitcast(MVT::v2i64,
3679 DAG.getNode(CC == ISD::SETEQ ? ISD::AND : ISD::OR,
3680 dl, MVT::v4i32, Shuff, SetCC32));
3681 }
3682
3683 // We handle most of these in the usual way.
3684 return Op;
3685 }
3686
3687 // If we're comparing for equality to zero, expose the fact that this is
3688 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3689 // fold the new nodes.
3690 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3691 return V;
3692
3693 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
3694 // Leave comparisons against 0 and -1 alone for now, since they're usually
3695 // optimized. FIXME: revisit this when we can custom lower all setcc
3696 // optimizations.
3697 if (C->isAllOnes() || C->isZero())
3698 return SDValue();
3699 }
3700
3701 // If we have an integer seteq/setne, turn it into a compare against zero
3702 // by xor'ing the rhs with the lhs, which is faster than setting a
3703 // condition register, reading it back out, and masking the correct bit. The
3704 // normal approach here uses sub to do this instead of xor. Using xor exposes
3705 // the result to other bit-twiddling opportunities.
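// Editorial illustration: (seteq i32 %a, %b) becomes
// (seteq i32 (xor i32 %a, %b), 0); the compare-against-zero form can
// then be folded into the ctlz/srl sequence mentioned above.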
3706 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3707 EVT VT = Op.getValueType();
3708 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, LHS, RHS);
3709 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3710 }
3711 return SDValue();
3712}
3713
3714SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3715 SDNode *Node = Op.getNode();
3716 EVT VT = Node->getValueType(0);
3717 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3718 SDValue InChain = Node->getOperand(0);
3719 SDValue VAListPtr = Node->getOperand(1);
3720 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3721 SDLoc dl(Node);
3722
3723 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3724
3725 // gpr_index
3726 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3727 VAListPtr, MachinePointerInfo(SV), MVT::i8);
3728 InChain = GprIndex.getValue(1);
3729
3730 if (VT == MVT::i64) {
3731 // Check if GprIndex is odd
3732 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3733 DAG.getConstant(1, dl, MVT::i32));
3734 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3735 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3736 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3737 DAG.getConstant(1, dl, MVT::i32));
3738 // Align GprIndex to be even if it isn't
3739 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3740 GprIndex);
3741 }
3742
3743 // fpr index is 1 byte after gpr
3744 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3745 DAG.getConstant(1, dl, MVT::i32));
3746
3747 // fpr
3748 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3749 FprPtr, MachinePointerInfo(SV), MVT::i8);
3750 InChain = FprIndex.getValue(1);
3751
3752 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3753 DAG.getConstant(8, dl, MVT::i32));
3754
3755 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3756 DAG.getConstant(4, dl, MVT::i32));
3757
3758 // areas
3759 SDValue OverflowArea =
3760 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3761 InChain = OverflowArea.getValue(1);
3762
3763 SDValue RegSaveArea =
3764 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3765 InChain = RegSaveArea.getValue(1);
3766
3767 // select overflow_area if index >= 8
3768 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3769 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3770
3771 // adjustment constant gpr_index * 4/8
3772 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3773 VT.isInteger() ? GprIndex : FprIndex,
3774 DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3775 MVT::i32));
3776
3777 // OurReg = RegSaveArea + RegConstant
3778 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3779 RegConstant);
3780
3781 // Floating types are 32 bytes into RegSaveArea
3782 if (VT.isFloatingPoint())
3783 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3784 DAG.getConstant(32, dl, MVT::i32));
3785
3786 // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3787 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3788 VT.isInteger() ? GprIndex : FprIndex,
3789 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3790 MVT::i32));
3791
3792 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3793 VT.isInteger() ? VAListPtr : FprPtr,
3794 MachinePointerInfo(SV), MVT::i8);
3795
3796 // determine if we should load from reg_save_area or overflow_area
3797 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3798
3799 // increase overflow_area by 4/8 if gpr/fpr >= 8
3800 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3801 DAG.getConstant(VT.isInteger() ? 4 : 8,
3802 dl, MVT::i32));
3803
3804 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3805 OverflowAreaPlusN);
3806
3807 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3808 MachinePointerInfo(), MVT::i32);
3809
3810 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3811}
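// Editorial summary (illustrative C-like sketch of the lowering above):
//   idx  = VT is integer ? gpr : fpr;
//   regp = reg_save_area + idx * (integer ? 4 : 8) + (fp ? 32 : 0);
//   addr = idx < 8 ? regp : overflow_area;
//   if (idx >= 8) overflow_area += (integer ? 4 : 8);
//   idx += (VT == i64 ? 2 : 1);  // written back with a truncating store
//   result = *addr;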
3812
3813SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3814 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3815
3816 // We have to copy the entire va_list struct:
3817 // 2*sizeof(char) + 2 bytes of alignment padding + 2*sizeof(char*) = 12 bytes
3818 return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3819 DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3820 false, true, false, MachinePointerInfo(),
3821 MachinePointerInfo());
3822}
3823
3824SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3825 SelectionDAG &DAG) const {
3826 if (Subtarget.isAIXABI())
3827 report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3828
3829 return Op.getOperand(0);
3830}
3831
3832SDValue PPCTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
3833 MachineFunction &MF = DAG.getMachineFunction();
3834 PPCFunctionInfo &MFI = *MF.getInfo<PPCFunctionInfo>();
3835
3836 assert((Op.getOpcode() == ISD::INLINEASM ||
3837 Op.getOpcode() == ISD::INLINEASM_BR) &&
3838 "Expecting Inline ASM node.");
3839
3840 // If an LR store is already known to be required then there is no point in
3841 // checking this ASM as well.
3842 if (MFI.isLRStoreRequired())
3843 return Op;
3844
3845 // Inline ASM nodes have an optional last operand that is an incoming Flag of
3846 // type MVT::Glue. We want to ignore this last operand if that is the case.
3847 unsigned NumOps = Op.getNumOperands();
3848 if (Op.getOperand(NumOps - 1).getValueType() == MVT::Glue)
3849 --NumOps;
3850
3851 // Check all operands that may contain the LR.
3852 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
3853 const InlineAsm::Flag Flags(Op.getConstantOperandVal(i));
3854 unsigned NumVals = Flags.getNumOperandRegisters();
3855 ++i; // Skip the ID value.
3856
3857 switch (Flags.getKind()) {
3858 default:
3859 llvm_unreachable("Bad flags!");
3860 case InlineAsm::Kind::RegUse:
3861 case InlineAsm::Kind::Imm:
3862 case InlineAsm::Kind::Mem:
3863 i += NumVals;
3864 break;
3865 case InlineAsm::Kind::Clobber:
3866 case InlineAsm::Kind::RegDef:
3867 case InlineAsm::Kind::RegDefEarlyClobber: {
3868 for (; NumVals; --NumVals, ++i) {
3869 Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
3870 if (Reg != PPC::LR && Reg != PPC::LR8)
3871 continue;
3872 MFI.setLRStoreRequired();
3873 return Op;
3874 }
3875 break;
3876 }
3877 }
3878 }
3879
3880 return Op;
3881}
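// Editorial illustration (hypothetical example, not from the source):
//   asm volatile("bl foo" ::: "lr");
// lists LR in its clobber list, so the scan above would mark this
// function as requiring an LR save.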
3882
3883SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3884 SelectionDAG &DAG) const {
3885 if (Subtarget.isAIXABI())
3886 report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3887
3888 SDValue Chain = Op.getOperand(0);
3889 SDValue Trmp = Op.getOperand(1); // trampoline
3890 SDValue FPtr = Op.getOperand(2); // nested function
3891 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3892 SDLoc dl(Op);
3893
3894 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3895 bool isPPC64 = (PtrVT == MVT::i64);
3896 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3897
3898 TargetLowering::ArgListTy Args;
3899 TargetLowering::ArgListEntry Entry;
3900
3901 Entry.Ty = IntPtrTy;
3902 Entry.Node = Trmp; Args.push_back(Entry);
3903
3904 // TrampSize == (isPPC64 ? 48 : 40);
3905 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3906 isPPC64 ? MVT::i64 : MVT::i32);
3907 Args.push_back(Entry);
3908
3909 Entry.Node = FPtr; Args.push_back(Entry);
3910 Entry.Node = Nest; Args.push_back(Entry);
3911
3912 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3913 TargetLowering::CallLoweringInfo CLI(DAG);
3914 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3915 CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3916 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3917
3918 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3919 return CallResult.second;
3920}
3921
3922SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3923 MachineFunction &MF = DAG.getMachineFunction();
3924 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3925 EVT PtrVT = getPointerTy(MF.getDataLayout());
3926
3927 SDLoc dl(Op);
3928
3929 if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
3930 // vastart just stores the address of the VarArgsFrameIndex slot into the
3931 // memory location argument.
3932 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3933 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3934 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3935 MachinePointerInfo(SV));
3936 }
3937
3938 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3939 // We assume the given va_list is already allocated.
3940 //
3941 // typedef struct {
3942 // char gpr; /* index into the array of 8 GPRs
3943 // * stored in the register save area
3944 // * gpr=0 corresponds to r3,
3945 // * gpr=1 to r4, etc.
3946 // */
3947 // char fpr; /* index into the array of 8 FPRs
3948 // * stored in the register save area
3949 // * fpr=0 corresponds to f1,
3950 // * fpr=1 to f2, etc.
3951 // */
3952 // char *overflow_arg_area;
3953 // /* location on stack that holds
3954 // * the next overflow argument
3955 // */
3956 // char *reg_save_area;
3957 // /* where r3:r10 and f1:f8 (if saved)
3958 // * are stored
3959 // */
3960 // } va_list[1];
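// Editorial note: the resulting byte offsets are gpr at 0, fpr at 1,
// two bytes of padding, overflow_arg_area at 4 and reg_save_area at 8,
// which is what the offset constants (1, 3 and 4) added below encode.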
3961
3962 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3963 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3964 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3965 PtrVT);
3966 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3967 PtrVT);
3968
3969 uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3970 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3971
3972 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3973 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3974
3975 uint64_t FPROffset = 1;
3976 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3977
3978 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3979
3980 // Store first byte : number of int regs
3981 SDValue firstStore =
3982 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3983 MachinePointerInfo(SV), MVT::i8);
3984 uint64_t nextOffset = FPROffset;
3985 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3986 ConstFPROffset);
3987
3988 // Store second byte : number of float regs
3989 SDValue secondStore =
3990 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3991 MachinePointerInfo(SV, nextOffset), MVT::i8);
3992 nextOffset += StackOffset;
3993 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3994
3995 // Store second word : arguments given on stack
3996 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3997 MachinePointerInfo(SV, nextOffset));
3998 nextOffset += FrameOffset;
3999 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
4000
4001 // Store third word : arguments given in registers
4002 return DAG.getStore(thirdStore, dl, FR, nextPtr,
4003 MachinePointerInfo(SV, nextOffset));
4004}
4005
4006/// FPR - The set of FP registers that should be allocated for arguments
4007/// on Darwin and AIX.
4008static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
4009 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
4010 PPC::F11, PPC::F12, PPC::F13};
4011
4012/// CalculateStackSlotSize - Calculates the size reserved for this argument on
4013/// the stack.
4014static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
4015 unsigned PtrByteSize) {
4016 unsigned ArgSize = ArgVT.getStoreSize();
4017 if (Flags.isByVal())
4018 ArgSize = Flags.getByValSize();
4019
4020 // Round up to multiples of the pointer size, except for array members,
4021 // which are always packed.
4022 if (!Flags.isInConsecutiveRegs())
4023 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4024
4025 return ArgSize;
4026}
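// Editorial illustration: with PtrByteSize == 8, an i32 reserves
// 8 bytes and a 13-byte byval reserves 16, while an f32 member of a
// split array (isInConsecutiveRegs) stays at its packed 4 bytes.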
4027
4028/// CalculateStackSlotAlignment - Calculates the alignment of this argument
4029/// on the stack.
4030 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
4031 ISD::ArgFlagsTy Flags,
4032 unsigned PtrByteSize) {
4033 Align Alignment(PtrByteSize);
4034
4035 // Altivec parameters are padded to a 16 byte boundary.
4036 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
4037 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
4038 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
4039 ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
4040 Alignment = Align(16);
4041
4042 // ByVal parameters are aligned as requested.
4043 if (Flags.isByVal()) {
4044 auto BVAlign = Flags.getNonZeroByValAlign();
4045 if (BVAlign > PtrByteSize) {
4046 if (BVAlign.value() % PtrByteSize != 0)
4047 report_fatal_error(
4048 "ByVal alignment is not a multiple of the pointer size");
4049
4050 Alignment = BVAlign;
4051 }
4052 }
4053
4054 // Array members are always packed to their original alignment.
4055 if (Flags.isInConsecutiveRegs()) {
4056 // If the array member was split into multiple registers, the first
4057 // needs to be aligned to the size of the full type. (Except for
4058 // ppcf128, which is only aligned as its f64 components.)
4059 if (Flags.isSplit() && OrigVT != MVT::ppcf128)
4060 Alignment = Align(OrigVT.getStoreSize());
4061 else
4062 Alignment = Align(ArgVT.getStoreSize());
4063 }
4064
4065 return Alignment;
4066}
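// Editorial illustration: a v4i32 argument gets Align(16); a byval
// with a requested 32-byte alignment keeps Align(32); and the first
// piece of a split array member is aligned to the store size of the
// full array type.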
4067
4068/// CalculateStackSlotUsed - Return whether this argument will use its
4069/// stack slot (instead of being passed in registers). ArgOffset,
4070/// AvailableFPRs, and AvailableVRs must hold the current argument
4071/// position, and will be updated to account for this argument.
4072static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags,
4073 unsigned PtrByteSize, unsigned LinkageSize,
4074 unsigned ParamAreaSize, unsigned &ArgOffset,
4075 unsigned &AvailableFPRs,
4076 unsigned &AvailableVRs) {
4077 bool UseMemory = false;
4078
4079 // Respect alignment of argument on the stack.
4080 Align Alignment =
4081 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
4082 ArgOffset = alignTo(ArgOffset, Alignment);
4083 // If there's no space left in the argument save area, we must
4084 // use memory (this check also catches zero-sized arguments).
4085 if (ArgOffset >= LinkageSize + ParamAreaSize)
4086 UseMemory = true;
4087
4088 // Allocate argument on the stack.
4089 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
4090 if (Flags.isInConsecutiveRegsLast())
4091 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4092 // If we overran the argument save area, we must use memory
4093 // (this check catches arguments passed partially in memory)
4094 if (ArgOffset > LinkageSize + ParamAreaSize)
4095 UseMemory = true;
4096
4097 // However, if the argument is actually passed in an FPR or a VR,
4098 // we don't use memory after all.
4099 if (!Flags.isByVal()) {
4100 if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
4101 if (AvailableFPRs > 0) {
4102 --AvailableFPRs;
4103 return false;
4104 }
4105 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
4106 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
4107 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
4108 ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
4109 if (AvailableVRs > 0) {
4110 --AvailableVRs;
4111 return false;
4112 }
4113 }
4114
4115 return UseMemory;
4116}
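// Editorial illustration (ELFv2: LinkageSize 32, ParamAreaSize 64):
// a ninth i64 argument starts at offset 96 == 32 + 64 and has no
// register class to fall back on, so it uses memory; an f64 at the
// same offset still returns false as long as AvailableFPRs > 0.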
4117
4118/// EnsureStackAlignment - Round stack frame size up from NumBytes to
4119/// ensure minimum alignment required for target.
4120 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
4121 unsigned NumBytes) {
4122 return alignTo(NumBytes, Lowering->getStackAlign());
4123}
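// Editorial illustration: with the usual 16-byte ABI stack alignment,
// NumBytes == 120 is rounded up to 128.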
4124
4125SDValue PPCTargetLowering::LowerFormalArguments(
4126 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4127 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4128 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4129 if (Subtarget.isAIXABI())
4130 return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
4131 InVals);
4132 if (Subtarget.is64BitELFABI())
4133 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
4134 InVals);
4135 assert(Subtarget.is32BitELFABI());
4136 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
4137 InVals);
4138}
4139
4140SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
4141 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4142 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4143 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4144
4145 // 32-bit SVR4 ABI Stack Frame Layout:
4146 // +-----------------------------------+
4147 // +--> | Back chain |
4148 // | +-----------------------------------+
4149 // | | Floating-point register save area |
4150 // | +-----------------------------------+
4151 // | | General register save area |
4152 // | +-----------------------------------+
4153 // | | CR save word |
4154 // | +-----------------------------------+
4155 // | | VRSAVE save word |
4156 // | +-----------------------------------+
4157 // | | Alignment padding |
4158 // | +-----------------------------------+
4159 // | | Vector register save area |
4160 // | +-----------------------------------+
4161 // | | Local variable space |
4162 // | +-----------------------------------+
4163 // | | Parameter list area |
4164 // | +-----------------------------------+
4165 // | | LR save word |
4166 // | +-----------------------------------+
4167 // SP--> +--- | Back chain |
4168 // +-----------------------------------+
4169 //
4170 // Specifications:
4171 // System V Application Binary Interface PowerPC Processor Supplement
4172 // AltiVec Technology Programming Interface Manual
4173
4174 MachineFunction &MF = DAG.getMachineFunction();
4175 MachineFrameInfo &MFI = MF.getFrameInfo();
4176 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4177
4178 EVT PtrVT = getPointerTy(MF.getDataLayout());
4179 // Potential tail calls could cause overwriting of argument stack slots.
4180 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4181 (CallConv == CallingConv::Fast));
4182 const Align PtrAlign(4);
4183
4184 // Assign locations to all of the incoming arguments.
4185 SmallVector<CCValAssign, 16> ArgLocs;
4186 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
4187 *DAG.getContext());
4188
4189 // Reserve space for the linkage area on the stack.
4190 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4191 CCInfo.AllocateStack(LinkageSize, PtrAlign);
4192 if (useSoftFloat())
4193 CCInfo.PreAnalyzeFormalArguments(Ins);
4194
4195 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
4196 CCInfo.clearWasPPCF128();
4197
4198 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4199 CCValAssign &VA = ArgLocs[i];
4200
4201 // Arguments stored in registers.
4202 if (VA.isRegLoc()) {
4203 const TargetRegisterClass *RC;
4204 EVT ValVT = VA.getValVT();
4205
4206 switch (ValVT.getSimpleVT().SimpleTy) {
4207 default:
4208 llvm_unreachable("ValVT not supported by formal arguments Lowering");
4209 case MVT::i1:
4210 case MVT::i32:
4211 RC = &PPC::GPRCRegClass;
4212 break;
4213 case MVT::f32:
4214 if (Subtarget.hasP8Vector())
4215 RC = &PPC::VSSRCRegClass;
4216 else if (Subtarget.hasSPE())
4217 RC = &PPC::GPRCRegClass;
4218 else
4219 RC = &PPC::F4RCRegClass;
4220 break;
4221 case MVT::f64:
4222 if (Subtarget.hasVSX())
4223 RC = &PPC::VSFRCRegClass;
4224 else if (Subtarget.hasSPE())
4225 // SPE passes doubles in GPR pairs.
4226 RC = &PPC::GPRCRegClass;
4227 else
4228 RC = &PPC::F8RCRegClass;
4229 break;
4230 case MVT::v16i8:
4231 case MVT::v8i16:
4232 case MVT::v4i32:
4233 RC = &PPC::VRRCRegClass;
4234 break;
4235 case MVT::v4f32:
4236 RC = &PPC::VRRCRegClass;
4237 break;
4238 case MVT::v2f64:
4239 case MVT::v2i64:
4240 RC = &PPC::VRRCRegClass;
4241 break;
4242 }
4243
4244 SDValue ArgValue;
4245 // Transform the arguments stored in physical registers into
4246 // virtual ones.
4247 if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
4248 assert(i + 1 < e && "No second half of double precision argument");
4249 Register RegLo = MF.addLiveIn(VA.getLocReg(), RC);
4250 Register RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
4251 SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
4252 SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
4253 if (!Subtarget.isLittleEndian())
4254 std::swap (ArgValueLo, ArgValueHi);
4255 ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
4256 ArgValueHi);
4257 } else {
4258 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
4259 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
4260 ValVT == MVT::i1 ? MVT::i32 : ValVT);
4261 if (ValVT == MVT::i1)
4262 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
4263 }
4264
4265 InVals.push_back(ArgValue);
4266 } else {
4267 // Argument stored in memory.
4268 assert(VA.isMemLoc());
4269
4270 // Get the extended size of the argument type in stack
4271 unsigned ArgSize = VA.getLocVT().getStoreSize();
4272 // Get the actual size of the argument type
4273 unsigned ObjSize = VA.getValVT().getStoreSize();
4274 unsigned ArgOffset = VA.getLocMemOffset();
4275 // Stack objects in PPC32 are right justified.
4276 ArgOffset += ArgSize - ObjSize;
4277 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
4278
4279 // Create load nodes to retrieve arguments from the stack.
4280 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4281 InVals.push_back(
4282 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
4283 }
4284 }
4285
4286 // Assign locations to all of the incoming aggregate by value arguments.
4287 // Aggregates passed by value are stored in the local variable space of the
4288 // caller's stack frame, right above the parameter list area.
4289 SmallVector<CCValAssign, 16> ByValArgLocs;
4290 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
4291 ByValArgLocs, *DAG.getContext());
4292
4293 // Reserve stack space for the allocations in CCInfo.
4294 CCByValInfo.AllocateStack(CCInfo.getStackSize(), PtrAlign);
4295
4296 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
4297
4298 // Area that is at least reserved in the caller of this function.
4299 unsigned MinReservedArea = CCByValInfo.getStackSize();
4300 MinReservedArea = std::max(MinReservedArea, LinkageSize);
4301
4302 // Set the size that is at least reserved in caller of this function. Tail
4303 // call optimized function's reserved stack space needs to be aligned so that
4304 // taking the difference between two stack areas will result in an aligned
4305 // stack.
4306 MinReservedArea =
4307 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4308 FuncInfo->setMinReservedArea(MinReservedArea);
4309
4310 SmallVector<SDValue, 8> MemOps;
4311
4312 // If the function takes variable number of arguments, make a frame index for
4313 // the start of the first vararg value... for expansion of llvm.va_start.
4314 if (isVarArg) {
4315 static const MCPhysReg GPArgRegs[] = {
4316 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4317 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4318 };
4319 const unsigned NumGPArgRegs = std::size(GPArgRegs);
4320
4321 static const MCPhysReg FPArgRegs[] = {
4322 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
4323 PPC::F8
4324 };
4325 unsigned NumFPArgRegs = std::size(FPArgRegs);
4326
4327 if (useSoftFloat() || hasSPE())
4328 NumFPArgRegs = 0;
4329
4330 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
4331 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
4332
4333 // Make room for NumGPArgRegs and NumFPArgRegs.
4334 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
4335 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
4336
4337 FuncInfo->setVarArgsStackOffset(MFI.CreateFixedObject(
4338 PtrVT.getSizeInBits() / 8, CCInfo.getStackSize(), true));
4339
4340 FuncInfo->setVarArgsFrameIndex(
4341 MFI.CreateStackObject(Depth, Align(8), false));
4342 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4343
4344 // The fixed integer arguments of a variadic function are stored to the
4345 // VarArgsFrameIndex on the stack so that they may be loaded by
4346 // dereferencing the result of va_next.
4347 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
4348 // Get an existing live-in vreg, or add a new one.
4349 Register VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
4350 if (!VReg)
4351 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
4352
4353 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4354 SDValue Store =
4355 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4356 MemOps.push_back(Store);
4357 // Increment the address by four for the next argument to store
4358 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4359 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4360 }
4361
4362 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
4363 // is set.
4364 // The double arguments are stored to the VarArgsFrameIndex
4365 // on the stack.
4366 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
4367 // Get an existing live-in vreg, or add a new one.
4368 Register VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
4369 if (!VReg)
4370 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
4371
4372 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
4373 SDValue Store =
4374 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4375 MemOps.push_back(Store);
4376 // Increment the address by eight for the next argument to store
4377 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
4378 PtrVT);
4379 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4380 }
4381 }
4382
4383 if (!MemOps.empty())
4384 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4385
4386 return Chain;
4387}
4388
4389// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4390// value to MVT::i64 and then truncate to the correct register size.
4391SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
4392 EVT ObjectVT, SelectionDAG &DAG,
4393 SDValue ArgVal,
4394 const SDLoc &dl) const {
4395 if (Flags.isSExt())
4396 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
4397 DAG.getValueType(ObjectVT));
4398 else if (Flags.isZExt())
4399 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
4400 DAG.getValueType(ObjectVT));
4401
4402 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
4403}
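// Editorial illustration: a signext i32 argument arriving in an i64
// register becomes
//   t1 = AssertSext t0, i32;  arg = truncate t1 to i32
// so later combines may assume the upper 32 bits are sign bits.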
4404
4405SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
4406 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4407 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4408 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4409 // TODO: add description of PPC stack frame format, or at least some docs.
4410 //
4411 bool isELFv2ABI = Subtarget.isELFv2ABI();
4412 bool isLittleEndian = Subtarget.isLittleEndian();
4413 MachineFunction &MF = DAG.getMachineFunction();
4414 MachineFrameInfo &MFI = MF.getFrameInfo();
4415 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4416
4417 assert(!(CallConv == CallingConv::Fast && isVarArg) &&
4418 "fastcc not supported on varargs functions");
4419
4420 EVT PtrVT = getPointerTy(MF.getDataLayout());
4421 // Potential tail calls could cause overwriting of argument stack slots.
4422 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4423 (CallConv == CallingConv::Fast));
4424 unsigned PtrByteSize = 8;
4425 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4426
4427 static const MCPhysReg GPR[] = {
4428 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4429 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4430 };
4431 static const MCPhysReg VR[] = {
4432 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4433 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4434 };
4435
4436 const unsigned Num_GPR_Regs = std::size(GPR);
4437 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4438 const unsigned Num_VR_Regs = std::size(VR);
4439
4440 // Do a first pass over the arguments to determine whether the ABI
4441 // guarantees that our caller has allocated the parameter save area
4442 // on its stack frame. In the ELFv1 ABI, this is always the case;
4443 // in the ELFv2 ABI, it is true if this is a vararg function or if
4444 // any parameter is located in a stack slot.
4445
4446 bool HasParameterArea = !isELFv2ABI || isVarArg;
4447 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
4448 unsigned NumBytes = LinkageSize;
4449 unsigned AvailableFPRs = Num_FPR_Regs;
4450 unsigned AvailableVRs = Num_VR_Regs;
4451 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4452 if (Ins[i].Flags.isNest())
4453 continue;
4454
4455 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
4456 PtrByteSize, LinkageSize, ParamAreaSize,
4457 NumBytes, AvailableFPRs, AvailableVRs))
4458 HasParameterArea = true;
4459 }
4460
4461 // Add DAG nodes to load the arguments or copy them out of registers. On
4462 // entry to a function on PPC, the arguments start after the linkage area,
4463 // although the first ones are often in registers.
4464
4465 unsigned ArgOffset = LinkageSize;
4466 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4467 SmallVector<SDValue, 8> MemOps;
4468 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4469 unsigned CurArgIdx = 0;
4470 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4471 SDValue ArgVal;
4472 bool needsLoad = false;
4473 EVT ObjectVT = Ins[ArgNo].VT;
4474 EVT OrigVT = Ins[ArgNo].ArgVT;
4475 unsigned ObjSize = ObjectVT.getStoreSize();
4476 unsigned ArgSize = ObjSize;
4477 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4478 if (Ins[ArgNo].isOrigArg()) {
4479 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4480 CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4481 }
4482 // We re-align the argument offset for each argument, except when using the
4483 // fast calling convention, in which case we re-align only when the argument
4484 // will actually use a stack slot.
4485 unsigned CurArgOffset;
4486 Align Alignment;
4487 auto ComputeArgOffset = [&]() {
4488 /* Respect alignment of argument on the stack. */
4489 Alignment =
4490 CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
4491 ArgOffset = alignTo(ArgOffset, Alignment);
4492 CurArgOffset = ArgOffset;
4493 };
4494
4495 if (CallConv != CallingConv::Fast) {
4496 ComputeArgOffset();
4497
4498 /* Compute GPR index associated with argument offset. */
4499 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4500 GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
4501 }
4502
4503 // FIXME the codegen can be much improved in some cases.
4504 // We do not have to keep everything in memory.
4505 if (Flags.isByVal()) {
4506 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4507
4508 if (CallConv == CallingConv::Fast)
4509 ComputeArgOffset();
4510
4511 // ObjSize is the true size; ArgSize is ObjSize rounded up to a multiple of the register size.
4512 ObjSize = Flags.getByValSize();
4513 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4514 // Empty aggregate parameters do not take up registers. Examples:
4515 // struct { } a;
4516 // union { } b;
4517 // int c[0];
4518 // etc. However, we have to provide a place-holder in InVals, so
4519 // pretend we have an 8-byte item at the current address for that
4520 // purpose.
4521 if (!ObjSize) {
4522 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4523 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4524 InVals.push_back(FIN);
4525 continue;
4526 }
4527
4528 // Create a stack object covering all stack doublewords occupied
4529 // by the argument. If the argument is (fully or partially) on
4530 // the stack, or if the argument is fully in registers but the
4531 // caller has allocated the parameter save anyway, we can refer
4532 // directly to the caller's stack frame. Otherwise, create a
4533 // local copy in our own frame.
4534 int FI;
4535 if (HasParameterArea ||
4536 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
4537 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
4538 else
4539 FI = MFI.CreateStackObject(ArgSize, Alignment, false);
4540 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4541
4542 // Handle aggregates smaller than 8 bytes.
4543 if (ObjSize < PtrByteSize) {
4544 // The value of the object is its address, which differs from the
4545 // address of the enclosing doubleword on big-endian systems.
4546 SDValue Arg = FIN;
4547 if (!isLittleEndian) {
4548 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
4549 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
4550 }
4551 InVals.push_back(Arg);
4552
4553 if (GPR_idx != Num_GPR_Regs) {
4554 Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4555 FuncInfo->addLiveInAttr(VReg, Flags);
4556 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4557 EVT ObjType = EVT::getIntegerVT(*DAG.getContext(), ObjSize * 8);
4558 SDValue Store =
4559 DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
4560 MachinePointerInfo(&*FuncArg), ObjType);
4561 MemOps.push_back(Store);
4562 }
4563 // Whether we copied from a register or not, advance the offset
4564 // into the parameter save area by a full doubleword.
4565 ArgOffset += PtrByteSize;
4566 continue;
4567 }
4568
4569 // The value of the object is its address, which is the address of
4570 // its first stack doubleword.
4571 InVals.push_back(FIN);
4572
4573 // Store whatever pieces of the object are in registers to memory.
4574 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4575 if (GPR_idx == Num_GPR_Regs)
4576 break;
4577
4578 Register VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4579 FuncInfo->addLiveInAttr(VReg, Flags);
4580 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4581 SDValue Addr = FIN;
4582 if (j) {
4583 SDValue Off = DAG.getConstant(j, dl, PtrVT);
4584 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
4585 }
4586 unsigned StoreSizeInBits = std::min(PtrByteSize, (ObjSize - j)) * 8;
4587 EVT ObjType = EVT::getIntegerVT(*DAG.getContext(), StoreSizeInBits);
4588 SDValue Store =
4589 DAG.getTruncStore(Val.getValue(1), dl, Val, Addr,
4590 MachinePointerInfo(&*FuncArg, j), ObjType);
4591 MemOps.push_back(Store);
4592 ++GPR_idx;
4593 }
4594 ArgOffset += ArgSize;
4595 continue;
4596 }
4597
4598 switch (ObjectVT.getSimpleVT().SimpleTy) {
4599 default: llvm_unreachable("Unhandled argument type!");
4600 case MVT::i1:
4601 case MVT::i32:
4602 case MVT::i64:
4603 if (Flags.isNest()) {
4604 // The 'nest' parameter, if any, is passed in R11.
4605 Register VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
4606 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4607
4608 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4609 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4610
4611 break;
4612 }
4613
4614 // These can be scalar arguments or elements of an integer array type
4615 // passed directly. Clang may use those instead of "byval" aggregate
4616 // types to avoid forcing arguments to memory unnecessarily.
4617 if (GPR_idx != Num_GPR_Regs) {
4618 Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4619 FuncInfo->addLiveInAttr(VReg, Flags);
4620 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4621
4622 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4623 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4624 // value to MVT::i64 and then truncate to the correct register size.
4625 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4626 } else {
4627 if (CallConv == CallingConv::Fast)
4628 ComputeArgOffset();
4629
4630 needsLoad = true;
4631 ArgSize = PtrByteSize;
4632 }
4633 if (CallConv != CallingConv::Fast || needsLoad)
4634 ArgOffset += 8;
4635 break;
4636
4637 case MVT::f32:
4638 case MVT::f64:
4639 // These can be scalar arguments or elements of a float array type
4640 // passed directly. The latter are used to implement ELFv2 homogeneous
4641 // float aggregates.
4642 if (FPR_idx != Num_FPR_Regs) {
4643 unsigned VReg;
4644
4645 if (ObjectVT == MVT::f32)
4646 VReg = MF.addLiveIn(FPR[FPR_idx],
4647 Subtarget.hasP8Vector()
4648 ? &PPC::VSSRCRegClass
4649 : &PPC::F4RCRegClass);
4650 else
4651 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
4652 ? &PPC::VSFRCRegClass
4653 : &PPC::F8RCRegClass);
4654
4655 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4656 ++FPR_idx;
4657 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4658 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4659 // once we support fp <-> gpr moves.
4660
4661 // This can only ever happen in the presence of f32 array types,
4662 // since otherwise we never run out of FPRs before running out
4663 // of GPRs.
4664 Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4665 FuncInfo->addLiveInAttr(VReg, Flags);
4666 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4667
4668 if (ObjectVT == MVT::f32) {
4669 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4670 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4671 DAG.getConstant(32, dl, MVT::i32));
4672 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4673 }
4674
4675 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4676 } else {
4677 if (CallConv == CallingConv::Fast)
4678 ComputeArgOffset();
4679
4680 needsLoad = true;
4681 }
4682
4683 // When passing an array of floats, the array occupies consecutive
4684 // space in the argument area; only round up to the next doubleword
4685 // at the end of the array. Otherwise, each float takes 8 bytes.
4686 if (CallConv != CallingConv::Fast || needsLoad) {
4687 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4688 ArgOffset += ArgSize;
4689 if (Flags.isInConsecutiveRegsLast())
4690 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4691 }
4692 break;
4693 case MVT::v4f32:
4694 case MVT::v4i32:
4695 case MVT::v8i16:
4696 case MVT::v16i8:
4697 case MVT::v2f64:
4698 case MVT::v2i64:
4699 case MVT::v1i128:
4700 case MVT::f128:
4701 // These can be scalar arguments or elements of a vector array type
4702 // passed directly. The latter are used to implement ELFv2 homogeneous
4703 // vector aggregates.
4704 if (VR_idx != Num_VR_Regs) {
4705 Register VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4706 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4707 ++VR_idx;
4708 } else {
4709 if (CallConv == CallingConv::Fast)
4710 ComputeArgOffset();
4711 needsLoad = true;
4712 }
4713 if (CallConv != CallingConv::Fast || needsLoad)
4714 ArgOffset += 16;
4715 break;
4716 }
4717
4718 // We need to load the argument to a virtual register if we determined
4719 // above that we ran out of physical registers of the appropriate type.
4720 if (needsLoad) {
4721 if (ObjSize < ArgSize && !isLittleEndian)
4722 CurArgOffset += ArgSize - ObjSize;
4723 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4724 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4725 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4726 }
4727
4728 InVals.push_back(ArgVal);
4729 }
4730
4731 // Area that is at least reserved in the caller of this function.
4732 unsigned MinReservedArea;
4733 if (HasParameterArea)
4734 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4735 else
4736 MinReservedArea = LinkageSize;
4737
4738 // Set the size that is at least reserved in caller of this function. Tail
4739 // call optimized functions' reserved stack space needs to be aligned so that
4740 // taking the difference between two stack areas will result in an aligned
4741 // stack.
4742 MinReservedArea =
4743 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4744 FuncInfo->setMinReservedArea(MinReservedArea);
4745
4746 // If the function takes variable number of arguments, make a frame index for
4747 // the start of the first vararg value... for expansion of llvm.va_start.
4748 // The ELFv2 ABI spec states:
4749 // C programs that are intended to be *portable* across different compilers
4750 // and architectures must use the header file <stdarg.h> to deal with variable
4751 // argument lists.
4752 if (isVarArg && MFI.hasVAStart()) {
4753 int Depth = ArgOffset;
4754
4755 FuncInfo->setVarArgsFrameIndex(
4756 MFI.CreateFixedObject(PtrByteSize, Depth, true));
4757 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4758
4759 // If this function is vararg, store any remaining integer argument regs
4760 // to their spots on the stack so that they may be loaded by dereferencing
4761 // the result of va_next.
4762 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4763 GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4764 Register VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4765 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4766 SDValue Store =
4767 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4768 MemOps.push_back(Store);
4769 // Increment the address by eight for the next argument to store
4770 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4771 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4772 }
4773 }
4774
4775 if (!MemOps.empty())
4776 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4777
4778 return Chain;
4779}
4780
4781/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4782/// adjusted to accommodate the arguments for the tailcall.
4783static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4784 unsigned ParamSize) {
4785
4786 if (!isTailCall) return 0;
4787
4788 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4789 unsigned CallerMinReservedArea = FI->getMinReservedArea();
4790 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4791 // Remember only if the new adjustment is bigger.
4792 if (SPDiff < FI->getTailCallSPDelta())
4793 FI->setTailCallSPDelta(SPDiff);
4794
4795 return SPDiff;
4796}
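// Editorial illustration: if the caller reserved 112 bytes but the
// tail callee's parameter area needs 144, SPDiff is -32 and the call
// sequence must grow the stack by 32 bytes before the jump.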
4797
4798static bool isFunctionGlobalAddress(const GlobalValue *CalleeGV);
4799
4800static bool callsShareTOCBase(const Function *Caller,
4801 const GlobalValue *CalleeGV,
4802 const TargetMachine &TM) {
4803 // It does not make sense to call callsShareTOCBase() with a caller that
4804 // is PC Relative since PC Relative callers do not have a TOC.
4805#ifndef NDEBUG
4806 const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
4807 assert(!STICaller->isUsingPCRelativeCalls() &&
4808 "PC Relative callers do not have a TOC and cannot share a TOC Base");
4809#endif
4810
4811 // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4812 // don't have enough information to determine if the caller and callee share
4813 // the same TOC base, so we have to pessimistically assume they don't for
4814 // correctness.
4815 if (!CalleeGV)
4816 return false;
4817
4818 // If the callee is preemptable, then the static linker will use a plt-stub
4819 // which saves the toc to the stack, and needs a nop after the call
4820 // instruction to convert to a toc-restore.
4821 if (!TM.shouldAssumeDSOLocal(CalleeGV))
4822 return false;
4823
4824 // Functions with PC Relative enabled may clobber the TOC in the same DSO.
4825 // We may need a TOC restore in the situation where the caller requires a
4826 // valid TOC but the callee is PC Relative and does not.
4827 const Function *F = dyn_cast<Function>(CalleeGV);
4828 const GlobalAlias *Alias = dyn_cast<GlobalAlias>(CalleeGV);
4829
4830 // If we have an Alias we can try to get the function from there.
4831 if (Alias) {
4832 const GlobalObject *GlobalObj = Alias->getAliaseeObject();
4833 F = dyn_cast<Function>(GlobalObj);
4834 }
4835
4836 // If we still have no valid function pointer we do not have enough
4837 // information to determine if the callee uses PC Relative calls so we must
4838 // assume that it does.
4839 if (!F)
4840 return false;
4841
4842 // If the callee uses PC Relative we cannot guarantee that the callee won't
4843 // clobber the TOC of the caller and so we must assume that the two
4844 // functions do not share a TOC base.
4845 const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
4846 if (STICallee->isUsingPCRelativeCalls())
4847 return false;
4848
4849 // If the GV is not a strong definition then we need to assume it can be
4850 // replaced by another function at link time. The function that replaces
4851 // it may not share the same TOC as the caller since the callee may be
4852 // replaced by a PC Relative version of the same function.
4853 if (!CalleeGV->isStrongDefinitionForLinker())
4854 return false;
4855
4856 // The medium and large code models are expected to provide a sufficiently
4857 // large TOC to provide all data addressing needs of a module with a
4858 // single TOC.
4859 if (CodeModel::Medium == TM.getCodeModel() ||
4860 CodeModel::Large == TM.getCodeModel())
4861 return true;
4862
4863 // Any explicitly-specified sections and section prefixes must also match.
4864 // Also, if we're using -ffunction-sections, then each function is always in
4865 // a different section (the same is true for COMDAT functions).
4866 if (TM.getFunctionSections() || CalleeGV->hasComdat() ||
4867 Caller->hasComdat() || CalleeGV->getSection() != Caller->getSection())
4868 return false;
4869 if (const auto *F = dyn_cast<Function>(CalleeGV)) {
4870 if (F->getSectionPrefix() != Caller->getSectionPrefix())
4871 return false;
4872 }
4873
4874 return true;
4875}
4876
4877static bool
4878 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4879 const SmallVectorImpl<ISD::OutputArg> &Outs) {
4880 assert(Subtarget.is64BitELFABI());
4881
4882 const unsigned PtrByteSize = 8;
4883 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4884
4885 static const MCPhysReg GPR[] = {
4886 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4887 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4888 };
4889 static const MCPhysReg VR[] = {
4890 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4891 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4892 };
4893
4894 const unsigned NumGPRs = std::size(GPR);
4895 const unsigned NumFPRs = 13;
4896 const unsigned NumVRs = std::size(VR);
4897 const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4898
4899 unsigned NumBytes = LinkageSize;
4900 unsigned AvailableFPRs = NumFPRs;
4901 unsigned AvailableVRs = NumVRs;
4902
4903 for (const ISD::OutputArg& Param : Outs) {
4904 if (Param.Flags.isNest()) continue;
4905
4906 if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize,
4907 LinkageSize, ParamAreaSize, NumBytes,
4908 AvailableFPRs, AvailableVRs))
4909 return true;
4910 }
4911 return false;
4912}
4913
4914static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
4915 if (CB.arg_size() != CallerFn->arg_size())
4916 return false;
4917
4918 auto CalleeArgIter = CB.arg_begin();
4919 auto CalleeArgEnd = CB.arg_end();
4920 Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4921
4922 for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4923 const Value* CalleeArg = *CalleeArgIter;
4924 const Value* CallerArg = &(*CallerArgIter);
4925 if (CalleeArg == CallerArg)
4926 continue;
4927
4928 // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4929 // tail call @callee([4 x i64] undef, [4 x i64] %b)
4930 // }
4931 // 1st argument of callee is undef and has the same type as caller.
4932 if (CalleeArg->getType() == CallerArg->getType() &&
4933 isa<UndefValue>(CalleeArg))
4934 continue;
4935
4936 return false;
4937 }
4938
4939 return true;
4940}
4941
4942 // Returns true if TCO is possible between the caller's and callee's
4943// calling conventions.
4944static bool
4945 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4946 CallingConv::ID CalleeCC) {
4947 // Tail calls are possible with fastcc and ccc.
4948 auto isTailCallableCC = [] (CallingConv::ID CC){
4949 return CC == CallingConv::C || CC == CallingConv::Fast;
4950 };
4951 if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4952 return false;
4953
4954 // We can safely tail call both fastcc and ccc callees from a c calling
4955 // convention caller. If the caller is fastcc, we may have less stack space
4956 // than a non-fastcc caller with the same signature so disable tail-calls in
4957 // that case.
4958 return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4959}
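// Editorial summary of the rule above: a C caller may tail call both
// C and fastcc callees; a fastcc caller may only tail call fastcc
// callees, since it may have reserved less stack than a C caller.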
4960
4961bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4962 const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
4963 CallingConv::ID CallerCC, const CallBase *CB, bool isVarArg,
4964 const SmallVectorImpl<ISD::OutputArg> &Outs,
4965 const SmallVectorImpl<ISD::InputArg> &Ins, const Function *CallerFunc,
4966 bool isCalleeExternalSymbol) const {
4967 bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4968
4969 if (DisableSCO && !TailCallOpt) return false;
4970
4971 // Variadic argument functions are not supported.
4972 if (isVarArg) return false;
4973
4974 // Check that the calling conventions are compatible for tco.
4975 if (!areCallingConvEligibleForTCO_64SVR4(CallerCC, CalleeCC))
4976 return false;
4977
4978 // A caller containing any byval parameters is not supported.
4979 if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4980 return false;
4981
4982 // A callee containing any byval parameters is not supported either.
4983 // Note: This is a quick workaround, because in some cases, e.g.
4984 // caller's stack size > callee's stack size, we are still able to apply
4985 // sibling call optimization. For example, gcc is able to do SCO for caller1
4986 // in the following example, but not for caller2.
4987 // struct test {
4988 // long int a;
4989 // char ary[56];
4990 // } gTest;
4991 // __attribute__((noinline)) int callee(struct test v, struct test *b) {
4992 // b->a = v.a;
4993 // return 0;
4994 // }
4995 // void caller1(struct test a, struct test c, struct test *b) {
4996 // callee(gTest, b); }
4997 // void caller2(struct test *b) { callee(gTest, b); }
4998 if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4999 return false;
5000
5001 // If callee and caller use different calling conventions, we cannot pass
5002 // parameters on stack since offsets for the parameter area may be different.
5003 if (CallerCC != CalleeCC && needStackSlotPassParameters(Subtarget, Outs))
5004 return false;
5005
5006 // All variants of 64-bit ELF ABIs without PC-Relative addressing require that
5007 // the caller and callee share the same TOC for TCO/SCO. If the caller and
5008 // callee potentially have different TOC bases then we cannot tail call since
5009 // we need to restore the TOC pointer after the call.
5010 // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
5011 // We cannot guarantee this for indirect calls or calls to external functions.
5012 // When PC-Relative addressing is used, the concept of the TOC is no longer
5013 // applicable so this check is not required.
5014 // Check first for indirect calls.
5015 if (!Subtarget.isUsingPCRelativeCalls() &&
5016 !isFunctionGlobalAddress(CalleeGV) && !isCalleeExternalSymbol)
5017 return false;
5018
5019 // Check if we share the TOC base.
5020 if (!Subtarget.isUsingPCRelativeCalls() &&
5021 !callsShareTOCBase(CallerFunc, CalleeGV, getTargetMachine()))
5022 return false;
5023
5024 // TCO allows altering callee ABI, so we don't have to check further.
5025 if (CalleeCC == CallingConv::Fast && TailCallOpt)
5026 return true;
5027
5028 if (DisableSCO) return false;
5029
5030 // If the callee uses the same argument list as the caller, then we can
5031 // apply SCO in this case. If not, then we need to check whether the callee
5032 // needs stack for passing arguments.
5033 // PC Relative tail calls may not have a CallBase.
5034 // If there is no CallBase we cannot verify if we have the same argument
5035 // list so assume that we don't have the same argument list.
5036 if (CB && !hasSameArgumentList(CallerFunc, *CB) &&
5037 needStackSlotPassParameters(Subtarget, Outs))
5038 return false;
5039 else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
5040 return false;
5041
5042 return true;
5043}
5044
5045/// IsEligibleForTailCallOptimization - Check whether the call is eligible
5046/// for tail call optimization. Targets which want to do tail call
5047/// optimization should implement this function.
5048bool PPCTargetLowering::IsEligibleForTailCallOptimization(
5049 const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
5050 CallingConv::ID CallerCC, bool isVarArg,
5051 const SmallVectorImpl<ISD::InputArg> &Ins) const {
5052 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5053 return false;
5054
5055 // Variable argument functions are not supported.
5056 if (isVarArg)
5057 return false;
5058
5059 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
5060 // Functions containing by val parameters are not supported.
5061 if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
5062 return false;
5063
5064 // Non-PIC/GOT tail calls are supported.
5065 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
5066 return true;
5067
5068 // At the moment we can only do local tail calls (in same module, hidden
5069 // or protected) if we are generating PIC.
5070 if (CalleeGV)
5071 return CalleeGV->hasHiddenVisibility() ||
5072 CalleeGV->hasProtectedVisibility();
5073 }
5074
5075 return false;
5076}
5077
5078/// isCallCompatibleAddress - Return the immediate to use if the specified
5079/// 32-bit value is representable in the immediate field of a BxA instruction.
5080 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
5081 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
5082 if (!C) return nullptr;
5083
5084 int Addr = C->getZExtValue();
5085 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
5086 SignExtend32<26>(Addr) != Addr)
5087 return nullptr; // Top 6 bits have to be sext of immediate.
5088
5089 return DAG
5090 .getConstant(
5091 (int)C->getZExtValue() >> 2, SDLoc(Op),
5092 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
5093 .getNode();
5094}
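// Editorial illustration: Op == 0xdead0 is word-aligned and fits in a
// signed 26-bit field, so a constant holding 0xdead0 >> 2 is returned
// for BLA; an odd address such as 0xdead1 yields nullptr.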
5095
5096namespace {
5097
5098struct TailCallArgumentInfo {
5099 SDValue Arg;
5100 SDValue FrameIdxOp;
5101 int FrameIdx = 0;
5102
5103 TailCallArgumentInfo() = default;
5104};
5105
5106} // end anonymous namespace
5107
5108/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
5109 static void StoreTailCallArgumentsToStackSlot(
5110 SelectionDAG &DAG, SDValue Chain,
5111 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
5112 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
5113 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
5114 SDValue Arg = TailCallArgs[i].Arg;
5115 SDValue FIN = TailCallArgs[i].FrameIdxOp;
5116 int FI = TailCallArgs[i].FrameIdx;
5117 // Store relative to framepointer.
5118 MemOpChains.push_back(DAG.getStore(
5119 Chain, dl, Arg, FIN,
5120 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
5121 }
5122}
5123
5124/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
5125/// the appropriate stack slot for the tail call optimized function call.
5126 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
5127 SDValue OldRetAddr, SDValue OldFP,
5128 int SPDiff, const SDLoc &dl) {
5129 if (SPDiff) {
5130 // Calculate the new stack slot for the return address.
5131 MachineFunction &MF = DAG.getMachineFunction();
5132 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
5133 const PPCFrameLowering *FL = Subtarget.getFrameLowering();
5134 bool isPPC64 = Subtarget.isPPC64();
5135 int SlotSize = isPPC64 ? 8 : 4;
5136 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
5137 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
5138 NewRetAddrLoc, true);
5139 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
5140 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
5141 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
5142 MachinePointerInfo::getFixedStack(MF, NewRetAddr));
5143 }
5144 return Chain;
5145}
5146
5147/// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
5148/// the position of the argument.
5149static void
5150 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
5151 SDValue Arg, int SPDiff, unsigned ArgOffset,
5152 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
5153 int Offset = ArgOffset + SPDiff;
5154 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
5155 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
5156 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
5157 SDValue FIN = DAG.getFrameIndex(FI, VT);
5158 TailCallArgumentInfo Info;
5159 Info.Arg = Arg;
5160 Info.FrameIdxOp = FIN;
5161 Info.FrameIdx = FI;
5162 TailCallArguments.push_back(Info);
5163}
5164
5165 /// EmitTailCallLoadFPAndRetAddr - Emit load from frame pointer and return address
5166/// stack slot. Returns the chain as result and the loaded frame pointers in
5167/// LROpOut/FPOpout. Used when tail calling.
5168SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
5169 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
5170 SDValue &FPOpOut, const SDLoc &dl) const {
5171 if (SPDiff) {
5172 // Load the LR and FP stack slot for later adjusting.
5173 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5174 LROpOut = getReturnAddrFrameIndex(DAG);
5175 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
5176 Chain = SDValue(LROpOut.getNode(), 1);
5177 }
5178 return Chain;
5179}
5180
5181/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
5182/// by "Src" to address "Dst" of size "Size". Alignment information is
5183/// specified by the specific parameter attribute. The copy will be passed as
5184/// a byval function parameter.
5185/// Sometimes what we are copying is the end of a larger object, the part that
5186/// does not fit in registers.
5187static SDValue CreateCopyOfByValArgument(SDValue Arg, SDValue Dst,
5188 SDValue Chain, ISD::ArgFlagsTy Flags,
5189 SelectionDAG &DAG, const SDLoc &dl) {
5190 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
5191 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
5192 Flags.getNonZeroByValAlign(), false, false, false,
5193 MachinePointerInfo(), MachinePointerInfo());
5194}
5195
5196/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
5197/// tail calls.
5198static void LowerMemOpCallTo(
5199 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
5200 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
5201 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
5202 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
5203 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5204 if (!isTailCall) {
5205 if (isVector) {
5206 SDValue StackPtr;
5207 if (isPPC64)
5208 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5209 else
5210 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5211 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
5212 DAG.getConstant(ArgOffset, dl, PtrVT));
5213 }
5214 MemOpChains.push_back(
5215 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5216 // Calculate and remember argument location.
5217 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
5218 TailCallArguments);
5219}
5220
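// PrepareTailCall - Store any tail-call arguments to their final stack slots,
// move the return address to its new slot, and emit the CALLSEQ_END that must
// precede the tail-call node itself.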
5221static void
5222PrepareTailCall(SelectionDAG &DAG, SDValue &InGlue, SDValue &Chain,
5223 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
5224 SDValue FPOp,
5225 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
5226 // Emit a sequence of copyto/copyfrom virtual registers for arguments that
5227 // might overwrite each other in case of tail call optimization.
5228 SmallVector<SDValue, 8> MemOpChains2;
5229 // Do not flag preceding copytoreg stuff together with the following stuff.
5230 InGlue = SDValue();
5231 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
5232 MemOpChains2, dl);
5233 if (!MemOpChains2.empty())
5234 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
5235
5236 // Store the return address to the appropriate stack slot.
5237 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
5238
5239 // Emit callseq_end just before tailcall node.
5240 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, dl);
5241 InGlue = Chain.getValue(1);
5242}
5243
5244// Is this global address that of a function that can be called by name (as
5245// opposed to something that must hold a descriptor for an indirect call)?
5246static bool isFunctionGlobalAddress(const GlobalValue *GV) {
5247 if (GV) {
5248 if (GV->isThreadLocal())
5249 return false;
5250
5251 return GV->getValueType()->isFunctionTy();
5252 }
5253
5254 return false;
5255}
5256
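// LowerCallResult - Copy the result values of a call out of their assigned
// physical registers, apply any promotion-related assertions and truncations,
// and append the values to InVals.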
5257SDValue PPCTargetLowering::LowerCallResult(
5258 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
5259 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5260 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5261 SmallVector<CCValAssign, 16> RVLocs;
5262 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
5263 *DAG.getContext());
5264
5265 CCRetInfo.AnalyzeCallResult(
5266 Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
5267 ? RetCC_PPC_Cold
5268 : RetCC_PPC);
5269
5270 // Copy all of the result registers out of their specified physreg.
5271 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5272 CCValAssign &VA = RVLocs[i];
5273 assert(VA.isRegLoc() && "Can only return in registers!");
5274
5275 SDValue Val;
5276
5277 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
5278 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5279 InGlue);
5280 Chain = Lo.getValue(1);
5281 InGlue = Lo.getValue(2);
5282 VA = RVLocs[++i]; // skip ahead to next loc
5283 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5284 InGlue);
5285 Chain = Hi.getValue(1);
5286 InGlue = Hi.getValue(2);
5287 if (!Subtarget.isLittleEndian())
5288 std::swap (Lo, Hi);
5289 Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
5290 } else {
5291 Val = DAG.getCopyFromReg(Chain, dl,
5292 VA.getLocReg(), VA.getLocVT(), InGlue);
5293 Chain = Val.getValue(1);
5294 InGlue = Val.getValue(2);
5295 }
5296
5297 switch (VA.getLocInfo()) {
5298 default: llvm_unreachable("Unknown loc info!");
5299 case CCValAssign::Full: break;
5300 case CCValAssign::AExt:
5301 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5302 break;
5303 case CCValAssign::ZExt:
5304 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5305 DAG.getValueType(VA.getValVT()));
5306 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5307 break;
5308 case CCValAssign::SExt:
5309 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5310 DAG.getValueType(VA.getValVT()));
5311 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5312 break;
5313 }
5314
5315 InVals.push_back(Val);
5316 }
5317
5318 return Chain;
5319}
5320
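// Returns true if this call must be lowered as an indirect call: the callee
// is neither a function global address nor an external symbol, and cannot be
// reached with a BLA-compatible absolute address.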
5321static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
5322 const PPCSubtarget &Subtarget, bool isPatchPoint) {
5323 auto *G = dyn_cast<GlobalAddressSDNode>(Callee);
5324 const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5325
5326 // PatchPoint calls are not indirect.
5327 if (isPatchPoint)
5328 return false;
5329
5330 if (isFunctionGlobalAddress(GV) || isa<ExternalSymbolSDNode>(Callee))
5331 return false;
5332
5333 // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs cannot,
5334 // because the immediate function pointer points to a descriptor instead of
5335 // a function entry point. The ELFv2 ABI cannot use a BLA because the function
5336 // pointer immediate points to the global entry point, while the BLA would
5337 // need to jump to the local entry point (see rL211174).
5338 if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
5339 isBLACompatibleAddress(Callee, DAG))
5340 return false;
5341
5342 return true;
5343}
5344
5345// AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
5346static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
5347 return Subtarget.isAIXABI() ||
5348 (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
5349}
5350
5351static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
5352 const Function &Caller, const SDValue &Callee,
5353 const PPCSubtarget &Subtarget,
5354 const TargetMachine &TM,
5355 bool IsStrictFPCall = false) {
5356 if (CFlags.IsTailCall)
5357 return PPCISD::TC_RETURN;
5358
5359 unsigned RetOpc = 0;
5360 // This is a call through a function pointer.
5361 if (CFlags.IsIndirect) {
5362 // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
5363 // indirect calls. The save of the caller's TOC pointer to the stack will be
5364 // inserted into the DAG as part of call lowering. The restore of the TOC
5365 // pointer is modeled by using a pseudo instruction for the call opcode that
5366 // represents the 2 instruction sequence of an indirect branch and link,
5367 // immediately followed by a load of the TOC pointer from the stack save
5368 // slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC
5369 // as it is not saved or used.
5370 RetOpc = isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
5371 : PPCISD::BCTRL;
5372 } else if (Subtarget.isUsingPCRelativeCalls()) {
5373 assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
5374 RetOpc = PPCISD::CALL_NOTOC;
5375 } else if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI()) {
5376 // The ABIs that maintain a TOC pointer across calls need to have a nop
5377 // immediately following the call instruction if the caller and callee may
5378 // have different TOC bases. At link time if the linker determines the calls
5379 // may not share a TOC base, the call is redirected to a trampoline inserted
5380 // by the linker. The trampoline will (among other things) save the caller's
5381 // TOC pointer at an ABI designated offset in the linkage area and the
5382 // linker will rewrite the nop to be a load of the TOC pointer from the
5383 // linkage area into gpr2.
5384 auto *G = dyn_cast<GlobalAddressSDNode>(Callee);
5385 const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5386 RetOpc =
5387 callsShareTOCBase(&Caller, GV, TM) ? PPCISD::CALL : PPCISD::CALL_NOP;
5388 } else
5389 RetOpc = PPCISD::CALL;
5390 if (IsStrictFPCall) {
5391 switch (RetOpc) {
5392 default:
5393 llvm_unreachable("Unknown call opcode");
5394 case PPCISD::BCTRL_LOAD_TOC:
5395 RetOpc = PPCISD::BCTRL_LOAD_TOC_RM;
5396 break;
5397 case PPCISD::BCTRL:
5398 RetOpc = PPCISD::BCTRL_RM;
5399 break;
5400 case PPCISD::CALL_NOTOC:
5401 RetOpc = PPCISD::CALL_NOTOC_RM;
5402 break;
5403 case PPCISD::CALL:
5404 RetOpc = PPCISD::CALL_RM;
5405 break;
5406 case PPCISD::CALL_NOP:
5407 RetOpc = PPCISD::CALL_NOP_RM;
5408 break;
5409 }
5410 }
5411 return RetOpc;
5412}
5413
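// Rewrite the callee into the form the call node expects for a direct call:
// a BLA-compatible absolute address, an AIX entry-point symbol, a target
// global address (with MO_PLT when the PLT must be used), or a target
// external symbol.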
5414static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
5415 const SDLoc &dl, const PPCSubtarget &Subtarget) {
5416 if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
5417 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
5418 return SDValue(Dest, 0);
5419
5420 // Returns true if the callee is local, and false otherwise.
5421 auto isLocalCallee = [&]() {
5422 const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
5423 const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5424
5425 return DAG.getTarget().shouldAssumeDSOLocal(GV) &&
5426 !isa_and_nonnull<GlobalIFunc>(GV);
5427 };
5428
5429 // The PLT is only used in 32-bit ELF PIC mode. Attempting to use the PLT in
5430 // a static relocation model causes some versions of GNU LD (2.17.50, at
5431 // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5432 // built with secure-PLT.
5433 bool UsePlt =
5434 Subtarget.is32BitELFABI() && !isLocalCallee() &&
5435 Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5436
5437 const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {
5438 const TargetMachine &TM = Subtarget.getTargetMachine();
5439 const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering();
5440 MCSymbolXCOFF *S =
5441 cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM));
5442
5443 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5444 return DAG.getMCSymbol(S, PtrVT);
5445 };
5446
5447 auto *G = dyn_cast<GlobalAddressSDNode>(Callee);
5448 const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5449 if (isFunctionGlobalAddress(GV)) {
5450 const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
5451
5452 if (Subtarget.isAIXABI()) {
5453 assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
5454 return getAIXFuncEntryPointSymbolSDNode(GV);
5455 }
5456 return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
5457 UsePlt ? PPCII::MO_PLT : 0);
5458 }
5459
5460 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5461 const char *SymName = S->getSymbol();
5462 if (Subtarget.isAIXABI()) {
5463 // If there exists a user-declared function whose name is the same as the
5464 // ExternalSymbol's, then we pick up the user-declared version.
5465 const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5466 if (const Function *F =
5467 dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
5468 return getAIXFuncEntryPointSymbolSDNode(F);
5469
5470 // On AIX, direct function calls reference the symbol for the function's
5471 // entry point, which is named by prepending a "." before the function's
5472 // C-linkage name. A Qualname is returned here because an external
5473 // function entry point is a csect with XTY_ER property.
5474 const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {
5475 auto &Context = DAG.getMachineFunction().getMMI().getContext();
5476 MCSectionXCOFF *Sec = Context.getXCOFFSection(
5477 (Twine(".") + Twine(SymName)).str(), SectionKind::getMetadata(),
5478 XCOFF::CsectProperties(XCOFF::XMC_PR, XCOFF::XTY_ER));
5479 return Sec->getQualNameSymbol();
5480 };
5481
5482 SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();
5483 }
5484 return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
5485 UsePlt ? PPCII::MO_PLT : 0);
5486 }
5487
5488 // No transformation needed.
5489 assert(Callee.getNode() && "What no callee?");
5490 return Callee;
5491}
5492
5493static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
5494 assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
5495 "Expected a CALLSEQ_STARTSDNode.");
5496
5497 // The last operand is the chain, except when the node has glue. If the node
5498 // has glue, then the last operand is the glue, and the chain is the second
5499 // last operand.
5500 SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5501 if (LastValue.getValueType() != MVT::Glue)
5502 return LastValue;
5503
5504 return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5505}
5506
5507 // Creates the node that moves a function's address into the count register
5508// to prepare for an indirect call instruction.
5509static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5510 SDValue &Glue, SDValue &Chain,
5511 const SDLoc &dl) {
5512 SDValue MTCTROps[] = {Chain, Callee, Glue};
5513 EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5514 Chain = DAG.getNode(PPCISD::MTCTR, dl, ArrayRef(ReturnTypes, 2),
5515 ArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5516 // The glue is the second value produced.
5517 Glue = Chain.getValue(1);
5518}
5519
5520static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5521 SDValue &Glue, SDValue &Chain,
5522 SDValue CallSeqStart,
5523 const CallBase *CB, const SDLoc &dl,
5524 bool hasNest,
5525 const PPCSubtarget &Subtarget) {
5526 // Function pointers in the 64-bit SVR4 ABI do not point to the function
5527 // entry point, but to the function descriptor (the function entry point
5528 // address is part of the function descriptor though).
5529 // The function descriptor is a three doubleword structure with the
5530 // following fields: function entry point, TOC base address and
5531 // environment pointer.
5532 // Thus for a call through a function pointer, the following actions need
5533 // to be performed:
5534 // 1. Save the TOC of the caller in the TOC save area of its stack
5535 // frame (this is done in LowerCall_64SVR4() or LowerCall_AIX()).
5536 // 2. Load the address of the function entry point from the function
5537 // descriptor.
5538 // 3. Load the TOC of the callee from the function descriptor into r2.
5539 // 4. Load the environment pointer from the function descriptor into
5540 // r11.
5541 // 5. Branch to the function entry point address.
5542 // 6. On return of the callee, the TOC of the caller needs to be
5543 // restored (this is done in FinishCall()).
5544 //
5545 // The loads are scheduled at the beginning of the call sequence, and the
5546 // register copies are flagged together to ensure that no other
5547 // operations can be scheduled in between. E.g. without flagging the
5548 // copies together, a TOC access in the caller could be scheduled between
5549 // the assignment of the callee TOC and the branch to the callee, which leads
5550 // to incorrect code.
5551
5552 // Start by loading the function address from the descriptor.
5553 SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5554 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5555 ? (MachineMemOperand::MODereferenceable |
5556 MachineMemOperand::MOInvariant)
5557 : MachineMemOperand::MONone;
5558
5559 MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);
5560
5561 // Registers used in building the DAG.
5562 const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5563 const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5564
5565 // Offsets of descriptor members.
5566 const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5567 const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
5568
5569 const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5570 const Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
5571
5572 // One load for the function's entry point address.
5573 SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
5574 Alignment, MMOFlags);
5575
5576 // One for loading the TOC anchor for the module that contains the called
5577 // function.
5578 SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
5579 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
5580 SDValue TOCPtr =
5581 DAG.getLoad(RegVT, dl, LDChain, AddTOC,
5582 MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
5583
5584 // One for loading the environment pointer.
5585 SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
5586 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
5587 SDValue LoadEnvPtr =
5588 DAG.getLoad(RegVT, dl, LDChain, AddPtr,
5589 MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);
5590
5591
5592 // Then copy the newly loaded TOC anchor to the TOC pointer.
5593 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
5594 Chain = TOCVal.getValue(0);
5595 Glue = TOCVal.getValue(1);
5596
5597 // If the function call has an explicit 'nest' parameter, it takes the
5598 // place of the environment pointer.
5599 assert((!hasNest || !Subtarget.isAIXABI()) &&
5600 "Nest parameter is not supported on AIX.");
5601 if (!hasNest) {
5602 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
5603 Chain = EnvVal.getValue(0);
5604 Glue = EnvVal.getValue(1);
5605 }
5606
5607 // The rest of the indirect call sequence is the same as the non-descriptor
5608 // DAG.
5609 prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
5610}
5611
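// Build the operand list for the call node: the chain, the callee (or, for
// indirect calls on TOC-based ABIs, the TOC-restore address), the tail-call
// SP delta, the argument registers, any TOC/CR dependencies, the register
// mask, and finally the glue.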
5612static void
5613buildCallOperands(SmallVectorImpl<SDValue> &Ops,
5614 PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
5615 SelectionDAG &DAG,
5616 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
5617 SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
5618 const PPCSubtarget &Subtarget) {
5619 const bool IsPPC64 = Subtarget.isPPC64();
5620 // MVT for a general purpose register.
5621 const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
5622
5623 // First operand is always the chain.
5624 Ops.push_back(Chain);
5625
5626 // If it's a direct call pass the callee as the second operand.
5627 if (!CFlags.IsIndirect)
5628 Ops.push_back(Callee);
5629 else {
5630 assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");
5631
5632 // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
5633 // on the stack (this would have been done in `LowerCall_64SVR4` or
5634 // `LowerCall_AIX`). The call instruction is a pseudo instruction that
5635 // represents both the indirect branch and a load that restores the TOC
5636 // pointer from the linkage area. The operand for the TOC restore is an add
5637 // of the TOC save offset to the stack pointer. This must be the second
5638 // operand: after the chain input but before any other variadic arguments.
5639 // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not
5640 // saved or used.
5641 if (isTOCSaveRestoreRequired(Subtarget)) {
5642 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
5643
5644 SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
5645 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5646 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5647 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
5648 Ops.push_back(AddTOC);
5649 }
5650
5651 // Add the register used for the environment pointer.
5652 if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
5653 Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
5654 RegVT));
5655
5656
5657 // Add CTR register as callee so a bctr can be emitted later.
5658 if (CFlags.IsTailCall)
5659 Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5660 }
5661
5662 // If this is a tail call add stack pointer delta.
5663 if (CFlags.IsTailCall)
5664 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5665
5666 // Add argument registers to the end of the list so that they are known live
5667 // into the call.
5668 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5669 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5670 RegsToPass[i].second.getValueType()));
5671
5672 // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
5673 // no way to mark dependencies as implicit here.
5674 // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5675 if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
5676 !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
5677 Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
5678
5679 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5680 if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
5681 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5682
5683 // Add a register mask operand representing the call-preserved registers.
5684 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5685 const uint32_t *Mask =
5686 TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
5687 assert(Mask && "Missing call preserved mask for calling convention");
5688 Ops.push_back(DAG.getRegisterMask(Mask));
5689
5690 // If the glue is valid, it is the last operand.
5691 if (Glue.getNode())
5692 Ops.push_back(Glue);
5693}
5694
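// FinishCall - Code common to all ABIs at the end of call lowering: pick the
// call opcode, transform the callee, build the operand list, and emit either
// a TC_RETURN tail call or a regular call node followed by CALLSEQ_END and
// the copying of results in LowerCallResult.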
5695SDValue PPCTargetLowering::FinishCall(
5696 CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
5697 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
5698 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5699 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5700 SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {
5701
5702 if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
5703 Subtarget.isAIXABI())
5704 setUsesTOCBasePtr(DAG);
5705
5706 unsigned CallOpc =
5707 getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
5708 Subtarget, DAG.getTarget(), CB ? CB->isStrictFP() : false);
5709
5710 if (!CFlags.IsIndirect)
5711 Callee = transformCallee(Callee, DAG, dl, Subtarget);
5712 else if (Subtarget.usesFunctionDescriptors())
5713 prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
5714 dl, CFlags.HasNest, Subtarget);
5715 else
5716 prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
5717
5718 // Build the operand list for the call instruction.
5719 SmallVector<SDValue, 8> Ops;
5720 buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
5721 SPDiff, Subtarget);
5722
5723 // Emit tail call.
5724 if (CFlags.IsTailCall) {
5725 // Indirect tail calls when using PC Relative calls do not have the same
5726 // constraints.
5727 assert(((Callee.getOpcode() == ISD::Register &&
5728 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5729 Callee.getOpcode() == ISD::TargetExternalSymbol ||
5730 Callee.getOpcode() == ISD::TargetGlobalAddress ||
5731 isa<ConstantSDNode>(Callee) ||
5732 (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
5733 "Expecting a global address, external symbol, absolute value, "
5734 "register or an indirect tail call when PC Relative calls are "
5735 "used.");
5736 // PC Relative calls also use TC_RETURN as the way to mark tail calls.
5737 assert(CallOpc == PPCISD::TC_RETURN &&
5738 "Unexpected call opcode for a tail call.");
5739 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5740 SDValue Ret = DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5741 DAG.addNoMergeSiteInfo(Ret.getNode(), CFlags.NoMerge);
5742 return Ret;
5743 }
5744
5745 std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5746 Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5747 DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
5748 Glue = Chain.getValue(1);
5749
5750 // When performing tail call optimization the callee pops its arguments off
5751 // the stack. Account for this here so these bytes can be pushed back on in
5752 // PPCFrameLowering::eliminateCallFramePseudoInstr.
5753 int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
5754 getTargetMachine().Options.GuaranteedTailCallOpt)
5755 ? NumBytes
5756 : 0;
5757
5758 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, BytesCalleePops, Glue, dl);
5759 Glue = Chain.getValue(1);
5760
5761 return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
5762 DAG, InVals);
5763}
5764
5765bool PPCTargetLowering::supportsTailCallFor(const CallBase *CB) const {
5766 CallingConv::ID CalleeCC = CB->getCallingConv();
5767 const Function *CallerFunc = CB->getCaller();
5768 CallingConv::ID CallerCC = CallerFunc->getCallingConv();
5769 const Function *CalleeFunc = CB->getCalledFunction();
5770 if (!CalleeFunc)
5771 return false;
5772 const GlobalValue *CalleeGV = dyn_cast<GlobalValue>(CalleeFunc);
5773
5776
5777 GetReturnInfo(CalleeCC, CalleeFunc->getReturnType(),
5778 CalleeFunc->getAttributes(), Outs, *this,
5779 CalleeFunc->getParent()->getDataLayout());
5780
5781 return isEligibleForTCO(CalleeGV, CalleeCC, CallerCC, CB,
5782 CalleeFunc->isVarArg(), Outs, Ins, CallerFunc,
5783 false /*isCalleeExternalSymbol*/);
5784}
5785
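// Decide whether this call may be optimized to a tail call, using the
// 64-bit SVR4 rules when applicable and the generic check otherwise.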
5786bool PPCTargetLowering::isEligibleForTCO(
5787 const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
5788 CallingConv::ID CallerCC, const CallBase *CB, bool isVarArg,
5789 const SmallVectorImpl<ISD::OutputArg> &Outs,
5790 const SmallVectorImpl<ISD::InputArg> &Ins, const Function *CallerFunc,
5791 bool isCalleeExternalSymbol) const {
5792 if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
5793 return false;
5794
5795 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5796 return IsEligibleForTailCallOptimization_64SVR4(
5797 CalleeGV, CalleeCC, CallerCC, CB, isVarArg, Outs, Ins, CallerFunc,
5798 isCalleeExternalSymbol);
5799 else
5800 return IsEligibleForTailCallOptimization(CalleeGV, CalleeCC, CallerCC,
5801 isVarArg, Ins);
5802}
5803
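// LowerCall - Central entry point for call lowering: determines tail-call
// eligibility, assembles the CallFlags, and dispatches to the ABI-specific
// LowerCall_AIX, LowerCall_64SVR4 or LowerCall_32SVR4 routine.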
5804SDValue
5805PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5806 SmallVectorImpl<SDValue> &InVals) const {
5807 SelectionDAG &DAG = CLI.DAG;
5808 SDLoc &dl = CLI.DL;
5809 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5810 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
5811 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
5812 SDValue Chain = CLI.Chain;
5813 SDValue Callee = CLI.Callee;
5814 bool &isTailCall = CLI.IsTailCall;
5815 CallingConv::ID CallConv = CLI.CallConv;
5816 bool isVarArg = CLI.IsVarArg;
5817 bool isPatchPoint = CLI.IsPatchPoint;
5818 const CallBase *CB = CLI.CB;
5819
5820 if (isTailCall) {
5821 MachineFunction &MF = DAG.getMachineFunction();
5822 CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
5823 auto *G = dyn_cast<GlobalAddressSDNode>(Callee);
5824 const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5825 bool IsCalleeExternalSymbol = isa<ExternalSymbolSDNode>(Callee);
5826
5827 isTailCall =
5828 isEligibleForTCO(GV, CallConv, CallerCC, CB, isVarArg, Outs, Ins,
5829 &(MF.getFunction()), IsCalleeExternalSymbol);
5830 if (isTailCall) {
5831 ++NumTailCalls;
5832 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5833 ++NumSiblingCalls;
5834
5835 // PC Relative calls no longer guarantee that the callee is a Global
5836 // Address Node. The callee could be an indirect tail call in which
5837 // case the SDValue for the callee could be a load (to load the address
5838 // of a function pointer) or it may be a register copy (to move the
5839 // address of the callee from a function parameter into a virtual
5840 // register). It may also be an ExternalSymbolSDNode (e.g. memcpy).
5841 assert((Subtarget.isUsingPCRelativeCalls() ||
5842 isa<GlobalAddressSDNode>(Callee)) &&
5843 "Callee should be an llvm::Function object.");
5844
5845 LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
5846 << "\nTCO callee: ");
5847 LLVM_DEBUG(Callee.dump());
5848 }
5849 }
5850
5851 if (!isTailCall && CB && CB->isMustTailCall())
5852 report_fatal_error("failed to perform tail call elimination on a call "
5853 "site marked musttail");
5854
5855 // When long calls (i.e. indirect calls) are always used, calls are always
5856 // made via function pointer. If we have a function name, first translate it
5857 // into a pointer.
5858 if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5859 !isTailCall)
5860 Callee = LowerGlobalAddress(Callee, DAG);
5861
5862 CallFlags CFlags(
5863 CallConv, isTailCall, isVarArg, isPatchPoint,
5864 isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5865 // hasNest
5866 Subtarget.is64BitELFABI() &&
5867 any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
5868 CLI.NoMerge);
5869
5870 if (Subtarget.isAIXABI())
5871 return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5872 InVals, CB);
5873
5874 assert(Subtarget.isSVR4ABI());
5875 if (Subtarget.isPPC64())
5876 return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5877 InVals, CB);
5878 return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5879 InVals, CB);
5880}
5881
5882SDValue PPCTargetLowering::LowerCall_32SVR4(
5883 SDValue Chain, SDValue Callee, CallFlags CFlags,
5884 const SmallVectorImpl<ISD::OutputArg> &Outs,
5885 const SmallVectorImpl<SDValue> &OutVals,
5886 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5887 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5888 const CallBase *CB) const {
5889 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5890 // of the 32-bit SVR4 ABI stack frame layout.
5891
5892 const CallingConv::ID CallConv = CFlags.CallConv;
5893 const bool IsVarArg = CFlags.IsVarArg;
5894 const bool IsTailCall = CFlags.IsTailCall;
5895
5896 assert((CallConv == CallingConv::C ||
5897 CallConv == CallingConv::Cold ||
5898 CallConv == CallingConv::Fast) && "Unknown calling convention!");
5899
5900 const Align PtrAlign(4);
5901
5902 MachineFunction &MF = DAG.getMachineFunction();
5903
5904 // Mark this function as potentially containing a function that contains a
5905 // tail call. As a consequence the frame pointer will be used for dynamic
5906 // allocation and for restoring the caller's stack pointer in this function's
5907 // epilog. This is done because, by tail calling, the called function might
5908 // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
5909 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5910 CallConv == CallingConv::Fast)
5911 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5912
5913 // Count how many bytes are to be pushed on the stack, including the linkage
5914 // area, parameter list area and the part of the local variable space which
5915 // contains copies of aggregates which are passed by value.
5916
5917 // Assign locations to all of the outgoing arguments.
5918 SmallVector<CCValAssign, 16> ArgLocs;
5919 PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5920
5921 // Reserve space for the linkage area on the stack.
5922 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5923 PtrAlign);
5924 if (useSoftFloat())
5925 CCInfo.PreAnalyzeCallOperands(Outs);
5926
5927 if (IsVarArg) {
5928 // Handle fixed and variable vector arguments differently.
5929 // Fixed vector arguments go into registers as long as registers are
5930 // available. Variable vector arguments always go into memory.
5931 unsigned NumArgs = Outs.size();
5932
5933 for (unsigned i = 0; i != NumArgs; ++i) {
5934 MVT ArgVT = Outs[i].VT;
5935 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5936 bool Result;
5937
5938 if (Outs[i].IsFixed) {
5939 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5940 CCInfo);
5941 } else {
5942 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5943 ArgFlags, CCInfo);
5944 }
5945
5946 if (Result) {
5947#ifndef NDEBUG
5948 errs() << "Call operand #" << i << " has unhandled type "
5949 << ArgVT << "\n";
5950#endif
5951 llvm_unreachable(nullptr);
5952 }
5953 }
5954 } else {
5955 // All arguments are treated the same.
5956 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5957 }
5958 CCInfo.clearWasPPCF128();
5959
5960 // Assign locations to all of the outgoing aggregate by value arguments.
5961 SmallVector<CCValAssign, 16> ByValArgLocs;
5962 CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5963
5964 // Reserve stack space for the allocations in CCInfo.
5965 CCByValInfo.AllocateStack(CCInfo.getStackSize(), PtrAlign);
5966
5967 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5968
5969 // Size of the linkage area, parameter list area and the part of the local
5970 // space variable where copies of aggregates which are passed by value are
5971 // stored.
5972 unsigned NumBytes = CCByValInfo.getStackSize();
5973
5974 // Calculate by how many bytes the stack has to be adjusted in case of tail
5975 // call optimization.
5976 int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5977
5978 // Adjust the stack pointer for the new arguments...
5979 // These operations are automatically eliminated by the prolog/epilog pass
5980 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5981 SDValue CallSeqStart = Chain;
5982
5983 // Load the return address and frame pointer so it can be moved somewhere else
5984 // later.
5985 SDValue LROp, FPOp;
5986 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5987
5988 // Set up a copy of the stack pointer for use loading and storing any
5989 // arguments that may not fit in the registers available for argument
5990 // passing.
5991 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5992
5993 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5994 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5995 SmallVector<SDValue, 8> MemOpChains;
5996
5997 bool seenFloatArg = false;
5998 // Walk the register/memloc assignments, inserting copies/loads.
5999 // i - Tracks the index into the list of registers allocated for the call
6000 // RealArgIdx - Tracks the index into the list of actual function arguments
6001 // j - Tracks the index into the list of byval arguments
6002 for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
6003 i != e;
6004 ++i, ++RealArgIdx) {
6005 CCValAssign &VA = ArgLocs[i];
6006 SDValue Arg = OutVals[RealArgIdx];
6007 ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
6008
6009 if (Flags.isByVal()) {
6010 // Argument is an aggregate which is passed by value, thus we need to
6011 // create a copy of it in the local variable space of the current stack
6012 // frame (which is the stack frame of the caller) and pass the address of
6013 // this copy to the callee.
6014 assert((j < ByValArgLocs.size()) && "Index out of bounds!");
6015 CCValAssign &ByValVA = ByValArgLocs[j++];
6016 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
6017
6018 // Memory reserved in the local variable space of the callers stack frame.
6019 unsigned LocMemOffset = ByValVA.getLocMemOffset();
6020
6021 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
6022 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
6023 StackPtr, PtrOff);
6024
6025 // Create a copy of the argument in the local area of the current
6026 // stack frame.
6027 SDValue MemcpyCall =
6028 CreateCopyOfByValArgument(Arg, PtrOff,
6029 CallSeqStart.getNode()->getOperand(0),
6030 Flags, DAG, dl);
6031
6032 // This must go outside the CALLSEQ_START..END.
6033 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
6034 SDLoc(MemcpyCall));
6035 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
6036 NewCallSeqStart.getNode());
6037 Chain = CallSeqStart = NewCallSeqStart;
6038
6039 // Pass the address of the aggregate copy on the stack either in a
6040 // physical register or in the parameter list area of the current stack
6041 // frame to the callee.
6042 Arg = PtrOff;
6043 }
6044
6045 // When useCRBits() is true, there can be i1 arguments.
6046 // It is because getRegisterType(MVT::i1) => MVT::i1,
6047 // and for other integer types getRegisterType() => MVT::i32.
6048 // Extend i1 and ensure callee will get i32.
6049 if (Arg.getValueType() == MVT::i1)
6050 Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
6051 dl, MVT::i32, Arg);
6052
6053 if (VA.isRegLoc()) {
6054 seenFloatArg |= VA.getLocVT().isFloatingPoint();
6055 // Put argument in a physical register.
6056 if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
6057 bool IsLE = Subtarget.isLittleEndian();
6058 SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
6059 DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
6060 RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
6061 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
6062 DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
6063 RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
6064 SVal.getValue(0)));
6065 } else
6066 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
6067 } else {
6068 // Put argument in the parameter list area of the current stack frame.
6069 assert(VA.isMemLoc());
6070 unsigned LocMemOffset = VA.getLocMemOffset();
6071
6072 if (!IsTailCall) {
6073 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
6074 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
6075 StackPtr, PtrOff);
6076
6077 MemOpChains.push_back(
6078 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
6079 } else {
6080 // Calculate and remember argument location.
6081 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
6082 TailCallArguments);
6083 }
6084 }
6085 }
6086
6087 if (!MemOpChains.empty())
6088 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6089
6090 // Build a sequence of copy-to-reg nodes chained together with token chain
6091 // and flag operands which copy the outgoing args into the appropriate regs.
6092 SDValue InGlue;
6093 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6094 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6095 RegsToPass[i].second, InGlue);
6096 InGlue = Chain.getValue(1);
6097 }
6098
6099 // Set CR bit 6 to true if this is a vararg call with floating args passed in
6100 // registers.
6101 if (IsVarArg) {
6102 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
6103 SDValue Ops[] = { Chain, InGlue };
6104
6105 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, dl,
6106 VTs, ArrayRef(Ops, InGlue.getNode() ? 2 : 1));
6107
6108 InGlue = Chain.getValue(1);
6109 }
6110
6111 if (IsTailCall)
6112 PrepareTailCall(DAG, InGlue, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6113 TailCallArguments);
6114
6115 return FinishCall(CFlags, dl, DAG, RegsToPass, InGlue, Chain, CallSeqStart,
6116 Callee, SPDiff, NumBytes, Ins, InVals, CB);
6117}
6118
6119// Copy an argument into memory, being careful to do this outside the
6120// call sequence for the call to which the argument belongs.
6121SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
6122 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
6123 SelectionDAG &DAG, const SDLoc &dl) const {
6124 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
6125 CallSeqStart.getNode()->getOperand(0),
6126 Flags, DAG, dl);
6127 // The MEMCPY must go outside the CALLSEQ_START..END.
6128 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
6129 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
6130 SDLoc(MemcpyCall));
6131 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
6132 NewCallSeqStart.getNode());
6133 return NewCallSeqStart;
6134}
6135
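// LowerCall_64SVR4 - Lower an outgoing call for the 64-bit ELF (SVR4) ABIs:
// size the parameter save area, assign arguments to GPRs/FPRs/VRs or stack
// slots per the ELFv1/ELFv2 rules, and complete the sequence in FinishCall.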
6136SDValue PPCTargetLowering::LowerCall_64SVR4(
6137 SDValue Chain, SDValue Callee, CallFlags CFlags,
6138 const SmallVectorImpl<ISD::OutputArg> &Outs,
6139 const SmallVectorImpl<SDValue> &OutVals,
6140 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6141 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6142 const CallBase *CB) const {
6143 bool isELFv2ABI = Subtarget.isELFv2ABI();
6144 bool isLittleEndian = Subtarget.isLittleEndian();
6145 unsigned NumOps = Outs.size();
6146 bool IsSibCall = false;
6147 bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
6148
6149 EVT PtrVT = getPointerTy(DAG.getDataLayout());
6150 unsigned PtrByteSize = 8;
6151
6152 MachineFunction &MF = DAG.getMachineFunction();
6153
6154 if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
6155 IsSibCall = true;
6156
6157 // Mark this function as potentially containing a function that contains a
6158 // tail call. As a consequence the frame pointer will be used for dynamic
6159 // allocation and for restoring the caller's stack pointer in this function's
6160 // epilog. This is done because, by tail calling, the called function might
6161 // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
6162 if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6163 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6164
6165 assert(!(IsFastCall && CFlags.IsVarArg) &&
6166 "fastcc not supported on varargs functions");
6167
6168 // Count how many bytes are to be pushed on the stack, including the linkage
6169 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
6170 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
6171 // area is 32 bytes reserved space for [SP][CR][LR][TOC].
6172 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6173 unsigned NumBytes = LinkageSize;
6174 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6175
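// The GPRs and VRs available for argument passing under the 64-bit ELF ABI.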
6176 static const MCPhysReg GPR[] = {
6177 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6178 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6179 };
6180 static const MCPhysReg VR[] = {
6181 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6182 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6183 };
6184
6185 const unsigned NumGPRs = std::size(GPR);
6186 const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
6187 const unsigned NumVRs = std::size(VR);
6188
6189 // On ELFv2, we can avoid allocating the parameter area if all the arguments
6190 // can be passed to the callee in registers.
6191 // For the fast calling convention, there is another check below.
6192 // Note: We should keep consistent with LowerFormalArguments_64SVR4()
6193 bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
6194 if (!HasParameterArea) {
6195 unsigned ParamAreaSize = NumGPRs * PtrByteSize;
6196 unsigned AvailableFPRs = NumFPRs;
6197 unsigned AvailableVRs = NumVRs;
6198 unsigned NumBytesTmp = NumBytes;
6199 for (unsigned i = 0; i != NumOps; ++i) {
6200 if (Outs[i].Flags.isNest()) continue;
6201 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
6202 PtrByteSize, LinkageSize, ParamAreaSize,
6203 NumBytesTmp, AvailableFPRs, AvailableVRs))
6204 HasParameterArea = true;
6205 }
6206 }
6207
6208 // When using the fast calling convention, we don't provide backing for
6209 // arguments that will be in registers.
6210 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
6211
6212 // Avoid allocating parameter area for fastcc functions if all the arguments
6213 // can be passed in the registers.
6214 if (IsFastCall)
6215 HasParameterArea = false;
6216
6217 // Add up all the space actually used.
6218 for (unsigned i = 0; i != NumOps; ++i) {
6219 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6220 EVT ArgVT = Outs[i].VT;
6221 EVT OrigVT = Outs[i].ArgVT;
6222
6223 if (Flags.isNest())
6224 continue;
6225
6226 if (IsFastCall) {
6227 if (Flags.isByVal()) {
6228 NumGPRsUsed += (Flags.getByValSize()+7)/8;
6229 if (NumGPRsUsed > NumGPRs)
6230 HasParameterArea = true;
6231 } else {
6232 switch (ArgVT.getSimpleVT().SimpleTy) {
6233 default: llvm_unreachable("Unexpected ValueType for argument!");
6234 case MVT::i1:
6235 case MVT::i32:
6236 case MVT::i64:
6237 if (++NumGPRsUsed <= NumGPRs)
6238 continue;
6239 break;
6240 case MVT::v4i32:
6241 case MVT::v8i16:
6242 case MVT::v16i8:
6243 case MVT::v2f64:
6244 case MVT::v2i64:
6245 case MVT::v1i128:
6246 case MVT::f128:
6247 if (++NumVRsUsed <= NumVRs)
6248 continue;
6249 break;
6250 case MVT::v4f32:
6251 if (++NumVRsUsed <= NumVRs)
6252 continue;
6253 break;
6254 case MVT::f32:
6255 case MVT::f64:
6256 if (++NumFPRsUsed <= NumFPRs)
6257 continue;
6258 break;
6259 }
6260 HasParameterArea = true;
6261 }
6262 }
6263
6264 /* Respect alignment of argument on the stack. */
6265 auto Alignment =
6266 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6267 NumBytes = alignTo(NumBytes, Alignment);
6268
6269 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6270 if (Flags.isInConsecutiveRegsLast())
6271 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6272 }
6273
6274 unsigned NumBytesActuallyUsed = NumBytes;
6275
6276 // In the old ELFv1 ABI, the prolog code of the callee may store up to 8
6277 // GPR argument registers to the stack, allowing va_start to index over them
6278 // in memory if the function is varargs.
6279 // Because we cannot tell if this is needed on the caller side, we have to
6280 // conservatively assume that it is needed. As such, make sure we have at
6281 // least enough stack space for the caller to store the 8 GPRs.
6282 // In the ELFv2 ABI, we allocate the parameter area iff a callee
6283 // really requires memory operands, e.g. a vararg function.
6284 if (HasParameterArea)
6285 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6286 else
6287 NumBytes = LinkageSize;
6288
6289 // Tail call needs the stack to be aligned.
6290 if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6291 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6292
6293 int SPDiff = 0;
6294
6295 // Calculate by how many bytes the stack has to be adjusted in case of tail
6296 // call optimization.
6297 if (!IsSibCall)
6298 SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6299
6300 // To protect arguments on the stack from being clobbered in a tail call,
6301 // force all the loads to happen before doing any other lowering.
6302 if (CFlags.IsTailCall)
6303 Chain = DAG.getStackArgumentTokenFactor(Chain);
6304
6305 // Adjust the stack pointer for the new arguments...
6306 // These operations are automatically eliminated by the prolog/epilog pass
6307 if (!IsSibCall)
6308 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6309 SDValue CallSeqStart = Chain;
6310
6311 // Load the return address and frame pointer so it can be moved somewhere else
6312 // later.
6313 SDValue LROp, FPOp;
6314 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6315
6316 // Set up a copy of the stack pointer for use loading and storing any
6317 // arguments that may not fit in the registers available for argument
6318 // passing.
6319 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6320
6321 // Figure out which arguments are going to go in registers, and which in
6322 // memory. Also, if this is a vararg function, floating point operations
6323 // must be stored to our stack, and loaded into integer regs as well, if
6324 // any integer regs are available for argument passing.
6325 unsigned ArgOffset = LinkageSize;
6326
6327 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6328 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6329
6330 SmallVector<SDValue, 8> MemOpChains;
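// Walk the outgoing arguments, placing each in the next free register of the
// appropriate class when possible and otherwise storing it to its slot in
// the parameter save area.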
6331 for (unsigned i = 0; i != NumOps; ++i) {
6332 SDValue Arg = OutVals[i];
6333 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6334 EVT ArgVT = Outs[i].VT;
6335 EVT OrigVT = Outs[i].ArgVT;
6336
6337 // PtrOff will be used to store the current argument to the stack if a
6338 // register cannot be found for it.
6339 SDValue PtrOff;
6340
6341 // We re-align the argument offset for each argument, except when using the
6342 // fast calling convention, when we need to make sure we do that only when
6343 // we'll actually use a stack slot.
6344 auto ComputePtrOff = [&]() {
6345 /* Respect alignment of argument on the stack. */
6346 auto Alignment =
6347 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6348 ArgOffset = alignTo(ArgOffset, Alignment);
6349
6350 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6351
6352 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6353 };
6354
6355 if (!IsFastCall) {
6356 ComputePtrOff();
6357
6358 /* Compute GPR index associated with argument offset. */
6359 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
6360 GPR_idx = std::min(GPR_idx, NumGPRs);
6361 }
6362
6363 // Promote integers to 64-bit values.
6364 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
6365 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6366 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6367 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6368 }
6369
6370 // FIXME memcpy is used way more than necessary. Correctness first.
6371 // Note: "by value" is code for passing a structure by value, not
6372 // basic types.
6373 if (Flags.isByVal()) {
6374 // Note: Size includes alignment padding, so
6375 // struct x { short a; char b; }
6376 // will have Size = 4. With #pragma pack(1), it will have Size = 3.
6377 // These are the proper values we need for right-justifying the
6378 // aggregate in a parameter register.
6379 unsigned Size = Flags.getByValSize();
6380
6381 // An empty aggregate parameter takes up no storage and no
6382 // registers.
6383 if (Size == 0)
6384 continue;
6385
6386 if (IsFastCall)
6387 ComputePtrOff();
6388
6389 // All aggregates smaller than 8 bytes must be passed right-justified.
6390 if (Size==1 || Size==2 || Size==4) {
6391 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6392 if (GPR_idx != NumGPRs) {
6393 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6394 MachinePointerInfo(), VT);
6395 MemOpChains.push_back(Load.getValue(1));
6396 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6397
6398 ArgOffset += PtrByteSize;
6399 continue;
6400 }
6401 }
6402
6403 if (GPR_idx == NumGPRs && Size < 8) {
6404 SDValue AddPtr = PtrOff;
6405 if (!isLittleEndian) {
6406 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6407 PtrOff.getValueType());
6408 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6409 }
6410 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6411 CallSeqStart,
6412 Flags, DAG, dl);
6413 ArgOffset += PtrByteSize;
6414 continue;
6415 }
6416 // Copy the object to parameter save area if it can not be entirely passed
6417 // by registers.
6418 // FIXME: we only need to copy the parts which need to be passed in
6419 // parameter save area. For the parts passed by registers, we don't need
6420 // to copy them to the stack although we need to allocate space for them
6421 // in parameter save area.
6422 if ((NumGPRs - GPR_idx) * PtrByteSize < Size)
6423 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6424 CallSeqStart,
6425 Flags, DAG, dl);
6426
6427 // When a register is available, pass a small aggregate right-justified.
6428 if (Size < 8 && GPR_idx != NumGPRs) {
6429 // The easiest way to get this right-justified in a register
6430 // is to copy the structure into the rightmost portion of a
6431 // local variable slot, then load the whole slot into the
6432 // register.
6433 // FIXME: The memcpy seems to produce pretty awful code for
6434 // small aggregates, particularly for packed ones.
6435 // FIXME: It would be preferable to use the slot in the
6436 // parameter save area instead of a new local variable.
6437 SDValue AddPtr = PtrOff;
6438 if (!isLittleEndian) {
6439 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
6440 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6441 }
6442 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6443 CallSeqStart,
6444 Flags, DAG, dl);
6445
6446 // Load the slot into the register.
6447 SDValue Load =
6448 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
6449 MemOpChains.push_back(Load.getValue(1));
6450 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6451
6452 // Done with this argument.
6453 ArgOffset += PtrByteSize;
6454 continue;
6455 }
6456
6457 // For aggregates larger than PtrByteSize, copy the pieces of the
6458 // object that fit into registers from the parameter save area.
6459 for (unsigned j=0; j<Size; j+=PtrByteSize) {
6460 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6461 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6462 if (GPR_idx != NumGPRs) {
6463 unsigned LoadSizeInBits = std::min(PtrByteSize, (Size - j)) * 8;
6464 EVT ObjType = EVT::getIntegerVT(*DAG.getContext(), LoadSizeInBits);
6465 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, AddArg,
6466 MachinePointerInfo(), ObjType);
6467
6468 MemOpChains.push_back(Load.getValue(1));
6469 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6470 ArgOffset += PtrByteSize;
6471 } else {
6472 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6473 break;
6474 }
6475 }
6476 continue;
6477 }
6478
6479 switch (Arg.getSimpleValueType().SimpleTy) {
6480 default: llvm_unreachable("Unexpected ValueType for argument!");
6481 case MVT::i1:
6482 case MVT::i32:
6483 case MVT::i64:
6484 if (Flags.isNest()) {
6485 // The 'nest' parameter, if any, is passed in R11.
6486 RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
6487 break;
6488 }
6489
6490 // These can be scalar arguments or elements of an integer array type
6491 // passed directly. Clang may use those instead of "byval" aggregate
6492 // types to avoid forcing arguments to memory unnecessarily.
6493 if (GPR_idx != NumGPRs) {
6494 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6495 } else {
6496 if (IsFastCall)
6497 ComputePtrOff();
6498
6499 assert(HasParameterArea &&
6500 "Parameter area must exist to pass an argument in memory.");
6501 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6502 true, CFlags.IsTailCall, false, MemOpChains,
6503 TailCallArguments, dl);
6504 if (IsFastCall)
6505 ArgOffset += PtrByteSize;
6506 }
6507 if (!IsFastCall)
6508 ArgOffset += PtrByteSize;
6509 break;
6510 case MVT::f32:
6511 case MVT::f64: {
6512 // These can be scalar arguments or elements of a float array type
6513 // passed directly. The latter are used to implement ELFv2 homogenous
6514 // float aggregates.
6515
6516 // Named arguments go into FPRs first, and once they overflow, the
6517 // remaining arguments go into GPRs and then the parameter save area.
6518 // Unnamed arguments for vararg functions always go to GPRs and
6519 // then the parameter save area. For now, put all arguments to vararg
6520 // routines always in both locations (FPR *and* GPR or stack slot).
6521 bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
6522 bool NeededLoad = false;
6523
6524 // First load the argument into the next available FPR.
6525 if (FPR_idx != NumFPRs)
6526 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6527
6528 // Next, load the argument into GPR or stack slot if needed.
6529 if (!NeedGPROrStack)
6530 ;
6531 else if (GPR_idx != NumGPRs && !IsFastCall) {
6532 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6533 // once we support fp <-> gpr moves.
6534
6535 // In the non-vararg case, this can only ever happen in the
6536 // presence of f32 array types, since otherwise we never run
6537 // out of FPRs before running out of GPRs.
6538 SDValue ArgVal;
6539
6540 // Double values are always passed in a single GPR.
6541 if (Arg.getValueType() != MVT::f32) {
6542 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6543
6544 // Non-array float values are extended and passed in a GPR.
6545 } else if (!Flags.isInConsecutiveRegs()) {
6546 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6547 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6548
6549 // If we have an array of floats, we collect every odd element
6550 // together with its predecessor into one GPR.
6551 } else if (ArgOffset % PtrByteSize != 0) {
6552 SDValue Lo, Hi;
6553 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6554 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6555 if (!isLittleEndian)
6556 std::swap(Lo, Hi);
6557 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6558
6559 // The final element, if even, goes into the first half of a GPR.
6560 } else if (Flags.isInConsecutiveRegsLast()) {
6561 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6562 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6563 if (!isLittleEndian)
6564 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6565 DAG.getConstant(32, dl, MVT::i32));
6566
6567 // Non-final even elements are skipped; they will be handled
6568 // together with the subsequent argument on the next go-around.
6569 } else
6570 ArgVal = SDValue();
6571
6572 if (ArgVal.getNode())
6573 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6574 } else {
6575 if (IsFastCall)
6576 ComputePtrOff();
6577
6578 // Single-precision floating-point values are mapped to the
6579 // second (rightmost) word of the stack doubleword.
6580 if (Arg.getValueType() == MVT::f32 &&
6581 !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6582 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6583 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6584 }
6585
6586 assert(HasParameterArea &&
6587 "Parameter area must exist to pass an argument in memory.");
6588 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6589 true, CFlags.IsTailCall, false, MemOpChains,
6590 TailCallArguments, dl);
6591
6592 NeededLoad = true;
6593 }
6594 // When passing an array of floats, the array occupies consecutive
6595 // space in the argument area; only round up to the next doubleword
6596 // at the end of the array. Otherwise, each float takes 8 bytes.
6597 if (!IsFastCall || NeededLoad) {
6598 ArgOffset += (Arg.getValueType() == MVT::f32 &&
6599 Flags.isInConsecutiveRegs()) ? 4 : 8;
6600 if (Flags.isInConsecutiveRegsLast())
6601 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6602 }
6603 break;
6604 }
6605 case MVT::v4f32:
6606 case MVT::v4i32:
6607 case MVT::v8i16:
6608 case MVT::v16i8:
6609 case MVT::v2f64:
6610 case MVT::v2i64:
6611 case MVT::v1i128:
6612 case MVT::f128:
6613 // These can be scalar arguments or elements of a vector array type
6614 // passed directly. The latter are used to implement ELFv2 homogenous
6615 // vector aggregates.
6616
6617 // For a varargs call, named arguments go into VRs or on the stack as
6618 // usual; unnamed arguments always go to the stack or the corresponding
6619 // GPRs when within range. For now, we always put the value in both
6620 // locations (or even all three).
6621 if (CFlags.IsVarArg) {
6622 assert(HasParameterArea &&
6623 "Parameter area must exist if we have a varargs call.");
6624 // We could elide this store in the case where the object fits
6625 // entirely in R registers. Maybe later.
6626 SDValue Store =
6627 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6628 MemOpChains.push_back(Store);
6629 if (VR_idx != NumVRs) {
6630 SDValue Load =
6631 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6632 MemOpChains.push_back(Load.getValue(1));
6633 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6634 }
6635 ArgOffset += 16;
6636 for (unsigned i=0; i<16; i+=PtrByteSize) {
6637 if (GPR_idx == NumGPRs)
6638 break;
6639 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6640 DAG.getConstant(i, dl, PtrVT));
6641 SDValue Load =
6642 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6643 MemOpChains.push_back(Load.getValue(1));
6644 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6645 }
6646 break;
6647 }
6648
6649 // Non-varargs Altivec params go into VRs or on the stack.
6650 if (VR_idx != NumVRs) {
6651 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6652 } else {
6653 if (IsFastCall)
6654 ComputePtrOff();
6655
6656 assert(HasParameterArea &&
6657 "Parameter area must exist to pass an argument in memory.");
6658 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6659 true, CFlags.IsTailCall, true, MemOpChains,
6660 TailCallArguments, dl);
6661 if (IsFastCall)
6662 ArgOffset += 16;
6663 }
6664
6665 if (!IsFastCall)
6666 ArgOffset += 16;
6667 break;
6668 }
6669 }
6670
6671 assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6672 "mismatch in size of parameter area");
6673 (void)NumBytesActuallyUsed;
6674
6675 if (!MemOpChains.empty())
6676 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6677
6678 // Check if this is an indirect call (MTCTR/BCTRL).
6679 // See prepareDescriptorIndirectCall and buildCallOperands for more
6680 // information about calls through function pointers in the 64-bit SVR4 ABI.
6681 if (CFlags.IsIndirect) {
6682 // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
6683 // caller in the TOC save area.
6684 if (isTOCSaveRestoreRequired(Subtarget)) {
6685 assert(!CFlags.IsTailCall && "Indirect tail calls not supported");
6686 // Load r2 into a virtual register and store it to the TOC save area.
6687 setUsesTOCBasePtr(DAG);
6688 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6689 // TOC save area offset.
6690 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6691 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6692 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6693 Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
6694 MachinePointerInfo::getStack(
6695 DAG.getMachineFunction(), TOCSaveOffset));
6696 }
6697 // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6698 // This does not mean the MTCTR instruction must use R12; it's easier
6699 // to model this as an extra parameter, so do that.
6700 if (isELFv2ABI && !CFlags.IsPatchPoint)
6701 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6702 }
6703
6704 // Build a sequence of copy-to-reg nodes chained together with token chain
6705 // and flag operands which copy the outgoing args into the appropriate regs.
6706 SDValue InGlue;
6707 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6708 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6709 RegsToPass[i].second, InGlue);
6710 InGlue = Chain.getValue(1);
6711 }
6712
6713 if (CFlags.IsTailCall && !IsSibCall)
6714 PrepareTailCall(DAG, InGlue, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6715 TailCallArguments);
6716
6717 return FinishCall(CFlags, dl, DAG, RegsToPass, InGlue, Chain, CallSeqStart,
6718 Callee, SPDiff, NumBytes, Ins, InVals, CB);
6719}
6720
6721// Returns true when the shadow of a general purpose argument register
6722// in the parameter save area is aligned to at least 'RequiredAlign'.
6723static bool isGPRShadowAligned(MCPhysReg Reg, Align RequiredAlign) {
6724 assert(RequiredAlign.value() <= 16 &&
6725 "Required alignment greater than stack alignment.");
6726 switch (Reg) {
6727 default:
6728 report_fatal_error("called on invalid register.");
6729 case PPC::R5:
6730 case PPC::R9:
6731 case PPC::X3:
6732 case PPC::X5:
6733 case PPC::X7:
6734 case PPC::X9:
6735 // These registers are 16-byte aligned, which is the strictest alignment
6736 // we can support.
6737 return true;
6738 case PPC::R3:
6739 case PPC::R7:
6740 case PPC::X4:
6741 case PPC::X6:
6742 case PPC::X8:
6743 case PPC::X10:
6744 // The shadow of these registers in the PSA is 8 byte aligned.
6745 return RequiredAlign <= 8;
6746 case PPC::R4:
6747 case PPC::R6:
6748 case PPC::R8:
6749 case PPC::R10:
6750 return RequiredAlign <= 4;
6751 }
6752}
6753
6754static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6755 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
6756 CCState &S) {
6757 AIXCCState &State = static_cast<AIXCCState &>(S);
6758 const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
6759 State.getMachineFunction().getSubtarget());
6760 const bool IsPPC64 = Subtarget.isPPC64();
6761 const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
6762 const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6763
6764 if (ValVT == MVT::f128)
6765 report_fatal_error("f128 is unimplemented on AIX.");
6766
6767 if (ArgFlags.isNest())
6768 report_fatal_error("Nest arguments are unimplemented.");
6769
6770 static const MCPhysReg GPR_32[] = {// 32-bit registers.
6771 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6772 PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6773 static const MCPhysReg GPR_64[] = {// 64-bit registers.
6774 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6775 PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6776
6777 static const MCPhysReg VR[] = {// Vector registers.
6778 PPC::V2, PPC::V3, PPC::V4, PPC::V5,
6779 PPC::V6, PPC::V7, PPC::V8, PPC::V9,
6780 PPC::V10, PPC::V11, PPC::V12, PPC::V13};
6781
6782 if (ArgFlags.isByVal()) {
6783 if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
6784 report_fatal_error("Pass-by-value arguments with alignment greater than "
6785 "register width are not supported.");
6786
6787 const unsigned ByValSize = ArgFlags.getByValSize();
6788
6789 // An empty aggregate parameter takes up no storage and no registers,
6790 // but needs a MemLoc for a stack slot for the formal arguments side.
6791 if (ByValSize == 0) {
6792 State.addLoc(CCValAssign::getMem(ValNo, ValVT,
6793 State.getStackSize(), RegVT, LocInfo));
6794 return false;
6795 }
6796
6797 const unsigned StackSize = alignTo(ByValSize, PtrAlign);
6798 unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
6799 for (const unsigned E = Offset + StackSize; Offset < E;
6800 Offset += PtrAlign.value()) {
6801 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6802 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6803 else {
6804 State.addLoc(
6805 CCValAssign::getMem(ValNo, ValVT, Offset, RegVT,
6806 LocInfo));
6807 break;
6808 }
6809 }
6810 return false;
6811 }
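// Illustrative example: an 18-byte by-value argument in 32-bit mode reserves
// 20 bytes of PSA (rounded up to the 4-byte register width) and up to five
// GPRs; once the GPRs run out, a single MemLoc covers the rest of the
// aggregate.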
6812
6813 // Arguments always reserve parameter save area.
6814 switch (ValVT.SimpleTy) {
6815 default:
6816 report_fatal_error("Unhandled value type for argument.");
6817 case MVT::i64:
6818 // i64 arguments should have been split to i32 for PPC32.
6819 assert(IsPPC64 && "PPC32 should have split i64 values.");
6820 [[fallthrough]];
6821 case MVT::i1:
6822 case MVT::i32: {
6823 const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
6824 // AIX integer arguments are always passed in register width.
6825 if (ValVT.getFixedSizeInBits() < RegVT.getFixedSizeInBits())
6826 LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
6827 : CCValAssign::LocInfo::ZExt;
6828 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6829 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6830 else
6831 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
6832
6833 return false;
6834 }
6835 case MVT::f32:
6836 case MVT::f64: {
6837 // Parameter save area (PSA) is reserved even if the float passes in fpr.
6838 const unsigned StoreSize = LocVT.getStoreSize();
6839 // Floats are always 4-byte aligned in the PSA on AIX.
6840 // This includes f64 in 64-bit mode for ABI compatibility.
6841 const unsigned Offset =
6842 State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
6843 unsigned FReg = State.AllocateReg(FPR);
6844 if (FReg)
6845 State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
6846
6847 // Reserve and initialize GPRs or initialize the PSA as required.
6848 for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
6849 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6850 assert(FReg && "An FPR should be available when a GPR is reserved.");
6851 if (State.isVarArg()) {
6852 // Successfully reserved GPRs are only initialized for vararg calls.
6853 // Custom handling is required for:
6854 // f64 in PPC32 needs to be split into 2 GPRs.
6855 // f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
6856 State.addLoc(
6857 CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6858 }
6859 } else {
6860 // If there are insufficient GPRs, the PSA needs to be initialized.
6861 // Initialization occurs even if an FPR was initialized for
6862 // compatibility with the AIX XL compiler. The full memory for the
6863 // argument will be initialized even if a prior word is saved in GPR.
6864 // A custom memLoc is used when the argument also passes in FPR so
6865 // that the callee handling can skip over it easily.
6866 State.addLoc(
6867 FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
6868 LocInfo)
6869 : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6870 break;
6871 }
6872 }
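// Illustrative outcome: a vararg f64 on PPC32 with GPRs available gets one
// FPR RegLoc plus two custom GPR RegLocs; a vararg f32 on PPC64 gets one
// FPR RegLoc plus a single custom GPR RegLoc.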
6873
6874 return false;
6875 }
6876 case MVT::v4f32:
6877 case MVT::v4i32:
6878 case MVT::v8i16:
6879 case MVT::v16i8:
6880 case MVT::v2i64:
6881 case MVT::v2f64:
6882 case MVT::v1i128: {
6883 const unsigned VecSize = 16;
6884 const Align VecAlign(VecSize);
6885
6886 if (!State.isVarArg()) {
6887 // If there are vector registers remaining we don't consume any stack
6888 // space.
6889 if (unsigned VReg = State.AllocateReg(VR)) {
6890 State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
6891 return false;
6892 }
6893 // Vectors passed on the stack do not shadow GPRs or FPRs even though they
6894 // might be allocated in the portion of the PSA that is shadowed by the
6895 // GPRs.
6896 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6897 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6898 return false;
6899 }
6900
6901 const unsigned PtrSize = IsPPC64 ? 8 : 4;
6902 ArrayRef<MCPhysReg> GPRs = IsPPC64 ? GPR_64 : GPR_32;
6903
6904 unsigned NextRegIndex = State.getFirstUnallocated(GPRs);
6905 // Burn any underaligned registers and their shadowed stack space until
6906 // we reach the required alignment.
6907 while (NextRegIndex != GPRs.size() &&
6908 !isGPRShadowAligned(GPRs[NextRegIndex], VecAlign)) {
6909 // Shadow allocate register and its stack shadow.
6910 unsigned Reg = State.AllocateReg(GPRs);
6911 State.AllocateStack(PtrSize, PtrAlign);
6912 assert(Reg && "Allocating register unexpectedly failed.");
6913 (void)Reg;
6914 NextRegIndex = State.getFirstUnallocated(GPRs);
6915 }
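// For example: if R3 is the next free GPR, R3 and R4 (and their 8 bytes of
// shadowed PSA) are burned so that the vector's shadow starts at the
// 16-byte-aligned slot under R5.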
6916
6917 // Vectors that are passed as fixed arguments are handled differently.
6918 // They are passed in VRs if any are available (unlike arguments passed
6919 // through ellipses) and shadow GPRs (unlike arguments to non-vararg
6920 // functions).
6921 if (State.isFixed(ValNo)) {
6922 if (unsigned VReg = State.AllocateReg(VR)) {
6923 State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
6924 // Shadow allocate GPRs and stack space even though we pass in a VR.
6925 for (unsigned I = 0; I != VecSize; I += PtrSize)
6926 State.AllocateReg(GPRs);
6927 State.AllocateStack(VecSize, VecAlign);
6928 return false;
6929 }
6930 // No vector registers remain so pass on the stack.
6931 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6932 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6933 return false;
6934 }
6935
6936 // If all GPRS are consumed then we pass the argument fully on the stack.
6937 if (NextRegIndex == GPRs.size()) {
6938 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6939 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6940 return false;
6941 }
6942
6943 // Corner case for 32-bit codegen. We have 2 registers to pass the first
6944 // half of the argument, and then need to pass the remaining half on the
6945 // stack.
6946 if (GPRs[NextRegIndex] == PPC::R9) {
6947 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6948 State.addLoc(
6949 CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6950
6951 const unsigned FirstReg = State.AllocateReg(PPC::R9);
6952 const unsigned SecondReg = State.AllocateReg(PPC::R10);
6953 assert(FirstReg && SecondReg &&
6954 "Allocating R9 or R10 unexpectedly failed.");
6955 State.addLoc(
6956 CCValAssign::getCustomReg(ValNo, ValVT, FirstReg, RegVT, LocInfo));
6957 State.addLoc(
6958 CCValAssign::getCustomReg(ValNo, ValVT, SecondReg, RegVT, LocInfo));
6959 return false;
6960 }
6961
6962 // We have enough GPRs to fully pass the vector argument, and we have
6963 // already consumed any underaligned registers. Start with the custom
6964 // MemLoc and then the custom RegLocs.
6965 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6966 State.addLoc(
6967 CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6968 for (unsigned I = 0; I != VecSize; I += PtrSize) {
6969 const unsigned Reg = State.AllocateReg(GPRs);
6970 assert(Reg && "Failed to allocate register for vararg vector argument");
6971 State.addLoc(
6972 CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6973 }
6974 return false;
6975 }
6976 }
6977 return true;
6978}
6979
6980// So far, this function is only used by LowerFormalArguments_AIX()
6981 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
6982 bool IsPPC64,
6983 bool HasP8Vector,
6984 bool HasVSX) {
6985 assert((IsPPC64 || SVT != MVT::i64) &&
6986 "i64 should have been split for 32-bit codegen.");
6987
6988 switch (SVT) {
6989 default:
6990 report_fatal_error("Unexpected value type for formal argument");
6991 case MVT::i1:
6992 case MVT::i32:
6993 case MVT::i64:
6994 return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6995 case MVT::f32:
6996 return HasP8Vector ? &PPC::VSSRCRegClass : &PPC::F4RCRegClass;
6997 case MVT::f64:
6998 return HasVSX ? &PPC::VSFRCRegClass : &PPC::F8RCRegClass;
6999 case MVT::v4f32:
7000 case MVT::v4i32:
7001 case MVT::v8i16:
7002 case MVT::v16i8:
7003 case MVT::v2i64:
7004 case MVT::v2f64:
7005 case MVT::v1i128:
7006 return &PPC::VRRCRegClass;
7007 }
7008}
7009
7010 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, MVT ValVT,
7011 SelectionDAG &DAG, SDValue ArgValue,
7012 MVT LocVT, const SDLoc &dl) {
7013 assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
7014 assert(ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits());
7015
7016 if (Flags.isSExt())
7017 ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
7018 DAG.getValueType(ValVT));
7019 else if (Flags.isZExt())
7020 ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
7021 DAG.getValueType(ValVT));
7022
7023 return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
7024}
7025
7026static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
7027 const unsigned LASize = FL->getLinkageSize();
7028
7029 if (PPC::GPRCRegClass.contains(Reg)) {
7030 assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
7031 "Reg must be a valid argument register!");
7032 return LASize + 4 * (Reg - PPC::R3);
7033 }
7034
7035 if (PPC::G8RCRegClass.contains(Reg)) {
7036 assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
7037 "Reg must be a valid argument register!");
7038 return LASize + 8 * (Reg - PPC::X3);
7039 }
7040
7041 llvm_unreachable("Only general purpose registers expected.");
7042}
7043
7044// AIX ABI Stack Frame Layout:
7045//
7046// Low Memory +--------------------------------------------+
7047// SP +---> | Back chain | ---+
7048// | +--------------------------------------------+ |
7049// | | Saved Condition Register | |
7050// | +--------------------------------------------+ |
7051// | | Saved Linkage Register | |
7052// | +--------------------------------------------+ | Linkage Area
7053// | | Reserved for compilers | |
7054// | +--------------------------------------------+ |
7055// | | Reserved for binders | |
7056// | +--------------------------------------------+ |
7057// | | Saved TOC pointer | ---+
7058// | +--------------------------------------------+
7059// | | Parameter save area |
7060// | +--------------------------------------------+
7061// | | Alloca space |
7062// | +--------------------------------------------+
7063// | | Local variable space |
7064// | +--------------------------------------------+
7065// | | Float/int conversion temporary |
7066// | +--------------------------------------------+
7067// | | Save area for AltiVec registers |
7068// | +--------------------------------------------+
7069// | | AltiVec alignment padding |
7070// | +--------------------------------------------+
7071// | | Save area for VRSAVE register |
7072// | +--------------------------------------------+
7073// | | Save area for General Purpose registers |
7074// | +--------------------------------------------+
7075// | | Save area for Floating Point registers |
7076// | +--------------------------------------------+
7077// +---- | Back chain |
7078// High Memory +--------------------------------------------+
7079//
7080// Specifications:
7081// AIX 7.2 Assembler Language Reference
7082// Subroutine linkage convention
7083
7084SDValue PPCTargetLowering::LowerFormalArguments_AIX(
7085 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
7086 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7087 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7088
7089 assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
7090 CallConv == CallingConv::Fast) &&
7091 "Unexpected calling convention!");
7092
7093 if (getTargetMachine().Options.GuaranteedTailCallOpt)
7094 report_fatal_error("Tail call support is unimplemented on AIX.");
7095
7096 if (useSoftFloat())
7097 report_fatal_error("Soft float support is unimplemented on AIX.");
7098
7099 const PPCSubtarget &Subtarget = DAG.getSubtarget<PPCSubtarget>();
7100
7101 const bool IsPPC64 = Subtarget.isPPC64();
7102 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7103
7104 // Assign locations to all of the incoming arguments.
7105 SmallVector<CCValAssign, 16> ArgLocs;
7106 MachineFunction &MF = DAG.getMachineFunction();
7107 MachineFrameInfo &MFI = MF.getFrameInfo();
7108 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
7109 AIXCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
7110
7111 const EVT PtrVT = getPointerTy(MF.getDataLayout());
7112 // Reserve space for the linkage area on the stack.
7113 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7114 CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7115 CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
7116
7118 SmallVector<SDValue, 8> MemOps;
7119 for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
7120 CCValAssign &VA = ArgLocs[I++];
7121 MVT LocVT = VA.getLocVT();
7122 MVT ValVT = VA.getValVT();
7123 ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
7124 // For compatibility with the AIX XL compiler, the float args in the
7125 // parameter save area are initialized even if the argument is available
7126 // in register. The caller is required to initialize both the register
7127 // and memory, however, the callee can choose to expect it in either.
7128 // The memloc is dismissed here because the argument is retrieved from
7129 // the register.
7130 if (VA.isMemLoc() && VA.needsCustom() && ValVT.isFloatingPoint())
7131 continue;
7132
7133 auto HandleMemLoc = [&]() {
7134 const unsigned LocSize = LocVT.getStoreSize();
7135 const unsigned ValSize = ValVT.getStoreSize();
7136 assert((ValSize <= LocSize) &&
7137 "Object size is larger than size of MemLoc");
7138 int CurArgOffset = VA.getLocMemOffset();
7139 // Objects are right-justified because AIX is big-endian.
7140 if (LocSize > ValSize)
7141 CurArgOffset += LocSize - ValSize;
7142 // Potential tail calls could cause overwriting of argument stack slots.
7143 const bool IsImmutable =
7144 !(getTargetMachine().Options.GuaranteedTailCallOpt &&
7145 (CallConv == CallingConv::Fast));
7146 int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
7147 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7148 SDValue ArgValue =
7149 DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
7150 InVals.push_back(ArgValue);
7151 };
7152
7153 // Vector arguments to VaArg functions are passed both on the stack, and
7154 // in any available GPRs. Load the value from the stack and add the GPRs
7155 // as live ins.
7156 if (VA.isMemLoc() && VA.needsCustom()) {
7157 assert(ValVT.isVector() && "Unexpected Custom MemLoc type.");
7158 assert(isVarArg && "Only use custom memloc for vararg.");
7159 // Record the ValNo of the custom MemLoc so we can compare it to the
7160 // ValNo of the matching custom RegLocs.
7161 const unsigned OriginalValNo = VA.getValNo();
7162 (void)OriginalValNo;
7163
7164 auto HandleCustomVecRegLoc = [&]() {
7165 assert(I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
7166 "Missing custom RegLoc.");
7167 VA = ArgLocs[I++];
7168 assert(VA.getValVT().isVector() &&
7169 "Unexpected Val type for custom RegLoc.");
7170 assert(VA.getValNo() == OriginalValNo &&
7171 "ValNo mismatch between custom MemLoc and RegLoc.");
7172 MVT::SimpleValueType SVT = VA.getValVT().SimpleTy;
7173 MF.addLiveIn(VA.getLocReg(),
7174 getRegClassForSVT(SVT, IsPPC64, Subtarget.hasP8Vector(),
7175 Subtarget.hasVSX()));
7176 };
7177
7178 HandleMemLoc();
7179 // In 64-bit there will be exactly 2 custom RegLocs that follow, and
7180 // in 32-bit there will be 2 custom RegLocs if we are passing in R9 and
7181 // R10.
7182 HandleCustomVecRegLoc();
7183 HandleCustomVecRegLoc();
7184
7185 // If we are targeting 32-bit, there might be 2 extra custom RegLocs if
7186 // we passed the vector in R5, R6, R7 and R8.
7187 if (I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom()) {
7188 assert(!IsPPC64 &&
7189 "Only 2 custom RegLocs expected for 64-bit codegen.");
7190 HandleCustomVecRegLoc();
7191 HandleCustomVecRegLoc();
7192 }
7193
7194 continue;
7195 }
7196
7197 if (VA.isRegLoc()) {
7198 if (VA.getValVT().isScalarInteger())
7199 FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
7200 else if (VA.getValVT().isFloatingPoint() && !VA.getValVT().isVector()) {
7201 switch (VA.getValVT().SimpleTy) {
7202 default:
7203 report_fatal_error("Unhandled value type for argument.");
7204 case MVT::f32:
7205 FuncInfo->appendParameterType(PPCFunctionInfo::ShortFloatingPoint);
7206 break;
7207 case MVT::f64:
7208 FuncInfo->appendParameterType(PPCFunctionInfo::LongFloatingPoint);
7209 break;
7210 }
7211 } else if (VA.getValVT().isVector()) {
7212 switch (VA.getValVT().SimpleTy) {
7213 default:
7214 report_fatal_error("Unhandled value type for argument.");
7215 case MVT::v16i8:
7216 FuncInfo->appendParameterType(PPCFunctionInfo::VectorChar);
7217 break;
7218 case MVT::v8i16:
7219 FuncInfo->appendParameterType(PPCFunctionInfo::VectorShort);
7220 break;
7221 case MVT::v4i32:
7222 case MVT::v2i64:
7223 case MVT::v1i128:
7224 FuncInfo->appendParameterType(PPCFunctionInfo::VectorInt);
7225 break;
7226 case MVT::v4f32:
7227 case MVT::v2f64:
7228 FuncInfo->appendParameterType(PPCFunctionInfo::VectorFloat);
7229 break;
7230 }
7231 }
7232 }
7233
7234 if (Flags.isByVal() && VA.isMemLoc()) {
7235 const unsigned Size =
7236 alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
7237 PtrByteSize);
7238 const int FI = MF.getFrameInfo().CreateFixedObject(
7239 Size, VA.getLocMemOffset(), /* IsImmutable */ false,
7240 /* IsAliased */ true);
7241 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7242 InVals.push_back(FIN);
7243
7244 continue;
7245 }
7246
7247 if (Flags.isByVal()) {
7248 assert(VA.isRegLoc() && "MemLocs should already be handled.");
7249
7250 const MCPhysReg ArgReg = VA.getLocReg();
7251 const PPCFrameLowering *FL = Subtarget.getFrameLowering();
7252
7253 if (Flags.getNonZeroByValAlign() > PtrByteSize)
7254 report_fatal_error("Over aligned byvals not supported yet.");
7255
7256 const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
7257 const int FI = MF.getFrameInfo().CreateFixedObject(
7258 StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
7259 /* IsAliased */ true);
7260 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7261 InVals.push_back(FIN);
7262
7263 // Add live ins for all the RegLocs for the same ByVal.
7264 const TargetRegisterClass *RegClass =
7265 IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7266
7267 auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
7268 unsigned Offset) {
7269 const Register VReg = MF.addLiveIn(PhysReg, RegClass);
7270 // Since the callers side has left justified the aggregate in the
7271 // register, we can simply store the entire register into the stack
7272 // slot.
7273 SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7274 // The store to the fixed-stack object is needed because accessing a
7275 // field of the ByVal will use a GEP and load. Ideally we will optimize
7276 // to extracting the value from the register directly, and elide the
7277 // stores when the argument's address is not taken, but that will need
7278 // to be future work.
7279 SDValue Store = DAG.getStore(
7280 CopyFrom.getValue(1), dl, CopyFrom,
7281 DAG.getObjectPtrOffset(dl, FIN, TypeSize::getFixed(Offset)),
7282 MachinePointerInfo::getFixedStack(MF, FI, Offset));
7283
7284 MemOps.push_back(Store);
7285 };
7286
7287 unsigned Offset = 0;
7288 HandleRegLoc(VA.getLocReg(), Offset);
7289 Offset += PtrByteSize;
7290 for (; Offset != StackSize && ArgLocs[I].isRegLoc();
7291 Offset += PtrByteSize) {
7292 assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7293 "RegLocs should be for ByVal argument.");
7294
7295 const CCValAssign RL = ArgLocs[I++];
7296 HandleRegLoc(RL.getLocReg(), Offset);
7298 }
7299
7300 if (Offset != StackSize) {
7301 assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7302 "Expected MemLoc for remaining bytes.");
7303 assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
7304 // Consume the MemLoc. The InVal has already been emitted, so nothing
7305 // more needs to be done.
7306 ++I;
7307 }
7308
7309 continue;
7310 }
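// Illustrative example (64-bit): a 24-byte ByVal with only X9 and X10 still
// free yields two RegLocs covering the first 16 bytes and one MemLoc for
// the remaining 8, all carrying the same ValNo.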
7311
7312 if (VA.isRegLoc() && !VA.needsCustom()) {
7313 MVT::SimpleValueType SVT = ValVT.SimpleTy;
7314 Register VReg =
7315 MF.addLiveIn(VA.getLocReg(),
7316 getRegClassForSVT(SVT, IsPPC64, Subtarget.hasP8Vector(),
7317 Subtarget.hasVSX()));
7318 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7319 if (ValVT.isScalarInteger() &&
7320 (ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits())) {
7321 ArgValue =
7322 truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
7323 }
7324 InVals.push_back(ArgValue);
7325 continue;
7326 }
7327 if (VA.isMemLoc()) {
7328 HandleMemLoc();
7329 continue;
7330 }
7331 }
7332
7333 // On AIX a minimum of 8 words is saved to the parameter save area.
7334 const unsigned MinParameterSaveArea = 8 * PtrByteSize;
7335 // Area that is at least reserved in the caller of this function.
7336 unsigned CallerReservedArea = std::max<unsigned>(
7337 CCInfo.getStackSize(), LinkageSize + MinParameterSaveArea);
7338
7339 // Set the size that is at least reserved in caller of this function. Tail
7340 // call optimized function's reserved stack space needs to be aligned so
7341 // that taking the difference between two stack areas will result in an
7342 // aligned stack.
7343 CallerReservedArea =
7344 EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
7345 FuncInfo->setMinReservedArea(CallerReservedArea);
7346
7347 if (isVarArg) {
7348 FuncInfo->setVarArgsFrameIndex(
7349 MFI.CreateFixedObject(PtrByteSize, CCInfo.getStackSize(), true));
7350 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
7351
7352 static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7353 PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7354
7355 static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7356 PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7357 const unsigned NumGPArgRegs = std::size(IsPPC64 ? GPR_64 : GPR_32);
7358
7359 // The fixed integer arguments of a variadic function are stored to the
7360 // VarArgsFrameIndex on the stack so that they may be loaded by
7361 // dereferencing the result of va_next.
7362 for (unsigned GPRIndex =
7363 (CCInfo.getStackSize() - LinkageSize) / PtrByteSize;
7364 GPRIndex < NumGPArgRegs; ++GPRIndex) {
7365
7366 const Register VReg =
7367 IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
7368 : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
7369
7370 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
7371 SDValue Store =
7372 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
7373 MemOps.push_back(Store);
7374 // Increment the address for the next argument to store.
7375 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
7376 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
7377 }
7378 }
7379
7380 if (!MemOps.empty())
7381 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
7382
7383 return Chain;
7384}
7385
7386SDValue PPCTargetLowering::LowerCall_AIX(
7387 SDValue Chain, SDValue Callee, CallFlags CFlags,
7388 const SmallVectorImpl<ISD::OutputArg> &Outs,
7389 const SmallVectorImpl<SDValue> &OutVals,
7390 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7391 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
7392 const CallBase *CB) const {
7393 // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
7394 // AIX ABI stack frame layout.
7395
7396 assert((CFlags.CallConv == CallingConv::C ||
7397 CFlags.CallConv == CallingConv::Cold ||
7398 CFlags.CallConv == CallingConv::Fast) &&
7399 "Unexpected calling convention!");
7400
7401 if (CFlags.IsPatchPoint)
7402 report_fatal_error("This call type is unimplemented on AIX.");
7403
7404 const PPCSubtarget &Subtarget = DAG.getSubtarget<PPCSubtarget>();
7405
7408 AIXCCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
7409 *DAG.getContext());
7410
7411 // Reserve space for the linkage save area (LSA) on the stack.
7412 // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
7413 // [SP][CR][LR][2 x reserved][TOC].
7414 // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
7415 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7416 const bool IsPPC64 = Subtarget.isPPC64();
7417 const EVT PtrVT = getPointerTy(DAG.getDataLayout());
7418 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7419 CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7420 CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
7421
7422 // The prolog code of the callee may store up to 8 GPR argument registers to
7423 // the stack, allowing va_start to index over them in memory if the callee
7424 // is variadic.
7425 // Because we cannot tell if this is needed on the caller side, we have to
7426 // conservatively assume that it is needed. As such, make sure we have at
7427 // least enough stack space for the caller to store the 8 GPRs.
7428 const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
7429 const unsigned NumBytes = std::max<unsigned>(
7430 LinkageSize + MinParameterSaveAreaSize, CCInfo.getStackSize());
7431
7432 // Adjust the stack pointer for the new arguments...
7433 // These operations are automatically eliminated by the prolog/epilog pass.
7434 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
7435 SDValue CallSeqStart = Chain;
7436
7437 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
7438 SmallVector<SDValue, 8> MemOpChains;
7439
7440 // Set up a copy of the stack pointer for loading and storing any
7441 // arguments that may not fit in the registers available for argument
7442 // passing.
7443 const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
7444 : DAG.getRegister(PPC::R1, MVT::i32);
7445
7446 for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
7447 const unsigned ValNo = ArgLocs[I].getValNo();
7448 SDValue Arg = OutVals[ValNo];
7449 ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
7450
7451 if (Flags.isByVal()) {
7452 const unsigned ByValSize = Flags.getByValSize();
7453
7454 // Nothing to do for zero-sized ByVals on the caller side.
7455 if (!ByValSize) {
7456 ++I;
7457 continue;
7458 }
7459
7460 auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
7461 return DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain,
7462 (LoadOffset != 0)
7463 ? DAG.getObjectPtrOffset(
7464 dl, Arg, TypeSize::getFixed(LoadOffset))
7465 : Arg,
7466 MachinePointerInfo(), VT);
7467 };
7468
7469 unsigned LoadOffset = 0;
7470
7471 // Initialize registers, which are fully occupied by the by-val argument.
7472 while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
7473 SDValue Load = GetLoad(PtrVT, LoadOffset);
7474 MemOpChains.push_back(Load.getValue(1));
7475 LoadOffset += PtrByteSize;
7476 const CCValAssign &ByValVA = ArgLocs[I++];
7477 assert(ByValVA.getValNo() == ValNo &&
7478 "Unexpected location for pass-by-value argument.");
7479 RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
7480 }
7481
7482 if (LoadOffset == ByValSize)
7483 continue;
7484
7485 // There must be one more loc to handle the remainder.
7486 assert(ArgLocs[I].getValNo() == ValNo &&
7487 "Expected additional location for by-value argument.");
7488
7489 if (ArgLocs[I].isMemLoc()) {
7490 assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
7491 const CCValAssign &ByValVA = ArgLocs[I++];
7492 ISD::ArgFlagsTy MemcpyFlags = Flags;
7493 // Only memcpy the bytes that don't pass in register.
7494 MemcpyFlags.setByValSize(ByValSize - LoadOffset);
7495 Chain = CallSeqStart = createMemcpyOutsideCallSeq(
7496 (LoadOffset != 0) ? DAG.getObjectPtrOffset(
7497 dl, Arg, TypeSize::getFixed(LoadOffset))
7498 : Arg,
7499 DAG.getObjectPtrOffset(
7500 dl, StackPtr, TypeSize::getFixed(ByValVA.getLocMemOffset())),
7501 CallSeqStart, MemcpyFlags, DAG, dl);
7502 continue;
7503 }
7504
7505 // Initialize the final register residue.
7506 // Any residue that occupies the final by-val arg register must be
7507 // left-justified on AIX. Loads must be a power-of-2 size and cannot be
7508 // larger than the ByValSize. For example: a 7 byte by-val arg requires 4,
7509 // 2 and 1 byte loads.
7510 const unsigned ResidueBytes = ByValSize % PtrByteSize;
7511 assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
7512 "Unexpected register residue for by-value argument.");
7513 SDValue ResidueVal;
7514 for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
7515 const unsigned N = llvm::bit_floor(ResidueBytes - Bytes);
7516 const MVT VT =
7517 N == 1 ? MVT::i8
7518 : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
7519 SDValue Load = GetLoad(VT, LoadOffset);
7520 MemOpChains.push_back(Load.getValue(1));
7521 LoadOffset += N;
7522 Bytes += N;
7523
7524 // By-val arguments are passed left-justified in register.
7525 // Every load here needs to be shifted, otherwise a full register load
7526 // should have been used.
7527 assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
7528 "Unexpected load emitted during handling of pass-by-value "
7529 "argument.");
7530 unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
7531 EVT ShiftAmountTy =
7532 getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
7533 SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
7534 SDValue ShiftedLoad =
7535 DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
7536 ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
7537 ShiftedLoad)
7538 : ShiftedLoad;
7539 }
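// Worked example (64-bit): a 7-byte residue is loaded as i32, i16 and i8
// pieces shifted left by 32, 16 and 8 bits respectively and OR'ed together,
// leaving the 7 bytes left-justified in the register with a zero low byte.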
7540
7541 const CCValAssign &ByValVA = ArgLocs[I++];
7542 RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
7543 continue;
7544 }
7545
7546 CCValAssign &VA = ArgLocs[I++];
7547 const MVT LocVT = VA.getLocVT();
7548 const MVT ValVT = VA.getValVT();
7549
7550 switch (VA.getLocInfo()) {
7551 default:
7552 report_fatal_error("Unexpected argument extension type.");
7553 case CCValAssign::Full:
7554 break;
7555 case CCValAssign::ZExt:
7556 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7557 break;
7558 case CCValAssign::SExt:
7559 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7560 break;
7561 }
7562
7563 if (VA.isRegLoc() && !VA.needsCustom()) {
7564 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
7565 continue;
7566 }
7567
7568 // Vector arguments passed to VarArg functions need custom handling when
7569 // they are passed (at least partially) in GPRs.
7570 if (VA.isMemLoc() && VA.needsCustom() && ValVT.isVector()) {
7571 assert(CFlags.IsVarArg && "Custom MemLocs only used for Vector args.");
7572 // Store value to its stack slot.
7573 SDValue PtrOff =
7574 DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7575 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7576 SDValue Store =
7577 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
7578 MemOpChains.push_back(Store);
7579 const unsigned OriginalValNo = VA.getValNo();
7580 // Then load the GPRs from the stack
7581 unsigned LoadOffset = 0;
7582 auto HandleCustomVecRegLoc = [&]() {
7583 assert(I != E && "Unexpected end of CCvalAssigns.");
7584 assert(ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
7585 "Expected custom RegLoc.");
7586 CCValAssign RegVA = ArgLocs[I++];
7587 assert(RegVA.getValNo() == OriginalValNo &&
7588 "Custom MemLoc ValNo and custom RegLoc ValNo must match.");
7589 SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
7590 DAG.getConstant(LoadOffset, dl, PtrVT));
7591 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Add, MachinePointerInfo());
7592 MemOpChains.push_back(Load.getValue(1));
7593 RegsToPass.push_back(std::make_pair(RegVA.getLocReg(), Load));
7594 LoadOffset += PtrByteSize;
7595 };
7596
7597 // In 64-bit there will be exactly 2 custom RegLocs that follow, and
7598 // in 32-bit there will be 2 custom RegLocs if we are passing in R9 and
7599 // R10.
7600 HandleCustomVecRegLoc();
7601 HandleCustomVecRegLoc();
7602
7603 if (I != E && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
7604 ArgLocs[I].getValNo() == OriginalValNo) {
7605 assert(!IsPPC64 &&
7606 "Only 2 custom RegLocs expected for 64-bit codegen.");
7607 HandleCustomVecRegLoc();
7608 HandleCustomVecRegLoc();
7609 }
7610
7611 continue;
7612 }
7613
7614 if (VA.isMemLoc()) {
7615 SDValue PtrOff =
7616 DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7617 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7618 MemOpChains.push_back(
7619 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
7620
7621 continue;
7622 }
7623
7624 if (!ValVT.isFloatingPoint())
7626 "Unexpected register handling for calling convention.");
7627
7628 // Custom handling is used for GPR initializations for vararg float
7629 // arguments.
7630 assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
7631 LocVT.isInteger() &&
7632 "Custom register handling only expected for VarArg.");
7633
7634 SDValue ArgAsInt =
7635 DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
7636
7637 if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
7638 // f32 in 32-bit GPR
7639 // f64 in 64-bit GPR
7640 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7641 else if (Arg.getValueType().getFixedSizeInBits() <
7642 LocVT.getFixedSizeInBits())
7643 // f32 in 64-bit GPR.
7644 RegsToPass.push_back(std::make_pair(
7645 VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
7646 else {
7647 // f64 in two 32-bit GPRs
7648 // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
7649 assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
7650 "Unexpected custom register for argument!");
7651 CCValAssign &GPR1 = VA;
7652 SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7653 DAG.getConstant(32, dl, MVT::i8));
7654 RegsToPass.push_back(std::make_pair(
7655 GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
7656
7657 if (I != E) {
7658 // If only 1 GPR was available, there will only be one custom GPR and
7659 // the argument will also pass in memory.
7660 CCValAssign &PeekArg = ArgLocs[I];
7661 if (PeekArg.isRegLoc() && PeekArg.getValNo() == VA.getValNo()) {
7662 assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
7663 CCValAssign &GPR2 = ArgLocs[I++];
7664 RegsToPass.push_back(std::make_pair(
7665 GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7666 }
7667 }
7668 }
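// For example: passing the vararg double 12.0 (0x4028000000000000) this way
// puts 0x40280000 in the first GPR and 0x00000000 in the second.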
7669 }
7670
7671 if (!MemOpChains.empty())
7672 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7673
7674 // For indirect calls, we need to save the TOC base to the stack for
7675 // restoration after the call.
7676 if (CFlags.IsIndirect) {
7677 assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7678 const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7679 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7680 const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7681 const unsigned TOCSaveOffset =
7682 Subtarget.getFrameLowering()->getTOCSaveOffset();
7683
7684 setUsesTOCBasePtr(DAG);
7685 SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7686 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7687 SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7688 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7689 Chain = DAG.getStore(
7690 Val.getValue(1), dl, Val, AddPtr,
7691 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7692 }
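// Given the linkage-area layout described above ([SP][CR][LR][2 x
// reserved][TOC]), this store lands at SP+20 on 32-bit AIX and SP+40 on
// 64-bit AIX, from where the TOC pointer is reloaded after the call.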
7693
7694 // Build a sequence of copy-to-reg nodes chained together with token chain
7695 // and flag operands which copy the outgoing args into the appropriate regs.
7696 SDValue InGlue;
7697 for (auto Reg : RegsToPass) {
7698 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InGlue);
7699 InGlue = Chain.getValue(1);
7700 }
7701
7702 const int SPDiff = 0;
7703 return FinishCall(CFlags, dl, DAG, RegsToPass, InGlue, Chain, CallSeqStart,
7704 Callee, SPDiff, NumBytes, Ins, InVals, CB);
7705}
7706
7707bool
7708PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7709 MachineFunction &MF, bool isVarArg,
7710 const SmallVectorImpl<ISD::OutputArg> &Outs,
7711 LLVMContext &Context) const {
7712 SmallVector<CCValAssign, 16> RVLocs;
7713 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7714 return CCInfo.CheckReturn(
7715 Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7716 ? RetCC_PPC_Cold
7717 : RetCC_PPC);
7718}
7719
7720SDValue
7721PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7722 bool isVarArg,
7723 const SmallVectorImpl<ISD::OutputArg> &Outs,
7724 const SmallVectorImpl<SDValue> &OutVals,
7725 const SDLoc &dl, SelectionDAG &DAG) const {
7726 SmallVector<CCValAssign, 16> RVLocs;
7727 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7728 *DAG.getContext());
7729 CCInfo.AnalyzeReturn(Outs,
7730 (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7731 ? RetCC_PPC_Cold
7732 : RetCC_PPC);
7733
7734 SDValue Glue;
7735 SmallVector<SDValue, 4> RetOps(1, Chain);
7736
7737 // Copy the result values into the output registers.
7738 for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7739 CCValAssign &VA = RVLocs[i];
7740 assert(VA.isRegLoc() && "Can only return in registers!");
7741
7742 SDValue Arg = OutVals[RealResIdx];
7743
7744 switch (VA.getLocInfo()) {
7745 default: llvm_unreachable("Unknown loc info!");
7746 case CCValAssign::Full: break;
7747 case CCValAssign::AExt:
7748 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7749 break;
7750 case CCValAssign::ZExt:
7751 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7752 break;
7753 case CCValAssign::SExt:
7754 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7755 break;
7756 }
7757 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7758 bool isLittleEndian = Subtarget.isLittleEndian();
7759 // Legalize ret f64 -> ret 2 x i32.
7760 SDValue SVal =
7761 DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7762 DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7763 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Glue);
7764 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7765 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7766 DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7767 Glue = Chain.getValue(1);
7768 VA = RVLocs[++i]; // skip ahead to next loc
7769 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Glue);
7770 } else
7771 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Glue);
7772 Glue = Chain.getValue(1);
7773 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7774 }
7775
7776 RetOps[0] = Chain; // Update chain.
7777
7778 // Add the glue if we have it.
7779 if (Glue.getNode())
7780 RetOps.push_back(Glue);
7781
7782 return DAG.getNode(PPCISD::RET_GLUE, dl, MVT::Other, RetOps);
7783}
7784
7785SDValue
7786PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7787 SelectionDAG &DAG) const {
7788 SDLoc dl(Op);
7789
7790 // Get the correct type for integers.
7791 EVT IntVT = Op.getValueType();
7792
7793 // Get the inputs.
7794 SDValue Chain = Op.getOperand(0);
7795 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7796 // Build a DYNAREAOFFSET node.
7797 SDValue Ops[2] = {Chain, FPSIdx};
7798 SDVTList VTs = DAG.getVTList(IntVT);
7799 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7800}
7801
7802SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7803 SelectionDAG &DAG) const {
7804 // When we pop the dynamic allocation we need to restore the SP link.
7805 SDLoc dl(Op);
7806
7807 // Get the correct type for pointers.
7808 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7809
7810 // Construct the stack pointer operand.
7811 bool isPPC64 = Subtarget.isPPC64();
7812 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7813 SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7814
7815 // Get the operands for the STACKRESTORE.
7816 SDValue Chain = Op.getOperand(0);
7817 SDValue SaveSP = Op.getOperand(1);
7818
7819 // Load the old link SP.
7820 SDValue LoadLinkSP =
7821 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7822
7823 // Restore the stack pointer.
7824 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7825
7826 // Store the old link SP.
7827 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7828}
7829
7830SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7831 MachineFunction &MF = DAG.getMachineFunction();
7832 bool isPPC64 = Subtarget.isPPC64();
7833 EVT PtrVT = getPointerTy(MF.getDataLayout());
7834
7835 // Get the current return address save index. It is allocated lazily, on
7836 // the first call to this function.
7837 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7838 int RASI = FI->getReturnAddrSaveIndex();
7839
7840 // If the return address save index hasn't been defined yet.
7841 if (!RASI) {
7842 // Find out the fixed offset of the return address save area.
7843 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7844 // Allocate the frame index for the return address save area.
7845 RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7846 // Save the result.
7847 FI->setReturnAddrSaveIndex(RASI);
7848 }
7849 return DAG.getFrameIndex(RASI, PtrVT);
7850}
7851
7852SDValue
7853PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7854 MachineFunction &MF = DAG.getMachineFunction();
7855 bool isPPC64 = Subtarget.isPPC64();
7856 EVT PtrVT = getPointerTy(MF.getDataLayout());
7857
7858 // Get current frame pointer save index. The users of this index will be
7859 // primarily DYNALLOC instructions.
7860 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7861 int FPSI = FI->getFramePointerSaveIndex();
7862
7863 // If the frame pointer save index hasn't been defined yet.
7864 if (!FPSI) {
7866 // Find out the fixed offset of the frame pointer save area.
7867 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7868 // Allocate the frame index for the frame pointer save area.
7868 FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7869 // Save the result.
7870 FI->setFramePointerSaveIndex(FPSI);
7871 }
7872 return DAG.getFrameIndex(FPSI, PtrVT);
7873}
7874
7875SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7876 SelectionDAG &DAG) const {
7877 MachineFunction &MF = DAG.getMachineFunction();
7878 // Get the inputs.
7879 SDValue Chain = Op.getOperand(0);
7880 SDValue Size = Op.getOperand(1);
7881 SDLoc dl(Op);
7882
7883 // Get the correct type for pointers.
7884 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7885 // Negate the size.
7886 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7887 DAG.getConstant(0, dl, PtrVT), Size);
7888 // Construct a node for the frame pointer save index.
7889 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7890 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7891 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7892 if (hasInlineStackProbe(MF))
7893 return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7894 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7895}
7896
7897SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7898 SelectionDAG &DAG) const {
7899 MachineFunction &MF = DAG.getMachineFunction();
7900
7901 bool isPPC64 = Subtarget.isPPC64();
7902 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7903
7904 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7905 return DAG.getFrameIndex(FI, PtrVT);
7906}
7907
7908SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7909 SelectionDAG &DAG) const {
7910 SDLoc DL(Op);
7911 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7912 DAG.getVTList(MVT::i32, MVT::Other),
7913 Op.getOperand(0), Op.getOperand(1));
7914}
7915
7916SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7917 SelectionDAG &DAG) const {
7918 SDLoc DL(Op);
7919 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7920 Op.getOperand(0), Op.getOperand(1));
7921}
7922
7923SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7924 if (Op.getValueType().isVector())
7925 return LowerVectorLoad(Op, DAG);
7926
7927 assert(Op.getValueType() == MVT::i1 &&
7928 "Custom lowering only for i1 loads");
7929
7930 // First, load 8 bits into 32 bits, then truncate to 1 bit.
7931
7932 SDLoc dl(Op);
7933 LoadSDNode *LD = cast<LoadSDNode>(Op);
7934
7935 SDValue Chain = LD->getChain();
7936 SDValue BasePtr = LD->getBasePtr();
7937 MachineMemOperand *MMO = LD->getMemOperand();
7938
7939 SDValue NewLD =
7940 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7941 BasePtr, MVT::i8, MMO);
7942 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7943
7944 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7945 return DAG.getMergeValues(Ops, dl);
7946}
7947
7948SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7949 if (Op.getOperand(1).getValueType().isVector())
7950 return LowerVectorStore(Op, DAG);
7951
7952 assert(Op.getOperand(1).getValueType() == MVT::i1 &&
7953 "Custom lowering only for i1 stores");
7954
7955 // First, zero extend to 32 bits, then use a truncating store to 8 bits.
7956
7957 SDLoc dl(Op);
7958 StoreSDNode *ST = cast<StoreSDNode>(Op);
7959
7960 SDValue Chain = ST->getChain();
7961 SDValue BasePtr = ST->getBasePtr();
7962 SDValue Value = ST->getValue();
7963 MachineMemOperand *MMO = ST->getMemOperand();
7964
7965 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
7966 Value);
7967 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
7968}
7969
7970// FIXME: Remove this once the ANDI glue bug is fixed:
7971SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
7972 assert(Op.getValueType() == MVT::i1 &&
7973 "Custom lowering only for i1 results");
7974
7975 SDLoc DL(Op);
7976 return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
7977}
7978
7979SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
7980 SelectionDAG &DAG) const {
7981
7982 // Implements a vector truncate that fits in a vector register as a shuffle.
7983 // We want to legalize vector truncates down to where the source fits in
7984 // a vector register (and target is therefore smaller than vector register
7985 // size). At that point legalization will try to custom lower the sub-legal
7986 // result and get here - where we can contain the truncate as a single target
7987 // operation.
7988
7989 // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7990 // <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7991 //
7992 // We will implement it for big-endian ordering as this (where x denotes
7993 // undefined):
7994 // < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7995 // < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7996 //
7997 // The same operation in little-endian ordering will be:
7998 // <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7999 // <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
8000
8001 EVT TrgVT = Op.getValueType();
8002 assert(TrgVT.isVector() && "Vector type expected.");
8003 unsigned TrgNumElts = TrgVT.getVectorNumElements();
8004 EVT EltVT = TrgVT.getVectorElementType();
8005 if (!isOperationCustom(Op.getOpcode(), TrgVT) ||
8006 TrgVT.getSizeInBits() > 128 || !isPowerOf2_32(TrgNumElts) ||
8007 !llvm::has_single_bit<uint32_t>(EltVT.getSizeInBits()))
8008 return SDValue();
8009
8010 SDValue N1 = Op.getOperand(0);
8011 EVT SrcVT = N1.getValueType();
8012 unsigned SrcSize = SrcVT.getSizeInBits();
8013 if (SrcSize > 256 || !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
8014 !llvm::has_single_bit<uint32_t>(
8015 SrcVT.getVectorElementType().getSizeInBits()))
8016 return SDValue();
8017 if (SrcSize == 256 && SrcVT.getVectorNumElements() < 2)
8018 return SDValue();
8019
8020 unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8021 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8022
8023 SDLoc DL(Op);
8024 SDValue Op1, Op2;
8025 if (SrcSize == 256) {
8026 EVT VecIdxTy = getVectorIdxTy(DAG.getDataLayout());
8027 EVT SplitVT =
8028 SrcVT.getHalfNumVectorElementsVT(*DAG.getContext());
8029 unsigned SplitNumElts = SplitVT.getVectorNumElements();
8030 Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
8031 DAG.getConstant(0, DL, VecIdxTy));
8032 Op2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
8033 DAG.getConstant(SplitNumElts, DL, VecIdxTy));
8034 }
8035 else {
8036 Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
8037 Op2 = DAG.getUNDEF(WideVT);
8038 }
8039
8040 // First list the elements we want to keep.
8041 unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
8042 SmallVector<int, 16> ShuffV;
8043 if (Subtarget.isLittleEndian())
8044 for (unsigned i = 0; i < TrgNumElts; ++i)
8045 ShuffV.push_back(i * SizeMult);
8046 else
8047 for (unsigned i = 1; i <= TrgNumElts; ++i)
8048 ShuffV.push_back(i * SizeMult - 1);
8049
8050 // Populate the remaining elements with undefs.
8051 for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
8052 // ShuffV.push_back(i + WideNumElts);
8053 ShuffV.push_back(WideNumElts + 1);
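// For example: truncating v4i32 to v4i8 gives SizeMult = 4 and
// WideNumElts = 16, so ShuffV is <0,4,8,12> on little-endian (or
// <3,7,11,15> on big-endian) followed by twelve filler indices whose lanes
// are don't-care.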
8054
8055 Op1 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op1);
8056 Op2 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op2);
8057 return DAG.getVectorShuffle(WideVT, DL, Op1, Op2, ShuffV);
8058}
8059
8060 /// LowerSELECT_CC - Lower floating-point select_cc's into an fsel instruction
8061 /// when possible.
8062SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
8063 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
8064 EVT ResVT = Op.getValueType();
8065 EVT CmpVT = Op.getOperand(0).getValueType();
8066 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
8067 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
8068 SDLoc dl(Op);
8069
8070 // Without power9-vector, we don't have a native instruction for f128 comparison.
8071 // The following transformation to a libcall is needed for setcc:
8072 // select_cc lhs, rhs, tv, fv, cc -> select_cc (setcc cc, x, y), 0, tv, fv, NE
8073 if (!Subtarget.hasP9Vector() && CmpVT == MVT::f128) {
8074 SDValue Z = DAG.getSetCC(
8075 dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT),
8076 LHS, RHS, CC);
8077 SDValue Zero = DAG.getConstant(0, dl, Z.getValueType());
8078 return DAG.getSelectCC(dl, Z, Zero, TV, FV, ISD::SETNE);
8079 }
8080
8081 // Not FP, or using SPE? Not a fsel.
8082 if (!CmpVT.isFloatingPoint() || !TV.getValueType().isFloatingPoint() ||
8083 Subtarget.hasSPE())
8084 return Op;
8085
8086 SDNodeFlags Flags = Op.getNode()->getFlags();
8087
8088 // We have xsmaxc[dq]p/xsminc[dq]p which are OK to emit even in the
8089 // presence of infinities.
8090 if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
8091 switch (CC) {
8092 default:
8093 break;
8094 case ISD::SETOGT:
8095 case ISD::SETGT:
8096 return DAG.getNode(PPCISD::XSMAXC, dl, Op.getValueType(), LHS, RHS);
8097 case ISD::SETOLT:
8098 case ISD::SETLT:
8099 return DAG.getNode(PPCISD::XSMINC, dl, Op.getValueType(), LHS, RHS);
8100 }
8101 }
8102
8103 // We might be able to do better than this under some circumstances, but in
8104 // general, fsel-based lowering of select is a finite-math-only optimization.
8105 // For more information, see section F.3 of the 2.06 ISA specification.
8106 // With ISA 3.0's f128 support, fsel still cannot be used, so bail out for
8106 // f128 as well.
8107 if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
8108 (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()) ||
8109 ResVT == MVT::f128)
8110 return Op;
8111
8112 // If the RHS of the comparison is a 0.0, we don't need to do the
8113 // subtraction at all.
8114 SDValue Sel1;
8115 if (isFloatingPointZero(RHS))
8116 switch (CC) {
8117 default: break; // SETUO etc aren't handled by fsel.
8118 case ISD::SETNE:
8119 std::swap(TV, FV);
8120 [[fallthrough]];
8121 case ISD::SETEQ:
8122 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
8123 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8124 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
8125 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
8126 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
8127 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8128 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
8129 case ISD::SETULT:
8130 case ISD::SETLT:
8131 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
8132 [[fallthrough]];
8133 case ISD::SETOGE:
8134 case ISD::SETGE:
8135 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
8136 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8137 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
8138 case ISD::SETUGT:
8139 case ISD::SETGT:
8140 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
8141 [[fallthrough]];
8142 case ISD::SETOLE:
8143 case ISD::SETLE:
8144 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
8145 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8146 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8147 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
8148 }
8149
8150 SDValue Cmp;
8151 switch (CC) {
8152 default: break; // SETUO etc aren't handled by fsel.
8153 case ISD::SETNE:
8154 std::swap(TV, FV);
8155 [[fallthrough]];
8156 case ISD::SETEQ:
8157 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8158 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
8159 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8160 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8161 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
8162 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
8163 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8164 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
8165 case ISD::SETULT:
8166 case ISD::SETLT:
8167 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8168 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
8169 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8170 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
8171 case ISD::SETOGE:
8172 case ISD::SETGE:
8173 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8174 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
8175 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8176 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8177 case ISD::SETUGT:
8178 case ISD::SETGT:
8179 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
8180 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
8181 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8182 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
8183 case ISD::SETOLE:
8184 case ISD::SETLE:
8185 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
8186 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
8187 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8188 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8189 }
8190 return Op;
8191}
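As a plain scalar model of the fsel-based lowering above (an illustrative sketch, not LLVM code; fselModel and selectCCSetLT are hypothetical names): fsel picks its second operand when the first is greater than or equal to zero, so setge compares map directly while setlt compares subtract and swap the select operands.

// fsel semantics: return A when X >= 0.0, otherwise B.
static double fselModel(double X, double A, double B) {
  return X >= 0.0 ? A : B;
}

// select_cc(LHS, RHS, TV, FV, SETGE) -> fsel(LHS - RHS, TV, FV)
// select_cc(LHS, RHS, TV, FV, SETLT) -> fsel(LHS - RHS, FV, TV), operands swapped
static double selectCCSetLT(double LHS, double RHS, double TV, double FV) {
  return fselModel(LHS - RHS, FV, TV);
}

As the comments above note, this mapping is only valid under the no-NaNs/no-infs conditions checked before the switch.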
8192
8193static unsigned getPPCStrictOpcode(unsigned Opc) {
8194 switch (Opc) {
8195 default:
8196 llvm_unreachable("No strict version of this opcode!");
8197 case PPCISD::FCTIDZ:
8198 return PPCISD::STRICT_FCTIDZ;
8199 case PPCISD::FCTIWZ:
8200 return PPCISD::STRICT_FCTIWZ;
8201 case PPCISD::FCTIDUZ:
8202 return PPCISD::STRICT_FCTIDUZ;
8203 case PPCISD::FCTIWUZ:
8204 return PPCISD::STRICT_FCTIWUZ;
8205 case PPCISD::FCFID:
8206 return PPCISD::STRICT_FCFID;
8207 case PPCISD::FCFIDU:
8208 return PPCISD::STRICT_FCFIDU;
8209 case PPCISD::FCFIDS:
8210 return PPCISD::STRICT_FCFIDS;
8211 case PPCISD::FCFIDUS:
8212 return PPCISD::STRICT_FCFIDUS;
8213 }
8214}
8215
8216 static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG,
8217 const PPCSubtarget &Subtarget) {
8218 SDLoc dl(Op);
8219 bool IsStrict = Op->isStrictFPOpcode();
8220 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8221 Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8222
8223 // TODO: Any other flags to propagate?
8224 SDNodeFlags Flags;
8225 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8226
8227 // For strict nodes, source is the second operand.
8228 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8229 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
8230 MVT DestTy = Op.getSimpleValueType();
8231 assert(Src.getValueType().isFloatingPoint() &&
8232 (DestTy == MVT::i8 || DestTy == MVT::i16 || DestTy == MVT::i32 ||
8233 DestTy == MVT::i64) &&
8234 "Invalid FP_TO_INT types");
8235 if (Src.getValueType() == MVT::f32) {
8236 if (IsStrict) {
8237 Src =
8238 DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
8239 DAG.getVTList(MVT::f64, MVT::Other), {Chain, Src}, Flags);
8240 Chain = Src.getValue(1);
8241 } else
8242 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
8243 }
8244 if ((DestTy == MVT::i8 || DestTy == MVT::i16) && Subtarget.hasP9Vector())
8245 DestTy = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
8246 unsigned Opc = ISD::DELETED_NODE;
8247 switch (DestTy.SimpleTy) {
8248 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
8249 case MVT::i32:
8250 Opc = IsSigned ? PPCISD::FCTIWZ
8251 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ);
8252 break;
8253 case MVT::i64:
8254 assert((IsSigned || Subtarget.hasFPCVT()) &&
8255 "i64 FP_TO_UINT is supported only with FPCVT");
8256 Opc = IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ;
8257 }
8258 EVT ConvTy = Src.getValueType() == MVT::f128 ? MVT::f128 : MVT::f64;
8259 SDValue Conv;
8260 if (IsStrict) {
8261 Opc = getPPCStrictOpcode(Opc);
8262 Conv = DAG.getNode(Opc, dl, DAG.getVTList(ConvTy, MVT::Other), {Chain, Src},
8263 Flags);
8264 } else {
8265 Conv = DAG.getNode(Opc, dl, ConvTy, Src);
8266 }
8267 return Conv;
8268}
8269
8270void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
8271 SelectionDAG &DAG,
8272 const SDLoc &dl) const {
8273 SDValue Tmp = convertFPToInt(Op, DAG, Subtarget);
8274 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8275 Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8276 bool IsStrict = Op->isStrictFPOpcode();
8277
8278 // Convert the FP value to an int value through memory.
8279 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
8280 (IsSigned || Subtarget.hasFPCVT());
8281 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
8282 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
8283 MachinePointerInfo MPI =
8284 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
8285
8286 // Emit a store to the stack slot.
8287 SDValue Chain = IsStrict ? Tmp.getValue(1) : DAG.getEntryNode();
8288 Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
8289 if (i32Stack) {
8290 MachineFunction &MF = DAG.getMachineFunction();
8291 Alignment = Align(4);
8292 MachineMemOperand *MMO =
8293 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
8294 SDValue Ops[] = { Chain, Tmp, FIPtr };
8295 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
8296 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
8297 } else
8298 Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment);
8299
8300 // Result is a load from the stack slot. If loading 4 bytes, make sure to
8301 // add in a bias on big endian.
8302 if (Op.getValueType() == MVT::i32 && !i32Stack) {
8303 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
8304 DAG.getConstant(4, dl, FIPtr.getValueType()));
8305 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
8306 }
8307
8308 RLI.Chain = Chain;
8309 RLI.Ptr = FIPtr;
8310 RLI.MPI = MPI;
8311 RLI.Alignment = Alignment;
8312}
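The offset handling above can be summarized with a small sketch (illustrative only; i32ResultOffset is a hypothetical name): when a 64-bit conversion result is stored to an f64 stack slot but only an i32 is loaded back, the meaningful word sits at byte offset 4 on big-endian subtargets and offset 0 on little-endian ones.

#include <cstdint>

// Byte offset of the i32 result within an 8-byte slot holding an i64 value.
static uint64_t i32ResultOffset(bool IsLittleEndian) {
  return IsLittleEndian ? 0 : 4;
}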
8313
8314/// Custom lowers floating point to integer conversions to use
8315/// the direct move instructions available in ISA 2.07 to avoid the
8316/// need for load/store combinations.
8317SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
8318 SelectionDAG &DAG,
8319 const SDLoc &dl) const {
8320 SDValue Conv = convertFPToInt(Op, DAG, Subtarget);
8321 SDValue Mov = DAG.getNode(PPCISD::MFVSR, dl, Op.getValueType(), Conv);
8322 if (Op->isStrictFPOpcode())
8323 return DAG.getMergeValues({Mov, Conv.getValue(1)}, dl);
8324 else
8325 return Mov;
8326}
8327
8328SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
8329 const SDLoc &dl) const {
8330 bool IsStrict = Op->isStrictFPOpcode();
8331 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8332 Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8333 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8334 EVT SrcVT = Src.getValueType();
8335 EVT DstVT = Op.getValueType();
8336
8337 // FP to INT conversions are legal for f128.
8338 if (SrcVT == MVT::f128)
8339 return Subtarget.hasP9Vector() ? Op : SDValue();
8340
8341 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
8342 // PPC (the libcall is not available).
8343 if (SrcVT == MVT::ppcf128) {
8344 if (DstVT == MVT::i32) {
8345 // TODO: Conservatively pass only nofpexcept flag here. Need to check and
8346 // set other fast-math flags to FP operations in both strict and
8347 // non-strict cases. (FP_TO_SINT, FSUB)
8348 SDNodeFlags Flags;
8349 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8350
8351 if (IsSigned) {
8352 SDValue Lo, Hi;
8353 std::tie(Lo, Hi) = DAG.SplitScalar(Src, dl, MVT::f64, MVT::f64);
8354
8355 // Add the two halves of the long double in round-to-zero mode, and use
8356 // a smaller FP_TO_SINT.
8357 if (IsStrict) {
8358 SDValue Res = DAG.getNode(PPCISD::STRICT_FADDRTZ, dl,
8359 DAG.getVTList(MVT::f64, MVT::Other),
8360 {Op.getOperand(0), Lo, Hi}, Flags);
8361 return DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
8362 DAG.getVTList(MVT::i32, MVT::Other),
8363 {Res.getValue(1), Res}, Flags);
8364 } else {
8365 SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
8366 return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
8367 }
8368 } else {
8369 const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
8370 APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
8371 SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
8372 SDValue SignMask = DAG.getConstant(0x80000000, dl, DstVT);
8373 if (IsStrict) {
8374 // Sel = Src < 0x80000000
8375 // FltOfs = select Sel, 0.0, 0x80000000
8376 // IntOfs = select Sel, 0, 0x80000000
8377 // Result = fp_to_sint(Src - FltOfs) ^ IntOfs
8378 SDValue Chain = Op.getOperand(0);
8379 EVT SetCCVT =
8380 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
8381 EVT DstSetCCVT =
8382 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
8383 SDValue Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
8384 Chain, true);
8385 Chain = Sel.getValue(1);
8386
8387 SDValue FltOfs = DAG.getSelect(
8388 dl, SrcVT, Sel, DAG.getConstantFP(0.0, dl, SrcVT), Cst);
8389 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
8390
8391 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl,
8392 DAG.getVTList(SrcVT, MVT::Other),
8393 {Chain, Src, FltOfs}, Flags);
8394 Chain = Val.getValue(1);
8395 SDValue SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
8396 DAG.getVTList(DstVT, MVT::Other),
8397 {Chain, Val}, Flags);
8398 Chain = SInt.getValue(1);
8399 SDValue IntOfs = DAG.getSelect(
8400 dl, DstVT, Sel, DAG.getConstant(0, dl, DstVT), SignMask);
8401 SDValue Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
8402 return DAG.getMergeValues({Result, Chain}, dl);
8403 } else {
8404 // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
8405 // FIXME: generated code sucks.
8406 SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, Src, Cst);
8407 True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
8408 True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, SignMask);
8409 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
8410 return DAG.getSelectCC(dl, Src, Cst, True, False, ISD::SETGE);
8411 }
8412 }
8413 }
8414
8415 return SDValue();
8416 }
8417
8418 if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
8419 return LowerFP_TO_INTDirectMove(Op, DAG, dl);
8420
8421 ReuseLoadInfo RLI;
8422 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8423
8424 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8425 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8426}
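A scalar sketch of the unsigned expansion above using plain doubles (illustrative, not LLVM code; assumes the int cast truncates toward zero as fctiwz does, and fpToUint32Sketch is a hypothetical name):

#include <cstdint>

static uint32_t fpToUint32Sketch(double Src) {
  const double TwoE31 = 2147483648.0;        // the Cst constant above (2^31)
  bool Sel = Src < TwoE31;                   // Sel = Src < 0x80000000
  double FltOfs = Sel ? 0.0 : TwoE31;        // FltOfs = select Sel, 0.0, 2^31
  uint32_t IntOfs = Sel ? 0u : 0x80000000u;  // IntOfs = select Sel, 0, 0x80000000
  int32_t SInt = (int32_t)(Src - FltOfs);    // fp_to_sint(Src - FltOfs)
  return (uint32_t)SInt ^ IntOfs;            // xor re-applies the bias
}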
8427
8428// We're trying to insert a regular store, S, and then a load, L. If the
8429// incoming value, O, is a load, we might just be able to have our load use the
8430// address used by O. However, we don't know if anything else will store to
8431// that address before we can load from it. To prevent this situation, we need
8432// to insert our load, L, into the chain as a peer of O. To do this, we give L
8433// the same chain operand as O, we create a token factor from the chain results
8434// of O and L, and we replace all uses of O's chain result with that token
8435// factor (see spliceIntoChain below for this last part).
8436bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
8437 ReuseLoadInfo &RLI,
8438 SelectionDAG &DAG,
8439 ISD::LoadExtType ET) const {
8440 // Conservatively skip reusing for constrained FP nodes.
8441 if (Op->isStrictFPOpcode())
8442 return false;
8443
8444 SDLoc dl(Op);
8445 bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
8446 (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
8447 if (ET == ISD::NON_EXTLOAD &&
8448 (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
8449 isOperationLegalOrCustom(Op.getOpcode(),
8450 Op.getOperand(0).getValueType())) {
8451
8452 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8453 return true;
8454 }
8455
8456 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
8457 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
8458 LD->isNonTemporal())
8459 return false;
8460 if (LD->getMemoryVT() != MemVT)
8461 return false;
8462
8463 // If the result of the load is an illegal type, then we can't build a
8464 // valid chain for reuse since the legalised loads and token factor node that
8465 // ties the legalised loads together uses a different output chain than the
8466 // illegal load.
8467 if (!isTypeLegal(LD->getValueType(0)))
8468 return false;
8469
8470 RLI.Ptr = LD->getBasePtr();
8471 if (LD->isIndexed() && !LD->getOffset().isUndef()) {
8472 assert(LD->getAddressingMode() == ISD::PRE_INC &&
8473 "Non-pre-inc AM on PPC?");
8474 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
8475 LD->getOffset());
8476 }
8477
8478 RLI.Chain = LD->getChain();
8479 RLI.MPI = LD->getPointerInfo();
8480 RLI.IsDereferenceable = LD->isDereferenceable();
8481 RLI.IsInvariant = LD->isInvariant();
8482 RLI.Alignment = LD->getAlign();
8483 RLI.AAInfo = LD->getAAInfo();
8484 RLI.Ranges = LD->getRanges();
8485
8486 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
8487 return true;
8488}
8489
8490// Given the head of the old chain, ResChain, insert a token factor containing
8491// it and NewResChain, and make users of ResChain now be users of that token
8492// factor.
8493// TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
8494void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
8495 SDValue NewResChain,
8496 SelectionDAG &DAG) const {
8497 if (!ResChain)
8498 return;
8499
8500 SDLoc dl(NewResChain);
8501
8502 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
8503 NewResChain, DAG.getUNDEF(MVT::Other));
8504 assert(TF.getNode() != NewResChain.getNode() &&
8505 "A new TF really is required here");
8506
8507 DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
8508 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
8509}
8510
8511 /// Analyze the profitability of a direct move:
8512 /// prefer a float load over an int load plus a direct move
8513 /// when there is no integer use of the int load.
8514bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
8515 SDNode *Origin = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0).getNode();
8516 if (Origin->getOpcode() != ISD::LOAD)
8517 return true;
8518
8519 // If there is no LXSIBZX/LXSIHZX, like Power8,
8520 // prefer direct move if the memory size is 1 or 2 bytes.
8521 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
8522 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
8523 return true;
8524
8525 for (SDNode::use_iterator UI = Origin->use_begin(),
8526 UE = Origin->use_end();
8527 UI != UE; ++UI) {
8528
8529 // Only look at the users of the loaded value.
8530 if (UI.getUse().get().getResNo() != 0)
8531 continue;
8532
8533 if (UI->getOpcode() != ISD::SINT_TO_FP &&
8534 UI->getOpcode() != ISD::UINT_TO_FP &&
8535 UI->getOpcode() != ISD::STRICT_SINT_TO_FP &&
8536 UI->getOpcode() != ISD::STRICT_UINT_TO_FP)
8537 return true;
8538 }
8539
8540 return false;
8541}
8542
8543 static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
8544 const PPCSubtarget &Subtarget,
8545 SDValue Chain = SDValue()) {
8546 bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8547 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8548 SDLoc dl(Op);
8549
8550 // TODO: Any other flags to propagate?
8551 SDNodeFlags Flags;
8552 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8553
8554 // If we have FCFIDS, then use it when converting to single-precision.
8555 // Otherwise, convert to double-precision and then round.
8556 bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT();
8557 unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS)
8558 : (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU);
8559 EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
8560 if (Op->isStrictFPOpcode()) {
8561 if (!Chain)
8562 Chain = Op.getOperand(0);
8563 return DAG.getNode(getPPCStrictOpcode(ConvOpc), dl,
8564 DAG.getVTList(ConvTy, MVT::Other), {Chain, Src}, Flags);
8565 } else
8566 return DAG.getNode(ConvOpc, dl, ConvTy, Src);
8567}
8568
8569/// Custom lowers integer to floating point conversions to use
8570/// the direct move instructions available in ISA 2.07 to avoid the
8571/// need for load/store combinations.
8572SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
8573 SelectionDAG &DAG,
8574 const SDLoc &dl) const {
8575 assert((Op.getValueType() == MVT::f32 ||
8576 Op.getValueType() == MVT::f64) &&
8577 "Invalid floating point type as target of conversion");
8578 assert(Subtarget.hasFPCVT() &&
8579 "Int to FP conversions with direct moves require FPCVT");
8580 SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0);
8581 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
8582 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP ||
8583 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8584 unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA;
8585 SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src);
8586 return convertIntToFP(Op, Mov, DAG, Subtarget);
8587}
8588
8589static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
8590
8591 EVT VecVT = Vec.getValueType();
8592 assert(VecVT.isVector() && "Expected a vector type.");
8593 assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
8594
8595 EVT EltVT = VecVT.getVectorElementType();
8596 unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8597 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8598
8599 unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
8600 SmallVector<SDValue, 16> Ops(NumConcat);
8601 Ops[0] = Vec;
8602 SDValue UndefVec = DAG.getUNDEF(VecVT);
8603 for (unsigned i = 1; i < NumConcat; ++i)
8604 Ops[i] = UndefVec;
8605
8606 return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
8607}
8608
8609SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
8610 const SDLoc &dl) const {
8611 bool IsStrict = Op->isStrictFPOpcode();
8612 unsigned Opc = Op.getOpcode();
8613 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8614 assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP ||
8616 "Unexpected conversion type");
8617 assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
8618 "Supports conversions to v2f64/v4f32 only.");
8619
8620 // TODO: Any other flags to propagate?
8621 SDNodeFlags Flags;
8622 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8623
8624 bool SignedConv = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
8625 bool FourEltRes = Op.getValueType() == MVT::v4f32;
8626
8627 SDValue Wide = widenVec(DAG, Src, dl);
8628 EVT WideVT = Wide.getValueType();
8629 unsigned WideNumElts = WideVT.getVectorNumElements();
8630 MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
8631
8632 SmallVector<int, 16> ShuffV;
8633 for (unsigned i = 0; i < WideNumElts; ++i)
8634 ShuffV.push_back(i + WideNumElts);
8635
8636 int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
8637 int SaveElts = FourEltRes ? 4 : 2;
8638 if (Subtarget.isLittleEndian())
8639 for (int i = 0; i < SaveElts; i++)
8640 ShuffV[i * Stride] = i;
8641 else
8642 for (int i = 1; i <= SaveElts; i++)
8643 ShuffV[i * Stride - 1] = i - 1;
8644
8645 SDValue ShuffleSrc2 =
8646 SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
8647 SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
8648
8649 SDValue Extend;
8650 if (SignedConv) {
8651 Arrange = DAG.getBitcast(IntermediateVT, Arrange);
8652 EVT ExtVT = Src.getValueType();
8653 if (Subtarget.hasP9Altivec())
8654 ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
8655 IntermediateVT.getVectorNumElements());
8656
8657 Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
8658 DAG.getValueType(ExtVT));
8659 } else
8660 Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
8661
8662 if (IsStrict)
8663 return DAG.getNode(Opc, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
8664 {Op.getOperand(0), Extend}, Flags);
8665
8666 return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
8667}
8668
8669SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
8670 SelectionDAG &DAG) const {
8671 SDLoc dl(Op);
8672 bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8673 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8674 bool IsStrict = Op->isStrictFPOpcode();
8675 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8676 SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
8677
8678 // TODO: Any other flags to propagate?
8679 SDNodeFlags Flags;
8680 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8681
8682 EVT InVT = Src.getValueType();
8683 EVT OutVT = Op.getValueType();
8684 if (OutVT.isVector() && OutVT.isFloatingPoint() &&
8685 isOperationCustom(Op.getOpcode(), InVT))
8686 return LowerINT_TO_FPVector(Op, DAG, dl);
8687
8688 // Conversions to f128 are legal.
8689 if (Op.getValueType() == MVT::f128)
8690 return Subtarget.hasP9Vector() ? Op : SDValue();
8691
8692 // Don't handle ppc_fp128 here; let it be lowered to a libcall.
8693 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
8694 return SDValue();
8695
8696 if (Src.getValueType() == MVT::i1) {
8697 SDValue Sel = DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src,
8698 DAG.getConstantFP(1.0, dl, Op.getValueType()),
8699 DAG.getConstantFP(0.0, dl, Op.getValueType()));
8700 if (IsStrict)
8701 return DAG.getMergeValues({Sel, Chain}, dl);
8702 else
8703 return Sel;
8704 }
8705
8706 // If we have direct moves, we can do all the conversion, skip the store/load
8707 // however, without FPCVT we can't do most conversions.
8708 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8709 Subtarget.isPPC64() && Subtarget.hasFPCVT())
8710 return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8711
8712 assert((IsSigned || Subtarget.hasFPCVT()) &&
8713 "UINT_TO_FP is supported only with FPCVT");
8714
8715 if (Src.getValueType() == MVT::i64) {
8716 SDValue SINT = Src;
8717 // When converting to single-precision, we actually need to convert
8718 // to double-precision first and then round to single-precision.
8719 // To avoid double-rounding effects during that operation, we have
8720 // to prepare the input operand. Bits that might be truncated when
8721 // converting to double-precision are replaced by a bit that won't
8722 // be lost at this stage, but is below the single-precision rounding
8723 // position.
8724 //
8725 // However, if -enable-unsafe-fp-math is in effect, accept double
8726 // rounding to avoid the extra overhead.
8727 if (Op.getValueType() == MVT::f32 &&
8728 !Subtarget.hasFPCVT() &&
8729 !DAG.getTarget().Options.UnsafeFPMath) {
8730
8731 // Twiddle input to make sure the low 11 bits are zero. (If this
8732 // is the case, we are guaranteed the value will fit into the 53 bit
8733 // mantissa of an IEEE double-precision value without rounding.)
8734 // If any of those low 11 bits were not zero originally, make sure
8735 // bit 12 (value 2048) is set instead, so that the final rounding
8736 // to single-precision gets the correct result.
8737 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8738 SINT, DAG.getConstant(2047, dl, MVT::i64));
8739 Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
8740 Round, DAG.getConstant(2047, dl, MVT::i64));
8741 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
8742 Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8743 Round, DAG.getConstant(-2048, dl, MVT::i64));
8744
8745 // However, we cannot use that value unconditionally: if the magnitude
8746 // of the input value is small, the bit-twiddling we did above might
8747 // end up visibly changing the output. Fortunately, in that case, we
8748 // don't need to twiddle bits since the original input will convert
8749 // exactly to double-precision floating-point already. Therefore,
8750 // construct a conditional to use the original value if the top 11
8751 // bits are all sign-bit copies, and use the rounded value computed
8752 // above otherwise.
8753 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
8754 SINT, DAG.getConstant(53, dl, MVT::i32));
8755 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
8756 Cond, DAG.getConstant(1, dl, MVT::i64));
8757 Cond = DAG.getSetCC(
8758 dl,
8759 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
8760 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
8761
8762 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
8763 }
8764
8765 ReuseLoadInfo RLI;
8766 SDValue Bits;
8767
8769 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8770 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8771 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8772 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8773 } else if (Subtarget.hasLFIWAX() &&
8774 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8775 MachineMemOperand *MMO =
8776 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8777 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8778 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8779 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
8780 DAG.getVTList(MVT::f64, MVT::Other),
8781 Ops, MVT::i32, MMO);
8782 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8783 } else if (Subtarget.hasFPCVT() &&
8784 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8785 MachineMemOperand *MMO =
8786 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8787 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8788 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8789 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
8790 DAG.getVTList(MVT::f64, MVT::Other),
8791 Ops, MVT::i32, MMO);
8792 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8793 } else if (((Subtarget.hasLFIWAX() &&
8794 SINT.getOpcode() == ISD::SIGN_EXTEND) ||
8795 (Subtarget.hasFPCVT() &&
8796 SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
8797 SINT.getOperand(0).getValueType() == MVT::i32) {
8798 MachineFrameInfo &MFI = MF.getFrameInfo();
8799 EVT PtrVT = getPointerTy(DAG.getDataLayout());
8800
8801 int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8802 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8803
8804 SDValue Store = DAG.getStore(Chain, dl, SINT.getOperand(0), FIdx,
8805 MachinePointerInfo::getFixedStack(
8806 DAG.getMachineFunction(), FrameIdx));
8807 Chain = Store;
8808
8809 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8810 "Expected an i32 store");
8811
8812 RLI.Ptr = FIdx;
8813 RLI.Chain = Chain;
8814 RLI.MPI =
8815 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8816 RLI.Alignment = Align(4);
8817
8818 MachineMemOperand *MMO =
8819 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8820 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8821 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8822 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
8823 PPCISD::LFIWZX : PPCISD::LFIWAX,
8824 dl, DAG.getVTList(MVT::f64, MVT::Other),
8825 Ops, MVT::i32, MMO);
8826 Chain = Bits.getValue(1);
8827 } else
8828 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
8829
8830 SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain);
8831 if (IsStrict)
8832 Chain = FP.getValue(1);
8833
8834 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8835 if (IsStrict)
8836 FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8837 DAG.getVTList(MVT::f32, MVT::Other),
8838 {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8839 else
8840 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8841 DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
8842 }
8843 return FP;
8844 }
8845
8846 assert(Src.getValueType() == MVT::i32 &&
8847 "Unhandled INT_TO_FP type in custom expander!");
8848 // Since we only generate this in 64-bit mode, we can take advantage of
8849 // 64-bit registers. In particular, sign extend the input value into the
8850 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
8851 // then lfd it and fcfid it.
8852 MachineFunction &MF = DAG.getMachineFunction();
8853 MachineFrameInfo &MFI = MF.getFrameInfo();
8854 EVT PtrVT = getPointerTy(MF.getDataLayout());
8855
8856 SDValue Ld;
8857 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8858 ReuseLoadInfo RLI;
8859 bool ReusingLoad;
8860 if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) {
8861 int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8862 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8863
8864 SDValue Store = DAG.getStore(Chain, dl, Src, FIdx,
8865 MachinePointerInfo::getFixedStack(
8866 DAG.getMachineFunction(), FrameIdx));
8867 Chain = Store;
8868
8869 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8870 "Expected an i32 store");
8871
8872 RLI.Ptr = FIdx;
8873 RLI.Chain = Chain;
8874 RLI.MPI =
8875 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8876 RLI.Alignment = Align(4);
8877 }
8878
8879 MachineMemOperand *MMO =
8880 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8881 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8882 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8883 Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl,
8884 DAG.getVTList(MVT::f64, MVT::Other), Ops,
8885 MVT::i32, MMO);
8886 Chain = Ld.getValue(1);
8887 if (ReusingLoad)
8888 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8889 } else {
8890 assert(Subtarget.isPPC64() &&
8891 "i32->FP without LFIWAX supported only on PPC64");
8892
8893 int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
8894 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8895
8896 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Src);
8897
8898 // STD the extended value into the stack slot.
8899 SDValue Store = DAG.getStore(
8900 Chain, dl, Ext64, FIdx,
8901 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8902 Chain = Store;
8903
8904 // Load the value as a double.
8905 Ld = DAG.getLoad(
8906 MVT::f64, dl, Chain, FIdx,
8907 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8908 Chain = Ld.getValue(1);
8909 }
8910
8911 // FCFID it and return it.
8912 SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
8913 if (IsStrict)
8914 Chain = FP.getValue(1);
8915 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8916 if (IsStrict)
8917 FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8918 DAG.getVTList(MVT::f32, MVT::Other),
8919 {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8920 else
8921 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8922 DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
8923 }
8924 return FP;
8925}
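A worked instance of the bit-twiddling above (an illustrative sketch, not LLVM code; prepareForSingleRounding is a hypothetical name): the low 11 bits are folded into bit 11 so the i64 -> f64 -> f32 path cannot round twice.

#include <cstdint>

static int64_t prepareForSingleRounding(int64_t SINT) {
  int64_t Round = SINT & 2047;  // isolate the low 11 bits
  Round += 2047;                // >= 2048 iff any of those bits were set
  Round |= SINT;
  Round &= ~2047;               // clear the low 11 bits, keeping bit 11
  return Round;
}
// Example: 0x20000000000007FF -> 0x2000000000000800, while values whose low
// 11 bits are already clear pass through unchanged.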
8926
8927SDValue PPCTargetLowering::LowerGET_ROUNDING(SDValue Op,
8928 SelectionDAG &DAG) const {
8929 SDLoc dl(Op);
8930 /*
8931 The rounding mode is in bits 30:31 of FPSCR, and has the following
8932 settings:
8933 00 Round to nearest
8934 01 Round to 0
8935 10 Round to +inf
8936 11 Round to -inf
8937
8938 GET_ROUNDING, on the other hand, expects the following:
8939 -1 Undefined
8940 0 Round to 0
8941 1 Round to nearest
8942 2 Round to +inf
8943 3 Round to -inf
8944
8945 To perform the conversion, we do:
8946 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8947 */
8948
8949 MachineFunction &MF = DAG.getMachineFunction();
8950 EVT VT = Op.getValueType();
8951 EVT PtrVT = getPointerTy(MF.getDataLayout());
8952
8953 // Save FP Control Word to register
8954 SDValue Chain = Op.getOperand(0);
8955 SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8956 Chain = MFFS.getValue(1);
8957
8958 SDValue CWD;
8959 if (isTypeLegal(MVT::i64)) {
8960 CWD = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
8961 DAG.getNode(ISD::BITCAST, dl, MVT::i64, MFFS));
8962 } else {
8963 // Save FP register to stack slot
8964 int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
8965 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8966 Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8967
8968 // Load FP Control Word from low 32 bits of stack slot.
8970 "Stack slot adjustment is valid only on big endian subtargets!");
8971 SDValue Four = DAG.getConstant(4, dl, PtrVT);
8972 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8973 CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
8974 Chain = CWD.getValue(1);
8975 }
8976
8977 // Transform as necessary
8978 SDValue CWD1 =
8979 DAG.getNode(ISD::AND, dl, MVT::i32,
8980 CWD, DAG.getConstant(3, dl, MVT::i32));
8981 SDValue CWD2 =
8982 DAG.getNode(ISD::SRL, dl, MVT::i32,
8983 DAG.getNode(ISD::AND, dl, MVT::i32,
8984 DAG.getNode(ISD::XOR, dl, MVT::i32,
8985 CWD, DAG.getConstant(3, dl, MVT::i32)),
8986 DAG.getConstant(3, dl, MVT::i32)),
8987 DAG.getConstant(1, dl, MVT::i32));
8988
8989 SDValue RetVal =
8990 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8991
8992 RetVal =
8993 DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
8994 dl, VT, RetVal);
8995
8996 return DAG.getMergeValues({RetVal, Chain}, dl);
8997}
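The conversion formula in the comment can be verified exhaustively over the four RN values; a standalone sketch (not LLVM code; mapRoundingMode is a hypothetical name):

#include <cassert>

// ((FPSCR & 3) ^ ((~FPSCR & 3) >> 1)) maps the FPSCR rounding field to the
// GET_ROUNDING encoding: 0 -> 1 (nearest), 1 -> 0 (to zero), 2 -> 2, 3 -> 3.
static int mapRoundingMode(int RN) { return (RN & 3) ^ ((~RN & 3) >> 1); }

int main() {
  assert(mapRoundingMode(0) == 1 && mapRoundingMode(1) == 0);
  assert(mapRoundingMode(2) == 2 && mapRoundingMode(3) == 3);
  return 0;
}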
8998
8999SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
9000 EVT VT = Op.getValueType();
9001 unsigned BitWidth = VT.getSizeInBits();
9002 SDLoc dl(Op);
9003 assert(Op.getNumOperands() == 3 &&
9004 VT == Op.getOperand(1).getValueType() &&
9005 "Unexpected SHL!");
9006
9007 // Expand into a bunch of logical ops. Note that these ops
9008 // depend on the PPC behavior for oversized shift amounts.
9009 SDValue Lo = Op.getOperand(0);
9010 SDValue Hi = Op.getOperand(1);
9011 SDValue Amt = Op.getOperand(2);
9012 EVT AmtVT = Amt.getValueType();
9013
9014 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
9015 DAG.getConstant(BitWidth, dl, AmtVT), Amt);
9016 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
9017 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
9018 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
9019 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
9020 DAG.getConstant(-BitWidth, dl, AmtVT));
9021 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
9022 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
9023 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
9024 SDValue OutOps[] = { OutLo, OutHi };
9025 return DAG.getMergeValues(OutOps, dl);
9026}
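A 32-bit model of the expansion above (illustrative sketch, not LLVM code; shl32, srl32, and shlParts are hypothetical names). The key property is that PPC shifts take a 6-bit amount and produce zero for amounts in [32, 63], which makes the Tmp3 and Tmp6 terms safe for any Amt.

#include <cstdint>

static uint32_t shl32(uint32_t V, unsigned A) { A &= 63; return A < 32 ? V << A : 0; }
static uint32_t srl32(uint32_t V, unsigned A) { A &= 63; return A < 32 ? V >> A : 0; }

static void shlParts(uint32_t Lo, uint32_t Hi, unsigned Amt,
                     uint32_t &OutLo, uint32_t &OutHi) {
  OutHi = shl32(Hi, Amt)        // Tmp2
        | srl32(Lo, 32 - Amt)   // Tmp3 (zero when Amt == 0)
        | shl32(Lo, Amt - 32);  // Tmp6 (covers Amt >= 32)
  OutLo = shl32(Lo, Amt);
}

LowerSRL_PARTS and LowerSRA_PARTS below mirror the same structure with the shift directions reversed.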
9027
9028SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
9029 EVT VT = Op.getValueType();
9030 SDLoc dl(Op);
9031 unsigned BitWidth = VT.getSizeInBits();
9032 assert(Op.getNumOperands() == 3 &&
9033 VT == Op.getOperand(1).getValueType() &&
9034 "Unexpected SRL!");
9035
9036 // Expand into a bunch of logical ops. Note that these ops
9037 // depend on the PPC behavior for oversized shift amounts.
9038 SDValue Lo = Op.getOperand(0);
9039 SDValue Hi = Op.getOperand(1);
9040 SDValue Amt = Op.getOperand(2);
9041 EVT AmtVT = Amt.getValueType();
9042
9043 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
9044 DAG.getConstant(BitWidth, dl, AmtVT), Amt);
9045 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
9046 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
9047 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
9048 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
9049 DAG.getConstant(-BitWidth, dl, AmtVT));
9050 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
9051 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
9052 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
9053 SDValue OutOps[] = { OutLo, OutHi };
9054 return DAG.getMergeValues(OutOps, dl);
9055}
9056
9057SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
9058 SDLoc dl(Op);
9059 EVT VT = Op.getValueType();
9060 unsigned BitWidth = VT.getSizeInBits();
9061 assert(Op.getNumOperands() == 3 &&
9062 VT == Op.getOperand(1).getValueType() &&
9063 "Unexpected SRA!");
9064
9065 // Expand into a bunch of logical ops, followed by a select_cc.
9066 SDValue Lo = Op.getOperand(0);
9067 SDValue Hi = Op.getOperand(1);
9068 SDValue Amt = Op.getOperand(2);
9069 EVT AmtVT = Amt.getValueType();
9070
9071 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
9072 DAG.getConstant(BitWidth, dl, AmtVT), Amt);
9073 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
9074 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
9075 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
9076 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
9077 DAG.getConstant(-BitWidth, dl, AmtVT));
9078 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
9079 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
9080 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
9081 Tmp4, Tmp6, ISD::SETLE);
9082 SDValue OutOps[] = { OutLo, OutHi };
9083 return DAG.getMergeValues(OutOps, dl);
9084}
9085
9086SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op,
9087 SelectionDAG &DAG) const {
9088 SDLoc dl(Op);
9089 EVT VT = Op.getValueType();
9090 unsigned BitWidth = VT.getSizeInBits();
9091
9092 bool IsFSHL = Op.getOpcode() == ISD::FSHL;
9093 SDValue X = Op.getOperand(0);
9094 SDValue Y = Op.getOperand(1);
9095 SDValue Z = Op.getOperand(2);
9096 EVT AmtVT = Z.getValueType();
9097
9098 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
9099 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
9100 // This is simpler than TargetLowering::expandFunnelShift because we can rely
9101 // on PowerPC shift by BW being well defined.
9102 Z = DAG.getNode(ISD::AND, dl, AmtVT, Z,
9103 DAG.getConstant(BitWidth - 1, dl, AmtVT));
9104 SDValue SubZ =
9105 DAG.getNode(ISD::SUB, dl, AmtVT, DAG.getConstant(BitWidth, dl, AmtVT), Z);
9106 X = DAG.getNode(PPCISD::SHL, dl, VT, X, IsFSHL ? Z : SubZ);
9107 Y = DAG.getNode(PPCISD::SRL, dl, VT, Y, IsFSHL ? SubZ : Z);
9108 return DAG.getNode(ISD::OR, dl, VT, X, Y);
9109}
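A 32-bit model of the fshl path (illustrative sketch, not LLVM code; fshl32 is a hypothetical name). After masking, Z == 0 makes SubZ == 32, and a PPC shift by 32 yields zero, so the OR degenerates to X, which is exactly the fshl identity case.

#include <cstdint>

static uint32_t fshl32(uint32_t X, uint32_t Y, unsigned Z) {
  Z &= 31;                                                       // Z % BW
  unsigned SubZ = 32 - Z;
  auto Shl = [](uint32_t V, unsigned A) { return A < 32 ? V << A : 0u; };
  auto Srl = [](uint32_t V, unsigned A) { return A < 32 ? V >> A : 0u; };
  return Shl(X, Z) | Srl(Y, SubZ);
}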
9110
9111//===----------------------------------------------------------------------===//
9112// Vector related lowering.
9113//
9114
9115/// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
9116/// element size of SplatSize. Cast the result to VT.
9117static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
9118 SelectionDAG &DAG, const SDLoc &dl) {
9119 static const MVT VTys[] = { // canonical VT to use for each size.
9120 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
9121 };
9122
9123 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
9124
9125 // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize.
9126 if (Val == ((1LLU << (SplatSize * 8)) - 1)) {
9127 SplatSize = 1;
9128 Val = 0xFF;
9129 }
9130
9131 EVT CanonicalVT = VTys[SplatSize-1];
9132
9133 // Build a canonical splat for this value.
9134 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
9135}
9136
9137/// BuildIntrinsicOp - Return a unary operator intrinsic node with the
9138/// specified intrinsic ID.
9139 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
9140 const SDLoc &dl, EVT DestVT = MVT::Other) {
9141 if (DestVT == MVT::Other) DestVT = Op.getValueType();
9142 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
9143 DAG.getConstant(IID, dl, MVT::i32), Op);
9144}
9145
9146/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
9147/// specified intrinsic ID.
9148static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
9149 SelectionDAG &DAG, const SDLoc &dl,
9150 EVT DestVT = MVT::Other) {
9151 if (DestVT == MVT::Other) DestVT = LHS.getValueType();
9152 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
9153 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
9154}
9155
9156/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
9157/// specified intrinsic ID.
9158static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
9159 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
9160 EVT DestVT = MVT::Other) {
9161 if (DestVT == MVT::Other) DestVT = Op0.getValueType();
9162 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
9163 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
9164}
9165
9166/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
9167/// amount. The result has the specified value type.
9168static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
9169 SelectionDAG &DAG, const SDLoc &dl) {
9170 // Force LHS/RHS to be the right type.
9171 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
9172 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
9173
9174 int Ops[16];
9175 for (unsigned i = 0; i != 16; ++i)
9176 Ops[i] = i + Amt;
9177 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
9178 return DAG.getNode(ISD::BITCAST, dl, VT, T);
9179}
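For reference, a sketch of the mask BuildVSLDOI produces (illustrative, not LLVM code; vsldoiMask is a hypothetical name): with Amt = 4, bytes 4..15 come from LHS and bytes 16..19 from RHS, i.e. the 32-byte concatenation LHS:RHS shifted left by 4 bytes.

#include <array>

static std::array<int, 16> vsldoiMask(unsigned Amt) {
  std::array<int, 16> Ops{};
  for (unsigned i = 0; i != 16; ++i)
    Ops[i] = i + Amt; // indices >= 16 select from RHS in the two-input shuffle
  return Ops;         // Amt = 4 gives {4, 5, ..., 15, 16, 17, 18, 19}
}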
9180
9181/// Do we have an efficient pattern in a .td file for this node?
9182///
9183/// \param V - pointer to the BuildVectorSDNode being matched
9184/// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
9185///
9186/// There are some patterns where it is beneficial to keep a BUILD_VECTOR
9187/// node as a BUILD_VECTOR node rather than expanding it. The patterns where
9188/// the opposite is true (expansion is beneficial) are:
9189/// - The node builds a vector out of integers that are not 32 or 64-bits
9190/// - The node builds a vector out of constants
9191/// - The node is a "load-and-splat"
9192/// In all other cases, we will choose to keep the BUILD_VECTOR.
9193 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
9194 bool HasDirectMove,
9195 bool HasP8Vector) {
9196 EVT VecVT = V->getValueType(0);
9197 bool RightType = VecVT == MVT::v2f64 ||
9198 (HasP8Vector && VecVT == MVT::v4f32) ||
9199 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
9200 if (!RightType)
9201 return false;
9202
9203 bool IsSplat = true;
9204 bool IsLoad = false;
9205 SDValue Op0 = V->getOperand(0);
9206
9207 // This function is called in a block that confirms the node is not a constant
9208 // splat. So a constant BUILD_VECTOR here means the vector is built out of
9209 // different constants.
9210 if (V->isConstant())
9211 return false;
9212 for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
9213 if (V->getOperand(i).isUndef())
9214 return false;
9215 // We want to expand nodes that represent load-and-splat even if the
9216 // loaded value is a floating point truncation or conversion to int.
9217 if (V->getOperand(i).getOpcode() == ISD::LOAD ||
9218 (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
9219 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
9220 (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
9221 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
9222 (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
9223 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
9224 IsLoad = true;
9225 // If the operands are different or the input is not a load and has more
9226 // uses than just this BV node, then it isn't a splat.
9227 if (V->getOperand(i) != Op0 ||
9228 (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
9229 IsSplat = false;
9230 }
9231 return !(IsSplat && IsLoad);
9232}
9233
9234// Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
9235SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
9236
9237 SDLoc dl(Op);
9238 SDValue Op0 = Op->getOperand(0);
9239
9240 if ((Op.getValueType() != MVT::f128) ||
9241 (Op0.getOpcode() != ISD::BUILD_PAIR) ||
9242 (Op0.getOperand(0).getValueType() != MVT::i64) ||
9243 (Op0.getOperand(1).getValueType() != MVT::i64))
9244 return SDValue();
9245
9246 return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
9247 Op0.getOperand(1));
9248}
9249
9250static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {
9251 const SDValue *InputLoad = &Op;
9252 while (InputLoad->getOpcode() == ISD::BITCAST)
9253 InputLoad = &InputLoad->getOperand(0);
9254 if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
9255 InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) {
9256 IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED;
9257 InputLoad = &InputLoad->getOperand(0);
9258 }
9259 if (InputLoad->getOpcode() != ISD::LOAD)
9260 return nullptr;
9261 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9262 return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
9263}
9264
9265// Convert the argument APFloat to a single precision APFloat if there is no
9266// loss in information during the conversion to single precision APFloat and the
9267// resulting number is not a denormal number. Return true if successful.
9268 bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
9269 APFloat APFloatToConvert = ArgAPFloat;
9270 bool LosesInfo = true;
9271 APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
9272 &LosesInfo);
9273 bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
9274 if (Success)
9275 ArgAPFloat = APFloatToConvert;
9276 return Success;
9277}
9278
9279// Bitcast the argument APInt to a double and convert it to a single precision
9280// APFloat, bitcast the APFloat to an APInt and assign it to the original
9281// argument if there is no loss in information during the conversion from
9282// double to single precision APFloat and the resulting number is not a denormal
9283// number. Return true if successful.
9284 bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
9285 double DpValue = ArgAPInt.bitsToDouble();
9286 APFloat APFloatDp(DpValue);
9287 bool Success = convertToNonDenormSingle(APFloatDp);
9288 if (Success)
9289 ArgAPInt = APFloatDp.bitcastToAPInt();
9290 return Success;
9291}
9292
9293 // Nondestructive check for convertToNonDenormSingle.
9294 bool llvm::checkConvertToNonDenormSingle(APFloat &ArgAPFloat) {
9295 // Only convert if it loses info, since XXSPLTIDP should
9296 // handle the other case.
9297 APFloat APFloatToConvert = ArgAPFloat;
9298 bool LosesInfo = true;
9299 APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
9300 &LosesInfo);
9301
9302 return (!LosesInfo && !APFloatToConvert.isDenormal());
9303}
9304
9305static bool isValidSplatLoad(const PPCSubtarget &Subtarget, const SDValue &Op,
9306 unsigned &Opcode) {
9307 LoadSDNode *InputNode = dyn_cast<LoadSDNode>(Op.getOperand(0));
9308 if (!InputNode || !Subtarget.hasVSX() || !ISD::isUNINDEXEDLoad(InputNode))
9309 return false;
9310
9311 EVT Ty = Op->getValueType(0);
9312 // For v2f64, v4f32 and v4i32 types, we require the load to be non-extending
9313 // as we cannot handle extending loads for these types.
9314 if ((Ty == MVT::v2f64 || Ty == MVT::v4f32 || Ty == MVT::v4i32) &&
9315 ISD::isNON_EXTLoad(InputNode))
9316 return true;
9317
9318 EVT MemVT = InputNode->getMemoryVT();
9319 // For v8i16 and v16i8 types, extending loads can be handled as long as the
9320 // memory VT is the same vector element VT type.
9321 // The loads feeding into the v8i16 and v16i8 types will be extending because
9322 // scalar i8/i16 are not legal types.
9323 if ((Ty == MVT::v8i16 || Ty == MVT::v16i8) && ISD::isEXTLoad(InputNode) &&
9324 (MemVT == Ty.getVectorElementType()))
9325 return true;
9326
9327 if (Ty == MVT::v2i64) {
9328 // Check the extend type, when the input type is i32, and the output vector
9329 // type is v2i64.
9330 if (MemVT == MVT::i32) {
9331 if (ISD::isZEXTLoad(InputNode))
9332 Opcode = PPCISD::ZEXT_LD_SPLAT;
9333 if (ISD::isSEXTLoad(InputNode))
9334 Opcode = PPCISD::SEXT_LD_SPLAT;
9335 }
9336 return true;
9337 }
9338 return false;
9339}
9340
9341// If this is a case we can't handle, return null and let the default
9342// expansion code take care of it. If we CAN select this case, and if it
9343// selects to a single instruction, return Op. Otherwise, if we can codegen
9344// this case more efficiently than a constant pool load, lower it to the
9345// sequence of ops that should be used.
9346SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
9347 SelectionDAG &DAG) const {
9348 SDLoc dl(Op);
9349 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
9350 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
9351
9352 // Check if this is a splat of a constant value.
9353 APInt APSplatBits, APSplatUndef;
9354 unsigned SplatBitSize;
9355 bool HasAnyUndefs;
9356 bool BVNIsConstantSplat =
9357 BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
9358 HasAnyUndefs, 0, !Subtarget.isLittleEndian());
9359
9360 // If it is a splat of a double, check if we can shrink it to a 32 bit
9361 // non-denormal float which when converted back to double gives us the same
9362 // double. This is to exploit the XXSPLTIDP instruction.
9363 // If we lose precision, we use XXSPLTI32DX.
9364 if (BVNIsConstantSplat && (SplatBitSize == 64) &&
9365 Subtarget.hasPrefixInstrs()) {
9366 // Check the type first to short-circuit so we don't modify APSplatBits if
9367 // this block isn't executed.
9368 if ((Op->getValueType(0) == MVT::v2f64) &&
9369 convertToNonDenormSingle(APSplatBits)) {
9370 SDValue SplatNode = DAG.getNode(
9371 PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
9372 DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
9373 return DAG.getBitcast(Op.getValueType(), SplatNode);
9374 } else {
9375 // We may lose precision, so we have to use XXSPLTI32DX.
9376
9377 uint32_t Hi =
9378 (uint32_t)((APSplatBits.getZExtValue() & 0xFFFFFFFF00000000LL) >> 32);
9379 uint32_t Lo =
9380 (uint32_t)(APSplatBits.getZExtValue() & 0xFFFFFFFF);
9381 SDValue SplatNode = DAG.getUNDEF(MVT::v2i64);
9382
9383 if (!Hi || !Lo)
9384 // If either load is 0, then we should generate XXLXOR to set to 0.
9385 SplatNode = DAG.getTargetConstant(0, dl, MVT::v2i64);
9386
9387 if (Hi)
9388 SplatNode = DAG.getNode(
9389 PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
9390 DAG.getTargetConstant(0, dl, MVT::i32),
9391 DAG.getTargetConstant(Hi, dl, MVT::i32));
9392
9393 if (Lo)
9394 SplatNode =
9395 DAG.getNode(PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
9396 DAG.getTargetConstant(1, dl, MVT::i32),
9397 DAG.getTargetConstant(Lo, dl, MVT::i32));
9398
9399 return DAG.getBitcast(Op.getValueType(), SplatNode);
9400 }
9401 }
9402
9403 if (!BVNIsConstantSplat || SplatBitSize > 32) {
9404 unsigned NewOpcode = PPCISD::LD_SPLAT;
9405
9406 // Handle load-and-splat patterns as we have instructions that will do this
9407 // in one go.
9408 if (DAG.isSplatValue(Op, true) &&
9409 isValidSplatLoad(Subtarget, Op, NewOpcode)) {
9410 const SDValue *InputLoad = &Op.getOperand(0);
9411 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9412
9413 // If the input load is an extending load, it will be an i32 -> i64
9414 // extending load and isValidSplatLoad() will update NewOpcode.
9415 unsigned MemorySize = LD->getMemoryVT().getScalarSizeInBits();
9416 unsigned ElementSize =
9417 MemorySize * ((NewOpcode == PPCISD::LD_SPLAT) ? 1 : 2);
9418
9419 assert(((ElementSize == 2 * MemorySize)
9420 ? (NewOpcode == PPCISD::ZEXT_LD_SPLAT ||
9421 NewOpcode == PPCISD::SEXT_LD_SPLAT)
9422 : (NewOpcode == PPCISD::LD_SPLAT)) &&
9423 "Unmatched element size and opcode!\n");
9424
9425 // Checking for a single use of this load, we have to check for vector
9426 // width (128 bits) / ElementSize uses (since each operand of the
9427 // BUILD_VECTOR is a separate use of the value).
9428 unsigned NumUsesOfInputLD = 128 / ElementSize;
9429 for (SDValue BVInOp : Op->ops())
9430 if (BVInOp.isUndef())
9431 NumUsesOfInputLD--;
9432
9433 // Exclude some cases where LD_SPLAT is worse than scalar_to_vector:
9434 // the cases below should also apply for "lfiwzx/lfiwax + LE target + index
9435 // 1", "lxvrhx + BE target + index 7", and "lxvrbx + BE target + index
9436 // 15", but isValidSplatLoad() currently only returns true when the data
9437 // at index 0 is valid, so we will not get into trouble for
9438 // these cases.
9439 //
9440 // case 1 - lfiwzx/lfiwax
9441 // 1.1: load result is i32 and is sign/zero extend to i64;
9442 // 1.2: build a v2i64 vector type with above loaded value;
9443 // 1.3: the vector has only one value at index 0, others are all undef;
9444 // 1.4: on BE target, so that lfiwzx/lfiwax does not need any permute.
9445 if (NumUsesOfInputLD == 1 &&
9446 (Op->getValueType(0) == MVT::v2i64 && NewOpcode != PPCISD::LD_SPLAT &&
9447 !Subtarget.isLittleEndian() && Subtarget.hasVSX() &&
9448 Subtarget.hasLFIWAX()))
9449 return SDValue();
9450
9451 // case 2 - lxvr[hb]x
9452 // 2.1: load result is at most i16;
9453 // 2.2: build a vector with above loaded value;
9454 // 2.3: the vector has only one value at index 0, others are all undef;
9455 // 2.4: on LE target, so that lxvr[hb]x does not need any permute.
9456 if (NumUsesOfInputLD == 1 && Subtarget.isLittleEndian() &&
9457 Subtarget.isISA3_1() && ElementSize <= 16)
9458 return SDValue();
9459
9460 assert(NumUsesOfInputLD > 0 && "No uses of input LD of a build_vector?");
9461 if (InputLoad->getNode()->hasNUsesOfValue(NumUsesOfInputLD, 0) &&
9462 Subtarget.hasVSX()) {
9463 SDValue Ops[] = {
9464 LD->getChain(), // Chain
9465 LD->getBasePtr(), // Ptr
9466 DAG.getValueType(Op.getValueType()) // VT
9467 };
9468 SDValue LdSplt = DAG.getMemIntrinsicNode(
9469 NewOpcode, dl, DAG.getVTList(Op.getValueType(), MVT::Other), Ops,
9470 LD->getMemoryVT(), LD->getMemOperand());
9471 // Replace all uses of the output chain of the original load with the
9472 // output chain of the new load.
9473 DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1),
9474 LdSplt.getValue(1));
9475 return LdSplt;
9476 }
9477 }
9478
9479 // In 64-bit mode, BUILD_VECTOR nodes that are not constant splats of up to
9480 // 32-bits can be lowered to VSX instructions under certain conditions.
9481 // Without VSX, there is no pattern more efficient than expanding the node.
9482 if (Subtarget.hasVSX() && Subtarget.isPPC64() &&
9483 haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
9484 Subtarget.hasP8Vector()))
9485 return Op;
9486 return SDValue();
9487 }
9488
9489 uint64_t SplatBits = APSplatBits.getZExtValue();
9490 uint64_t SplatUndef = APSplatUndef.getZExtValue();
9491 unsigned SplatSize = SplatBitSize / 8;
9492
9493 // First, handle single instruction cases.
9494
9495 // All zeros?
9496 if (SplatBits == 0) {
9497 // Canonicalize all zero vectors to be v4i32.
9498 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
9499 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
9500 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
9501 }
9502 return Op;
9503 }
9504
9505 // We have XXSPLTIW for constant splats four bytes wide.
9506 // Given vector length is a multiple of 4, 2-byte splats can be replaced
9507 // with 4-byte splats. We replicate the SplatBits in case of 2-byte splat to
9508 // make a 4-byte splat element. For example: 2-byte splat of 0xABAB can be
9509 // turned into a 4-byte splat of 0xABABABAB.
9510 if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
9511 return getCanonicalConstSplat(SplatBits | (SplatBits << 16), SplatSize * 2,
9512 Op.getValueType(), DAG, dl);
9513
9514 if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
9515 return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9516 dl);
9517
9518 // We have XXSPLTIB for constant splats one byte wide.
9519 if (Subtarget.hasP9Vector() && SplatSize == 1)
9520 return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9521 dl);
9522
9523 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
9524 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
9525 (32-SplatBitSize));
9526 if (SextVal >= -16 && SextVal <= 15)
9527 return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
9528 dl);
9529
9530 // Two instruction sequences.
9531
9532 // If this value is in the range [-32,30] and is even, use:
9533 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
9534 // If this value is in the range [17,31] and is odd, use:
9535 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
9536 // If this value is in the range [-31,-17] and is odd, use:
9537 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
9538 // Note the last two are three-instruction sequences.
9539 if (SextVal >= -32 && SextVal <= 31) {
9540 // To avoid having these optimizations undone by constant folding,
9541 // we convert to a pseudo that will be expanded later into one of
9542 // the above forms.
9543 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
9544 EVT VT = (SplatSize == 1 ? MVT::v16i8 :
9545 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
9546 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
9547 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
9548 if (VT == Op.getValueType())
9549 return RetVal;
9550 else
9551 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
9552 }
9553
9554 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
9555 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
9556 // for fneg/fabs.
9557 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
9558 // Make -1 and vspltisw -1:
9559 SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
9560
9561 // Make the VSLW intrinsic, computing 0x8000_0000.
9562 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
9563 OnesV, DAG, dl);
9564
9565 // xor by OnesV to invert it.
9566 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
9567 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9568 }
9569
9570 // Check to see if this is one of the many vsplti* + binop-on-self cases.
9571 static const signed char SplatCsts[] = {
9572 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
9573 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
9574 };
9575
9576 for (unsigned idx = 0; idx < std::size(SplatCsts); ++idx) {
9577 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
9578 // cases which are ambiguous (e.g. the formation of 0x8000_0000).
9579 int i = SplatCsts[idx];
9580
9581 // Figure out what shift amount will be used by altivec if shifted by i in
9582 // this splat size.
9583 unsigned TypeShiftAmt = i & (SplatBitSize-1);
9584
9585 // vsplti + shl self.
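// For example, a v8i16 splat of 0x8000 has SextVal == -32768; with i == -2
// the shift amount is (-2 & 15) == 14 and 0xFFFE << 14 == 0x8000 (mod 2^16),
// so vspltish(-2) followed by vslh with itself builds the splat.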
9586 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
9587 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9588 static const unsigned IIDs[] = { // Intrinsic to use for each size.
9589 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
9590 Intrinsic::ppc_altivec_vslw
9591 };
9592 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9593 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9594 }
9595
9596 // vsplti + srl self.
9597 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9598 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9599 static const unsigned IIDs[] = { // Intrinsic to use for each size.
9600 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
9601 Intrinsic::ppc_altivec_vsrw
9602 };
9603 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9604 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9605 }
9606
9607 // vsplti + rol self.
9608 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
9609 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
9610 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9611 static const unsigned IIDs[] = { // Intrinsic to use for each size.
9612 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
9613 Intrinsic::ppc_altivec_vrlw
9614 };
9615 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9616 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9617 }
9618
9619 // t = vsplti c, result = vsldoi t, t, 1
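// For example, a v8i16 splat of 0x0300 (i == 3): vspltish(3) gives bytes
// <0,3,0,3,...>; rotating by one byte (big endian; little endian uses
// Amt == 15) lines them up as <3,0,3,0,...>, i.e. 0x0300 per half-word.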
9620 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
9621 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9622 unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
9623 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9624 }
9625 // t = vsplti c, result = vsldoi t, t, 2
9626 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
9627 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9628 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
9629 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9630 }
9631 // t = vsplti c, result = vsldoi t, t, 3
9632 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
9633 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9634 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
9635 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9636 }
9637 }
9638
9639 return SDValue();
9640}
9641
9642/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
9643/// the specified operations to build the shuffle.
9644static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
9645 SDValue RHS, SelectionDAG &DAG,
9646 const SDLoc &dl) {
9647 unsigned OpNum = (PFEntry >> 26) & 0x0F;
9648 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
9649 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
9650
9651 enum {
9652 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
9653 OP_VMRGHW,
9654 OP_VMRGLW,
9655 OP_VSPLTISW0,
9656 OP_VSPLTISW1,
9657 OP_VSPLTISW2,
9658 OP_VSPLTISW3,
9659 OP_VSLDOI4,
9660 OP_VSLDOI8,
9661 OP_VSLDOI12
9662 };
9663
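// Each 13-bit shuffle ID encodes a four-element selection as four base-9
// digits (0-7 pick a 4-byte element of the concatenated inputs, 8 means
// undef), so ((0*9+1)*9+2)*9+3 below is the LHS identity <0,1,2,3> and
// ((4*9+5)*9+6)*9+7 is <4,5,6,7>, i.e. the RHS.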
9664 if (OpNum == OP_COPY) {
9665 if (LHSID == (1*9+2)*9+3) return LHS;
9666 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
9667 return RHS;
9668 }
9669
9670 SDValue OpLHS, OpRHS;
9671 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
9672 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
9673
9674 int ShufIdxs[16];
9675 switch (OpNum) {
9676 default: llvm_unreachable("Unknown i32 permute!");
9677 case OP_VMRGHW:
9678 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3;
9679 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
9680 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7;
9681 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
9682 break;
9683 case OP_VMRGLW:
9684 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
9685 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
9686 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
9687 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
9688 break;
9689 case OP_VSPLTISW0:
9690 for (unsigned i = 0; i != 16; ++i)
9691 ShufIdxs[i] = (i&3)+0;
9692 break;
9693 case OP_VSPLTISW1:
9694 for (unsigned i = 0; i != 16; ++i)
9695 ShufIdxs[i] = (i&3)+4;
9696 break;
9697 case OP_VSPLTISW2:
9698 for (unsigned i = 0; i != 16; ++i)
9699 ShufIdxs[i] = (i&3)+8;
9700 break;
9701 case OP_VSPLTISW3:
9702 for (unsigned i = 0; i != 16; ++i)
9703 ShufIdxs[i] = (i&3)+12;
9704 break;
9705 case OP_VSLDOI4:
9706 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
9707 case OP_VSLDOI8:
9708 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
9709 case OP_VSLDOI12:
9710 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
9711 }
9712 EVT VT = OpLHS.getValueType();
9713 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9714 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9715 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9716 return DAG.getNode(ISD::BITCAST, dl, VT, T);
9717}
9718
9719/// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9720 /// by the VINSERTB instruction introduced in ISA 3.0; otherwise return a
9721 /// default (empty) SDValue.
9722SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9723 SelectionDAG &DAG) const {
9724 const unsigned BytesInVector = 16;
9725 bool IsLE = Subtarget.isLittleEndian();
9726 SDLoc dl(N);
9727 SDValue V1 = N->getOperand(0);
9728 SDValue V2 = N->getOperand(1);
9729 unsigned ShiftElts = 0, InsertAtByte = 0;
9730 bool Swap = false;
9731
9732 // Shifts required to get the byte we want into the insert element (BE: 7, LE: 8).
9733 unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
9734 0, 15, 14, 13, 12, 11, 10, 9};
9735 unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9736 1, 2, 3, 4, 5, 6, 7, 8};
9737
9738 ArrayRef<int> Mask = N->getMask();
9739 int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9740
9741 // For each mask element, find out if we're just inserting something
9742 // from V2 into V1 or vice versa.
9743 // Possible permutations inserting an element from V2 into V1:
9744 // X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9745 // 0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9746 // ...
9747 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9748 // Inserting from V1 into V2 will be similar, except mask range will be
9749 // [16,31].
9750
9751 bool FoundCandidate = false;
9752 // If both vector operands for the shuffle are the same vector, the mask
9753 // will contain only elements from the first one and the second one will be
9754 // undef.
9755 unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
9756 // Go through the mask of bytes to find an element that's being moved
9757 // from one vector to the other.
9758 for (unsigned i = 0; i < BytesInVector; ++i) {
9759 unsigned CurrentElement = Mask[i];
9760 // If the 2nd operand is undefined, we should only look for the source
9761 // element (7 on BE, 8 on LE) in the Mask.
9762 if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9763 continue;
9764
9765 bool OtherElementsInOrder = true;
9766 // Examine the other elements in the Mask to see if they're in original
9767 // order.
9768 for (unsigned j = 0; j < BytesInVector; ++j) {
9769 if (j == i)
9770 continue;
9771 // If CurrentElement is from V1 [0,15], then we expect the rest of the Mask
9772 // to be from V2 [16,31] and vice versa, unless the 2nd operand is undefined,
9773 // in which case we assume we're always picking from the 1st operand.
9774 int MaskOffset =
9775 (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9776 if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9777 OtherElementsInOrder = false;
9778 break;
9779 }
9780 }
9781 // If the other elements are in original order, record the number of shifts
9782 // we need to get the element we want into the insert element, and record
9783 // which byte in the vector we should insert into.
9784 if (OtherElementsInOrder) {
9785 // If 2nd operand is undefined, we assume no shifts and no swapping.
9786 if (V2.isUndef()) {
9787 ShiftElts = 0;
9788 Swap = false;
9789 } else {
9790 // Only need the low 4 bits for the shift; operands are swapped if CurrentElement >= 16.
9791 ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9792 : BigEndianShifts[CurrentElement & 0xF];
9793 Swap = CurrentElement < BytesInVector;
9794 }
9795 InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9796 FoundCandidate = true;
9797 break;
9798 }
9799 }
9800
9801 if (!FoundCandidate)
9802 return SDValue();
9803
9804 // Candidate found, construct the proper SDAG sequence with VINSERTB,
9805 // optionally with VECSHL if shift is required.
9806 if (Swap)
9807 std::swap(V1, V2);
9808 if (V2.isUndef())
9809 V2 = V1;
9810 if (ShiftElts) {
9811 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9812 DAG.getConstant(ShiftElts, dl, MVT::i32));
9813 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9814 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9815 }
9816 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9817 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9818}
9819
9820/// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9821 /// by the VINSERTH instruction introduced in ISA 3.0; otherwise return a
9822 /// default (empty) SDValue.
9823SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9824 SelectionDAG &DAG) const {
9825 const unsigned NumHalfWords = 8;
9826 const unsigned BytesInVector = NumHalfWords * 2;
9827 // Check that the shuffle is on half-words.
9828 if (!isNByteElemShuffleMask(N, 2, 1))
9829 return SDValue();
9830
9831 bool IsLE = Subtarget.isLittleEndian();
9832 SDLoc dl(N);
9833 SDValue V1 = N->getOperand(0);
9834 SDValue V2 = N->getOperand(1);
9835 unsigned ShiftElts = 0, InsertAtByte = 0;
9836 bool Swap = false;
9837
9838 // Shifts required to get the half-word we want into the insert element (BE: 3, LE: 4).
9839 unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9840 unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9841
9842 uint32_t Mask = 0;
9843 uint32_t OriginalOrderLow = 0x1234567;
9844 uint32_t OriginalOrderHigh = 0x89ABCDEF;
9845 // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a
9846 // 32-bit space, only need 4-bit nibbles per element.
9847 for (unsigned i = 0; i < NumHalfWords; ++i) {
9848 unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9849 Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9850 }
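// For example, the identity half-word shuffle <0,1,2,3,4,5,6,7> packs to
// Mask == 0x01234567, matching OriginalOrderLow.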
9851
9852 // For each mask element, find out if we're just inserting something
9853 // from V2 into V1 or vice versa. Possible permutations inserting an element
9854 // from V2 into V1:
9855 // X, 1, 2, 3, 4, 5, 6, 7
9856 // 0, X, 2, 3, 4, 5, 6, 7
9857 // 0, 1, X, 3, 4, 5, 6, 7
9858 // 0, 1, 2, X, 4, 5, 6, 7
9859 // 0, 1, 2, 3, X, 5, 6, 7
9860 // 0, 1, 2, 3, 4, X, 6, 7
9861 // 0, 1, 2, 3, 4, 5, X, 7
9862 // 0, 1, 2, 3, 4, 5, 6, X
9863 // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9864
9865 bool FoundCandidate = false;
9866 // Go through the mask of half-words to find an element that's being moved
9867 // from one vector to the other.
9868 for (unsigned i = 0; i < NumHalfWords; ++i) {
9869 unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9870 uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9871 uint32_t MaskOtherElts = ~(0xF << MaskShift);
9872 uint32_t TargetOrder = 0x0;
9873
9874 // If both vector operands for the shuffle are the same vector, the mask
9875 // will contain only elements from the first one and the second one will be
9876 // undef.
9877 if (V2.isUndef()) {
9878 ShiftElts = 0;
9879 unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9880 TargetOrder = OriginalOrderLow;
9881 Swap = false;
9882 // Skip if this is not the correct element, or if the mask of the other
9883 // elements doesn't match our expected order.
9884 if (MaskOneElt == VINSERTHSrcElem &&
9885 (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9886 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9887 FoundCandidate = true;
9888 break;
9889 }
9890 } else { // If both operands are defined.
9891 // Target order is [8,15] if the current mask is between [0,7].
9892 TargetOrder =
9893 (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9894 // Skip if the mask of the other elements doesn't match our expected order.
9895 if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9896 // We only need the last 3 bits for the number of shifts.
9897 ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9898 : BigEndianShifts[MaskOneElt & 0x7];
9899 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9900 Swap = MaskOneElt < NumHalfWords;
9901 FoundCandidate = true;
9902 break;
9903 }
9904 }
9905 }
9906
9907 if (!FoundCandidate)
9908 return SDValue();
9909
9910 // Candidate found, construct the proper SDAG sequence with VINSERTH,
9911 // optionally with VECSHL if shift is required.
9912 if (Swap)
9913 std::swap(V1, V2);
9914 if (V2.isUndef())
9915 V2 = V1;
9916 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9917 if (ShiftElts) {
9918 // Double ShiftElts because we're left shifting on v16i8 type.
9919 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9920 DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9921 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9922 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9923 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9924 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9925 }
9926 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9927 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9928 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9929 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9930}
9931
9932/// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9933/// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9934/// return the default SDValue.
9935SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9936 SelectionDAG &DAG) const {
9937 // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9938 // to v16i8. Peek through the bitcasts to get the actual operands.
9941
9942 auto ShuffleMask = SVN->getMask();
9943 SDValue VecShuffle(SVN, 0);
9944 SDLoc DL(SVN);
9945
9946 // Check that we have a four byte shuffle.
9947 if (!isNByteElemShuffleMask(SVN, 4, 1))
9948 return SDValue();
9949
9950 // Canonicalize the RHS being a BUILD_VECTOR when lowering to xxsplti32dx.
9951 if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9952 std::swap(LHS, RHS);
9953 VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9954 ShuffleVectorSDNode *CommutedSV = dyn_cast<ShuffleVectorSDNode>(VecShuffle);
9955 if (!CommutedSV)
9956 return SDValue();
9957 ShuffleMask = CommutedSV->getMask();
9958 }
9959
9960 // Ensure that the RHS is a vector of constants.
9961 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9962 if (!BVN)
9963 return SDValue();
9964
9965 // Check if RHS is a splat of 4-bytes (or smaller).
9966 APInt APSplatValue, APSplatUndef;
9967 unsigned SplatBitSize;
9968 bool HasAnyUndefs;
9969 if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9970 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9971 SplatBitSize > 32)
9972 return SDValue();
9973
9974 // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9975 // The instruction splats a constant C into two words of the source vector
9976 // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
9977 // Thus we check that the shuffle mask is the equivalent of
9978 // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9979 // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9980 // within each word are consecutive, so we only need to check the first byte.
9981 SDValue Index;
9982 bool IsLE = Subtarget.isLittleEndian();
9983 if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9984 (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9985 ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9986 Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9987 else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9988 (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9989 ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9990 Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9991 else
9992 return SDValue();
9993
9994 // If the splat is narrower than 32-bits, we need to get the 32-bit value
9995 // for XXSPLTI32DX.
9996 unsigned SplatVal = APSplatValue.getZExtValue();
9997 for (; SplatBitSize < 32; SplatBitSize <<= 1)
9998 SplatVal |= (SplatVal << SplatBitSize);
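// For example, a 1-byte splat of 0xAB widens to 0xABAB and then to
// 0xABABABAB, the full 32-bit immediate for XXSPLTI32DX.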
9999
10000 SDValue SplatNode = DAG.getNode(
10001 PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
10002 Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
10003 return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
10004}
10005
10006/// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
10007 /// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if the shift amount
10008 /// is a multiple of 8. Otherwise we convert it to a scalar rotation of i128,
10009 /// i.e. (or (shl x, C1), (srl x, 128-C1)).
10010SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
10011 assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
10012 assert(Op.getValueType() == MVT::v1i128 &&
10013 "Only set v1i128 as custom, other type shouldn't reach here!");
10014 SDLoc dl(Op);
10015 SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
10016 SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
10017 unsigned SHLAmt = N1.getConstantOperandVal(0);
10018 if (SHLAmt % 8 == 0) {
10019 std::array<int, 16> Mask;
10020 std::iota(Mask.begin(), Mask.end(), 0);
10021 std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
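// For example, a rotate left by 16 bits (SHLAmt == 16) yields the byte
// shuffle mask <2,3,...,15,0,1>.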
10022 if (SDValue Shuffle =
10023 DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
10024 DAG.getUNDEF(MVT::v16i8), Mask))
10025 return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
10026 }
10027 SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
10028 SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
10029 DAG.getConstant(SHLAmt, dl, MVT::i32));
10030 SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
10031 DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
10032 SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
10033 return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
10034}
10035
10036/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
10037/// is a shuffle we can handle in a single instruction, return it. Otherwise,
10038/// return the code it can be lowered into. Worst case, it can always be
10039/// lowered into a vperm.
10040SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
10041 SelectionDAG &DAG) const {
10042 SDLoc dl(Op);
10043 SDValue V1 = Op.getOperand(0);
10044 SDValue V2 = Op.getOperand(1);
10045 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10046
10047 // Any nodes that were combined in the target-independent combiner prior
10048 // to vector legalization will not be sent to the target combine. Try to
10049 // combine it here.
10050 if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
10051 if (!isa<ShuffleVectorSDNode>(NewShuffle))
10052 return NewShuffle;
10053 Op = NewShuffle;
10054 SVOp = cast<ShuffleVectorSDNode>(Op);
10055 V1 = Op.getOperand(0);
10056 V2 = Op.getOperand(1);
10057 }
10058 EVT VT = Op.getValueType();
10059 bool isLittleEndian = Subtarget.isLittleEndian();
10060
10061 unsigned ShiftElts, InsertAtByte;
10062 bool Swap = false;
10063
10064 // If this is a load-and-splat, we can do that with a single instruction
10065 // in some cases. However if the load has multiple uses, we don't want to
10066 // combine it because that will just produce multiple loads.
10067 bool IsPermutedLoad = false;
10068 const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
10069 if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
10070 (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
10071 InputLoad->hasOneUse()) {
10072 bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
10073 int SplatIdx =
10074 PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
10075
10076 // The splat index for permuted loads will be in the left half of the vector
10077 // which is strictly wider than the loaded value by 8 bytes. So we need to
10078 // adjust the splat index to point to the correct address in memory.
10079 if (IsPermutedLoad) {
10080 assert((isLittleEndian || IsFourByte) &&
10081 "Unexpected size for permuted load on big endian target");
10082 SplatIdx += IsFourByte ? 2 : 1;
10083 assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
10084 "Splat of a value outside of the loaded memory");
10085 }
10086
10087 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
10088 // For 4-byte load-and-splat, we need Power9.
10089 if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
10090 uint64_t Offset = 0;
10091 if (IsFourByte)
10092 Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
10093 else
10094 Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
10095
10096 // If the width of the load is the same as the width of the splat,
10097 // loading with an offset would load the wrong memory.
10098 if (LD->getValueType(0).getSizeInBits() == (IsFourByte ? 32 : 64))
10099 Offset = 0;
10100
10101 SDValue BasePtr = LD->getBasePtr();
10102 if (Offset != 0)
10103 BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
10104 BasePtr, DAG.getIntPtrConstant(Offset, dl));
10105 SDValue Ops[] = {
10106 LD->getChain(), // Chain
10107 BasePtr, // BasePtr
10108 DAG.getValueType(Op.getValueType()) // VT
10109 };
10110 SDVTList VTL =
10111 DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
10112 SDValue LdSplt =
10113 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
10114 Ops, LD->getMemoryVT(), LD->getMemOperand());
10115 DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1), LdSplt.getValue(1));
10116 if (LdSplt.getValueType() != SVOp->getValueType(0))
10117 LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
10118 return LdSplt;
10119 }
10120 }
10121
10122 // All v2i64 and v2f64 shuffles are legal
10123 if (VT == MVT::v2i64 || VT == MVT::v2f64)
10124 return Op;
10125
10126 if (Subtarget.hasP9Vector() &&
10127 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
10128 isLittleEndian)) {
10129 if (Swap)
10130 std::swap(V1, V2);
10131 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10132 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
10133 if (ShiftElts) {
10134 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
10135 DAG.getConstant(ShiftElts, dl, MVT::i32));
10136 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
10137 DAG.getConstant(InsertAtByte, dl, MVT::i32));
10138 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
10139 }
10140 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
10141 DAG.getConstant(InsertAtByte, dl, MVT::i32));
10142 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
10143 }
10144
10145 if (Subtarget.hasPrefixInstrs()) {
10146 SDValue SplatInsertNode;
10147 if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
10148 return SplatInsertNode;
10149 }
10150
10151 if (Subtarget.hasP9Altivec()) {
10152 SDValue NewISDNode;
10153 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
10154 return NewISDNode;
10155
10156 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
10157 return NewISDNode;
10158 }
10159
10160 if (Subtarget.hasVSX() &&
10161 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
10162 if (Swap)
10163 std::swap(V1, V2);
10164 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10165 SDValue Conv2 =
10166 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
10167
10168 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
10169 DAG.getConstant(ShiftElts, dl, MVT::i32));
10170 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
10171 }
10172
10173 if (Subtarget.hasVSX() &&
10174 PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
10175 if (Swap)
10176 std::swap(V1, V2);
10177 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
10178 SDValue Conv2 =
10179 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
10180
10181 SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
10182 DAG.getConstant(ShiftElts, dl, MVT::i32));
10183 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
10184 }
10185
10186 if (Subtarget.hasP9Vector()) {
10187 if (PPC::isXXBRHShuffleMask(SVOp)) {
10188 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
10189 SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
10190 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
10191 } else if (PPC::isXXBRWShuffleMask(SVOp)) {
10192 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10193 SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
10194 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
10195 } else if (PPC::isXXBRDShuffleMask(SVOp)) {
10196 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
10197 SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
10198 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
10199 } else if (PPC::isXXBRQShuffleMask(SVOp)) {
10200 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
10201 SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
10202 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
10203 }
10204 }
10205
10206 if (Subtarget.hasVSX()) {
10207 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
10208 int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
10209
10210 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10211 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
10212 DAG.getConstant(SplatIdx, dl, MVT::i32));
10213 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
10214 }
10215
10216 // Left shifts of 8 bytes are actually swaps. Convert accordingly.
10217 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
10218 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
10219 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
10220 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
10221 }
10222 }
10223
10224 // Cases that are handled by instructions that take permute immediates
10225 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
10226 // selected by the instruction selector.
10227 if (V2.isUndef()) {
10228 if (PPC::isSplatShuffleMask(SVOp, 1) ||
10229 PPC::isSplatShuffleMask(SVOp, 2) ||
10230 PPC::isSplatShuffleMask(SVOp, 4) ||
10231 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
10232 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
10233 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
10234 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
10235 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
10236 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
10237 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
10238 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
10239 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
10240 (Subtarget.hasP8Altivec() && (
10241 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
10242 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
10243 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
10244 return Op;
10245 }
10246 }
10247
10248 // Altivec has a variety of "shuffle immediates" that take two vector inputs
10249 // and produce a fixed permutation. If any of these match, do not lower to
10250 // VPERM.
10251 unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
10252 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10253 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10254 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
10255 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
10256 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
10257 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
10258 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
10259 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
10260 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
10261 (Subtarget.hasP8Altivec() && (
10262 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10263 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
10264 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
10265 return Op;
10266
10267 // Check to see if this is a shuffle of 4-byte values. If so, we can use our
10268 // perfect shuffle table to emit an optimal matching sequence.
10269 ArrayRef<int> PermMask = SVOp->getMask();
10270
10271 if (!DisablePerfectShuffle && !isLittleEndian) {
10272 unsigned PFIndexes[4];
10273 bool isFourElementShuffle = true;
10274 for (unsigned i = 0; i != 4 && isFourElementShuffle;
10275 ++i) { // Element number
10276 unsigned EltNo = 8; // Start out undef.
10277 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte.
10278 if (PermMask[i * 4 + j] < 0)
10279 continue; // Undef, ignore it.
10280
10281 unsigned ByteSource = PermMask[i * 4 + j];
10282 if ((ByteSource & 3) != j) {
10283 isFourElementShuffle = false;
10284 break;
10285 }
10286
10287 if (EltNo == 8) {
10288 EltNo = ByteSource / 4;
10289 } else if (EltNo != ByteSource / 4) {
10290 isFourElementShuffle = false;
10291 break;
10292 }
10293 }
10294 PFIndexes[i] = EltNo;
10295 }
10296
10297 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
10298 // perfect shuffle vector to determine if it is cost effective to do this as
10299 // discrete instructions, or whether we should use a vperm.
10300 // For now, we skip this for little endian until such time as we have a
10301 // little-endian perfect shuffle table.
10302 if (isFourElementShuffle) {
10303 // Compute the index in the perfect shuffle table.
10304 unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
10305 PFIndexes[2] * 9 + PFIndexes[3];
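// For example, PFIndexes <0,8,2,8> (elements 1 and 3 undef) gives
// PFTableIndex == 0*729 + 8*81 + 2*9 + 8 == 674.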
10306
10307 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
10308 unsigned Cost = (PFEntry >> 30);
10309
10310 // Determining when to avoid vperm is tricky. Many things affect the cost
10311 // of vperm, particularly how many times the perm mask needs to be
10312 // computed. For example, if the perm mask can be hoisted out of a loop or
10313 // is already used (perhaps because there are multiple permutes with the
10314 // same shuffle mask?) the vperm has a cost of 1. OTOH, hoisting the
10315 // permute mask out of the loop requires an extra register.
10316 //
10317 // As a compromise, we only emit discrete instructions if the shuffle can
10318 // be generated in 3 or fewer operations. When we have loop information
10319 // available, if this block is within a loop, we should avoid using vperm
10320 // for 3-operation perms and use a constant pool load instead.
10321 if (Cost < 3)
10322 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
10323 }
10324 }
10325
10326 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
10327 // vector that will get spilled to the constant pool.
10328 if (V2.isUndef()) V2 = V1;
10329
10330 return LowerVPERM(Op, DAG, PermMask, VT, V1, V2);
10331}
10332
10333SDValue PPCTargetLowering::LowerVPERM(SDValue Op, SelectionDAG &DAG,
10334 ArrayRef<int> PermMask, EVT VT,
10335 SDValue V1, SDValue V2) const {
10336 unsigned Opcode = PPCISD::VPERM;
10337 EVT ValType = V1.getValueType();
10338 SDLoc dl(Op);
10339 bool NeedSwap = false;
10340 bool isLittleEndian = Subtarget.isLittleEndian();
10341 bool isPPC64 = Subtarget.isPPC64();
10342
10343 if (Subtarget.hasVSX() && Subtarget.hasP9Vector() &&
10344 (V1->hasOneUse() || V2->hasOneUse())) {
10345 LLVM_DEBUG(dbgs() << "At least one of the two input vectors is dead - "
10346 "using XXPERM instead\n");
10347 Opcode = PPCISD::XXPERM;
10348
10349 // The second input to XXPERM is also an output, so if the second input
10350 // has multiple uses a copy would be required; we therefore want the
10351 // single-use operand to be used as the second input to avoid the copy.
10352 if ((!isLittleEndian && !V2->hasOneUse() && V1->hasOneUse()) ||
10353 (isLittleEndian && !V1->hasOneUse() && V2->hasOneUse())) {
10354 std::swap(V1, V2);
10355 NeedSwap = !NeedSwap;
10356 }
10357 }
10358
10359 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
10360 // that it is in input element units, not in bytes. Convert now.
10361
10362 // For little endian, the order of the input vectors is reversed, and
10363 // the permutation mask is complemented with respect to 31. This is
10364 // necessary to produce proper semantics with the big-endian-based vperm
10365 // instruction.
10366 EVT EltVT = V1.getValueType().getVectorElementType();
10367 unsigned BytesPerElement = EltVT.getSizeInBits() / 8;
10368
10369 bool V1HasXXSWAPD = V1->getOperand(0)->getOpcode() == PPCISD::XXSWAPD;
10370 bool V2HasXXSWAPD = V2->getOperand(0)->getOpcode() == PPCISD::XXSWAPD;
10371
10372 /*
10373 Vectors will be appended like so: [ V1 | V2 ]
10374 XXSWAPD on V1:
10375 [ A | B | C | D ] -> [ C | D | A | B ]
10376 0-3 4-7 8-11 12-15 0-3 4-7 8-11 12-15
10377 i.e. index of A, B += 8, and index of C, D -= 8.
10378 XXSWAPD on V2:
10379 [ E | F | G | H ] -> [ G | H | E | F ]
10380 16-19 20-23 24-27 28-31 16-19 20-23 24-27 28-31
10381 i.e. index of E, F += 8, index of G, H -= 8
10382 Swap V1 and V2:
10383 [ V1 | V2 ] -> [ V2 | V1 ]
10384 0-15 16-31 0-15 16-31
10385 i.e. index of V1 += 16, index of V2 -= 16
10386 */
10387
10388 SmallVector<SDValue, 16> ResultMask;
10389 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
10390 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
10391
10392 if (V1HasXXSWAPD) {
10393 if (SrcElt < 8)
10394 SrcElt += 8;
10395 else if (SrcElt < 16)
10396 SrcElt -= 8;
10397 }
10398 if (V2HasXXSWAPD) {
10399 if (SrcElt > 23)
10400 SrcElt -= 8;
10401 else if (SrcElt > 15)
10402 SrcElt += 8;
10403 }
10404 if (NeedSwap) {
10405 if (SrcElt < 16)
10406 SrcElt += 16;
10407 else
10408 SrcElt -= 16;
10409 }
10410 for (unsigned j = 0; j != BytesPerElement; ++j)
10411 if (isLittleEndian)
10412 ResultMask.push_back(
10413 DAG.getConstant(31 - (SrcElt * BytesPerElement + j), dl, MVT::i32));
10414 else
10415 ResultMask.push_back(
10416 DAG.getConstant(SrcElt * BytesPerElement + j, dl, MVT::i32));
10417 }
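// For example, on little endian with no swaps applied, a v16i8 identity
// mask <0,1,...,15> yields the control vector <31,30,...,16>, compensating
// for the big-endian byte numbering that vperm uses across its two inputs.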
10418
10419 if (V1HasXXSWAPD) {
10420 dl = SDLoc(V1->getOperand(0));
10421 V1 = V1->getOperand(0)->getOperand(1);
10422 }
10423 if (V2HasXXSWAPD) {
10424 dl = SDLoc(V2->getOperand(0));
10425 V2 = V2->getOperand(0)->getOperand(1);
10426 }
10427
10428 if (isPPC64 && (V1HasXXSWAPD || V2HasXXSWAPD)) {
10429 if (ValType != MVT::v2f64)
10430 V1 = DAG.getBitcast(MVT::v2f64, V1);
10431 if (V2.getValueType() != MVT::v2f64)
10432 V2 = DAG.getBitcast(MVT::v2f64, V2);
10433 }
10434
10435 ShufflesHandledWithVPERM++;
10436 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
10437 LLVM_DEBUG({
10438 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10439 if (Opcode == PPCISD::XXPERM) {
10440 dbgs() << "Emitting a XXPERM for the following shuffle:\n";
10441 } else {
10442 dbgs() << "Emitting a VPERM for the following shuffle:\n";
10443 }
10444 SVOp->dump();
10445 dbgs() << "With the following permute control vector:\n";
10446 VPermMask.dump();
10447 });
10448
10449 if (Opcode == PPCISD::XXPERM)
10450 VPermMask = DAG.getBitcast(MVT::v4i32, VPermMask);
10451
10452 // For little endian we only need to swap the two inputs here; the permute
10453 // control vector was already computed to account for this.
10454 if (isLittleEndian)
10455 std::swap(V1, V2);
10456
10457 SDValue VPERMNode =
10458 DAG.getNode(Opcode, dl, V1.getValueType(), V1, V2, VPermMask);
10459
10460 VPERMNode = DAG.getBitcast(ValType, VPERMNode);
10461 return VPERMNode;
10462}
10463
10464/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
10465 /// vector comparison. If it is, return true and fill in CompareOpc/isDot with
10466/// information about the intrinsic.
10467static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
10468 bool &isDot, const PPCSubtarget &Subtarget) {
10469 unsigned IntrinsicID = Intrin.getConstantOperandVal(0);
10470 CompareOpc = -1;
10471 isDot = false;
10472 switch (IntrinsicID) {
10473 default:
10474 return false;
10475 // Comparison predicates.
10476 case Intrinsic::ppc_altivec_vcmpbfp_p:
10477 CompareOpc = 966;
10478 isDot = true;
10479 break;
10480 case Intrinsic::ppc_altivec_vcmpeqfp_p:
10481 CompareOpc = 198;
10482 isDot = true;
10483 break;
10484 case Intrinsic::ppc_altivec_vcmpequb_p:
10485 CompareOpc = 6;
10486 isDot = true;
10487 break;
10488 case Intrinsic::ppc_altivec_vcmpequh_p:
10489 CompareOpc = 70;
10490 isDot = true;
10491 break;
10492 case Intrinsic::ppc_altivec_vcmpequw_p:
10493 CompareOpc = 134;
10494 isDot = true;
10495 break;
10496 case Intrinsic::ppc_altivec_vcmpequd_p:
10497 if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
10498 CompareOpc = 199;
10499 isDot = true;
10500 } else
10501 return false;
10502 break;
10503 case Intrinsic::ppc_altivec_vcmpneb_p:
10504 case Intrinsic::ppc_altivec_vcmpneh_p:
10505 case Intrinsic::ppc_altivec_vcmpnew_p:
10506 case Intrinsic::ppc_altivec_vcmpnezb_p:
10507 case Intrinsic::ppc_altivec_vcmpnezh_p:
10508 case Intrinsic::ppc_altivec_vcmpnezw_p:
10509 if (Subtarget.hasP9Altivec()) {
10510 switch (IntrinsicID) {
10511 default:
10512 llvm_unreachable("Unknown comparison intrinsic.");
10513 case Intrinsic::ppc_altivec_vcmpneb_p:
10514 CompareOpc = 7;
10515 break;
10516 case Intrinsic::ppc_altivec_vcmpneh_p:
10517 CompareOpc = 71;
10518 break;
10519 case Intrinsic::ppc_altivec_vcmpnew_p:
10520 CompareOpc = 135;
10521 break;
10522 case Intrinsic::ppc_altivec_vcmpnezb_p:
10523 CompareOpc = 263;
10524 break;
10525 case Intrinsic::ppc_altivec_vcmpnezh_p:
10526 CompareOpc = 327;
10527 break;
10528 case Intrinsic::ppc_altivec_vcmpnezw_p:
10529 CompareOpc = 391;
10530 break;
10531 }
10532 isDot = true;
10533 } else
10534 return false;
10535 break;
10536 case Intrinsic::ppc_altivec_vcmpgefp_p:
10537 CompareOpc = 454;
10538 isDot = true;
10539 break;
10540 case Intrinsic::ppc_altivec_vcmpgtfp_p:
10541 CompareOpc = 710;
10542 isDot = true;
10543 break;
10544 case Intrinsic::ppc_altivec_vcmpgtsb_p:
10545 CompareOpc = 774;
10546 isDot = true;
10547 break;
10548 case Intrinsic::ppc_altivec_vcmpgtsh_p:
10549 CompareOpc = 838;
10550 isDot = true;
10551 break;
10552 case Intrinsic::ppc_altivec_vcmpgtsw_p:
10553 CompareOpc = 902;
10554 isDot = true;
10555 break;
10556 case Intrinsic::ppc_altivec_vcmpgtsd_p:
10557 if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
10558 CompareOpc = 967;
10559 isDot = true;
10560 } else
10561 return false;
10562 break;
10563 case Intrinsic::ppc_altivec_vcmpgtub_p:
10564 CompareOpc = 518;
10565 isDot = true;
10566 break;
10567 case Intrinsic::ppc_altivec_vcmpgtuh_p:
10568 CompareOpc = 582;
10569 isDot = true;
10570 break;
10571 case Intrinsic::ppc_altivec_vcmpgtuw_p:
10572 CompareOpc = 646;
10573 isDot = true;
10574 break;
10575 case Intrinsic::ppc_altivec_vcmpgtud_p:
10576 if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
10577 CompareOpc = 711;
10578 isDot = true;
10579 } else
10580 return false;
10581 break;
10582
10583 case Intrinsic::ppc_altivec_vcmpequq:
10584 case Intrinsic::ppc_altivec_vcmpgtsq:
10585 case Intrinsic::ppc_altivec_vcmpgtuq:
10586 if (!Subtarget.isISA3_1())
10587 return false;
10588 switch (IntrinsicID) {
10589 default:
10590 llvm_unreachable("Unknown comparison intrinsic.");
10591 case Intrinsic::ppc_altivec_vcmpequq:
10592 CompareOpc = 455;
10593 break;
10594 case Intrinsic::ppc_altivec_vcmpgtsq:
10595 CompareOpc = 903;
10596 break;
10597 case Intrinsic::ppc_altivec_vcmpgtuq:
10598 CompareOpc = 647;
10599 break;
10600 }
10601 break;
10602
10603 // VSX predicate comparisons use the same infrastructure
10604 case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10605 case Intrinsic::ppc_vsx_xvcmpgedp_p:
10606 case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10607 case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10608 case Intrinsic::ppc_vsx_xvcmpgesp_p:
10609 case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10610 if (Subtarget.hasVSX()) {
10611 switch (IntrinsicID) {
10612 case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10613 CompareOpc = 99;
10614 break;
10615 case Intrinsic::ppc_vsx_xvcmpgedp_p:
10616 CompareOpc = 115;
10617 break;
10618 case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10619 CompareOpc = 107;
10620 break;
10621 case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10622 CompareOpc = 67;
10623 break;
10624 case Intrinsic::ppc_vsx_xvcmpgesp_p:
10625 CompareOpc = 83;
10626 break;
10627 case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10628 CompareOpc = 75;
10629 break;
10630 }
10631 isDot = true;
10632 } else
10633 return false;
10634 break;
10635
10636 // Normal Comparisons.
10637 case Intrinsic::ppc_altivec_vcmpbfp:
10638 CompareOpc = 966;
10639 break;
10640 case Intrinsic::ppc_altivec_vcmpeqfp:
10641 CompareOpc = 198;
10642 break;
10643 case Intrinsic::ppc_altivec_vcmpequb:
10644 CompareOpc = 6;
10645 break;
10646 case Intrinsic::ppc_altivec_vcmpequh:
10647 CompareOpc = 70;
10648 break;
10649 case Intrinsic::ppc_altivec_vcmpequw:
10650 CompareOpc = 134;
10651 break;
10652 case Intrinsic::ppc_altivec_vcmpequd:
10653 if (Subtarget.hasP8Altivec())
10654 CompareOpc = 199;
10655 else
10656 return false;
10657 break;
10658 case Intrinsic::ppc_altivec_vcmpneb:
10659 case Intrinsic::ppc_altivec_vcmpneh:
10660 case Intrinsic::ppc_altivec_vcmpnew:
10661 case Intrinsic::ppc_altivec_vcmpnezb:
10662 case Intrinsic::ppc_altivec_vcmpnezh:
10663 case Intrinsic::ppc_altivec_vcmpnezw:
10664 if (Subtarget.hasP9Altivec())
10665 switch (IntrinsicID) {
10666 default:
10667 llvm_unreachable("Unknown comparison intrinsic.");
10668 case Intrinsic::ppc_altivec_vcmpneb:
10669 CompareOpc = 7;
10670 break;
10671 case Intrinsic::ppc_altivec_vcmpneh:
10672 CompareOpc = 71;
10673 break;
10674 case Intrinsic::ppc_altivec_vcmpnew:
10675 CompareOpc = 135;
10676 break;
10677 case Intrinsic::ppc_altivec_vcmpnezb:
10678 CompareOpc = 263;
10679 break;
10680 case Intrinsic::ppc_altivec_vcmpnezh:
10681 CompareOpc = 327;
10682 break;
10683 case Intrinsic::ppc_altivec_vcmpnezw:
10684 CompareOpc = 391;
10685 break;
10686 }
10687 else
10688 return false;
10689 break;
10690 case Intrinsic::ppc_altivec_vcmpgefp:
10691 CompareOpc = 454;
10692 break;
10693 case Intrinsic::ppc_altivec_vcmpgtfp:
10694 CompareOpc = 710;
10695 break;
10696 case Intrinsic::ppc_altivec_vcmpgtsb:
10697 CompareOpc = 774;
10698 break;
10699 case Intrinsic::ppc_altivec_vcmpgtsh:
10700 CompareOpc = 838;
10701 break;
10702 case Intrinsic::ppc_altivec_vcmpgtsw:
10703 CompareOpc = 902;
10704 break;
10705 case Intrinsic::ppc_altivec_vcmpgtsd:
10706 if (Subtarget.hasP8Altivec())
10707 CompareOpc = 967;
10708 else
10709 return false;
10710 break;
10711 case Intrinsic::ppc_altivec_vcmpgtub:
10712 CompareOpc = 518;
10713 break;
10714 case Intrinsic::ppc_altivec_vcmpgtuh:
10715 CompareOpc = 582;
10716 break;
10717 case Intrinsic::ppc_altivec_vcmpgtuw:
10718 CompareOpc = 646;
10719 break;
10720 case Intrinsic::ppc_altivec_vcmpgtud:
10721 if (Subtarget.hasP8Altivec())
10722 CompareOpc = 711;
10723 else
10724 return false;
10725 break;
10726 case Intrinsic::ppc_altivec_vcmpequq_p:
10727 case Intrinsic::ppc_altivec_vcmpgtsq_p:
10728 case Intrinsic::ppc_altivec_vcmpgtuq_p:
10729 if (!Subtarget.isISA3_1())
10730 return false;
10731 switch (IntrinsicID) {
10732 default:
10733 llvm_unreachable("Unknown comparison intrinsic.");
10734 case Intrinsic::ppc_altivec_vcmpequq_p:
10735 CompareOpc = 455;
10736 break;
10737 case Intrinsic::ppc_altivec_vcmpgtsq_p:
10738 CompareOpc = 903;
10739 break;
10740 case Intrinsic::ppc_altivec_vcmpgtuq_p:
10741 CompareOpc = 647;
10742 break;
10743 }
10744 isDot = true;
10745 break;
10746 }
10747 return true;
10748}
10749
10750/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
10751/// lower, do it, otherwise return null.
10752SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
10753 SelectionDAG &DAG) const {
10754 unsigned IntrinsicID = Op.getConstantOperandVal(0);
10755
10756 SDLoc dl(Op);
10757
10758 switch (IntrinsicID) {
10759 case Intrinsic::thread_pointer:
10760 // Reads the thread pointer register, used for __builtin_thread_pointer.
10761 if (Subtarget.isPPC64())
10762 return DAG.getRegister(PPC::X13, MVT::i64);
10763 return DAG.getRegister(PPC::R2, MVT::i32);
10764
10765 case Intrinsic::ppc_rldimi: {
10766 uint64_t SH = Op.getConstantOperandVal(3);
10767 unsigned MB = 0, ME = 0;
10768 if (!isRunOfOnes64(Op.getConstantOperandVal(4), MB, ME) || ME != 63 - SH)
10769 report_fatal_error("invalid rldimi mask!");
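// For example, SH == 32 with mask 0xFFFFFFFF00000000 yields MB == 0 and
// ME == 31 == 63 - SH: the rotated value replaces the high 32 bits.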
10770 return SDValue(DAG.getMachineNode(
10771 PPC::RLDIMI, dl, MVT::i64,
10772 {Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
10773 DAG.getTargetConstant(MB, dl, MVT::i32)}),
10774 0);
10775 }
10776
10777 case Intrinsic::ppc_rlwimi: {
10778 unsigned MB = 0, ME = 0;
10779 if (!isRunOfOnes(Op.getConstantOperandVal(4), MB, ME))
10780 report_fatal_error("invalid rlwimi mask!");
10781 return SDValue(DAG.getMachineNode(
10782 PPC::RLWIMI, dl, MVT::i32,
10783 {Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
10784 DAG.getTargetConstant(MB, dl, MVT::i32),
10785 DAG.getTargetConstant(ME, dl, MVT::i32)}),
10786 0);
10787 }
10788
10789 case Intrinsic::ppc_rlwnm: {
10790 unsigned MB = 0, ME = 0;
10791 if (!isRunOfOnes(Op.getConstantOperandVal(3), MB, ME))
10792 report_fatal_error("invalid rlwnm mask!");
10793 return SDValue(
10794 DAG.getMachineNode(PPC::RLWNM, dl, MVT::i32,
10795 {Op.getOperand(1), Op.getOperand(2),
10796 DAG.getTargetConstant(MB, dl, MVT::i32),
10797 DAG.getTargetConstant(ME, dl, MVT::i32)}),
10798 0);
10799 }
10800
10801 case Intrinsic::ppc_mma_disassemble_acc: {
10802 if (Subtarget.isISAFuture()) {
10803 EVT ReturnTypes[] = {MVT::v256i1, MVT::v256i1};
10804 SDValue WideVec = SDValue(DAG.getMachineNode(PPC::DMXXEXTFDMR512, dl,
10805 ArrayRef(ReturnTypes, 2),
10806 Op.getOperand(1)),
10807 0);
10808 SmallVector<SDValue, 4> RetOps;
10809 SDValue Value = SDValue(WideVec.getNode(), 0);
10810 SDValue Value2 = SDValue(WideVec.getNode(), 1);
10811
10812 SDValue Extract;
10813 Extract = DAG.getNode(
10814 PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8,
10815 Subtarget.isLittleEndian() ? Value2 : Value,
10816 DAG.getConstant(Subtarget.isLittleEndian() ? 1 : 0,
10817 dl, getPointerTy(DAG.getDataLayout())));
10818 RetOps.push_back(Extract);
10819 Extract = DAG.getNode(
10820 PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8,
10821 Subtarget.isLittleEndian() ? Value2 : Value,
10822 DAG.getConstant(Subtarget.isLittleEndian() ? 0 : 1,
10823 dl, getPointerTy(DAG.getDataLayout())));
10824 RetOps.push_back(Extract);
10825 Extract = DAG.getNode(
10826 PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8,
10827 Subtarget.isLittleEndian() ? Value : Value2,
10828 DAG.getConstant(Subtarget.isLittleEndian() ? 1 : 0,
10829 dl, getPointerTy(DAG.getDataLayout())));
10830 RetOps.push_back(Extract);
10831 Extract = DAG.getNode(
10832 PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8,
10833 Subtarget.isLittleEndian() ? Value : Value2,
10834 DAG.getConstant(Subtarget.isLittleEndian() ? 0 : 1,
10835 dl, getPointerTy(DAG.getDataLayout())));
10836 RetOps.push_back(Extract);
10837 return DAG.getMergeValues(RetOps, dl);
10838 }
10839 [[fallthrough]];
10840 }
10841 case Intrinsic::ppc_vsx_disassemble_pair: {
10842 int NumVecs = 2;
10843 SDValue WideVec = Op.getOperand(1);
10844 if (IntrinsicID == Intrinsic::ppc_mma_disassemble_acc) {
10845 NumVecs = 4;
10846 WideVec = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, WideVec);
10847 }
10848 SmallVector<SDValue, 4> RetOps;
10849 for (int VecNo = 0; VecNo < NumVecs; VecNo++) {
10850 SDValue Extract = DAG.getNode(
10851 PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, WideVec,
10852 DAG.getConstant(Subtarget.isLittleEndian() ? NumVecs - 1 - VecNo
10853 : VecNo,
10854 dl, getPointerTy(DAG.getDataLayout())));
10855 RetOps.push_back(Extract);
10856 }
10857 return DAG.getMergeValues(RetOps, dl);
10858 }
10859
10860 case Intrinsic::ppc_mma_xxmfacc:
10861 case Intrinsic::ppc_mma_xxmtacc: {
10862 // Allow pre-isa-future subtargets to lower as normal.
10863 if (!Subtarget.isISAFuture())
10864 return SDValue();
10865 // The intrinsics for xxmtacc and xxmfacc take one argument of
10866 // type v512i1. For future CPUs the corresponding wacc instruction
10867 // dmxx[inst|extf]dmr512 is always generated for type v512i1, removing
10868 // the need to produce the xxm[t|f]acc.
10869 SDValue WideVec = Op.getOperand(1);
10870 DAG.ReplaceAllUsesWith(Op, WideVec);
10871 return SDValue();
10872 }
10873
10874 case Intrinsic::ppc_unpack_longdouble: {
10875 auto *Idx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10876 assert(Idx && (Idx->getSExtValue() == 0 || Idx->getSExtValue() == 1) &&
10877 "Argument of long double unpack must be 0 or 1!");
10878 return DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Op.getOperand(1),
10879 DAG.getConstant(!!(Idx->getSExtValue()), dl,
10880 Idx->getValueType(0)));
10881 }
10882
10883 case Intrinsic::ppc_compare_exp_lt:
10884 case Intrinsic::ppc_compare_exp_gt:
10885 case Intrinsic::ppc_compare_exp_eq:
10886 case Intrinsic::ppc_compare_exp_uo: {
10887 unsigned Pred;
10888 switch (IntrinsicID) {
10889 case Intrinsic::ppc_compare_exp_lt:
10890 Pred = PPC::PRED_LT;
10891 break;
10892 case Intrinsic::ppc_compare_exp_gt:
10893 Pred = PPC::PRED_GT;
10894 break;
10895 case Intrinsic::ppc_compare_exp_eq:
10896 Pred = PPC::PRED_EQ;
10897 break;
10898 case Intrinsic::ppc_compare_exp_uo:
10899 Pred = PPC::PRED_UN;
10900 break;
10901 }
10902 return SDValue(
10903 DAG.getMachineNode(
10904 PPC::SELECT_CC_I4, dl, MVT::i32,
10905 {SDValue(DAG.getMachineNode(PPC::XSCMPEXPDP, dl, MVT::i32,
10906 Op.getOperand(1), Op.getOperand(2)),
10907 0),
10908 DAG.getConstant(1, dl, MVT::i32), DAG.getConstant(0, dl, MVT::i32),
10909 DAG.getTargetConstant(Pred, dl, MVT::i32)}),
10910 0);
10911 }
10912 case Intrinsic::ppc_test_data_class: {
10913 EVT OpVT = Op.getOperand(1).getValueType();
10914 unsigned CmprOpc = OpVT == MVT::f128 ? PPC::XSTSTDCQP
10915 : (OpVT == MVT::f64 ? PPC::XSTSTDCDP
10916 : PPC::XSTSTDCSP);
10917 return SDValue(
10918 DAG.getMachineNode(
10919 PPC::SELECT_CC_I4, dl, MVT::i32,
10920 {SDValue(DAG.getMachineNode(CmprOpc, dl, MVT::i32, Op.getOperand(2),
10921 Op.getOperand(1)),
10922 0),
10923 DAG.getConstant(1, dl, MVT::i32), DAG.getConstant(0, dl, MVT::i32),
10924 DAG.getTargetConstant(PPC::PRED_EQ, dl, MVT::i32)}),
10925 0);
10926 }
10927 case Intrinsic::ppc_fnmsub: {
10928 EVT VT = Op.getOperand(1).getValueType();
10929 if (!Subtarget.hasVSX() || (!Subtarget.hasFloat128() && VT == MVT::f128))
10930 return DAG.getNode(
10931 ISD::FNEG, dl, VT,
10932 DAG.getNode(ISD::FMA, dl, VT, Op.getOperand(1), Op.getOperand(2),
10933 DAG.getNode(ISD::FNEG, dl, VT, Op.getOperand(3))));
10934 return DAG.getNode(PPCISD::FNMSUB, dl, VT, Op.getOperand(1),
10935 Op.getOperand(2), Op.getOperand(3));
10936 }
10937 case Intrinsic::ppc_convert_f128_to_ppcf128:
10938 case Intrinsic::ppc_convert_ppcf128_to_f128: {
10939 RTLIB::Libcall LC = IntrinsicID == Intrinsic::ppc_convert_ppcf128_to_f128
10940 ? RTLIB::CONVERT_PPCF128_F128
10941 : RTLIB::CONVERT_F128_PPCF128;
10942 MakeLibCallOptions CallOptions;
10943 std::pair<SDValue, SDValue> Result =
10944 makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(1), CallOptions,
10945 dl, SDValue());
10946 return Result.first;
10947 }
10948 case Intrinsic::ppc_maxfe:
10949 case Intrinsic::ppc_maxfl:
10950 case Intrinsic::ppc_maxfs:
10951 case Intrinsic::ppc_minfe:
10952 case Intrinsic::ppc_minfl:
10953 case Intrinsic::ppc_minfs: {
10954 EVT VT = Op.getValueType();
10955 assert(
10956 all_of(Op->ops().drop_front(4),
10957 [VT](const SDUse &Use) { return Use.getValueType() == VT; }) &&
10958 "ppc_[max|min]f[e|l|s] must have uniform type arguments");
10959 (void)VT;
10960 ISD::CondCode CC = ISD::SETGT;
10961 if (IntrinsicID == Intrinsic::ppc_minfe ||
10962 IntrinsicID == Intrinsic::ppc_minfl ||
10963 IntrinsicID == Intrinsic::ppc_minfs)
10964 CC = ISD::SETLT;
10965 unsigned I = Op.getNumOperands() - 2, Cnt = I;
10966 SDValue Res = Op.getOperand(I);
10967 for (--I; Cnt != 0; --Cnt, I = (--I == 0 ? (Op.getNumOperands() - 1) : I)) {
10968 Res =
10969 DAG.getSelectCC(dl, Res, Op.getOperand(I), Res, Op.getOperand(I), CC);
10970 }
10971 return Res;
10972 }
10973 }
10974
10975 // If this is a lowered altivec predicate compare, CompareOpc is set to the
10976 // opcode number of the comparison.
10977 int CompareOpc;
10978 bool isDot;
10979 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
10980 return SDValue(); // Don't custom lower most intrinsics.
10981
10982 // If this is a non-dot comparison, make the VCMP node and we are done.
10983 if (!isDot) {
10984 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
10985 Op.getOperand(1), Op.getOperand(2),
10986 DAG.getConstant(CompareOpc, dl, MVT::i32));
10987 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
10988 }
10989
10990 // Create the PPCISD altivec 'dot' comparison node.
10991 SDValue Ops[] = {
10992 Op.getOperand(2), // LHS
10993 Op.getOperand(3), // RHS
10994 DAG.getConstant(CompareOpc, dl, MVT::i32)
10995 };
10996 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
10997 SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
10998
10999 // Now that we have the comparison, emit a copy from the CR to a GPR.
11000 // This is flagged to the above dot comparison.
11001 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
11002 DAG.getRegister(PPC::CR6, MVT::i32),
11003 CompNode.getValue(1));
11004
11005 // Unpack the result based on how the target uses it.
11006 unsigned BitNo; // Bit # of CR6.
11007 bool InvertBit; // Invert result?
11008 switch (Op.getConstantOperandVal(1)) {
11009 default: // Can't happen, don't crash on invalid number though.
11010 case 0: // Return the value of the EQ bit of CR6.
11011 BitNo = 0; InvertBit = false;
11012 break;
11013 case 1: // Return the inverted value of the EQ bit of CR6.
11014 BitNo = 0; InvertBit = true;
11015 break;
11016 case 2: // Return the value of the LT bit of CR6.
11017 BitNo = 2; InvertBit = false;
11018 break;
11019 case 3: // Return the inverted value of the LT bit of CR6.
11020 BitNo = 2; InvertBit = true;
11021 break;
11022 }
11023
11024 // Shift the bit into the low position.
11025 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
11026 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
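// After MFOCRF, the CR6 field occupies bits 4-7 of the GPR (LT in bit 7,
// GT in 6, EQ in 5, SO in 4), so BitNo == 0 (EQ) shifts by 5 and
// BitNo == 2 (LT) shifts by 7.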
11027 // Isolate the bit.
11028 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
11029 DAG.getConstant(1, dl, MVT::i32));
11030
11031 // If we are supposed to, toggle the bit.
11032 if (InvertBit)
11033 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
11034 DAG.getConstant(1, dl, MVT::i32));
11035 return Flags;
11036}
11037
11038SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
11039 SelectionDAG &DAG) const {
11040 // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
11041 // the beginning of the argument list.
11042 int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
11043 SDLoc DL(Op);
11044 switch (Op.getConstantOperandVal(ArgStart)) {
11045 case Intrinsic::ppc_cfence: {
11046 assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
11047 SDValue Val = Op.getOperand(ArgStart + 1);
11048 EVT Ty = Val.getValueType();
11049 if (Ty == MVT::i128) {
11050 // FIXME: Testing one of two paired registers is sufficient to guarantee
11051 // ordering?
11052 Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, Val);
11053 }
11054 unsigned Opcode = Subtarget.isPPC64() ? PPC::CFENCE8 : PPC::CFENCE;
11055 EVT FTy = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
11056 return SDValue(
11057 DAG.getMachineNode(Opcode, DL, MVT::Other,
11058 DAG.getNode(ISD::ANY_EXTEND, DL, FTy, Val),
11059 Op.getOperand(0)),
11060 0);
11061 }
11062 default:
11063 break;
11064 }
11065 return SDValue();
11066}
11067
11068// Lower scalar BSWAP64 to xxbrd.
11069SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
11070 SDLoc dl(Op);
11071 if (!Subtarget.isPPC64())
11072 return Op;
11073 // MTVSRDD
11074 Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
11075 Op.getOperand(0));
11076 // XXBRD
11077 Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
11078 // MFVSRD
11079 int VectorIndex = 0;
11080 if (Subtarget.isLittleEndian())
11081 VectorIndex = 1;
11082 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
11083 DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
11084 return Op;
11085}
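// A sketch of the sequence this lowers to for a scalar i64 bswap (register
// choices are illustrative and assume a little-endian PPC64 target with the
// required vector support, as hinted by the comments above):
//   mtvsrdd vs0, r3, r3   ; BUILD_VECTOR: splat the GPR into both doublewords
//   xxbrd   vs0, vs0      ; v2i64 BSWAP: byte-reverse each doubleword
//   mfvsrd  r3, vs0       ; EXTRACT_VECTOR_ELT: move the result back to a GPR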
11086
11087// ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
11088// compared to a value that is atomically loaded (atomic loads zero-extend).
11089SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
11090 SelectionDAG &DAG) const {
11091 assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
11092 "Expecting an atomic compare-and-swap here.");
11093 SDLoc dl(Op);
11094 auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
11095 EVT MemVT = AtomicNode->getMemoryVT();
11096 if (MemVT.getSizeInBits() >= 32)
11097 return Op;
11098
11099 SDValue CmpOp = Op.getOperand(2);
11100 // If this is already correctly zero-extended, leave it alone.
11101 auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
11102 if (DAG.MaskedValueIsZero(CmpOp, HighBits))
11103 return Op;
11104
11105 // Clear the high bits of the compare operand.
11106 unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
11107 SDValue NewCmpOp =
11108 DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
11109 DAG.getConstant(MaskVal, dl, MVT::i32));
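// For illustration: an i8 cmpxchg uses MaskVal == (1 << 8) - 1 == 0xFF and an
// i16 one uses 0xFFFF, so only the memory-width bits of the expected value
// take part in the comparison.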
11110
11111 // Replace the existing compare operand with the properly zero-extended one.
11112 SmallVector<SDValue, 4> Ops;
11113 for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
11114 Ops.push_back(AtomicNode->getOperand(i));
11115 Ops[2] = NewCmpOp;
11116 MachineMemOperand *MMO = AtomicNode->getMemOperand();
11117 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
11118 auto NodeTy =
11119 (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
11120 return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
11121}
11122
11123SDValue PPCTargetLowering::LowerATOMIC_LOAD_STORE(SDValue Op,
11124 SelectionDAG &DAG) const {
11125 AtomicSDNode *N = cast<AtomicSDNode>(Op.getNode());
11126 EVT MemVT = N->getMemoryVT();
11127 assert(MemVT.getSimpleVT() == MVT::i128 &&
11128 "Expect quadword atomic operations");
11129 SDLoc dl(N);
11130 unsigned Opc = N->getOpcode();
11131 switch (Opc) {
11132 case ISD::ATOMIC_LOAD: {
11133 // Lower quadword atomic load to int_ppc_atomic_load_i128, which will be
11134 // lowered to PPC instructions by the pattern-matching instruction selector.
11135 SDVTList Tys = DAG.getVTList(MVT::i64, MVT::i64, MVT::Other);
11136 SmallVector<SDValue, 4> Ops{
11137 N->getOperand(0),
11138 DAG.getConstant(Intrinsic::ppc_atomic_load_i128, dl, MVT::i32)};
11139 for (int I = 1, E = N->getNumOperands(); I < E; ++I)
11140 Ops.push_back(N->getOperand(I));
11141 SDValue LoadedVal = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, Tys,
11142 Ops, MemVT, N->getMemOperand());
11143 SDValue ValLo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i128, LoadedVal);
11144 SDValue ValHi =
11145 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i128, LoadedVal.getValue(1));
11146 ValHi = DAG.getNode(ISD::SHL, dl, MVT::i128, ValHi,
11147 DAG.getConstant(64, dl, MVT::i32));
11148 SDValue Val =
11149 DAG.getNode(ISD::OR, dl, {MVT::i128, MVT::Other}, {ValLo, ValHi});
11150 return DAG.getNode(ISD::MERGE_VALUES, dl, {MVT::i128, MVT::Other},
11151 {Val, LoadedVal.getValue(2)});
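// In effect, the nodes above compute Val == zext(ValLo) | (zext(ValHi) << 64),
// reassembling the two i64 halves returned by the intrinsic into the i128
// result.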
11152 }
11153 case ISD::ATOMIC_STORE: {
11154 // Lower quadword atomic store to int_ppc_atomic_store_i128, which will be
11155 // lowered to PPC instructions by the pattern-matching instruction selector.
11156 SDVTList Tys = DAG.getVTList(MVT::Other);
11157 SmallVector<SDValue, 4> Ops{
11158 N->getOperand(0),
11159 DAG.getConstant(Intrinsic::ppc_atomic_store_i128, dl, MVT::i32)};
11160 SDValue Val = N->getOperand(1);
11161 SDValue ValLo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i64, Val);
11162 SDValue ValHi = DAG.getNode(ISD::SRL, dl, MVT::i128, Val,
11163 DAG.getConstant(64, dl, MVT::i32));
11164 ValHi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i64, ValHi);
11165 Ops.push_back(ValLo);
11166 Ops.push_back(ValHi);
11167 Ops.push_back(N->getOperand(2));
11168 return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, dl, Tys, Ops, MemVT,
11169 N->getMemOperand());
11170 }
11171 default:
11172 llvm_unreachable("Unexpected atomic opcode");
11173 }
11174}
11175
11176 static SDValue getDataClassTest(SDValue Op, FPClassTest Mask, const SDLoc &Dl,
11177 SelectionDAG &DAG,
11178 const PPCSubtarget &Subtarget) {
11179 assert(Mask <= fcAllFlags && "Invalid fp_class flags!");
11180
11181 enum DataClassMask {
11182 DC_NAN = 1 << 6,
11183 DC_NEG_INF = 1 << 4,
11184 DC_POS_INF = 1 << 5,
11185 DC_NEG_ZERO = 1 << 2,
11186 DC_POS_ZERO = 1 << 3,
11187 DC_NEG_SUBNORM = 1,
11188 DC_POS_SUBNORM = 1 << 1,
11189 };
11190
11191 EVT VT = Op.getValueType();
11192
11193 unsigned TestOp = VT == MVT::f128 ? PPC::XSTSTDCQP
11194 : VT == MVT::f64 ? PPC::XSTSTDCDP
11195 : PPC::XSTSTDCSP;
11196
11197 if (Mask == fcAllFlags)
11198 return DAG.getBoolConstant(true, Dl, MVT::i1, VT);
11199 if (Mask == 0)
11200 return DAG.getBoolConstant(false, Dl, MVT::i1, VT);
11201
11202 // Handle cases where it is cheaper or necessary to test the inverted flags.
11203 if ((Mask & fcNormal) == fcNormal || Mask == ~fcQNan || Mask == ~fcSNan) {
11204 SDValue Rev = getDataClassTest(Op, ~Mask, Dl, DAG, Subtarget);
11205 return DAG.getNOT(Dl, Rev, MVT::i1);
11206 }
11207
11208 // Power doesn't directly support testing whether a value is 'normal'. Test the
11209 // other classes first, then check for 'not not-normal' with the expected sign.
11210 if (Mask & fcNormal) {
11211 SDValue Rev(DAG.getMachineNode(
11212 TestOp, Dl, MVT::i32,
11213 DAG.getTargetConstant(DC_NAN | DC_NEG_INF | DC_POS_INF |
11214 DC_NEG_ZERO | DC_POS_ZERO |
11215 DC_NEG_SUBNORM | DC_POS_SUBNORM,
11216 Dl, MVT::i32),
11217 Op),
11218 0);
11219 // The sign is stored in CR bit 0 (LT); the result is in CR bit 2 (EQ).
11220 SDValue Sign(
11221 DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, Dl, MVT::i1, Rev,
11222 DAG.getTargetConstant(PPC::sub_lt, Dl, MVT::i32)),
11223 0);
11224 SDValue Normal(DAG.getNOT(
11225 Dl,
11226 SDValue(DAG.getMachineNode(
11227 TargetOpcode::EXTRACT_SUBREG, Dl, MVT::i1, Rev,
11228 DAG.getTargetConstant(PPC::sub_eq, Dl, MVT::i32)),
11229 0),
11230 MVT::i1));
11231 if (Mask & fcPosNormal)
11232 Sign = DAG.getNOT(Dl, Sign, MVT::i1);
11233 SDValue Result = DAG.getNode(ISD::AND, Dl, MVT::i1, Sign, Normal);
11234 if (Mask == fcPosNormal || Mask == fcNegNormal)
11235 return Result;
11236
11237 return DAG.getNode(
11238 ISD::OR, Dl, MVT::i1,
11239 getDataClassTest(Op, Mask & ~fcNormal, Dl, DAG, Subtarget), Result);
11240 }
11241
11242 // The instruction doesn't differentiate between signaling and quiet NaNs. Test
11243 // the rest first, then test whether it 'is NaN and is signaling/quiet'.
11244 if ((Mask & fcNan) == fcQNan || (Mask & fcNan) == fcSNan) {
11245 bool IsQuiet = Mask & fcQNan;
11246 SDValue NanCheck = getDataClassTest(Op, fcNan, Dl, DAG, Subtarget);
11247
11248 // Quietness is determined by the most significant bit of the fraction field.
11249 uint64_t QuietMask = 0;
11250 SDValue HighWord;
11251 if (VT == MVT::f128) {
11252 HighWord = DAG.getNode(
11253 ISD::EXTRACT_VECTOR_ELT, Dl, MVT::i32, DAG.getBitcast(MVT::v4i32, Op),
11254 DAG.getVectorIdxConstant(Subtarget.isLittleEndian() ? 3 : 0, Dl));
11255 QuietMask = 0x8000;
11256 } else if (VT == MVT::f64) {
11257 if (Subtarget.isPPC64()) {
11258 HighWord = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32,
11259 DAG.getBitcast(MVT::i64, Op),
11260 DAG.getConstant(1, Dl, MVT::i32));
11261 } else {
11262 SDValue Vec = DAG.getBitcast(
11263 MVT::v4i32, DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v2f64, Op));
11264 HighWord = DAG.getNode(
11265 ISD::EXTRACT_VECTOR_ELT, Dl, MVT::i32, Vec,
11266 DAG.getVectorIdxConstant(Subtarget.isLittleEndian() ? 1 : 0, Dl));
11267 }
11268 QuietMask = 0x80000;
11269 } else if (VT == MVT::f32) {
11270 HighWord = DAG.getBitcast(MVT::i32, Op);
11271 QuietMask = 0x400000;
11272 }
11273 SDValue NanRes = DAG.getSetCC(
11274 Dl, MVT::i1,
11275 DAG.getNode(ISD::AND, Dl, MVT::i32, HighWord,
11276 DAG.getConstant(QuietMask, Dl, MVT::i32)),
11277 DAG.getConstant(0, Dl, MVT::i32), IsQuiet ? ISD::SETNE : ISD::SETEQ);
11278 NanRes = DAG.getNode(ISD::AND, Dl, MVT::i1, NanCheck, NanRes);
11279 if (Mask == fcQNan || Mask == fcSNan)
11280 return NanRes;
11281
11282 return DAG.getNode(ISD::OR, Dl, MVT::i1,
11283 getDataClassTest(Op, Mask & ~fcNan, Dl, DAG, Subtarget),
11284 NanRes);
11285 }
11286
11287 unsigned NativeMask = 0;
11288 if ((Mask & fcNan) == fcNan)
11289 NativeMask |= DC_NAN;
11290 if (Mask & fcNegInf)
11291 NativeMask |= DC_NEG_INF;
11292 if (Mask & fcPosInf)
11293 NativeMask |= DC_POS_INF;
11294 if (Mask & fcNegZero)
11295 NativeMask |= DC_NEG_ZERO;
11296 if (Mask & fcPosZero)
11297 NativeMask |= DC_POS_ZERO;
11298 if (Mask & fcNegSubnormal)
11299 NativeMask |= DC_NEG_SUBNORM;
11300 if (Mask & fcPosSubnormal)
11301 NativeMask |= DC_POS_SUBNORM;
11302 return SDValue(
11303 DAG.getMachineNode(
11304 TargetOpcode::EXTRACT_SUBREG, Dl, MVT::i1,
11305 SDValue(DAG.getMachineNode(
11306 TestOp, Dl, MVT::i32,
11307 DAG.getTargetConstant(NativeMask, Dl, MVT::i32), Op),
11308 0),
11309 DAG.getTargetConstant(PPC::sub_eq, Dl, MVT::i32)),
11310 0);
11311}
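// Illustrative query (not from the original source): is_fpclass(x,
// fcPosInf | fcNegInf) on an f64 reaches this tail and emits a single
// XSTSTDCDP with NativeMask == DC_NEG_INF | DC_POS_INF == 0x30, then
// extracts the EQ bit of the resulting CR field.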
11312
11313SDValue PPCTargetLowering::LowerIS_FPCLASS(SDValue Op,
11314 SelectionDAG &DAG) const {
11315 assert(Subtarget.hasP9Vector() && "Test data class requires Power9");
11316 SDValue LHS = Op.getOperand(0);
11317 uint64_t RHSC = Op.getConstantOperandVal(1);
11318 SDLoc Dl(Op);
11319 FPClassTest Category = static_cast<FPClassTest>(RHSC);
11320 return getDataClassTest(LHS, Category, Dl, DAG, Subtarget);
11321}
11322
11323SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
11324 SelectionDAG &DAG) const {
11325 SDLoc dl(Op);
11326 // Create a stack slot that is 16-byte aligned.
11327 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
11328 int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
11329 EVT PtrVT = getPointerTy(DAG.getDataLayout());
11330 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
11331
11332 // Store the input value into Value#0 of the stack slot.
11333 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
11334 MachinePointerInfo());
11335 // Load it out.
11336 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
11337}
11338
11339SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
11340 SelectionDAG &DAG) const {
11341 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
11342 "Should only be called for ISD::INSERT_VECTOR_ELT");
11343
11344 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
11345
11346 EVT VT = Op.getValueType();
11347 SDLoc dl(Op);
11348 SDValue V1 = Op.getOperand(0);
11349 SDValue V2 = Op.getOperand(1);
11350
11351 if (VT == MVT::v2f64 && C)
11352 return Op;
11353
11354 if (Subtarget.hasP9Vector()) {
11355 // An f32 load feeding into a v4f32 insert_vector_elt is handled in this way
11356 // because on P10, it allows this specific insert_vector_elt load pattern to
11357 // utilize the refactored load and store infrastructure in order to exploit
11358 // prefixed loads.
11359 // On targets with inexpensive direct moves (Power9 and up), a
11360 // (insert_vector_elt v4f32:$vec, (f32 load)) is always better as an integer
11361 // load since a single precision load will involve conversion to double
11362 // precision on the load followed by another conversion to single precision.
11363 if ((VT == MVT::v4f32) && (V2.getValueType() == MVT::f32) &&
11364 (isa<LoadSDNode>(V2))) {
11365 SDValue BitcastVector = DAG.getBitcast(MVT::v4i32, V1);
11366 SDValue BitcastLoad = DAG.getBitcast(MVT::i32, V2);
11367 SDValue InsVecElt =
11368 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4i32, BitcastVector,
11369 BitcastLoad, Op.getOperand(2));
11370 return DAG.getBitcast(MVT::v4f32, InsVecElt);
11371 }
11372 }
11373
11374 if (Subtarget.isISA3_1()) {
11375 if ((VT == MVT::v2i64 || VT == MVT::v2f64) && !Subtarget.isPPC64())
11376 return SDValue();
11377 // On P10, we have legal lowering for constant and variable indices for
11378 // all vectors.
11379 if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
11380 VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64)
11381 return Op;
11382 }
11383
11384 // Before P10, we have legal lowering for constant indices but not for
11385 // variable ones.
11386 if (!C)
11387 return SDValue();
11388
11389 // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
11390 if (VT == MVT::v8i16 || VT == MVT::v16i8) {
11391 SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
11392 unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
11393 unsigned InsertAtElement = C->getZExtValue();
11394 unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
11395 if (Subtarget.isLittleEndian()) {
11396 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
11397 }
11398 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
11399 DAG.getConstant(InsertAtByte, dl, MVT::i32));
11400 }
11401 return Op;
11402}
11403
11404SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
11405 SelectionDAG &DAG) const {
11406 SDLoc dl(Op);
11407 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
11408 SDValue LoadChain = LN->getChain();
11409 SDValue BasePtr = LN->getBasePtr();
11410 EVT VT = Op.getValueType();
11411
11412 if (VT != MVT::v256i1 && VT != MVT::v512i1)
11413 return Op;
11414
11415 // Type v256i1 is used for pairs and v512i1 is used for accumulators.
11416 // Here we create 2 or 4 v16i8 loads to load the pair or accumulator value in
11417 // 2 or 4 vsx registers.
11418 assert((VT != MVT::v512i1 || Subtarget.hasMMA()) &&
11419 "Type unsupported without MMA");
11420 assert((VT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
11421 "Type unsupported without paired vector support");
11422 Align Alignment = LN->getAlign();
11423 SmallVector<SDValue, 4> Loads;
11424 SmallVector<SDValue, 4> LoadChains;
11425 unsigned NumVecs = VT.getSizeInBits() / 128;
11426 for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
11427 SDValue Load =
11428 DAG.getLoad(MVT::v16i8, dl, LoadChain, BasePtr,
11429 LN->getPointerInfo().getWithOffset(Idx * 16),
11430 commonAlignment(Alignment, Idx * 16),
11431 LN->getMemOperand()->getFlags(), LN->getAAInfo());
11432 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
11433 DAG.getConstant(16, dl, BasePtr.getValueType()));
11434 Loads.push_back(Load);
11435 LoadChains.push_back(Load.getValue(1));
11436 }
11437 if (Subtarget.isLittleEndian()) {
11438 std::reverse(Loads.begin(), Loads.end());
11439 std::reverse(LoadChains.begin(), LoadChains.end());
11440 }
11441 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
11442 SDValue Value =
11443 DAG.getNode(VT == MVT::v512i1 ? PPCISD::ACC_BUILD : PPCISD::PAIR_BUILD,
11444 dl, VT, Loads);
11445 SDValue RetOps[] = {Value, TF};
11446 return DAG.getMergeValues(RetOps, dl);
11447}
11448
11449SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
11450 SelectionDAG &DAG) const {
11451 SDLoc dl(Op);
11452 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
11453 SDValue StoreChain = SN->getChain();
11454 SDValue BasePtr = SN->getBasePtr();
11455 SDValue Value = SN->getValue();
11456 SDValue Value2 = SN->getValue();
11457 EVT StoreVT = Value.getValueType();
11458
11459 if (StoreVT != MVT::v256i1 && StoreVT != MVT::v512i1)
11460 return Op;
11461
11462 // Type v256i1 is used for pairs and v512i1 is used for accumulators.
11463 // Here we create 2 or 4 v16i8 stores to store the pair or accumulator
11464 // underlying registers individually.
11465 assert((StoreVT != MVT::v512i1 || Subtarget.hasMMA()) &&
11466 "Type unsupported without MMA");
11467 assert((StoreVT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
11468 "Type unsupported without paired vector support");
11469 Align Alignment = SN->getAlign();
11470 SmallVector<SDValue, 4> Stores;
11471 unsigned NumVecs = 2;
11472 if (StoreVT == MVT::v512i1) {
11473 if (Subtarget.isISAFuture()) {
11474 EVT ReturnTypes[] = {MVT::v256i1, MVT::v256i1};
11475 MachineSDNode *ExtNode = DAG.getMachineNode(
11476 PPC::DMXXEXTFDMR512, dl, ArrayRef(ReturnTypes, 2), Op.getOperand(1));
11477
11478 Value = SDValue(ExtNode, 0);
11479 Value2 = SDValue(ExtNode, 1);
11480 } else
11481 Value = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, Value);
11482 NumVecs = 4;
11483 }
11484 for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
11485 unsigned VecNum = Subtarget.isLittleEndian() ? NumVecs - 1 - Idx : Idx;
11486 SDValue Elt;
11487 if (Subtarget.isISAFuture()) {
11488 VecNum = Subtarget.isLittleEndian() ? 1 - (Idx % 2) : (Idx % 2);
11489 Elt = DAG.getNode(PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8,
11490 Idx > 1 ? Value2 : Value,
11491 DAG.getConstant(VecNum, dl, getPointerTy(DAG.getDataLayout())));
11492 } else
11493 Elt = DAG.getNode(PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, Value,
11494 DAG.getConstant(VecNum, dl, getPointerTy(DAG.getDataLayout())));
11495
11496 SDValue Store =
11497 DAG.getStore(StoreChain, dl, Elt, BasePtr,
11498 SN->getPointerInfo().getWithOffset(Idx * 16),
11499 commonAlignment(Alignment, Idx * 16),
11500 SN->getMemOperand()->getFlags(), SN->getAAInfo());
11501 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
11502 DAG.getConstant(16, dl, BasePtr.getValueType()));
11503 Stores.push_back(Store);
11504 }
11505 SDValue TF = DAG.getTokenFactor(dl, Stores);
11506 return TF;
11507}
11508
11509SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
11510 SDLoc dl(Op);
11511 if (Op.getValueType() == MVT::v4i32) {
11512 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
11513
11514 SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
11515 // +16 as shift amt.
11516 SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
11517 SDValue RHSSwap = // = vrlw RHS, 16
11518 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
11519
11520 // Shrinkify inputs to v8i16.
11521 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
11522 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
11523 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
11524
11525 // Low parts multiplied together, generating 32-bit results (we ignore the
11526 // top parts).
11527 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
11528 LHS, RHS, DAG, dl, MVT::v4i32);
11529
11530 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
11531 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
11532 // Shift the high parts up 16 bits.
11533 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
11534 Neg16, DAG, dl);
11535 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
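// Summary of the decomposition above: per 32-bit lane,
//   a * b (mod 2^32)
//     == lo16(a)*lo16(b) + ((hi16(a)*lo16(b) + lo16(a)*hi16(b)) << 16),
// where vmulouh yields the first term and vmsumuhm on the halfword-swapped
// RHS yields the parenthesized sum before the final shift and add.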
11536 } else if (Op.getValueType() == MVT::v16i8) {
11537 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
11538 bool isLittleEndian = Subtarget.isLittleEndian();
11539
11540 // Multiply the even 8-bit parts, producing 16-bit sums.
11541 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
11542 LHS, RHS, DAG, dl, MVT::v8i16);
11543 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
11544
11545 // Multiply the odd 8-bit parts, producing 16-bit sums.
11546 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
11547 LHS, RHS, DAG, dl, MVT::v8i16);
11548 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
11549
11550 // Merge the results together. Because vmuleub and vmuloub are
11551 // instructions with a big-endian bias, we must reverse the
11552 // element numbering and reverse the meaning of "odd" and "even"
11553 // when generating little endian code.
11554 int Ops[16];
11555 for (unsigned i = 0; i != 8; ++i) {
11556 if (isLittleEndian) {
11557 Ops[i*2 ] = 2*i;
11558 Ops[i*2+1] = 2*i+16;
11559 } else {
11560 Ops[i*2 ] = 2*i+1;
11561 Ops[i*2+1] = 2*i+1+16;
11562 }
11563 }
11564 if (isLittleEndian)
11565 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
11566 else
11567 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
11568 } else {
11569 llvm_unreachable("Unknown mul to lower!");
11570 }
11571}
11572
11573SDValue PPCTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
11574 bool IsStrict = Op->isStrictFPOpcode();
11575 if (Op.getOperand(IsStrict ? 1 : 0).getValueType() == MVT::f128 &&
11576 !Subtarget.hasP9Vector())
11577 return SDValue();
11578
11579 return Op;
11580}
11581
11582 // Custom lowering for fpext v2f32 to v2f64
11583SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
11584
11585 assert(Op.getOpcode() == ISD::FP_EXTEND &&
11586 "Should only be called for ISD::FP_EXTEND");
11587
11588 // FIXME: handle extends from half precision float vectors on P9.
11589 // We only want to custom lower an extend from v2f32 to v2f64.
11590 if (Op.getValueType() != MVT::v2f64 ||
11591 Op.getOperand(0).getValueType() != MVT::v2f32)
11592 return SDValue();
11593
11594 SDLoc dl(Op);
11595 SDValue Op0 = Op.getOperand(0);
11596
11597 switch (Op0.getOpcode()) {
11598 default:
11599 return SDValue();
11600 case ISD::EXTRACT_SUBVECTOR: {
11601 assert(Op0.getNumOperands() == 2 &&
11602 isa<ConstantSDNode>(Op0->getOperand(1)) &&
11603 "Node should have 2 operands with second one being a constant!");
11604
11605 if (Op0.getOperand(0).getValueType() != MVT::v4f32)
11606 return SDValue();
11607
11608 // Custom lowering is only done for the high or low doubleword.
11609 int Idx = Op0.getConstantOperandVal(1);
11610 if (Idx % 2 != 0)
11611 return SDValue();
11612
11613 // Since input is v4f32, at this point Idx is either 0 or 2.
11614 // Shift to get the doubleword position we want.
11615 int DWord = Idx >> 1;
11616
11617 // High and low word positions are different on little endian.
11618 if (Subtarget.isLittleEndian())
11619 DWord ^= 0x1;
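// For example, extracting the v2f32 subvector at Idx == 2 of a v4f32 gives
// DWord 1 on big-endian targets but DWord 0 on little-endian ones.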
11620
11621 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
11622 Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
11623 }
11624 case ISD::FADD:
11625 case ISD::FMUL:
11626 case ISD::FSUB: {
11627 SDValue NewLoad[2];
11628 for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
11629 // Ensure both inputs are loads.
11630 SDValue LdOp = Op0.getOperand(i);
11631 if (LdOp.getOpcode() != ISD::LOAD)
11632 return SDValue();
11633 // Generate new load node.
11634 LoadSDNode *LD = cast<LoadSDNode>(LdOp);
11635 SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
11636 NewLoad[i] = DAG.getMemIntrinsicNode(
11637 PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
11638 LD->getMemoryVT(), LD->getMemOperand());
11639 }
11640 SDValue NewOp =
11641 DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
11642 NewLoad[1], Op0.getNode()->getFlags());
11643 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
11644 DAG.getConstant(0, dl, MVT::i32));
11645 }
11646 case ISD::LOAD: {
11647 LoadSDNode *LD = cast<LoadSDNode>(Op0);
11648 SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
11649 SDValue NewLd = DAG.getMemIntrinsicNode(
11650 PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
11651 LD->getMemoryVT(), LD->getMemOperand());
11652 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
11653 DAG.getConstant(0, dl, MVT::i32));
11654 }
11655 }
11656 llvm_unreachable("ERROR: Should return for all cases within switch.");
11657}
11658
11659/// LowerOperation - Provide custom lowering hooks for some operations.
11660///
11661 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
11662 switch (Op.getOpcode()) {
11663 default: llvm_unreachable("Wasn't expecting to be able to lower this!");
11664 case ISD::FPOW: return lowerPow(Op, DAG);
11665 case ISD::FSIN: return lowerSin(Op, DAG);
11666 case ISD::FCOS: return lowerCos(Op, DAG);
11667 case ISD::FLOG: return lowerLog(Op, DAG);
11668 case ISD::FLOG10: return lowerLog10(Op, DAG);
11669 case ISD::FEXP: return lowerExp(Op, DAG);
11670 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
11671 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
11672 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
11673 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
11674 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
11675 case ISD::STRICT_FSETCC:
11676 case ISD::STRICT_FSETCCS:
11677 case ISD::SETCC: return LowerSETCC(Op, DAG);
11678 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
11679 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
11680
11681 case ISD::INLINEASM:
11682 case ISD::INLINEASM_BR: return LowerINLINEASM(Op, DAG);
11683 // Variable argument lowering.
11684 case ISD::VASTART: return LowerVASTART(Op, DAG);
11685 case ISD::VAARG: return LowerVAARG(Op, DAG);
11686 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
11687
11688 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG);
11689 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
11690 case ISD::GET_DYNAMIC_AREA_OFFSET:
11691 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
11692
11693 // Exception handling lowering.
11694 case ISD::EH_DWARF_CFA: return LowerEH_DWARF_CFA(Op, DAG);
11695 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
11696 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
11697
11698 case ISD::LOAD: return LowerLOAD(Op, DAG);
11699 case ISD::STORE: return LowerSTORE(Op, DAG);
11700 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
11701 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
11702 case ISD::STRICT_FP_TO_UINT:
11703 case ISD::STRICT_FP_TO_SINT:
11704 case ISD::FP_TO_UINT:
11705 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
11706 case ISD::STRICT_UINT_TO_FP:
11707 case ISD::STRICT_SINT_TO_FP:
11708 case ISD::UINT_TO_FP:
11709 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
11710 case ISD::GET_ROUNDING: return LowerGET_ROUNDING(Op, DAG);
11711
11712 // Lower 64-bit shifts.
11713 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG);
11714 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG);
11715 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG);
11716
11717 case ISD::FSHL: return LowerFunnelShift(Op, DAG);
11718 case ISD::FSHR: return LowerFunnelShift(Op, DAG);
11719
11720 // Vector-related lowering.
11721 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
11722 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
11723 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
11724 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
11725 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
11726 case ISD::MUL: return LowerMUL(Op, DAG);
11727 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
11728 case ISD::STRICT_FP_ROUND:
11729 case ISD::FP_ROUND:
11730 return LowerFP_ROUND(Op, DAG);
11731 case ISD::ROTL: return LowerROTL(Op, DAG);
11732
11733 // For counter-based loop handling.
11734 case ISD::INTRINSIC_W_CHAIN: return SDValue();
11735
11736 case ISD::BITCAST: return LowerBITCAST(Op, DAG);
11737
11738 // Frame & Return address.
11739 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
11740 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
11741
11742 case ISD::INTRINSIC_VOID:
11743 return LowerINTRINSIC_VOID(Op, DAG);
11744 case ISD::BSWAP:
11745 return LowerBSWAP(Op, DAG);
11746 case ISD::ATOMIC_CMP_SWAP:
11747 return LowerATOMIC_CMP_SWAP(Op, DAG);
11748 case ISD::ATOMIC_STORE:
11749 return LowerATOMIC_LOAD_STORE(Op, DAG);
11750 case ISD::IS_FPCLASS:
11751 return LowerIS_FPCLASS(Op, DAG);
11752 }
11753}
11754
11755 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
11756 SmallVectorImpl<SDValue> &Results,
11757 SelectionDAG &DAG) const {
11758 SDLoc dl(N);
11759 switch (N->getOpcode()) {
11760 default:
11761 llvm_unreachable("Do not know how to custom type legalize this operation!");
11762 case ISD::ATOMIC_LOAD: {
11763 SDValue Res = LowerATOMIC_LOAD_STORE(SDValue(N, 0), DAG);
11764 Results.push_back(Res);
11765 Results.push_back(Res.getValue(1));
11766 break;
11767 }
11768 case ISD::READCYCLECOUNTER: {
11769 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
11770 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
11771
11772 Results.push_back(
11773 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
11774 Results.push_back(RTB.getValue(2));
11775 break;
11776 }
11777 case ISD::INTRINSIC_W_CHAIN: {
11778 if (N->getConstantOperandVal(1) != Intrinsic::loop_decrement)
11779 break;
11780
11781 assert(N->getValueType(0) == MVT::i1 &&
11782 "Unexpected result type for CTR decrement intrinsic");
11783 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
11784 N->getValueType(0));
11785 SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
11786 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
11787 N->getOperand(1));
11788
11789 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
11790 Results.push_back(NewInt.getValue(1));
11791 break;
11792 }
11793 case ISD::INTRINSIC_WO_CHAIN: {
11794 switch (N->getConstantOperandVal(0)) {
11795 case Intrinsic::ppc_pack_longdouble:
11796 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128,
11797 N->getOperand(2), N->getOperand(1)));
11798 break;
11799 case Intrinsic::ppc_maxfe:
11800 case Intrinsic::ppc_minfe:
11801 case Intrinsic::ppc_fnmsub:
11802 case Intrinsic::ppc_convert_f128_to_ppcf128:
11803 Results.push_back(LowerINTRINSIC_WO_CHAIN(SDValue(N, 0), DAG));
11804 break;
11805 }
11806 break;
11807 }
11808 case ISD::VAARG: {
11809 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
11810 return;
11811
11812 EVT VT = N->getValueType(0);
11813
11814 if (VT == MVT::i64) {
11815 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
11816
11817 Results.push_back(NewNode);
11818 Results.push_back(NewNode.getValue(1));
11819 }
11820 return;
11821 }
11822 case ISD::STRICT_FP_TO_SINT:
11823 case ISD::STRICT_FP_TO_UINT:
11824 case ISD::FP_TO_SINT:
11825 case ISD::FP_TO_UINT: {
11826 // LowerFP_TO_INT() can only handle f32 and f64.
11827 if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
11828 MVT::ppcf128)
11829 return;
11830 SDValue LoweredValue = LowerFP_TO_INT(SDValue(N, 0), DAG, dl);
11831 Results.push_back(LoweredValue);
11832 if (N->isStrictFPOpcode())
11833 Results.push_back(LoweredValue.getValue(1));
11834 return;
11835 }
11836 case ISD::TRUNCATE: {
11837 if (!N->getValueType(0).isVector())
11838 return;
11839 SDValue Lowered = LowerTRUNCATEVector(SDValue(N, 0), DAG);
11840 if (Lowered)
11841 Results.push_back(Lowered);
11842 return;
11843 }
11844 case ISD::FSHL:
11845 case ISD::FSHR:
11846 // Don't handle funnel shifts here.
11847 return;
11848 case ISD::BITCAST:
11849 // Don't handle bitcast here.
11850 return;
11851 case ISD::FP_EXTEND:
11852 SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
11853 if (Lowered)
11854 Results.push_back(Lowered);
11855 return;
11856 }
11857}
11858
11859//===----------------------------------------------------------------------===//
11860// Other Lowering Code
11861//===----------------------------------------------------------------------===//
11862
11863 static Instruction *callIntrinsic(IRBuilderBase &Builder, Intrinsic::ID Id) {
11864 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
11865 Function *Func = Intrinsic::getDeclaration(M, Id);
11866 return Builder.CreateCall(Func, {});
11867}
11868
11869 // The mappings for emitLeading/TrailingFence are taken from
11870// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
11871 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
11872 Instruction *Inst,
11873 AtomicOrdering Ord) const {
11874 if (Ord == AtomicOrdering::SequentiallyConsistent)
11875 return callIntrinsic(Builder, Intrinsic::ppc_sync);
11876 if (isReleaseOrStronger(Ord))
11877 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
11878 return nullptr;
11879}
11880
11881 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
11882 Instruction *Inst,
11883 AtomicOrdering Ord) const {
11884 if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
11885 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
11886 // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
11887 // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
11888 if (isa<LoadInst>(Inst))
11889 return Builder.CreateCall(
11890 Intrinsic::getDeclaration(
11891 Builder.GetInsertBlock()->getParent()->getParent(),
11892 Intrinsic::ppc_cfence, {Inst->getType()}),
11893 {Inst});
11894 // FIXME: Can use isync for rmw operation.
11895 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
11896 }
11897 return nullptr;
11898}
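// Net effect of the two hooks above (following the cited mapping tables):
// a seq_cst access gets a leading 'sync' plus, for loads, a trailing
// 'cfence' (or a trailing 'lwsync' for read-modify-write operations); a
// release store gets a leading 'lwsync'; an acquire load gets only the
// trailing fence.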
11899
11900 MachineBasicBlock *
11901 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
11902 unsigned AtomicSize,
11903 unsigned BinOpcode,
11904 unsigned CmpOpcode,
11905 unsigned CmpPred) const {
11906 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
11907 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11908
11909 auto LoadMnemonic = PPC::LDARX;
11910 auto StoreMnemonic = PPC::STDCX;
11911 switch (AtomicSize) {
11912 default:
11913 llvm_unreachable("Unexpected size of atomic entity");
11914 case 1:
11915 LoadMnemonic = PPC::LBARX;
11916 StoreMnemonic = PPC::STBCX;
11917 assert(Subtarget.hasPartwordAtomics() && "Partword atomics required for size < 4");
11918 break;
11919 case 2:
11920 LoadMnemonic = PPC::LHARX;
11921 StoreMnemonic = PPC::STHCX;
11922 assert(Subtarget.hasPartwordAtomics() && "Partword atomics required for size < 4");
11923 break;
11924 case 4:
11925 LoadMnemonic = PPC::LWARX;
11926 StoreMnemonic = PPC::STWCX;
11927 break;
11928 case 8:
11929 LoadMnemonic = PPC::LDARX;
11930 StoreMnemonic = PPC::STDCX;
11931 break;
11932 }
11933
11934 const BasicBlock *LLVM_BB = BB->getBasicBlock();
11935 MachineFunction *F = BB->getParent();
11936 MachineFunction::iterator It = ++BB->getIterator();
11937
11938 Register dest = MI.getOperand(0).getReg();
11939 Register ptrA = MI.getOperand(1).getReg();
11940 Register ptrB = MI.getOperand(2).getReg();
11941 Register incr = MI.getOperand(3).getReg();
11942 DebugLoc dl = MI.getDebugLoc();
11943
11944 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11945 MachineBasicBlock *loop2MBB =
11946 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11947 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11948 F->insert(It, loopMBB);
11949 if (CmpOpcode)
11950 F->insert(It, loop2MBB);
11951 F->insert(It, exitMBB);
11952 exitMBB->splice(exitMBB->begin(), BB,
11953 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11954 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11955
11956 MachineRegisterInfo &RegInfo = F->getRegInfo();
11957 Register TmpReg = (!BinOpcode) ? incr :
11958 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
11959 : &PPC::GPRCRegClass);
11960
11961 // thisMBB:
11962 // ...
11963 // fallthrough --> loopMBB
11964 BB->addSuccessor(loopMBB);
11965
11966 // loopMBB:
11967 // l[wd]arx dest, ptr
11968 // add r0, dest, incr
11969 // st[wd]cx. r0, ptr
11970 // bne- loopMBB
11971 // fallthrough --> exitMBB
11972
11973 // For max/min...
11974 // loopMBB:
11975 // l[wd]arx dest, ptr
11976 // cmpl?[wd] dest, incr
11977 // bgt exitMBB
11978 // loop2MBB:
11979 // st[wd]cx. dest, ptr
11980 // bne- loopMBB
11981 // fallthrough --> exitMBB
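// For instance, an atomicrmw max on i32 is emitted with BinOpcode == 0 and a
// compare-based exit (e.g. CmpOpcode == PPC::CMPW with a predicate such as
// PPC::PRED_GT, assumed here for illustration), so the loop exits without
// storing whenever the loaded value already satisfies the condition.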
11982
11983 BB = loopMBB;
11984 BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
11985 .addReg(ptrA).addReg(ptrB);
11986 if (BinOpcode)
11987 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
11988 if (CmpOpcode) {
11989 Register CrReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11990 // Signed comparisons of byte or halfword values must be sign-extended.
11991 if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
11992 Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11993 BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
11994 ExtReg).addReg(dest);
11995 BuildMI(BB, dl, TII->get(CmpOpcode), CrReg).addReg(ExtReg).addReg(incr);
11996 } else
11997 BuildMI(BB, dl, TII->get(CmpOpcode), CrReg).addReg(dest).addReg(incr);
11998
11999 BuildMI(BB, dl, TII->get(PPC::BCC))
12000 .addImm(CmpPred)
12001 .addReg(CrReg)
12002 .addMBB(exitMBB);
12003 BB->addSuccessor(loop2MBB);
12004 BB->addSuccessor(exitMBB);
12005 BB = loop2MBB;
12006 }
12007 BuildMI(BB, dl, TII->get(StoreMnemonic))
12008 .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
12009 BuildMI(BB, dl, TII->get(PPC::BCC))
12010 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
12011 BB->addSuccessor(loopMBB);
12012 BB->addSuccessor(exitMBB);
12013
12014 // exitMBB:
12015 // ...
12016 BB = exitMBB;
12017 return BB;
12018}
12019
12020 static bool isSignExtended(MachineInstr &MI, const PPCInstrInfo *TII) {
12021 switch(MI.getOpcode()) {
12022 default:
12023 return false;
12024 case PPC::COPY:
12025 return TII->isSignExtended(MI.getOperand(1).getReg(),
12026 &MI.getMF()->getRegInfo());
12027 case PPC::LHA:
12028 case PPC::LHA8:
12029 case PPC::LHAU:
12030 case PPC::LHAU8:
12031 case PPC::LHAUX:
12032 case PPC::LHAUX8:
12033 case PPC::LHAX:
12034 case PPC::LHAX8:
12035 case PPC::LWA:
12036 case PPC::LWAUX:
12037 case PPC::LWAX:
12038 case PPC::LWAX_32:
12039 case PPC::LWA_32:
12040 case PPC::PLHA:
12041 case PPC::PLHA8:
12042 case PPC::PLHA8pc:
12043 case PPC::PLHApc:
12044 case PPC::PLWA:
12045 case PPC::PLWA8:
12046 case PPC::PLWA8pc:
12047 case PPC::PLWApc:
12048 case PPC::EXTSB:
12049 case PPC::EXTSB8:
12050 case PPC::EXTSB8_32_64:
12051 case PPC::EXTSB8_rec:
12052 case PPC::EXTSB_rec:
12053 case PPC::EXTSH:
12054 case PPC::EXTSH8:
12055 case PPC::EXTSH8_32_64:
12056 case PPC::EXTSH8_rec:
12057 case PPC::EXTSH_rec:
12058 case PPC::EXTSW:
12059 case PPC::EXTSWSLI:
12060 case PPC::EXTSWSLI_32_64:
12061 case PPC::EXTSWSLI_32_64_rec:
12062 case PPC::EXTSWSLI_rec:
12063 case PPC::EXTSW_32:
12064 case PPC::EXTSW_32_64:
12065 case PPC::EXTSW_32_64_rec:
12066 case PPC::EXTSW_rec:
12067 case PPC::SRAW:
12068 case PPC::SRAWI:
12069 case PPC::SRAWI_rec:
12070 case PPC::SRAW_rec:
12071 return true;
12072 }
12073 return false;
12074}
12075
12076 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
12077 MachineInstr &MI, MachineBasicBlock *BB,
12078 bool is8bit, // operation
12079 unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
12080 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
12081 const PPCInstrInfo *TII = Subtarget.getInstrInfo();
12082
12083 // If this is a signed comparison and the value being compared is not known
12084 // to be sign extended, sign extend it here.
12085 DebugLoc dl = MI.getDebugLoc();
12086 MachineFunction *F = BB->getParent();
12087 MachineRegisterInfo &RegInfo = F->getRegInfo();
12088 Register incr = MI.getOperand(3).getReg();
12089 bool IsSignExtended =
12090 incr.isVirtual() && isSignExtended(*RegInfo.getVRegDef(incr), TII);
12091
12092 if (CmpOpcode == PPC::CMPW && !IsSignExtended) {
12093 Register ValueReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
12094 BuildMI(*BB, MI, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueReg)
12095 .addReg(MI.getOperand(3).getReg());
12096 MI.getOperand(3).setReg(ValueReg);
12097 incr = ValueReg;
12098 }
12099 // If we support part-word atomic mnemonics, just use them
12100 if (Subtarget.hasPartwordAtomics())
12101 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
12102 CmpPred);
12103
12104 // In 64 bit mode we have to use 64 bits for addresses, even though the
12105 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address
12106 // registers without caring whether they're 32 or 64, but here we're
12107 // doing actual arithmetic on the addresses.
12108 bool is64bit = Subtarget.isPPC64();
12109 bool isLittleEndian = Subtarget.isLittleEndian();
12110 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
12111
12112 const BasicBlock *LLVM_BB = BB->getBasicBlock();
12113 MachineFunction::iterator It = ++BB->getIterator();
12114
12115 Register dest = MI.getOperand(0).getReg();
12116 Register ptrA = MI.getOperand(1).getReg();
12117 Register ptrB = MI.getOperand(2).getReg();
12118
12119 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
12120 MachineBasicBlock *loop2MBB =
12121 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
12122 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12123 F->insert(It, loopMBB);
12124 if (CmpOpcode)
12125 F->insert(It, loop2MBB);
12126 F->insert(It, exitMBB);
12127 exitMBB->splice(exitMBB->begin(), BB,
12128 std::next(MachineBasicBlock::iterator(MI)), BB->end());
12129 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12130
12131 const TargetRegisterClass *RC =
12132 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
12133 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
12134
12135 Register PtrReg = RegInfo.createVirtualRegister(RC);
12136 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
12137 Register ShiftReg =
12138 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
12139 Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
12140 Register MaskReg = RegInfo.createVirtualRegister(GPRC);
12141 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
12142 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
12143 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
12144 Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
12145 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
12146 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
12147 Register SrwDestReg = RegInfo.createVirtualRegister(GPRC);
12148 Register Ptr1Reg;
12149 Register TmpReg =
12150 (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
12151
12152 // thisMBB:
12153 // ...
12154 // fallthrough --> loopMBB
12155 BB->addSuccessor(loopMBB);
12156
12157 // The 4-byte load must be aligned, while a char or short may be
12158 // anywhere in the word. Hence all this nasty bookkeeping code.
12159 // add ptr1, ptrA, ptrB [copy if ptrA==0]
12160 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
12161 // xori shift, shift1, 24 [16]
12162 // rlwinm ptr, ptr1, 0, 0, 29
12163 // slw incr2, incr, shift
12164 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
12165 // slw mask, mask2, shift
12166 // loopMBB:
12167 // lwarx tmpDest, ptr
12168 // add tmp, tmpDest, incr2
12169 // andc tmp2, tmpDest, mask
12170 // and tmp3, tmp, mask
12171 // or tmp4, tmp3, tmp2
12172 // stwcx. tmp4, ptr
12173 // bne- loopMBB
12174 // fallthrough --> exitMBB
12175 // srw SrwDest, tmpDest, shift
12176 // rlwinm SrwDest, SrwDest, 0, 24 [16], 31
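// Worked example of the bookkeeping above: for an 8-bit atomic at a
// big-endian address with (ptr & 3) == 1, shift1 becomes 8 and the xori by
// 24 yields shift == 16, so the slw places both the operand and the 0xFF
// mask over byte 1 of the aligned word (bits 23:16).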
12177 if (ptrA != ZeroReg) {
12178 Ptr1Reg = RegInfo.createVirtualRegister(RC);
12179 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
12180 .addReg(ptrA)
12181 .addReg(ptrB);
12182 } else {
12183 Ptr1Reg = ptrB;
12184 }
12185 // We need to use a 32-bit subregister to avoid a register class mismatch in
12186 // 64-bit mode.
12187 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
12188 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
12189 .addImm(3)
12190 .addImm(27)
12191 .addImm(is8bit ? 28 : 27);
12192 if (!isLittleEndian)
12193 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
12194 .addReg(Shift1Reg)
12195 .addImm(is8bit ? 24 : 16);
12196 if (is64bit)
12197 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
12198 .addReg(Ptr1Reg)
12199 .addImm(0)
12200 .addImm(61);
12201 else
12202 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
12203 .addReg(Ptr1Reg)
12204 .addImm(0)
12205 .addImm(0)
12206 .addImm(29);
12207 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
12208 if (is8bit)
12209 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
12210 else {
12211 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
12212 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
12213 .addReg(Mask3Reg)
12214 .addImm(65535);
12215 }
12216 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
12217 .addReg(Mask2Reg)
12218 .addReg(ShiftReg);
12219
12220 BB = loopMBB;
12221 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
12222 .addReg(ZeroReg)
12223 .addReg(PtrReg);
12224 if (BinOpcode)
12225 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
12226 .addReg(Incr2Reg)
12227 .addReg(TmpDestReg);
12228 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
12229 .addReg(TmpDestReg)
12230 .addReg(MaskReg);
12231 BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
12232 if (CmpOpcode) {
12233 // For unsigned comparisons, we can directly compare the shifted values.
12234 // For signed comparisons we shift and sign extend.
12235 Register SReg = RegInfo.createVirtualRegister(GPRC);
12236 Register CrReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12237 BuildMI(BB, dl, TII->get(PPC::AND), SReg)
12238 .addReg(TmpDestReg)
12239 .addReg(MaskReg);
12240 unsigned ValueReg = SReg;
12241 unsigned CmpReg = Incr2Reg;
12242 if (CmpOpcode == PPC::CMPW) {
12243 ValueReg = RegInfo.createVirtualRegister(GPRC);
12244 BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
12245 .addReg(SReg)
12246 .addReg(ShiftReg);
12247 Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
12248 BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
12249 .addReg(ValueReg);
12250 ValueReg = ValueSReg;
12251 CmpReg = incr;
12252 }
12253 BuildMI(BB, dl, TII->get(CmpOpcode), CrReg).addReg(ValueReg).addReg(CmpReg);
12254 BuildMI(BB, dl, TII->get(PPC::BCC))
12255 .addImm(CmpPred)
12256 .addReg(CrReg)
12257 .addMBB(exitMBB);
12258 BB->addSuccessor(loop2MBB);
12259 BB->addSuccessor(exitMBB);
12260 BB = loop2MBB;
12261 }
12262 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
12263 BuildMI(BB, dl, TII->get(PPC::STWCX))
12264 .addReg(Tmp4Reg)
12265 .addReg(ZeroReg)
12266 .addReg(PtrReg);
12267 BuildMI(BB, dl, TII->get(PPC::BCC))
12268 .addImm(PPC::PRED_NE)
12269 .addReg(PPC::CR0)
12270 .addMBB(loopMBB);
12271 BB->addSuccessor(loopMBB);
12272 BB->addSuccessor(exitMBB);
12273
12274 // exitMBB:
12275 // ...
12276 BB = exitMBB;
12277 // Since the shift amount is not a constant, we need to clear
12278 // the upper bits with a separate RLWINM.
12279 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::RLWINM), dest)
12280 .addReg(SrwDestReg)
12281 .addImm(0)
12282 .addImm(is8bit ? 24 : 16)
12283 .addImm(31);
12284 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), SrwDestReg)
12285 .addReg(TmpDestReg)
12286 .addReg(ShiftReg);
12287 return BB;
12288}
12289
12290 MachineBasicBlock *
12291 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
12292 MachineBasicBlock *MBB) const {
12293 DebugLoc DL = MI.getDebugLoc();
12294 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
12295 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
12296
12297 MachineFunction *MF = MBB->getParent();
12298 MachineRegisterInfo &MRI = MF->getRegInfo();
12299
12300 const BasicBlock *BB = MBB->getBasicBlock();
12301 MachineFunction::iterator I = ++MBB->getIterator();
12302
12303 Register DstReg = MI.getOperand(0).getReg();
12304 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
12305 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
12306 Register mainDstReg = MRI.createVirtualRegister(RC);
12307 Register restoreDstReg = MRI.createVirtualRegister(RC);
12308
12309 MVT PVT = getPointerTy(MF->getDataLayout());
12310 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
12311 "Invalid Pointer Size!");
12312 // For v = setjmp(buf), we generate
12313 //
12314 // thisMBB:
12315 // SjLjSetup mainMBB
12316 // bl mainMBB
12317 // v_restore = 1
12318 // b sinkMBB
12319 //
12320 // mainMBB:
12321 // buf[LabelOffset] = LR
12322 // v_main = 0
12323 //
12324 // sinkMBB:
12325 // v = phi(main, restore)
12326 //
12327
12328 MachineBasicBlock *thisMBB = MBB;
12329 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
12330 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
12331 MF->insert(I, mainMBB);
12332 MF->insert(I, sinkMBB);
12333
12334 MachineInstrBuilder MIB;
12335
12336 // Transfer the remainder of BB and its successor edges to sinkMBB.
12337 sinkMBB->splice(sinkMBB->begin(), MBB,
12338 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
12339 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
12340
12341 // Note that the structure of the jmp_buf used here is not compatible
12342 // with that used by libc, and is not designed to be. Specifically, it
12343 // stores only those 'reserved' registers that LLVM does not otherwise
12344 // understand how to spill. Also, by convention, by the time this
12345 // intrinsic is called, Clang has already stored the frame address in the
12346 // first slot of the buffer and stack address in the third. Following the
12347 // X86 target code, we'll store the jump address in the second slot. We also
12348 // need to save the TOC pointer (R2) to handle jumps between shared
12349 // libraries, and that will be stored in the fourth slot. The thread
12350 // identifier (R13) is not affected.
12351
12352 // thisMBB:
12353 const int64_t LabelOffset = 1 * PVT.getStoreSize();
12354 const int64_t TOCOffset = 3 * PVT.getStoreSize();
12355 const int64_t BPOffset = 4 * PVT.getStoreSize();
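// Resulting buffer layout, in pointer-sized slots: slot 0 holds the frame
// address (stored by the front end), slot 1 the jump IP, slot 2 the stack
// pointer (also stored by the front end), slot 3 the TOC pointer (R2), and
// slot 4 the base pointer.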
12356
12357 // Prepare the IP (jump target address) in a register.
12358 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
12359 Register LabelReg = MRI.createVirtualRegister(PtrRC);
12360 Register BufReg = MI.getOperand(1).getReg();
12361
12362 if (Subtarget.is64BitELFABI()) {
12363 setUsesTOCBasePtr(*MBB->getParent());
12364 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
12365 .addReg(PPC::X2)
12366 .addImm(TOCOffset)
12367 .addReg(BufReg)
12368 .cloneMemRefs(MI);
12369 }
12370
12371 // Naked functions never have a base pointer, and so we use r1. For all
12372 // other functions, this decision must be deferred until PEI.
12373 unsigned BaseReg;
12374 if (MF->getFunction().hasFnAttribute(Attribute::Naked))
12375 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
12376 else
12377 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
12378
12379 MIB = BuildMI(*thisMBB, MI, DL,
12380 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
12381 .addReg(BaseReg)
12382 .addImm(BPOffset)
12383 .addReg(BufReg)
12384 .cloneMemRefs(MI);
12385
12386 // Setup
12387 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
12388 MIB.addRegMask(TRI->getNoPreservedMask());
12389
12390 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
12391
12392 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
12393 .addMBB(mainMBB);
12394 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
12395
12396 thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
12397 thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
12398
12399 // mainMBB:
12400 // mainDstReg = 0
12401 MIB =
12402 BuildMI(mainMBB, DL,
12403 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
12404
12405 // Store IP
12406 if (Subtarget.isPPC64()) {
12407 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
12408 .addReg(LabelReg)
12409 .addImm(LabelOffset)
12410 .addReg(BufReg);
12411 } else {
12412 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
12413 .addReg(LabelReg)
12414 .addImm(LabelOffset)
12415 .addReg(BufReg);
12416 }
12417 MIB.cloneMemRefs(MI);
12418
12419 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
12420 mainMBB->addSuccessor(sinkMBB);
12421
12422 // sinkMBB:
12423 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
12424 TII->get(PPC::PHI), DstReg)
12425 .addReg(mainDstReg).addMBB(mainMBB)
12426 .addReg(restoreDstReg).addMBB(thisMBB);
12427
12428 MI.eraseFromParent();
12429 return sinkMBB;
12430}
12431
12432 MachineBasicBlock *
12433 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
12434 MachineBasicBlock *MBB) const {
12435 DebugLoc DL = MI.getDebugLoc();
12436 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
12437
12438 MachineFunction *MF = MBB->getParent();
12439 MachineRegisterInfo &MRI = MF->getRegInfo();
12440
12441 MVT PVT = getPointerTy(MF->getDataLayout());
12442 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
12443 "Invalid Pointer Size!");
12444
12445 const TargetRegisterClass *RC =
12446 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
12447 Register Tmp = MRI.createVirtualRegister(RC);
12448 // Since FP is only updated here but NOT referenced, it's treated as GPR.
12449 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
12450 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
12451 unsigned BP =
12452 (PVT == MVT::i64)
12453 ? PPC::X30
12454 : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
12455 : PPC::R30);
12456
12457 MachineInstrBuilder MIB;
12458
12459 const int64_t LabelOffset = 1 * PVT.getStoreSize();
12460 const int64_t SPOffset = 2 * PVT.getStoreSize();
12461 const int64_t TOCOffset = 3 * PVT.getStoreSize();
12462 const int64_t BPOffset = 4 * PVT.getStoreSize();
12463
12464 Register BufReg = MI.getOperand(0).getReg();
12465
12466 // Reload FP (the jumped-to function may not have had a
12467 // frame pointer, and if so, then its r31 will be restored
12468 // as necessary).
12469 if (PVT == MVT::i64) {
12470 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
12471 .addImm(0)
12472 .addReg(BufReg);
12473 } else {
12474 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
12475 .addImm(0)
12476 .addReg(BufReg);
12477 }
12478 MIB.cloneMemRefs(MI);
12479
12480 // Reload IP
12481 if (PVT == MVT::i64) {
12482 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
12483 .addImm(LabelOffset)
12484 .addReg(BufReg);
12485 } else {
12486 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
12487 .addImm(LabelOffset)
12488 .addReg(BufReg);
12489 }
12490 MIB.cloneMemRefs(MI);
12491
12492 // Reload SP
12493 if (PVT == MVT::i64) {
12494 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
12495 .addImm(SPOffset)
12496 .addReg(BufReg);
12497 } else {
12498 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
12499 .addImm(SPOffset)
12500 .addReg(BufReg);
12501 }
12502 MIB.cloneMemRefs(MI);
12503
12504 // Reload BP
12505 if (PVT == MVT::i64) {
12506 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
12507 .addImm(BPOffset)
12508 .addReg(BufReg);
12509 } else {
12510 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
12511 .addImm(BPOffset)
12512 .addReg(BufReg);
12513 }
12514 MIB.cloneMemRefs(MI);
12515
12516 // Reload TOC
12517 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
12518 setUsesTOCBasePtr(*MBB->getParent());
12519 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
12520 .addImm(TOCOffset)
12521 .addReg(BufReg)
12522 .cloneMemRefs(MI);
12523 }
12524
12525 // Jump
12526 BuildMI(*MBB, MI, DL,
12527 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
12528 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
12529
12530 MI.eraseFromParent();
12531 return MBB;
12532}
12533
12534 bool PPCTargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {
12535 // If the function specifically requests inline stack probes, emit them.
12536 if (MF.getFunction().hasFnAttribute("probe-stack"))
12537 return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
12538 "inline-asm";
12539 return false;
12540}
12541
12542 unsigned PPCTargetLowering::getStackProbeSize(const MachineFunction &MF) const {
12543 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
12544 unsigned StackAlign = TFI->getStackAlignment();
12545 assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
12546 "Unexpected stack alignment");
12547 // The default stack probe size is 4096 if the function has no
12548 // stack-probe-size attribute.
12549 const Function &Fn = MF.getFunction();
12550 unsigned StackProbeSize =
12551 Fn.getFnAttributeAsParsedInteger("stack-probe-size", 4096);
12552 // Round down to the stack alignment.
12553 StackProbeSize &= ~(StackAlign - 1);
12554 return StackProbeSize ? StackProbeSize : StackAlign;
12555}
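// Example of the rounding above: with a 16-byte stack alignment,
// "stack-probe-size"=4095 becomes 4095 & ~15 == 4080, while a requested size
// smaller than the alignment rounds down to 0, in which case the alignment
// itself (16) is returned.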
12556
12557 // Lower dynamic stack allocation with probing. `emitProbedAlloca` is split
12558 // into three phases. In the first phase, it uses the pseudo instruction
12559 // PREPARE_PROBED_ALLOCA to get the future result of the actual FramePointer
12560 // and FinalStackPtr. In the second phase, it generates a loop for probing
12561 // blocks. Finally, it uses the pseudo instruction DYNAREAOFFSET to get the
12562 // future result of MaxCallFrameSize so that it can calculate the correct data area pointer.
12563 MachineBasicBlock *
12564 PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
12565 MachineBasicBlock *MBB) const {
12566 const bool isPPC64 = Subtarget.isPPC64();
12567 MachineFunction *MF = MBB->getParent();
12568 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
12569 DebugLoc DL = MI.getDebugLoc();
12570 const unsigned ProbeSize = getStackProbeSize(*MF);
12571 const BasicBlock *ProbedBB = MBB->getBasicBlock();
12572 MachineRegisterInfo &MRI = MF->getRegInfo();
12573 // The CFG of the probing loop looks like this:
12574 // +-----+
12575 // | MBB |
12576 // +--+--+
12577 // |
12578 // +----v----+
12579 // +--->+ TestMBB +---+
12580 // | +----+----+ |
12581 // | | |
12582 // | +-----v----+ |
12583 // +---+ BlockMBB | |
12584 // +----------+ |
12585 // |
12586 // +---------+ |
12587 // | TailMBB +<--+
12588 // +---------+
12589 // In MBB, calculate previous frame pointer and final stack pointer.
12590 // In TestMBB, test if sp is equal to final stack pointer, if so, jump to
12591 // TailMBB. In BlockMBB, update the sp atomically and jump back to TestMBB.
12592 // TailMBB is spliced via \p MI.
12593 MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
12594 MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
12595 MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);
12596
12597 MachineFunction::iterator MBBIter = ++MBB->getIterator();
12598 MF->insert(MBBIter, TestMBB);
12599 MF->insert(MBBIter, BlockMBB);
12600 MF->insert(MBBIter, TailMBB);
12601
12602 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
12603 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
12604
12605 Register DstReg = MI.getOperand(0).getReg();
12606 Register NegSizeReg = MI.getOperand(1).getReg();
12607 Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
12608 Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12609 Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12610 Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12611
12612 // Since the value of NegSizeReg might be realigned during prologue/epilogue
12613 // insertion, insert a PREPARE_PROBED_ALLOCA pseudo instruction to get the
12614 // actual FramePointer and NegSize.
12615 unsigned ProbeOpc;
12616 if (!MRI.hasOneNonDBGUse(NegSizeReg))
12617 ProbeOpc =
12618 isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
12619 else
12620 // By introducing PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG, ActualNegSizeReg
12621 // and NegSizeReg will be allocated to the same physical register, avoiding a
12622 // redundant copy when NegSizeReg has only one use (the current MI), which
12623 // will then be replaced by PREPARE_PROBED_ALLOCA.
12624 ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
12625 : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
12626 BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
12627 .addDef(ActualNegSizeReg)
12628 .addReg(NegSizeReg)
12629 .add(MI.getOperand(2))
12630 .add(MI.getOperand(3));
12631
12632 // Calculate the final stack pointer, which equals SP + ActualNegSize.
12633 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
12634 FinalStackPtr)
12635 .addReg(SPReg)
12636 .addReg(ActualNegSizeReg);
12637
12638 // Materialize a scratch register for update.
12639 int64_t NegProbeSize = -(int64_t)ProbeSize;
12640 assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
12641 Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12642 if (!isInt<16>(NegProbeSize)) {
12643 Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12644 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
12645 .addImm(NegProbeSize >> 16);
12646 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
12647 ScratchReg)
12648 .addReg(TempReg)
12649 .addImm(NegProbeSize & 0xFFFF);
12650 } else
12651 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
12652 .addImm(NegProbeSize);
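// For example, a 64 KiB probe size gives NegProbeSize = -65536, which does
// not fit in a signed 16-bit immediate: LIS materializes the high halfword
// (-65536 >> 16 == -1, i.e. 0xFFFF0000 after the implicit shift), and ORI
// fills in the zero low halfword. The default 4 KiB probe size (-4096)
// fits and takes the single LI path above.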
12653
12654 {
12655 // Probe the leading residual part.
12656 Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12657 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
12658 .addReg(ActualNegSizeReg)
12659 .addReg(ScratchReg);
12660 Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12661 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
12662 .addReg(Div)
12663 .addReg(ScratchReg);
12664 Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12665 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
12666 .addReg(Mul)
12667 .addReg(ActualNegSizeReg);
12668 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
12669 .addReg(FramePointer)
12670 .addReg(SPReg)
12671 .addReg(NegMod);
12672 }
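// As a worked example of the block above: for ActualNegSize = -10000 and
// ProbeSize = 4096 (ScratchReg = -4096), Div = -10000 / -4096 = 2 (the
// division truncates toward zero), Mul = 2 * -4096 = -8192, and
// NegMod = ActualNegSize - Mul = -1808. The stdux/stwux then probes the
// 1808-byte residual, leaving a remainder that is an exact multiple of
// ProbeSize for the loop below.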
12673
12674 {
12675 // The remaining part should be a multiple of ProbeSize.
12676 Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
12677 BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
12678 .addReg(SPReg)
12679 .addReg(FinalStackPtr);
12680 BuildMI(TestMBB, DL, TII->get(PPC::BCC))
12681 .addImm(PPC::PRED_EQ)
12682 .addReg(CmpResult)
12683 .addMBB(TailMBB);
12684 TestMBB->addSuccessor(BlockMBB);
12685 TestMBB->addSuccessor(TailMBB);
12686 }
12687
12688 {
12689 // Touch the block.
12690 // |P...|P...|P...
12691 BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
12692 .addReg(FramePointer)
12693 .addReg(SPReg)
12694 .addReg(ScratchReg);
12695 BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
12696 BlockMBB->addSuccessor(TestMBB);
12697 }
12698
12699 // Calculation of MaxCallFrameSize is deferred to prologue/epilogue insertion;
12700 // use the DYNAREAOFFSET pseudo instruction to get the future result.
12701 Register MaxCallFrameSizeReg =
12702 MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12703 BuildMI(TailMBB, DL,
12704 TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
12705 MaxCallFrameSizeReg)
12706 .add(MI.getOperand(2))
12707 .add(MI.getOperand(3));
12708 BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
12709 .addReg(SPReg)
12710 .addReg(MaxCallFrameSizeReg);
12711
12712 // Splice instructions after MI to TailMBB.
12713 TailMBB->splice(TailMBB->end(), MBB,
12714 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
12715 TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
12716 MBB->addSuccessor(TestMBB);
12717
12718 // Delete the pseudo instruction.
12719 MI.eraseFromParent();
12720
12721 ++NumDynamicAllocaProbed;
12722 return TailMBB;
12723}
12724
12725 static bool IsSelectCC(MachineInstr &MI) {
12726 switch (MI.getOpcode()) {
12727 case PPC::SELECT_CC_I4:
12728 case PPC::SELECT_CC_I8:
12729 case PPC::SELECT_CC_F4:
12730 case PPC::SELECT_CC_F8:
12731 case PPC::SELECT_CC_F16:
12732 case PPC::SELECT_CC_VRRC:
12733 case PPC::SELECT_CC_VSFRC:
12734 case PPC::SELECT_CC_VSSRC:
12735 case PPC::SELECT_CC_VSRC:
12736 case PPC::SELECT_CC_SPE4:
12737 case PPC::SELECT_CC_SPE:
12738 return true;
12739 default:
12740 return false;
12741 }
12742}
12743
12744static bool IsSelect(MachineInstr &MI) {
12745 switch (MI.getOpcode()) {
12746 case PPC::SELECT_I4:
12747 case PPC::SELECT_I8:
12748 case PPC::SELECT_F4:
12749 case PPC::SELECT_F8:
12750 case PPC::SELECT_F16:
12751 case PPC::SELECT_SPE:
12752 case PPC::SELECT_SPE4:
12753 case PPC::SELECT_VRRC:
12754 case PPC::SELECT_VSFRC:
12755 case PPC::SELECT_VSSRC:
12756 case PPC::SELECT_VSRC:
12757 return true;
12758 default:
12759 return false;
12760 }
12761}
12762
12763 MachineBasicBlock *
12764 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
12765 MachineBasicBlock *BB) const {
12766 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
12767 MI.getOpcode() == TargetOpcode::PATCHPOINT) {
12768 if (Subtarget.is64BitELFABI() &&
12769 MI.getOpcode() == TargetOpcode::PATCHPOINT &&
12770 !Subtarget.isUsingPCRelativeCalls()) {
12771 // Call lowering should have added an r2 operand to indicate a dependence
12772 // on the TOC base pointer value. It can't however, because there is no
12773 // way to mark the dependence as implicit there, and so the stackmap code
12774 // will confuse it with a regular operand. Instead, add the dependence
12775 // here.
12776 MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
12777 }
12778
12779 return emitPatchPoint(MI, BB);
12780 }
12781
12782 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
12783 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
12784 return emitEHSjLjSetJmp(MI, BB);
12785 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
12786 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
12787 return emitEHSjLjLongJmp(MI, BB);
12788 }
12789
12790 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
12791
12792 // To "insert" these instructions we actually have to insert their
12793 // control-flow patterns.
12794 const BasicBlock *LLVM_BB = BB->getBasicBlock();
12795 MachineFunction::iterator It = ++BB->getIterator();
12796
12797 MachineFunction *F = BB->getParent();
12798 MachineRegisterInfo &MRI = F->getRegInfo();
12799
12800 if (Subtarget.hasISEL() &&
12801 (MI.getOpcode() == PPC::SELECT_CC_I4 ||
12802 MI.getOpcode() == PPC::SELECT_CC_I8 ||
12803 MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8)) {
12804 SmallVector<MachineOperand, 2> Cond;
12805 if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
12806 MI.getOpcode() == PPC::SELECT_CC_I8)
12807 Cond.push_back(MI.getOperand(4));
12808 else
12809 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
12810 Cond.push_back(MI.getOperand(1));
12811
12812 DebugLoc dl = MI.getDebugLoc();
12813 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
12814 MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
12815 } else if (IsSelectCC(MI) || IsSelect(MI)) {
12816 // The incoming instruction knows the destination vreg to set, the
12817 // condition code register to branch on, the true/false values to
12818 // select between, and a branch opcode to use.
12819
12820 // thisMBB:
12821 // ...
12822 // TrueVal = ...
12823 // cmpTY ccX, r1, r2
12824 // bCC sinkMBB
12825 // fallthrough --> copy0MBB
12826 MachineBasicBlock *thisMBB = BB;
12827 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
12828 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
12829 DebugLoc dl = MI.getDebugLoc();
12830 F->insert(It, copy0MBB);
12831 F->insert(It, sinkMBB);
12832
12833 // Set the call frame size on entry to the new basic blocks.
12834 // See https://reviews.llvm.org/D156113.
12835 unsigned CallFrameSize = TII->getCallFrameSizeAt(MI);
12836 copy0MBB->setCallFrameSize(CallFrameSize);
12837 sinkMBB->setCallFrameSize(CallFrameSize);
12838
12839 // Transfer the remainder of BB and its successor edges to sinkMBB.
12840 sinkMBB->splice(sinkMBB->begin(), BB,
12841 std::next(MachineBasicBlock::iterator(MI)), BB->end());
12842 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
12843
12844 // Next, add the true and fallthrough blocks as its successors.
12845 BB->addSuccessor(copy0MBB);
12846 BB->addSuccessor(sinkMBB);
12847
12848 if (IsSelect(MI)) {
12849 BuildMI(BB, dl, TII->get(PPC::BC))
12850 .addReg(MI.getOperand(1).getReg())
12851 .addMBB(sinkMBB);
12852 } else {
12853 unsigned SelectPred = MI.getOperand(4).getImm();
12854 BuildMI(BB, dl, TII->get(PPC::BCC))
12855 .addImm(SelectPred)
12856 .addReg(MI.getOperand(1).getReg())
12857 .addMBB(sinkMBB);
12858 }
12859
12860 // copy0MBB:
12861 // %FalseValue = ...
12862 // # fallthrough to sinkMBB
12863 BB = copy0MBB;
12864
12865 // Update machine-CFG edges
12866 BB->addSuccessor(sinkMBB);
12867
12868 // sinkMBB:
12869 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
12870 // ...
12871 BB = sinkMBB;
12872 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
12873 .addReg(MI.getOperand(3).getReg())
12874 .addMBB(copy0MBB)
12875 .addReg(MI.getOperand(2).getReg())
12876 .addMBB(thisMBB);
12877 } else if (MI.getOpcode() == PPC::ReadTB) {
12878 // To read the 64-bit time-base register on a 32-bit target, we read the
12879 // two halves. Should the counter have wrapped while it was being read, we
12880 // need to try again.
12881 // ...
12882 // readLoop:
12883 // mfspr Rx,TBU # load from TBU
12884 // mfspr Ry,TB # load from TB
12885 // mfspr Rz,TBU # load from TBU
12886 // cmpw crX,Rx,Rz # check if 'old'='new'
12887 // bne readLoop # branch if they're not equal
12888 // ...
12889
12890 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
12891 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
12892 DebugLoc dl = MI.getDebugLoc();
12893 F->insert(It, readMBB);
12894 F->insert(It, sinkMBB);
12895
12896 // Transfer the remainder of BB and its successor edges to sinkMBB.
12897 sinkMBB->splice(sinkMBB->begin(), BB,
12898 std::next(MachineBasicBlock::iterator(MI)), BB->end());
12899 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
12900
12901 BB->addSuccessor(readMBB);
12902 BB = readMBB;
12903
12904 MachineRegisterInfo &RegInfo = F->getRegInfo();
12905 Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
12906 Register LoReg = MI.getOperand(0).getReg();
12907 Register HiReg = MI.getOperand(1).getReg();
12908
12909 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
12910 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
12911 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
12912
12913 Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12914
12915 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
12916 .addReg(HiReg)
12917 .addReg(ReadAgainReg);
12918 BuildMI(BB, dl, TII->get(PPC::BCC))
12919 .addImm(PPC::PRED_NE)
12920 .addReg(CmpReg)
12921 .addMBB(readMBB);
12922
12923 BB->addSuccessor(readMBB);
12924 BB->addSuccessor(sinkMBB);
12925 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
12926 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
12927 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
12928 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
12929 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
12930 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
12931 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
12932 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
12933
12934 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
12935 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
12936 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
12937 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
12938 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
12939 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
12940 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
12941 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
12942
12943 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
12944 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
12945 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
12946 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
12947 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
12948 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
12949 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
12950 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
12951
12952 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
12953 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
12954 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
12955 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
12956 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
12957 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
12958 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
12959 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
12960
12961 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
12962 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
12963 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
12964 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
12965 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
12966 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
12967 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
12968 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
12969
12970 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
12971 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
12972 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
12973 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
12974 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
12975 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
12976 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
12977 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
12978
12979 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
12980 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LT);
12981 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
12982 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LT);
12983 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
12984 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LT);
12985 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
12986 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LT);
12987
12988 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
12989 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GT);
12990 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
12991 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GT);
12992 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
12993 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GT);
12994 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
12995 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GT);
12996
12997 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
12998 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LT);
12999 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
13000 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LT);
13001 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
13002 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LT);
13003 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
13004 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LT);
13005
13006 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
13007 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GT);
13008 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
13009 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GT);
13010 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
13011 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GT);
13012 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
13013 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GT);
13014
13015 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
13016 BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
13017 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
13018 BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
13019 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
13020 BB = EmitAtomicBinary(MI, BB, 4, 0);
13021 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
13022 BB = EmitAtomicBinary(MI, BB, 8, 0);
13023 else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
13024 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
13025 (Subtarget.hasPartwordAtomics() &&
13026 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
13027 (Subtarget.hasPartwordAtomics() &&
13028 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
13029 bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
13030
13031 auto LoadMnemonic = PPC::LDARX;
13032 auto StoreMnemonic = PPC::STDCX;
13033 switch (MI.getOpcode()) {
13034 default:
13035 llvm_unreachable("Compare and swap of unknown size");
13036 case PPC::ATOMIC_CMP_SWAP_I8:
13037 LoadMnemonic = PPC::LBARX;
13038 StoreMnemonic = PPC::STBCX;
13039 assert(Subtarget.hasPartwordAtomics() && "Target doesn't support partword atomics.");
13040 break;
13041 case PPC::ATOMIC_CMP_SWAP_I16:
13042 LoadMnemonic = PPC::LHARX;
13043 StoreMnemonic = PPC::STHCX;
13044 assert(Subtarget.hasPartwordAtomics() && "Target doesn't support partword atomics.");
13045 break;
13046 case PPC::ATOMIC_CMP_SWAP_I32:
13047 LoadMnemonic = PPC::LWARX;
13048 StoreMnemonic = PPC::STWCX;
13049 break;
13050 case PPC::ATOMIC_CMP_SWAP_I64:
13051 LoadMnemonic = PPC::LDARX;
13052 StoreMnemonic = PPC::STDCX;
13053 break;
13054 }
13055 MachineRegisterInfo &RegInfo = F->getRegInfo();
13056 Register dest = MI.getOperand(0).getReg();
13057 Register ptrA = MI.getOperand(1).getReg();
13058 Register ptrB = MI.getOperand(2).getReg();
13059 Register CrReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
13060 Register oldval = MI.getOperand(3).getReg();
13061 Register newval = MI.getOperand(4).getReg();
13062 DebugLoc dl = MI.getDebugLoc();
13063
13064 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
13065 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
13066 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
13067 F->insert(It, loop1MBB);
13068 F->insert(It, loop2MBB);
13069 F->insert(It, exitMBB);
13070 exitMBB->splice(exitMBB->begin(), BB,
13071 std::next(MachineBasicBlock::iterator(MI)), BB->end());
13072 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
13073
13074 // thisMBB:
13075 // ...
13076 // fallthrough --> loopMBB
13077 BB->addSuccessor(loop1MBB);
13078
13079 // loop1MBB:
13080 // l[bhwd]arx dest, ptr
13081 // cmp[wd] dest, oldval
13082 // bne- exitBB
13083 // loop2MBB:
13084 // st[bhwd]cx. newval, ptr
13085 // bne- loopMBB
13086 // b exitBB
13087 // exitBB:
13088 BB = loop1MBB;
13089 BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
13090 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), CrReg)
13091 .addReg(dest)
13092 .addReg(oldval);
13093 BuildMI(BB, dl, TII->get(PPC::BCC))
13094 .addImm(PPC::PRED_NE)
13095 .addReg(CrReg)
13096 .addMBB(exitMBB);
13097 BB->addSuccessor(loop2MBB);
13098 BB->addSuccessor(exitMBB);
13099
13100 BB = loop2MBB;
13101 BuildMI(BB, dl, TII->get(StoreMnemonic))
13102 .addReg(newval)
13103 .addReg(ptrA)
13104 .addReg(ptrB);
13105 BuildMI(BB, dl, TII->get(PPC::BCC))
13106 .addImm(PPC::PRED_NE)
13107 .addReg(PPC::CR0)
13108 .addMBB(loop1MBB);
13109 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
13110 BB->addSuccessor(loop1MBB);
13111 BB->addSuccessor(exitMBB);
13112
13113 // exitMBB:
13114 // ...
13115 BB = exitMBB;
13116 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
13117 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
13118 // We must use 64-bit registers for addresses when targeting 64-bit,
13119 // since we're actually doing arithmetic on them. Other registers
13120 // can be 32-bit.
13121 bool is64bit = Subtarget.isPPC64();
13122 bool isLittleEndian = Subtarget.isLittleEndian();
13123 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
13124
13125 Register dest = MI.getOperand(0).getReg();
13126 Register ptrA = MI.getOperand(1).getReg();
13127 Register ptrB = MI.getOperand(2).getReg();
13128 Register oldval = MI.getOperand(3).getReg();
13129 Register newval = MI.getOperand(4).getReg();
13130 DebugLoc dl = MI.getDebugLoc();
13131
13132 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
13133 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
13134 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
13135 F->insert(It, loop1MBB);
13136 F->insert(It, loop2MBB);
13137 F->insert(It, exitMBB);
13138 exitMBB->splice(exitMBB->begin(), BB,
13139 std::next(MachineBasicBlock::iterator(MI)), BB->end());
13140 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
13141
13142 MachineRegisterInfo &RegInfo = F->getRegInfo();
13143 const TargetRegisterClass *RC =
13144 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
13145 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
13146
13147 Register PtrReg = RegInfo.createVirtualRegister(RC);
13148 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
13149 Register ShiftReg =
13150 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
13151 Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
13152 Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
13153 Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
13154 Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
13155 Register MaskReg = RegInfo.createVirtualRegister(GPRC);
13156 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
13157 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
13158 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
13159 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
13160 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
13161 Register Ptr1Reg;
13162 Register TmpReg = RegInfo.createVirtualRegister(GPRC);
13163 Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
13164 Register CrReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
13165 // thisMBB:
13166 // ...
13167 // fallthrough --> loopMBB
13168 BB->addSuccessor(loop1MBB);
13169
13170 // The 4-byte load must be aligned, while a char or short may be
13171 // anywhere in the word. Hence all this nasty bookkeeping code.
13172 // add ptr1, ptrA, ptrB [copy if ptrA==0]
13173 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
13174 // xori shift, shift1, 24 [16]
13175 // rlwinm ptr, ptr1, 0, 0, 29
13176 // slw newval2, newval, shift
13177 // slw oldval2, oldval,shift
13178 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
13179 // slw mask, mask2, shift
13180 // and newval3, newval2, mask
13181 // and oldval3, oldval2, mask
13182 // loop1MBB:
13183 // lwarx tmpDest, ptr
13184 // and tmp, tmpDest, mask
13185 // cmpw tmp, oldval3
13186 // bne- exitBB
13187 // loop2MBB:
13188 // andc tmp2, tmpDest, mask
13189 // or tmp4, tmp2, newval3
13190 // stwcx. tmp4, ptr
13191 // bne- loop1MBB
13192 // b exitBB
13193 // exitBB:
13194 // srw dest, tmpDest, shift
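// As a concrete example of the bookkeeping: for an i8 at address 0x1003
// (byte offset 3 within its aligned word), the rlwinm computes
// shift1 = 3 * 8 = 24. On little-endian that is the final shift, since
// byte 3 occupies bits 31:24 of the loaded word; on big-endian the xori
// with 24 flips it to 0, because byte 3 there is the least-significant
// byte.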
13195 if (ptrA != ZeroReg) {
13196 Ptr1Reg = RegInfo.createVirtualRegister(RC);
13197 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
13198 .addReg(ptrA)
13199 .addReg(ptrB);
13200 } else {
13201 Ptr1Reg = ptrB;
13202 }
13203
13204 // We need to use a 32-bit subregister here to avoid a register class
13205 // mismatch in 64-bit mode.
13206 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
13207 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
13208 .addImm(3)
13209 .addImm(27)
13210 .addImm(is8bit ? 28 : 27);
13211 if (!isLittleEndian)
13212 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
13213 .addReg(Shift1Reg)
13214 .addImm(is8bit ? 24 : 16);
13215 if (is64bit)
13216 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
13217 .addReg(Ptr1Reg)
13218 .addImm(0)
13219 .addImm(61);
13220 else
13221 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
13222 .addReg(Ptr1Reg)
13223 .addImm(0)
13224 .addImm(0)
13225 .addImm(29);
13226 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
13227 .addReg(newval)
13228 .addReg(ShiftReg);
13229 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
13230 .addReg(oldval)
13231 .addReg(ShiftReg);
13232 if (is8bit)
13233 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
13234 else {
13235 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
13236 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
13237 .addReg(Mask3Reg)
13238 .addImm(65535);
13239 }
13240 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
13241 .addReg(Mask2Reg)
13242 .addReg(ShiftReg);
13243 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
13244 .addReg(NewVal2Reg)
13245 .addReg(MaskReg);
13246 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
13247 .addReg(OldVal2Reg)
13248 .addReg(MaskReg);
13249
13250 BB = loop1MBB;
13251 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
13252 .addReg(ZeroReg)
13253 .addReg(PtrReg);
13254 BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
13255 .addReg(TmpDestReg)
13256 .addReg(MaskReg);
13257 BuildMI(BB, dl, TII->get(PPC::CMPW), CrReg)
13258 .addReg(TmpReg)
13259 .addReg(OldVal3Reg);
13260 BuildMI(BB, dl, TII->get(PPC::BCC))
13261 .addImm(PPC::PRED_NE)
13262 .addReg(CrReg)
13263 .addMBB(exitMBB);
13264 BB->addSuccessor(loop2MBB);
13265 BB->addSuccessor(exitMBB);
13266
13267 BB = loop2MBB;
13268 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
13269 .addReg(TmpDestReg)
13270 .addReg(MaskReg);
13271 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
13272 .addReg(Tmp2Reg)
13273 .addReg(NewVal3Reg);
13274 BuildMI(BB, dl, TII->get(PPC::STWCX))
13275 .addReg(Tmp4Reg)
13276 .addReg(ZeroReg)
13277 .addReg(PtrReg);
13278 BuildMI(BB, dl, TII->get(PPC::BCC))
13279 .addImm(PPC::PRED_NE)
13280 .addReg(PPC::CR0)
13281 .addMBB(loop1MBB);
13282 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
13283 BB->addSuccessor(loop1MBB);
13284 BB->addSuccessor(exitMBB);
13285
13286 // exitMBB:
13287 // ...
13288 BB = exitMBB;
13289 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
13290 .addReg(TmpReg)
13291 .addReg(ShiftReg);
13292 } else if (MI.getOpcode() == PPC::FADDrtz) {
13293 // This pseudo performs an FADD with rounding mode temporarily forced
13294 // to round-to-zero. We emit this via custom inserter since the FPSCR
13295 // is not modeled at the SelectionDAG level.
13296 Register Dest = MI.getOperand(0).getReg();
13297 Register Src1 = MI.getOperand(1).getReg();
13298 Register Src2 = MI.getOperand(2).getReg();
13299 DebugLoc dl = MI.getDebugLoc();
13300
13301 MachineRegisterInfo &RegInfo = F->getRegInfo();
13302 Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
13303
13304 // Save FPSCR value.
13305 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
13306
13307 // Set rounding mode to round-to-zero.
13308 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1))
13309 .addImm(31)
13310 .addReg(PPC::RM, RegState::ImplicitDefine);
13311
13312 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0))
13313 .addImm(30)
13314 .addReg(PPC::RM, RegState::ImplicitDefine);
13315
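// Together, the mtfsb1 31 / mtfsb0 30 pair leaves the FPSCR
// rounding-control field RN (bits 62:63, i.e. bits 30:31 of the 32-bit
// view) holding 0b01, which the architecture defines as round toward zero.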
13316 // Perform addition.
13317 auto MIB = BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest)
13318 .addReg(Src1)
13319 .addReg(Src2);
13320 if (MI.getFlag(MachineInstr::NoFPExcept))
13321 MIB.setMIFlag(MachineInstr::NoFPExcept);
13322
13323 // Restore FPSCR value.
13324 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
13325 } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
13326 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
13327 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
13328 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
13329 unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
13330 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
13331 ? PPC::ANDI8_rec
13332 : PPC::ANDI_rec;
13333 bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
13334 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
13335
13336 MachineRegisterInfo &RegInfo = F->getRegInfo();
13337 Register Dest = RegInfo.createVirtualRegister(
13338 Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
13339
13340 DebugLoc Dl = MI.getDebugLoc();
13341 BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
13342 .addReg(MI.getOperand(1).getReg())
13343 .addImm(1);
13344 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
13345 MI.getOperand(0).getReg())
13346 .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
13347 } else if (MI.getOpcode() == PPC::TCHECK_RET) {
13348 DebugLoc Dl = MI.getDebugLoc();
13349 MachineRegisterInfo &RegInfo = F->getRegInfo();
13350 Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
13351 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
13352 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
13353 MI.getOperand(0).getReg())
13354 .addReg(CRReg);
13355 } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
13356 DebugLoc Dl = MI.getDebugLoc();
13357 unsigned Imm = MI.getOperand(1).getImm();
13358 BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
13359 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
13360 MI.getOperand(0).getReg())
13361 .addReg(PPC::CR0EQ);
13362 } else if (MI.getOpcode() == PPC::SETRNDi) {
13363 DebugLoc dl = MI.getDebugLoc();
13364 Register OldFPSCRReg = MI.getOperand(0).getReg();
13365
13366 // Save FPSCR value.
13367 if (MRI.use_empty(OldFPSCRReg))
13368 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), OldFPSCRReg);
13369 else
13370 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
13371
13372 // The floating-point rounding mode is in bits 62:63 of the FPSCR, and has
13373 // the following settings:
13374 // 00 Round to nearest
13375 // 01 Round to 0
13376 // 10 Round to +inf
13377 // 11 Round to -inf
13378
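// For example, SETRNDi 2 (round to +inf) emits MTFSB0 31 (Mode & 1 == 0
// clears the low RN bit) followed by MTFSB1 30 (Mode & 2 != 0 sets the
// high RN bit), leaving RN = 0b10.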
13379 // When the operand is an immediate, use its two least significant bits to
13380 // set bits 62:63 of the FPSCR.
13381 unsigned Mode = MI.getOperand(1).getImm();
13382 BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
13383 .addImm(31)
13384 .addReg(PPC::RM, RegState::ImplicitDefine);
13385
13386 BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
13387 .addImm(30)
13388 .addReg(PPC::RM, RegState::ImplicitDefine);
13389 } else if (MI.getOpcode() == PPC::SETRND) {
13390 DebugLoc dl = MI.getDebugLoc();
13391
13392 // Copy a register from F8RCRegClass (SrcReg) to G8RCRegClass (DestReg),
13393 // or vice versa. If the target doesn't have DirectMove, do the conversion
13394 // through the stack instead, because without DirectMove the target lacks
13395 // instructions such as mtvsrd and mfvsrd that move values between GPRs
13396 // and FPRs directly.
13397 auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
13398 if (Subtarget.hasDirectMove()) {
13399 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
13400 .addReg(SrcReg);
13401 } else {
13402 // Use stack to do the register copy.
13403 unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
13404 MachineRegisterInfo &RegInfo = F->getRegInfo();
13405 const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
13406 if (RC == &PPC::F8RCRegClass) {
13407 // Copy register from F8RCRegClass to G8RCRegclass.
13408 assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
13409 "Unsupported RegClass.");
13410
13411 StoreOp = PPC::STFD;
13412 LoadOp = PPC::LD;
13413 } else {
13414 // Copy register from G8RCRegClass to F8RCRegclass.
13415 assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
13416 (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
13417 "Unsupported RegClass.");
13418 }
13419
13420 MachineFrameInfo &MFI = F->getFrameInfo();
13421 int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
13422
13423 MachineMemOperand *MMOStore = F->getMachineMemOperand(
13424 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
13425 MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
13426 MFI.getObjectAlign(FrameIdx));
13427
13428 // Store the SrcReg into the stack.
13429 BuildMI(*BB, MI, dl, TII->get(StoreOp))
13430 .addReg(SrcReg)
13431 .addImm(0)
13432 .addFrameIndex(FrameIdx)
13433 .addMemOperand(MMOStore);
13434
13435 MachineMemOperand *MMOLoad = F->getMachineMemOperand(
13436 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
13437 MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
13438 MFI.getObjectAlign(FrameIdx));
13439
13440 // Load from the stack slot where SrcReg was stored, and save to DestReg,
13441 // completing the register class conversion from the class of SrcReg to
13442 // the class of DestReg.
13443 BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
13444 .addImm(0)
13445 .addFrameIndex(FrameIdx)
13446 .addMemOperand(MMOLoad);
13447 }
13448 };
13449
13450 Register OldFPSCRReg = MI.getOperand(0).getReg();
13451
13452 // Save FPSCR value.
13453 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
13454
13455 // When the operand is a gprc register, use its two least significant bits
13456 // together with the mtfsf instruction to set bits 62:63 of the FPSCR.
13457 //
13458 // copy OldFPSCRTmpReg, OldFPSCRReg
13459 // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
13460 // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
13461 // copy NewFPSCRReg, NewFPSCRTmpReg
13462 // mtfsf 255, NewFPSCRReg
13463 MachineOperand SrcOp = MI.getOperand(1);
13464 MachineRegisterInfo &RegInfo = F->getRegInfo();
13465 Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
13466
13467 copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
13468
13469 Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
13470 Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
13471
13472 // The first operand of INSERT_SUBREG should be a register that has
13473 // subregisters. We only care about its register class, so an
13474 // IMPLICIT_DEF register suffices.
13475 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
13476 BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
13477 .addReg(ImDefReg)
13478 .add(SrcOp)
13479 .addImm(1);
13480
13481 Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
13482 BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
13483 .addReg(OldFPSCRTmpReg)
13484 .addReg(ExtSrcReg)
13485 .addImm(0)
13486 .addImm(62);
13487
13488 Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
13489 copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
13490
13491 // The mask 255 means that bits 32:63 of NewFPSCRReg are placed into bits
13492 // 32:63 of the FPSCR.
13493 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
13494 .addImm(255)
13495 .addReg(NewFPSCRReg)
13496 .addImm(0)
13497 .addImm(0);
13498 } else if (MI.getOpcode() == PPC::SETFLM) {
13499 DebugLoc Dl = MI.getDebugLoc();
13500
13501 // The result of setflm is the previous FPSCR content, so save it first.
13502 Register OldFPSCRReg = MI.getOperand(0).getReg();
13503 if (MRI.use_empty(OldFPSCRReg))
13504 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::IMPLICIT_DEF), OldFPSCRReg);
13505 else
13506 BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg);
13507
13508 // Put bits 32:63 into the FPSCR.
13509 Register NewFPSCRReg = MI.getOperand(1).getReg();
13510 BuildMI(*BB, MI, Dl, TII->get(PPC::MTFSF))
13511 .addImm(255)
13512 .addReg(NewFPSCRReg)
13513 .addImm(0)
13514 .addImm(0);
13515 } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
13516 MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
13517 return emitProbedAlloca(MI, BB);
13518 } else if (MI.getOpcode() == PPC::SPLIT_QUADWORD) {
13519 DebugLoc DL = MI.getDebugLoc();
13520 Register Src = MI.getOperand(2).getReg();
13521 Register Lo = MI.getOperand(0).getReg();
13522 Register Hi = MI.getOperand(1).getReg();
13523 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY))
13524 .addDef(Lo)
13525 .addUse(Src, 0, PPC::sub_gp8_x1);
13526 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY))
13527 .addDef(Hi)
13528 .addUse(Src, 0, PPC::sub_gp8_x0);
13529 } else if (MI.getOpcode() == PPC::LQX_PSEUDO ||
13530 MI.getOpcode() == PPC::STQX_PSEUDO) {
13531 DebugLoc DL = MI.getDebugLoc();
13532 // Ptr is used as the ptr_rc_no_r0 part of LQ/STQ's memory operand and
13533 // holds the result of adding RA and RB, so it has to be in the
13534 // g8rc_and_g8rc_nox0 register class.
13535 Register Ptr =
13536 F->getRegInfo().createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);
13537 Register Val = MI.getOperand(0).getReg();
13538 Register RA = MI.getOperand(1).getReg();
13539 Register RB = MI.getOperand(2).getReg();
13540 BuildMI(*BB, MI, DL, TII->get(PPC::ADD8), Ptr).addReg(RA).addReg(RB);
13541 BuildMI(*BB, MI, DL,
13542 MI.getOpcode() == PPC::LQX_PSEUDO ? TII->get(PPC::LQ)
13543 : TII->get(PPC::STQ))
13544 .addReg(Val, MI.getOpcode() == PPC::LQX_PSEUDO ? RegState::Define : 0)
13545 .addImm(0)
13546 .addReg(Ptr);
13547 } else {
13548 llvm_unreachable("Unexpected instr type to insert");
13549 }
13550
13551 MI.eraseFromParent(); // The pseudo instruction is gone now.
13552 return BB;
13553}
13554
13555//===----------------------------------------------------------------------===//
13556// Target Optimization Hooks
13557//===----------------------------------------------------------------------===//
13558
13559static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
13560 // For the estimates, convergence is quadratic, so we essentially double the
13561 // number of digits correct after every iteration. For both FRE and FRSQRTE,
13562 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
13563 // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
13564 int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
13565 if (VT.getScalarType() == MVT::f64)
13566 RefinementSteps++;
13567 return RefinementSteps;
13568}
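// The step counts above follow from the quadratic convergence mentioned in
// the comment: each iteration roughly squares the relative error. From
// 2^-5, three steps reach 2^-40, covering float's 24-bit significand, and
// the extra f64 step reaches 2^-80 for its 53 bits; from 2^-14
// (hasRecipPrec), one step reaches 2^-28 for float and two reach 2^-56
// for double.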
13569
13570SDValue PPCTargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
13571 const DenormalMode &Mode) const {
13572 // We only have VSX Vector Test for software Square Root.
13573 EVT VT = Op.getValueType();
13574 if (!isTypeLegal(MVT::i1) ||
13575 (VT != MVT::f64 &&
13576 ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX())))
13577 return TargetLowering::getSqrtInputTest(Op, DAG, Mode);
13578
13579 SDLoc DL(Op);
13580 // The output register of FTSQRT is a CR field.
13581 SDValue FTSQRT = DAG.getNode(PPCISD::FTSQRT, DL, MVT::i32, Op);
13582 // ftsqrt BF,FRB
13583 // Let e_b be the unbiased exponent of the double-precision
13584 // floating-point operand in register FRB.
13585 // fe_flag is set to 1 if either of the following conditions occurs.
13586 // - The double-precision floating-point operand in register FRB is a zero,
13587 // a NaN, an infinity, or a negative value.
13588 // - e_b is less than or equal to -970.
13589 // Otherwise fe_flag is set to 0.
13590 // Both VSX and non-VSX versions would set EQ bit in the CR if the number is
13591 // not eligible for iteration. (zero/negative/infinity/nan or unbiased
13592 // exponent is less than -970)
13593 SDValue SRIdxVal = DAG.getTargetConstant(PPC::sub_eq, DL, MVT::i32);
13594 return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i1,
13595 FTSQRT, SRIdxVal),
13596 0);
13597}
13598
13599SDValue
13600PPCTargetLowering::getSqrtResultForDenormInput(SDValue Op,
13601 SelectionDAG &DAG) const {
13602 // We only have VSX Vector Square Root.
13603 EVT VT = Op.getValueType();
13604 if (VT != MVT::f64 &&
13605 ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX()))
13606 return TargetLowering::getSqrtResultForDenormInput(Op, DAG);
13607
13608 return DAG.getNode(PPCISD::FSQRT, SDLoc(Op), VT, Op);
13609}
13610
13611SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
13612 int Enabled, int &RefinementSteps,
13613 bool &UseOneConstNR,
13614 bool Reciprocal) const {
13615 EVT VT = Operand.getValueType();
13616 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
13617 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
13618 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
13619 (VT == MVT::v2f64 && Subtarget.hasVSX())) {
13620 if (RefinementSteps == ReciprocalEstimate::Unspecified)
13621 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
13622
13623 // The Newton-Raphson computation with a single constant does not provide
13624 // enough accuracy on some CPUs.
13625 UseOneConstNR = !Subtarget.needsTwoConstNR();
13626 return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
13627 }
13628 return SDValue();
13629}
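// A note on how this estimate is used: the generic DAG combiner refines
// the FRSQRTE result with Newton-Raphson steps of the usual
// reciprocal-square-root form, roughly x1 = x0 * (1.5 - 0.5 * a * x0 * x0),
// repeated RefinementSteps times; UseOneConstNR selects the cheaper
// one-constant formulation when the hardware estimate is accurate enough.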
13630
13631SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
13632 int Enabled,
13633 int &RefinementSteps) const {
13634 EVT VT = Operand.getValueType();
13635 if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
13636 (VT == MVT::f64 && Subtarget.hasFRE()) ||
13637 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
13638 (VT == MVT::v2f64 && Subtarget.hasVSX())) {
13639 if (RefinementSteps == ReciprocalEstimate::Unspecified)
13640 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
13641 return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
13642 }
13643 return SDValue();
13644}
13645
13646unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
13647 // Note: This functionality is used only when unsafe-fp-math is enabled, and
13648 // on cores with reciprocal estimates (which are used when unsafe-fp-math is
13649 // enabled for division), this functionality is redundant with the default
13650 // combiner logic (once the division -> reciprocal/multiply transformation
13651 // has taken place). As a result, this matters more for older cores than for
13652 // newer ones.
13653
13654 // Combine multiple FDIVs with the same divisor into multiple FMULs by the
13655 // reciprocal if there are two or more FDIVs (for embedded cores with only
13656 // one FP pipeline) or three or more FDIVs (for generic out-of-order cores).
13657 switch (Subtarget.getCPUDirective()) {
13658 default:
13659 return 3;
13660 case PPC::DIR_440:
13661 case PPC::DIR_A2:
13662 case PPC::DIR_E500:
13663 case PPC::DIR_E500mc:
13664 case PPC::DIR_E5500:
13665 return 2;
13666 }
13667}
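// For instance, with the default threshold of 3, the combiner can rewrite
//   x = a / d; y = b / d; z = c / d;
// into
//   t = 1.0 / d; x = a * t; y = b * t; z = c * t;
// trading three divisions for one division plus three multiplies.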
13668
13669// isConsecutiveLSLoc needs to work even if all adds have not yet been
13670// collapsed, and so we need to look through chains of them.
13671 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
13672 int64_t& Offset, SelectionDAG &DAG) {
13673 if (DAG.isBaseWithConstantOffset(Loc)) {
13674 Base = Loc.getOperand(0);
13675 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
13676
13677 // The base might itself be a base plus an offset, and if so, accumulate
13678 // that as well.
13679 getBaseWithConstantOffset(Base, Base, Offset, DAG);
13680 }
13681}
13682
13683 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
13684 unsigned Bytes, int Dist,
13685 SelectionDAG &DAG) {
13686 if (VT.getSizeInBits() / 8 != Bytes)
13687 return false;
13688
13689 SDValue BaseLoc = Base->getBasePtr();
13690 if (Loc.getOpcode() == ISD::FrameIndex) {
13691 if (BaseLoc.getOpcode() != ISD::FrameIndex)
13692 return false;
13693 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
13694 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
13695 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
13696 int FS = MFI.getObjectSize(FI);
13697 int BFS = MFI.getObjectSize(BFI);
13698 if (FS != BFS || FS != (int)Bytes) return false;
13699 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
13700 }
13701
13702 SDValue Base1 = Loc, Base2 = BaseLoc;
13703 int64_t Offset1 = 0, Offset2 = 0;
13704 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
13705 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
13706 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
13707 return true;
13708
13709 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13710 const GlobalValue *GV1 = nullptr;
13711 const GlobalValue *GV2 = nullptr;
13712 Offset1 = 0;
13713 Offset2 = 0;
13714 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
13715 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
13716 if (isGA1 && isGA2 && GV1 == GV2)
13717 return Offset1 == (Offset2 + Dist*Bytes);
13718 return false;
13719}
13720
13721// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
13722// not enforce equality of the chain operands.
13723 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
13724 unsigned Bytes, int Dist,
13725 SelectionDAG &DAG) {
13726 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
13727 EVT VT = LS->getMemoryVT();
13728 SDValue Loc = LS->getBasePtr();
13729 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
13730 }
13731
13732 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
13733 EVT VT;
13734 switch (N->getConstantOperandVal(1)) {
13735 default: return false;
13736 case Intrinsic::ppc_altivec_lvx:
13737 case Intrinsic::ppc_altivec_lvxl:
13738 case Intrinsic::ppc_vsx_lxvw4x:
13739 case Intrinsic::ppc_vsx_lxvw4x_be:
13740 VT = MVT::v4i32;
13741 break;
13742 case Intrinsic::ppc_vsx_lxvd2x:
13743 case Intrinsic::ppc_vsx_lxvd2x_be:
13744 VT = MVT::v2f64;
13745 break;
13746 case Intrinsic::ppc_altivec_lvebx:
13747 VT = MVT::i8;
13748 break;
13749 case Intrinsic::ppc_altivec_lvehx:
13750 VT = MVT::i16;
13751 break;
13752 case Intrinsic::ppc_altivec_lvewx:
13753 VT = MVT::i32;
13754 break;
13755 }
13756
13757 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
13758 }
13759
13760 if (N->getOpcode() == ISD::INTRINSIC_VOID) {
13761 EVT VT;
13762 switch (N->getConstantOperandVal(1)) {
13763 default: return false;
13764 case Intrinsic::ppc_altivec_stvx:
13765 case Intrinsic::ppc_altivec_stvxl:
13766 case Intrinsic::ppc_vsx_stxvw4x:
13767 VT = MVT::v4i32;
13768 break;
13769 case Intrinsic::ppc_vsx_stxvd2x:
13770 VT = MVT::v2f64;
13771 break;
13772 case Intrinsic::ppc_vsx_stxvw4x_be:
13773 VT = MVT::v4i32;
13774 break;
13775 case Intrinsic::ppc_vsx_stxvd2x_be:
13776 VT = MVT::v2f64;
13777 break;
13778 case Intrinsic::ppc_altivec_stvebx:
13779 VT = MVT::i8;
13780 break;
13781 case Intrinsic::ppc_altivec_stvehx:
13782 VT = MVT::i16;
13783 break;
13784 case Intrinsic::ppc_altivec_stvewx:
13785 VT = MVT::i32;
13786 break;
13787 }
13788
13789 return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
13790 }
13791
13792 return false;
13793}
13794
13795 // Return true if there is a nearby consecutive load to the one provided
13796 // (regardless of alignment). We search up and down the chain, looking through
13797// token factors and other loads (but nothing else). As a result, a true result
13798// indicates that it is safe to create a new consecutive load adjacent to the
13799// load provided.
13800 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
13801 SDValue Chain = LD->getChain();
13802 EVT VT = LD->getMemoryVT();
13803
13804 SmallSet<SDNode *, 16> LoadRoots;
13805 SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
13806 SmallSet<SDNode *, 16> Visited;
13807
13808 // First, search up the chain, branching to follow all token-factor operands.
13809 // If we find a consecutive load, then we're done, otherwise, record all
13810 // nodes just above the top-level loads and token factors.
13811 while (!Queue.empty()) {
13812 SDNode *ChainNext = Queue.pop_back_val();
13813 if (!Visited.insert(ChainNext).second)
13814 continue;
13815
13816 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
13817 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
13818 return true;
13819
13820 if (!Visited.count(ChainLD->getChain().getNode()))
13821 Queue.push_back(ChainLD->getChain().getNode());
13822 } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
13823 for (const SDUse &O : ChainNext->ops())
13824 if (!Visited.count(O.getNode()))
13825 Queue.push_back(O.getNode());
13826 } else
13827 LoadRoots.insert(ChainNext);
13828 }
13829
13830 // Second, search down the chain, starting from the top-level nodes recorded
13831 // in the first phase. These top-level nodes are the nodes just above all
13832 // loads and token factors. Starting with their uses, recursively look through
13833 // all loads (just the chain uses) and token factors to find a consecutive
13834 // load.
13835 Visited.clear();
13836 Queue.clear();
13837
13838 for (SDNode *I : LoadRoots) {
13839 Queue.push_back(I);
13840
13841 while (!Queue.empty()) {
13842 SDNode *LoadRoot = Queue.pop_back_val();
13843 if (!Visited.insert(LoadRoot).second)
13844 continue;
13845
13846 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
13847 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
13848 return true;
13849
13850 for (SDNode *U : LoadRoot->uses())
13851 if (((isa<MemSDNode>(U) &&
13852 cast<MemSDNode>(U)->getChain().getNode() == LoadRoot) ||
13853 U->getOpcode() == ISD::TokenFactor) &&
13854 !Visited.count(U))
13855 Queue.push_back(U);
13856 }
13857 }
13858
13859 return false;
13860}
13861
13862/// This function is called when we have proved that a SETCC node can be replaced
13863/// by subtraction (and other supporting instructions) so that the result of
13864/// comparison is kept in a GPR instead of CR. This function is purely for
13865/// codegen purposes and has some flags to guide the codegen process.
13866static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
13867 bool Swap, SDLoc &DL, SelectionDAG &DAG) {
13868 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
13869
13870 // Zero-extend the operands to the largest legal integer type. The original
13871 // operands must be of a strictly smaller size.
13872 auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
13873 DAG.getConstant(Size, DL, MVT::i32));
13874 auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
13875 DAG.getConstant(Size, DL, MVT::i32));
13876
13877 // Swap if needed. Depends on the condition code.
13878 if (Swap)
13879 std::swap(Op0, Op1);
13880
13881 // Subtract extended integers.
13882 auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
13883
13884 // Move the sign bit to the least significant position and zero out the rest.
13885 // Now the least significant bit carries the result of original comparison.
13886 auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
13887 DAG.getConstant(Size - 1, DL, MVT::i32));
13888 auto Final = Shifted;
13889
13890 // Complement the result if needed. Based on the condition code.
13891 if (Complement)
13892 Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
13893 DAG.getConstant(1, DL, MVT::i64));
13894
13895 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
13896}
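// As a worked example of the transform above: for (setult a, b) on i32
// operands zero-extended to i64, the difference a - b is negative exactly
// when a < b, so shifting it right by 63 leaves the comparison result in
// bit 0. The remaining unsigned predicates reduce to the same pattern,
// e.g. setule(a, b) == !setult(b, a), which is why the caller passes the
// Swap and Complement flags.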
13897
13898SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
13899 DAGCombinerInfo &DCI) const {
13900 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
13901
13902 SelectionDAG &DAG = DCI.DAG;
13903 SDLoc DL(N);
13904
13905 // The size of the integers being compared plays a critical role in the
13906 // following analysis, so we prefer to do this when all types are legal.
13907 if (!DCI.isAfterLegalizeDAG())
13908 return SDValue();
13909
13910 // If all users of the SETCC extend its value to a legal integer type,
13911 // then we replace the SETCC with a subtraction.
13912 for (const SDNode *U : N->uses())
13913 if (U->getOpcode() != ISD::ZERO_EXTEND)
13914 return SDValue();
13915
13916 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13917 auto OpSize = N->getOperand(0).getValueSizeInBits();
13918
13920
13919 unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
13921 if (OpSize < Size) {
13922 switch (CC) {
13923 default: break;
13924 case ISD::SETULT:
13925 return generateEquivalentSub(N, Size, false, false, DL, DAG);
13926 case ISD::SETULE:
13927 return generateEquivalentSub(N, Size, true, true, DL, DAG);
13928 case ISD::SETUGT:
13929 return generateEquivalentSub(N, Size, false, true, DL, DAG);
13930 case ISD::SETUGE:
13931 return generateEquivalentSub(N, Size, true, false, DL, DAG);
13932 }
13933 }
13934
13935 return SDValue();
13936}
13937
13938SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
13939 DAGCombinerInfo &DCI) const {
13940 SelectionDAG &DAG = DCI.DAG;
13941 SDLoc dl(N);
13942
13943 assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
13944 // If we're tracking CR bits, we need to be careful that we don't have:
13945 // trunc(binary-ops(zext(x), zext(y)))
13946 // or
13947 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
13948 // such that we're unnecessarily moving things into GPRs when it would be
13949 // better to keep them in CR bits.
13950
13951 // Note that trunc here can be an actual i1 trunc, or can be the effective
13952 // truncation that comes from a setcc or select_cc.
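// For example, (trunc (or (zext i1 %a), (zext i1 %b))) is better kept as
// (or %a, %b) entirely in CR bits; the promotion below rewrites such
// clusters so that the intervening extensions and truncations disappear.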
13953 if (N->getOpcode() == ISD::TRUNCATE &&
13954 N->getValueType(0) != MVT::i1)
13955 return SDValue();
13956
13957 if (N->getOperand(0).getValueType() != MVT::i32 &&
13958 N->getOperand(0).getValueType() != MVT::i64)
13959 return SDValue();
13960
13961 if (N->getOpcode() == ISD::SETCC ||
13962 N->getOpcode() == ISD::SELECT_CC) {
13963 // If we're looking at a comparison, then we need to make sure that the
13964 // high bits (all except for the first) don't affect the result.
13965 ISD::CondCode CC =
13966 cast<CondCodeSDNode>(N->getOperand(
13967 N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
13968 unsigned OpBits = N->getOperand(0).getValueSizeInBits();
13969
13970 if (ISD::isSignedIntSetCC(CC)) {
13971 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
13972 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
13973 return SDValue();
13974 } else if (ISD::isUnsignedIntSetCC(CC)) {
13975 if (!DAG.MaskedValueIsZero(N->getOperand(0),
13976 APInt::getHighBitsSet(OpBits, OpBits-1)) ||
13977 !DAG.MaskedValueIsZero(N->getOperand(1),
13978 APInt::getHighBitsSet(OpBits, OpBits-1)))
13979 return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
13980 : SDValue());
13981 } else {
13982 // This is neither a signed nor an unsigned comparison; just make sure
13983 // that the high bits are equal.
13984 KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
13985 KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
13986
13987 // We don't really care about what is known about the first bit (if
13988 // anything), so pretend that it is known zero for both to ensure they can
13989 // be compared as constants.
13990 Op1Known.Zero.setBit(0); Op1Known.One.clearBit(0);
13991 Op2Known.Zero.setBit(0); Op2Known.One.clearBit(0);
13992
13993 if (!Op1Known.isConstant() || !Op2Known.isConstant() ||
13994 Op1Known.getConstant() != Op2Known.getConstant())
13995 return SDValue();
13996 }
13997 }
13998
13999 // We now know that the higher-order bits are irrelevant, we just need to
14000 // make sure that all of the intermediate operations are bit operations, and
14001 // all inputs are extensions.
14002 if (N->getOperand(0).getOpcode() != ISD::AND &&
14003 N->getOperand(0).getOpcode() != ISD::OR &&
14004 N->getOperand(0).getOpcode() != ISD::XOR &&
14005 N->getOperand(0).getOpcode() != ISD::SELECT &&
14006 N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
14007 N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
14008 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
14009 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
14010 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
14011 return SDValue();
14012
14013 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
14014 N->getOperand(1).getOpcode() != ISD::AND &&
14015 N->getOperand(1).getOpcode() != ISD::OR &&
14016 N->getOperand(1).getOpcode() != ISD::XOR &&
14017 N->getOperand(1).getOpcode() != ISD::SELECT &&
14018 N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
14019 N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
14020 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
14021 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
14022 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
14023 return SDValue();
14024
14025 SmallVector<SDValue, 4> Inputs;
14026 SmallVector<SDValue, 8> BinOps, PromOps;
14027 SmallPtrSet<SDNode *, 16> Visited;
14028
14029 for (unsigned i = 0; i < 2; ++i) {
14030 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
14031 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
14032 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
14033 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
14034 isa<ConstantSDNode>(N->getOperand(i)))
14035 Inputs.push_back(N->getOperand(i));
14036 else
14037 BinOps.push_back(N->getOperand(i));
14038
14039 if (N->getOpcode() == ISD::TRUNCATE)
14040 break;
14041 }
14042
14043 // Visit all inputs, collect all binary operations (and, or, xor and
14044 // select) that are all fed by extensions.
14045 while (!BinOps.empty()) {
14046 SDValue BinOp = BinOps.pop_back_val();
14047
14048 if (!Visited.insert(BinOp.getNode()).second)
14049 continue;
14050
14051 PromOps.push_back(BinOp);
14052
14053 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
14054 // The condition of the select is not promoted.
14055 if (BinOp.getOpcode() == ISD::SELECT && i == 0)
14056 continue;
14057 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
14058 continue;
14059
14060 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
14061 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
14062 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
14063 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
14064 isa<ConstantSDNode>(BinOp.getOperand(i))) {
14065 Inputs.push_back(BinOp.getOperand(i));
14066 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
14067 BinOp.getOperand(i).getOpcode() == ISD::OR ||
14068 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
14069 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
14070 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
14071 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
14072 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
14073 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
14074 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
14075 BinOps.push_back(BinOp.getOperand(i));
14076 } else {
14077 // We have an input that is not an extension or another binary
14078 // operation; we'll abort this transformation.
14079 return SDValue();
14080 }
14081 }
14082 }
14083
14084 // Make sure that this is a self-contained cluster of operations (which
14085 // is not quite the same thing as saying that everything has only one
14086 // use).
14087 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
14088 if (isa<ConstantSDNode>(Inputs[i]))
14089 continue;
14090
14091 for (const SDNode *User : Inputs[i].getNode()->uses()) {
14092 if (User != N && !Visited.count(User))
14093 return SDValue();
14094
14095 // Make sure that we're not going to promote the non-output-value
14096 // operand(s) of SELECT or SELECT_CC.
14097 // FIXME: Although we could sometimes handle this, and it does occur in
14098 // practice that one of the condition inputs to the select is also one of
14099 // the outputs, we currently can't deal with this.
14100 if (User->getOpcode() == ISD::SELECT) {
14101 if (User->getOperand(0) == Inputs[i])
14102 return SDValue();
14103 } else if (User->getOpcode() == ISD::SELECT_CC) {
14104 if (User->getOperand(0) == Inputs[i] ||
14105 User->getOperand(1) == Inputs[i])
14106 return SDValue();
14107 }
14108 }
14109 }
14110
14111 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
14112 for (const SDNode *User : PromOps[i].getNode()->uses()) {
14113 if (User != N && !Visited.count(User))
14114 return SDValue();
14115
14116 // Make sure that we're not going to promote the non-output-value
14117 // operand(s) of SELECT or SELECT_CC.
14118 // FIXME: Although we could sometimes handle this, and it does occur in
14119 // practice that one of the condition inputs to the select is also one of
14120 // the outputs, we currently can't deal with this.
14121 if (User->getOpcode() == ISD::SELECT) {
14122 if (User->getOperand(0) == PromOps[i])
14123 return SDValue();
14124 } else if (User->getOpcode() == ISD::SELECT_CC) {
14125 if (User->getOperand(0) == PromOps[i] ||
14126 User->getOperand(1) == PromOps[i])
14127 return SDValue();
14128 }
14129 }
14130 }
14131
14132 // Replace all inputs with the extension operand.
14133 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
14134 // Constants may have users outside the cluster of to-be-promoted nodes,
14135 // and so we need to replace those as we do the promotions.
14136 if (isa<ConstantSDNode>(Inputs[i]))
14137 continue;
14138 else
14139 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
14140 }
14141
14142 std::list<HandleSDNode> PromOpHandles;
14143 for (auto &PromOp : PromOps)
14144 PromOpHandles.emplace_back(PromOp);
14145
14146 // Replace all operations (these are all the same, but have a different
14147 // (i1) return type). DAG.getNode will validate that the types of
14148 // a binary operator match, so go through the list in reverse so that
14149 // we've likely promoted both operands first. Any intermediate truncations or
14150 // extensions disappear.
14151 while (!PromOpHandles.empty()) {
14152 SDValue PromOp = PromOpHandles.back().getValue();
14153 PromOpHandles.pop_back();
14154
14155 if (PromOp.getOpcode() == ISD::TRUNCATE ||
14156 PromOp.getOpcode() == ISD::SIGN_EXTEND ||
14157 PromOp.getOpcode() == ISD::ZERO_EXTEND ||
14158 PromOp.getOpcode() == ISD::ANY_EXTEND) {
14159 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
14160 PromOp.getOperand(0).getValueType() != MVT::i1) {
14161 // The operand is not yet ready (see comment below).
14162 PromOpHandles.emplace_front(PromOp);
14163 continue;
14164 }
14165
14166 SDValue RepValue = PromOp.getOperand(0);
14167 if (isa<ConstantSDNode>(RepValue))
14168 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
14169
14170 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
14171 continue;
14172 }
14173
14174 unsigned C;
14175 switch (PromOp.getOpcode()) {
14176 default: C = 0; break;
14177 case ISD::SELECT: C = 1; break;
14178 case ISD::SELECT_CC: C = 2; break;
14179 }
14180
14181 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
14182 PromOp.getOperand(C).getValueType() != MVT::i1) ||
14183 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
14184 PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
14185 // The to-be-promoted operands of this node have not yet been
14186 // promoted (this should be rare because we're going through the
14187 // list backward, but if one of the operands has several users in
14188 // this cluster of to-be-promoted nodes, it is possible).
14189 PromOpHandles.emplace_front(PromOp);
14190 continue;
14191 }
14192
14193 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
14194 PromOp.getNode()->op_end());
14195
14196 // If there are any constant inputs, make sure they're replaced now.
14197 for (unsigned i = 0; i < 2; ++i)
14198 if (isa<ConstantSDNode>(Ops[C+i]))
14199 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
14200
14201 DAG.ReplaceAllUsesOfValueWith(PromOp,
14202 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
14203 }
14204
14205 // Now we're left with the initial truncation itself.
14206 if (N->getOpcode() == ISD::TRUNCATE)
14207 return N->getOperand(0);
14208
14209 // Otherwise, this is a comparison. The operands to be compared have just
14210 // changed type (to i1), but everything else is the same.
14211 return SDValue(N, 0);
14212}
14213
14214SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
14215 DAGCombinerInfo &DCI) const {
14216 SelectionDAG &DAG = DCI.DAG;
14217 SDLoc dl(N);
14218
14219 // If we're tracking CR bits, we need to be careful that we don't have:
14220 // zext(binary-ops(trunc(x), trunc(y)))
14221 // or
14222 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
14223 // such that we're unnecessarily moving things into CR bits that can more
14224 // efficiently stay in GPRs. Note that if we're not certain that the high
14225 // bits are set as required by the final extension, we still may need to do
14226 // some masking to get the proper behavior.
14227
14228 // This same functionality is important on PPC64 when dealing with
14229 // 32-to-64-bit extensions; these occur often when 32-bit values are used as
14230 // the return values of functions. Because it is so similar, it is handled
14231 // here as well.
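// An illustrative sketch (not part of the original source): with CR-bit
// tracking, a DAG such as
//   (zext (and (trunc i32 %x to i1), (trunc i32 %y to i1)) to i32)
// can be rewritten so the AND stays in a GPR as (and i32 %x, %y), with
// masking added at the end only when the high bits must match the
// extension.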
14232
14233 if (N->getValueType(0) != MVT::i32 &&
14234 N->getValueType(0) != MVT::i64)
14235 return SDValue();
14236
14237 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
14238 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
14239 return SDValue();
14240
14241 if (N->getOperand(0).getOpcode() != ISD::AND &&
14242 N->getOperand(0).getOpcode() != ISD::OR &&
14243 N->getOperand(0).getOpcode() != ISD::XOR &&
14244 N->getOperand(0).getOpcode() != ISD::SELECT &&
14245 N->getOperand(0).getOpcode() != ISD::SELECT_CC)
14246 return SDValue();
14247
14248 SmallVector<SDValue, 4> Inputs;
14249 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
14250 SmallPtrSet<SDNode *, 16> Visited;
14251
14252 // Visit all inputs, collect all binary operations (and, or, xor and
14253 // select) that are all fed by truncations.
14254 while (!BinOps.empty()) {
14255 SDValue BinOp = BinOps.pop_back_val();
14256
14257 if (!Visited.insert(BinOp.getNode()).second)
14258 continue;
14259
14260 PromOps.push_back(BinOp);
14261
14262 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
14263 // The condition of the select is not promoted.
14264 if (BinOp.getOpcode() == ISD::SELECT && i == 0)
14265 continue;
14266 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
14267 continue;
14268
14269 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
14270 isa<ConstantSDNode>(BinOp.getOperand(i))) {
14271 Inputs.push_back(BinOp.getOperand(i));
14272 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
14273 BinOp.getOperand(i).getOpcode() == ISD::OR ||
14274 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
14275 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
14276 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
14277 BinOps.push_back(BinOp.getOperand(i));
14278 } else {
14279 // We have an input that is not a truncation or another binary
14280 // operation; we'll abort this transformation.
14281 return SDValue();
14282 }
14283 }
14284 }
14285
14286 // The operands of a select that must be truncated when the select is
14287 // promoted because the operand is actually part of the to-be-promoted set.
14288 DenseMap<SDNode *, EVT> SelectTruncOp[2];
14289
14290 // Make sure that this is a self-contained cluster of operations (which
14291 // is not quite the same thing as saying that everything has only one
14292 // use).
14293 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
14294 if (isa<ConstantSDNode>(Inputs[i]))
14295 continue;
14296
14297 for (SDNode *User : Inputs[i].getNode()->uses()) {
14298 if (User != N && !Visited.count(User))
14299 return SDValue();
14300
14301 // If we're going to promote the non-output-value operand(s) of SELECT or
14302 // SELECT_CC, record them for truncation.
14303 if (User->getOpcode() == ISD::SELECT) {
14304 if (User->getOperand(0) == Inputs[i])
14305 SelectTruncOp[0].insert(std::make_pair(User,
14306 User->getOperand(0).getValueType()));
14307 } else if (User->getOpcode() == ISD::SELECT_CC) {
14308 if (User->getOperand(0) == Inputs[i])
14309 SelectTruncOp[0].insert(std::make_pair(User,
14310 User->getOperand(0).getValueType()));
14311 if (User->getOperand(1) == Inputs[i])
14312 SelectTruncOp[1].insert(std::make_pair(User,
14313 User->getOperand(1).getValueType()));
14314 }
14315 }
14316 }
14317
14318 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
14319 for (SDNode *User : PromOps[i].getNode()->uses()) {
14320 if (User != N && !Visited.count(User))
14321 return SDValue();
14322
14323 // If we're going to promote the non-output-value operand(s) of SELECT or
14324 // SELECT_CC, record them for truncation.
14325 if (User->getOpcode() == ISD::SELECT) {
14326 if (User->getOperand(0) == PromOps[i])
14327 SelectTruncOp[0].insert(std::make_pair(User,
14328 User->getOperand(0).getValueType()));
14329 } else if (User->getOpcode() == ISD::SELECT_CC) {
14330 if (User->getOperand(0) == PromOps[i])
14331 SelectTruncOp[0].insert(std::make_pair(User,
14332 User->getOperand(0).getValueType()));
14333 if (User->getOperand(1) == PromOps[i])
14334 SelectTruncOp[1].insert(std::make_pair(User,
14335 User->getOperand(1).getValueType()));
14336 }
14337 }
14338 }
14339
14340 unsigned PromBits = N->getOperand(0).getValueSizeInBits();
14341 bool ReallyNeedsExt = false;
14342 if (N->getOpcode() != ISD::ANY_EXTEND) {
14343 // If any of the inputs is not already sign/zero extended, then
14344 // we'll still need to do that at the end.
14345 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
14346 if (isa<ConstantSDNode>(Inputs[i]))
14347 continue;
14348
14349 unsigned OpBits =
14350 Inputs[i].getOperand(0).getValueSizeInBits();
14351 assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
14352
14353 if ((N->getOpcode() == ISD::ZERO_EXTEND &&
14354 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
14355 APInt::getHighBitsSet(OpBits,
14356 OpBits-PromBits))) ||
14357 (N->getOpcode() == ISD::SIGN_EXTEND &&
14358 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
14359 (OpBits-(PromBits-1)))) {
14360 ReallyNeedsExt = true;
14361 break;
14362 }
14363 }
14364 }
14365
14366 // Replace all inputs, either with the truncation operand, or a
14367 // truncation or extension to the final output type.
14368 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
14369 // Constant inputs need to be replaced with the to-be-promoted nodes that
14370 // use them because they might have users outside of the cluster of
14371 // promoted nodes.
14372 if (isa<ConstantSDNode>(Inputs[i]))
14373 continue;
14374
14375 SDValue InSrc = Inputs[i].getOperand(0);
14376 if (Inputs[i].getValueType() == N->getValueType(0))
14377 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
14378 else if (N->getOpcode() == ISD::SIGN_EXTEND)
14379 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
14380 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
14381 else if (N->getOpcode() == ISD::ZERO_EXTEND)
14382 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
14383 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
14384 else
14385 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
14386 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
14387 }
14388
14389 std::list<HandleSDNode> PromOpHandles;
14390 for (auto &PromOp : PromOps)
14391 PromOpHandles.emplace_back(PromOp);
14392
14393 // Replace all operations (these are all the same, but have a different
14394 // (promoted) return type). DAG.getNode will validate that the types of
14395 // a binary operator match, so go through the list in reverse so that
14396 // we've likely promoted both operands first.
14397 while (!PromOpHandles.empty()) {
14398 SDValue PromOp = PromOpHandles.back().getValue();
14399 PromOpHandles.pop_back();
14400
14401 unsigned C;
14402 switch (PromOp.getOpcode()) {
14403 default: C = 0; break;
14404 case ISD::SELECT: C = 1; break;
14405 case ISD::SELECT_CC: C = 2; break;
14406 }
14407
14408 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
14409 PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
14410 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
14411 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
14412 // The to-be-promoted operands of this node have not yet been
14413 // promoted (this should be rare because we're going through the
14414 // list backward, but if one of the operands has several users in
14415 // this cluster of to-be-promoted nodes, it is possible).
14416 PromOpHandles.emplace_front(PromOp);
14417 continue;
14418 }
14419
14420 // For SELECT and SELECT_CC nodes, we do a similar check for any
14421 // to-be-promoted comparison inputs.
14422 if (PromOp.getOpcode() == ISD::SELECT ||
14423 PromOp.getOpcode() == ISD::SELECT_CC) {
14424 if ((SelectTruncOp[0].count(PromOp.getNode()) &&
14425 PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
14426 (SelectTruncOp[1].count(PromOp.getNode()) &&
14427 PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
14428 PromOpHandles.emplace_front(PromOp);
14429 continue;
14430 }
14431 }
14432
14433 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
14434 PromOp.getNode()->op_end());
14435
14436 // If this node has constant inputs, then they'll need to be promoted here.
14437 for (unsigned i = 0; i < 2; ++i) {
14438 if (!isa<ConstantSDNode>(Ops[C+i]))
14439 continue;
14440 if (Ops[C+i].getValueType() == N->getValueType(0))
14441 continue;
14442
14443 if (N->getOpcode() == ISD::SIGN_EXTEND)
14444 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
14445 else if (N->getOpcode() == ISD::ZERO_EXTEND)
14446 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
14447 else
14448 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
14449 }
14450
14451 // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
14452 // truncate them again to the original value type.
14453 if (PromOp.getOpcode() == ISD::SELECT ||
14454 PromOp.getOpcode() == ISD::SELECT_CC) {
14455 auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
14456 if (SI0 != SelectTruncOp[0].end())
14457 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
14458 auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
14459 if (SI1 != SelectTruncOp[1].end())
14460 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
14461 }
14462
14463 DAG.ReplaceAllUsesOfValueWith(PromOp,
14464 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
14465 }
14466
14467 // Now we're left with the initial extension itself.
14468 if (!ReallyNeedsExt)
14469 return N->getOperand(0);
14470
14471 // To zero extend, just mask off everything except for the first bit (in the
14472 // i1 case).
14473 if (N->getOpcode() == ISD::ZERO_EXTEND)
14474 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
14475 DAG.getConstant(APInt::getLowBitsSet(
14476 N->getValueSizeInBits(0), PromBits),
14477 dl, N->getValueType(0)));
14478
14479 assert(N->getOpcode() == ISD::SIGN_EXTEND &&
14480 "Invalid extension type");
14481 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
14482 SDValue ShiftCst =
14483 DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
14484 return DAG.getNode(
14485 ISD::SRA, dl, N->getValueType(0),
14486 DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
14487 ShiftCst);
14488}
14489
14490SDValue PPCTargetLowering::combineSetCC(SDNode *N,
14491 DAGCombinerInfo &DCI) const {
14492 assert(N->getOpcode() == ISD::SETCC &&
14493 "Should be called with a SETCC node");
14494
14495 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
14496 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
14497 SDValue LHS = N->getOperand(0);
14498 SDValue RHS = N->getOperand(1);
14499
14500 // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
14501 if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
14502 LHS.hasOneUse())
14503 std::swap(LHS, RHS);
14504
14505 // x == 0-y --> x+y == 0
14506 // x != 0-y --> x+y != 0
14507 if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
14508 RHS.hasOneUse()) {
14509 SDLoc DL(N);
14510 SelectionDAG &DAG = DCI.DAG;
14511 EVT VT = N->getValueType(0);
14512 EVT OpVT = LHS.getValueType();
14513 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
14514 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
14515 }
14516 }
14517
14518 return DAGCombineTruncBoolExt(N, DCI);
14519}
14520
14521// Is this an extending load from an f32 to an f64?
14522static bool isFPExtLoad(SDValue Op) {
14523 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
14524 return LD->getExtensionType() == ISD::EXTLOAD &&
14525 Op.getValueType() == MVT::f64;
14526 return false;
14527}
14528
14529 /// Reduces the number of fp-to-int conversions when building a vector.
14530///
14531/// If this vector is built out of floating to integer conversions,
14532/// transform it to a vector built out of floating point values followed by a
14533/// single floating to integer conversion of the vector.
14534/// Namely (build_vector (fptosi $A), (fptosi $B), ...)
14535/// becomes (fptosi (build_vector ($A, $B, ...)))
14536SDValue PPCTargetLowering::
14537combineElementTruncationToVectorTruncation(SDNode *N,
14538 DAGCombinerInfo &DCI) const {
14539 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
14540 "Should be called with a BUILD_VECTOR node");
14541
14542 SelectionDAG &DAG = DCI.DAG;
14543 SDLoc dl(N);
14544
14545 SDValue FirstInput = N->getOperand(0);
14546 assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
14547 "The input operand must be an fp-to-int conversion.");
14548
14549 // This combine happens after legalization so the fp_to_[su]i nodes are
14550 // already converted to PPCISD nodes.
14551 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
14552 if (FirstConversion == PPCISD::FCTIDZ ||
14553 FirstConversion == PPCISD::FCTIDUZ ||
14554 FirstConversion == PPCISD::FCTIWZ ||
14555 FirstConversion == PPCISD::FCTIWUZ) {
14556 bool IsSplat = true;
14557 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
14558 FirstConversion == PPCISD::FCTIWUZ;
14559 EVT SrcVT = FirstInput.getOperand(0).getValueType();
14560 SmallVector<SDValue, 4> Ops;
14561 EVT TargetVT = N->getValueType(0);
14562 for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
14563 SDValue NextOp = N->getOperand(i);
14564 if (NextOp.getOpcode() != PPCISD::MFVSR)
14565 return SDValue();
14566 unsigned NextConversion = NextOp.getOperand(0).getOpcode();
14567 if (NextConversion != FirstConversion)
14568 return SDValue();
14569 // If we are converting to 32-bit integers, we need to add an FP_ROUND.
14570 // This is not valid if the input was originally double precision. It is
14571 // also not profitable to do unless this is an extending load in which
14572 // case doing this combine will allow us to combine consecutive loads.
14573 if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
14574 return SDValue();
14575 if (N->getOperand(i) != FirstInput)
14576 IsSplat = false;
14577 }
14578
14579 // If this is a splat, we leave it as-is since there will be only a single
14580 // fp-to-int conversion followed by a splat of the integer. This is better
14581 // for 32-bit and smaller ints and neutral for 64-bit ints.
14582 if (IsSplat)
14583 return SDValue();
14584
14585 // Now that we know we have the right type of node, get its operands
14586 for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
14587 SDValue In = N->getOperand(i).getOperand(0);
14588 if (Is32Bit) {
14589 // For 32-bit values, we need to add an FP_ROUND node (if we made it
14590 // here, we know that all inputs are extending loads so this is safe).
14591 if (In.isUndef())
14592 Ops.push_back(DAG.getUNDEF(SrcVT));
14593 else {
14594 SDValue Trunc =
14595 DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, In.getOperand(0),
14596 DAG.getIntPtrConstant(1, dl, /*isTarget=*/true));
14597 Ops.push_back(Trunc);
14598 }
14599 } else
14600 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
14601 }
14602
14603 unsigned Opcode;
14604 if (FirstConversion == PPCISD::FCTIDZ ||
14605 FirstConversion == PPCISD::FCTIWZ)
14606 Opcode = ISD::FP_TO_SINT;
14607 else
14608 Opcode = ISD::FP_TO_UINT;
14609
14610 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
14611 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
14612 return DAG.getNode(Opcode, dl, TargetVT, BV);
14613 }
14614 return SDValue();
14615}
14616
14617/// Reduce the number of loads when building a vector.
14618///
14619/// Building a vector out of multiple loads can be converted to a load
14620/// of the vector type if the loads are consecutive. If the loads are
14621/// consecutive but in descending order, a shuffle is added at the end
14622/// to reorder the vector.
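/// For example (an illustrative sketch, not from the original source):
///   (build_vector (load a), (load a+4), (load a+8), (load a+12))
/// with i32 elements becomes a single v4i32 load from a; had the loads
/// been in descending address order, a vector_shuffle <3,2,1,0> would be
/// appended to restore the element order.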
14623 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
14624 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
14625 "Should be called with a BUILD_VECTOR node");
14626
14627 SDLoc dl(N);
14628
14629 // Return early for non-byte-sized types, as they can't be consecutive.
14630 if (!N->getValueType(0).getVectorElementType().isByteSized())
14631 return SDValue();
14632
14633 bool InputsAreConsecutiveLoads = true;
14634 bool InputsAreReverseConsecutive = true;
14635 unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
14636 SDValue FirstInput = N->getOperand(0);
14637 bool IsRoundOfExtLoad = false;
14638 LoadSDNode *FirstLoad = nullptr;
14639
14640 if (FirstInput.getOpcode() == ISD::FP_ROUND &&
14641 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
14642 FirstLoad = cast<LoadSDNode>(FirstInput.getOperand(0));
14643 IsRoundOfExtLoad = FirstLoad->getExtensionType() == ISD::EXTLOAD;
14644 }
14645 // Not a build vector of (possibly fp_rounded) loads.
14646 if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
14647 N->getNumOperands() == 1)
14648 return SDValue();
14649
14650 if (!IsRoundOfExtLoad)
14651 FirstLoad = cast<LoadSDNode>(FirstInput);
14652
14653 SmallVector<LoadSDNode *, 4> InputLoads;
14654 InputLoads.push_back(FirstLoad);
14655 for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
14656 // If any inputs are fp_round(extload), they all must be.
14657 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
14658 return SDValue();
14659
14660 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
14661 N->getOperand(i);
14662 if (NextInput.getOpcode() != ISD::LOAD)
14663 return SDValue();
14664
14665 SDValue PreviousInput =
14666 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
14667 LoadSDNode *LD1 = cast<LoadSDNode>(PreviousInput);
14668 LoadSDNode *LD2 = cast<LoadSDNode>(NextInput);
14669
14670 // If any inputs are fp_round(extload), they all must be.
14671 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
14672 return SDValue();
14673
14674 // We only care about regular loads. The PPC-specific load intrinsics
14675 // will not lead to a merge opportunity.
14676 if (!DAG.areNonVolatileConsecutiveLoads(LD2, LD1, ElemSize, 1))
14677 InputsAreConsecutiveLoads = false;
14678 if (!DAG.areNonVolatileConsecutiveLoads(LD1, LD2, ElemSize, 1))
14679 InputsAreReverseConsecutive = false;
14680
14681 // Exit early if the loads are neither consecutive nor reverse consecutive.
14682 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
14683 return SDValue();
14684 InputLoads.push_back(LD2);
14685 }
14686
14687 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
14688 "The loads cannot be both consecutive and reverse consecutive.");
14689
14690 SDValue WideLoad;
14691 SDValue ReturnSDVal;
14692 if (InputsAreConsecutiveLoads) {
14693 assert(FirstLoad && "Input needs to be a LoadSDNode.");
14694 WideLoad = DAG.getLoad(N->getValueType(0), dl, FirstLoad->getChain(),
14695 FirstLoad->getBasePtr(), FirstLoad->getPointerInfo(),
14696 FirstLoad->getAlign());
14697 ReturnSDVal = WideLoad;
14698 } else if (InputsAreReverseConsecutive) {
14699 LoadSDNode *LastLoad = InputLoads.back();
14700 assert(LastLoad && "Input needs to be a LoadSDNode.");
14701 WideLoad = DAG.getLoad(N->getValueType(0), dl, LastLoad->getChain(),
14702 LastLoad->getBasePtr(), LastLoad->getPointerInfo(),
14703 LastLoad->getAlign());
14704 SmallVector<int, 16> Ops;
14705 for (int i = N->getNumOperands() - 1; i >= 0; i--)
14706 Ops.push_back(i);
14707
14708 ReturnSDVal = DAG.getVectorShuffle(N->getValueType(0), dl, WideLoad,
14709 DAG.getUNDEF(N->getValueType(0)), Ops);
14710 } else
14711 return SDValue();
14712
14713 for (auto *LD : InputLoads)
14714 DAG.makeEquivalentMemoryOrdering(LD, WideLoad);
14715 return ReturnSDVal;
14716}
14717
14718 // This function adds the vector_shuffle needed to get
14719// the elements of the vector extract in the correct position
14720// as specified by the CorrectElems encoding.
14721 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
14722 SDValue Input, uint64_t Elems,
14723 uint64_t CorrectElems) {
14724 SDLoc dl(N);
14725
14726 unsigned NumElems = Input.getValueType().getVectorNumElements();
14727 SmallVector<int, 16> ShuffleMask(NumElems, -1);
14728
14729 // Knowing the element indices being extracted from the original
14730 // vector and the order in which they're being inserted, just put
14731 // them at element indices required for the instruction.
14732 for (unsigned i = 0; i < N->getNumOperands(); i++) {
14733 if (DAG.getDataLayout().isLittleEndian())
14734 ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
14735 else
14736 ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
14737 CorrectElems = CorrectElems >> 8;
14738 Elems = Elems >> 8;
14739 }
14740
14741 SDValue Shuffle =
14742 DAG.getVectorShuffle(Input.getValueType(), dl, Input,
14743 DAG.getUNDEF(Input.getValueType()), ShuffleMask);
14744
14745 EVT VT = N->getValueType(0);
14746 SDValue Conv = DAG.getBitcast(VT, Shuffle);
14747
14748 EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
14749 Input.getValueType().getVectorElementType(),
14750 VT.getVectorNumElements());
14751 return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
14752 DAG.getValueType(ExtVT));
14753}
14754
14755// Look for build vector patterns where input operands come from sign
14756// extended vector_extract elements of specific indices. If the correct indices
14757 // aren't used, add a vector shuffle to fix up the indices and create a
14758 // SIGN_EXTEND_INREG node which selects the vector sign extend instructions
14759// during instruction selection.
14760 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
14761 // This array encodes the indices that the vector sign extend instructions
14762 // extract from when extending from one type to another for both BE and LE.
14763 // The right nibble of each byte corresponds to the LE indices,
14764 // and the left nibble of each byte corresponds to the BE indices.
14765 // For example: 0x3074B8FC byte->word
14766 // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
14767 // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
14768 // For example: 0x000070F8 byte->double word
14769 // For LE: the allowed indices are: 0x0,0x8
14770 // For BE: the allowed indices are: 0x7,0xF
14771 uint64_t TargetElems[] = {
14772 0x3074B8FC, // b->w
14773 0x000070F8, // b->d
14774 0x10325476, // h->w
14775 0x00003074, // h->d
14776 0x00001032, // w->d
14777 };
14778
14779 uint64_t Elems = 0;
14780 int Index;
14781 SDValue Input;
14782
14783 auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
14784 if (!Op)
14785 return false;
14786 if (Op.getOpcode() != ISD::SIGN_EXTEND &&
14787 Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
14788 return false;
14789
14790 // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
14791 // of the right width.
14792 SDValue Extract = Op.getOperand(0);
14793 if (Extract.getOpcode() == ISD::ANY_EXTEND)
14794 Extract = Extract.getOperand(0);
14795 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14796 return false;
14797
14798 ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
14799 if (!ExtOp)
14800 return false;
14801
14802 Index = ExtOp->getZExtValue();
14803 if (Input && Input != Extract.getOperand(0))
14804 return false;
14805
14806 if (!Input)
14807 Input = Extract.getOperand(0);
14808
14809 Elems = Elems << 8;
14810 Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
14811 Elems |= Index;
14812
14813 return true;
14814 };
14815
14816 // If the build vector operands aren't sign-extended vector extracts
14817 // of the same input vector, then return.
14818 for (unsigned i = 0; i < N->getNumOperands(); i++) {
14819 if (!isSExtOfVecExtract(N->getOperand(i))) {
14820 return SDValue();
14821 }
14822 }
14823
14824 // If the vector extract indices are not correct, add the appropriate
14825 // vector_shuffle.
14826 int TgtElemArrayIdx;
14827 int InputSize = Input.getValueType().getScalarSizeInBits();
14828 int OutputSize = N->getValueType(0).getScalarSizeInBits();
14829 if (InputSize + OutputSize == 40)
14830 TgtElemArrayIdx = 0;
14831 else if (InputSize + OutputSize == 72)
14832 TgtElemArrayIdx = 1;
14833 else if (InputSize + OutputSize == 48)
14834 TgtElemArrayIdx = 2;
14835 else if (InputSize + OutputSize == 80)
14836 TgtElemArrayIdx = 3;
14837 else if (InputSize + OutputSize == 96)
14838 TgtElemArrayIdx = 4;
14839 else
14840 return SDValue();
14841
14842 uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
14843 CorrectElems = DAG.getDataLayout().isLittleEndian()
14844 ? CorrectElems & 0x0F0F0F0F0F0F0F0F
14845 : CorrectElems & 0xF0F0F0F0F0F0F0F0;
14846 if (Elems != CorrectElems) {
14847 return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
14848 }
14849
14850 // Regular lowering will catch cases where a shuffle is not needed.
14851 return SDValue();
14852}
14853
14854// Look for the pattern of a load from a narrow width to i128, feeding
14855// into a BUILD_VECTOR of v1i128. Replace this sequence with a PPCISD node
14856// (LXVRZX). This node represents a zero extending load that will be matched
14857// to the Load VSX Vector Rightmost instructions.
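// For instance (a sketch, not part of the original source):
//   (v1i128 (build_vector (i128 (zextload i64, ptr))))
// becomes (PPCISD::LXVRZX chain, ptr, 64), where the last operand is the
// load width in bits.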
14858 static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) {
14859 SDLoc DL(N);
14860
14861 // This combine is only eligible for a BUILD_VECTOR of v1i128.
14862 if (N->getValueType(0) != MVT::v1i128)
14863 return SDValue();
14864
14865 SDValue Operand = N->getOperand(0);
14866 // Proceed with the transformation if the operand to the BUILD_VECTOR
14867 // is a load instruction.
14868 if (Operand.getOpcode() != ISD::LOAD)
14869 return SDValue();
14870
14871 auto *LD = cast<LoadSDNode>(Operand);
14872 EVT MemoryType = LD->getMemoryVT();
14873
14874 // This transformation is only valid if we are loading either a byte,
14875 // halfword, word, or doubleword.
14876 bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 ||
14877 MemoryType == MVT::i32 || MemoryType == MVT::i64;
14878
14879 // Ensure that the load from the narrow width is being zero extended to i128.
14880 if (!ValidLDType ||
14881 (LD->getExtensionType() != ISD::ZEXTLOAD &&
14882 LD->getExtensionType() != ISD::EXTLOAD))
14883 return SDValue();
14884
14885 SDValue LoadOps[] = {
14886 LD->getChain(), LD->getBasePtr(),
14887 DAG.getIntPtrConstant(MemoryType.getScalarSizeInBits(), DL)};
14888
14889 return DAG.getMemIntrinsicNode(PPCISD::LXVRZX, DL,
14890 DAG.getVTList(MVT::v1i128, MVT::Other),
14891 LoadOps, MemoryType, LD->getMemOperand());
14892}
14893
14894SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
14895 DAGCombinerInfo &DCI) const {
14896 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
14897 "Should be called with a BUILD_VECTOR node");
14898
14899 SelectionDAG &DAG = DCI.DAG;
14900 SDLoc dl(N);
14901
14902 if (!Subtarget.hasVSX())
14903 return SDValue();
14904
14905 // The target independent DAG combiner will leave a build_vector of
14906 // float-to-int conversions intact. We can generate MUCH better code for
14907 // a float-to-int conversion of a vector of floats.
14908 SDValue FirstInput = N->getOperand(0);
14909 if (FirstInput.getOpcode() == PPCISD::MFVSR) {
14910 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
14911 if (Reduced)
14912 return Reduced;
14913 }
14914
14915 // If we're building a vector out of consecutive loads, just load that
14916 // vector type.
14917 SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
14918 if (Reduced)
14919 return Reduced;
14920
14921 // If we're building a vector out of extended elements from another vector
14922 // we have P9 vector integer extend instructions. The code assumes legal
14923 // input types (i.e. it can't handle things like v4i16) so do not run before
14924 // legalization.
14925 if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
14926 Reduced = combineBVOfVecSExt(N, DAG);
14927 if (Reduced)
14928 return Reduced;
14929 }
14930
14931 // On Power10, the Load VSX Vector Rightmost instructions can be utilized
14932 // if this is a BUILD_VECTOR of v1i128, and if the operand to the BUILD_VECTOR
14933 // is a load from <valid narrow width> to i128.
14934 if (Subtarget.isISA3_1()) {
14935 SDValue BVOfZLoad = combineBVZEXTLOAD(N, DAG);
14936 if (BVOfZLoad)
14937 return BVOfZLoad;
14938 }
14939
14940 if (N->getValueType(0) != MVT::v2f64)
14941 return SDValue();
14942
14943 // Looking for:
14944 // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
14945 if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
14946 FirstInput.getOpcode() != ISD::UINT_TO_FP)
14947 return SDValue();
14948 if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
14949 N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
14950 return SDValue();
14951 if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
14952 return SDValue();
14953
14954 SDValue Ext1 = FirstInput.getOperand(0);
14955 SDValue Ext2 = N->getOperand(1).getOperand(0);
14956 if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
14957 Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14958 return SDValue();
14959
14960 ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
14961 ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
14962 if (!Ext1Op || !Ext2Op)
14963 return SDValue();
14964 if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
14965 Ext1.getOperand(0) != Ext2.getOperand(0))
14966 return SDValue();
14967
14968 int FirstElem = Ext1Op->getZExtValue();
14969 int SecondElem = Ext2Op->getZExtValue();
14970 int SubvecIdx;
14971 if (FirstElem == 0 && SecondElem == 1)
14972 SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
14973 else if (FirstElem == 2 && SecondElem == 3)
14974 SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
14975 else
14976 return SDValue();
14977
14978 SDValue SrcVec = Ext1.getOperand(0);
14979 auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
14980 PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
14981 return DAG.getNode(NodeType, dl, MVT::v2f64,
14982 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
14983}
14984
14985SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
14986 DAGCombinerInfo &DCI) const {
14987 assert((N->getOpcode() == ISD::SINT_TO_FP ||
14988 N->getOpcode() == ISD::UINT_TO_FP) &&
14989 "Need an int -> FP conversion node here");
14990
14991 if (useSoftFloat() || !Subtarget.has64BitSupport())
14992 return SDValue();
14993
14994 SelectionDAG &DAG = DCI.DAG;
14995 SDLoc dl(N);
14996 SDValue Op(N, 0);
14997
14998 // Don't handle ppc_fp128 here, or conversions from integer types outside
14999 // the range the hardware can handle.
15000 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
15001 return SDValue();
15002 if (!Op.getOperand(0).getValueType().isSimple())
15003 return SDValue();
15004 if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
15005 Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
15006 return SDValue();
15007
15008 SDValue FirstOperand(Op.getOperand(0));
15009 bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
15010 (FirstOperand.getValueType() == MVT::i8 ||
15011 FirstOperand.getValueType() == MVT::i16);
15012 if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
15013 bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
15014 bool DstDouble = Op.getValueType() == MVT::f64;
15015 unsigned ConvOp = Signed ?
15016 (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) :
15017 (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
15018 SDValue WidthConst =
15019 DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
15020 dl, false);
15021 LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
15022 SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
15023 SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
15024 DAG.getVTList(MVT::f64, MVT::Other),
15025 Ops, MVT::i8, LDN->getMemOperand());
15026
15027 // For signed conversion, we need to sign-extend the value in the VSR
15028 if (Signed) {
15029 SDValue ExtOps[] = { Ld, WidthConst };
15030 SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
15031 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
15032 } else
15033 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
15034 }
15035
15036
15037 // For i32 intermediate values, unfortunately, the conversion functions
15038 // leave the upper 32 bits of the value undefined. Within the set of
15039 // scalar instructions, we have no method for zero- or sign-extending the
15040 // value. Thus, we cannot handle i32 intermediate values here.
15041 if (Op.getOperand(0).getValueType() == MVT::i32)
15042 return SDValue();
15043
15044 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
15045 "UINT_TO_FP is supported only with FPCVT");
15046
15047 // If we have FCFIDS, then use it when converting to single-precision.
15048 // Otherwise, convert to double-precision and then round.
15049 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
15050 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
15051 : PPCISD::FCFIDS)
15052 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
15053 : PPCISD::FCFID);
15054 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
15055 ? MVT::f32
15056 : MVT::f64;
15057
15058 // If we're converting from a float to an int and back to a float again,
15059 // then we don't need the store/load pair at all.
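// A sketch of the idea (not from the original source):
//   (f64 (sint_to_fp (i64 (fp_to_sint f64 %x))))
// becomes (f64 (FCFID (FCTIDZ %x))), keeping the value in FP registers
// throughout.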
15060 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
15061 Subtarget.hasFPCVT()) ||
15062 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
15063 SDValue Src = Op.getOperand(0).getOperand(0);
15064 if (Src.getValueType() == MVT::f32) {
15065 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
15066 DCI.AddToWorklist(Src.getNode());
15067 } else if (Src.getValueType() != MVT::f64) {
15068 // Make sure that we don't pick up a ppc_fp128 source value.
15069 return SDValue();
15070 }
15071
15072 unsigned FCTOp =
15073 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
15074 PPCISD::FCTIDUZ;
15075
15076 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
15077 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
15078
15079 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
15080 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
15081 DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
15082 DCI.AddToWorklist(FP.getNode());
15083 }
15084
15085 return FP;
15086 }
15087
15088 return SDValue();
15089}
15090
15091// expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
15092// builtins) into loads with swaps.
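// For example (illustrative only): a little-endian (v4i32 (load ptr))
// becomes (bitcast (XXSWAPD (LXVD2X ptr))), since LXVD2X loads the
// doublewords in big-endian element order.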
15093 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
15094 DAGCombinerInfo &DCI) const {
15095 // Delay VSX load for LE combine until after LegalizeOps to prioritize other
15096 // load combines.
15097 if (DCI.isBeforeLegalizeOps())
15098 return SDValue();
15099
15100 SelectionDAG &DAG = DCI.DAG;
15101 SDLoc dl(N);
15102 SDValue Chain;
15103 SDValue Base;
15104 MachineMemOperand *MMO;
15105
15106 switch (N->getOpcode()) {
15107 default:
15108 llvm_unreachable("Unexpected opcode for little endian VSX load");
15109 case ISD::LOAD: {
15110 LoadSDNode *LD = cast<LoadSDNode>(N);
15111 Chain = LD->getChain();
15112 Base = LD->getBasePtr();
15113 MMO = LD->getMemOperand();
15114 // If the MMO suggests this isn't a load of a full vector, leave
15115 // things alone. For a built-in, we have to make the change for
15116 // correctness, so if there is a size problem that will be a bug.
15117 if (MMO->getSize() < 16)
15118 return SDValue();
15119 break;
15120 }
15121 case ISD::INTRINSIC_W_CHAIN: {
15122 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
15123 Chain = Intrin->getChain();
15124 // Similarly to the store case below, Intrin->getBasePtr() doesn't get
15125 // us what we want. Get operand 2 instead.
15126 Base = Intrin->getOperand(2);
15127 MMO = Intrin->getMemOperand();
15128 break;
15129 }
15130 }
15131
15132 MVT VecTy = N->getValueType(0).getSimpleVT();
15133
15134 SDValue LoadOps[] = { Chain, Base };
15135 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
15136 DAG.getVTList(MVT::v2f64, MVT::Other),
15137 LoadOps, MVT::v2f64, MMO);
15138
15139 DCI.AddToWorklist(Load.getNode());
15140 Chain = Load.getValue(1);
15141 SDValue Swap = DAG.getNode(
15142 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
15143 DCI.AddToWorklist(Swap.getNode());
15144
15145 // Add a bitcast if the resulting load type doesn't match v2f64.
15146 if (VecTy != MVT::v2f64) {
15147 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
15148 DCI.AddToWorklist(N.getNode());
15149 // Package {bitcast value, swap's chain} to match Load's shape.
15150 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
15151 N, Swap.getValue(1));
15152 }
15153
15154 return Swap;
15155}
15156
15157// expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
15158// builtins) into stores with swaps.
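// For example (illustrative only): a little-endian (store v4i32 %v, ptr)
// becomes (STXVD2X (XXSWAPD (bitcast v2f64 %v)), ptr), mirroring the load
// case above.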
15159 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
15160 DAGCombinerInfo &DCI) const {
15161 // Delay VSX store for LE combine until after LegalizeOps to prioritize other
15162 // store combines.
15163 if (DCI.isBeforeLegalizeOps())
15164 return SDValue();
15165
15166 SelectionDAG &DAG = DCI.DAG;
15167 SDLoc dl(N);
15168 SDValue Chain;
15169 SDValue Base;
15170 unsigned SrcOpnd;
15171 MachineMemOperand *MMO;
15172
15173 switch (N->getOpcode()) {
15174 default:
15175 llvm_unreachable("Unexpected opcode for little endian VSX store");
15176 case ISD::STORE: {
15177 StoreSDNode *ST = cast<StoreSDNode>(N);
15178 Chain = ST->getChain();
15179 Base = ST->getBasePtr();
15180 MMO = ST->getMemOperand();
15181 SrcOpnd = 1;
15182 // If the MMO suggests this isn't a store of a full vector, leave
15183 // things alone. For a built-in, we have to make the change for
15184 // correctness, so if there is a size problem that will be a bug.
15185 if (MMO->getSize() < 16)
15186 return SDValue();
15187 break;
15188 }
15189 case ISD::INTRINSIC_VOID: {
15190 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
15191 Chain = Intrin->getChain();
15192 // Intrin->getBasePtr() oddly does not get what we want.
15193 Base = Intrin->getOperand(3);
15194 MMO = Intrin->getMemOperand();
15195 SrcOpnd = 2;
15196 break;
15197 }
15198 }
15199
15200 SDValue Src = N->getOperand(SrcOpnd);
15201 MVT VecTy = Src.getValueType().getSimpleVT();
15202
15203 // All stores are done as v2f64 and possible bit cast.
15204 if (VecTy != MVT::v2f64) {
15205 Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
15206 DCI.AddToWorklist(Src.getNode());
15207 }
15208
15209 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
15210 DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
15211 DCI.AddToWorklist(Swap.getNode());
15212 Chain = Swap.getValue(1);
15213 SDValue StoreOps[] = { Chain, Swap, Base };
15214 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
15215 DAG.getVTList(MVT::Other),
15216 StoreOps, VecTy, MMO);
15217 DCI.AddToWorklist(Store.getNode());
15218 return Store;
15219}
15220
15221// Handle DAG combine for STORE (FP_TO_INT F).
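// For example (a sketch, not from the original source):
//   (store (i32 (fp_to_sint f64 %f)), ptr)
// becomes a PPCISD::ST_VSR_SCAL_INT node that converts %f in a VSR and
// stores the integer directly, avoiding a GPR round trip.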
15222SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
15223 DAGCombinerInfo &DCI) const {
15224 SelectionDAG &DAG = DCI.DAG;
15225 SDLoc dl(N);
15226 unsigned Opcode = N->getOperand(1).getOpcode();
15227 (void)Opcode;
15228 bool Strict = N->getOperand(1)->isStrictFPOpcode();
15229
15230 assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT ||
15231 Opcode == ISD::STRICT_FP_TO_SINT || Opcode == ISD::STRICT_FP_TO_UINT)
15232 && "Not a FP_TO_INT Instruction!");
15233
15234 SDValue Val = N->getOperand(1).getOperand(Strict ? 1 : 0);
15235 EVT Op1VT = N->getOperand(1).getValueType();
15236 EVT ResVT = Val.getValueType();
15237
15238 if (!Subtarget.hasVSX() || !Subtarget.hasFPCVT() || !isTypeLegal(ResVT))
15239 return SDValue();
15240
15241 // Only perform the combine for conversions to i64/i32, or to i16/i8 on Power9.
15242 bool ValidTypeForStoreFltAsInt =
15243 (Op1VT == MVT::i32 || (Op1VT == MVT::i64 && Subtarget.isPPC64()) ||
15244 (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
15245
15246 // TODO: Lower conversion from f128 on all VSX targets
15247 if (ResVT == MVT::ppcf128 || (ResVT == MVT::f128 && !Subtarget.hasP9Vector()))
15248 return SDValue();
15249
15250 if ((Op1VT != MVT::i64 && !Subtarget.hasP8Vector()) ||
15251 cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
15252 return SDValue();
15253
15254 Val = convertFPToInt(N->getOperand(1), DAG, Subtarget);
15255
15256 // Set number of bytes being converted.
15257 unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
15258 SDValue Ops[] = {N->getOperand(0), Val, N->getOperand(2),
15259 DAG.getIntPtrConstant(ByteSize, dl, false),
15260 DAG.getValueType(Op1VT)};
15261
15262 Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
15263 DAG.getVTList(MVT::Other), Ops,
15264 cast<StoreSDNode>(N)->getMemoryVT(),
15265 cast<StoreSDNode>(N)->getMemOperand());
15266
15267 return Val;
15268}
15269
15270static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
15271 // Check that the source of the element keeps flipping
15272 // (i.e. Mask[i] < NumElts -> Mask[i+1] >= NumElts).
15273 bool PrevElemFromFirstVec = Mask[0] < NumElts;
15274 for (int i = 1, e = Mask.size(); i < e; i++) {
15275 if (PrevElemFromFirstVec && Mask[i] < NumElts)
15276 return false;
15277 if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
15278 return false;
15279 PrevElemFromFirstVec = !PrevElemFromFirstVec;
15280 }
15281 return true;
15282}
15283
15284static bool isSplatBV(SDValue Op) {
15285 if (Op.getOpcode() != ISD::BUILD_VECTOR)
15286 return false;
15287 SDValue FirstOp;
15288
15289 // Find first non-undef input.
15290 for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
15291 FirstOp = Op.getOperand(i);
15292 if (!FirstOp.isUndef())
15293 break;
15294 }
15295
15296 // All inputs are undef or the same as the first non-undef input.
15297 for (int i = 1, e = Op.getNumOperands(); i < e; i++)
15298 if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
15299 return false;
15300 return true;
15301}
15302
15303 static SDValue isScalarToVec(SDValue Op) {
15304 if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
15305 return Op;
15306 if (Op.getOpcode() != ISD::BITCAST)
15307 return SDValue();
15308 Op = Op.getOperand(0);
15309 if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
15310 return Op;
15311 return SDValue();
15312}
15313
15314// Fix up the shuffle mask to account for the fact that the result of
15315// scalar_to_vector is not in lane zero. This just takes all values in
15316// the ranges specified by the min/max indices and adds the number of
15317// elements required to ensure each element comes from the respective
15318// position in the valid lane.
15319// On little endian, that's just the corresponding element in the other
15320// half of the vector. On big endian, it is in the same half but right
15321// justified rather than left justified in that half.
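// For example (illustrative, little endian, HalfVec = 2): a mask entry
// referring to element 0 of the permuted input is rewritten to 0 + 2 = 2,
// the corresponding element in the other half of the vector.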
15322 static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
15323 int LHSMaxIdx, int RHSMinIdx,
15324 int RHSMaxIdx, int HalfVec,
15325 unsigned ValidLaneWidth,
15326 const PPCSubtarget &Subtarget) {
15327 for (int i = 0, e = ShuffV.size(); i < e; i++) {
15328 int Idx = ShuffV[i];
15329 if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
15330 ShuffV[i] +=
15331 Subtarget.isLittleEndian() ? HalfVec : HalfVec - ValidLaneWidth;
15332 }
15333}
15334
15335// Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
15336// the original is:
15337// (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
15338// In such a case, just change the shuffle mask to extract the element
15339// from the permuted index.
15340 static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG,
15341 const PPCSubtarget &Subtarget) {
15342 SDLoc dl(OrigSToV);
15343 EVT VT = OrigSToV.getValueType();
15344 assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
15345 "Expecting a SCALAR_TO_VECTOR here");
15346 SDValue Input = OrigSToV.getOperand(0);
15347
15348 if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
15349 ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
15350 SDValue OrigVector = Input.getOperand(0);
15351
15352 // Can't handle non-const element indices or different vector types
15353 // for the input to the extract and the output of the scalar_to_vector.
15354 if (Idx && VT == OrigVector.getValueType()) {
15355 unsigned NumElts = VT.getVectorNumElements();
15356 assert(
15357 NumElts > 1 &&
15358 "Cannot produce a permuted scalar_to_vector for one element vector");
15359 SmallVector<int, 16> NewMask(NumElts, -1);
15360 unsigned ResultInElt = NumElts / 2;
15361 ResultInElt -= Subtarget.isLittleEndian() ? 0 : 1;
15362 NewMask[ResultInElt] = Idx->getZExtValue();
15363 return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
15364 }
15365 }
15366 return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
15367 OrigSToV.getOperand(0));
15368}
15369
15370// On little endian subtargets, combine shuffles such as:
15371// vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
15372// into:
15373// vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
15374// because the latter can be matched to a single instruction merge.
15375// Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
15376// to put the value into element zero. Adjust the shuffle mask so that the
15377// vector can remain in permuted form (to prevent a swap prior to a shuffle).
15378// On big endian targets, this is still useful for SCALAR_TO_VECTOR
15379// nodes with elements smaller than doubleword because all the ways
15380// of getting scalar data into a vector register put the value in the
15381// rightmost element of the left half of the vector.
15382SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
15383 SelectionDAG &DAG) const {
15384 SDValue LHS = SVN->getOperand(0);
15385 SDValue RHS = SVN->getOperand(1);
15386 auto Mask = SVN->getMask();
15387 int NumElts = LHS.getValueType().getVectorNumElements();
15388 SDValue Res(SVN, 0);
15389 SDLoc dl(SVN);
15390 bool IsLittleEndian = Subtarget.isLittleEndian();
15391
15392 // On big endian targets this is only useful for subtargets with direct moves.
15393 // On little endian targets it would be useful for all subtargets with VSX.
15394 // However adding special handling for LE subtargets without direct moves
15395 // would be wasted effort since the minimum arch for LE is ISA 2.07 (Power8)
15396 // which includes direct moves.
15397 if (!Subtarget.hasDirectMove())
15398 return Res;
15399
15400 // If this is not a shuffle of a shuffle and the first element comes from
15401 // the second vector, canonicalize to the commuted form. This will make it
15402 // more likely to match one of the single instruction patterns.
15403 if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
15404 RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
15405 std::swap(LHS, RHS);
15406 Res = DAG.getCommutedVectorShuffle(*SVN);
15407 Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
15408 }
15409
15410 // Adjust the shuffle mask if either input vector comes from a
15411 // SCALAR_TO_VECTOR and keep the respective input vector in permuted
15412 // form (to prevent the need for a swap).
15413 SmallVector<int, 16> ShuffV(Mask);
15414 SDValue SToVLHS = isScalarToVec(LHS);
15415 SDValue SToVRHS = isScalarToVec(RHS);
15416 if (SToVLHS || SToVRHS) {
15417 // FIXME: If both LHS and RHS are SCALAR_TO_VECTOR, but are not the
15418 // same type and have differing element sizes, then do not perform
15419 // the following transformation. The current transformation for
15420 // SCALAR_TO_VECTOR assumes that both input vectors have the same
15421 // element size. This will be updated in the future to account for
15422 // differing sizes of the LHS and RHS.
15423 if (SToVLHS && SToVRHS &&
15424 (SToVLHS.getValueType().getScalarSizeInBits() !=
15425 SToVRHS.getValueType().getScalarSizeInBits()))
15426 return Res;
15427
15428 int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
15429 : SToVRHS.getValueType().getVectorNumElements();
15430 int NumEltsOut = ShuffV.size();
15431 // The width of the "valid lane" (i.e. the lane that contains the value that
15432 // is vectorized) needs to be expressed in terms of the number of elements
15433 // of the shuffle. It is the ratio of the element sizes before and after
15434 // any bitcast.
15435 unsigned ValidLaneWidth =
15436 SToVLHS ? SToVLHS.getValueType().getScalarSizeInBits() /
15437 LHS.getValueType().getScalarSizeInBits()
15438 : SToVRHS.getValueType().getScalarSizeInBits() /
15439 RHS.getValueType().getScalarSizeInBits();
15440
15441 // Initially assume that neither input is permuted. These will be adjusted
15442 // accordingly if either input is.
15443 int LHSMaxIdx = -1;
15444 int RHSMinIdx = -1;
15445 int RHSMaxIdx = -1;
15446 int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
15447
15448 // Get the permuted scalar to vector nodes for the source(s) that come from
15449 // ISD::SCALAR_TO_VECTOR.
15450 // On big endian systems, this only makes sense for element sizes smaller
15451 // than 64 bits since for 64-bit elements, all instructions already put
15452 // the value into element zero. Since scalar size of LHS and RHS may differ
15453 // after isScalarToVec, this should be checked using their own sizes.
15454 if (SToVLHS) {
15455 if (!IsLittleEndian && SToVLHS.getValueType().getScalarSizeInBits() >= 64)
15456 return Res;
15457 // Set up the values for the shuffle vector fixup.
15458 LHSMaxIdx = NumEltsOut / NumEltsIn;
15459 SToVLHS = getSToVPermuted(SToVLHS, DAG, Subtarget);
15460 if (SToVLHS.getValueType() != LHS.getValueType())
15461 SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
15462 LHS = SToVLHS;
15463 }
15464 if (SToVRHS) {
15465 if (!IsLittleEndian && SToVRHS.getValueType().getScalarSizeInBits() >= 64)
15466 return Res;
15467 RHSMinIdx = NumEltsOut;
15468 RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
15469 SToVRHS = getSToVPermuted(SToVRHS, DAG, Subtarget);
15470 if (SToVRHS.getValueType() != RHS.getValueType())
15471 SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
15472 RHS = SToVRHS;
15473 }
15474
15475 // Fix up the shuffle mask to reflect where the desired element actually is.
15476 // The minimum and maximum indices that correspond to element zero for both
15477 // the LHS and RHS are computed and will control which shuffle mask entries
15478 // are to be changed. For example, if the RHS is permuted, any shuffle mask
15479 // entries in the range [RHSMinIdx,RHSMaxIdx) will be adjusted.
15480 fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
15481 HalfVec, ValidLaneWidth, Subtarget);
15482 Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
15483
15484 // We may have simplified away the shuffle. We won't be able to do anything
15485 // further with it here.
15486 if (!isa<ShuffleVectorSDNode>(Res))
15487 return Res;
15488 Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
15489 }
15490
15491 SDValue TheSplat = IsLittleEndian ? RHS : LHS;
15492 // The common case after we commuted the shuffle is that the RHS is a splat
15493 // and we have elements coming in from the splat at indices that are not
15494 // conducive to using a merge.
15495 // Example:
15496 // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
15497 if (!isSplatBV(TheSplat))
15498 return Res;
15499
15500 // We are looking for a mask such that all even elements are from
15501 // one vector and all odd elements from the other.
15502 if (!isAlternatingShuffMask(Mask, NumElts))
15503 return Res;
15504
15505 // Adjust the mask so we are pulling in the same index from the splat
15506 // as the index from the interesting vector in consecutive elements.
15507 if (IsLittleEndian) {
15508 // Example (even elements from first vector):
15509 // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
15510 if (Mask[0] < NumElts)
15511 for (int i = 1, e = Mask.size(); i < e; i += 2) {
15512 if (ShuffV[i] < 0)
15513 continue;
15514 // If element from non-splat is undef, pick first element from splat.
15515 ShuffV[i] = (ShuffV[i - 1] >= 0 ? ShuffV[i - 1] : 0) + NumElts;
15516 }
15517 // Example (odd elements from first vector):
15518 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
15519 else
15520 for (int i = 0, e = Mask.size(); i < e; i += 2) {
15521 if (ShuffV[i] < 0)
15522 continue;
15523 // If element from non-splat is undef, pick first element from splat.
15524 ShuffV[i] = (ShuffV[i + 1] >= 0 ? ShuffV[i + 1] : 0) + NumElts;
15525 }
15526 } else {
15527 // Example (even elements from first vector):
15528 // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> <zero>, t1
15529 if (Mask[0] < NumElts)
15530 for (int i = 0, e = Mask.size(); i < e; i += 2) {
15531 if (ShuffV[i] < 0)
15532 continue;
15533 // If element from non-splat is undef, pick first element from splat.
15534 ShuffV[i] = ShuffV[i + 1] >= 0 ? ShuffV[i + 1] - NumElts : 0;
15535 }
15536 // Example (odd elements from first vector):
15537 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> <zero>, t1
15538 else
15539 for (int i = 1, e = Mask.size(); i < e; i += 2) {
15540 if (ShuffV[i] < 0)
15541 continue;
15542 // If element from non-splat is undef, pick first element from splat.
15543 ShuffV[i] = ShuffV[i - 1] >= 0 ? ShuffV[i - 1] - NumElts : 0;
15544 }
15545 }
15546
15547 // If the RHS has undefs, we need to remove them since we may have created
15548 // a shuffle that adds those instead of the splat value.
15549 SDValue SplatVal =
15550 cast<BuildVectorSDNode>(TheSplat.getNode())->getSplatValue();
15551 TheSplat = DAG.getSplatBuildVector(TheSplat.getValueType(), dl, SplatVal);
15552
15553 if (IsLittleEndian)
15554 RHS = TheSplat;
15555 else
15556 LHS = TheSplat;
15557 return DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
15558}
15559
15560SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
15561 LSBaseSDNode *LSBase,
15562 DAGCombinerInfo &DCI) const {
15563 assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
15564 "Not a reverse memop pattern!");
15565
15566 auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
15567 auto Mask = SVN->getMask();
15568 int i = 0;
15569 auto I = Mask.rbegin();
15570 auto E = Mask.rend();
15571
15572 for (; I != E; ++I) {
15573 if (*I != i)
15574 return false;
15575 i++;
15576 }
15577 return true;
15578 };
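// As a concrete illustration of the predicate above: for a v4i32 shuffle the
// only accepted mask is <3,2,1,0>, and for v16i8 it is <15,14,...,1,0>; that
// is, Mask[j] == NumElts - 1 - j for every element.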
15579
15580 SelectionDAG &DAG = DCI.DAG;
15581 EVT VT = SVN->getValueType(0);
15582
15583 if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
15584 return SDValue();
15585
15586 // Before P9, we have the PPCVSXSwapRemoval pass to hack the element order.
15587 // See the comment in PPCVSXSwapRemoval.cpp.
15588 // This combine conflicts with that pass, so we don't do it before P9.
15589 if (!Subtarget.hasP9Vector())
15590 return SDValue();
15591
15592 if (!IsElementReverse(SVN))
15593 return SDValue();
15594
15595 if (LSBase->getOpcode() == ISD::LOAD) {
15596 // If result 0 of the load has any user other than the
15597 // shufflevector instruction, it is not profitable to replace the
15598 // shufflevector with a reverse load.
15599 for (SDNode::use_iterator UI = LSBase->use_begin(), UE = LSBase->use_end();
15600 UI != UE; ++UI)
15601 if (UI.getUse().getResNo() == 0 && UI->getOpcode() != ISD::VECTOR_SHUFFLE)
15602 return SDValue();
15603
15604 SDLoc dl(LSBase);
15605 SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
15606 return DAG.getMemIntrinsicNode(
15607 PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
15608 LSBase->getMemoryVT(), LSBase->getMemOperand());
15609 }
15610
15611 if (LSBase->getOpcode() == ISD::STORE) {
15612 // If there are other uses of the shuffle, the swap cannot be avoided.
15613 // Forcing the use of an X-Form (since swapped stores only have
15614 // X-Forms) without removing the swap is unprofitable.
15615 if (!SVN->hasOneUse())
15616 return SDValue();
15617
15618 SDLoc dl(LSBase);
15619 SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
15620 LSBase->getBasePtr()};
15621 return DAG.getMemIntrinsicNode(
15622 PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
15623 LSBase->getMemoryVT(), LSBase->getMemOperand());
15624 }
15625
15626 llvm_unreachable("Expected a load or store node here");
15627}
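// A sketch of the overall rewrite (little-endian with P9 vectors assumed):
//   (v4i32 vector_shuffle<3,2,1,0> (load %p), undef)
// becomes a single PPCISD::LOAD_VEC_BE of %p, which selects to a
// big-endian-element-order load such as lxvw4x, so the explicit reverse
// shuffle disappears; the store case is handled symmetrically.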
15628
15629static bool isStoreConditional(SDValue Intrin, unsigned &StoreWidth) {
15630 unsigned IntrinsicID = Intrin.getConstantOperandVal(1);
15631 if (IntrinsicID == Intrinsic::ppc_stdcx)
15632 StoreWidth = 8;
15633 else if (IntrinsicID == Intrinsic::ppc_stwcx)
15634 StoreWidth = 4;
15635 else if (IntrinsicID == Intrinsic::ppc_sthcx)
15636 StoreWidth = 2;
15637 else if (IntrinsicID == Intrinsic::ppc_stbcx)
15638 StoreWidth = 1;
15639 else
15640 return false;
15641 return true;
15642}
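// For reference (illustrative IR, not from this file): a call such as
//   %ok = call i32 @llvm.ppc.stwcx(ptr %p, i32 %v)
// yields StoreWidth == 4 here; the BR_CC combine below uses this to branch
// directly on CR0 from the stwcx. result.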
15643
15644 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
15645 DAGCombinerInfo &DCI) const {
15646 SelectionDAG &DAG = DCI.DAG;
15647 SDLoc dl(N);
15648 switch (N->getOpcode()) {
15649 default: break;
15650 case ISD::ADD:
15651 return combineADD(N, DCI);
15652 case ISD::AND: {
15653 // We don't want (and (zext (shift...)), C) if C fits in the width of the
15654 // original input as that will prevent us from selecting optimal rotates.
15655 // This only matters if the input to the extend is i32 widened to i64.
15656 SDValue Op1 = N->getOperand(0);
15657 SDValue Op2 = N->getOperand(1);
15658 if ((Op1.getOpcode() != ISD::ZERO_EXTEND &&
15659 Op1.getOpcode() != ISD::ANY_EXTEND) ||
15660 !isa<ConstantSDNode>(Op2) || N->getValueType(0) != MVT::i64 ||
15661 Op1.getOperand(0).getValueType() != MVT::i32)
15662 break;
15663 SDValue NarrowOp = Op1.getOperand(0);
15664 if (NarrowOp.getOpcode() != ISD::SHL && NarrowOp.getOpcode() != ISD::SRL &&
15665 NarrowOp.getOpcode() != ISD::ROTL && NarrowOp.getOpcode() != ISD::ROTR)
15666 break;
15667
15668 uint64_t Imm = Op2->getAsZExtVal();
15669 // Make sure that the constant is narrow enough to fit in the narrow type.
15670 if (!isUInt<32>(Imm))
15671 break;
15672 SDValue ConstOp = DAG.getConstant(Imm, dl, MVT::i32);
15673 SDValue NarrowAnd = DAG.getNode(ISD::AND, dl, MVT::i32, NarrowOp, ConstOp);
15674 return DAG.getZExtOrTrunc(NarrowAnd, dl, N->getValueType(0));
15675 }
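// A sketch of the rewrite above (values illustrative): with x : i32,
//   (i64 and (zext (rotl x, 5)), 255)
// becomes
//   (i64 zext (i32 and (rotl x, 5), 255))
// keeping the mask on the narrow type so a single rlwinm can be selected.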
15676 case ISD::SHL:
15677 return combineSHL(N, DCI);
15678 case ISD::SRA:
15679 return combineSRA(N, DCI);
15680 case ISD::SRL:
15681 return combineSRL(N, DCI);
15682 case ISD::MUL:
15683 return combineMUL(N, DCI);
15684 case ISD::FMA:
15685 case PPCISD::FNMSUB:
15686 return combineFMALike(N, DCI);
15687 case PPCISD::SHL:
15688 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
15689 return N->getOperand(0);
15690 break;
15691 case PPCISD::SRL:
15692 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
15693 return N->getOperand(0);
15694 break;
15695 case PPCISD::SRA:
15696 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
15697 if (C->isZero() || // 0 >>s V -> 0.
15698 C->isAllOnes()) // -1 >>s V -> -1.
15699 return N->getOperand(0);
15700 }
15701 break;
15702 case ISD::SIGN_EXTEND:
15703 case ISD::ZERO_EXTEND:
15704 case ISD::ANY_EXTEND:
15705 return DAGCombineExtBoolTrunc(N, DCI);
15706 case ISD::TRUNCATE:
15707 return combineTRUNCATE(N, DCI);
15708 case ISD::SETCC:
15709 if (SDValue CSCC = combineSetCC(N, DCI))
15710 return CSCC;
15711 [[fallthrough]];
15712 case ISD::SELECT_CC:
15713 return DAGCombineTruncBoolExt(N, DCI);
15714 case ISD::SINT_TO_FP:
15715 case ISD::UINT_TO_FP:
15716 return combineFPToIntToFP(N, DCI);
15717 case ISD::VECTOR_SHUFFLE:
15718 if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
15719 LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
15720 return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
15721 }
15722 return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
15723 case ISD::STORE: {
15724
15725 EVT Op1VT = N->getOperand(1).getValueType();
15726 unsigned Opcode = N->getOperand(1).getOpcode();
15727
15728 if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT ||
15729 Opcode == ISD::STRICT_FP_TO_SINT || Opcode == ISD::STRICT_FP_TO_UINT) {
15730 SDValue Val = combineStoreFPToInt(N, DCI);
15731 if (Val)
15732 return Val;
15733 }
15734
15735 if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
15736 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
15737 SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
15738 if (Val)
15739 return Val;
15740 }
15741
15742 // Turn STORE (BSWAP) -> sthbrx/stwbrx.
15743 if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
15744 N->getOperand(1).getNode()->hasOneUse() &&
15745 (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
15746 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
15747
15748 // STBRX can only handle simple types and it makes no sense to store fewer
15749 // than two bytes in byte-reversed order.
15750 EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
15751 if (mVT.isExtended() || mVT.getSizeInBits() < 16)
15752 break;
15753
15754 SDValue BSwapOp = N->getOperand(1).getOperand(0);
15755 // Do an any-extend to 32-bits if this is a half-word input.
15756 if (BSwapOp.getValueType() == MVT::i16)
15757 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
15758
15759 // If the type of the BSWAP operand is wider than the stored memory width,
15760 // it needs to be shifted right before the STBRX.
15761 if (Op1VT.bitsGT(mVT)) {
15762 int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
15763 BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
15764 DAG.getConstant(Shift, dl, MVT::i32));
15765 // Need to truncate if this is a bswap of i64 stored as i32/i16.
15766 if (Op1VT == MVT::i64)
15767 BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
15768 }
15769
15770 SDValue Ops[] = {
15771 N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
15772 };
15773 return
15774 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
15775 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
15776 cast<StoreSDNode>(N)->getMemOperand());
15777 }
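// Illustratively, an unindexed (store (i32 bswap %x), %p) matched above is
// emitted as one stwbrx, and the i16 form as sthbrx, instead of swapping
// the bytes in registers and then storing.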
15778
15779 // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
15780 // This increases the chance of CSEing constant materialization.
15781 if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
15782 isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
15783 // Need to sign-extend to 64 bits to handle negative values.
15784 EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
15785 uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
15786 MemVT.getSizeInBits());
15787 SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
15788
15789 // DAG.getTruncStore() can't be used here because it doesn't accept
15790 // the general (base + offset) addressing mode.
15791 // So we use UpdateNodeOperands and setTruncatingStore instead.
15792 DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
15793 N->getOperand(3));
15794 cast<StoreSDNode>(N)->setTruncatingStore(true);
15795 return SDValue(N, 0);
15796 }
15797
15798 // For little endian, VSX stores require generating xxswapd/lxvd2x.
15799 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
15800 if (Op1VT.isSimple()) {
15801 MVT StoreVT = Op1VT.getSimpleVT();
15802 if (Subtarget.needsSwapsForVSXMemOps() &&
15803 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
15804 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
15805 return expandVSXStoreForLE(N, DCI);
15806 }
15807 break;
15808 }
15809 case ISD::LOAD: {
15810 LoadSDNode *LD = cast<LoadSDNode>(N);
15811 EVT VT = LD->getValueType(0);
15812
15813 // For little endian, VSX loads require generating lxvd2x/xxswapd.
15814 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
15815 if (VT.isSimple()) {
15816 MVT LoadVT = VT.getSimpleVT();
15817 if (Subtarget.needsSwapsForVSXMemOps() &&
15818 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
15819 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
15820 return expandVSXLoadForLE(N, DCI);
15821 }
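// On such subtargets this expands, e.g., a v2f64 load into lxvd2x followed
// by an xxswapd permute (a sketch; the PPCVSXSwapRemoval pass may later
// delete swaps that turn out to be redundant).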
15822
15823 // We sometimes end up with a 64-bit integer load, from which we extract
15824 // two single-precision floating-point numbers. This happens with
15825 // std::complex<float>, and other similar structures, because of the way we
15826 // canonicalize structure copies. However, if we lack direct moves,
15827 // then the final bitcasts from the extracted integer values to the
15828 // floating-point numbers turn into store/load pairs. Even with direct moves,
15829 // just loading the two floating-point numbers is likely better.
15830 auto ReplaceTwoFloatLoad = [&]() {
15831 if (VT != MVT::i64)
15832 return false;
15833
15834 if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
15835 LD->isVolatile())
15836 return false;
15837
15838 // We're looking for a sequence like this:
15839 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
15840 // t16: i64 = srl t13, Constant:i32<32>
15841 // t17: i32 = truncate t16
15842 // t18: f32 = bitcast t17
15843 // t19: i32 = truncate t13
15844 // t20: f32 = bitcast t19
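// When the pattern matches, the code below replaces it with two direct
// f32 loads, roughly:
// tA: f32,ch = load<LD4[%ref.tmp]> t0, t6
// tB: f32,ch = load<LD4[%ref.tmp+4]> tA:1, t6+4
// so no i64-to-f32 store/load round trip is needed.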
15845
15846 if (!LD->hasNUsesOfValue(2, 0))
15847 return false;
15848
15849 auto UI = LD->use_begin();
15850 while (UI.getUse().getResNo() != 0) ++UI;
15851 SDNode *Trunc = *UI++;
15852 while (UI.getUse().getResNo() != 0) ++UI;
15853 SDNode *RightShift = *UI;
15854 if (Trunc->getOpcode() != ISD::TRUNCATE)
15855 std::swap(Trunc, RightShift);
15856
15857 if (Trunc->getOpcode() != ISD::TRUNCATE ||
15858 Trunc->getValueType(0) != MVT::i32 ||
15859 !Trunc->hasOneUse())
15860 return false;
15861 if (RightShift->getOpcode() != ISD::SRL ||
15862 !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
15863 RightShift->getConstantOperandVal(1) != 32 ||
15864 !RightShift->hasOneUse())
15865 return false;
15866
15867 SDNode *Trunc2 = *RightShift->use_begin();
15868 if (Trunc2->getOpcode() != ISD::TRUNCATE ||
15869 Trunc2->getValueType(0) != MVT::i32 ||
15870 !Trunc2->hasOneUse())
15871 return false;
15872
15873 SDNode *Bitcast = *Trunc->use_begin();
15874 SDNode *Bitcast2 = *Trunc2->use_begin();
15875
15876 if (Bitcast->getOpcode() != ISD::BITCAST ||
15877 Bitcast->getValueType(0) != MVT::f32)
15878 return false;
15879 if (Bitcast2->getOpcode() != ISD::BITCAST ||
15880 Bitcast2->getValueType(0) != MVT::f32)
15881 return false;
15882
15883 if (Subtarget.isLittleEndian())
15884 std::swap(Bitcast, Bitcast2);
15885
15886 // Bitcast has the second float (in memory-layout order) and Bitcast2
15887 // has the first one.
15888
15889 SDValue BasePtr = LD->getBasePtr();
15890 if (LD->isIndexed()) {
15891 assert(LD->getAddressingMode() == ISD::PRE_INC &&
15892 "Non-pre-inc AM on PPC?");
15893 BasePtr =
15894 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
15895 LD->getOffset());
15896 }
15897
15898 auto MMOFlags =
15899 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
15900 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
15901 LD->getPointerInfo(), LD->getAlign(),
15902 MMOFlags, LD->getAAInfo());
15903 SDValue AddPtr =
15904 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
15905 BasePtr, DAG.getIntPtrConstant(4, dl));
15906 SDValue FloatLoad2 = DAG.getLoad(
15907 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
15908 LD->getPointerInfo().getWithOffset(4),
15909 commonAlignment(LD->getAlign(), 4), MMOFlags, LD->getAAInfo());
15910
15911 if (LD->isIndexed()) {
15912 // Note that DAGCombine should re-form any pre-increment load(s) from
15913 // what is produced here if that makes sense.
15914 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
15915 }
15916
15917 DCI.CombineTo(Bitcast2, FloatLoad);
15918 DCI.CombineTo(Bitcast, FloatLoad2);
15919
15920 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
15921 SDValue(FloatLoad2.getNode(), 1));
15922 return true;
15923 };
15924
15925 if (ReplaceTwoFloatLoad())
15926 return SDValue(N, 0);
15927
15928 EVT MemVT = LD->getMemoryVT();
15929 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
15930 Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
15931 if (LD->isUnindexed() && VT.isVector() &&
15932 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
15933 // P8 and later hardware should just use LOAD.
15934 !Subtarget.hasP8Vector() &&
15935 (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
15936 VT == MVT::v4f32))) &&
15937 LD->getAlign() < ABIAlignment) {
15938 // This is a type-legal unaligned Altivec load.
15939 SDValue Chain = LD->getChain();
15940 SDValue Ptr = LD->getBasePtr();
15941 bool isLittleEndian = Subtarget.isLittleEndian();
15942
15943 // This implements the loading of unaligned vectors as described in
15944 // the venerable Apple Velocity Engine overview. Specifically:
15945 // https://developer.apple.com/hardwaredrivers/ve/alignment.html
15946 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
15947 //
15948 // The general idea is to expand a sequence of one or more unaligned
15949 // loads into an alignment-based permutation-control instruction (lvsl
15950 // or lvsr), a series of regular vector loads (which always truncate
15951 // their input address to an aligned address), and a series of
15952 // permutations. The results of these permutations are the requested
15953 // loaded values. The trick is that the last "extra" load is not taken
15954 // from the address you might suspect (sizeof(vector) bytes after the
15955 // last requested load), but rather sizeof(vector) - 1 bytes after the
15956 // last requested vector. The point of this is to avoid a page fault if
15957 // the base address happened to be aligned. This works because if the
15958 // base address is aligned, then adding less than a full vector length
15959 // will cause the last vector in the sequence to be (re)loaded.
15960 // Otherwise, the base address really is unaligned, and the extra load
15961 // fetches the next vector, which is needed to assemble the result.
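// A rough pseudo-assembly sketch of the expansion (register names
// arbitrary) for one unaligned 16-byte load from r3:
// lvsl v4, 0, r3 ; permute control from the low address bits
// lvx v2, 0, r3 ; aligned vector at or below r3
// addi r4, r3, 15 ; sizeof(vector) - 1, see above
// lvx v3, 0, r4 ; next aligned vector (or the same one again)
// vperm v2, v2, v3, v4 ; splice out the 16 requested bytes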
15962
15963 // We might be able to reuse the permutation generation from
15964 // a different base address offset from this one by an aligned amount.
15965 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
15966 // optimization later.
15967 Intrinsic::ID Intr, IntrLD, IntrPerm;
15968 MVT PermCntlTy, PermTy, LDTy;
15969 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
15970 : Intrinsic::ppc_altivec_lvsl;
15971 IntrLD = Intrinsic::ppc_altivec_lvx;
15972 IntrPerm = Intrinsic::ppc_altivec_vperm;
15973 PermCntlTy = MVT::v16i8;
15974 PermTy = MVT::v4i32;
15975 LDTy = MVT::v4i32;
15976
15977 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
15978
15979 // Create the new MMO for the new base load. It is like the original MMO,
15980 // but represents an area in memory almost twice the vector size centered
15981 // on the original address. If the address is unaligned, we might start
15982 // reading up to (sizeof(vector)-1) bytes below the address of the
15983 // original unaligned load.
15984 MachineFunction &MF = DAG.getMachineFunction();
15985 MachineMemOperand *BaseMMO =
15986 MF.getMachineMemOperand(LD->getMemOperand(),
15987 -(int64_t)MemVT.getStoreSize()+1,
15988 2*MemVT.getStoreSize()-1);
15989
15990 // Create the new base load.
15991 SDValue LDXIntID =
15992 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
15993 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
15994 SDValue BaseLoad =
15995 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
15996 DAG.getVTList(PermTy, MVT::Other),
15997 BaseLoadOps, LDTy, BaseMMO);
15998
15999 // Note that the value of IncOffset (which is provided to the next
16000 // load's pointer info offset value, and thus used to calculate the
16001 // alignment), and the value of IncValue (which is actually used to
16002 // increment the pointer value) are different! This is because we
16003 // require the next load to appear to be aligned, even though it
16004 // is actually offset from the base pointer by a lesser amount.
16005 int IncOffset = VT.getSizeInBits() / 8;
16006 int IncValue = IncOffset;
16007
16008 // Walk (both up and down) the chain looking for another load at the real
16009 // (aligned) offset (the alignment of the other load does not matter in
16010 // this case). If found, then do not use the offset reduction trick, as
16011 // that will prevent the loads from being later combined (as they would
16012 // otherwise be duplicates).
16013 if (!findConsecutiveLoad(LD, DAG))
16014 --IncValue;
16015
16016 SDValue Increment =
16017 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
16018 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16019
16020 MachineMemOperand *ExtraMMO =
16021 MF.getMachineMemOperand(LD->getMemOperand(),
16022 1, 2*MemVT.getStoreSize()-1);
16023 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
16024 SDValue ExtraLoad =
16025 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
16026 DAG.getVTList(PermTy, MVT::Other),
16027 ExtraLoadOps, LDTy, ExtraMMO);
16028
16029 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
16030 BaseLoad.getValue(1), ExtraLoad.getValue(1));
16031
16032 // Because vperm has a big-endian bias, we must reverse the order
16033 // of the input vectors and complement the permute control vector
16034 // when generating little endian code. We have already handled the
16035 // latter by using lvsr instead of lvsl, so just reverse BaseLoad
16036 // and ExtraLoad here.
16037 SDValue Perm;
16038 if (isLittleEndian)
16039 Perm = BuildIntrinsicOp(IntrPerm,
16040 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
16041 else
16042 Perm = BuildIntrinsicOp(IntrPerm,
16043 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
16044
16045 if (VT != PermTy)
16046 Perm = Subtarget.hasAltivec()
16047 ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
16048 : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
16049 DAG.getTargetConstant(1, dl, MVT::i64));
16050 // second argument is 1 because this rounding
16051 // is always exact.
16052
16053 // The output of the permutation is our loaded result, the TokenFactor is
16054 // our new chain.
16055 DCI.CombineTo(N, Perm, TF);
16056 return SDValue(N, 0);
16057 }
16058 }
16059 break;
16060 case ISD::INTRINSIC_WO_CHAIN: {
16061 bool isLittleEndian = Subtarget.isLittleEndian();
16062 unsigned IID = N->getConstantOperandVal(0);
16063 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
16064 : Intrinsic::ppc_altivec_lvsl);
16065 if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
16066 SDValue Add = N->getOperand(1);
16067
16068 int Bits = 4 /* 16 byte alignment */;
16069
16070 if (DAG.MaskedValueIsZero(Add->getOperand(1),
16071 APInt::getAllOnes(Bits /* alignment */)
16072 .zext(Add.getScalarValueSizeInBits()))) {
16073 SDNode *BasePtr = Add->getOperand(0).getNode();
16074 for (SDNode *U : BasePtr->uses()) {
16075 if (U->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
16076 U->getConstantOperandVal(0) == IID) {
16077 // We've found another LVSL/LVSR, and this address is an aligned
16078 // multiple of that one. The results will be the same, so use the
16079 // one we've just found instead.
16080
16081 return SDValue(U, 0);
16082 }
16083 }
16084 }
16085
16086 if (isa<ConstantSDNode>(Add->getOperand(1))) {
16087 SDNode *BasePtr = Add->getOperand(0).getNode();
16088 for (SDNode *U : BasePtr->uses()) {
16089 if (U->getOpcode() == ISD::ADD &&
16090 isa<ConstantSDNode>(U->getOperand(1)) &&
16091 (Add->getConstantOperandVal(1) - U->getConstantOperandVal(1)) %
16092 (1ULL << Bits) ==
16093 0) {
16094 SDNode *OtherAdd = U;
16095 for (SDNode *V : OtherAdd->uses()) {
16096 if (V->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
16097 V->getConstantOperandVal(0) == IID) {
16098 return SDValue(V, 0);
16099 }
16100 }
16101 }
16102 }
16103 }
16104 }
16105
16106 // Combine vmaxsw/h/b(a, a's negation) to abs(a).
16107 // This exposes the vabsduw/h/b opportunity downstream.
16108 if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
16109 (IID == Intrinsic::ppc_altivec_vmaxsw ||
16110 IID == Intrinsic::ppc_altivec_vmaxsh ||
16111 IID == Intrinsic::ppc_altivec_vmaxsb)) {
16112 SDValue V1 = N->getOperand(1);
16113 SDValue V2 = N->getOperand(2);
16114 if ((V1.getSimpleValueType() == MVT::v4i32 ||
16115 V1.getSimpleValueType() == MVT::v8i16 ||
16116 V1.getSimpleValueType() == MVT::v16i8) &&
16117 V1.getSimpleValueType() == V2.getSimpleValueType()) {
16118 // (0-a, a)
16119 if (V1.getOpcode() == ISD::SUB &&
16120 ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
16121 V1.getOperand(1) == V2) {
16122 return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
16123 }
16124 // (a, 0-a)
16125 if (V2.getOpcode() == ISD::SUB &&
16126 ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
16127 V2.getOperand(1) == V1) {
16128 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
16129 }
16130 // (x-y, y-x)
16131 if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
16132 V1.getOperand(0) == V2.getOperand(1) &&
16133 V1.getOperand(1) == V2.getOperand(0)) {
16134 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
16135 }
16136 }
16137 }
16138 }
16139
16140 break;
16141 case ISD::INTRINSIC_W_CHAIN:
16142 switch (N->getConstantOperandVal(1)) {
16143 default:
16144 break;
16145 case Intrinsic::ppc_altivec_vsum4sbs:
16146 case Intrinsic::ppc_altivec_vsum4shs:
16147 case Intrinsic::ppc_altivec_vsum4ubs: {
16148 // These sum-across intrinsics only have a chain due to the side effect
16149 // that they may set the SAT bit. If we know the SAT bit will not be set
16150 // for some inputs, we can replace any uses of their chain with the
16151 // input chain.
16152 if (BuildVectorSDNode *BVN =
16153 dyn_cast<BuildVectorSDNode>(N->getOperand(3))) {
16154 APInt APSplatBits, APSplatUndef;
16155 unsigned SplatBitSize;
16156 bool HasAnyUndefs;
16157 bool BVNIsConstantSplat = BVN->isConstantSplat(
16158 APSplatBits, APSplatUndef, SplatBitSize, HasAnyUndefs, 0,
16159 !Subtarget.isLittleEndian());
16160 // If the constant splat vector is 0, the SAT bit will not be set.
16161 if (BVNIsConstantSplat && APSplatBits == 0)
16162 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), N->getOperand(0));
16163 }
16164 return SDValue();
16165 }
16166 case Intrinsic::ppc_vsx_lxvw4x:
16167 case Intrinsic::ppc_vsx_lxvd2x:
16168 // For little endian, VSX loads require generating lxvd2x/xxswapd.
16169 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
16170 if (Subtarget.needsSwapsForVSXMemOps())
16171 return expandVSXLoadForLE(N, DCI);
16172 break;
16173 }
16174 break;
16175 case ISD::INTRINSIC_VOID:
16176 // For little endian, VSX stores require generating xxswapd/stxvd2x.
16177 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
16178 if (Subtarget.needsSwapsForVSXMemOps()) {
16179 switch (N->getConstantOperandVal(1)) {
16180 default:
16181 break;
16182 case Intrinsic::ppc_vsx_stxvw4x:
16183 case Intrinsic::ppc_vsx_stxvd2x:
16184 return expandVSXStoreForLE(N, DCI);
16185 }
16186 }
16187 break;
16188 case ISD::BSWAP: {
16189 // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
16190 // For subtargets without LDBRX, we can still do better than the default
16191 // expansion even for 64-bit BSWAP (LOAD).
16192 bool Is64BitBswapOn64BitTgt =
16193 Subtarget.isPPC64() && N->getValueType(0) == MVT::i64;
16194 bool IsSingleUseNormalLd = ISD::isNormalLoad(N->getOperand(0).getNode()) &&
16195 N->getOperand(0).hasOneUse();
16196 if (IsSingleUseNormalLd &&
16197 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
16198 (Subtarget.hasLDBRX() && Is64BitBswapOn64BitTgt))) {
16199 SDValue Load = N->getOperand(0);
16200 LoadSDNode *LD = cast<LoadSDNode>(Load);
16201 // Create the byte-swapping load.
16202 SDValue Ops[] = {
16203 LD->getChain(), // Chain
16204 LD->getBasePtr(), // Ptr
16205 DAG.getValueType(N->getValueType(0)) // VT
16206 };
16207 SDValue BSLoad =
16208 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
16209 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
16210 MVT::i64 : MVT::i32, MVT::Other),
16211 Ops, LD->getMemoryVT(), LD->getMemOperand());
16212
16213 // If this is an i16 load, insert the truncate.
16214 SDValue ResVal = BSLoad;
16215 if (N->getValueType(0) == MVT::i16)
16216 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
16217
16218 // First, combine the bswap away. This makes the value produced by the
16219 // load dead.
16220 DCI.CombineTo(N, ResVal);
16221
16222 // Next, combine the load away; we give it a bogus result value but a real
16223 // chain result. The result value is dead because the bswap is dead.
16224 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
16225
16226 // Return N so it doesn't get rechecked!
16227 return SDValue(N, 0);
16228 }
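// Illustratively: (i32 bswap (load %p)) becomes a single lwbrx from %p,
// and the i16 form becomes lhbrx plus a truncate, rather than a load
// followed by a separate in-register byte swap.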
16229 // Convert this to two 32-bit bswap loads and a BUILD_PAIR. Do this only
16230 // before legalization so that the BUILD_PAIR is handled correctly.
16231 if (!DCI.isBeforeLegalize() || !Is64BitBswapOn64BitTgt ||
16232 !IsSingleUseNormalLd)
16233 return SDValue();
16234 LoadSDNode *LD = cast<LoadSDNode>(N->getOperand(0));
16235
16236 // Can't split volatile or atomic loads.
16237 if (!LD->isSimple())
16238 return SDValue();
16239 SDValue BasePtr = LD->getBasePtr();
16240 SDValue Lo = DAG.getLoad(MVT::i32, dl, LD->getChain(), BasePtr,
16241 LD->getPointerInfo(), LD->getAlign());
16242 Lo = DAG.getNode(ISD::BSWAP, dl, MVT::i32, Lo);
16243 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
16244 DAG.getIntPtrConstant(4, dl));
16245 MachineMemOperand *NewMMO = DAG.getMachineFunction().getMachineMemOperand(
16246 LD->getMemOperand(), 4, 4);
16247 SDValue Hi = DAG.getLoad(MVT::i32, dl, LD->getChain(), BasePtr, NewMMO);
16248 Hi = DAG.getNode(ISD::BSWAP, dl, MVT::i32, Hi);
16249 SDValue Res;
16250 if (Subtarget.isLittleEndian())
16251 Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Hi, Lo);
16252 else
16253 Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
16254 SDValue TF =
16255 DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
16256 Hi.getOperand(0).getValue(1), Lo.getOperand(0).getValue(1));
16257 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), TF);
16258 return Res;
16259 }
16260 case PPCISD::VCMP:
16261 // If a VCMP_rec node already exists with exactly the same operands as this
16262 // node, use its result instead of this node (VCMP_rec computes both a CR6
16263 // and a normal output).
16264 //
16265 if (!N->getOperand(0).hasOneUse() &&
16266 !N->getOperand(1).hasOneUse() &&
16267 !N->getOperand(2).hasOneUse()) {
16268
16269 // Scan all of the users of the LHS, looking for VCMP_rec's that match.
16270 SDNode *VCMPrecNode = nullptr;
16271
16272 SDNode *LHSN = N->getOperand(0).getNode();
16273 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
16274 UI != E; ++UI)
16275 if (UI->getOpcode() == PPCISD::VCMP_rec &&
16276 UI->getOperand(1) == N->getOperand(1) &&
16277 UI->getOperand(2) == N->getOperand(2) &&
16278 UI->getOperand(0) == N->getOperand(0)) {
16279 VCMPrecNode = *UI;
16280 break;
16281 }
16282
16283 // If there is no VCMP_rec node, or if the flag value has a single use,
16284 // don't transform this.
16285 if (!VCMPrecNode || VCMPrecNode->hasNUsesOfValue(0, 1))
16286 break;
16287
16288 // Look at the (necessarily single) use of the flag value. If it has a
16289 // chain, this transformation is more complex. Note that multiple things
16290 // could use the value result, which we should ignore.
16291 SDNode *FlagUser = nullptr;
16292 for (SDNode::use_iterator UI = VCMPrecNode->use_begin();
16293 FlagUser == nullptr; ++UI) {
16294 assert(UI != VCMPrecNode->use_end() && "Didn't find user!");
16295 SDNode *User = *UI;
16296 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
16297 if (User->getOperand(i) == SDValue(VCMPrecNode, 1)) {
16298 FlagUser = User;
16299 break;
16300 }
16301 }
16302 }
16303
16304 // If the user is a MFOCRF instruction, we know this is safe.
16305 // Otherwise we give up for right now.
16306 if (FlagUser->getOpcode() == PPCISD::MFOCRF)
16307 return SDValue(VCMPrecNode, 0);
16308 }
16309 break;
16310 case ISD::BR_CC: {
16311 // If this is a branch on an altivec predicate comparison, lower this so
16312 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This
16313 // lowering is done pre-legalize, because the legalizer lowers the predicate
16314 // compare down to code that is difficult to reassemble.
16315 // This code also handles branches that depend on the result of a store
16316 // conditional.
16317 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
16318 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
16319
16320 int CompareOpc;
16321 bool isDot;
16322
16323 if (!isa<ConstantSDNode>(RHS) || (CC != ISD::SETEQ && CC != ISD::SETNE))
16324 break;
16325
16326 // Since we are doing this pre-legalize, the RHS can be a constant of
16327 // arbitrary bitwidth which may cause issues when trying to get the value
16328 // from the underlying APInt.
16329 auto RHSAPInt = RHS->getAsAPIntVal();
16330 if (!RHSAPInt.isIntN(64))
16331 break;
16332
16333 unsigned Val = RHSAPInt.getZExtValue();
16334 auto isImpossibleCompare = [&]() {
16335 // If this is a comparison against something other than 0/1, then we know
16336 // that the condition is never/always true.
16337 if (Val != 0 && Val != 1) {
16338 if (CC == ISD::SETEQ) // Cond never true, remove branch.
16339 return N->getOperand(0);
16340 // Always !=, turn it into an unconditional branch.
16341 return DAG.getNode(ISD::BR, dl, MVT::Other,
16342 N->getOperand(0), N->getOperand(4));
16343 }
16344 return SDValue();
16345 };
16346 // Combine branches fed by store conditional instructions (st[bhwd]cx).
16347 unsigned StoreWidth = 0;
16348 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
16349 isStoreConditional(LHS, StoreWidth)) {
16350 if (SDValue Impossible = isImpossibleCompare())
16351 return Impossible;
16352 PPC::Predicate CompOpc;
16353 // eq 0 => ne
16354 // ne 0 => eq
16355 // eq 1 => eq
16356 // ne 1 => ne
16357 if (Val == 0)
16358 CompOpc = CC == ISD::SETEQ ? PPC::PRED_NE : PPC::PRED_EQ;
16359 else
16360 CompOpc = CC == ISD::SETEQ ? PPC::PRED_EQ : PPC::PRED_NE;
16361
16362 SDValue Ops[] = {LHS.getOperand(0), LHS.getOperand(2), LHS.getOperand(3),
16363 DAG.getConstant(StoreWidth, dl, MVT::i32)};
16364 auto *MemNode = cast<MemSDNode>(LHS);
16365 SDValue ConstSt = DAG.getMemIntrinsicNode(
16366 PPCISD::STORE_COND, dl,
16367 DAG.getVTList(MVT::i32, MVT::Other, MVT::Glue), Ops,
16368 MemNode->getMemoryVT(), MemNode->getMemOperand());
16369
16370 SDValue InChain;
16371 // Unchain the branch from the original store conditional.
16372 if (N->getOperand(0) == LHS.getValue(1))
16373 InChain = LHS.getOperand(0);
16374 else if (N->getOperand(0).getOpcode() == ISD::TokenFactor) {
16375 SmallVector<SDValue, 4> InChains;
16376 SDValue InTF = N->getOperand(0);
16377 for (int i = 0, e = InTF.getNumOperands(); i < e; i++)
16378 if (InTF.getOperand(i) != LHS.getValue(1))
16379 InChains.push_back(InTF.getOperand(i));
16380 InChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, InChains);
16381 }
16382
16383 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, InChain,
16384 DAG.getConstant(CompOpc, dl, MVT::i32),
16385 DAG.getRegister(PPC::CR0, MVT::i32), N->getOperand(4),
16386 ConstSt.getValue(2));
16387 }
16388
16389 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
16390 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
16391 assert(isDot && "Can't compare against a vector result!");
16392
16393 if (SDValue Impossible = isImpossibleCompare())
16394 return Impossible;
16395
16396 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
16397 // Create the PPCISD altivec 'dot' comparison node.
16398 SDValue Ops[] = {
16399 LHS.getOperand(2), // LHS of compare
16400 LHS.getOperand(3), // RHS of compare
16401 DAG.getConstant(CompareOpc, dl, MVT::i32)
16402 };
16403 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
16404 SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
16405
16406 // Unpack the result based on how the target uses it.
16407 PPC::Predicate CompOpc;
16408 switch (LHS.getConstantOperandVal(1)) {
16409 default: // Can't happen, don't crash on invalid number though.
16410 case 0: // Branch on the value of the EQ bit of CR6.
16411 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
16412 break;
16413 case 1: // Branch on the inverted value of the EQ bit of CR6.
16414 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
16415 break;
16416 case 2: // Branch on the value of the LT bit of CR6.
16417 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
16418 break;
16419 case 3: // Branch on the inverted value of the LT bit of CR6.
16420 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
16421 break;
16422 }
16423
16424 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
16425 DAG.getConstant(CompOpc, dl, MVT::i32),
16426 DAG.getRegister(PPC::CR6, MVT::i32),
16427 N->getOperand(4), CompNode.getValue(1));
16428 }
16429 break;
16430 }
16431 case ISD::BUILD_VECTOR:
16432 return DAGCombineBuildVector(N, DCI);
16433 }
16434
16435 return SDValue();
16436}
16437
16438SDValue
16439 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
16440 SelectionDAG &DAG,
16441 SmallVectorImpl<SDNode *> &Created) const {
16442 // fold (sdiv X, pow2)
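// A sketch of the expansion (32-bit example): (i32 sdiv %x, 4) becomes
// srawi r4, r3, 2 ; CA is set iff %x was negative with a remainder
// addze r3, r4 ; add the carry to round towards zero
// and for a divisor of -4 the result is then subtracted from zero.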
16443 EVT VT = N->getValueType(0);
16444 if (VT == MVT::i64 && !Subtarget.isPPC64())
16445 return SDValue();
16446 if ((VT != MVT::i32 && VT != MVT::i64) ||
16447 !(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()))
16448 return SDValue();
16449
16450 SDLoc DL(N);
16451 SDValue N0 = N->getOperand(0);
16452
16453 bool IsNegPow2 = Divisor.isNegatedPowerOf2();
16454 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countr_zero();
16455 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
16456
16457 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
16458 Created.push_back(Op.getNode());
16459
16460 if (IsNegPow2) {
16461 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
16462 Created.push_back(Op.getNode());
16463 }
16464
16465 return Op;
16466}
16467
16468//===----------------------------------------------------------------------===//
16469// Inline Assembly Support
16470//===----------------------------------------------------------------------===//
16471
16472 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
16473 KnownBits &Known,
16474 const APInt &DemandedElts,
16475 const SelectionDAG &DAG,
16476 unsigned Depth) const {
16477 Known.resetAll();
16478 switch (Op.getOpcode()) {
16479 default: break;
16480 case PPCISD::LBRX: {
16481 // lhbrx is known to have the top bits cleared out.
16482 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
16483 Known.Zero = 0xFFFF0000;
16484 break;
16485 }
16486 case ISD::INTRINSIC_WO_CHAIN: {
16487 switch (Op.getConstantOperandVal(0)) {
16488 default: break;
16489 case Intrinsic::ppc_altivec_vcmpbfp_p:
16490 case Intrinsic::ppc_altivec_vcmpeqfp_p:
16491 case Intrinsic::ppc_altivec_vcmpequb_p:
16492 case Intrinsic::ppc_altivec_vcmpequh_p:
16493 case Intrinsic::ppc_altivec_vcmpequw_p:
16494 case Intrinsic::ppc_altivec_vcmpequd_p:
16495 case Intrinsic::ppc_altivec_vcmpequq_p:
16496 case Intrinsic::ppc_altivec_vcmpgefp_p:
16497 case Intrinsic::ppc_altivec_vcmpgtfp_p:
16498 case Intrinsic::ppc_altivec_vcmpgtsb_p:
16499 case Intrinsic::ppc_altivec_vcmpgtsh_p:
16500 case Intrinsic::ppc_altivec_vcmpgtsw_p:
16501 case Intrinsic::ppc_altivec_vcmpgtsd_p:
16502 case Intrinsic::ppc_altivec_vcmpgtsq_p:
16503 case Intrinsic::ppc_altivec_vcmpgtub_p:
16504 case Intrinsic::ppc_altivec_vcmpgtuh_p:
16505 case Intrinsic::ppc_altivec_vcmpgtuw_p:
16506 case Intrinsic::ppc_altivec_vcmpgtud_p:
16507 case Intrinsic::ppc_altivec_vcmpgtuq_p:
16508 Known.Zero = ~1U; // All bits but the low one are known to be zero.
16509 break;
16510 }
16511 break;
16512 }
16513 case ISD::INTRINSIC_W_CHAIN: {
16514 switch (Op.getConstantOperandVal(1)) {
16515 default:
16516 break;
16517 case Intrinsic::ppc_load2r:
16518 // Top bits are cleared for load2r (which is the same as lhbrx).
16519 Known.Zero = 0xFFFF0000;
16520 break;
16521 }
16522 break;
16523 }
16524 }
16525}
16526
16527 Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
16528 switch (Subtarget.getCPUDirective()) {
16529 default: break;
16530 case PPC::DIR_970:
16531 case PPC::DIR_PWR4:
16532 case PPC::DIR_PWR5:
16533 case PPC::DIR_PWR5X:
16534 case PPC::DIR_PWR6:
16535 case PPC::DIR_PWR6X:
16536 case PPC::DIR_PWR7:
16537 case PPC::DIR_PWR8:
16538 case PPC::DIR_PWR9:
16539 case PPC::DIR_PWR10:
16540 case PPC::DIR_PWR_FUTURE: {
16541 if (!ML)
16542 break;
16543
16544 if (!DisableInnermostLoopAlign32) {
16545 // If the nested loop is an innermost loop, prefer a 32-byte alignment,
16546 // so that we can decrease cache misses and branch-prediction misses.
16547 // Actual alignment of the loop will depend on the hotness check and other
16548 // logic in alignBlocks.
16549 if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
16550 return Align(32);
16551 }
16552
16553 const PPCInstrInfo *TII = Subtarget.getInstrInfo();
16554
16555 // For small loops (between 5 and 8 instructions), align to a 32-byte
16556 // boundary so that the entire loop fits in one instruction-cache line.
16557 uint64_t LoopSize = 0;
16558 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
16559 for (const MachineInstr &J : **I) {
16560 LoopSize += TII->getInstSizeInBytes(J);
16561 if (LoopSize > 32)
16562 break;
16563 }
16564
16565 if (LoopSize > 16 && LoopSize <= 32)
16566 return Align(32);
16567
16568 break;
16569 }
16570 }
16571
16572 return TargetLowering::getPrefLoopAlignment(ML);
16573 }
16574
16575/// getConstraintType - Given a constraint, return the type of
16576/// constraint it is for this target.
16577 PPCTargetLowering::ConstraintType
16578 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
16579 if (Constraint.size() == 1) {
16580 switch (Constraint[0]) {
16581 default: break;
16582 case 'b':
16583 case 'r':
16584 case 'f':
16585 case 'd':
16586 case 'v':
16587 case 'y':
16588 return C_RegisterClass;
16589 case 'Z':
16590 // FIXME: While Z does indicate a memory constraint, it specifically
16591 // indicates an r+r address (used in conjunction with the 'y' modifier
16592 // in the replacement string). Currently, we're forcing the base
16593 // register to be r0 in the asm printer (which is interpreted as zero)
16594 // and forming the complete address in the second register. This is
16595 // suboptimal.
16596 return C_Memory;
16597 }
16598 } else if (Constraint == "wc") { // individual CR bits.
16599 return C_RegisterClass;
16600 } else if (Constraint == "wa" || Constraint == "wd" ||
16601 Constraint == "wf" || Constraint == "ws" ||
16602 Constraint == "wi" || Constraint == "ww") {
16603 return C_RegisterClass; // VSX registers.
16604 }
16605 return TargetLowering::getConstraintType(Constraint);
16606}
16607
16608/// Examine constraint type and operand type and determine a weight value.
16609/// This object must already have been set up with the operand type
16610/// and the current alternative constraint selected.
16611 TargetLowering::ConstraintWeight
16612 PPCTargetLowering::getSingleConstraintMatchWeight(
16613 AsmOperandInfo &info, const char *constraint) const {
16614 ConstraintWeight weight = CW_Invalid;
16615 Value *CallOperandVal = info.CallOperandVal;
16616 // If we don't have a value, we can't do a match,
16617 // but allow it at the lowest weight.
16618 if (!CallOperandVal)
16619 return CW_Default;
16620 Type *type = CallOperandVal->getType();
16621
16622 // Look at the constraint type.
16623 if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
16624 return CW_Register; // an individual CR bit.
16625 else if ((StringRef(constraint) == "wa" ||
16626 StringRef(constraint) == "wd" ||
16627 StringRef(constraint) == "wf") &&
16628 type->isVectorTy())
16629 return CW_Register;
16630 else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
16631 return CW_Register; // 'wi' holds 64-bit integer data.
16632 else if (StringRef(constraint) == "ws" && type->isDoubleTy())
16633 return CW_Register;
16634 else if (StringRef(constraint) == "ww" && type->isFloatTy())
16635 return CW_Register;
16636
16637 switch (*constraint) {
16638 default:
16639 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
16640 break;
16641 case 'b':
16642 if (type->isIntegerTy())
16643 weight = CW_Register;
16644 break;
16645 case 'f':
16646 if (type->isFloatTy())
16647 weight = CW_Register;
16648 break;
16649 case 'd':
16650 if (type->isDoubleTy())
16651 weight = CW_Register;
16652 break;
16653 case 'v':
16654 if (type->isVectorTy())
16655 weight = CW_Register;
16656 break;
16657 case 'y':
16658 weight = CW_Register;
16659 break;
16660 case 'Z':
16661 weight = CW_Memory;
16662 break;
16663 }
16664 return weight;
16665}
16666
16667std::pair<unsigned, const TargetRegisterClass *>
16668 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
16669 StringRef Constraint,
16670 MVT VT) const {
16671 if (Constraint.size() == 1) {
16672 // GCC RS6000 Constraint Letters
16673 switch (Constraint[0]) {
16674 case 'b': // R1-R31
16675 if (VT == MVT::i64 && Subtarget.isPPC64())
16676 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
16677 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
16678 case 'r': // R0-R31
16679 if (VT == MVT::i64 && Subtarget.isPPC64())
16680 return std::make_pair(0U, &PPC::G8RCRegClass);
16681 return std::make_pair(0U, &PPC::GPRCRegClass);
16682 // 'd' and 'f' constraints are both defined to be "the floating point
16683 // registers", where one is for 32-bit and the other for 64-bit. We don't
16684 // really care overly much here so just give them all the same reg classes.
16685 case 'd':
16686 case 'f':
16687 if (Subtarget.hasSPE()) {
16688 if (VT == MVT::f32 || VT == MVT::i32)
16689 return std::make_pair(0U, &PPC::GPRCRegClass);
16690 if (VT == MVT::f64 || VT == MVT::i64)
16691 return std::make_pair(0U, &PPC::SPERCRegClass);
16692 } else {
16693 if (VT == MVT::f32 || VT == MVT::i32)
16694 return std::make_pair(0U, &PPC::F4RCRegClass);
16695 if (VT == MVT::f64 || VT == MVT::i64)
16696 return std::make_pair(0U, &PPC::F8RCRegClass);
16697 }
16698 break;
16699 case 'v':
16700 if (Subtarget.hasAltivec() && VT.isVector())
16701 return std::make_pair(0U, &PPC::VRRCRegClass);
16702 else if (Subtarget.hasVSX())
16703 // Scalars in Altivec registers only make sense with VSX.
16704 return std::make_pair(0U, &PPC::VFRCRegClass);
16705 break;
16706 case 'y': // crrc
16707 return std::make_pair(0U, &PPC::CRRCRegClass);
16708 }
16709 } else if (Constraint == "wc" && Subtarget.useCRBits()) {
16710 // An individual CR bit.
16711 return std::make_pair(0U, &PPC::CRBITRCRegClass);
16712 } else if ((Constraint == "wa" || Constraint == "wd" ||
16713 Constraint == "wf" || Constraint == "wi") &&
16714 Subtarget.hasVSX()) {
16715 // A VSX register for either a scalar (FP) or vector. There is no
16716 // support for single precision scalars on subtargets prior to Power8.
16717 if (VT.isVector())
16718 return std::make_pair(0U, &PPC::VSRCRegClass);
16719 if (VT == MVT::f32 && Subtarget.hasP8Vector())
16720 return std::make_pair(0U, &PPC::VSSRCRegClass);
16721 return std::make_pair(0U, &PPC::VSFRCRegClass);
16722 } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
16723 if (VT == MVT::f32 && Subtarget.hasP8Vector())
16724 return std::make_pair(0U, &PPC::VSSRCRegClass);
16725 else
16726 return std::make_pair(0U, &PPC::VSFRCRegClass);
16727 } else if (Constraint == "lr") {
16728 if (VT == MVT::i64)
16729 return std::make_pair(0U, &PPC::LR8RCRegClass);
16730 else
16731 return std::make_pair(0U, &PPC::LRRCRegClass);
16732 }
16733
16734 // Handle special cases of physical registers that are not properly handled
16735 // by the base class.
16736 if (Constraint[0] == '{' && Constraint[Constraint.size() - 1] == '}') {
16737 // If we name a VSX register, we can't defer to the base class because it
16738 // will not recognize the correct register (their names will be VSL{0-31}
16739 // and V{0-31} so they won't match). So we match them here.
16740 if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
16741 int VSNum = atoi(Constraint.data() + 3);
16742 assert(VSNum >= 0 && VSNum <= 63 &&
16743 "Attempted to access a vsr out of range");
16744 if (VSNum < 32)
16745 return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
16746 return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
16747 }
16748
16749 // For float registers, we can't defer to the base class as it will match
16750 // the SPILLTOVSRRC class.
16751 if (Constraint.size() > 3 && Constraint[1] == 'f') {
16752 int RegNum = atoi(Constraint.data() + 2);
16753 if (RegNum > 31 || RegNum < 0)
16754 report_fatal_error("Invalid floating point register number");
16755 if (VT == MVT::f32 || VT == MVT::i32)
16756 return Subtarget.hasSPE()
16757 ? std::make_pair(PPC::R0 + RegNum, &PPC::GPRCRegClass)
16758 : std::make_pair(PPC::F0 + RegNum, &PPC::F4RCRegClass);
16759 if (VT == MVT::f64 || VT == MVT::i64)
16760 return Subtarget.hasSPE()
16761 ? std::make_pair(PPC::S0 + RegNum, &PPC::SPERCRegClass)
16762 : std::make_pair(PPC::F0 + RegNum, &PPC::F8RCRegClass);
16763 }
16764 }
16765
16766 std::pair<unsigned, const TargetRegisterClass *> R =
16767 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
16768
16769 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
16770 // (which we call X[0-9]+). If a 64-bit value has been requested, and a
16771 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
16772 // register.
16773 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
16774 // the AsmName field from *RegisterInfo.td, then this would not be necessary.
16775 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
16776 PPC::GPRCRegClass.contains(R.first))
16777 return std::make_pair(TRI->getMatchingSuperReg(R.first,
16778 PPC::sub_32, &PPC::G8RCRegClass),
16779 &PPC::G8RCRegClass);
16780
16781 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
16782 if (!R.second && StringRef("{cc}").equals_insensitive(Constraint)) {
16783 R.first = PPC::CR0;
16784 R.second = &PPC::CRRCRegClass;
16785 }
16786 // FIXME: This warning should ideally be emitted in the front end.
16787 const auto &TM = getTargetMachine();
16788 if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI()) {
16789 if (((R.first >= PPC::V20 && R.first <= PPC::V31) ||
16790 (R.first >= PPC::VF20 && R.first <= PPC::VF31)) &&
16791 (R.second == &PPC::VSRCRegClass || R.second == &PPC::VSFRCRegClass))
16792 errs() << "warning: vector registers 20 to 32 are reserved in the "
16793 "default AIX AltiVec ABI and cannot be used\n";
16794 }
16795
16796 return R;
16797}
16798
16799/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
16800/// vector. If it is invalid, don't add anything to Ops.
16801 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
16802 StringRef Constraint,
16803 std::vector<SDValue> &Ops,
16804 SelectionDAG &DAG) const {
16805 SDValue Result;
16806
16807 // Only support length 1 constraints.
16808 if (Constraint.size() > 1)
16809 return;
16810
16811 char Letter = Constraint[0];
16812 switch (Letter) {
16813 default: break;
16814 case 'I':
16815 case 'J':
16816 case 'K':
16817 case 'L':
16818 case 'M':
16819 case 'N':
16820 case 'O':
16821 case 'P': {
16822 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
16823 if (!CST) return; // Must be an immediate to match.
16824 SDLoc dl(Op);
16825 int64_t Value = CST->getSExtValue();
16826 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
16827 // numbers are printed as such.
16828 switch (Letter) {
16829 default: llvm_unreachable("Unknown constraint letter!");
16830 case 'I': // "I" is a signed 16-bit constant.
16831 if (isInt<16>(Value))
16832 Result = DAG.getTargetConstant(Value, dl, TCVT);
16833 break;
16834 case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
16835 if (isShiftedUInt<16, 16>(Value))
16836 Result = DAG.getTargetConstant(Value, dl, TCVT);
16837 break;
16838 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
16839 if (isShiftedInt<16, 16>(Value))
16840 Result = DAG.getTargetConstant(Value, dl, TCVT);
16841 break;
16842 case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
16843 if (isUInt<16>(Value))
16844 Result = DAG.getTargetConstant(Value, dl, TCVT);
16845 break;
16846 case 'M': // "M" is a constant that is greater than 31.
16847 if (Value > 31)
16848 Result = DAG.getTargetConstant(Value, dl, TCVT);
16849 break;
16850 case 'N': // "N" is a positive constant that is an exact power of two.
16851 if (Value > 0 && isPowerOf2_64(Value))
16852 Result = DAG.getTargetConstant(Value, dl, TCVT);
16853 break;
16854 case 'O': // "O" is the constant zero.
16855 if (Value == 0)
16856 Result = DAG.getTargetConstant(Value, dl, TCVT);
16857 break;
16858 case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
16859 if (isInt<16>(-Value))
16860 Result = DAG.getTargetConstant(Value, dl, TCVT);
16861 break;
16862 }
16863 break;
16864 }
16865 }
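// For instance (illustrative): in
// asm("addi %0,%1,%2" : "=r"(d) : "r"(s), "I"(16));
// the "I" operand is checked here with isInt<16>(16) and lowered to a
// target constant, matching addi's signed 16-bit immediate field.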
16866
16867 if (Result.getNode()) {
16868 Ops.push_back(Result);
16869 return;
16870 }
16871
16872 // Handle standard constraint letters.
16873 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
16874}
16875
16876 void PPCTargetLowering::CollectTargetIntrinsicOperands(const CallInst &I,
16877 SmallVectorImpl<SDValue> &Ops,
16878 SelectionDAG &DAG) const {
16879 if (I.getNumOperands() <= 1)
16880 return;
16881 if (!isa<ConstantSDNode>(Ops[1].getNode()))
16882 return;
16883 auto IntrinsicID = Ops[1].getNode()->getAsZExtVal();
16884 if (IntrinsicID != Intrinsic::ppc_tdw && IntrinsicID != Intrinsic::ppc_tw &&
16885 IntrinsicID != Intrinsic::ppc_trapd && IntrinsicID != Intrinsic::ppc_trap)
16886 return;
16887
16888 if (MDNode *MDN = I.getMetadata(LLVMContext::MD_annotation))
16889 Ops.push_back(DAG.getMDNode(MDN));
16890}
16891
16892// isLegalAddressingMode - Return true if the addressing mode represented
16893// by AM is legal for this target, for a load/store of the specified type.
16894 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
16895 const AddrMode &AM, Type *Ty,
16896 unsigned AS,
16897 Instruction *I) const {
16898 // Vector type r+i form is supported since Power9 as the DQ form. We don't
16899 // check the offset against the DQ-form requirement (off % 16 == 0), because
16900 // on PowerPC the imm form is preferred, and the offset can be adjusted to
16901 // use the imm form later in the PPCLoopInstrFormPrep pass. Also, in LSR,
16902 // each LSRUse checks legality with its min and max offsets, so we should be
16903 // a little aggressive and accept other offsets for that LSRUse.
16904 if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())
16905 return false;
16906
16907 // PPC allows a sign-extended 16-bit immediate field.
16908 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
16909 return false;
16910
16911 // No global is ever allowed as a base.
16912 if (AM.BaseGV)
16913 return false;
16914
16915 // PPC only supports r+r addressing:
16916 switch (AM.Scale) {
16917 case 0: // "r+i" or just "i", depending on HasBaseReg.
16918 break;
16919 case 1:
16920 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
16921 return false;
16922 // Otherwise we have r+r or r+i.
16923 break;
16924 case 2:
16925 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
16926 return false;
16927 // Allow 2*r as r+r.
16928 break;
16929 default:
16930 // No other scales are supported.
16931 return false;
16932 }
16933
16934 return true;
16935}
16936
16937SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
16938 SelectionDAG &DAG) const {
16939 MachineFunction &MF = DAG.getMachineFunction();
16940 MachineFrameInfo &MFI = MF.getFrameInfo();
16941 MFI.setReturnAddressIsTaken(true);
16942
16943 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
16944 return SDValue();
16945
16946 SDLoc dl(Op);
16947 unsigned Depth = Op.getConstantOperandVal(0);
16948
16949 // Make sure the function does not optimize away the store of the RA to
16950 // the stack.
16951 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
16952 FuncInfo->setLRStoreRequired();
16953 bool isPPC64 = Subtarget.isPPC64();
16954 auto PtrVT = getPointerTy(MF.getDataLayout());
16955
16956 if (Depth > 0) {
16957 // The link register (return address) is saved in the caller's frame
16958 // not the callee's stack frame. So we must get the caller's frame
16959 // address and load the return address at the LR offset from there.
16960 SDValue FrameAddr =
16961 DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
16962 LowerFRAMEADDR(Op, DAG), MachinePointerInfo());
16963 SDValue Offset =
16964 DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
16965 isPPC64 ? MVT::i64 : MVT::i32);
16966 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
16967 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
16968 MachinePointerInfo());
16969 }
16970
16971 // Just load the return address off the stack.
16972 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
16973 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
16975}
16976
16977SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
16978 SelectionDAG &DAG) const {
16979 SDLoc dl(Op);
16980 unsigned Depth = Op.getConstantOperandVal(0);
16981
16982 MachineFunction &MF = DAG.getMachineFunction();
16983 MachineFrameInfo &MFI = MF.getFrameInfo();
16984 MFI.setFrameAddressIsTaken(true);
16985
16986 EVT PtrVT = getPointerTy(MF.getDataLayout());
16987 bool isPPC64 = PtrVT == MVT::i64;
16988
16989 // Naked functions never have a frame pointer, and so we use r1. For all
16990 // other functions, this decision must be delayed until during PEI.
16991 unsigned FrameReg;
16992 if (MF.getFunction().hasFnAttribute(Attribute::Naked))
16993 FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
16994 else
16995 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
16996
16997 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
16998 PtrVT);
16999 while (Depth--)
17000 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
17001 FrameAddr, MachinePointerInfo());
17002 return FrameAddr;
17003}
17004
17005// FIXME? Maybe this could be a TableGen attribute on some registers and
17006// this table could be generated automatically from RegInfo.
17007 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
17008 const MachineFunction &MF) const {
17009 bool isPPC64 = Subtarget.isPPC64();
17010
17011 bool is64Bit = isPPC64 && VT == LLT::scalar(64);
17012 if (!is64Bit && VT != LLT::scalar(32))
17013 report_fatal_error("Invalid register global variable type");
17014
17015 Register Reg = StringSwitch<Register>(RegName)
17016 .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
17017 .Case("r2", isPPC64 ? Register() : PPC::R2)
17018 .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
17019 .Default(Register());
17020
17021 if (Reg)
17022 return Reg;
17023 report_fatal_error("Invalid register name global variable");
17024}
17025
17026 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
17027 // The 32-bit SVR4 ABI accesses everything as got-indirect.
17028 if (Subtarget.is32BitELFABI())
17029 return true;
17030
17031 // AIX accesses everything indirectly through the TOC, which is similar to
17032 // the GOT.
17033 if (Subtarget.isAIXABI())
17034 return true;
17035
17036 CodeModel::Model CModel = getTargetMachine().getCodeModel();
17037 // If it is small or large code model, module locals are accessed
17038 // indirectly by loading their address from .toc/.got.
17039 if (CModel == CodeModel::Small || CModel == CodeModel::Large)
17040 return true;
17041
17042 // JumpTable and BlockAddress are accessed as got-indirect.
17043 if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
17044 return true;
17045
17046 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
17047 return Subtarget.isGVIndirectSymbol(G->getGlobal());
17048
17049 return false;
17050}
17051
17052bool
17053 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
17054 // The PowerPC target isn't yet aware of offsets.
17055 return false;
17056}
17057
17058 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
17059 const CallInst &I,
17060 MachineFunction &MF,
17061 unsigned Intrinsic) const {
17062 switch (Intrinsic) {
17063 case Intrinsic::ppc_atomicrmw_xchg_i128:
17064 case Intrinsic::ppc_atomicrmw_add_i128:
17065 case Intrinsic::ppc_atomicrmw_sub_i128:
17066 case Intrinsic::ppc_atomicrmw_nand_i128:
17067 case Intrinsic::ppc_atomicrmw_and_i128:
17068 case Intrinsic::ppc_atomicrmw_or_i128:
17069 case Intrinsic::ppc_atomicrmw_xor_i128:
17070 case Intrinsic::ppc_cmpxchg_i128:
17071 Info.opc = ISD::INTRINSIC_W_CHAIN;
17072 Info.memVT = MVT::i128;
17073 Info.ptrVal = I.getArgOperand(0);
17074 Info.offset = 0;
17075 Info.align = Align(16);
17076 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
17077 MachineMemOperand::MOVolatile;
17078 return true;
17079 case Intrinsic::ppc_atomic_load_i128:
17080 Info.opc = ISD::INTRINSIC_W_CHAIN;
17081 Info.memVT = MVT::i128;
17082 Info.ptrVal = I.getArgOperand(0);
17083 Info.offset = 0;
17084 Info.align = Align(16);
17085 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
17086 return true;
17087 case Intrinsic::ppc_atomic_store_i128:
17088 Info.opc = ISD::INTRINSIC_VOID;
17089 Info.memVT = MVT::i128;
17090 Info.ptrVal = I.getArgOperand(2);
17091 Info.offset = 0;
17092 Info.align = Align(16);
17093 Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
17094 return true;
17095 case Intrinsic::ppc_altivec_lvx:
17096 case Intrinsic::ppc_altivec_lvxl:
17097 case Intrinsic::ppc_altivec_lvebx:
17098 case Intrinsic::ppc_altivec_lvehx:
17099 case Intrinsic::ppc_altivec_lvewx:
17100 case Intrinsic::ppc_vsx_lxvd2x:
17101 case Intrinsic::ppc_vsx_lxvw4x:
17102 case Intrinsic::ppc_vsx_lxvd2x_be:
17103 case Intrinsic::ppc_vsx_lxvw4x_be:
17104 case Intrinsic::ppc_vsx_lxvl:
17105 case Intrinsic::ppc_vsx_lxvll: {
17106 EVT VT;
17107 switch (Intrinsic) {
17108 case Intrinsic::ppc_altivec_lvebx:
17109 VT = MVT::i8;
17110 break;
17111 case Intrinsic::ppc_altivec_lvehx:
17112 VT = MVT::i16;
17113 break;
17114 case Intrinsic::ppc_altivec_lvewx:
17115 VT = MVT::i32;
17116 break;
17117 case Intrinsic::ppc_vsx_lxvd2x:
17118 case Intrinsic::ppc_vsx_lxvd2x_be:
17119 VT = MVT::v2f64;
17120 break;
17121 default:
17122 VT = MVT::v4i32;
17123 break;
17124 }
17125
17126 Info.opc = ISD::INTRINSIC_W_CHAIN;
17127 Info.memVT = VT;
17128 Info.ptrVal = I.getArgOperand(0);
17129 Info.offset = -VT.getStoreSize()+1;
17130 Info.size = 2*VT.getStoreSize()-1;
17131 Info.align = Align(1);
17132 Info.flags = MachineMemOperand::MOLoad;
17133 return true;
17134 }
17135 case Intrinsic::ppc_altivec_stvx:
17136 case Intrinsic::ppc_altivec_stvxl:
17137 case Intrinsic::ppc_altivec_stvebx:
17138 case Intrinsic::ppc_altivec_stvehx:
17139 case Intrinsic::ppc_altivec_stvewx:
17140 case Intrinsic::ppc_vsx_stxvd2x:
17141 case Intrinsic::ppc_vsx_stxvw4x:
17142 case Intrinsic::ppc_vsx_stxvd2x_be:
17143 case Intrinsic::ppc_vsx_stxvw4x_be:
17144 case Intrinsic::ppc_vsx_stxvl:
17145 case Intrinsic::ppc_vsx_stxvll: {
17146 EVT VT;
17147 switch (Intrinsic) {
17148 case Intrinsic::ppc_altivec_stvebx:
17149 VT = MVT::i8;
17150 break;
17151 case Intrinsic::ppc_altivec_stvehx:
17152 VT = MVT::i16;
17153 break;
17154 case Intrinsic::ppc_altivec_stvewx:
17155 VT = MVT::i32;
17156 break;
17157 case Intrinsic::ppc_vsx_stxvd2x:
17158 case Intrinsic::ppc_vsx_stxvd2x_be:
17159 VT = MVT::v2f64;
17160 break;
17161 default:
17162 VT = MVT::v4i32;
17163 break;
17164 }
17165
17166 Info.opc = ISD::INTRINSIC_VOID;
17167 Info.memVT = VT;
17168 Info.ptrVal = I.getArgOperand(1);
17169 Info.offset = -VT.getStoreSize()+1;
17170 Info.size = 2*VT.getStoreSize()-1;
17171 Info.align = Align(1);
17172 Info.flags = MachineMemOperand::MOStore;
17173 return true;
17174 }
17175 case Intrinsic::ppc_stdcx:
17176 case Intrinsic::ppc_stwcx:
17177 case Intrinsic::ppc_sthcx:
17178 case Intrinsic::ppc_stbcx: {
17179 EVT VT;
17180 auto Alignment = Align(8);
17181 switch (Intrinsic) {
17182 case Intrinsic::ppc_stdcx:
17183 VT = MVT::i64;
17184 break;
17185 case Intrinsic::ppc_stwcx:
17186 VT = MVT::i32;
17187 Alignment = Align(4);
17188 break;
17189 case Intrinsic::ppc_sthcx:
17190 VT = MVT::i16;
17191 Alignment = Align(2);
17192 break;
17193 case Intrinsic::ppc_stbcx:
17194 VT = MVT::i8;
17195 Alignment = Align(1);
17196 break;
17197 }
17198 Info.opc = ISD::INTRINSIC_W_CHAIN;
17199 Info.memVT = VT;
17200 Info.ptrVal = I.getArgOperand(0);
17201 Info.offset = 0;
17202 Info.align = Alignment;
17203 Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
17204 return true;
17205 }
17206 default:
17207 break;
17208 }
17209
17210 return false;
17211}
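// Note (informal): for the Altivec/VSX cases above, lvx-style instructions
// ignore the low bits of the address, so the touched memory is reported
// conservatively as the window [ptr - (StoreSize-1), ptr + (StoreSize-1)]
// with byte alignment, which is what the offset/size arithmetic encodes.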
17212
17213/// It returns EVT::Other if the type should be determined using generic
17214/// target-independent logic.
17215 EVT PPCTargetLowering::getOptimalMemOpType(
17216 const MemOp &Op, const AttributeList &FuncAttributes) const {
17217 if (getTargetMachine().getOptLevel() != CodeGenOptLevel::None) {
17218 // We should use Altivec/VSX loads and stores when available. For unaligned
17219 // addresses, unaligned VSX loads are only fast starting with the P8.
17220 if (Subtarget.hasAltivec() && Op.size() >= 16) {
17221 if (Op.isMemset() && Subtarget.hasVSX()) {
17222 uint64_t TailSize = Op.size() % 16;
17223 // For memset lowering, EXTRACT_VECTOR_ELT tries to return a constant
17224 // element if the vector element type matches the tail store; for tail
17225 // sizes 3 and 4 the tail store is i32, so v4i32 cannot be used here.
17226 if (TailSize > 2 && TailSize <= 4) {
17227 return MVT::v8i16;
17228 }
17229 return MVT::v4i32;
17230 }
17231 if (Op.isAligned(Align(16)) || Subtarget.hasP8Vector())
17232 return MVT::v4i32;
17233 }
17234 }
17235
17236 if (Subtarget.isPPC64()) {
17237 return MVT::i64;
17238 }
17239
17240 return MVT::i32;
17241}
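// Worked example (informal): a 19-byte memset with VSX picks v8i16 since the
// tail is 19 % 16 == 3 bytes, while a 16-byte-aligned copy on a pre-P8
// Altivec subtarget still gets v4i32; everything else falls back to the
// i64/i32 register width.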
17242
17243/// Returns true if it is beneficial to convert a load of a constant
17244/// to just the constant itself.
17245 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
17246 Type *Ty) const {
17247 assert(Ty->isIntegerTy());
17248
17249 unsigned BitSize = Ty->getPrimitiveSizeInBits();
17250 return !(BitSize == 0 || BitSize > 64);
17251}
17252
17253 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
17254 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
17255 return false;
17256 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
17257 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
17258 return NumBits1 == 64 && NumBits2 == 32;
17259}
17260
17261 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
17262 if (!VT1.isInteger() || !VT2.isInteger())
17263 return false;
17264 unsigned NumBits1 = VT1.getSizeInBits();
17265 unsigned NumBits2 = VT2.getSizeInBits();
17266 return NumBits1 == 64 && NumBits2 == 32;
17267}
17268
17269 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
17270 // Generally speaking, zexts are not free, but they are free when they can be
17271 // folded with other operations.
17272 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
17273 EVT MemVT = LD->getMemoryVT();
17274 if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
17275 (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
17276 (LD->getExtensionType() == ISD::NON_EXTLOAD ||
17277 LD->getExtensionType() == ISD::ZEXTLOAD))
17278 return true;
17279 }
17280
17281 // FIXME: Add other cases...
17282 // - 32-bit shifts with a zext to i64
17283 // - zext after ctlz, bswap, etc.
17284 // - zext after and by a constant mask
17285
17286 return TargetLowering::isZExtFree(Val, VT2);
17287}
17288
17289bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
17290 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
17291 "invalid fpext types");
17292 // Extending to float128 is not free.
17293 if (DestVT == MVT::f128)
17294 return false;
17295 return true;
17296}
17297
17298 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
17299 return isInt<16>(Imm) || isUInt<16>(Imm);
17300}
17301
17302 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
17303 return isInt<16>(Imm) || isUInt<16>(Imm);
17304}
17305
17306 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align,
17307 MachineMemOperand::Flags,
17308 unsigned *Fast) const {
17309 if (DisablePPCUnaligned)
17310 return false;
17311
17312 // PowerPC supports unaligned memory access for simple non-vector types.
17313 // Although accessing unaligned addresses is not as efficient as accessing
17314 // aligned addresses, it is generally more efficient than manual expansion,
17315 // and generally only traps for software emulation when crossing page
17316 // boundaries.
17317
17318 if (!VT.isSimple())
17319 return false;
17320
17321 if (VT.isFloatingPoint() && !VT.isVector() &&
17322 !Subtarget.allowsUnalignedFPAccess())
17323 return false;
17324
17325 if (VT.getSimpleVT().isVector()) {
17326 if (Subtarget.hasVSX()) {
17327 if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
17328 VT != MVT::v4f32 && VT != MVT::v4i32)
17329 return false;
17330 } else {
17331 return false;
17332 }
17333 }
17334
17335 if (VT == MVT::ppcf128)
17336 return false;
17337
17338 if (Fast)
17339 *Fast = 1;
17340
17341 return true;
17342}
17343
17344 bool PPCTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
17345 SDValue C) const {
17346 // Check integral scalar types.
17347 if (!VT.isScalarInteger())
17348 return false;
17349 if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
17350 if (!ConstNode->getAPIntValue().isSignedIntN(64))
17351 return false;
17352 // This transformation will generate >= 2 operations. But the following
17353 // cases will generate <= 2 instructions during ISEL. So exclude them.
17354 // 1. If the constant multiplier fits 16 bits, it can be handled by one
17355 // HW instruction, i.e. MULLI
17356 // 2. If the multiplier fits 16 bits after being shifted, one extra shift
17357 // instruction is needed compared to case 1, i.e. MULLI and RLDICR
17358 int64_t Imm = ConstNode->getSExtValue();
17359 unsigned Shift = llvm::countr_zero<uint64_t>(Imm);
17360 Imm >>= Shift;
17361 if (isInt<16>(Imm))
17362 return false;
17363 uint64_t UImm = static_cast<uint64_t>(Imm);
17364 if (isPowerOf2_64(UImm + 1) || isPowerOf2_64(UImm - 1) ||
17365 isPowerOf2_64(1 - UImm) || isPowerOf2_64(-1 - UImm))
17366 return true;
17367 }
17368 return false;
17369}
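// Worked example (informal): for (mul x, 65538), 65538 == 32769 << 1 and
// 32769 does not fit in 16 bits, but 32769 - 1 is a power of two, so the
// shift/add decomposition is allowed. For (mul x, 40), the shifted value 5
// fits in 16 bits (MULLI + RLDICR already suffice), so this returns false.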
17370
17371 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
17372 EVT VT) const {
17373 return isFMAFasterThanFMulAndFAdd(
17374 MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
17375 }
17376
17377 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
17378 Type *Ty) const {
17379 if (Subtarget.hasSPE() || Subtarget.useSoftFloat())
17380 return false;
17381 switch (Ty->getScalarType()->getTypeID()) {
17382 case Type::FloatTyID:
17383 case Type::DoubleTyID:
17384 return true;
17385 case Type::FP128TyID:
17386 return Subtarget.hasP9Vector();
17387 default:
17388 return false;
17389 }
17390}
17391
17392// FIXME: add more patterns which are not profitable to hoist.
17393 bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
17394 if (!I->hasOneUse())
17395 return true;
17396
17397 Instruction *User = I->user_back();
17398 assert(User && "A single use instruction with no uses.");
17399
17400 switch (I->getOpcode()) {
17401 case Instruction::FMul: {
17402 // Don't break FMA, PowerPC prefers FMA.
17403 if (User->getOpcode() != Instruction::FSub &&
17404 User->getOpcode() != Instruction::FAdd)
17405 return true;
17406
17407 const TargetOptions &Options = getTargetMachine().Options;
17408 const Function *F = I->getFunction();
17409 const DataLayout &DL = F->getParent()->getDataLayout();
17410 Type *Ty = User->getOperand(0)->getType();
17411
17412 return !(
17413 isFMAFasterThanFMulAndFAdd(*F, Ty) &&
17414 isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
17415 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
17416 }
17417 case Instruction::Load: {
17418 // Don't break "store (load float*)" pattern, this pattern will be combined
17419 // to "store (load int32)" in later InstCombine pass. See function
17420 // combineLoadToOperationType. On PowerPC, loading a float point takes more
17421 // cycles than loading a 32 bit integer.
17422 LoadInst *LI = cast<LoadInst>(I);
17423 // For the loads that combineLoadToOperationType does nothing, like
17424 // ordered load, it should be profitable to hoist them.
17425 // For swifterror load, it can only be used for pointer to pointer type, so
17426 // later type check should get rid of this case.
17427 if (!LI->isUnordered())
17428 return true;
17429
17430 if (User->getOpcode() != Instruction::Store)
17431 return true;
17432
17433 if (I->getType()->getTypeID() != Type::FloatTyID)
17434 return true;
17435
17436 return false;
17437 }
17438 default:
17439 return true;
17440 }
17441 return true;
17442}
17443
17444const MCPhysReg *
17445 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
17446 // LR is a callee-save register, but we must treat it as clobbered by any call
17447 // site. Hence we include LR in the scratch registers, which are in turn added
17448 // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
17449 // to CTR, which is used by any indirect call.
17450 static const MCPhysReg ScratchRegs[] = {
17451 PPC::X12, PPC::LR8, PPC::CTR8, 0
17452 };
17453
17454 return ScratchRegs;
17455}
17456
17457 Register PPCTargetLowering::getExceptionPointerRegister(
17458 const Constant *PersonalityFn) const {
17459 return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
17460}
17461
17462 Register PPCTargetLowering::getExceptionSelectorRegister(
17463 const Constant *PersonalityFn) const {
17464 return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
17465}
17466
17467bool
17468 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
17469 EVT VT, unsigned DefinedValues) const {
17470 if (VT == MVT::v2i64)
17471 return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
17472
17473 if (Subtarget.hasVSX())
17474 return true;
17475
17476 return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
17477 }
17478
17479 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
17480 if (DisableILPPref || Subtarget.enableMachineScheduler())
17481 return TargetLowering::getSchedulingPreference(N);
17482
17483 return Sched::ILP;
17484}
17485
17486// Create a fast isel object.
17487FastISel *
17488 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
17489 const TargetLibraryInfo *LibInfo) const {
17490 return PPC::createFastISel(FuncInfo, LibInfo);
17491}
17492
17493// 'Inverted' means the FMA opcode after negating one multiplicand.
17494// For example, (fma -a b c) = (fnmsub a b c)
17495static unsigned invertFMAOpcode(unsigned Opc) {
17496 switch (Opc) {
17497 default:
17498 llvm_unreachable("Invalid FMA opcode for PowerPC!");
17499 case ISD::FMA:
17500 return PPCISD::FNMSUB;
17501 case PPCISD::FNMSUB:
17502 return ISD::FMA;
17503 }
17504}
17505
17506 SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
17507 bool LegalOps, bool OptForSize,
17508 NegatibleCost &Cost,
17509 unsigned Depth) const {
17510 if (Depth > SelectionDAG::MaxRecursionDepth)
17511 return SDValue();
17512
17513 unsigned Opc = Op.getOpcode();
17514 EVT VT = Op.getValueType();
17515 SDNodeFlags Flags = Op.getNode()->getFlags();
17516
17517 switch (Opc) {
17518 case PPCISD::FNMSUB:
17519 if (!Op.hasOneUse() || !isTypeLegal(VT))
17520 break;
17521
17522 const TargetOptions &Options = getTargetMachine().Options;
17523 SDValue N0 = Op.getOperand(0);
17524 SDValue N1 = Op.getOperand(1);
17525 SDValue N2 = Op.getOperand(2);
17526 SDLoc Loc(Op);
17527
17528 NegatibleCost N2Cost = NegatibleCost::Expensive;
17529 SDValue NegN2 =
17530 getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);
17531
17532 if (!NegN2)
17533 return SDValue();
17534
17535 // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
17536 // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
17537 // These transformations may change sign of zeroes. For example,
17538 // -(-ab-(-c))=-0 while -(-(ab-c))=+0 when a=b=c=1.
17539 if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
17540 // Try and choose the cheaper one to negate.
17541 NegatibleCost N0Cost = NegatibleCost::Expensive;
17542 SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
17543 N0Cost, Depth + 1);
17544
17545 NegatibleCost N1Cost = NegatibleCost::Expensive;
17546 SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
17547 N1Cost, Depth + 1);
17548
17549 if (NegN0 && N0Cost <= N1Cost) {
17550 Cost = std::min(N0Cost, N2Cost);
17551 return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
17552 } else if (NegN1) {
17553 Cost = std::min(N1Cost, N2Cost);
17554 return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
17555 }
17556 }
17557
17558 // (fneg (fnmsub a b c)) => (fma a b (fneg c))
17559 if (isOperationLegal(ISD::FMA, VT)) {
17560 Cost = N2Cost;
17561 return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
17562 }
17563
17564 break;
17565 }
17566
17567 return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
17568 Cost, Depth);
17569}
17570
17571// Override to enable LOAD_STACK_GUARD lowering on Linux.
17572 bool PPCTargetLowering::useLoadStackGuardNode() const {
17573 if (!Subtarget.isTargetLinux())
17574 return TargetLowering::useLoadStackGuardNode();
17575 return true;
17576}
17577
17578// Override to disable global variable loading on Linux and insert AIX canary
17579// word declaration.
17580 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
17581 if (Subtarget.isAIXABI()) {
17582 M.getOrInsertGlobal(AIXSSPCanaryWordName,
17583 PointerType::getUnqual(M.getContext()));
17584 return;
17585 }
17586 if (!Subtarget.isTargetLinux())
17587 return TargetLowering::insertSSPDeclarations(M);
17588 }
17589
17590 Value *PPCTargetLowering::getSDagStackGuard(const Module &M) const {
17591 if (Subtarget.isAIXABI())
17592 return M.getGlobalVariable(AIXSSPCanaryWordName);
17593 return TargetLowering::getSDagStackGuard(M);
17594 }
17595
17596 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
17597 bool ForCodeSize) const {
17598 if (!VT.isSimple() || !Subtarget.hasVSX())
17599 return false;
17600
17601 switch(VT.getSimpleVT().SimpleTy) {
17602 default:
17603 // For FP types that are currently not supported by PPC backend, return
17604 // false. Examples: f16, f80.
17605 return false;
17606 case MVT::f32:
17607 case MVT::f64: {
17608 if (Subtarget.hasPrefixInstrs()) {
17609 // We can materialize all immediates via XXSPLTI32DX and XXSPLTIDP.
17610 return true;
17611 }
17612 bool IsExact;
17613 APSInt IntResult(16, false);
17614 // The rounding mode doesn't really matter because we only care about floats
17615 // that can be converted to integers exactly.
17616 Imm.convertToInteger(IntResult, APFloat::rmTowardZero, &IsExact);
17617 // For exact values in the range [-16, 15] we can materialize the float.
17618 if (IsExact && IntResult <= 15 && IntResult >= -16)
17619 return true;
17620 return Imm.isZero();
17621 }
17622 case MVT::ppcf128:
17623 return Imm.isPosZero();
17624 }
17625}
17626
17627// For vector shift operation op, fold
17628// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
17629 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
17630 SelectionDAG &DAG) {
17631 SDValue N0 = N->getOperand(0);
17632 SDValue N1 = N->getOperand(1);
17633 EVT VT = N0.getValueType();
17634 unsigned OpSizeInBits = VT.getScalarSizeInBits();
17635 unsigned Opcode = N->getOpcode();
17636 unsigned TargetOpcode;
17637
17638 switch (Opcode) {
17639 default:
17640 llvm_unreachable("Unexpected shift operation");
17641 case ISD::SHL:
17642 TargetOpcode = PPCISD::SHL;
17643 break;
17644 case ISD::SRL:
17645 TargetOpcode = PPCISD::SRL;
17646 break;
17647 case ISD::SRA:
17648 TargetOpcode = PPCISD::SRA;
17649 break;
17650 }
17651
17652 if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
17653 N1->getOpcode() == ISD::AND)
17654 if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
17655 if (Mask->getZExtValue() == OpSizeInBits - 1)
17656 return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
17657
17658 return SDValue();
17659}
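// Example (informal): for v4i32, (shl x, (and y, 31)) becomes PPCISD::SHL
// x, y here, since the vector shift instructions already use only the low
// bits of each shift amount, making the masking AND redundant.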
17660
17661SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
17662 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
17663 return Value;
17664
17665 SDValue N0 = N->getOperand(0);
17666 ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
17667 if (!Subtarget.isISA3_0() || !Subtarget.isPPC64() ||
17668 N0.getOpcode() != ISD::SIGN_EXTEND ||
17669 N0.getOperand(0).getValueType() != MVT::i32 || CN1 == nullptr ||
17670 N->getValueType(0) != MVT::i64)
17671 return SDValue();
17672
17673 // We can't save an operation here if the value is already extended, and
17674 // the existing shift is easier to combine.
17675 SDValue ExtsSrc = N0.getOperand(0);
17676 if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
17677 ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
17678 return SDValue();
17679
17680 SDLoc DL(N0);
17681 SDValue ShiftBy = SDValue(CN1, 0);
17682 // We want the shift amount to be i32 on the extswli, but the shift could
17683 // have an i64.
17684 if (ShiftBy.getValueType() == MVT::i64)
17685 ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
17686
17687 return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
17688 ShiftBy);
17689}
17690
17691SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
17692 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
17693 return Value;
17694
17695 return SDValue();
17696}
17697
17698SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
17699 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
17700 return Value;
17701
17702 return SDValue();
17703}
17704
17705// Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
17706// Transform (add X, (zext(sete Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
17707// When C is zero, the equation (addi Z, -C) can be simplified to Z
17708// Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
17709 static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
17710 const PPCSubtarget &Subtarget) {
17711 if (!Subtarget.isPPC64())
17712 return SDValue();
17713
17714 SDValue LHS = N->getOperand(0);
17715 SDValue RHS = N->getOperand(1);
17716
17717 auto isZextOfCompareWithConstant = [](SDValue Op) {
17718 if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
17719 Op.getValueType() != MVT::i64)
17720 return false;
17721
17722 SDValue Cmp = Op.getOperand(0);
17723 if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
17724 Cmp.getOperand(0).getValueType() != MVT::i64)
17725 return false;
17726
17727 if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
17728 int64_t NegConstant = 0 - Constant->getSExtValue();
17729 // Due to the limitations of the addi instruction,
17730 // -C is required to be [-32768, 32767].
17731 return isInt<16>(NegConstant);
17732 }
17733
17734 return false;
17735 };
17736
17737 bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
17738 bool RHSHasPattern = isZextOfCompareWithConstant(RHS);
17739
17740 // If there is a pattern, canonicalize a zext operand to the RHS.
17741 if (LHSHasPattern && !RHSHasPattern)
17742 std::swap(LHS, RHS);
17743 else if (!LHSHasPattern && !RHSHasPattern)
17744 return SDValue();
17745
17746 SDLoc DL(N);
17747 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
17748 SDValue Cmp = RHS.getOperand(0);
17749 SDValue Z = Cmp.getOperand(0);
17750 auto *Constant = cast<ConstantSDNode>(Cmp.getOperand(1));
17751 int64_t NegConstant = 0 - Constant->getSExtValue();
17752
17753 switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
17754 default: break;
17755 case ISD::SETNE: {
17756 // when C == 0
17757 // --> addze X, (addic Z, -1).carry
17758 // /
17759 // add X, (zext(setne Z, C))--
17760 // \ when -32768 <= -C <= 32767 && C != 0
17761 // --> addze X, (addic (addi Z, -C), -1).carry
17762 SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
17763 DAG.getConstant(NegConstant, DL, MVT::i64));
17764 SDValue AddOrZ = NegConstant != 0 ? Add : Z;
17765 SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
17766 AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
17767 return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
17768 SDValue(Addc.getNode(), 1));
17769 }
17770 case ISD::SETEQ: {
17771 // when C == 0
17772 // --> addze X, (subfic Z, 0).carry
17773 // /
17774 // add X, (zext(sete Z, C))--
17775 // \ when -32768 <= -C <= 32767 && C != 0
17776 // --> addze X, (subfic (addi Z, -C), 0).carry
17777 SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
17778 DAG.getConstant(NegConstant, DL, MVT::i64));
17779 SDValue AddOrZ = NegConstant != 0 ? Add : Z;
17780 SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
17781 DAG.getConstant(0, DL, MVT::i64), AddOrZ);
17782 return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
17783 SDValue(Subc.getNode(), 1));
17784 }
17785 }
17786
17787 return SDValue();
17788}
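// Example (informal): for i64 values, (add x, (zext (setne z, 0))) becomes
// addic z, -1 followed by addze x: the addic sets the carry exactly when
// z != 0, so no compare/select sequence is needed.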
17789
17790// Transform
17791// (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
17792// (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
17793// In this case both C1 and C2 must be known constants.
17794// C1+C2 must fit into a 34 bit signed integer.
17795 static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
17796 const PPCSubtarget &Subtarget) {
17797 if (!Subtarget.isUsingPCRelativeCalls())
17798 return SDValue();
17799
17800 // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
17801 // If we find that node try to cast the Global Address and the Constant.
17802 SDValue LHS = N->getOperand(0);
17803 SDValue RHS = N->getOperand(1);
17804
17805 if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
17806 std::swap(LHS, RHS);
17807
17808 if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
17809 return SDValue();
17810
17811 // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
17812 GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
17813 ConstantSDNode* ConstNode = dyn_cast<ConstantSDNode>(RHS);
17814
17815 // Check that both casts succeeded.
17816 if (!GSDN || !ConstNode)
17817 return SDValue();
17818
17819 int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
17820 SDLoc DL(GSDN);
17821
17822 // The signed int offset needs to fit in 34 bits.
17823 if (!isInt<34>(NewOffset))
17824 return SDValue();
17825
17826 // The new global address is a copy of the old global address except
17827 // that it has the updated Offset.
17828 SDValue GA =
17829 DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
17830 NewOffset, GSDN->getTargetFlags());
17831 SDValue MatPCRel =
17832 DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
17833 return MatPCRel;
17834}
17835
17836SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
17837 if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
17838 return Value;
17839
17840 if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
17841 return Value;
17842
17843 return SDValue();
17844}
17845
17846// Detect TRUNCATE operations on bitcasts of float128 values.
17847 // What we are looking for here is the situation where we extract a subset
17848// of bits from a 128 bit float.
17849// This can be of two forms:
17850// 1) BITCAST of f128 feeding TRUNCATE
17851// 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
17852// The reason this is required is because we do not have a legal i128 type
17853// and so we want to prevent having to store the f128 and then reload part
17854// of it.
17855SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
17856 DAGCombinerInfo &DCI) const {
17857 // If we are using CRBits then try that first.
17858 if (Subtarget.useCRBits()) {
17859 // Check if CRBits did anything and return that if it did.
17860 if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
17861 return CRTruncValue;
17862 }
17863
17864 SDLoc dl(N);
17865 SDValue Op0 = N->getOperand(0);
17866
17867 // Looking for a truncate of i128 to i64.
17868 if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
17869 return SDValue();
17870
17871 int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
17872
17873 // SRL feeding TRUNCATE.
17874 if (Op0.getOpcode() == ISD::SRL) {
17875 ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
17876 // The right shift has to be by 64 bits.
17877 if (!ConstNode || ConstNode->getZExtValue() != 64)
17878 return SDValue();
17879
17880 // Switch the element number to extract.
17881 EltToExtract = EltToExtract ? 0 : 1;
17882 // Update Op0 past the SRL.
17883 Op0 = Op0.getOperand(0);
17884 }
17885
17886 // BITCAST feeding a TRUNCATE possibly via SRL.
17887 if (Op0.getOpcode() == ISD::BITCAST &&
17888 Op0.getValueType() == MVT::i128 &&
17889 Op0.getOperand(0).getValueType() == MVT::f128) {
17890 SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
17891 return DCI.DAG.getNode(
17892 ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
17893 DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
17894 }
17895 return SDValue();
17896}
17897
17898SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
17899 SelectionDAG &DAG = DCI.DAG;
17900
17901 ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
17902 if (!ConstOpOrElement)
17903 return SDValue();
17904
17905 // An imul is usually smaller than the alternative sequence for legal type.
17907 isOperationLegal(ISD::MUL, N->getValueType(0)))
17908 return SDValue();
17909
17910 auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
17911 switch (this->Subtarget.getCPUDirective()) {
17912 default:
17913 // TODO: enhance the condition for subtarget before pwr8
17914 return false;
17915 case PPC::DIR_PWR8:
17916 // type mul add shl
17917 // scalar 4 1 1
17918 // vector 7 2 2
17919 return true;
17920 case PPC::DIR_PWR9:
17921 case PPC::DIR_PWR10:
17922 case PPC::DIR_PWR_FUTURE:
17923 // type mul add shl
17924 // scalar 5 2 2
17925 // vector 7 2 2
17926
17927 // The cycle ratios of the related operations are shown in the table above.
17928 // Since mul costs 5 (scalar) or 7 (vector) cycles while add/sub/shl all
17929 // cost 2 for both scalar and vector types, the 2-instruction patterns
17930 // (add/sub + shl, 4 cycles total) are always profitable; but the
17931 // 3-instruction pattern (mul x, -(2^N + 1)) => -(add (shl x, N), x)
17932 // costs 6 (sub + add + shl), so we should only use it for vector types.
17933 return IsAddOne && IsNeg ? VT.isVector() : true;
17934 }
17935 };
17936
17937 EVT VT = N->getValueType(0);
17938 SDLoc DL(N);
17939
17940 const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
17941 bool IsNeg = MulAmt.isNegative();
17942 APInt MulAmtAbs = MulAmt.abs();
17943
17944 if ((MulAmtAbs - 1).isPowerOf2()) {
17945 // (mul x, 2^N + 1) => (add (shl x, N), x)
17946 // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
17947
17948 if (!IsProfitable(IsNeg, true, VT))
17949 return SDValue();
17950
17951 SDValue Op0 = N->getOperand(0);
17952 SDValue Op1 =
17953 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
17954 DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
17955 SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
17956
17957 if (!IsNeg)
17958 return Res;
17959
17960 return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
17961 } else if ((MulAmtAbs + 1).isPowerOf2()) {
17962 // (mul x, 2^N - 1) => (sub (shl x, N), x)
17963 // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
17964
17965 if (!IsProfitable(IsNeg, false, VT))
17966 return SDValue();
17967
17968 SDValue Op0 = N->getOperand(0);
17969 SDValue Op1 =
17970 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
17971 DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));
17972
17973 if (!IsNeg)
17974 return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
17975 else
17976 return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
17977
17978 } else {
17979 return SDValue();
17980 }
17981}
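// Worked example (informal): (mul x, 5) becomes (add (shl x, 2), x) when
// profitable, while (mul i64 x, -9) is left as a multiply on P9/P10 because
// the three-instruction negated form only pays off for vector types.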
17982
17983// Combine fma-like op (like fnmsub) with fnegs to appropriate op. Do this
17984// in combiner since we need to check SD flags and other subtarget features.
17985SDValue PPCTargetLowering::combineFMALike(SDNode *N,
17986 DAGCombinerInfo &DCI) const {
17987 SDValue N0 = N->getOperand(0);
17988 SDValue N1 = N->getOperand(1);
17989 SDValue N2 = N->getOperand(2);
17990 SDNodeFlags Flags = N->getFlags();
17991 EVT VT = N->getValueType(0);
17992 SelectionDAG &DAG = DCI.DAG;
17993 const TargetOptions &Options = getTargetMachine().Options;
17994 unsigned Opc = N->getOpcode();
17995 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
17996 bool LegalOps = !DCI.isBeforeLegalizeOps();
17997 SDLoc Loc(N);
17998
17999 if (!isOperationLegal(ISD::FMA, VT))
18000 return SDValue();
18001
18002 // Allowing transformation to FNMSUB may change sign of zeroes when ab-c=0
18003 // since (fnmsub a b c)=-0 while c-ab=+0.
18004 if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
18005 return SDValue();
18006
18007 // (fma (fneg a) b c) => (fnmsub a b c)
18008 // (fnmsub (fneg a) b c) => (fma a b c)
18009 if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
18010 return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);
18011
18012 // (fma a (fneg b) c) => (fnmsub a b c)
18013 // (fnmsub a (fneg b) c) => (fma a b c)
18014 if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
18015 return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);
18016
18017 return SDValue();
18018}
18019
18020bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
18021 // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
18022 if (!Subtarget.is64BitELFABI())
18023 return false;
18024
18025 // If not a tail call then no need to proceed.
18026 if (!CI->isTailCall())
18027 return false;
18028
18029 // If sibling calls have been disabled and tail-calls aren't guaranteed
18030 // there is no reason to duplicate.
18031 auto &TM = getTargetMachine();
18032 if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
18033 return false;
18034
18035 // Can't tail call a function called indirectly, or if it has variadic args.
18036 const Function *Callee = CI->getCalledFunction();
18037 if (!Callee || Callee->isVarArg())
18038 return false;
18039
18040 // Make sure the callee and caller calling conventions are eligible for tco.
18041 const Function *Caller = CI->getParent()->getParent();
18042 if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
18043 CI->getCallingConv()))
18044 return false;
18045
18046 // If the function is local, then we have a good chance at tail-calling it.
18047 return getTargetMachine().shouldAssumeDSOLocal(Callee);
18048}
18049
18050bool PPCTargetLowering::
18051isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
18052 const Value *Mask = AndI.getOperand(1);
18053 // If the mask is suitable for andi. or andis. we should sink the and.
18054 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
18055 // Can't handle constants wider than 64-bits.
18056 if (CI->getBitWidth() > 64)
18057 return false;
18058 int64_t ConstVal = CI->getZExtValue();
18059 return isUInt<16>(ConstVal) ||
18060 (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
18061 }
18062
18063 // For non-constant masks, we can always use the record-form and.
18064 return true;
18065}
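// Example (informal): masks like 0xFFFF or 0xFFFF0000 map onto a single
// record-form andi./andis., so sinking the AND next to the compare-with-zero
// is profitable; a mask such as 0x00FF00FF would need two instructions and
// is rejected.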
18066
18067/// getAddrModeForFlags - Based on the set of address flags, select the most
18068/// optimal instruction format to match by.
18069PPC::AddrMode PPCTargetLowering::getAddrModeForFlags(unsigned Flags) const {
18070 // This is not a node we should be handling here.
18071 if (Flags == PPC::MOF_None)
18072 return PPC::AM_None;
18073 // Unaligned D-Forms are tried first, followed by the aligned D-Forms.
18074 for (auto FlagSet : AddrModesMap.at(PPC::AM_DForm))
18075 if ((Flags & FlagSet) == FlagSet)
18076 return PPC::AM_DForm;
18077 for (auto FlagSet : AddrModesMap.at(PPC::AM_DSForm))
18078 if ((Flags & FlagSet) == FlagSet)
18079 return PPC::AM_DSForm;
18080 for (auto FlagSet : AddrModesMap.at(PPC::AM_DQForm))
18081 if ((Flags & FlagSet) == FlagSet)
18082 return PPC::AM_DQForm;
18083 for (auto FlagSet : AddrModesMap.at(PPC::AM_PrefixDForm))
18084 if ((Flags & FlagSet) == FlagSet)
18085 return PPC::AM_PrefixDForm;
18086 // If no other forms are selected, return an X-Form as it is the most
18087 // general addressing mode.
18088 return PPC::AM_XForm;
18089}
18090
18091/// Set alignment flags based on whether or not the Frame Index is aligned.
18092/// Utilized when computing flags for address computation when selecting
18093/// load and store instructions.
18094static void setAlignFlagsForFI(SDValue N, unsigned &FlagSet,
18095 SelectionDAG &DAG) {
18096 bool IsAdd = ((N.getOpcode() == ISD::ADD) || (N.getOpcode() == ISD::OR));
18097 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(IsAdd ? N.getOperand(0) : N);
18098 if (!FI)
18099 return;
18100 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18101 unsigned FrameIndexAlign = MFI.getObjectAlign(FI->getIndex()).value();
18102 // If this is (add $FI, $S16Imm), the alignment flags are already set
18103 // based on the immediate. We just need to clear the alignment flags
18104 // if the FI alignment is weaker.
18105 if ((FrameIndexAlign % 4) != 0)
18106 FlagSet &= ~PPC::MOF_RPlusSImm16Mult4;
18107 if ((FrameIndexAlign % 16) != 0)
18108 FlagSet &= ~PPC::MOF_RPlusSImm16Mult16;
18109 // If the address is a plain FrameIndex, set alignment flags based on
18110 // FI alignment.
18111 if (!IsAdd) {
18112 if ((FrameIndexAlign % 4) == 0)
18113 FlagSet |= PPC::MOF_RPlusSImm16Mult4;
18114 if ((FrameIndexAlign % 16) == 0)
18115 FlagSet |= PPC::MOF_RPlusSImm16Mult16;
18116 }
18117}
18118
18119/// Given a node, compute flags that are used for address computation when
18120/// selecting load and store instructions. The flags computed are stored in
18121/// FlagSet. This function takes into account whether the node is a constant,
18122 /// an ADD or an OR, and computes the address flags accordingly.
18123static void computeFlagsForAddressComputation(SDValue N, unsigned &FlagSet,
18124 SelectionDAG &DAG) {
18125 // Set the alignment flags for the node depending on if the node is
18126 // 4-byte or 16-byte aligned.
18127 auto SetAlignFlagsForImm = [&](uint64_t Imm) {
18128 if ((Imm & 0x3) == 0)
18129 FlagSet |= PPC::MOF_RPlusSImm16Mult4;
18130 if ((Imm & 0xf) == 0)
18131 FlagSet |= PPC::MOF_RPlusSImm16Mult16;
18132 };
18133
18134 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
18135 // All 32-bit constants can be computed as LIS + Disp.
18136 const APInt &ConstImm = CN->getAPIntValue();
18137 if (ConstImm.isSignedIntN(32)) { // Flag to handle 32-bit constants.
18138 FlagSet |= PPC::MOF_AddrIsSImm32;
18139 SetAlignFlagsForImm(ConstImm.getZExtValue());
18140 setAlignFlagsForFI(N, FlagSet, DAG);
18141 }
18142 if (ConstImm.isSignedIntN(34)) // Flag to handle 34-bit constants.
18143 FlagSet |= PPC::MOF_RPlusSImm34;
18144 else // Let constant materialization handle large constants.
18145 FlagSet |= PPC::MOF_NotAddNorCst;
18146 } else if (N.getOpcode() == ISD::ADD || provablyDisjointOr(DAG, N)) {
18147 // This address can be represented as an addition of:
18148 // - Register + Imm16 (possibly a multiple of 4/16)
18149 // - Register + Imm34
18150 // - Register + PPCISD::Lo
18151 // - Register + Register
18152 // In any case, we won't have to match this as Base + Zero.
18153 SDValue RHS = N.getOperand(1);
18154 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
18155 const APInt &ConstImm = CN->getAPIntValue();
18156 if (ConstImm.isSignedIntN(16)) {
18157 FlagSet |= PPC::MOF_RPlusSImm16; // Signed 16-bit immediates.
18158 SetAlignFlagsForImm(ConstImm.getZExtValue());
18159 setAlignFlagsForFI(N, FlagSet, DAG);
18160 }
18161 if (ConstImm.isSignedIntN(34))
18162 FlagSet |= PPC::MOF_RPlusSImm34; // Signed 34-bit immediates.
18163 else
18164 FlagSet |= PPC::MOF_RPlusR; // Register.
18165 } else if (RHS.getOpcode() == PPCISD::Lo && !RHS.getConstantOperandVal(1))
18166 FlagSet |= PPC::MOF_RPlusLo; // PPCISD::Lo.
18167 else
18168 FlagSet |= PPC::MOF_RPlusR;
18169 } else { // The address computation is not a constant or an addition.
18170 setAlignFlagsForFI(N, FlagSet, DAG);
18171 FlagSet |= PPC::MOF_NotAddNorCst;
18172 }
18173}
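// Worked example (informal): for (add X, 48) this sets MOF_RPlusSImm16 and
// MOF_RPlusSImm34 plus both alignment flags, since 48 fits in a signed
// 16-bit field and is a multiple of 4 and of 16; a plain register+register
// add sets only MOF_RPlusR.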
18174
18175static bool isPCRelNode(SDValue N) {
18176 return (N.getOpcode() == PPCISD::MAT_PCREL_ADDR ||
18177 isValidPCRelNode<ConstantPoolSDNode>(N) ||
18178 isValidPCRelNode<GlobalAddressSDNode>(N) ||
18179 isValidPCRelNode<JumpTableSDNode>(N) ||
18180 isValidPCRelNode<BlockAddressSDNode>(N));
18181}
18182
18183 /// computeMOFlags - Given a node N and its Parent (a MemSDNode), compute
18184/// the address flags of the load/store instruction that is to be matched.
18185unsigned PPCTargetLowering::computeMOFlags(const SDNode *Parent, SDValue N,
18186 SelectionDAG &DAG) const {
18187 unsigned FlagSet = PPC::MOF_None;
18188
18189 // Compute subtarget flags.
18190 if (!Subtarget.hasP9Vector())
18191 FlagSet |= PPC::MOF_SubtargetBeforeP9;
18192 else {
18193 FlagSet |= PPC::MOF_SubtargetP9;
18194 if (Subtarget.hasPrefixInstrs())
18195 FlagSet |= PPC::MOF_SubtargetP10;
18196 }
18197 if (Subtarget.hasSPE())
18198 FlagSet |= PPC::MOF_SubtargetSPE;
18199
18200 // Check if we have a PCRel node and return early.
18201 if ((FlagSet & PPC::MOF_SubtargetP10) && isPCRelNode(N))
18202 return FlagSet;
18203
18204 // If the node is the paired load/store intrinsics, compute flags for
18205 // address computation and return early.
18206 unsigned ParentOp = Parent->getOpcode();
18207 if (Subtarget.isISA3_1() && ((ParentOp == ISD::INTRINSIC_W_CHAIN) ||
18208 (ParentOp == ISD::INTRINSIC_VOID))) {
18209 unsigned ID = Parent->getConstantOperandVal(1);
18210 if ((ID == Intrinsic::ppc_vsx_lxvp) || (ID == Intrinsic::ppc_vsx_stxvp)) {
18211 SDValue IntrinOp = (ID == Intrinsic::ppc_vsx_lxvp)
18212 ? Parent->getOperand(2)
18213 : Parent->getOperand(3);
18214 computeFlagsForAddressComputation(IntrinOp, FlagSet, DAG);
18215 FlagSet |= PPC::MOF_Vector;
18216 return FlagSet;
18217 }
18218 }
18219
18220 // Mark this as something we don't want to handle here if it is atomic
18221 // or pre-increment instruction.
18222 if (const LSBaseSDNode *LSB = dyn_cast<LSBaseSDNode>(Parent))
18223 if (LSB->isIndexed())
18224 return PPC::MOF_None;
18225
18226 // Compute in-memory type flags. This is based on if there are scalars,
18227 // floats or vectors.
18228 const MemSDNode *MN = dyn_cast<MemSDNode>(Parent);
18229 assert(MN && "Parent should be a MemSDNode!");
18230 EVT MemVT = MN->getMemoryVT();
18231 unsigned Size = MemVT.getSizeInBits();
18232 if (MemVT.isScalarInteger()) {
18233 assert(Size <= 128 &&
18234 "Not expecting scalar integers larger than 16 bytes!");
18235 if (Size < 32)
18236 FlagSet |= PPC::MOF_SubWordInt;
18237 else if (Size == 32)
18238 FlagSet |= PPC::MOF_WordInt;
18239 else
18240 FlagSet |= PPC::MOF_DoubleWordInt;
18241 } else if (MemVT.isVector() && !MemVT.isFloatingPoint()) { // Integer vectors.
18242 if (Size == 128)
18243 FlagSet |= PPC::MOF_Vector;
18244 else if (Size == 256) {
18245 assert(Subtarget.pairedVectorMemops() &&
18246 "256-bit vectors are only available when paired vector memops is "
18247 "enabled!");
18248 FlagSet |= PPC::MOF_Vector;
18249 } else
18250 llvm_unreachable("Not expecting illegal vectors!");
18251 } else { // Floating point type: can be scalar, f128 or vector types.
18252 if (Size == 32 || Size == 64)
18253 FlagSet |= PPC::MOF_ScalarFloat;
18254 else if (MemVT == MVT::f128 || MemVT.isVector())
18255 FlagSet |= PPC::MOF_Vector;
18256 else
18257 llvm_unreachable("Not expecting illegal scalar floats!");
18258 }
18259
18260 // Compute flags for address computation.
18261 computeFlagsForAddressComputation(N, FlagSet, DAG);
18262
18263 // Compute type extension flags.
18264 if (const LoadSDNode *LN = dyn_cast<LoadSDNode>(Parent)) {
18265 switch (LN->getExtensionType()) {
18266 case ISD::SEXTLOAD:
18267 FlagSet |= PPC::MOF_SExt;
18268 break;
18269 case ISD::EXTLOAD:
18270 case ISD::ZEXTLOAD:
18271 FlagSet |= PPC::MOF_ZExt;
18272 break;
18273 case ISD::NON_EXTLOAD:
18274 FlagSet |= PPC::MOF_NoExt;
18275 break;
18276 }
18277 } else
18278 FlagSet |= PPC::MOF_NoExt;
18279
18280 // For integers, no extension is the same as zero extension.
18281 // We set the extension mode to zero extension so we don't have
18282 // to add separate entries in AddrModesMap for loads and stores.
18283 if (MemVT.isScalarInteger() && (FlagSet & PPC::MOF_NoExt)) {
18284 FlagSet |= PPC::MOF_ZExt;
18285 FlagSet &= ~PPC::MOF_NoExt;
18286 }
18287
18288 // If we don't have prefixed instructions, 34-bit constants should be
18289 // treated as PPC::MOF_NotAddNorCst so they can match D-Forms.
18290 bool IsNonP1034BitConst =
18291 ((PPC::MOF_RPlusSImm34 | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubtargetP10) &
18292 FlagSet) == PPC::MOF_RPlusSImm34;
18293 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::OR &&
18294 IsNonP1034BitConst)
18295 FlagSet |= PPC::MOF_NotAddNorCst;
18296
18297 return FlagSet;
18298}
18299
18300/// SelectForceXFormMode - Given the specified address, force it to be
18301/// represented as an indexed [r+r] operation (an XForm instruction).
18302 PPC::AddrMode PPCTargetLowering::SelectForceXFormMode(SDValue N, SDValue &Disp,
18303 SDValue &Base,
18304 SelectionDAG &DAG) const {
18305
18306 PPC::AddrMode Mode = PPC::AM_XForm;
18307 int16_t ForceXFormImm = 0;
18308 if (provablyDisjointOr(DAG, N) &&
18309 !isIntS16Immediate(N.getOperand(1), ForceXFormImm)) {
18310 Disp = N.getOperand(0);
18311 Base = N.getOperand(1);
18312 return Mode;
18313 }
18314
18315 // If the address is the result of an add, we will utilize the fact that the
18316 // address calculation includes an implicit add. However, we can reduce
18317 // register pressure if we do not materialize a constant just for use as the
18318 // index register. We only get rid of the add if it is not an add of a
18319 // value and a 16-bit signed constant and both have a single use.
18320 if (N.getOpcode() == ISD::ADD &&
18321 (!isIntS16Immediate(N.getOperand(1), ForceXFormImm) ||
18322 !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
18323 Disp = N.getOperand(0);
18324 Base = N.getOperand(1);
18325 return Mode;
18326 }
18327
18328 // Otherwise, use R0 as the base register.
18329 Disp = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
18330 N.getValueType());
18331 Base = N;
18332
18333 return Mode;
18334}
18335
18336 bool PPCTargetLowering::splitValueIntoRegisterParts(
18337 SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
18338 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
18339 EVT ValVT = Val.getValueType();
18340 // If we are splitting a scalar integer into f64 parts (i.e. so they
18341 // can be placed into VFRC registers), we need to zero extend and
18342 // bitcast the values. This will ensure the value is placed into a
18343 // VSR using direct moves or stack operations as needed.
18344 if (PartVT == MVT::f64 &&
18345 (ValVT == MVT::i32 || ValVT == MVT::i16 || ValVT == MVT::i8)) {
18346 Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
18347 Val = DAG.getNode(ISD::BITCAST, DL, MVT::f64, Val);
18348 Parts[0] = Val;
18349 return true;
18350 }
18351 return false;
18352}
18353
18354SDValue PPCTargetLowering::lowerToLibCall(const char *LibCallName, SDValue Op,
18355 SelectionDAG &DAG) const {
18356 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
18357 TargetLowering::CallLoweringInfo CLI(DAG);
18358 EVT RetVT = Op.getValueType();
18359 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
18360 SDValue Callee =
18361 DAG.getExternalSymbol(LibCallName, TLI.getPointerTy(DAG.getDataLayout()));
18362 bool SignExtend = TLI.shouldSignExtendTypeInLibCall(RetVT, false);
18363 TargetLowering::ArgListTy Args;
18364 TargetLowering::ArgListEntry Entry;
18365 for (const SDValue &N : Op->op_values()) {
18366 EVT ArgVT = N.getValueType();
18367 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18368 Entry.Node = N;
18369 Entry.Ty = ArgTy;
18370 Entry.IsSExt = TLI.shouldSignExtendTypeInLibCall(ArgVT, SignExtend);
18371 Entry.IsZExt = !Entry.IsSExt;
18372 Args.push_back(Entry);
18373 }
18374
18375 SDValue InChain = DAG.getEntryNode();
18376 SDValue TCChain = InChain;
18377 const Function &F = DAG.getMachineFunction().getFunction();
18378 bool isTailCall =
18379 TLI.isInTailCallPosition(DAG, Op.getNode(), TCChain) &&
18380 (RetTy == F.getReturnType() || F.getReturnType()->isVoidTy());
18381 if (isTailCall)
18382 InChain = TCChain;
18383 CLI.setDebugLoc(SDLoc(Op))
18384 .setChain(InChain)
18385 .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args))
18386 .setTailCall(isTailCall)
18387 .setSExtResult(SignExtend)
18388 .setZExtResult(!SignExtend)
18389 .setIsPostTypeLegalization(true);
18390 return TLI.LowerCallTo(CLI).first;
18391}
18392
18393SDValue PPCTargetLowering::lowerLibCallBasedOnType(
18394 const char *LibCallFloatName, const char *LibCallDoubleName, SDValue Op,
18395 SelectionDAG &DAG) const {
18396 if (Op.getValueType() == MVT::f32)
18397 return lowerToLibCall(LibCallFloatName, Op, DAG);
18398
18399 if (Op.getValueType() == MVT::f64)
18400 return lowerToLibCall(LibCallDoubleName, Op, DAG);
18401
18402 return SDValue();
18403}
18404
18405bool PPCTargetLowering::isLowringToMASSFiniteSafe(SDValue Op) const {
18406 SDNodeFlags Flags = Op.getNode()->getFlags();
18407 return isLowringToMASSSafe(Op) && Flags.hasNoSignedZeros() &&
18408 Flags.hasNoNaNs() && Flags.hasNoInfs();
18409}
18410
18411bool PPCTargetLowering::isLowringToMASSSafe(SDValue Op) const {
18412 return Op.getNode()->getFlags().hasApproximateFuncs();
18413}
18414
18415bool PPCTargetLowering::isScalarMASSConversionEnabled() const {
18416 return TM.Options.PPCGenScalarMASSEntries;
18417 }
18418
18419SDValue PPCTargetLowering::lowerLibCallBase(const char *LibCallDoubleName,
18420 const char *LibCallFloatName,
18421 const char *LibCallDoubleNameFinite,
18422 const char *LibCallFloatNameFinite,
18423 SDValue Op,
18424 SelectionDAG &DAG) const {
18425 if (!isScalarMASSConversionEnabled() || !isLowringToMASSSafe(Op))
18426 return SDValue();
18427
18428 if (!isLowringToMASSFiniteSafe(Op))
18429 return lowerLibCallBasedOnType(LibCallFloatName, LibCallDoubleName, Op,
18430 DAG);
18431
18432 return lowerLibCallBasedOnType(LibCallFloatNameFinite,
18433 LibCallDoubleNameFinite, Op, DAG);
18434}
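// Example (informal): when scalar MASS conversion is enabled, a call such as
//   %r = call afn double @llvm.pow.f64(double %a, double %b)
// is redirected to __xl_pow, or to __xl_pow_finite when the nnan/ninf/nsz
// flags are also present, since the finite entry points assume finite math.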
18435
18436SDValue PPCTargetLowering::lowerPow(SDValue Op, SelectionDAG &DAG) const {
18437 return lowerLibCallBase("__xl_pow", "__xl_powf", "__xl_pow_finite",
18438 "__xl_powf_finite", Op, DAG);
18439}
18440
18441SDValue PPCTargetLowering::lowerSin(SDValue Op, SelectionDAG &DAG) const {
18442 return lowerLibCallBase("__xl_sin", "__xl_sinf", "__xl_sin_finite",
18443 "__xl_sinf_finite", Op, DAG);
18444}
18445
18446SDValue PPCTargetLowering::lowerCos(SDValue Op, SelectionDAG &DAG) const {
18447 return lowerLibCallBase("__xl_cos", "__xl_cosf", "__xl_cos_finite",
18448 "__xl_cosf_finite", Op, DAG);
18449}
18450
18451SDValue PPCTargetLowering::lowerLog(SDValue Op, SelectionDAG &DAG) const {
18452 return lowerLibCallBase("__xl_log", "__xl_logf", "__xl_log_finite",
18453 "__xl_logf_finite", Op, DAG);
18454}
18455
18456SDValue PPCTargetLowering::lowerLog10(SDValue Op, SelectionDAG &DAG) const {
18457 return lowerLibCallBase("__xl_log10", "__xl_log10f", "__xl_log10_finite",
18458 "__xl_log10f_finite", Op, DAG);
18459}
18460
18461SDValue PPCTargetLowering::lowerExp(SDValue Op, SelectionDAG &DAG) const {
18462 return lowerLibCallBase("__xl_exp", "__xl_expf", "__xl_exp_finite",
18463 "__xl_expf_finite", Op, DAG);
18464}
18465
18466// If we happen to match to an aligned D-Form, check if the Frame Index is
18467// adequately aligned. If it is not, reset the mode to match to X-Form.
18468static void setXFormForUnalignedFI(SDValue N, unsigned Flags,
18469 PPC::AddrMode &Mode) {
18470 if (!isa<FrameIndexSDNode>(N))
18471 return;
18472 if ((Mode == PPC::AM_DSForm && !(Flags & PPC::MOF_RPlusSImm16Mult4)) ||
18473 (Mode == PPC::AM_DQForm && !(Flags & PPC::MOF_RPlusSImm16Mult16)))
18474 Mode = PPC::AM_XForm;
18475}
18476
18477 /// SelectOptimalAddrMode - Based on a node N and its Parent (a MemSDNode),
18478/// compute the address flags of the node, get the optimal address mode based
18479/// on the flags, and set the Base and Disp based on the address mode.
18480 PPC::AddrMode PPCTargetLowering::SelectOptimalAddrMode(const SDNode *Parent,
18481 SDValue N, SDValue &Disp,
18482 SDValue &Base,
18483 SelectionDAG &DAG,
18484 MaybeAlign Align) const {
18485 SDLoc DL(Parent);
18486
18487 // Compute the address flags.
18488 unsigned Flags = computeMOFlags(Parent, N, DAG);
18489
18490 // Get the optimal address mode based on the Flags.
18491 PPC::AddrMode Mode = getAddrModeForFlags(Flags);
18492
18493 // If the address mode is DS-Form or DQ-Form, check if the FI is aligned.
18494 // Select an X-Form load if it is not.
18495 setXFormForUnalignedFI(N, Flags, Mode);
18496
18497 // Set the mode to PC-Relative addressing mode if we have a valid PC-Rel node.
18498 if ((Mode == PPC::AM_XForm) && isPCRelNode(N)) {
18499 assert(Subtarget.isUsingPCRelativeCalls() &&
18500 "Must be using PC-Relative calls when a valid PC-Relative node is "
18501 "present!");
18502 Mode = PPC::AM_PCRel;
18503 }
18504
18505 // Set Base and Disp accordingly depending on the address mode.
18506 switch (Mode) {
18507 case PPC::AM_DForm:
18508 case PPC::AM_DSForm:
18509 case PPC::AM_DQForm: {
18510 // This is a register plus a 16-bit immediate. The base will be the
18511 // register and the displacement will be the immediate unless it
18512 // isn't sufficiently aligned.
18513 if (Flags & PPC::MOF_RPlusSImm16) {
18514 SDValue Op0 = N.getOperand(0);
18515 SDValue Op1 = N.getOperand(1);
18516 int16_t Imm = Op1->getAsZExtVal();
18517 if (!Align || isAligned(*Align, Imm)) {
18518 Disp = DAG.getTargetConstant(Imm, DL, N.getValueType());
18519 Base = Op0;
18520 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op0)) {
18521 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
18522 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
18523 }
18524 break;
18525 }
18526 }
18527 // This is a register plus the @lo relocation. The base is the register
18528 // and the displacement is the global address.
18529 else if (Flags & PPC::MOF_RPlusLo) {
18530 Disp = N.getOperand(1).getOperand(0); // The global address.
18531 assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
18532 Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
18533 Disp.getOpcode() == ISD::TargetConstantPool ||
18534 Disp.getOpcode() == ISD::TargetJumpTable);
18535 Base = N.getOperand(0);
18536 break;
18537 }
18538 // This is a constant address at most 32 bits. The base will be
18539 // zero or load-immediate-shifted and the displacement will be
18540 // the low 16 bits of the address.
18541 else if (Flags & PPC::MOF_AddrIsSImm32) {
18542 auto *CN = cast<ConstantSDNode>(N);
18543 EVT CNType = CN->getValueType(0);
18544 uint64_t CNImm = CN->getZExtValue();
18545 // If this address fits entirely in a 16-bit sext immediate field, codegen
18546 // this as "d, 0".
18547 int16_t Imm;
18548 if (isIntS16Immediate(CN, Imm) && (!Align || isAligned(*Align, Imm))) {
18549 Disp = DAG.getTargetConstant(Imm, DL, CNType);
18550 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
18551 CNType);
18552 break;
18553 }
18554 // Handle 32-bit sext immediate with LIS + Addr mode.
18555 if ((CNType == MVT::i32 || isInt<32>(CNImm)) &&
18556 (!Align || isAligned(*Align, CNImm))) {
18557 int32_t Addr = (int32_t)CNImm;
18558 // Otherwise, break this down into LIS + Disp.
18559 Disp = DAG.getTargetConstant((int16_t)Addr, DL, MVT::i32);
18560 Base =
18561 DAG.getTargetConstant((Addr - (int16_t)Addr) >> 16, DL, MVT::i32);
18562 uint32_t LIS = CNType == MVT::i32 ? PPC::LIS : PPC::LIS8;
18563 Base = SDValue(DAG.getMachineNode(LIS, DL, CNType, Base), 0);
18564 break;
18565 }
18566 }
18567 // Otherwise, the PPC::MOF_NotAddNorCst flag is set. Load/Store is non-foldable.
18568 Disp = DAG.getTargetConstant(0, DL, getPointerTy(DAG.getDataLayout()));
18569 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
18570 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
18571 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
18572 } else
18573 Base = N;
18574 break;
18575 }
18576 case PPC::AM_PrefixDForm: {
18577 int64_t Imm34 = 0;
18578 unsigned Opcode = N.getOpcode();
18579 if (((Opcode == ISD::ADD) || (Opcode == ISD::OR)) &&
18580 (isIntS34Immediate(N.getOperand(1), Imm34))) {
18581 // N is an ADD/OR node, and its operand is a 34-bit signed immediate.
18582 Disp = DAG.getTargetConstant(Imm34, DL, N.getValueType());
18583 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
18584 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
18585 else
18586 Base = N.getOperand(0);
18587 } else if (isIntS34Immediate(N, Imm34)) {
18588 // The address is a 34-bit signed immediate.
18589 Disp = DAG.getTargetConstant(Imm34, DL, N.getValueType());
18590 Base = DAG.getRegister(PPC::ZERO8, N.getValueType());
18591 }
18592 break;
18593 }
18594 case PPC::AM_PCRel: {
18595 // When selecting PC-Relative instructions, "Base" is not utilized as
18596 // we select the address as [PC+imm].
18597 Disp = N;
18598 break;
18599 }
18600 case PPC::AM_None:
18601 break;
18602 default: { // By default, X-Form is always available to be selected.
18603 // When a frame index is not aligned, we also match by XForm.
18604 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N);
18605 Base = FI ? N : N.getOperand(1);
18606 Disp = FI ? DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
18607 N.getValueType())
18608 : N.getOperand(0);
18609 break;
18610 }
18611 }
18612 return Mode;
18613}
18614
18616 bool Return,
18617 bool IsVarArg) const {
18618 switch (CC) {
18619 case CallingConv::Cold:
18620 return (Return ? RetCC_PPC_Cold : CC_PPC64_ELF);
18621 default:
18622 return CC_PPC64_ELF;
18623 }
18624}
18625
18626 bool PPCTargetLowering::shouldInlineQuadwordAtomics() const {
18627 return Subtarget.isPPC64() && Subtarget.hasQuadwordAtomics();
18628}
18629
18630 TargetLowering::AtomicExpansionKind
18631 PPCTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
18632 unsigned Size = AI->getType()->getPrimitiveSizeInBits();
18633 if (shouldInlineQuadwordAtomics() && Size == 128)
18634 return AtomicExpansionKind::MaskedIntrinsic;
18635
18636 switch (AI->getOperation()) {
18637 case AtomicRMWInst::UIncWrap:
18638 case AtomicRMWInst::UDecWrap:
18639 return AtomicExpansionKind::CmpXChg;
18640 default:
18641 return TargetLowering::shouldExpandAtomicRMWInIR(AI);
18642 }
18643
18644 llvm_unreachable("unreachable atomicrmw operation");
18645}
18646
18647 TargetLowering::AtomicExpansionKind
18648 PPCTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
18649 unsigned Size = AI->getNewValOperand()->getType()->getPrimitiveSizeInBits();
18650 if (shouldInlineQuadwordAtomics() && Size == 128)
18651 return AtomicExpansionKind::MaskedIntrinsic;
18652 return TargetLowering::shouldExpandAtomicCmpXchgInIR(AI);
18653 }
18654
18655static Intrinsic::ID
18656 getIntrinsicForAtomicRMWBinOp128(AtomicRMWInst::BinOp BinOp) {
18657 switch (BinOp) {
18658 default:
18659 llvm_unreachable("Unexpected AtomicRMW BinOp");
18660 case AtomicRMWInst::Xchg:
18661 return Intrinsic::ppc_atomicrmw_xchg_i128;
18662 case AtomicRMWInst::Add:
18663 return Intrinsic::ppc_atomicrmw_add_i128;
18664 case AtomicRMWInst::Sub:
18665 return Intrinsic::ppc_atomicrmw_sub_i128;
18666 case AtomicRMWInst::And:
18667 return Intrinsic::ppc_atomicrmw_and_i128;
18668 case AtomicRMWInst::Or:
18669 return Intrinsic::ppc_atomicrmw_or_i128;
18670 case AtomicRMWInst::Xor:
18671 return Intrinsic::ppc_atomicrmw_xor_i128;
18672 case AtomicRMWInst::Nand:
18673 return Intrinsic::ppc_atomicrmw_nand_i128;
18674 }
18675}
18676
18677 Value *PPCTargetLowering::emitMaskedAtomicRMWIntrinsic(
18678 IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
18679 Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
18680 assert(shouldInlineQuadwordAtomics() && "Only support quadword now");
18681 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
18682 Type *ValTy = Incr->getType();
18683 assert(ValTy->getPrimitiveSizeInBits() == 128);
18684 Function *RMW = Intrinsic::getDeclaration(
18685 M, getIntrinsicForAtomicRMWBinOp128(AI->getOperation()));
18686 Type *Int64Ty = Type::getInt64Ty(M->getContext());
18687 Value *IncrLo = Builder.CreateTrunc(Incr, Int64Ty, "incr_lo");
18688 Value *IncrHi =
18689 Builder.CreateTrunc(Builder.CreateLShr(Incr, 64), Int64Ty, "incr_hi");
18690 Value *LoHi = Builder.CreateCall(RMW, {AlignedAddr, IncrLo, IncrHi});
18691 Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
18692 Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
18693 Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
18694 Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
18695 return Builder.CreateOr(
18696 Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
18697}
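// --- Editor's illustration (not part of PPCISelLowering.cpp) ---
// The IR built above is a plain 128-bit split/recombine around the
// intrinsic call. A self-contained host-side sketch of the same arithmetic
// (assumes a compiler providing __int128; names are hypothetical):
#include <cstdint>
static unsigned __int128 splitAndRecombine(unsigned __int128 V) {
  uint64_t Lo = static_cast<uint64_t>(V);       // CreateTrunc(V, Int64Ty)
  uint64_t Hi = static_cast<uint64_t>(V >> 64); // CreateTrunc(LShr(V, 64))
  // ...the target intrinsic consumes and produces {Lo, Hi} here...
  return (static_cast<unsigned __int128>(Hi) << 64) | Lo; // Or(Lo, Shl(Hi, 64))
}
// --- End editor's illustration ---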
18698
18699Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
18700 IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
18701 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
18702 assert(shouldInlineQuadwordAtomics() && "Only support quadword now");
18703 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
18704 Type *ValTy = CmpVal->getType();
18705 assert(ValTy->getPrimitiveSizeInBits() == 128);
18706 Function *IntCmpXchg =
18707 Intrinsic::getDeclaration(M, Intrinsic::ppc_cmpxchg_i128);
18708 Type *Int64Ty = Type::getInt64Ty(M->getContext());
18709 Value *CmpLo = Builder.CreateTrunc(CmpVal, Int64Ty, "cmp_lo");
18710 Value *CmpHi =
18711 Builder.CreateTrunc(Builder.CreateLShr(CmpVal, 64), Int64Ty, "cmp_hi");
18712 Value *NewLo = Builder.CreateTrunc(NewVal, Int64Ty, "new_lo");
18713 Value *NewHi =
18714 Builder.CreateTrunc(Builder.CreateLShr(NewVal, 64), Int64Ty, "new_hi");
18715 emitLeadingFence(Builder, CI, Ord);
18716 Value *LoHi =
18717 Builder.CreateCall(IntCmpXchg, {AlignedAddr, CmpLo, CmpHi, NewLo, NewHi});
18718 emitTrailingFence(Builder, CI, Ord);
18719 Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
18720 Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
18721 Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
18722 Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
18723 return Builder.CreateOr(
18724 Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
18725}
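// --- Editor's illustration (not part of PPCISelLowering.cpp) ---
// Rough shape of the IR emitted above for a seq_cst i128 cmpxchg (sketch
// only; value names are invented, and the exact barriers depend on the
// ordering passed to emitLeadingFence/emitTrailingFence):
//   call void @llvm.ppc.sync()                  ; leading fence (seq_cst)
//   %lohi = call { i64, i64 } @llvm.ppc.cmpxchg.i128(ptr %addr,
//               i64 %cmp_lo, i64 %cmp_hi, i64 %new_lo, i64 %new_hi)
//   call void @llvm.ppc.lwsync()                ; trailing fence
//   ; extractvalue/zext/shl/or then recombine {lo, hi} into the i128 result
// --- End editor's illustration ---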
unsigned const MachineRegisterInfo * MRI
static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect, bool IsTailCall)
#define Success
static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1, SDValue V2, unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
static bool isSignExtended(SDValue N, SelectionDAG &DAG)
static const unsigned PerfectShuffleTable[6561+1]
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
unsigned Intr
static std::pair< Register, unsigned > getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
static bool isLoad(int Opcode)
@ OP_COPY
static bool isFloatingPointZero(SDValue Op)
isFloatingPointZero - Return true if this is +0.0.
Function Alias Analysis Results
Atomic ordering constants.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Given that RA is a live propagate it s liveness to any other values it uses(according to Uses). void DeadArgumentEliminationPass
#define LLVM_DEBUG(X)
Definition: Debug.h:101
static RegisterPass< DebugifyModulePass > DM("debugify", "Attach debug info to everything")
This file defines the DenseMap class.
uint64_t Addr
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
IRTranslator LLVM IR MI
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
lazy value info
static bool isConstantOrUndef(const SDValue Op)
static bool isSplat(Value *V)
Return true if V is a splat of a value (which is used when multiplying a matrix with a scalar).
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
unsigned const TargetRegisterInfo * TRI
Module.h This file contains the declarations for the Module class.
LLVMContext & Context
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static CodeModel::Model getCodeModel(const PPCSubtarget &S, const TargetMachine &TM, const MachineOperand &MO)
cl::opt< bool > ANDIGlueBug("expose-ppc-andi-glue-bug", cl::desc("expose the ANDI glue bug on PPC"), cl::Hidden)
static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT, SelectionDAG &DAG, const SDLoc &dl)
getCanonicalConstSplat - Build a canonical splat immediate of Val with an element size of SplatSize.
static bool IsSelectCC(MachineInstr &MI)
static const TargetRegisterClass * getRegClassForSVT(MVT::SimpleValueType SVT, bool IsPPC64, bool HasP8Vector, bool HasVSX)
static bool isGPRShadowAligned(MCPhysReg Reg, Align RequiredAlign)
static bool needStackSlotPassParameters(const PPCSubtarget &Subtarget, const SmallVectorImpl< ISD::OutputArg > &Outs)
static bool isAlternatingShuffMask(const ArrayRef< int > &Mask, int NumElts)
static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG, SDValue Input, uint64_t Elems, uint64_t CorrectElems)
static cl::opt< bool > DisablePPCUnaligned("disable-ppc-unaligned", cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden)
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG, const PPCSubtarget &Subtarget)
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG)
static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement, bool Swap, SDLoc &DL, SelectionDAG &DAG)
This function is called when we have proved that a SETCC node can be replaced by subtraction (and oth...
static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL)
static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG, const PPCSubtarget &Subtarget)
static void setAlignFlagsForFI(SDValue N, unsigned &FlagSet, SelectionDAG &DAG)
Set alignment flags based on whether or not the Frame Index is aligned.
static bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget)
static bool provablyDisjointOr(SelectionDAG &DAG, const SDValue &N)
Used when computing address flags for selecting loads and stores.
static void CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, SDValue Arg, int SPDiff, unsigned ArgOffset, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments)
CalculateTailCallArgDest - Remember Argument for later processing.
static bool callsShareTOCBase(const Function *Caller, const GlobalValue *CalleeGV, const TargetMachine &TM)
constexpr uint64_t AIXSmallTlsPolicySizeLimit
static bool isPCRelNode(SDValue N)
static void LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, bool isTailCall, bool isVector, SmallVectorImpl< SDValue > &MemOpChains, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments, const SDLoc &dl)
LowerMemOpCallTo - Store the argument to the stack or remember it in case of tail calls.
static bool areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC, CallingConv::ID CalleeCC)
static const MCPhysReg FPR[]
FPR - The set of FP registers that should be allocated for arguments on Darwin and AIX.
static SDNode * isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG)
isCallCompatibleAddress - Return the immediate to use if the specified 32-bit value is representable ...
static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize)
CalculateStackSlotAlignment - Calculates the alignment of this argument on the stack.
static bool IsSelect(MachineInstr &MI)
static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, bool HasDirectMove, bool HasP8Vector)
Do we have an efficient pattern in a .td file for this node?
static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG, const PPCSubtarget &Subtarget)
static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &S)
static void setUsesTOCBasePtr(MachineFunction &MF)
static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG, const SDLoc &dl, const PPCSubtarget &Subtarget)
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, unsigned NumBytes)
EnsureStackAlignment - Round stack frame size up from NumBytes to ensure minimum alignment required f...
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N, SelectionDAG &DAG)
static bool isStoreConditional(SDValue Intrin, unsigned &StoreWidth)
static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB)
static bool isFPExtLoad(SDValue Op)
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, const SDLoc &dl, EVT DestVT=MVT::Other)
BuildIntrinsicOp - Return a unary operator intrinsic node with the specified intrinsic ID.
static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, unsigned Bytes, int Dist, SelectionDAG &DAG)
static void StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, SDValue Chain, const SmallVectorImpl< TailCallArgumentInfo > &TailCallArgs, SmallVectorImpl< SDValue > &MemOpChains, const SDLoc &dl)
StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
static const char AIXSSPCanaryWordName[]
static cl::opt< bool > UseAbsoluteJumpTables("ppc-use-absolute-jumptables", cl::desc("use absolute jump tables on ppc"), cl::Hidden)
static void setXFormForUnalignedFI(SDValue N, unsigned Flags, PPC::AddrMode &Mode)
static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign)
getMaxByValAlign - Helper for getByValTypeAlignment to determine the desired ByVal argument alignment...
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, unsigned Bytes, int Dist, SelectionDAG &DAG)
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned LHSStart, unsigned RHSStart)
isVMerge - Common function, used to match vmrg* shuffles.
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, unsigned &HiOpFlags, unsigned &LoOpFlags, const GlobalValue *GV=nullptr)
Return true if we should reference labels using a PICBase, set the HiOpFlags and LoOpFlags to the tar...
cl::opt< bool > DisableAutoPairedVecSt("disable-auto-paired-vec-st", cl::desc("disable automatically generated 32byte paired vector stores"), cl::init(true), cl::Hidden)
static void buildCallOperands(SmallVectorImpl< SDValue > &Ops, PPCTargetLowering::CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG, SmallVector< std::pair< unsigned, SDValue >, 8 > &RegsToPass, SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff, const PPCSubtarget &Subtarget)
static cl::opt< bool > DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32", cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden)
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget &ST)
Returns true if we should use a direct load into vector instruction (such as lxsd or lfd),...
static SDValue getDataClassTest(SDValue Op, FPClassTest Mask, const SDLoc &Dl, SelectionDAG &DAG, const PPCSubtarget &Subtarget)
static cl::opt< bool > DisableSCO("disable-ppc-sco", cl::desc("disable sibling call optimization on ppc"), cl::Hidden)
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT)
static cl::opt< bool > DisablePPCPreinc("disable-ppc-preinc", cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden)
static Intrinsic::ID getIntrinsicForAtomicRMWBinOp128(AtomicRMWInst::BinOp BinOp)
static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget)
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize)
CalculateStackSlotSize - Calculates the size reserved for this argument on the stack.
static int CalculateTailCallSPDiff(SelectionDAG &DAG, bool isTailCall, unsigned ParamSize)
CalculateTailCallSPDiff - Get the amount the stack pointer has to be adjusted to accommodate the argu...
static Instruction * callIntrinsic(IRBuilderBase &Builder, Intrinsic::ID Id)
static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl< int > &ShuffV, int LHSMaxIdx, int RHSMinIdx, int RHSMaxIdx, int HalfVec, unsigned ValidLaneWidth, const PPCSubtarget &Subtarget)
static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee, SDValue &Glue, SDValue &Chain, const SDLoc &dl)
static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, SelectionDAG &DAG)
static SDValue isScalarToVec(SDValue Op)
static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl)
static cl::opt< bool > DisablePerfectShuffle("ppc-disable-perfect-shuffle", cl::desc("disable vector permute decomposition"), cl::init(true), cl::Hidden)
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, bool &isDot, const PPCSubtarget &Subtarget)
getVectorCompareInfo - Given an intrinsic, return false if it is not a vector comparison.
static unsigned invertFMAOpcode(unsigned Opc)
static const SDValue * getNormalLoadInput(const SDValue &Op, bool &IsPermuted)
static cl::opt< unsigned > PPCMinimumJumpTableEntries("ppc-min-jump-table-entries", cl::init(64), cl::Hidden, cl::desc("Set minimum number of entries to use a jump table on PPC"))
static bool isValidSplatLoad(const PPCSubtarget &Subtarget, const SDValue &Op, unsigned &Opcode)
static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG, const PPCSubtarget &Subtarget, SDValue Chain=SDValue())
static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget)
static void PrepareTailCall(SelectionDAG &DAG, SDValue &InGlue, SDValue &Chain, const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, SDValue FPOp, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments)
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, SDValue OldRetAddr, SDValue OldFP, int SPDiff, const SDLoc &dl)
EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to the appropriate stack sl...
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, SelectionDAG &DAG, const SDLoc &dl)
BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified amount.
static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG)
static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT, SelectionDAG &DAG, SDValue ArgValue, MVT LocVT, const SDLoc &dl)
static void computeFlagsForAddressComputation(SDValue N, unsigned &FlagSet, SelectionDAG &DAG)
Given a node, compute flags that are used for address computation when selecting load and store instr...
cl::opt< bool > ANDIGlueBug
static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart)
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize, unsigned LinkageSize, unsigned ParamAreaSize, unsigned &ArgOffset, unsigned &AvailableFPRs, unsigned &AvailableVRs)
CalculateStackSlotUsed - Return whether this argument will use its stack slot (instead of being passe...
static unsigned getPPCStrictOpcode(unsigned Opc)
static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee, SDValue &Glue, SDValue &Chain, SDValue CallSeqStart, const CallBase *CB, const SDLoc &dl, bool hasNest, const PPCSubtarget &Subtarget)
static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width)
static bool isFunctionGlobalAddress(const GlobalValue *CalleeGV)
static bool isSplatBV(SDValue Op)
static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG)
static cl::opt< bool > DisableILPPref("disable-ppc-ilp-pref", cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden)
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int)
Check that the mask is shuffling N byte elements.
static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG)
Reduce the number of loads when building a vector.
static bool isValidPCRelNode(SDValue N)
const char LLVMTargetMachineRef TM
pre isel intrinsic Pre ISel Intrinsic Lowering
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI optimize exec mask operations pre RA
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static bool Enabled
Definition: Statistic.cpp:46
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
This file describes how to lower LLVM code to machine code.
This defines the Use class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
static bool is64Bit(const char *name)
Value * RHS
Value * LHS
bool isFixed(unsigned ValNo) const
Definition: PPCCCState.h:68
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition: APFloat.cpp:5196
bool isDenormal() const
Definition: APFloat.h:1296
APInt bitcastToAPInt() const
Definition: APFloat.h:1210
Class for arbitrary precision integers.
Definition: APInt.h:76
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:212
void clearBit(unsigned BitPosition)
Set a given bit to 0.
Definition: APInt.h:1385
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
Definition: APInt.h:427
APInt zext(unsigned width) const
Zero extend to a new width.
Definition: APInt.cpp:981
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1491
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition: APInt.h:1308
APInt abs() const
Get the absolute value.
Definition: APInt.h:1737
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:307
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition: APInt.h:413
bool getBoolValue() const
Convert APInt to a boolean value.
Definition: APInt.h:449
double bitsToDouble() const
Converts APInt bits to a double.
Definition: APInt.h:1671
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:418
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:284
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:274
An arbitrary precision integer that knows its signedness.
Definition: APSInt.h:23
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:539
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:748
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:760
@ Add
*p = old + v
Definition: Instructions.h:764
@ Or
*p = old | v
Definition: Instructions.h:772
@ Sub
*p = old - v
Definition: Instructions.h:766
@ And
*p = old & v
Definition: Instructions.h:768
@ Xor
*p = old ^ v
Definition: Instructions.h:774
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:800
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:804
@ Nand
*p = ~(old & v)
Definition: Instructions.h:770
BinOp getOperation() const
Definition: Instructions.h:845
This is an SDNode representing atomic operations.
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:349
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:205
const BlockAddress * getBlockAddress() const
The address of a basic block.
Definition: Constants.h:888
static BranchProbability getOne()
static BranchProbability getZero()
A "pseudo-class" with methods for operating on BUILD_VECTORs.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
CCState - This class holds information needed while lowering arguments and return values.
MachineFunction & getMachineFunction() const
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
bool isVarArg() const
void addLoc(const CCValAssign &V)
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP, bool IsCustom=false)
bool needsCustom() const
bool isMemLoc() const
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
int64_t getLocMemOffset() const
unsigned getValNo() const
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1455
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1703
bool isStrictFP() const
Determine if the call requires strict floating point semantics.
Definition: InstrTypes.h:2185
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1761
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1623
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
Value * getCalledOperand() const
Definition: InstrTypes.h:1696
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1629
unsigned arg_size() const
Definition: InstrTypes.h:1646
Function * getCaller()
Helper to get the caller (the parent function).
This class represents a function call, abstracting a target machine's calling convention.
bool isTailCall() const
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:267
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
uint64_t getZExtValue() const
const APInt & getAPIntValue() const
int64_t getSExtValue() const
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:238
unsigned getLargestLegalIntTypeSizeInBits() const
Returns the size of largest legal integer type size, or 0 if none are set.
Definition: DataLayout.cpp:900
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Definition: DataLayout.cpp:878
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:865
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:504
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition: Function.h:680
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.cpp:695
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
Definition: Function.cpp:707
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:677
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:262
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:338
arg_iterator arg_begin()
Definition: Function.h:813
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:342
size_t arg_size() const
Definition: Function.h:846
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:205
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition: Function.h:213
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:669
const GlobalValue * getGlobal() const
const GlobalObject * getAliaseeObject() const
Definition: Globals.cpp:556
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
Definition: GlobalValue.h:263
void setThreadLocalMode(ThreadLocalMode Val)
Definition: GlobalValue.h:267
bool hasHiddenVisibility() const
Definition: GlobalValue.h:250
StringRef getSection() const
Definition: Globals.cpp:174
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:655
bool isStrongDefinitionForLinker() const
Returns true if this global's definition will be the one chosen by the linker.
Definition: GlobalValue.h:630
bool hasComdat() const
Definition: GlobalValue.h:241
Type * getValueType() const
Definition: GlobalValue.h:296
bool hasProtectedVisibility() const
Definition: GlobalValue.h:251
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:94
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2006
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2499
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1431
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:174
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1410
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2010
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1491
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2395
const BasicBlock * getParent() const
Definition: Instruction.h:151
bool hasAtomicLoad() const LLVM_READONLY
Return true if this atomic instruction loads from memory.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
Base class for LoadSDNode and StoreSDNode.
An instruction for reading from memory.
Definition: Instructions.h:184
bool isUnordered() const
Definition: Instructions.h:274
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Context object for machine code objects.
Definition: MCContext.h:76
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
MCSymbolXCOFF * getQualNameSymbol() const
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:397
Metadata node.
Definition: Metadata.h:1067
Machine Value Type.
SimpleValueType SimpleTy
@ INVALID_SIMPLE_VALUE_TYPE
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
void setCallFrameSize(unsigned N)
Set the call frame size on entry to this basic block.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool hasVAStart() const
Returns true if the function calls the llvm.va_start intrinsic.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineModuleInfo & getMMI() const
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
Definition: MachineInstr.h:69
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
A description of a memory reference used in the backend.
uint64_t getSize() const
Return the size in bytes of the memory reference.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value,.
const MCContext & getContext() const
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateImm(int64_t Val)
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Register getLiveInVirtReg(MCRegister PReg) const
getLiveInVirtReg - If PReg is a live-in physical register, return the corresponding live-in virtual r...
An SDNode that represents everything that will be needed to construct a MachineInstr.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
Align getAlign() const
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getBasePtr() const
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:287
uint64_t getReturnSaveOffset() const
getReturnSaveOffset - Return the previous frame offset to save the return address.
uint64_t getFramePointerSaveOffset() const
getFramePointerSaveOffset - Return the previous frame offset to save the frame pointer.
unsigned getLinkageSize() const
getLinkageSize - Return the size of the PowerPC ABI linkage area.
uint64_t getTOCSaveOffset() const
getTOCSaveOffset - Return the previous frame offset to save the TOC register – 64-bit SVR4 ABI only.
PPCFunctionInfo - This class is derived from MachineFunction private PowerPC target-specific informat...
void setVarArgsNumFPR(unsigned Num)
unsigned getVarArgsNumFPR() const
void setVarArgsNumGPR(unsigned Num)
void appendParameterType(ParamType Type)
void setMinReservedArea(unsigned size)
unsigned getVarArgsNumGPR() const
unsigned getMinReservedArea() const
void setVarArgsStackOffset(int Offset)
void setVarArgsFrameIndex(int Index)
void addLiveInAttr(Register VReg, ISD::ArgFlagsTy Flags)
This function associates attributes for each live-in virtual register.
void setFramePointerSaveIndex(int Idx)
static bool hasPCRelFlag(unsigned TF)
Definition: PPCInstrInfo.h:287
bool is32BitELFABI() const
Definition: PPCSubtarget.h:219
unsigned descriptorTOCAnchorOffset() const
Definition: PPCSubtarget.h:259
bool isAIXABI() const
Definition: PPCSubtarget.h:214
bool useSoftFloat() const
Definition: PPCSubtarget.h:174
const PPCFrameLowering * getFrameLowering() const override
Definition: PPCSubtarget.h:142
bool needsSwapsForVSXMemOps() const
Definition: PPCSubtarget.h:202
bool isPPC64() const
isPPC64 - Return true if we are generating code for 64-bit pointer mode.
bool isUsingPCRelativeCalls() const
bool usesFunctionDescriptors() const
True if the ABI is descriptor based.
Definition: PPCSubtarget.h:253
MCRegister getEnvironmentPointerRegister() const
Definition: PPCSubtarget.h:271
const PPCInstrInfo * getInstrInfo() const override
Definition: PPCSubtarget.h:145
bool isSVR4ABI() const
Definition: PPCSubtarget.h:215
unsigned getCPUDirective() const
getCPUDirective - Returns the -m directive specified for the cpu.
Definition: PPCSubtarget.h:134
POPCNTDKind hasPOPCNTD() const
Definition: PPCSubtarget.h:206
bool isLittleEndian() const
Definition: PPCSubtarget.h:181
bool isTargetLinux() const
Definition: PPCSubtarget.h:212
MCRegister getTOCPointerRegister() const
Definition: PPCSubtarget.h:277
MCRegister getStackPointerRegister() const
Definition: PPCSubtarget.h:289
bool is64BitELFABI() const
Definition: PPCSubtarget.h:218
bool isELFv2ABI() const
const PPCTargetMachine & getTargetMachine() const
Definition: PPCSubtarget.h:155
bool isPredictableSelectIsExpensive() const
Definition: PPCSubtarget.h:295
bool enableMachineScheduler() const override
Scheduling customization.
const PPCRegisterInfo * getRegisterInfo() const override
Definition: PPCSubtarget.h:152
bool isGVIndirectSymbol(const GlobalValue *GV) const
True if the GV will be accessed via an indirect symbol.
unsigned descriptorEnvironmentPointerOffset() const
Definition: PPCSubtarget.h:265
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
CCAssignFn * ccAssignFnForCall(CallingConv::ID CC, bool Return, bool IsVarArg) const
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
isTruncateFree - Return true if it's free to truncate a value of type Ty1 to type Ty2.
Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override
Perform a masked atomicrmw using a target-specific intrinsic.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
bool isFPExtFree(EVT DestVT, EVT SrcVT) const override
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
PPC::AddrMode SelectForceXFormMode(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG) const
SelectForceXFormMode - Given the specified address, force it to be represented as an indexed [r+r] op...
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
bool hasInlineStackProbe(const MachineFunction &MF) const override
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
const char * getTargetNodeName(unsigned Opcode) const override
getTargetNodeName() - This method returns the name of a target specific DAG node.
bool supportsTailCallFor(const CallBase *CB) const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
MachineBasicBlock * emitProbedAlloca(MachineInstr &MI, MachineBasicBlock *MBB) const
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
MachineBasicBlock * EmitPartwordAtomicBinary(MachineInstr &MI, MachineBasicBlock *MBB, bool is8bit, unsigned Opcode, unsigned CmpOpcode=0, unsigned CmpPred=0) const
SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const override
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, MaybeAlign EncodingAlignment) const
SelectAddressRegImm - Returns true if the address N can be represented by a base register plus a sign...
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
TargetLowering::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG, MaybeAlign EncodingAlignment=std::nullopt) const
SelectAddressRegReg - Given the specified addressed, check to see if it can be more efficiently repre...
MachineBasicBlock * EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *MBB, unsigned AtomicSize, unsigned BinOpcode, unsigned CmpOpcode=0, unsigned CmpPred=0) const
SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const override
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressRegRegOnly - Given the specified addressed, force it to be represented as an indexed [r+...
bool useSoftFloat() const override
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const override
Perform a masked cmpxchg using a target-specific intrinsic.
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override
getByValTypeAlignment - Return the desired alignment for ByVal aggregate function arguments in the ca...
bool enableAggressiveFMAFusion(EVT VT) const override
Return true if target always benefits from combining into FMA for a given value type.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
bool isProfitableToHoist(Instruction *I) const override
isProfitableToHoist - Check if it is profitable to hoist instruction I to its dominator block.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint, return the type of constraint it is for this target.
const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const override
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const override
Return true if the target shall perform extract vector element and store given that the vector is kno...
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
It returns EVT::Other if the type should be determined using generic target-independent logic.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const
void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const override
bool useLoadStackGuardNode() const override
Override to support customized stack guard loading.
unsigned getStackProbeSize(const MachineFunction &MF) const
PPCTargetLowering(const PPCTargetMachine &TM, const PPCSubtarget &STI)
TargetLowering::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster than a pair of fmul and fadd i...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Is unaligned memory access allowed for the given type, and is it fast relative to software emulation.
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
bool SelectAddressRegImm34(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG) const
Similar to the 16-bit case but for instructions that take a 34-bit displacement field (prefixed loads...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
bool isJumpTableRelative() const override
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
PPC::AddrMode SelectOptimalAddrMode(const SDNode *Parent, SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, MaybeAlign Align) const
SelectOptimalAddrMode - Based on a node N and it's Parent (a MemSDNode), compute the address flags of...
Value * getSDagStackGuard(const Module &M) const override
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
bool SelectAddressPCRel(SDValue N, SDValue &Base) const
SelectAddressPCRel - Represent the specified address as pc relative to be represented as [pc+imm].
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the ISD::SETCC ValueType
bool SelectAddressEVXRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressEVXRegReg - Given the specified addressed, check to see if it can be more efficiently re...
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate,...
bool isAccessedAsGotIndirect(SDValue N) const
Align getPrefLoopAlignment(MachineLoop *ML) const override
Return the preferred loop alignment.
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const override
createFastISel - This method returns a target-specific FastISel object, or null if the target does no...
bool shouldInlineQuadwordAtomics() const
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate,...
Common code between 32-bit and 64-bit PowerPC targets.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Definition: DerivedTypes.h:662
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
void dump() const
Dump this node, for debugging.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
iterator_range< use_iterator > uses()
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
op_iterator op_end() const
op_iterator op_begin() const
static use_iterator use_end()
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isUndef() const
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
void dump() const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
unsigned getNumOperands() const
static SectionKind getMetadata()
Definition: SectionKind.h:188
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:225
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:722
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
const TargetSubtargetInfo & getSubtarget() const
Definition: SelectionDAG.h:474
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
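A sketch of getSetCC in use, assuming a hypothetical helper buildIsZero with an arbitrary integer SDValue X; the result type is obtained from the target via getSetCCResultType rather than hard-coded.

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Build (setcc X, 0, eq) using the target's preferred boolean type.
static SDValue buildIsZero(SelectionDAG &DAG, const SDLoc &DL, SDValue X) {
  EVT VT = X.getValueType();
  EVT CCVT = DAG.getTargetLoweringInfo().getSetCCResultType(
      DAG.getDataLayout(), *DAG.getContext(), VT);
  return DAG.getSetCC(DL, CCVT, X, DAG.getConstant(0, DL, VT), ISD::SETEQ);
}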
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:478
static constexpr unsigned MaxRecursionDepth
Definition: SelectionDAG.h:448
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:732
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Definition: SelectionDAG.h:828
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:472
SDValue getTargetFrameIndex(int FI, EVT VT)
Definition: SelectionDAG.h:727
SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base, unsigned Bytes, int Dist) const
Return true if loads are next to each other and can be merged.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:473
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:773
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, uint64_t Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
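A pre-legalization sketch of getNode, with a hypothetical helper buildAddShl over i64 operands. For simplicity the shift amount reuses VT; production code would consult getShiftAmountTy (listed below) instead.

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Build (add X, (shl Y, 3)). getNode() returns the existing node if an
// identical one is already in the DAG, so construction doubles as CSE.
static SDValue buildAddShl(SelectionDAG &DAG, const SDLoc &DL, SDValue X,
                           SDValue Y) {
  EVT VT = MVT::i64;
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Y, DAG.getConstant(3, DL, VT));
  return DAG.getNode(ISD::ADD, DL, VT, X, Shl);
}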
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:676
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:768
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:469
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:799
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
Definition: SelectionDAG.h:845
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
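A sketch combining computeKnownBits and MaskedValueIsZero; the helper isProvablyAligned8 is hypothetical. Both queries prove the same fact here, that the low three bits of Ptr are zero, i.e. 8-byte alignment.

#include "llvm/ADT/APInt.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

static bool isProvablyAligned8(SelectionDAG &DAG, SDValue Ptr) {
  KnownBits Known = DAG.computeKnownBits(Ptr);
  if (Known.countMinTrailingZeros() >= 3)   // low 3 bits known zero
    return true;
  unsigned BW = Ptr.getValueSizeInBits().getFixedValue();
  return DAG.MaskedValueIsZero(Ptr, APInt::getLowBitsSet(BW, 3));
}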
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
Definition: SelectionDAG.h:485
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:739
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:554
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
int getMaskElt(unsigned Idx) const
ArrayRef< int > getMask() const
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:360
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:427
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
void clear()
Definition: SmallSet.h:218
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
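A small usage sketch (the function dedupExample is hypothetical): insert() reports via its bool whether the element was new, so the set doubles as a visited filter without a separate count() probe.

#include "llvm/ADT/SmallSet.h"
#include <cassert>

static void dedupExample() {
  llvm::SmallSet<int, 8> Seen;       // up to 8 elements stay inline
  int Unique = 0;
  for (int V : {1, 2, 2, 3})
    if (Seen.insert(V).second)       // true only on first insertion
      ++Unique;
  assert(Unique == 3 && Seen.count(2) == 1);
}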
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
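A brief sketch with a hypothetical helper collectEven: the first N (here 4) elements live inline, so short results never touch the heap, while the API mirrors std::vector.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"

static llvm::SmallVector<unsigned, 4> collectEven(llvm::ArrayRef<unsigned> In) {
  llvm::SmallVector<unsigned, 4> Out;
  for (unsigned V : In)
    if (V % 2 == 0)
      Out.push_back(V);
  return Out;
}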
StackOffset holds a fixed and a scalable offset in bytes.
Definition: TypeSize.h:33
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:131
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
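A sketch of StringSwitch in the style backends use to classify inline-asm constraint strings; the enum and mapping here are illustrative, not PPC's actual table.

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

enum class ConstraintKind { Register, Memory, Unknown };

// Each Case() is tested in order; Default() supplies the fallback value.
static ConstraintKind classifyConstraint(llvm::StringRef S) {
  return llvm::StringSwitch<ConstraintKind>(S)
      .Case("r", ConstraintKind::Register)
      .Case("m", ConstraintKind::Memory)
      .Default(ConstraintKind::Unknown);
}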
Class to represent struct types.
Definition: DerivedTypes.h:216
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
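A minimal sketch of how a target constructor drives legalization with this hook and the related setTruncStoreAction/setLoadExtAction entries below. The opcode/type/action triples are illustrative, not PPC's actual configuration; the calls assume they appear inside a TargetLowering subclass constructor, where the LegalizeAction enumerators are in scope unqualified.

// Inside a hypothetical SomeTargetLowering constructor:
setOperationAction(ISD::SDIV, MVT::i64, Expand);    // expand to a sequence/libcall
setOperationAction(ISD::SELECT, MVT::f64, Custom);  // handled in LowerOperation()
setTruncStoreAction(MVT::f64, MVT::f32, Expand);    // no truncating f64->f32 store
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand); // no extending load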
virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual bool shouldExpandBuildVectorWithShuffles(EVT, unsigned DefinedValues) const
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
const TargetMachine & getTargetMachine() const
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?...
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
virtual Align getPrefLoopAlignment(MachineLoop *ML=nullptr) const
Return the preferred loop alignment.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
Returns the type for the shift amount of a shift opcode.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
virtual bool isJumpTableRelative() const
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
NegatibleCost
Enum that specifies when a float negation is beneficial.
std::vector< ArgListEntry > ArgListTy
void setHasMultipleConditionRegisters(bool hasManyRegs=true)
Tells the code generator that the target has multiple (allocatable) condition registers that can be u...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
virtual void insertSSPDeclarations(Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
virtual MCSymbol * getFunctionEntryPointSymbol(const GlobalValue *Func, const TargetMachine &TM) const
If supported, return the function entry point symbol.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, unsigned Depth=0) const
This is the helper function to return the newly negated expression only when the cost is cheaper.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG, const DenormalMode &Mode) const
Return a target-dependent comparison result if the input operand is suitable for use with a square ro...
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
virtual SDValue getSqrtResultForDenormInput(SDValue Operand, SelectionDAG &DAG) const
Return a target-dependent result if the input operand is not suitable for use with a square root esti...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:76
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
Reloc::Model getRelocationModel() const
Returns the code generation relocation model.
bool shouldAssumeDSOLocal(const GlobalValue *GV) const
TargetOptions Options
CodeModel::Model getCodeModel() const
Returns the code model.
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
unsigned NoInfsFPMath
NoInfsFPMath - This flag is enabled when the -enable-no-infs-fp-math flag is specified on the command...
unsigned PPCGenScalarMASSEntries
Enables scalar MASS conversions.
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:330
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition: Type.h:154
bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
@ FloatTyID
32-bit floating point type
Definition: Type.h:58
@ DoubleTyID
64-bit floating point type
Definition: Type.h:59
@ FP128TyID
128-bit floating point type (112-bit significand)
Definition: Type.h:61
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:302
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
Definition: Type.h:157
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition: Type.h:246
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
TypeID getTypeID() const
Return the type id for the type.
Definition: Type.h:137
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
self_iterator getIterator()
Definition: ilist_node.h:109
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
Definition: CallingConv.h:47
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition: ISDOpcodes.h:40
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:750
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
Definition: ISDOpcodes.h:236
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to; it returns an output chain.
Definition: ISDOpcodes.h:1126
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1122
@ TargetConstantPool
Definition: ISDOpcodes.h:168
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
Definition: ISDOpcodes.h:476
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
Definition: ISDOpcodes.h:44
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
Definition: ISDOpcodes.h:147
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:250
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:714
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1155
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
Definition: ISDOpcodes.h:1241
@ STRICT_FCEIL
Definition: ISDOpcodes.h:426
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:269
@ FMAXNUM_IEEE
Definition: ISDOpcodes.h:979
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:1031
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:783
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:483
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:199
@ RETURNADDR
Definition: ISDOpcodes.h:95
@ GlobalAddress
Definition: ISDOpcodes.h:78
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:790
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition: ISDOpcodes.h:543
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:390
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition: ISDOpcodes.h:688
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:255
@ STRICT_FSETCCS
Definition: ISDOpcodes.h:477
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
Definition: ISDOpcodes.h:913
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:903
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:229
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
Definition: ISDOpcodes.h:1199
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
Definition: ISDOpcodes.h:939
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
Definition: ISDOpcodes.h:411
@ GlobalTLSAddress
Definition: ISDOpcodes.h:79
@ FrameIndex
Definition: ISDOpcodes.h:80
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:774
@ STRICT_UINT_TO_FP
Definition: ISDOpcodes.h:450
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
Definition: ISDOpcodes.h:620
@ TargetExternalSymbol
Definition: ISDOpcodes.h:169
@ BR
Control flow instructions. These all have token chains.
Definition: ISDOpcodes.h:1047
@ TargetJumpTable
Definition: ISDOpcodes.h:167
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
Definition: ISDOpcodes.h:1221
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:988
@ FNEG
Perform various unary floating-point operations inspired by libm.
Definition: ISDOpcodes.h:930
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1077
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1056
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
Definition: ISDOpcodes.h:507
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition: ISDOpcodes.h:349
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:727
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
Definition: ISDOpcodes.h:1237
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
Definition: ISDOpcodes.h:222
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
Definition: ISDOpcodes.h:1151
@ TargetGlobalAddress
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
Definition: ISDOpcodes.h:164
@ STRICT_FTRUNC
Definition: ISDOpcodes.h:430
@ GET_ROUNDING
Returns the current rounding mode: -1 = Undefined, 0 = Round to 0, 1 = Round to nearest (ties to even), 2 = Round to ...
Definition: ISDOpcodes.h:880
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:651
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:705
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition: ISDOpcodes.h:600
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition: ISDOpcodes.h:573
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimum or maximum on two values,...
Definition: ISDOpcodes.h:978
@ STRICT_FMAXNUM
Definition: ISDOpcodes.h:424
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition: ISDOpcodes.h:535
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:780
@ STRICT_FMINNUM
Definition: ISDOpcodes.h:425
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:742
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
Definition: ISDOpcodes.h:1248
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
Definition: ISDOpcodes.h:971
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1041
@ ConstantPool
Definition: ISDOpcodes.h:82
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:798
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition: ISDOpcodes.h:674
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition: ISDOpcodes.h:888
@ STRICT_FROUND
Definition: ISDOpcodes.h:428
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition: ISDOpcodes.h:736
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
Definition: ISDOpcodes.h:449
@ STRICT_FFLOOR
Definition: ISDOpcodes.h:427
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
Definition: ISDOpcodes.h:1097
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
Definition: ISDOpcodes.h:129
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition: ISDOpcodes.h:94
@ STRICT_FP_TO_UINT
Definition: ISDOpcodes.h:443
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
Definition: ISDOpcodes.h:465
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:442
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:836
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
Definition: ISDOpcodes.h:1182
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition: ISDOpcodes.h:470
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:680
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1208
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:184
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:279
@ STRICT_FADD
Constrained versions of the binary floating point operators.
Definition: ISDOpcodes.h:400
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition: ISDOpcodes.h:524
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition: ISDOpcodes.h:869
@ INLINEASM
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:1094
@ STRICT_FNEARBYINT
Definition: ISDOpcodes.h:423
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
Definition: ISDOpcodes.h:141
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:786
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:1146
@ BRCOND
BRCOND - Conditional branch.
Definition: ISDOpcodes.h:1070
@ BlockAddress
Definition: ISDOpcodes.h:84
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:763
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:61
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:493
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition: ISDOpcodes.h:340
@ AssertZext
Definition: ISDOpcodes.h:62
@ CALLSEQ_START
CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of a call sequence,...
Definition: ISDOpcodes.h:1140
@ STRICT_FRINT
Definition: ISDOpcodes.h:422
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
Definition: ISDOpcodes.h:1320
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
Definition: ISDOpcodes.h:1205
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:192
@ TargetGlobalTLSAddress
Definition: ISDOpcodes.h:165
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition: ISDOpcodes.h:515
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
bool isUNINDEXEDLoad(const SDNode *N)
Returns true if the specified node is an unindexed load.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
Definition: ISDOpcodes.h:1556
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:1472
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1523
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:1503
bool isUnsignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs an unsigned comparison when used with intege...
Definition: ISDOpcodes.h:1562
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1451
@ Bitcast
Perform the operation on a different, but equivalently sized type.
@ VecShuffle
Definition: NVPTX.h:96
@ MO_TLSLDM_FLAG
MO_TLSLDM_FLAG - on AIX the ML relocation type is only valid for a reference to a TOC symbol from the...
Definition: PPC.h:146
@ MO_PIC_LO_FLAG
MO_PIC_LO_FLAG = MO_PIC_FLAG | MO_LO.
Definition: PPC.h:194
@ MO_TPREL_PCREL_FLAG
MO_TPREL_PCREL_FLAG = MO_PCREL_FLAG | MO_TPREL_FLAG.
Definition: PPC.h:197
@ MO_GOT_TPREL_PCREL_FLAG
MO_GOT_TPREL_PCREL_FLAG - A combination of flags; if these bits are set, they should produce the reloc...
Definition: PPC.h:172
@ MO_GOT_PCREL_FLAG
MO_GOT_PCREL_FLAG = MO_PCREL_FLAG | MO_GOT_FLAG.
Definition: PPC.h:203
@ MO_TLSGDM_FLAG
MO_TLSGDM_FLAG - If this bit is set the symbol reference is relative to the region handle of TLS Gene...
Definition: PPC.h:154
@ MO_PCREL_FLAG
MO_PCREL_FLAG - If this bit is set, the symbol reference is relative to the current instruction addre...
Definition: PPC.h:121
@ MO_TLSLD_FLAG
MO_TLSLD_FLAG - If this bit is set the symbol reference is relative to TLS Local Dynamic model.
Definition: PPC.h:150
@ MO_TLS_PCREL_FLAG
MO_TLS_PCREL_FLAG = MO_PCREL_FLAG | MO_TLS.
Definition: PPC.h:200
@ MO_TPREL_HA
Definition: PPC.h:179
@ MO_PLT
On PPC, the 12 bits are not enough for all target operand flags.
Definition: PPC.h:113
@ MO_TLS
Symbol for VK_PPC_TLS fixup attached to an ADD instruction.
Definition: PPC.h:188
@ MO_TPREL_FLAG
MO_TPREL_FLAG - If this bit is set, the symbol reference is relative to the thread pointer and the sy...
Definition: PPC.h:140
@ MO_TPREL_LO
Definition: PPC.h:178
@ MO_LO
MO_LO, MO_HA - lo16(symbol) and ha16(symbol)
Definition: PPC.h:175
@ MO_GOT_TLSLD_PCREL_FLAG
MO_GOT_TLSLD_PCREL_FLAG - A combination of flags; if these bits are set, they should produce the reloc...
Definition: PPC.h:166
@ MO_PIC_HA_FLAG
MO_PIC_HA_FLAG = MO_PIC_FLAG | MO_HA.
Definition: PPC.h:191
@ MO_TLSGD_FLAG
MO_TLSGD_FLAG - If this bit is set the symbol reference is relative to TLS General Dynamic model for ...
Definition: PPC.h:135
@ MO_GOT_TLSGD_PCREL_FLAG
MO_GOT_TLSGD_PCREL_FLAG - A combination of flags; if these bits are set, they should produce the reloc...
Definition: PPC.h:160
@ MO_HA
Definition: PPC.h:176
@ MO_PIC_FLAG
MO_PIC_FLAG - If this bit is set, the symbol reference is relative to the function's picbase,...
Definition: PPC.h:117
@ SEXT_LD_SPLAT
VSRC, CHAIN = SEXT_LD_SPLAT, CHAIN, Ptr - a splatting memory load that sign-extends.
@ FCTIDUZ
Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for unsigned integers with round ...
@ ADDI_TLSGD_L_ADDR
G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSGD_L and GET_TLS_ADDR unti...
@ FSQRT
Square root instruction.
@ STRICT_FCFID
Constrained integer-to-floating-point conversion instructions.
@ DYNALLOC
The following two target-specific nodes are used for calls through function pointers in the 64-bit SV...
@ COND_BRANCH
CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This corresponds to the COND_BRANCH pseudo ...
@ TLSLD_AIX
[GP|G8]RC = TLSLD_AIX, TOC_ENTRY(module handle) Op that requires a single input of the module handle ...
@ CALL_RM
The variants that implicitly define rounding mode for calls with strictfp semantics.
@ STORE_VEC_BE
CHAIN = STORE_VEC_BE CHAIN, VSRC, Ptr - Occurs only for little endian.
@ BDNZ
CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based loops.
@ MTVSRZ
Direct move from a GPR to a VSX register (zero)
@ SRL
These nodes represent PPC shifts.
@ VECINSERT
VECINSERT - The PPC vector insert instruction.
@ LXSIZX
GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an integer smaller than 64 bits into ...
@ FNMSUB
FNMSUB - Negated multiply-subtract instruction.
@ RFEBB
CHAIN = RFEBB CHAIN, State - Return from event-based branch.
@ FCTIDZ
FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64 operand, producing an f64 value...
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
@ GET_TLS_ADDR
x3 = GET_TLS_ADDR x3, Symbol - For the general-dynamic TLS model, produces a call to __tls_get_addr(s...
@ XXSPLTI32DX
XXSPLTI32DX - The PPC XXSPLTI32DX instruction.
@ ANDI_rec_1_EQ_BIT
i1 = ANDI_rec_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the eq or gt bit of CR0 after ex...
@ FRE
Reciprocal estimate instructions (unary FP ops).
@ ADDIS_GOT_TPREL_HA
G8RC = ADDIS_GOT_TPREL_HA x2, Symbol - Used by the initial-exec TLS model, produces an ADDIS8 instruc...
@ CLRBHRB
CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
@ STORE_COND
CHAIN,Glue = STORE_COND CHAIN, GPR, Ptr The store conditional instruction ST[BHWD]ARX that produces a...
@ SINT_VEC_TO_FP
Extract a subvector from signed integer vector and convert to FP.
@ EXTRACT_SPE
Extract SPE register component, second argument is high or low.
@ XXSWAPD
VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little endian.
@ ADDI_TLSLD_L_ADDR
G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSLD_L and GET_TLSLD_ADDR un...
@ ATOMIC_CMP_SWAP_8
ATOMIC_CMP_SWAP - the exact same as the target-independent nodes except they ensure that the compare ...
@ ST_VSR_SCAL_INT
Store scalar integers from VSR.
@ VCMP
RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP* instructions.
@ BCTRL
CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a BCTRL instruction.
@ BUILD_SPE64
BUILD_SPE64 and EXTRACT_SPE are analogous to BUILD_PAIR and EXTRACT_ELEMENT but take f64 arguments in...
@ LFIWZX
GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point load which zero-extends from a 32-bit inte...
@ RET_GLUE
Return with a glue operand, matched by 'blr'.
@ SCALAR_TO_VECTOR_PERMUTED
PowerPC instructions that have SCALAR_TO_VECTOR semantics tend to place the value into the least sign...
@ EXTRACT_VSX_REG
EXTRACT_VSX_REG = Extract one of the underlying vsx registers of an accumulator or pair register.
@ STXSIX
STXSIX - The STXSI[bh]X instruction.
@ MAT_PCREL_ADDR
MAT_PCREL_ADDR = Materialize a PC Relative address.
@ MFOCRF
R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
@ XXSPLT
XXSPLT - The PPC VSX splat instructions.
@ TOC_ENTRY
GPRC = TOC_ENTRY GA, TOC Loads the entry for GA from the TOC, where the TOC base is given by the last...
@ XXPERMDI
XXPERMDI - The PPC XXPERMDI instruction.
@ ADDIS_DTPREL_HA
G8RC = ADDIS_DTPREL_HA x3, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction t...
@ ADD_TLS
G8RC = ADD_TLS G8RReg, Symbol - Can be used by the initial-exec and local-exec TLS models,...
@ MTVSRA
Direct move from a GPR to a VSX register (algebraic)
@ VADD_SPLAT
VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded during instruction selection to optimi...
@ PPC32_GOT
GPRC = address of GLOBAL_OFFSET_TABLE.
@ ADDI_DTPREL_L
G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction ...
@ BCTRL_LOAD_TOC
CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl instruction and the TOC reload r...
@ PPC32_PICGOT
GPRC = address of GLOBAL_OFFSET_TABLE.
@ FCFID
FCFID - The FCFID instruction, taking an f64 operand and producing an f64 value containing the FP re...
@ CR6SET
ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
@ LBRX
GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a byte-swapping load instruction.
@ GET_TLS_MOD_AIX
x3 = GET_TLS_MOD_AIX _$TLSML - For the AIX local-dynamic TLS model, produces a call to ....
@ LD_VSX_LH
VSRC, CHAIN = LD_VSX_LH CHAIN, Ptr - This is a floating-point load of a v2f32 value into the lower ha...
@ PROBED_ALLOCA
To avoid stack clash, allocation is performed by block and each block is probed.
@ XXMFACC
XXMFACC = This corresponds to the xxmfacc instruction.
@ ADDIS_TLSGD_HA
G8RC = ADDIS_TLSGD_HA x2, Symbol - For the general-dynamic TLS model, produces an ADDIS8 instruction ...
@ ACC_BUILD
ACC_BUILD = Build an accumulator register from 4 VSX registers.
@ GlobalBaseReg
The result of the mflr at function entry, used for PIC code.
@ LXVD2X
VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
@ XSMAXC
XSMAXC[DQ]P, XSMINC[DQ]P - C-type min/max instructions.
@ CALL
CALL - A direct function call.
@ MTCTR
CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a MTCTR instruction.
@ TC_RETURN
TC_RETURN - A tail call return.
@ STFIWX
STFIWX - The STFIWX instruction.
@ LD_SPLAT
VSRC, CHAIN = LD_SPLAT, CHAIN, Ptr - a splatting memory load instruction such as LXVDSX,...
@ VCMP_rec
RESVEC, OUTFLAG = VCMP_rec(LHS, RHS, OPC) - Represents one of the altivec VCMP*_rec instructions.
@ MFFS
F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
@ PADDI_DTPREL
G8RC = PADDI_DTPREL x3, Symbol - For the pc-rel based local-dynamic TLS model, produces a PADDI8 inst...
@ BUILD_FP128
Direct move of 2 consecutive GPR to a VSX register.
@ VEXTS
VEXTS, ByteWidth - takes an input in VSFRC and produces an output in VSFRC that is sign-extended from...
@ TLS_LOCAL_EXEC_MAT_ADDR
TLS_LOCAL_EXEC_MAT_ADDR = Materialize an address for TLS global address when using local exec access ...
@ VPERM
VPERM - The PPC VPERM Instruction.
@ ADDIS_TLSLD_HA
G8RC = ADDIS_TLSLD_HA x2, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction th...
@ XXSPLTI_SP_TO_DP
XXSPLTI_SP_TO_DP - The PPC VSX splat instructions for immediates for converting immediate single prec...
@ GET_TLSLD_ADDR
x3 = GET_TLSLD_ADDR x3, Symbol - For the local-dynamic TLS model, produces a call to __tls_get_addr(s...
@ ADDI_TLSGD_L
x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS model, produces an ADDI8 instruction t...
@ DYNAREAOFFSET
This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to compute an offset from native ...
@ PAIR_BUILD
PAIR_BUILD = Build a vector pair register from 2 VSX registers.
@ STRICT_FADDRTZ
Constrained floating point add in round-to-zero mode.
@ FTSQRT
Test instruction for software square root.
@ FP_EXTEND_HALF
FP_EXTEND_HALF(VECTOR, IDX) - Custom extend upper (IDX=0) half or lower (IDX=1) half of v4f32 to v2f6...
@ CMPB
The CMPB instruction (takes two operands of i32 or i64).
@ VECSHL
VECSHL - The PPC vector shift left instruction.
@ ADDI_TLSLD_L
x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction tha...
@ FADDRTZ
F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding towards zero.
@ ZEXT_LD_SPLAT
VSRC, CHAIN = ZEXT_LD_SPLAT, CHAIN, Ptr - a splatting memory load that zero-extends.
@ SRA_ADDZE
The combination of sra[wd]i and addze used to implement signed integer division by a power of 2.
@ EXTSWSLI
EXTSWSLI = The PPC extswsli instruction, which does an extend-sign word and shift left immediate.
@ STXVD2X
CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
@ TLSGD_AIX
GPRC = TLSGD_AIX, TOC_ENTRY, TOC_ENTRY G8RC = TLSGD_AIX, TOC_ENTRY, TOC_ENTRY Op that combines two re...
@ UINT_VEC_TO_FP
Extract a subvector from unsigned integer vector and convert to FP.
@ GET_TPOINTER
x3 = GET_TPOINTER - Used for the local- and initial-exec TLS model on 32-bit AIX, produces a call to ...
@ LXVRZX
LXVRZX - Load VSX Vector Rightmost and Zero Extend This node represents v1i128 BUILD_VECTOR of a zero...
@ MFBHRBE
GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch history rolling buffer entry.
@ FCFIDU
Newer FCFID[US] integer-to-floating-point conversion instructions for unsigned integers and single-pr...
@ FSEL
FSEL - Traditional three-operand fsel node.
@ SWAP_NO_CHAIN
An SDNode for swaps that are not associated with any loads/stores and thereby have no chain.
@ LOAD_VEC_BE
VSRC, CHAIN = LOAD_VEC_BE CHAIN, Ptr - Occurs only for little endian.
@ LFIWAX
GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point load which sign-extends from a 32-bit inte...
@ STBRX
CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a byte-swapping store instruction.
@ LD_GOT_TPREL_L
G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec TLS model, produces a LD instruction ...
@ MFVSR
Direct move from a VSX register to a GPR.
@ TLS_DYNAMIC_MAT_PCREL_ADDR
TLS_DYNAMIC_MAT_PCREL_ADDR = Materialize a PC Relative address for TLS global address when using dyna...
@ Hi
Hi/Lo - These represent the high and low 16-bit parts of a global address respectively.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
Definition: PPCPredicates.h:26
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG)
get_VSPLTI_elt - If this is a build_vector of constants which can be formed by using a vspltis[bhw] i...
bool isXXBRDShuffleMask(ShuffleVectorSDNode *N)
isXXBRDShuffleMask - Return true if this is a shuffle mask suitable for a XXBRD instruction.
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo)
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for a VMRGH* instruction with the ...
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a VPKUDUM instruction.
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for a VMRGEW or VMRGOW instructi...
@ DIR_PWR_FUTURE
Definition: PPCSubtarget.h:64
bool isXXBRQShuffleMask(ShuffleVectorSDNode *N)
isXXBRQShuffleMask - Return true if this is a shuffle mask suitable for a XXBRQ instruction.
bool isXXBRWShuffleMask(ShuffleVectorSDNode *N)
isXXBRWShuffleMask - Return true if this is a shuffle mask suitable for a XXBRW instruction.
bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable for a XXPERMDI instruction.
bool isXXBRHShuffleMask(ShuffleVectorSDNode *N)
isXXBRHShuffleMask - Return true if this is a shuffle mask suitable for a XXBRH instruction.
unsigned getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize, SelectionDAG &DAG)
getSplatIdxForPPCMnemonics - Return the splat index as a value that is appropriate for PPC mnemonics ...
bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable for a XXSLDWI instruction.
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift amount, otherwise return -1.
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for a VMRGL* instruction with the ...
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, unsigned &InsertAtByte, bool &Swap, bool IsLE)
isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by the XXINSERTW instruction intr...
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize)
isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand specifies a splat of a singl...
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a VPKUWUM instruction.
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a VPKUHUM instruction.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Define
Register definition.
@ GeneralDynamic
Definition: CodeGen.h:46
Reg
All possible values of the reg field in the ModR/M byte.
@ XMC_PR
Program Code.
Definition: XCOFF.h:105
@ XTY_ER
External reference.
Definition: XCOFF.h:241
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
constexpr double e
Definition: MathExtras.h:31
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:236
@ Offset
Definition: DWP.cpp:456
static bool isIndirectCall(const MachineInstr &MI)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1731
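For example, a small standalone sketch of the range-based form (the function name and data here are illustrative):
#include "llvm/ADT/STLExtras.h"
#include <vector>

// Returns true when every shuffle-mask entry is non-negative (i.e. no
// undef sentinel values), without spelling out begin()/end().
static bool allMaskElementsDefined(const std::vector<int> &Mask) {
  return llvm::all_of(Mask, [](int M) { return M >= 0; });
}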
bool checkConvertToNonDenormSingle(APFloat &ArgAPFloat)
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags, and optionally also the offsets, if the return value is being lowered to memory.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
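As a usage sketch of one of its several overloads (the opcode, registers, and surrounding variables below are placeholders, not taken from this file), operands are chained onto the returned MachineInstrBuilder:
// Sketch: append "addi DestReg, SrcReg, 42" to the end of MBB. MBB, DL,
// TII, DestReg and SrcReg are assumed to exist in the enclosing pass.
BuildMI(MBB, MBB.end(), DL, TII->get(PPC::ADDI), DestReg)
    .addReg(SrcReg)
    .addImm(42);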
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
bool CC_PPC32_SVR4_ByVal(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
Definition: Alignment.h:145
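For instance, a self-contained check using only llvm/Support/Alignment.h:
#include "llvm/Support/Alignment.h"
#include <cassert>

void isAlignedExamples() {
  assert(llvm::isAligned(llvm::Align(16), 32));  // 32 is a multiple of 16
  assert(!llvm::isAligned(llvm::Align(16), 20)); // 20 is not
}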
bool isIntS16Immediate(SDNode *N, int16_t &Imm)
isIntS16Immediate - This method tests to see if the node is either a 32-bit or 64-bit immediate, and if the value can be accurately represented as a sign extension from a 16-bit value. If so, this returns true and the immediate.
bool CC_PPC32_SVR4_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64-bit edition).
Definition: MathExtras.h:269
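Because the function is constexpr, its behavior can be pinned down at compile time:
#include "llvm/Support/MathExtras.h"

static_assert(llvm::isPowerOf2_64(64), "64 == 2^6");
static_assert(!llvm::isPowerOf2_64(0), "zero is not a power of two");
static_assert(!llvm::isPowerOf2_64(48), "48 = 0b110000 has two set bits");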
static bool isRunOfOnes64(uint64_t Val, unsigned &MB, unsigned &ME)
int countr_zero(T Val)
Count the number of 0s from the least significant bit to the most significant bit, stopping at the first 1.
Definition: bit.h:215
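A few concrete values (for a zero input the count is the full bit width of the type):
#include "llvm/ADT/bit.h"
#include <cassert>

void countrZeroExamples() {
  assert(llvm::countr_zero(8u) == 3);  // 0b1000: three trailing zeros
  assert(llvm::countr_zero(1u) == 0);  // bit 0 is already set
  assert(llvm::countr_zero(0u) == 32); // all 32 bits are zero
}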
unsigned M1(unsigned Val)
Definition: VE.h:376
bool isReleaseOrStronger(AtomicOrdering AO)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1738
bool RetCC_PPC_Cold(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:264
bool convertToNonDenormSingle(APInt &ArgAPInt)
FPClassTest
Floating-point class tests, supported by the 'is_fpclass' intrinsic.
bool CC_PPC32_SVR4(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
bool CC_PPC64_ELF(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool RetCC_PPC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Mod
The access may modify the value stored in memory.
bool isIntS34Immediate(SDNode *N, int64_t &Imm)
isIntS34Immediate - This method tests whether the value of the given node can be accurately represented as a sign extension from a 34-bit value. If so, this returns true and the immediate.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
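A typical consumer drives one of these functions through a CCState, as in this hedged sketch (Ins, CallConv, IsVarArg and DAG are assumed to be in scope, as they are during LowerFormalArguments):
// Sketch: let the calling-convention function assign a location to every
// incoming argument, then walk the resulting assignments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
               *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_PPC64_ELF);
for (const CCValAssign &VA : ArgLocs) {
  if (VA.isRegLoc()) {
    // Passed in a register: VA.getLocReg().
  } else {
    // Passed on the stack: VA.getLocMemOffset().
  }
}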
@ Mul
Product of integers.
@ Add
Sum of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
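For example, rounding a size up to an alignment boundary:
#include "llvm/Support/Alignment.h"
#include <cassert>

void alignToExamples() {
  assert(llvm::alignTo(10, llvm::Align(8)) == 16); // round 10 up to the next 8-byte multiple
  assert(llvm::alignTo(16, llvm::Align(8)) == 16); // already aligned: unchanged
}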
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the given range.
Definition: STLExtras.h:1923
DWARFExpression::Operation Op
unsigned M0(unsigned Val)
Definition: VE.h:375
ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
bool isAcquireOrStronger(AtomicOrdering AO)
constexpr int32_t SignExtend32(uint32_t X)
Sign-extend the number in the bottom B bits of X to a 32-bit integer.
Definition: MathExtras.h:436
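The template parameter B selects how many low bits carry the value; since the function is constexpr, the corner cases can be checked at compile time:
#include "llvm/Support/MathExtras.h"

static_assert(llvm::SignExtend32<16>(0xFFFF) == -1, "0xFFFF as i16 is -1");
static_assert(llvm::SignExtend32<16>(0x7FFF) == 32767, "positive i16 passes through");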
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:212
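For example, the alignment known after stepping a 16-byte-aligned pointer forward by 8 bytes is only 8:
#include "llvm/Support/Alignment.h"
#include <cassert>

void commonAlignmentExamples() {
  assert(llvm::commonAlignment(llvm::Align(16), 8) == llvm::Align(8));
  assert(llvm::commonAlignment(llvm::Align(4), 16) == llvm::Align(4));
}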
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition: MathExtras.h:452
static bool isRunOfOnes(unsigned Val, unsigned &MB, unsigned &ME)
Returns true iff Val consists of one contiguous run of 1s with any number of 0s on either side.
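The MB/ME outputs use PPC's big-endian bit numbering (bit 0 is the MSB), matching the mask-begin/mask-end fields of rlwinm-style instructions. A worked example; the concrete MB/ME values below are my reading of that convention, not quoted from the source:
// Val = 0x00FF0000 -> ones occupy MSB-numbered bits 8..15 -> MB = 8, ME = 15
// Val = 0xFF0000FF -> the run wraps around LSB to MSB     -> MB = 24, ME = 7
// Val = 0x0F0F0000 -> two separate runs                   -> returns false
unsigned MB, ME;
if (isRunOfOnes(0x00FF0000u, MB, ME)) {
  // MB/ME can now populate a rotate-and-mask instruction.
}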
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition: bit.h:327
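A few concrete values (zero maps to zero):
#include "llvm/ADT/bit.h"
#include <cassert>

void bitFloorExamples() {
  assert(llvm::bit_floor(10u) == 8u);  // largest power of two <= 10
  assert(llvm::bit_floor(16u) == 16u); // exact powers map to themselves
  assert(llvm::bit_floor(0u) == 0u);
}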
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
This is used by foldLoadsRecursive() to capture a Root Load node which is of type or(load,...
static const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:249
static constexpr roundingMode rmNearestTiesToEven
Definition: APFloat.h:230
static const fltSemantics & PPCDoubleDouble() LLVM_READNONE
Definition: APFloat.cpp:252
static constexpr roundingMode rmTowardZero
Definition: APFloat.h:234
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
Represent subnormal handling kind for floating point instruction inputs and outputs.
Extended Value Type.
Definition: ValueTypes.h:34
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
Definition: ValueTypes.h:93
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: ValueTypes.h:380
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:136
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition: ValueTypes.h:73
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition: ValueTypes.h:274
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition: ValueTypes.h:146
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:358
uint64_t getScalarSizeInBits() const
Definition: ValueTypes.h:370
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:306
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:64
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition: ValueTypes.h:366
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:167
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition: ValueTypes.h:313
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:202
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition: ValueTypes.h:318
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
Definition: ValueTypes.h:141
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition: ValueTypes.h:156
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition: ValueTypes.h:326
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
Definition: ValueTypes.h:438
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:151
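Tying several of the EVT queries above together, a hedged fragment (Ctx is an LLVMContext assumed to be in scope, as in the usual ISel lowering context; llvm/CodeGen/ValueTypes.h provides EVT):
// Build the 128-bit vector type v4f32 and inspect it.
EVT VecVT = EVT::getVectorVT(Ctx, MVT::f32, 4);
assert(VecVT.isVector() && VecVT.isFloatingPoint());
assert(VecVT.getVectorNumElements() == 4);
assert(VecVT.getFixedSizeInBits() == 128);

// Same shape with integer lanes: v4f32 -> v4i32.
EVT IntVT = VecVT.changeVectorElementTypeToInteger();
assert(IntVT.getVectorElementType() == MVT::i32);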
unsigned getByValSize() const
void setByValSize(unsigned S)
Align getNonZeroByValAlign() const
InputArg - This struct carries flags and type information about a single incoming (formal) argument or incoming (from the perspective of the caller) return value virtual register.
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing (from the perspective of the caller) return value virtual register.
bool isConstant() const
Returns true if we know the value of all bits.
Definition: KnownBits.h:50
void resetAll()
Resets the known state of all bits.
Definition: KnownBits.h:71
const APInt & getConstant() const
Returns the value when all bits have a known value.
Definition: KnownBits.h:57
This class contains a discriminated union of information about pointers in memory operands, relating them back to the original IR value, if any.
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
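Putting the factory functions to work, a hedged sketch of storing a value to a known frame index (DAG, Chain, dl, Val, FI and PtrVT are assumed to be in scope, as they typically are during call lowering):
// Store Val into frame slot FI, tagging the memory operand with pointer
// info that names the fixed stack object so later passes can reason
// about aliasing.
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
SDValue Store = DAG.getStore(
    Chain, dl, Val, FIN,
    MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));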
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Structure that collects some common arguments that get passed around between the functions for call lowering.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg. If BaseGV is null, there is no BaseGV; if BaseOffs is zero, there is no base offset; if HasBaseReg is false, there is no base register; and if Scale is zero, there is no ScaleReg.
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setIsPostTypeLegalization(bool Value=true)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)