File: lib/Target/AArch64/AArch64ISelLowering.cpp
Warning: line 7921, column 29: Called C++ object pointer is null

//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "AArch64ISelLowering.h"
#include "AArch64CallingConvention.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64PerfectShuffle.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "aarch64-lower"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumShiftInserts, "Number of vector shift inserts");
STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");

static cl::opt<bool>
EnableAArch64SlrGeneration("aarch64-shift-insert-generation", cl::Hidden,
                           cl::desc("Allow AArch64 SLI/SRI formation"),
                           cl::init(false));

// FIXME: The necessary dtprel relocations don't seem to be supported
// well in the GNU bfd and gold linkers at the moment. Therefore, by
// default, for now, fall back to GeneralDynamic code generation.
cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration(
    "aarch64-elf-ldtls-generation", cl::Hidden,
    cl::desc("Allow AArch64 Local Dynamic TLS code generation"),
    cl::init(false));

static cl::opt<bool>
EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden,
                         cl::desc("Enable AArch64 logical imm instruction "
                                  "optimization"),
                         cl::init(true));

/// Value type used for condition codes.
static const MVT MVT_CC = MVT::i32;

AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
                                             const AArch64Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // AArch64 doesn't have comparisons which set GPRs or setcc instructions, so
  // we have to make something up. Arbitrarily, choose ZeroOrOne.
  setBooleanContents(ZeroOrOneBooleanContent);
  // When comparing vectors the result sets the different elements in the
  // vector to all-one or all-zero.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
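  // Illustrative note (not from the original source): with these settings, a
  // scalar truth value such as the result of (setcc i32 ...) is materialized
  // as 0 or 1, while a vector compare such as (setcc v4i32 ...) produces
  // lanes that are all-zeros or all-ones, matching what NEON compare
  // instructions actually write.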

  // Set up the register classes.
  addRegisterClass(MVT::i32, &AArch64::GPR32allRegClass);
  addRegisterClass(MVT::i64, &AArch64::GPR64allRegClass);

  if (Subtarget->hasFPARMv8()) {
    addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
  }

  if (Subtarget->hasNEON()) {
    addRegisterClass(MVT::v16i8, &AArch64::FPR8RegClass);
    addRegisterClass(MVT::v8i16, &AArch64::FPR16RegClass);
    // Someone set us up the NEON.
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);
    addDRTypeForNEON(MVT::v1f64);
    addDRTypeForNEON(MVT::v4f16);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);
    addQRTypeForNEON(MVT::v8f16);
  }

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Provide all sorts of operation actions
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::f16, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f16, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f16, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);

  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Custom lowering hooks are needed for XOR
  // to fold it into CSINC/CSINV.
  setOperationAction(ISD::XOR, MVT::i32, Custom);
  setOperationAction(ISD::XOR, MVT::i64, Custom);

  // Virtually no operations on f128 are legal, but LLVM can't expand them
  // when there's a valid register class, so we need custom operations in
  // most cases.
  setOperationAction(ISD::FABS, MVT::f128, Expand);
  setOperationAction(ISD::FADD, MVT::f128, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FDIV, MVT::f128, Custom);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FMUL, MVT::f128, Custom);
  setOperationAction(ISD::FNEG, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FRINT, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSQRT, MVT::f128, Expand);
  setOperationAction(ISD::FSUB, MVT::f128, Custom);
  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);
  setOperationAction(ISD::SELECT, MVT::f128, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);

  // Lowering for many of the conversions is actually specified by the non-f128
  // type. The LowerXXX function will be trivial when f128 isn't involved.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);

  // Variable arguments.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Variable-sized objects.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (Subtarget->isTargetWindows())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);

  // Constant pool entries
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);

  // BlockAddress
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);

  // Add/Sub overflow ops with MVT::Glues are lowered to NZCV dependences.
  setOperationAction(ISD::ADDC, MVT::i32, Custom);
  setOperationAction(ISD::ADDE, MVT::i32, Custom);
  setOperationAction(ISD::SUBC, MVT::i32, Custom);
  setOperationAction(ISD::SUBE, MVT::i32, Custom);
  setOperationAction(ISD::ADDC, MVT::i64, Custom);
  setOperationAction(ISD::ADDE, MVT::i64, Custom);
  setOperationAction(ISD::SUBC, MVT::i64, Custom);
  setOperationAction(ISD::SUBE, MVT::i64, Custom);

  // AArch64 lacks both left-rotate and popcount instructions.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
  }

  // AArch64 doesn't have {U|S}MUL_LOHI.
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i64, Custom);

  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Custom lower Add/Sub/Mul with overflow.
  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::SADDO, MVT::i64, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i64, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i64, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i64, Custom);
  setOperationAction(ISD::SMULO, MVT::i32, Custom);
  setOperationAction(ISD::SMULO, MVT::i64, Custom);
  setOperationAction(ISD::UMULO, MVT::i32, Custom);
  setOperationAction(ISD::UMULO, MVT::i64, Custom);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  if (Subtarget->hasFullFP16())
    setOperationAction(ISD::FCOPYSIGN, MVT::f16, Custom);
  else
    setOperationAction(ISD::FCOPYSIGN, MVT::f16, Promote);

  setOperationAction(ISD::FREM, MVT::f16, Promote);
  setOperationAction(ISD::FREM, MVT::v4f16, Promote);
  setOperationAction(ISD::FREM, MVT::v8f16, Promote);
  setOperationAction(ISD::FPOW, MVT::f16, Promote);
  setOperationAction(ISD::FPOW, MVT::v4f16, Promote);
  setOperationAction(ISD::FPOW, MVT::v8f16, Promote);
  setOperationAction(ISD::FPOWI, MVT::f16, Promote);
  setOperationAction(ISD::FCOS, MVT::f16, Promote);
  setOperationAction(ISD::FCOS, MVT::v4f16, Promote);
  setOperationAction(ISD::FCOS, MVT::v8f16, Promote);
  setOperationAction(ISD::FSIN, MVT::f16, Promote);
  setOperationAction(ISD::FSIN, MVT::v4f16, Promote);
  setOperationAction(ISD::FSIN, MVT::v8f16, Promote);
  setOperationAction(ISD::FSINCOS, MVT::f16, Promote);
  setOperationAction(ISD::FSINCOS, MVT::v4f16, Promote);
  setOperationAction(ISD::FSINCOS, MVT::v8f16, Promote);
  setOperationAction(ISD::FEXP, MVT::f16, Promote);
  setOperationAction(ISD::FEXP, MVT::v4f16, Promote);
  setOperationAction(ISD::FEXP, MVT::v8f16, Promote);
  setOperationAction(ISD::FEXP2, MVT::f16, Promote);
  setOperationAction(ISD::FEXP2, MVT::v4f16, Promote);
  setOperationAction(ISD::FEXP2, MVT::v8f16, Promote);
  setOperationAction(ISD::FLOG, MVT::f16, Promote);
  setOperationAction(ISD::FLOG, MVT::v4f16, Promote);
  setOperationAction(ISD::FLOG, MVT::v8f16, Promote);
  setOperationAction(ISD::FLOG2, MVT::f16, Promote);
  setOperationAction(ISD::FLOG2, MVT::v4f16, Promote);
  setOperationAction(ISD::FLOG2, MVT::v8f16, Promote);
  setOperationAction(ISD::FLOG10, MVT::f16, Promote);
  setOperationAction(ISD::FLOG10, MVT::v4f16, Promote);
  setOperationAction(ISD::FLOG10, MVT::v8f16, Promote);

  if (!Subtarget->hasFullFP16()) {
    setOperationAction(ISD::SELECT, MVT::f16, Promote);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Promote);
    setOperationAction(ISD::SETCC, MVT::f16, Promote);
    setOperationAction(ISD::BR_CC, MVT::f16, Promote);
    setOperationAction(ISD::FADD, MVT::f16, Promote);
    setOperationAction(ISD::FSUB, MVT::f16, Promote);
    setOperationAction(ISD::FMUL, MVT::f16, Promote);
    setOperationAction(ISD::FDIV, MVT::f16, Promote);
    setOperationAction(ISD::FMA, MVT::f16, Promote);
    setOperationAction(ISD::FNEG, MVT::f16, Promote);
    setOperationAction(ISD::FABS, MVT::f16, Promote);
    setOperationAction(ISD::FCEIL, MVT::f16, Promote);
    setOperationAction(ISD::FSQRT, MVT::f16, Promote);
    setOperationAction(ISD::FFLOOR, MVT::f16, Promote);
    setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote);
    setOperationAction(ISD::FRINT, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Promote);
    setOperationAction(ISD::FTRUNC, MVT::f16, Promote);
    setOperationAction(ISD::FMINNUM, MVT::f16, Promote);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Promote);
    setOperationAction(ISD::FMINNAN, MVT::f16, Promote);
    setOperationAction(ISD::FMAXNAN, MVT::f16, Promote);

    // Promote v4f16 to v4f32 when that is known to be safe.
    setOperationAction(ISD::FADD, MVT::v4f16, Promote);
    setOperationAction(ISD::FSUB, MVT::v4f16, Promote);
    setOperationAction(ISD::FMUL, MVT::v4f16, Promote);
    setOperationAction(ISD::FDIV, MVT::v4f16, Promote);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f16, Promote);
    setOperationAction(ISD::FP_ROUND, MVT::v4f16, Promote);
    AddPromotedToType(ISD::FADD, MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FSUB, MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FMUL, MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FDIV, MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FP_EXTEND, MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FP_ROUND, MVT::v4f16, MVT::v4f32);

    setOperationAction(ISD::FABS, MVT::v4f16, Expand);
    setOperationAction(ISD::FNEG, MVT::v4f16, Expand);
    setOperationAction(ISD::FROUND, MVT::v4f16, Expand);
    setOperationAction(ISD::FMA, MVT::v4f16, Expand);
    setOperationAction(ISD::SETCC, MVT::v4f16, Expand);
    setOperationAction(ISD::BR_CC, MVT::v4f16, Expand);
    setOperationAction(ISD::SELECT, MVT::v4f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::v4f16, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f16, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v4f16, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f16, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f16, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f16, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f16, Expand);
    setOperationAction(ISD::FSQRT, MVT::v4f16, Expand);

    setOperationAction(ISD::FABS, MVT::v8f16, Expand);
    setOperationAction(ISD::FADD, MVT::v8f16, Expand);
    setOperationAction(ISD::FCEIL, MVT::v8f16, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v8f16, Expand);
    setOperationAction(ISD::FDIV, MVT::v8f16, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v8f16, Expand);
    setOperationAction(ISD::FMA, MVT::v8f16, Expand);
    setOperationAction(ISD::FMUL, MVT::v8f16, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f16, Expand);
    setOperationAction(ISD::FNEG, MVT::v8f16, Expand);
    setOperationAction(ISD::FROUND, MVT::v8f16, Expand);
    setOperationAction(ISD::FRINT, MVT::v8f16, Expand);
    setOperationAction(ISD::FSQRT, MVT::v8f16, Expand);
    setOperationAction(ISD::FSUB, MVT::v8f16, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v8f16, Expand);
    setOperationAction(ISD::SETCC, MVT::v8f16, Expand);
    setOperationAction(ISD::BR_CC, MVT::v8f16, Expand);
    setOperationAction(ISD::SELECT, MVT::v8f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::v8f16, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v8f16, Expand);
  }

  // AArch64 has implementations of a lot of rounding-like FP operations.
  for (MVT Ty : {MVT::f32, MVT::f64}) {
    setOperationAction(ISD::FFLOOR, Ty, Legal);
    setOperationAction(ISD::FNEARBYINT, Ty, Legal);
    setOperationAction(ISD::FCEIL, Ty, Legal);
    setOperationAction(ISD::FRINT, Ty, Legal);
    setOperationAction(ISD::FTRUNC, Ty, Legal);
    setOperationAction(ISD::FROUND, Ty, Legal);
    setOperationAction(ISD::FMINNUM, Ty, Legal);
    setOperationAction(ISD::FMAXNUM, Ty, Legal);
    setOperationAction(ISD::FMINNAN, Ty, Legal);
    setOperationAction(ISD::FMAXNAN, Ty, Legal);
  }

  if (Subtarget->hasFullFP16()) {
    setOperationAction(ISD::FNEARBYINT, MVT::f16, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f16, Legal);
    setOperationAction(ISD::FCEIL, MVT::f16, Legal);
    setOperationAction(ISD::FRINT, MVT::f16, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f16, Legal);
    setOperationAction(ISD::FROUND, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINNAN, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::f16, Legal);
  }

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);

  // Lower READCYCLECOUNTER using an mrs from PMCCNTR_EL0.
  // This requires the Performance Monitors extension.
  if (Subtarget->hasPerfMon())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);

  if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
      getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
    // Issue __sincos_stret if available.
    setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
    setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
  } else {
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  }

  // Make floating-point constants legal for the large code model, so they
  // don't become loads from the constant pool.
  if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) {
    setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
    setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  }

  // AArch64 does not have floating-point extending loads, i1 sign-extending
  // loads, floating-point truncating stores, or v2i32->v2i16 truncating
  // stores.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);
  }
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f128, MVT::f80, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);

  setOperationAction(ISD::BITCAST, MVT::i16, Custom);
  setOperationAction(ISD::BITCAST, MVT::f16, Custom);

  // Indexed loads and stores are supported.
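  // Illustrative note (not from the original source): "indexed" refers to
  // AArch64's pre/post-indexed addressing forms, e.g.
  //   ldr x0, [x1, #8]!   // pre-indexed: x1 += 8, then load
  //   ldr x0, [x1], #8    // post-indexed: load, then x1 += 8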
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    setIndexedLoadAction(im, MVT::i8, Legal);
    setIndexedLoadAction(im, MVT::i16, Legal);
    setIndexedLoadAction(im, MVT::i32, Legal);
    setIndexedLoadAction(im, MVT::i64, Legal);
    setIndexedLoadAction(im, MVT::f64, Legal);
    setIndexedLoadAction(im, MVT::f32, Legal);
    setIndexedLoadAction(im, MVT::f16, Legal);
    setIndexedStoreAction(im, MVT::i8, Legal);
    setIndexedStoreAction(im, MVT::i16, Legal);
    setIndexedStoreAction(im, MVT::i32, Legal);
    setIndexedStoreAction(im, MVT::i64, Legal);
    setIndexedStoreAction(im, MVT::f64, Legal);
    setIndexedStoreAction(im, MVT::f32, Legal);
    setIndexedStoreAction(im, MVT::f16, Legal);
  }

  // Trap.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // We combine OR nodes for bitfield operations.
  setTargetDAGCombine(ISD::OR);

  // Vector add and sub nodes may conceal a high-half opportunity.
  // Also, try to fold ADD into CSINC/CSINV.
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);

  setTargetDAGCombine(ISD::FP_TO_SINT);
  setTargetDAGCombine(ISD::FP_TO_UINT);
  setTargetDAGCombine(ISD::FDIV);

  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);

  setTargetDAGCombine(ISD::ANY_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::BITCAST);
  setTargetDAGCombine(ISD::CONCAT_VECTORS);
  setTargetDAGCombine(ISD::STORE);
  if (Subtarget->supportsAddressTopByteIgnored())
    setTargetDAGCombine(ISD::LOAD);

  setTargetDAGCombine(ISD::MUL);

  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::VSELECT);

  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);

  setTargetDAGCombine(ISD::GlobalAddress);

  // In case of strict alignment, avoid an excessive number of byte-wide
  // stores.
  MaxStoresPerMemsetOptSize = 8;
  MaxStoresPerMemset = Subtarget->requiresStrictAlign()
                       ? MaxStoresPerMemsetOptSize : 32;

  MaxGluedStoresPerMemcpy = 4;
  MaxStoresPerMemcpyOptSize = 4;
  MaxStoresPerMemcpy = Subtarget->requiresStrictAlign()
                       ? MaxStoresPerMemcpyOptSize : 16;

  MaxStoresPerMemmoveOptSize = MaxStoresPerMemmove = 4;

  setStackPointerRegisterToSaveRestore(AArch64::SP);

  setSchedulingPreference(Sched::Hybrid);

  EnableExtLdPromotion = true;

  // Set required alignment.
  setMinFunctionAlignment(2);
  // Set preferred alignments.
  setPrefFunctionAlignment(STI.getPrefFunctionAlignment());
  setPrefLoopAlignment(STI.getPrefLoopAlignment());

  // Only change the limit for entries in a jump table if specified by
  // the subtarget, but not at the command line.
  unsigned MaxJT = STI.getMaximumJumpTableSize();
  if (MaxJT && getMaximumJumpTableSize() == 0)
    setMaximumJumpTableSize(MaxJT);

  setHasExtractBitsInsn(true);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (Subtarget->hasNEON()) {
    // FIXME: v1f64 shouldn't be legal if we can avoid it, because it leads to
    // silliness like this:
    setOperationAction(ISD::FABS, MVT::v1f64, Expand);
    setOperationAction(ISD::FADD, MVT::v1f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v1f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v1f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v1f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v1f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v1f64, Expand);
    setOperationAction(ISD::FMA, MVT::v1f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v1f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v1f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v1f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v1f64, Expand);
    setOperationAction(ISD::FREM, MVT::v1f64, Expand);
    setOperationAction(ISD::FROUND, MVT::v1f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v1f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v1f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::v1f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v1f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v1f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v1f64, Expand);
    setOperationAction(ISD::SETCC, MVT::v1f64, Expand);
    setOperationAction(ISD::BR_CC, MVT::v1f64, Expand);
    setOperationAction(ISD::SELECT, MVT::v1f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::v1f64, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v1f64, Expand);

    setOperationAction(ISD::FP_TO_SINT, MVT::v1i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::v1i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::v1i64, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::v1i64, Expand);
    setOperationAction(ISD::FP_ROUND, MVT::v1f64, Expand);

    setOperationAction(ISD::MUL, MVT::v1i64, Expand);

    // AArch64 doesn't have direct vector -> f32 conversion instructions for
    // elements smaller than i32, so promote the input to i32 first.
    setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i8, MVT::v4i32);
    setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i8, MVT::v4i32);
    setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i16, MVT::v4i32);
    setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i16, MVT::v4i32);
    // i8 and i16 vector elements also need promotion to i32 for v8i8 or
    // v8i16 -> v8f16 conversions.
    setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i8, MVT::v8i32);
    setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i8, MVT::v8i32);
    setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i16, MVT::v8i32);
    setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i16, MVT::v8i32);
    // Similarly, there is no direct i32 -> f64 vector conversion instruction.
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Custom);
    // Nor is there a direct i32 -> f16 vector conversion. Set it to Custom,
    // so the conversion happens in two steps: v4i32 -> v4f32 -> v4f16.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);

    setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);

    setOperationAction(ISD::CTTZ, MVT::v2i8, Expand);
    setOperationAction(ISD::CTTZ, MVT::v4i16, Expand);
    setOperationAction(ISD::CTTZ, MVT::v2i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::v1i64, Expand);
    setOperationAction(ISD::CTTZ, MVT::v16i8, Expand);
    setOperationAction(ISD::CTTZ, MVT::v8i16, Expand);
    setOperationAction(ISD::CTTZ, MVT::v4i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::v2i64, Expand);

    // AArch64 doesn't have MUL.2d:
    setOperationAction(ISD::MUL, MVT::v2i64, Expand);
    // Custom handling for some quad-vector types to detect MULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
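    // Illustrative example (not from the original source): a DAG such as
    //   (mul (sext v8i8:%a to v8i16), (sext v8i8:%b to v8i16))
    // can be matched to a single SMULL, rather than widening both operands
    // and doing a full v8i16 multiply.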

    // Vector reductions
    for (MVT VT : MVT::integer_valuetypes()) {
      setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
    }
    for (MVT VT : MVT::fp_valuetypes()) {
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
    }

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal);
    setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
    // Likewise, narrowing and extending vector loads/stores aren't handled
    // directly.
    for (MVT VT : MVT::vector_valuetypes()) {
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

      if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32) {
        setOperationAction(ISD::MULHS, VT, Custom);
        setOperationAction(ISD::MULHU, VT, Custom);
      } else {
        setOperationAction(ISD::MULHS, VT, Expand);
        setOperationAction(ISD::MULHU, VT, Expand);
      }
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);

      setOperationAction(ISD::BSWAP, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // AArch64 has implementations of a lot of rounding-like FP operations.
    for (MVT Ty : {MVT::v2f32, MVT::v4f32, MVT::v2f64}) {
      setOperationAction(ISD::FFLOOR, Ty, Legal);
      setOperationAction(ISD::FNEARBYINT, Ty, Legal);
      setOperationAction(ISD::FCEIL, Ty, Legal);
      setOperationAction(ISD::FRINT, Ty, Legal);
      setOperationAction(ISD::FTRUNC, Ty, Legal);
      setOperationAction(ISD::FROUND, Ty, Legal);
    }

    setTruncStoreAction(MVT::v4i16, MVT::v4i8, Custom);
  }

  PredictableSelectIsExpensive = Subtarget->predictableSelectIsExpensive();
}

void AArch64TargetLowering::addTypeForNEON(MVT VT, MVT PromotedBitwiseVT) {
  assert(VT.isVector() && "VT should be a vector type");

  if (VT.isFloatingPoint()) {
    MVT PromoteTo = EVT(VT).changeVectorElementTypeToInteger().getSimpleVT();
    setOperationPromotedToType(ISD::LOAD, VT, PromoteTo);
    setOperationPromotedToType(ISD::STORE, VT, PromoteTo);
  }

  // Mark vector float intrinsics as expand.
  if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64) {
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);

    // But we do support custom-lowering for FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, VT, Custom);
  }

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
  setOperationAction(ISD::SRA, VT, Custom);
  setOperationAction(ISD::SRL, VT, Custom);
  setOperationAction(ISD::SHL, VT, Custom);
  setOperationAction(ISD::AND, VT, Custom);
  setOperationAction(ISD::OR, VT, Custom);
  setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);

  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  for (MVT InnerVT : MVT::all_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);

  // CNT supports only B element sizes.
  if (VT != MVT::v8i8 && VT != MVT::v16i8)
    setOperationAction(ISD::CTPOP, VT, Expand);

  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);

  setOperationAction(ISD::FP_TO_SINT, VT, Custom);
  setOperationAction(ISD::FP_TO_UINT, VT, Custom);

  if (!VT.isFloatingPoint())
    setOperationAction(ISD::ABS, VT, Legal);

  // [SU][MIN|MAX] are available for all NEON types apart from i64.
  if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64)
    for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);

  // F[MIN|MAX][NUM|NAN] are available for all FP NEON types.
  if (VT.isFloatingPoint() &&
      (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()))
    for (unsigned Opcode : {ISD::FMINNAN, ISD::FMAXNAN,
                            ISD::FMINNUM, ISD::FMAXNUM})
      setOperationAction(Opcode, VT, Legal);

  if (Subtarget->isLittleEndian()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
    }
  }
}

void AArch64TargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &AArch64::FPR64RegClass);
  addTypeForNEON(VT, MVT::v2i32);
}

void AArch64TargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &AArch64::FPR128RegClass);
  addTypeForNEON(VT, MVT::v4i32);
}

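// Note (descriptive addition, not in the original source): scalar comparisons
// produce an i32 truth value, while vector comparisons produce a vector of
// lane-sized integer masks, e.g. a setcc on v4f32 yields v4i32.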
EVT AArch64TargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                              EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

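// A minimal worked example of the optimization below (illustrative, not from
// the original source): suppose Op is (and x, 0x5) on i32 and only the low
// three bits are demanded. 0x5 (0b101) is not a valid logical immediate, but
// copying the highest demanded bit into the non-demanded bits 3..31 yields
// 0xFFFFFFFD, whose bitwise NOT (0x2) is a shifted mask. The AND can then be
// re-encoded as a single ANDWri with that immediate.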
static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm,
                               const APInt &Demanded,
                               TargetLowering::TargetLoweringOpt &TLO,
                               unsigned NewOpc) {
  uint64_t OldImm = Imm, NewImm, Enc;
  uint64_t Mask = ((uint64_t)(-1LL) >> (64 - Size)), OrigMask = Mask;

  // Return if the immediate is already all zeros, all ones, a bimm32 or a
  // bimm64.
  if (Imm == 0 || Imm == Mask ||
      AArch64_AM::isLogicalImmediate(Imm & Mask, Size))
    return false;

  unsigned EltSize = Size;
  uint64_t DemandedBits = Demanded.getZExtValue();

  // Clear bits that are not demanded.
  Imm &= DemandedBits;

  while (true) {
    // The goal here is to set the non-demanded bits in a way that minimizes
    // the number of switches between 0 and 1. In order to achieve this goal,
    // we set the non-demanded bits to the value of the preceding demanded
    // bits. For example, if we have an immediate 0bx10xx0x1 ('x' indicates a
    // non-demanded bit), we copy bit0 (1) to the least significant 'x',
    // bit2 (0) to 'xx', and bit6 (1) to the most significant 'x'.
    // The final result is 0b11000011.
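    // Tracing that example through the code below (illustrative, not from
    // the original source), with EltSize == 8: DemandedBits = 0b01100101 and
    // Imm = 0b01000001 after masking, so NonDemandedBits = 0b10011010,
    // InvertedImm = 0b00100100, RotatedImm = 0b00001000, and
    // Sum = 0b10100010 with no carry out of bit 7. Hence
    // Ones = 0b10000010 and NewImm = 0b11000011, as described above.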
    uint64_t NonDemandedBits = ~DemandedBits;
    uint64_t InvertedImm = ~Imm & DemandedBits;
    uint64_t RotatedImm =
        ((InvertedImm << 1) | (InvertedImm >> (EltSize - 1) & 1)) &
        NonDemandedBits;
    uint64_t Sum = RotatedImm + NonDemandedBits;
    bool Carry = NonDemandedBits & ~Sum & (1ULL << (EltSize - 1));
    uint64_t Ones = (Sum + Carry) & NonDemandedBits;
    NewImm = (Imm | Ones) & Mask;

    // If NewImm or its bitwise NOT is a shifted mask, it is a bitmask
    // immediate or all-ones or all-zeros, in which case we can stop
    // searching. Otherwise, we halve the element size and continue the
    // search.
    if (isShiftedMask_64(NewImm) || isShiftedMask_64(~(NewImm | ~Mask)))
      break;

    // We cannot shrink the element size any further if it is 2 bits.
    if (EltSize == 2)
      return false;

    EltSize /= 2;
    Mask >>= EltSize;
    uint64_t Hi = Imm >> EltSize, DemandedBitsHi = DemandedBits >> EltSize;

    // Return if there is a mismatch in any of the demanded bits of Imm and
    // Hi.
    if (((Imm ^ Hi) & (DemandedBits & DemandedBitsHi) & Mask) != 0)
      return false;

    // Merge the upper and lower halves of Imm and DemandedBits.
    Imm |= Hi;
    DemandedBits |= DemandedBitsHi;
  }

  ++NumOptimizedImms;

  // Replicate the element across the register width.
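  // (Illustrative, not from the original source: if the search settled on an
  // 8-bit element 0xC3 and Size is 32, the final immediate is 0xC3C3C3C3.)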
  while (EltSize < Size) {
    NewImm |= NewImm << EltSize;
    EltSize *= 2;
  }

  (void)OldImm;
  assert(((OldImm ^ NewImm) & Demanded.getZExtValue()) == 0 &&
         "demanded bits should never be altered");
  assert(OldImm != NewImm && "the new imm shouldn't be equal to the old imm");

  // Create the new constant immediate node.
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue New;

  // If the new constant immediate is all-zeros or all-ones, let the target
  // independent DAG combine optimize this node.
  if (NewImm == 0 || NewImm == OrigMask) {
    New = TLO.DAG.getNode(Op.getOpcode(), DL, VT, Op.getOperand(0),
                          TLO.DAG.getConstant(NewImm, DL, VT));
  // Otherwise, create a machine node so that target independent DAG combine
  // doesn't undo this optimization.
  } else {
    Enc = AArch64_AM::encodeLogicalImmediate(NewImm, Size);
    SDValue EncConst = TLO.DAG.getTargetConstant(Enc, DL, VT);
    New = SDValue(
        TLO.DAG.getMachineNode(NewOpc, DL, VT, Op.getOperand(0), EncConst), 0);
  }

  return TLO.CombineTo(Op, New);
}

bool AArch64TargetLowering::targetShrinkDemandedConstant(
    SDValue Op, const APInt &Demanded, TargetLoweringOpt &TLO) const {
  // Delay this optimization to as late as possible.
  if (!TLO.LegalOps)
    return false;

  if (!EnableOptimizeLogicalImm)
    return false;

  EVT VT = Op.getValueType();
  if (VT.isVector())
    return false;

  unsigned Size = VT.getSizeInBits();
  assert((Size == 32 || Size == 64) &&
         "i32 or i64 is expected after legalization.");

  // Exit early if we demand all bits.
  if (Demanded.countPopulation() == Size)
    return false;

  unsigned NewOpc;
  switch (Op.getOpcode()) {
  default:
    return false;
  case ISD::AND:
    NewOpc = Size == 32 ? AArch64::ANDWri : AArch64::ANDXri;
    break;
  case ISD::OR:
    NewOpc = Size == 32 ? AArch64::ORRWri : AArch64::ORRXri;
    break;
  case ISD::XOR:
    NewOpc = Size == 32 ? AArch64::EORWri : AArch64::EORXri;
    break;
  }
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!C)
    return false;
  uint64_t Imm = C->getZExtValue();
  return optimizeLogicalImm(Op, Size, Imm, Demanded, TLO, NewOpc);
}

/// computeKnownBitsForTargetNode - Determine which of the bits specified in
/// Mask are known to be either zero or one, and return them in Known.
void AArch64TargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known,
    const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    break;
  case AArch64ISD::CSEL: {
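    // Descriptive note (not in the original source): CSEL picks one of its
    // first two operands, so a bit is known here only when it is known to
    // have the same value in both of them.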
    KnownBits Known2;
    DAG.computeKnownBits(Op->getOperand(0), Known, Depth + 1);
    DAG.computeKnownBits(Op->getOperand(1), Known2, Depth + 1);
    Known.Zero &= Known2.Zero;
    Known.One &= Known2.One;
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
    Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
    switch (IntID) {
    default: return;
    case Intrinsic::aarch64_ldaxr:
    case Intrinsic::aarch64_ldxr: {
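      // Descriptive note (not in the original source): LDXR/LDAXR zero-extend
      // the loaded value to the full register width, so all bits above the
      // memory type's width are known to be zero.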
      unsigned BitWidth = Known.getBitWidth();
      EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
      unsigned MemBits = VT.getScalarSizeInBits();
      Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
      return;
    }
    }
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_neon_umaxv:
    case Intrinsic::aarch64_neon_uminv: {
      // Figure out the datatype of the vector operand. The UMINV instruction
      // will zero extend the result, so we can mark as known zero all the
      // bits larger than the element datatype. 32-bit or larger doesn't need
      // this as those are legal types and will be handled by isel directly.
      MVT VT = Op.getOperand(1).getValueType().getSimpleVT();
      unsigned BitWidth = Known.getBitWidth();
      if (VT == MVT::v8i8 || VT == MVT::v16i8) {
        assert(BitWidth >= 8 && "Unexpected width!");
        APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 8);
        Known.Zero |= Mask;
      } else if (VT == MVT::v4i16 || VT == MVT::v8i16) {
        assert(BitWidth >= 16 && "Unexpected width!");
        APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
        Known.Zero |= Mask;
      }
      break;
    } break;
    }
  }
  }
}
1047 | ||||
1048 | MVT AArch64TargetLowering::getScalarShiftAmountTy(const DataLayout &DL, | |||
1049 | EVT) const { | |||
1050 | return MVT::i64; | |||
1051 | } | |||
1052 | ||||
1053 | bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(EVT VT, | |||
1054 | unsigned AddrSpace, | |||
1055 | unsigned Align, | |||
1056 | bool *Fast) const { | |||
1057 | if (Subtarget->requiresStrictAlign()) | |||
1058 | return false; | |||
1059 | ||||
1060 | if (Fast) { | |||
1061 | // Some CPUs are fine with unaligned stores except for 128-bit ones. | |||
1062 | *Fast = !Subtarget->isMisaligned128StoreSlow() || VT.getStoreSize() != 16 || | |||
1063 | // See comments in performSTORECombine() for more details about | |||
1064 | // these conditions. | |||
1065 | ||||
1066 | // Code that uses clang vector extensions can mark that it | |||
1067 | // wants unaligned accesses to be treated as fast by | |||
1068 | // underspecifying alignment to be 1 or 2. | |||
1069 | Align <= 2 || | |||
1070 | ||||
1071 | // Disregard v2i64. Memcpy lowering produces those and splitting | |||
1072 | // them regresses performance on micro-benchmarks and olden/bh. | |||
1073 | VT == MVT::v2i64; | |||
1074 | } | |||
1075 | return true; | |||
1076 | } | |||
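// Editor's sketch of the *Fast predicate above as a standalone function; the
// parameters are stand-ins for the subtarget query and the value type.
static bool unalignedIsFast(bool Slow128Store, unsigned StoreSizeBytes,
                            unsigned Align, bool IsV2i64) {
  // Fast unless this is a 128-bit store on a core where those are slow and
  // neither the low-alignment escape nor the v2i64 escape applies.
  return !Slow128Store || StoreSizeBytes != 16 || Align <= 2 || IsV2i64;
}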
1077 | ||||
1078 | FastISel * | |||
1079 | AArch64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, | |||
1080 | const TargetLibraryInfo *libInfo) const { | |||
1081 | return AArch64::createFastISel(funcInfo, libInfo); | |||
1082 | } | |||
1083 | ||||
1084 | const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const { | |||
1085 | switch ((AArch64ISD::NodeType)Opcode) { | |||
1086 | case AArch64ISD::FIRST_NUMBER: break; | |||
1087 | case AArch64ISD::CALL: return "AArch64ISD::CALL"; | |||
1088 | case AArch64ISD::ADRP: return "AArch64ISD::ADRP"; | |||
1089 | case AArch64ISD::ADDlow: return "AArch64ISD::ADDlow"; | |||
1090 | case AArch64ISD::LOADgot: return "AArch64ISD::LOADgot"; | |||
1091 | case AArch64ISD::RET_FLAG: return "AArch64ISD::RET_FLAG"; | |||
1092 | case AArch64ISD::BRCOND: return "AArch64ISD::BRCOND"; | |||
1093 | case AArch64ISD::CSEL: return "AArch64ISD::CSEL"; | |||
1094 | case AArch64ISD::FCSEL: return "AArch64ISD::FCSEL"; | |||
1095 | case AArch64ISD::CSINV: return "AArch64ISD::CSINV"; | |||
1096 | case AArch64ISD::CSNEG: return "AArch64ISD::CSNEG"; | |||
1097 | case AArch64ISD::CSINC: return "AArch64ISD::CSINC"; | |||
1098 | case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER"; | |||
1099 | case AArch64ISD::TLSDESC_CALLSEQ: return "AArch64ISD::TLSDESC_CALLSEQ"; | |||
1100 | case AArch64ISD::ADC: return "AArch64ISD::ADC"; | |||
1101 | case AArch64ISD::SBC: return "AArch64ISD::SBC"; | |||
1102 | case AArch64ISD::ADDS: return "AArch64ISD::ADDS"; | |||
1103 | case AArch64ISD::SUBS: return "AArch64ISD::SUBS"; | |||
1104 | case AArch64ISD::ADCS: return "AArch64ISD::ADCS"; | |||
1105 | case AArch64ISD::SBCS: return "AArch64ISD::SBCS"; | |||
1106 | case AArch64ISD::ANDS: return "AArch64ISD::ANDS"; | |||
1107 | case AArch64ISD::CCMP: return "AArch64ISD::CCMP"; | |||
1108 | case AArch64ISD::CCMN: return "AArch64ISD::CCMN"; | |||
1109 | case AArch64ISD::FCCMP: return "AArch64ISD::FCCMP"; | |||
1110 | case AArch64ISD::FCMP: return "AArch64ISD::FCMP"; | |||
1111 | case AArch64ISD::DUP: return "AArch64ISD::DUP"; | |||
1112 | case AArch64ISD::DUPLANE8: return "AArch64ISD::DUPLANE8"; | |||
1113 | case AArch64ISD::DUPLANE16: return "AArch64ISD::DUPLANE16"; | |||
1114 | case AArch64ISD::DUPLANE32: return "AArch64ISD::DUPLANE32"; | |||
1115 | case AArch64ISD::DUPLANE64: return "AArch64ISD::DUPLANE64"; | |||
1116 | case AArch64ISD::MOVI: return "AArch64ISD::MOVI"; | |||
1117 | case AArch64ISD::MOVIshift: return "AArch64ISD::MOVIshift"; | |||
1118 | case AArch64ISD::MOVIedit: return "AArch64ISD::MOVIedit"; | |||
1119 | case AArch64ISD::MOVImsl: return "AArch64ISD::MOVImsl"; | |||
1120 | case AArch64ISD::FMOV: return "AArch64ISD::FMOV"; | |||
1121 | case AArch64ISD::MVNIshift: return "AArch64ISD::MVNIshift"; | |||
1122 | case AArch64ISD::MVNImsl: return "AArch64ISD::MVNImsl"; | |||
1123 | case AArch64ISD::BICi: return "AArch64ISD::BICi"; | |||
1124 | case AArch64ISD::ORRi: return "AArch64ISD::ORRi"; | |||
1125 | case AArch64ISD::BSL: return "AArch64ISD::BSL"; | |||
1126 | case AArch64ISD::NEG: return "AArch64ISD::NEG"; | |||
1127 | case AArch64ISD::EXTR: return "AArch64ISD::EXTR"; | |||
1128 | case AArch64ISD::ZIP1: return "AArch64ISD::ZIP1"; | |||
1129 | case AArch64ISD::ZIP2: return "AArch64ISD::ZIP2"; | |||
1130 | case AArch64ISD::UZP1: return "AArch64ISD::UZP1"; | |||
1131 | case AArch64ISD::UZP2: return "AArch64ISD::UZP2"; | |||
1132 | case AArch64ISD::TRN1: return "AArch64ISD::TRN1"; | |||
1133 | case AArch64ISD::TRN2: return "AArch64ISD::TRN2"; | |||
1134 | case AArch64ISD::REV16: return "AArch64ISD::REV16"; | |||
1135 | case AArch64ISD::REV32: return "AArch64ISD::REV32"; | |||
1136 | case AArch64ISD::REV64: return "AArch64ISD::REV64"; | |||
1137 | case AArch64ISD::EXT: return "AArch64ISD::EXT"; | |||
1138 | case AArch64ISD::VSHL: return "AArch64ISD::VSHL"; | |||
1139 | case AArch64ISD::VLSHR: return "AArch64ISD::VLSHR"; | |||
1140 | case AArch64ISD::VASHR: return "AArch64ISD::VASHR"; | |||
1141 | case AArch64ISD::CMEQ: return "AArch64ISD::CMEQ"; | |||
1142 | case AArch64ISD::CMGE: return "AArch64ISD::CMGE"; | |||
1143 | case AArch64ISD::CMGT: return "AArch64ISD::CMGT"; | |||
1144 | case AArch64ISD::CMHI: return "AArch64ISD::CMHI"; | |||
1145 | case AArch64ISD::CMHS: return "AArch64ISD::CMHS"; | |||
1146 | case AArch64ISD::FCMEQ: return "AArch64ISD::FCMEQ"; | |||
1147 | case AArch64ISD::FCMGE: return "AArch64ISD::FCMGE"; | |||
1148 | case AArch64ISD::FCMGT: return "AArch64ISD::FCMGT"; | |||
1149 | case AArch64ISD::CMEQz: return "AArch64ISD::CMEQz"; | |||
1150 | case AArch64ISD::CMGEz: return "AArch64ISD::CMGEz"; | |||
1151 | case AArch64ISD::CMGTz: return "AArch64ISD::CMGTz"; | |||
1152 | case AArch64ISD::CMLEz: return "AArch64ISD::CMLEz"; | |||
1153 | case AArch64ISD::CMLTz: return "AArch64ISD::CMLTz"; | |||
1154 | case AArch64ISD::FCMEQz: return "AArch64ISD::FCMEQz"; | |||
1155 | case AArch64ISD::FCMGEz: return "AArch64ISD::FCMGEz"; | |||
1156 | case AArch64ISD::FCMGTz: return "AArch64ISD::FCMGTz"; | |||
1157 | case AArch64ISD::FCMLEz: return "AArch64ISD::FCMLEz"; | |||
1158 | case AArch64ISD::FCMLTz: return "AArch64ISD::FCMLTz"; | |||
1159 | case AArch64ISD::SADDV: return "AArch64ISD::SADDV"; | |||
1160 | case AArch64ISD::UADDV: return "AArch64ISD::UADDV"; | |||
1161 | case AArch64ISD::SMINV: return "AArch64ISD::SMINV"; | |||
1162 | case AArch64ISD::UMINV: return "AArch64ISD::UMINV"; | |||
1163 | case AArch64ISD::SMAXV: return "AArch64ISD::SMAXV"; | |||
1164 | case AArch64ISD::UMAXV: return "AArch64ISD::UMAXV"; | |||
1165 | case AArch64ISD::NOT: return "AArch64ISD::NOT"; | |||
1166 | case AArch64ISD::BIT: return "AArch64ISD::BIT"; | |||
1167 | case AArch64ISD::CBZ: return "AArch64ISD::CBZ"; | |||
1168 | case AArch64ISD::CBNZ: return "AArch64ISD::CBNZ"; | |||
1169 | case AArch64ISD::TBZ: return "AArch64ISD::TBZ"; | |||
1170 | case AArch64ISD::TBNZ: return "AArch64ISD::TBNZ"; | |||
1171 | case AArch64ISD::TC_RETURN: return "AArch64ISD::TC_RETURN"; | |||
1172 | case AArch64ISD::PREFETCH: return "AArch64ISD::PREFETCH"; | |||
1173 | case AArch64ISD::SITOF: return "AArch64ISD::SITOF"; | |||
1174 | case AArch64ISD::UITOF: return "AArch64ISD::UITOF"; | |||
1175 | case AArch64ISD::NVCAST: return "AArch64ISD::NVCAST"; | |||
1176 | case AArch64ISD::SQSHL_I: return "AArch64ISD::SQSHL_I"; | |||
1177 | case AArch64ISD::UQSHL_I: return "AArch64ISD::UQSHL_I"; | |||
1178 | case AArch64ISD::SRSHR_I: return "AArch64ISD::SRSHR_I"; | |||
1179 | case AArch64ISD::URSHR_I: return "AArch64ISD::URSHR_I"; | |||
1180 | case AArch64ISD::SQSHLU_I: return "AArch64ISD::SQSHLU_I"; | |||
1181 | case AArch64ISD::WrapperLarge: return "AArch64ISD::WrapperLarge"; | |||
1182 | case AArch64ISD::LD2post: return "AArch64ISD::LD2post"; | |||
1183 | case AArch64ISD::LD3post: return "AArch64ISD::LD3post"; | |||
1184 | case AArch64ISD::LD4post: return "AArch64ISD::LD4post"; | |||
1185 | case AArch64ISD::ST2post: return "AArch64ISD::ST2post"; | |||
1186 | case AArch64ISD::ST3post: return "AArch64ISD::ST3post"; | |||
1187 | case AArch64ISD::ST4post: return "AArch64ISD::ST4post"; | |||
1188 | case AArch64ISD::LD1x2post: return "AArch64ISD::LD1x2post"; | |||
1189 | case AArch64ISD::LD1x3post: return "AArch64ISD::LD1x3post"; | |||
1190 | case AArch64ISD::LD1x4post: return "AArch64ISD::LD1x4post"; | |||
1191 | case AArch64ISD::ST1x2post: return "AArch64ISD::ST1x2post"; | |||
1192 | case AArch64ISD::ST1x3post: return "AArch64ISD::ST1x3post"; | |||
1193 | case AArch64ISD::ST1x4post: return "AArch64ISD::ST1x4post"; | |||
1194 | case AArch64ISD::LD1DUPpost: return "AArch64ISD::LD1DUPpost"; | |||
1195 | case AArch64ISD::LD2DUPpost: return "AArch64ISD::LD2DUPpost"; | |||
1196 | case AArch64ISD::LD3DUPpost: return "AArch64ISD::LD3DUPpost"; | |||
1197 | case AArch64ISD::LD4DUPpost: return "AArch64ISD::LD4DUPpost"; | |||
1198 | case AArch64ISD::LD1LANEpost: return "AArch64ISD::LD1LANEpost"; | |||
1199 | case AArch64ISD::LD2LANEpost: return "AArch64ISD::LD2LANEpost"; | |||
1200 | case AArch64ISD::LD3LANEpost: return "AArch64ISD::LD3LANEpost"; | |||
1201 | case AArch64ISD::LD4LANEpost: return "AArch64ISD::LD4LANEpost"; | |||
1202 | case AArch64ISD::ST2LANEpost: return "AArch64ISD::ST2LANEpost"; | |||
1203 | case AArch64ISD::ST3LANEpost: return "AArch64ISD::ST3LANEpost"; | |||
1204 | case AArch64ISD::ST4LANEpost: return "AArch64ISD::ST4LANEpost"; | |||
1205 | case AArch64ISD::SMULL: return "AArch64ISD::SMULL"; | |||
1206 | case AArch64ISD::UMULL: return "AArch64ISD::UMULL"; | |||
1207 | case AArch64ISD::FRECPE: return "AArch64ISD::FRECPE"; | |||
1208 | case AArch64ISD::FRECPS: return "AArch64ISD::FRECPS"; | |||
1209 | case AArch64ISD::FRSQRTE: return "AArch64ISD::FRSQRTE"; | |||
1210 | case AArch64ISD::FRSQRTS: return "AArch64ISD::FRSQRTS"; | |||
1211 | } | |||
1212 | return nullptr; | |||
1213 | } | |||
1214 | ||||
1215 | MachineBasicBlock * | |||
1216 | AArch64TargetLowering::EmitF128CSEL(MachineInstr &MI, | |||
1217 | MachineBasicBlock *MBB) const { | |||
1218 | // We materialise the F128CSEL pseudo-instruction as some control flow and a | |||
1219 | // phi node: | |||
1220 | ||||
1221 | // OrigBB: | |||
1222 | // [... previous instrs leading to comparison ...] | |||
1223 | // b.ne TrueBB | |||
1224 | // b EndBB | |||
1225 | // TrueBB: | |||
1226 | // ; Fallthrough | |||
1227 | // EndBB: | |||
1228 | // Dest = PHI [IfTrue, TrueBB], [IfFalse, OrigBB] | |||
1229 | ||||
1230 | MachineFunction *MF = MBB->getParent(); | |||
1231 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | |||
1232 | const BasicBlock *LLVM_BB = MBB->getBasicBlock(); | |||
1233 | DebugLoc DL = MI.getDebugLoc(); | |||
1234 | MachineFunction::iterator It = ++MBB->getIterator(); | |||
1235 | ||||
1236 | unsigned DestReg = MI.getOperand(0).getReg(); | |||
1237 | unsigned IfTrueReg = MI.getOperand(1).getReg(); | |||
1238 | unsigned IfFalseReg = MI.getOperand(2).getReg(); | |||
1239 | unsigned CondCode = MI.getOperand(3).getImm(); | |||
1240 | bool NZCVKilled = MI.getOperand(4).isKill(); | |||
1241 | ||||
1242 | MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB); | |||
1243 | MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB); | |||
1244 | MF->insert(It, TrueBB); | |||
1245 | MF->insert(It, EndBB); | |||
1246 | ||||
1247 | // Transfer rest of current basic-block to EndBB | |||
1248 | EndBB->splice(EndBB->begin(), MBB, std::next(MachineBasicBlock::iterator(MI)), | |||
1249 | MBB->end()); | |||
1250 | EndBB->transferSuccessorsAndUpdatePHIs(MBB); | |||
1251 | ||||
1252 | BuildMI(MBB, DL, TII->get(AArch64::Bcc)).addImm(CondCode).addMBB(TrueBB); | |||
1253 | BuildMI(MBB, DL, TII->get(AArch64::B)).addMBB(EndBB); | |||
1254 | MBB->addSuccessor(TrueBB); | |||
1255 | MBB->addSuccessor(EndBB); | |||
1256 | ||||
1257 | // TrueBB falls through to the end. | |||
1258 | TrueBB->addSuccessor(EndBB); | |||
1259 | ||||
1260 | if (!NZCVKilled) { | |||
1261 | TrueBB->addLiveIn(AArch64::NZCV); | |||
1262 | EndBB->addLiveIn(AArch64::NZCV); | |||
1263 | } | |||
1264 | ||||
1265 | BuildMI(*EndBB, EndBB->begin(), DL, TII->get(AArch64::PHI), DestReg) | |||
1266 | .addReg(IfTrueReg) | |||
1267 | .addMBB(TrueBB) | |||
1268 | .addReg(IfFalseReg) | |||
1269 | .addMBB(MBB); | |||
1270 | ||||
1271 | MI.eraseFromParent(); | |||
1272 | return EndBB; | |||
1273 | } | |||
1274 | ||||
1275 | MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter( | |||
1276 | MachineInstr &MI, MachineBasicBlock *BB) const { | |||
1277 | switch (MI.getOpcode()) { | |||
1278 | default: | |||
1279 | #ifndef NDEBUG | |||
1280 | MI.dump(); | |||
1281 | #endif | |||
1282 | llvm_unreachable("Unexpected instruction for custom inserter!"); | |||
1283 | ||||
1284 | case AArch64::F128CSEL: | |||
1285 | return EmitF128CSEL(MI, BB); | |||
1286 | ||||
1287 | case TargetOpcode::STACKMAP: | |||
1288 | case TargetOpcode::PATCHPOINT: | |||
1289 | return emitPatchPoint(MI, BB); | |||
1290 | } | |||
1291 | } | |||
1292 | ||||
1293 | //===----------------------------------------------------------------------===// | |||
1294 | // AArch64 Lowering private implementation. | |||
1295 | //===----------------------------------------------------------------------===// | |||
1296 | ||||
1297 | //===----------------------------------------------------------------------===// | |||
1298 | // Lowering Code | |||
1299 | //===----------------------------------------------------------------------===// | |||
1300 | ||||
1301 | /// changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64 | |||
1302 | /// CC | |||
1303 | static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC) { | |||
1304 | switch (CC) { | |||
1305 | default: | |||
1306 | llvm_unreachable("Unknown condition code!"); | |||
1307 | case ISD::SETNE: | |||
1308 | return AArch64CC::NE; | |||
1309 | case ISD::SETEQ: | |||
1310 | return AArch64CC::EQ; | |||
1311 | case ISD::SETGT: | |||
1312 | return AArch64CC::GT; | |||
1313 | case ISD::SETGE: | |||
1314 | return AArch64CC::GE; | |||
1315 | case ISD::SETLT: | |||
1316 | return AArch64CC::LT; | |||
1317 | case ISD::SETLE: | |||
1318 | return AArch64CC::LE; | |||
1319 | case ISD::SETUGT: | |||
1320 | return AArch64CC::HI; | |||
1321 | case ISD::SETUGE: | |||
1322 | return AArch64CC::HS; | |||
1323 | case ISD::SETULT: | |||
1324 | return AArch64CC::LO; | |||
1325 | case ISD::SETULE: | |||
1326 | return AArch64CC::LS; | |||
1327 | } | |||
1328 | } | |||
1329 | ||||
1330 | /// changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC. | |||
1331 | static void changeFPCCToAArch64CC(ISD::CondCode CC, | |||
1332 | AArch64CC::CondCode &CondCode, | |||
1333 | AArch64CC::CondCode &CondCode2) { | |||
1334 | CondCode2 = AArch64CC::AL; | |||
1335 | switch (CC) { | |||
1336 | default: | |||
1337 | llvm_unreachable("Unknown FP condition!"); | |||
1338 | case ISD::SETEQ: | |||
1339 | case ISD::SETOEQ: | |||
1340 | CondCode = AArch64CC::EQ; | |||
1341 | break; | |||
1342 | case ISD::SETGT: | |||
1343 | case ISD::SETOGT: | |||
1344 | CondCode = AArch64CC::GT; | |||
1345 | break; | |||
1346 | case ISD::SETGE: | |||
1347 | case ISD::SETOGE: | |||
1348 | CondCode = AArch64CC::GE; | |||
1349 | break; | |||
1350 | case ISD::SETOLT: | |||
1351 | CondCode = AArch64CC::MI; | |||
1352 | break; | |||
1353 | case ISD::SETOLE: | |||
1354 | CondCode = AArch64CC::LS; | |||
1355 | break; | |||
1356 | case ISD::SETONE: | |||
1357 | CondCode = AArch64CC::MI; | |||
1358 | CondCode2 = AArch64CC::GT; | |||
1359 | break; | |||
1360 | case ISD::SETO: | |||
1361 | CondCode = AArch64CC::VC; | |||
1362 | break; | |||
1363 | case ISD::SETUO: | |||
1364 | CondCode = AArch64CC::VS; | |||
1365 | break; | |||
1366 | case ISD::SETUEQ: | |||
1367 | CondCode = AArch64CC::EQ; | |||
1368 | CondCode2 = AArch64CC::VS; | |||
1369 | break; | |||
1370 | case ISD::SETUGT: | |||
1371 | CondCode = AArch64CC::HI; | |||
1372 | break; | |||
1373 | case ISD::SETUGE: | |||
1374 | CondCode = AArch64CC::PL; | |||
1375 | break; | |||
1376 | case ISD::SETLT: | |||
1377 | case ISD::SETULT: | |||
1378 | CondCode = AArch64CC::LT; | |||
1379 | break; | |||
1380 | case ISD::SETLE: | |||
1381 | case ISD::SETULE: | |||
1382 | CondCode = AArch64CC::LE; | |||
1383 | break; | |||
1384 | case ISD::SETNE: | |||
1385 | case ISD::SETUNE: | |||
1386 | CondCode = AArch64CC::NE; | |||
1387 | break; | |||
1388 | } | |||
1389 | } | |||
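// Editor's illustration of why some mappings above need two condition codes:
// an ordered "one" comparison is (a < b) || (a > b) under IEEE rules, false
// whenever either operand is NaN -- hence the MI/GT pair for SETONE.
static bool setONE(double a, double b) { return a < b || a > b; }
// setONE(1.0, 2.0) == true; with a NaN operand both compares fail, so false.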
1390 | ||||
1391 | /// Convert a DAG fp condition code to an AArch64 CC. | |||
1392 | /// This differs from changeFPCCToAArch64CC in that it returns cond codes that | |||
1393 | /// should be AND'ed instead of OR'ed. | |||
1394 | static void changeFPCCToANDAArch64CC(ISD::CondCode CC, | |||
1395 | AArch64CC::CondCode &CondCode, | |||
1396 | AArch64CC::CondCode &CondCode2) { | |||
1397 | CondCode2 = AArch64CC::AL; | |||
1398 | switch (CC) { | |||
1399 | default: | |||
1400 | changeFPCCToAArch64CC(CC, CondCode, CondCode2); | |||
1401 | assert(CondCode2 == AArch64CC::AL); | |||
1402 | break; | |||
1403 | case ISD::SETONE: | |||
1404 | // (a one b) | |||
1405 | // == ((a olt b) || (a ogt b)) | |||
1406 | // == ((a ord b) && (a une b)) | |||
1407 | CondCode = AArch64CC::VC; | |||
1408 | CondCode2 = AArch64CC::NE; | |||
1409 | break; | |||
1410 | case ISD::SETUEQ: | |||
1411 | // (a ueq b) | |||
1412 | // == ((a uno b) || (a oeq b)) | |||
1413 | // == ((a ule b) && (a uge b)) | |||
1414 | CondCode = AArch64CC::PL; | |||
1415 | CondCode2 = AArch64CC::LE; | |||
1416 | break; | |||
1417 | } | |||
1418 | } | |||
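// Editor's check of the SETONE rewrite quoted above, in C++ semantics where
// operator!= is IEEE "unordered or not equal" (une).
#include <cmath>
static bool oneAsOr(double a, double b)  { return a < b || a > b; }
static bool oneAsAnd(double a, double b) {
  return !std::isunordered(a, b) && a != b;  // (a ord b) && (a une b)
}
// The two agree for every input, NaNs included; the AND form is what a CCMP
// chain can evaluate (test VC first, then NE).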
1419 | ||||
1420 | /// changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 | |||
1421 | /// CC usable with the vector instructions. Fewer operations are available | |||
1422 | /// without a real NZCV register, so we have to use less efficient combinations | |||
1423 | /// to get the same effect. | |||
1424 | static void changeVectorFPCCToAArch64CC(ISD::CondCode CC, | |||
1425 | AArch64CC::CondCode &CondCode, | |||
1426 | AArch64CC::CondCode &CondCode2, | |||
1427 | bool &Invert) { | |||
1428 | Invert = false; | |||
1429 | switch (CC) { | |||
1430 | default: | |||
1431 | // Mostly the scalar mappings work fine. | |||
1432 | changeFPCCToAArch64CC(CC, CondCode, CondCode2); | |||
1433 | break; | |||
1434 | case ISD::SETUO: | |||
1435 | Invert = true; | |||
1436 | LLVM_FALLTHROUGH; | |||
1437 | case ISD::SETO: | |||
1438 | CondCode = AArch64CC::MI; | |||
1439 | CondCode2 = AArch64CC::GE; | |||
1440 | break; | |||
1441 | case ISD::SETUEQ: | |||
1442 | case ISD::SETULT: | |||
1443 | case ISD::SETULE: | |||
1444 | case ISD::SETUGT: | |||
1445 | case ISD::SETUGE: | |||
1446 | // All of the compare-mask comparisons are ordered, but we can switch | |||
1447 | // between the two by a double inversion. E.g. ULE == !OGT. | |||
1448 | Invert = true; | |||
1449 | changeFPCCToAArch64CC(getSetCCInverse(CC, false), CondCode, CondCode2); | |||
1450 | break; | |||
1451 | } | |||
1452 | } | |||
1453 | ||||
1454 | static bool isLegalArithImmed(uint64_t C) { | |||
1455 | // Matches AArch64DAGToDAGISel::SelectArithImmed(). | |||
1456 | bool IsLegal = (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0); | |||
1457 | LLVM_DEBUG(dbgs() << "Is imm " << C | |||
1458 | << " legal: " << (IsLegal ? "yes\n" : "no\n")); | |||
1459 | return IsLegal; | |||
1460 | } | |||
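// Editor's note: the test above accepts AArch64 arithmetic immediates, i.e.
// a 12-bit value optionally shifted left by 12. A few samples against the
// same predicate:
//   C = 4095      -> legal     (fits the 12-bit field)
//   C = 4096      -> legal     (0x001 << 12)
//   C = 4097      -> not legal (would need both halves)
//   C = 0xFFF000  -> legal     (0xFFF << 12)
//   C = 0x1000000 -> not legal (shifted field is only 12 bits wide)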
1461 | ||||
1462 | static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC, | |||
1463 | const SDLoc &dl, SelectionDAG &DAG) { | |||
1464 | EVT VT = LHS.getValueType(); | |||
1465 | const bool FullFP16 = | |||
1466 | static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16(); | |||
1467 | ||||
1468 | if (VT.isFloatingPoint()) { | |||
1469 | assert(VT != MVT::f128); | |||
1470 | if (VT == MVT::f16 && !FullFP16) { | |||
1471 | LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS); | |||
1472 | RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS); | |||
1473 | VT = MVT::f32; | |||
1474 | } | |||
1475 | return DAG.getNode(AArch64ISD::FCMP, dl, VT, LHS, RHS); | |||
1476 | } | |||
1477 | ||||
1478 | // The CMP instruction is just an alias for SUBS, and representing it as | |||
1479 | // SUBS means that it's possible to get CSE with subtract operations. | |||
1480 | // A later phase can perform the optimization of setting the destination | |||
1481 | // register to WZR/XZR if it ends up being unused. | |||
1482 | unsigned Opcode = AArch64ISD::SUBS; | |||
1483 | ||||
1484 | if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) && | |||
1485 | (CC == ISD::SETEQ || CC == ISD::SETNE)) { | |||
1486 | // We'd like to combine a (CMP op1, (sub 0, op2) into a CMN instruction on | |||
1487 | // the grounds that "op1 - (-op2) == op1 + op2". However, the C and V flags | |||
1488 | // can be set differently by this operation. It comes down to whether | |||
1489 | // "SInt(~op2)+1 == SInt(~op2+1)" (and the same for UInt). If they are then | |||
1490 | // everything is fine. If not then the optimization is wrong. Thus general | |||
1491 | // comparisons are only valid if op2 != 0. | |||
1492 | ||||
1493 | // So, finally, the only LLVM-native comparisons that don't mention C and V | |||
1494 | // are SETEQ and SETNE. They're the only ones we can safely use CMN for in | |||
1495 | // the absence of information about op2. | |||
1496 | Opcode = AArch64ISD::ADDS; | |||
1497 | RHS = RHS.getOperand(1); | |||
1498 | } else if (LHS.getOpcode() == ISD::AND && isNullConstant(RHS) && | |||
1499 | !isUnsignedIntSetCC(CC)) { | |||
1500 | // Similarly, (CMP (and X, Y), 0) can be implemented with a TST | |||
1501 | // (a.k.a. ANDS) except that the flags are only guaranteed to work for one | |||
1502 | // of the signed comparisons. | |||
1503 | Opcode = AArch64ISD::ANDS; | |||
1504 | RHS = LHS.getOperand(1); | |||
1505 | LHS = LHS.getOperand(0); | |||
1506 | } | |||
1507 | ||||
1508 | return DAG.getNode(Opcode, dl, DAG.getVTList(VT, MVT_CC), LHS, RHS) | |||
1509 | .getValue(1); | |||
1510 | } | |||
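// Editor's sketch of the flag subtlety described above, modelled on 8-bit
// values: for op2 == 0, CMP (SUBS a, 0) sets the carry flag (AArch64 carry
// means "no borrow"), while the CMN form (ADDS a, 0) clears it, so unsigned
// conditions like HS/LO would flip (V can differ too, e.g. at op2 == INT_MIN).
// Z and N always agree between the two forms, which is why only SETEQ/SETNE
// may use CMN unconditionally.
#include <cstdint>
struct Flags { bool Z, C; };
static Flags subs8(uint8_t a, uint8_t b) {        // CMP a, b
  return {uint8_t(a - b) == 0, a >= b};
}
static Flags adds8(uint8_t a, uint8_t negB) {     // CMN a, b == ADDS a, -b
  return {uint8_t(a + negB) == 0, unsigned(a) + negB > 0xFF};
}
// subs8(5, 0).C == true but adds8(5, 0).C == false: HS/LO disagree; Z agrees.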
1511 | ||||
1512 | /// \defgroup AArch64CCMP CMP;CCMP matching | |||
1513 | /// | |||
1514 | /// These functions deal with the formation of CMP;CCMP;... sequences. | |||
1515 | /// The CCMP/CCMN/FCCMP/FCCMPE instructions allow the conditional execution of | |||
1516 | /// a comparison. They set the NZCV flags to a predefined value if their | |||
1517 | /// predicate is false. This allows expressing arbitrary conjunctions; for | |||
1518 | /// example "cmp 0 (and (setCA (cmp A)) (setCB (cmp B)))" can be | |||
1519 | /// expressed as: | |||
1520 | /// cmp A | |||
1521 | /// ccmp B, inv(CB), CA | |||
1522 | /// check for CB flags | |||
1523 | /// | |||
1524 | /// In general we can create code for arbitrary "... (and (and A B) C)" | |||
1525 | /// sequences. We can also implement some "or" expressions, because "(or A B)" | |||
1526 | /// is equivalent to "not (and (not A) (not B))" and we can implement some | |||
1527 | /// negation operations: | |||
1528 | /// We can negate the results of a single comparison by inverting the flags | |||
1529 | /// used when the predicate fails and inverting the flags tested in the next | |||
1530 | /// instruction; We can also negate the results of the whole previous | |||
1531 | /// conditional compare sequence by inverting the flags tested in the next | |||
1532 | /// instruction. However there is no way to negate the result of a partial | |||
1533 | /// sequence. | |||
1534 | /// | |||
1535 | /// Therefore on encountering an "or" expression we can negate the subtree on | |||
1536 | /// one side and have to be able to push the negate to the leaves of the subtree | |||
1537 | /// on the other side (see also the comments in code). As complete example: | |||
1538 | /// "or (or (setCA (cmp A)) (setCB (cmp B))) | |||
1539 | /// (and (setCC (cmp C)) (setCD (cmp D)))" | |||
1540 | /// is transformed to | |||
1541 | /// "not (and (not (and (setCC (cmp C)) (setCC (cmp D)))) | |||
1542 | /// (and (not (setCA (cmp A)) (not (setCB (cmp B))))))" | |||
1543 | /// and implemented as: | |||
1544 | /// cmp C | |||
1545 | /// ccmp D, inv(CD), CC | |||
1546 | /// ccmp A, CA, inv(CD) | |||
1547 | /// ccmp B, CB, inv(CA) | |||
1548 | /// check for CB flags | |||
1549 | /// A counterexample is "or (and A B) (and C D)" which cannot be implemented | |||
1550 | /// by conditional compare sequences. | |||
1551 | /// @{ | |||
1552 | ||||
1553 | /// Create a conditional comparison; Use CCMP, CCMN or FCCMP as appropriate. | |||
1554 | static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS, | |||
1555 | ISD::CondCode CC, SDValue CCOp, | |||
1556 | AArch64CC::CondCode Predicate, | |||
1557 | AArch64CC::CondCode OutCC, | |||
1558 | const SDLoc &DL, SelectionDAG &DAG) { | |||
1559 | unsigned Opcode = 0; | |||
1560 | const bool FullFP16 = | |||
1561 | static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16(); | |||
1562 | ||||
1563 | if (LHS.getValueType().isFloatingPoint()) { | |||
1564 | assert(LHS.getValueType() != MVT::f128); | |||
1565 | if (LHS.getValueType() == MVT::f16 && !FullFP16) { | |||
1566 | LHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, LHS); | |||
1567 | RHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, RHS); | |||
1568 | } | |||
1569 | Opcode = AArch64ISD::FCCMP; | |||
1570 | } else if (RHS.getOpcode() == ISD::SUB) { | |||
1571 | SDValue SubOp0 = RHS.getOperand(0); | |||
1572 | if (isNullConstant(SubOp0) && (CC == ISD::SETEQ || CC == ISD::SETNE)) { | |||
1573 | // See emitComparison() on why we can only do this for SETEQ and SETNE. | |||
1574 | Opcode = AArch64ISD::CCMN; | |||
1575 | RHS = RHS.getOperand(1); | |||
1576 | } | |||
1577 | } | |||
1578 | if (Opcode == 0) | |||
1579 | Opcode = AArch64ISD::CCMP; | |||
1580 | ||||
1581 | SDValue Condition = DAG.getConstant(Predicate, DL, MVT_CC); | |||
1582 | AArch64CC::CondCode InvOutCC = AArch64CC::getInvertedCondCode(OutCC); | |||
1583 | unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvOutCC); | |||
1584 | SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32); | |||
1585 | return DAG.getNode(Opcode, DL, MVT_CC, LHS, RHS, NZCVOp, Condition, CCOp); | |||
1586 | } | |||
1587 | ||||
1588 | /// Returns true if @p Val is a tree of AND/OR/SETCC operations. | |||
1589 | /// CanNegate is set to true if we can push a negate operation through | |||
1590 | /// the tree in a way that we are left with AND operations and negate operations | |||
1591 | /// at the leaves only, i.e. "not (or (or x y) z)" can be changed to | |||
1592 | /// "and (and (not x) (not y)) (not z)"; "not (or (and x y) z)" cannot be | |||
1593 | /// brought into such a form. | |||
1594 | static bool isConjunctionDisjunctionTree(const SDValue Val, bool &CanNegate, | |||
1595 | unsigned Depth = 0) { | |||
1596 | if (!Val.hasOneUse()) | |||
1597 | return false; | |||
1598 | unsigned Opcode = Val->getOpcode(); | |||
1599 | if (Opcode == ISD::SETCC) { | |||
1600 | if (Val->getOperand(0).getValueType() == MVT::f128) | |||
1601 | return false; | |||
1602 | CanNegate = true; | |||
1603 | return true; | |||
1604 | } | |||
1605 | // Protect against exponential runtime and stack overflow. | |||
1606 | if (Depth > 6) | |||
1607 | return false; | |||
1608 | if (Opcode == ISD::AND || Opcode == ISD::OR) { | |||
1609 | SDValue O0 = Val->getOperand(0); | |||
1610 | SDValue O1 = Val->getOperand(1); | |||
1611 | bool CanNegateL; | |||
1612 | if (!isConjunctionDisjunctionTree(O0, CanNegateL, Depth+1)) | |||
1613 | return false; | |||
1614 | bool CanNegateR; | |||
1615 | if (!isConjunctionDisjunctionTree(O1, CanNegateR, Depth+1)) | |||
1616 | return false; | |||
1617 | ||||
1618 | if (Opcode == ISD::OR) { | |||
1619 | // For an OR expression we need to be able to negate at least one side or | |||
1620 | // we cannot do the transformation at all. | |||
1621 | if (!CanNegateL && !CanNegateR) | |||
1622 | return false; | |||
1623 | // We can however change a (not (or x y)) to (and (not x) (not y)) if we | |||
1624 | // can negate the x and y subtrees. | |||
1625 | CanNegate = CanNegateL && CanNegateR; | |||
1626 | } else { | |||
1627 | // If the operands are OR expressions then we finally need to negate their | |||
1628 | // outputs; we can only do that for the operand emitted last, by negating | |||
1629 | // OutCC, not for both operands. | |||
1630 | bool NeedsNegOutL = O0->getOpcode() == ISD::OR; | |||
1631 | bool NeedsNegOutR = O1->getOpcode() == ISD::OR; | |||
1632 | if (NeedsNegOutL && NeedsNegOutR) | |||
1633 | return false; | |||
1634 | // We cannot negate an AND operation (it would become an OR), | |||
1635 | CanNegate = false; | |||
1636 | } | |||
1637 | return true; | |||
1638 | } | |||
1639 | return false; | |||
1640 | } | |||
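// Editor's illustration of the negation-pushing rule above: a negated OR of
// negatable leaves becomes an AND of negated leaves (De Morgan), which is the
// only shape the CCMP emitter can consume.
static bool notOr(bool x, bool y)     { return !(x || y); }
static bool andOfNots(bool x, bool y) { return !x && !y; }
// Equal for all four input combinations. No such single rewrite exists for
// "not (or (and x y) z)", which is why AND nodes report CanNegate = false.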
1641 | ||||
1642 | /// Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain | |||
1643 | /// of CCMP/CFCMP ops. See @ref AArch64CCMP. | |||
1644 | /// Tries to transform the given i1 producing node @p Val to a series of compare | |||
1645 | /// and conditional compare operations. @returns an NZCV flags producing node | |||
1646 | /// and sets @p OutCC to the flags that should be tested or returns SDValue() if | |||
1647 | /// transformation was not possible. | |||
1648 | /// On recursive invocations @p Negate may be set to true to have negation | |||
1649 | /// effects pushed to the tree leaves; @p Predicate is an NZCV flag predicate | |||
1650 | /// for the comparisons in the current subtree; @p Depth limits the search | |||
1651 | /// depth to avoid stack overflow. | |||
1652 | static SDValue emitConjunctionDisjunctionTreeRec(SelectionDAG &DAG, SDValue Val, | |||
1653 | AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp, | |||
1654 | AArch64CC::CondCode Predicate) { | |||
1655 | // We're at a tree leaf, produce a conditional comparison operation. | |||
1656 | unsigned Opcode = Val->getOpcode(); | |||
1657 | if (Opcode == ISD::SETCC) { | |||
1658 | SDValue LHS = Val->getOperand(0); | |||
1659 | SDValue RHS = Val->getOperand(1); | |||
1660 | ISD::CondCode CC = cast<CondCodeSDNode>(Val->getOperand(2))->get(); | |||
1661 | bool isInteger = LHS.getValueType().isInteger(); | |||
1662 | if (Negate) | |||
1663 | CC = getSetCCInverse(CC, isInteger); | |||
1664 | SDLoc DL(Val); | |||
1665 | // Determine OutCC and handle FP special case. | |||
1666 | if (isInteger) { | |||
1667 | OutCC = changeIntCCToAArch64CC(CC); | |||
1668 | } else { | |||
1669 | assert(LHS.getValueType().isFloatingPoint()); | |||
1670 | AArch64CC::CondCode ExtraCC; | |||
1671 | changeFPCCToANDAArch64CC(CC, OutCC, ExtraCC); | |||
1672 | // Some floating point conditions can't be tested with a single condition | |||
1673 | // code. Construct an additional comparison in this case. | |||
1674 | if (ExtraCC != AArch64CC::AL) { | |||
1675 | SDValue ExtraCmp; | |||
1676 | if (!CCOp.getNode()) | |||
1677 | ExtraCmp = emitComparison(LHS, RHS, CC, DL, DAG); | |||
1678 | else | |||
1679 | ExtraCmp = emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate, | |||
1680 | ExtraCC, DL, DAG); | |||
1681 | CCOp = ExtraCmp; | |||
1682 | Predicate = ExtraCC; | |||
1683 | } | |||
1684 | } | |||
1685 | ||||
1686 | // Produce a normal comparison if we are first in the chain | |||
1687 | if (!CCOp) | |||
1688 | return emitComparison(LHS, RHS, CC, DL, DAG); | |||
1689 | // Otherwise produce a ccmp. | |||
1690 | return emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate, OutCC, DL, | |||
1691 | DAG); | |||
1692 | } | |||
1693 | assert((Opcode == ISD::AND || (Opcode == ISD::OR && Val->hasOneUse())) && | |||
1694 | "Valid conjunction/disjunction tree"); | |||
1695 | ||||
1696 | // Check if both sides can be transformed. | |||
1697 | SDValue LHS = Val->getOperand(0); | |||
1698 | SDValue RHS = Val->getOperand(1); | |||
1699 | ||||
1700 | // In case of an OR we need to negate our operands and the result. | |||
1701 | // (A v B) <=> not(not(A) ^ not(B)) | |||
1702 | bool NegateOpsAndResult = Opcode == ISD::OR; | |||
1703 | // We can negate the results of all previous operations by inverting the | |||
1704 | // predicate flags giving us a free negation for one side. The other side | |||
1705 | // must be negatable by itself. | |||
1706 | if (NegateOpsAndResult) { | |||
1707 | // See which side we can negate. | |||
1708 | bool CanNegateL; | |||
1709 | bool isValidL = isConjunctionDisjunctionTree(LHS, CanNegateL); | |||
1710 | assert(isValidL && "Valid conjunction/disjunction tree"); | |||
1711 | (void)isValidL; | |||
1712 | ||||
1713 | #ifndef NDEBUG | |||
1714 | bool CanNegateR; | |||
1715 | bool isValidR = isConjunctionDisjunctionTree(RHS, CanNegateR); | |||
1716 | assert(isValidR && "Valid conjunction/disjunction tree"); | |||
1717 | assert((CanNegateL || CanNegateR) && "Valid conjunction/disjunction tree"); | |||
1718 | #endif | |||
1719 | ||||
1720 | // Order the side which we cannot negate to RHS so we can emit it first. | |||
1721 | if (!CanNegateL) | |||
1722 | std::swap(LHS, RHS); | |||
1723 | } else { | |||
1724 | bool NeedsNegOutL = LHS->getOpcode() == ISD::OR; | |||
1725 | assert((!NeedsNegOutL || RHS->getOpcode() != ISD::OR) && | |||
1726 | "Valid conjunction/disjunction tree"); | |||
1727 | // Order the side where we need to negate the output flags to RHS so it | |||
1728 | // gets emitted first. | |||
1729 | if (NeedsNegOutL) | |||
1730 | std::swap(LHS, RHS); | |||
1731 | } | |||
1732 | ||||
1733 | // Emit RHS. If we want to negate the tree we only need to push a negate | |||
1734 | // through if we are already in a PushNegate case, otherwise we can negate | |||
1735 | // the "flags to test" afterwards. | |||
1736 | AArch64CC::CondCode RHSCC; | |||
1737 | SDValue CmpR = emitConjunctionDisjunctionTreeRec(DAG, RHS, RHSCC, Negate, | |||
1738 | CCOp, Predicate); | |||
1739 | if (NegateOpsAndResult && !Negate) | |||
1740 | RHSCC = AArch64CC::getInvertedCondCode(RHSCC); | |||
1741 | // Emit LHS. We may need to negate it. | |||
1742 | SDValue CmpL = emitConjunctionDisjunctionTreeRec(DAG, LHS, OutCC, | |||
1743 | NegateOpsAndResult, CmpR, | |||
1744 | RHSCC); | |||
1745 | // If we transformed an OR to an AND then we have to negate the result | |||
1746 | // (or absorb the Negate parameter). | |||
1747 | if (NegateOpsAndResult && !Negate) | |||
1748 | OutCC = AArch64CC::getInvertedCondCode(OutCC); | |||
1749 | return CmpL; | |||
1750 | } | |||
1751 | ||||
1752 | /// Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain | |||
1753 | /// of CCMP/CFCMP ops. See @ref AArch64CCMP. | |||
1754 | /// \see emitConjunctionDisjunctionTreeRec(). | |||
1755 | static SDValue emitConjunctionDisjunctionTree(SelectionDAG &DAG, SDValue Val, | |||
1756 | AArch64CC::CondCode &OutCC) { | |||
1757 | bool CanNegate; | |||
1758 | if (!isConjunctionDisjunctionTree(Val, CanNegate)) | |||
1759 | return SDValue(); | |||
1760 | ||||
1761 | return emitConjunctionDisjunctionTreeRec(DAG, Val, OutCC, false, SDValue(), | |||
1762 | AArch64CC::AL); | |||
1763 | } | |||
1764 | ||||
1765 | /// @} | |||
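// Editor's example (illustrative, not generated output) of the kind of
// sequence these helpers produce for "(a < b) && (c == d)" on w-registers:
//   cmp  w0, w1            // flags for a < b
//   ccmp w2, w3, #0, lt    // if LT held, compare c == d; else NZCV := 0000
//   cset w0, eq            // one flag test covers the whole conjunction
// The #0 immediate forces EQ to fail whenever the first comparison already
// failed, so no branch is needed.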
1766 | ||||
1767 | static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, | |||
1768 | SDValue &AArch64cc, SelectionDAG &DAG, | |||
1769 | const SDLoc &dl) { | |||
1770 | if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { | |||
1771 | EVT VT = RHS.getValueType(); | |||
1772 | uint64_t C = RHSC->getZExtValue(); | |||
1773 | if (!isLegalArithImmed(C)) { | |||
1774 | // Constant does not fit, try adjusting it by one? | |||
1775 | switch (CC) { | |||
1776 | default: | |||
1777 | break; | |||
1778 | case ISD::SETLT: | |||
1779 | case ISD::SETGE: | |||
1780 | if ((VT == MVT::i32 && C != 0x80000000 && | |||
1781 | isLegalArithImmed((uint32_t)(C - 1))) || | |||
1782 | (VT == MVT::i64 && C != 0x80000000ULL && | |||
1783 | isLegalArithImmed(C - 1ULL))) { | |||
1784 | CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; | |||
1785 | C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1; | |||
1786 | RHS = DAG.getConstant(C, dl, VT); | |||
1787 | } | |||
1788 | break; | |||
1789 | case ISD::SETULT: | |||
1790 | case ISD::SETUGE: | |||
1791 | if ((VT == MVT::i32 && C != 0 && | |||
1792 | isLegalArithImmed((uint32_t)(C - 1))) || | |||
1793 | (VT == MVT::i64 && C != 0ULL && isLegalArithImmed(C - 1ULL))) { | |||
1794 | CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; | |||
1795 | C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1; | |||
1796 | RHS = DAG.getConstant(C, dl, VT); | |||
1797 | } | |||
1798 | break; | |||
1799 | case ISD::SETLE: | |||
1800 | case ISD::SETGT: | |||
1801 | if ((VT == MVT::i32 && C != INT32_MAX && | |||
1802 | isLegalArithImmed((uint32_t)(C + 1))) || | |||
1803 | (VT == MVT::i64 && C != INT64_MAX && | |||
1804 | isLegalArithImmed(C + 1ULL))) { | |||
1805 | CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; | |||
1806 | C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1; | |||
1807 | RHS = DAG.getConstant(C, dl, VT); | |||
1808 | } | |||
1809 | break; | |||
1810 | case ISD::SETULE: | |||
1811 | case ISD::SETUGT: | |||
1812 | if ((VT == MVT::i32 && C != UINT32_MAX && | |||
1813 | isLegalArithImmed((uint32_t)(C + 1))) || | |||
1814 | (VT == MVT::i64 && C != UINT64_MAX && | |||
1815 | isLegalArithImmed(C + 1ULL))) { | |||
1816 | CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; | |||
1817 | C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1; | |||
1818 | RHS = DAG.getConstant(C, dl, VT); | |||
1819 | } | |||
1820 | break; | |||
1821 | } | |||
1822 | } | |||
1823 | } | |||
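// Editor's worked example of the adjustment above: "x < 0x1001" (SETLT) has
// no encodable immediate, since 0x1001 fits neither the plain nor the shifted
// 12-bit field, but the equivalent "x <= 0x1000" (SETLE) uses
// C - 1 == 0x1000 == 0x001 << 12, which does encode. The boundary checks
// (C != 0x80000000 and friends) keep the rewrite from wrapping around at the
// signed/unsigned extremes, where "adjust by one" would change the meaning.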
1824 | SDValue Cmp; | |||
1825 | AArch64CC::CondCode AArch64CC; | |||
1826 | if ((CC == ISD::SETEQ || CC == ISD::SETNE) && isa<ConstantSDNode>(RHS)) { | |||
1827 | const ConstantSDNode *RHSC = cast<ConstantSDNode>(RHS); | |||
1828 | ||||
1829 | // The imm operand of ADDS is an unsigned immediate, in the range 0 to 4095. | |||
1830 | // For the i8 operand, the largest immediate is 255, so this can be easily | |||
1831 | // encoded in the compare instruction. For the i16 operand, however, the | |||
1832 | // largest immediate cannot be encoded in the compare. | |||
1833 | // Therefore, use a sign extending load and cmn to avoid materializing the | |||
1834 | // -1 constant. For example, | |||
1835 | // movz w1, #65535 | |||
1836 | // ldrh w0, [x0, #0] | |||
1837 | // cmp w0, w1 | |||
1838 | // > | |||
1839 | // ldrsh w0, [x0, #0] | |||
1840 | // cmn w0, #1 | |||
1841 | // Fundamentally, we're relying on the property that (zext LHS) == (zext RHS) | |||
1842 | // if and only if (sext LHS) == (sext RHS). The checks are in place to | |||
1843 | // ensure both the LHS and RHS are truly zero extended and to make sure the | |||
1844 | // transformation is profitable. | |||
1845 | if ((RHSC->getZExtValue() >> 16 == 0) && isa<LoadSDNode>(LHS) && | |||
1846 | cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD && | |||
1847 | cast<LoadSDNode>(LHS)->getMemoryVT() == MVT::i16 && | |||
1848 | LHS.getNode()->hasNUsesOfValue(1, 0)) { | |||
1849 | int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue(); | |||
1850 | if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) { | |||
1851 | SDValue SExt = | |||
1852 | DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS, | |||
1853 | DAG.getValueType(MVT::i16)); | |||
1854 | Cmp = emitComparison(SExt, DAG.getConstant(ValueofRHS, dl, | |||
1855 | RHS.getValueType()), | |||
1856 | CC, dl, DAG); | |||
1857 | AArch64CC = changeIntCCToAArch64CC(CC); | |||
1858 | } | |||
1859 | } | |||
1860 | ||||
1861 | if (!Cmp && (RHSC->isNullValue() || RHSC->isOne())) { | |||
1862 | if ((Cmp = emitConjunctionDisjunctionTree(DAG, LHS, AArch64CC))) { | |||
1863 | if ((CC == ISD::SETNE) ^ RHSC->isNullValue()) | |||
1864 | AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC); | |||
1865 | } | |||
1866 | } | |||
1867 | } | |||
1868 | ||||
1869 | if (!Cmp) { | |||
1870 | Cmp = emitComparison(LHS, RHS, CC, dl, DAG); | |||
1871 | AArch64CC = changeIntCCToAArch64CC(CC); | |||
1872 | } | |||
1873 | AArch64cc = DAG.getConstant(AArch64CC, dl, MVT_CC); | |||
1874 | return Cmp; | |||
1875 | } | |||
1876 | ||||
1877 | static std::pair<SDValue, SDValue> | |||
1878 | getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) { | |||
1879 | assert((Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::i64) && | |||
1880 | "Unsupported value type"); | |||
1881 | SDValue Value, Overflow; | |||
1882 | SDLoc DL(Op); | |||
1883 | SDValue LHS = Op.getOperand(0); | |||
1884 | SDValue RHS = Op.getOperand(1); | |||
1885 | unsigned Opc = 0; | |||
1886 | switch (Op.getOpcode()) { | |||
1887 | default: | |||
1888 | llvm_unreachable("Unknown overflow instruction!"); | |||
1889 | case ISD::SADDO: | |||
1890 | Opc = AArch64ISD::ADDS; | |||
1891 | CC = AArch64CC::VS; | |||
1892 | break; | |||
1893 | case ISD::UADDO: | |||
1894 | Opc = AArch64ISD::ADDS; | |||
1895 | CC = AArch64CC::HS; | |||
1896 | break; | |||
1897 | case ISD::SSUBO: | |||
1898 | Opc = AArch64ISD::SUBS; | |||
1899 | CC = AArch64CC::VS; | |||
1900 | break; | |||
1901 | case ISD::USUBO: | |||
1902 | Opc = AArch64ISD::SUBS; | |||
1903 | CC = AArch64CC::LO; | |||
1904 | break; | |||
1905 | // Multiply needs a little extra work. | |||
1906 | case ISD::SMULO: | |||
1907 | case ISD::UMULO: { | |||
1908 | CC = AArch64CC::NE; | |||
1909 | bool IsSigned = Op.getOpcode() == ISD::SMULO; | |||
1910 | if (Op.getValueType() == MVT::i32) { | |||
1911 | unsigned ExtendOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; | |||
1912 | // For a 32 bit multiply with overflow check we want the instruction | |||
1913 | // selector to generate a widening multiply (SMADDL/UMADDL). For that we | |||
1914 | // need to generate the following pattern: | |||
1915 | // (i64 add 0, (i64 mul (i64 sext|zext i32 %a), (i64 sext|zext i32 %b))) | |||
1916 | LHS = DAG.getNode(ExtendOpc, DL, MVT::i64, LHS); | |||
1917 | RHS = DAG.getNode(ExtendOpc, DL, MVT::i64, RHS); | |||
1918 | SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS); | |||
1919 | SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Mul, | |||
1920 | DAG.getConstant(0, DL, MVT::i64)); | |||
1921 | // On AArch64 the upper 32 bits are always zero extended for a 32 bit | |||
1922 | // operation. We need to clear out the upper 32 bits, because we used a | |||
1923 | // widening multiply that wrote all 64 bits. In the end this should be a | |||
1924 | // noop. | |||
1925 | Value = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Add); | |||
1926 | if (IsSigned) { | |||
1927 | // The signed overflow check requires more than just a simple check for | |||
1928 | // any bit set in the upper 32 bits of the result. These bits could be | |||
1929 | // just the sign bits of a negative number. To perform the overflow | |||
1930 | // check we arithmetically shift the low 32 bits of the result right by | |||
1931 | // 31 bits and compare that against the upper 32 bits. | |||
1932 | SDValue UpperBits = DAG.getNode(ISD::SRL, DL, MVT::i64, Add, | |||
1933 | DAG.getConstant(32, DL, MVT::i64)); | |||
1934 | UpperBits = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, UpperBits); | |||
1935 | SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i32, Value, | |||
1936 | DAG.getConstant(31, DL, MVT::i64)); | |||
1937 | // It is important that LowerBits is last, otherwise the arithmetic | |||
1938 | // shift will not be folded into the compare (SUBS). | |||
1939 | SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32); | |||
1940 | Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, UpperBits, LowerBits) | |||
1941 | .getValue(1); | |||
1942 | } else { | |||
1943 | // The overflow check for unsigned multiply is easy. We only need to | |||
1944 | // check if any of the upper 32 bits are set. This can be done with a | |||
1945 | // CMP (shifted register). For that we need to generate the following | |||
1946 | // pattern: | |||
1947 | // (i64 AArch64ISD::SUBS i64 0, (i64 srl i64 %Mul, i64 32)) | |||
1948 | SDValue UpperBits = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, | |||
1949 | DAG.getConstant(32, DL, MVT::i64)); | |||
1950 | SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32); | |||
1951 | Overflow = | |||
1952 | DAG.getNode(AArch64ISD::SUBS, DL, VTs, | |||
1953 | DAG.getConstant(0, DL, MVT::i64), | |||
1954 | UpperBits).getValue(1); | |||
1955 | } | |||
1956 | break; | |||
1957 | } | |||
1958 | assert(Op.getValueType() == MVT::i64 && "Expected an i64 value type"); | |||
1959 | // For the 64 bit multiply | |||
1960 | Value = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS); | |||
1961 | if (IsSigned) { | |||
1962 | SDValue UpperBits = DAG.getNode(ISD::MULHS, DL, MVT::i64, LHS, RHS); | |||
1963 | SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i64, Value, | |||
1964 | DAG.getConstant(63, DL, MVT::i64)); | |||
1965 | // It is important that LowerBits is last, otherwise the arithmetic | |||
1966 | // shift will not be folded into the compare (SUBS). | |||
1967 | SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32); | |||
1968 | Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, UpperBits, LowerBits) | |||
1969 | .getValue(1); | |||
1970 | } else { | |||
1971 | SDValue UpperBits = DAG.getNode(ISD::MULHU, DL, MVT::i64, LHS, RHS); | |||
1972 | SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32); | |||
1973 | Overflow = | |||
1974 | DAG.getNode(AArch64ISD::SUBS, DL, VTs, | |||
1975 | DAG.getConstant(0, DL, MVT::i64), | |||
1976 | UpperBits).getValue(1); | |||
1977 | } | |||
1978 | break; | |||
1979 | } | |||
1980 | } // switch (...) | |||
1981 | ||||
1982 | if (Opc) { | |||
1983 | SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32); | |||
1984 | ||||
1985 | // Emit the AArch64 operation with overflow check. | |||
1986 | Value = DAG.getNode(Opc, DL, VTs, LHS, RHS); | |||
1987 | Overflow = Value.getValue(1); | |||
1988 | } | |||
1989 | return std::make_pair(Value, Overflow); | |||
1990 | } | |||
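// Editor's sketch of the 32-bit SMULO check described above, in plain C++:
// widen, multiply, then compare the high half against the sign-extension of
// the low half -- overflow iff they differ.
#include <cstdint>
static bool smulo32(int32_t a, int32_t b, int32_t &lo) {
  int64_t wide = int64_t(a) * int64_t(b);   // the widening multiply (SMULL)
  lo = int32_t(wide);                       // truncate, as the DAG does
  int32_t upper = int32_t(wide >> 32);      // "UpperBits"
  int32_t lower = lo >> 31;                 // "LowerBits": asr #31
  return upper != lower;                    // SUBS ... + NE in the DAG
}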
1991 | ||||
1992 | SDValue AArch64TargetLowering::LowerF128Call(SDValue Op, SelectionDAG &DAG, | |||
1993 | RTLIB::Libcall Call) const { | |||
1994 | SmallVector<SDValue, 2> Ops(Op->op_begin(), Op->op_end()); | |||
1995 | return makeLibCall(DAG, Call, MVT::f128, Ops, false, SDLoc(Op)).first; | |||
1996 | } | |||
1997 | ||||
1998 | // Returns true if the given Op is the overflow flag result of an overflow | |||
1999 | // intrinsic operation. | |||
2000 | static bool isOverflowIntrOpRes(SDValue Op) { | |||
2001 | unsigned Opc = Op.getOpcode(); | |||
2002 | return (Op.getResNo() == 1 && | |||
2003 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || | |||
2004 | Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO)); | |||
2005 | } | |||
2006 | ||||
2007 | static SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) { | |||
2008 | SDValue Sel = Op.getOperand(0); | |||
2009 | SDValue Other = Op.getOperand(1); | |||
2010 | SDLoc dl(Sel); | |||
2011 | ||||
2012 | // If the operand is an overflow checking operation, invert the condition | |||
2013 | // code and kill the Not operation. I.e., transform: | |||
2014 | // (xor overflow_op_bool, 1) | |||
2015 | // --> | |||
2016 | // (csel 1, 0, invert(cc), overflow_op_bool) | |||
2017 | // ... which later gets transformed to just a cset instruction with an | |||
2018 | // inverted condition code, rather than a cset + eor sequence. | |||
2019 | if (isOneConstant(Other) && isOverflowIntrOpRes(Sel)) { | |||
2020 | // Only lower legal XALUO ops. | |||
2021 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Sel->getValueType(0))) | |||
2022 | return SDValue(); | |||
2023 | ||||
2024 | SDValue TVal = DAG.getConstant(1, dl, MVT::i32); | |||
2025 | SDValue FVal = DAG.getConstant(0, dl, MVT::i32); | |||
2026 | AArch64CC::CondCode CC; | |||
2027 | SDValue Value, Overflow; | |||
2028 | std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Sel.getValue(0), DAG); | |||
2029 | SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32); | |||
2030 | return DAG.getNode(AArch64ISD::CSEL, dl, Op.getValueType(), TVal, FVal, | |||
2031 | CCVal, Overflow); | |||
2032 | } | |||
2033 | // If neither operand is a SELECT_CC, give up. | |||
2034 | if (Sel.getOpcode() != ISD::SELECT_CC) | |||
2035 | std::swap(Sel, Other); | |||
2036 | if (Sel.getOpcode() != ISD::SELECT_CC) | |||
2037 | return Op; | |||
2038 | ||||
2039 | // The folding we want to perform is: | |||
2040 | // (xor x, (select_cc a, b, cc, 0, -1) ) | |||
2041 | // --> | |||
2042 | // (csel x, (xor x, -1), cc ...) | |||
2043 | // | |||
2044 | // The latter will get matched to a CSINV instruction. | |||
2045 | ||||
2046 | ISD::CondCode CC = cast<CondCodeSDNode>(Sel.getOperand(4))->get(); | |||
2047 | SDValue LHS = Sel.getOperand(0); | |||
2048 | SDValue RHS = Sel.getOperand(1); | |||
2049 | SDValue TVal = Sel.getOperand(2); | |||
2050 | SDValue FVal = Sel.getOperand(3); | |||
2051 | ||||
2052 | // FIXME: This could be generalized to non-integer comparisons. | |||
2053 | if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64) | |||
2054 | return Op; | |||
2055 | ||||
2056 | ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal); | |||
2057 | ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal); | |||
2058 | ||||
2059 | // The values aren't constants, this isn't the pattern we're looking for. | |||
2060 | if (!CFVal || !CTVal) | |||
2061 | return Op; | |||
2062 | ||||
2063 | // We can commute the SELECT_CC by inverting the condition. This | |||
2064 | // might be needed to make this fit into a CSINV pattern. | |||
2065 | if (CTVal->isAllOnesValue() && CFVal->isNullValue()) { | |||
2066 | std::swap(TVal, FVal); | |||
2067 | std::swap(CTVal, CFVal); | |||
2068 | CC = ISD::getSetCCInverse(CC, true); | |||
2069 | } | |||
2070 | ||||
2071 | // If the constants line up, perform the transform! | |||
2072 | if (CTVal->isNullValue() && CFVal->isAllOnesValue()) { | |||
2073 | SDValue CCVal; | |||
2074 | SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl); | |||
2075 | ||||
2076 | FVal = Other; | |||
2077 | TVal = DAG.getNode(ISD::XOR, dl, Other.getValueType(), Other, | |||
2078 | DAG.getConstant(-1ULL, dl, Other.getValueType())); | |||
2079 | ||||
2080 | return DAG.getNode(AArch64ISD::CSEL, dl, Sel.getValueType(), FVal, TVal, | |||
2081 | CCVal, Cmp); | |||
2082 | } | |||
2083 | ||||
2084 | return Op; | |||
2085 | } | |||
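// Editor's check of the fold above: with the select producing 0 or -1, the
// xor picks between x and ~x, which is exactly what CSINV computes.
#include <cstdint>
static int64_t beforeFold(int64_t x, bool cc) { return x ^ (cc ? 0 : -1); }
static int64_t afterFold(int64_t x, bool cc)  { return cc ? x : ~x; }
// Identical for all inputs, since x ^ 0 == x and x ^ -1 == ~x.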
2086 | ||||
2087 | static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { | |||
2088 | EVT VT = Op.getValueType(); | |||
2089 | ||||
2090 | // Let legalize expand this if it isn't a legal type yet. | |||
2091 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) | |||
2092 | return SDValue(); | |||
2093 | ||||
2094 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); | |||
2095 | ||||
2096 | unsigned Opc; | |||
2097 | bool ExtraOp = false; | |||
2098 | switch (Op.getOpcode()) { | |||
2099 | default: | |||
2100 | llvm_unreachable("Invalid code"); | |||
2101 | case ISD::ADDC: | |||
2102 | Opc = AArch64ISD::ADDS; | |||
2103 | break; | |||
2104 | case ISD::SUBC: | |||
2105 | Opc = AArch64ISD::SUBS; | |||
2106 | break; | |||
2107 | case ISD::ADDE: | |||
2108 | Opc = AArch64ISD::ADCS; | |||
2109 | ExtraOp = true; | |||
2110 | break; | |||
2111 | case ISD::SUBE: | |||
2112 | Opc = AArch64ISD::SBCS; | |||
2113 | ExtraOp = true; | |||
2114 | break; | |||
2115 | } | |||
2116 | ||||
2117 | if (!ExtraOp) | |||
2118 | return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1)); | |||
2119 | return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1), | |||
2120 | Op.getOperand(2)); | |||
2121 | } | |||
2122 | ||||
2123 | static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { | |||
2124 | // Let legalize expand this if it isn't a legal type yet. | |||
2125 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) | |||
2126 | return SDValue(); | |||
2127 | ||||
2128 | SDLoc dl(Op); | |||
2129 | AArch64CC::CondCode CC; | |||
2130 | // The actual operation that sets the overflow or carry flag. | |||
2131 | SDValue Value, Overflow; | |||
2132 | std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Op, DAG); | |||
2133 | ||||
2134 | // We use 0 and 1 as false and true values. | |||
2135 | SDValue TVal = DAG.getConstant(1, dl, MVT::i32); | |||
2136 | SDValue FVal = DAG.getConstant(0, dl, MVT::i32); | |||
2137 | ||||
2138 | // We use an inverted condition, because the conditional select is inverted | |||
2139 | // too. This will allow it to be selected to a single instruction: | |||
2140 | // CSINC Wd, WZR, WZR, invert(cond). | |||
2141 | SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32); | |||
2142 | Overflow = DAG.getNode(AArch64ISD::CSEL, dl, MVT::i32, FVal, TVal, | |||
2143 | CCVal, Overflow); | |||
2144 | ||||
2145 | SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); | |||
2146 | return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); | |||
2147 | } | |||
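As a sanity check on the lowering above, here is a scalar model of what the flag-setting op plus inverted CSINC computes for an unsigned i32 add-with-overflow (a sketch; modelUADDO is a hypothetical name, not an LLVM API):

#include <cstdint>
#include <utility>

// Value comes from ADDS (which also sets NZCV); Overflow is what
// CSINC Wd, WZR, WZR, invert(cond) materializes from the carry flag.
static std::pair<uint32_t, uint32_t> modelUADDO(uint32_t A, uint32_t B) {
  uint32_t Value = A + B;
  uint32_t Overflow = Value < A ? 1u : 0u;
  return {Value, Overflow};
}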
2148 | ||||
2149 | // Prefetch operands are: | |||
2150 | // 1: Address to prefetch | |||
2151 | // 2: bool isWrite | |||
2152 | // 3: int locality (0 = no locality ... 3 = extreme locality) | |||
2153 | // 4: bool isDataCache | |||
2154 | static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) { | |||
2155 | SDLoc DL(Op); | |||
2156 | unsigned IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); | |||
2157 | unsigned Locality = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); | |||
2158 | unsigned IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); | |||
2159 | ||||
2160 | bool IsStream = !Locality; | |||
2161 | // When the locality number is set | |||
2162 | if (Locality) { | |||
2163 | // The front-end should have filtered out the out-of-range values | |||
2164 | assert(Locality <= 3 && "Prefetch locality out-of-range")(static_cast <bool> (Locality <= 3 && "Prefetch locality out-of-range" ) ? void (0) : __assert_fail ("Locality <= 3 && \"Prefetch locality out-of-range\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 2164, __extension__ __PRETTY_FUNCTION__)); | |||
2165 | // The locality degree is the opposite of the cache speed. | |||
2166 | // Put the number the other way around. | |||
2167 | // The encoding starts at 0 for level 1 | |||
2168 | Locality = 3 - Locality; | |||
2169 | } | |||
2170 | ||||
2171 | // Build the mask value encoding the expected behavior. | |||
2172 | unsigned PrfOp = (IsWrite << 4) | // Load/Store bit | |||
2173 | (!IsData << 3) | // IsDataCache bit | |||
2174 | (Locality << 1) | // Cache level bits | |||
2175 | (unsigned)IsStream; // Stream bit | |||
2176 | return DAG.getNode(AArch64ISD::PREFETCH, DL, MVT::Other, Op.getOperand(0), | |||
2177 | DAG.getConstant(PrfOp, DL, MVT::i32), Op.getOperand(1)); | |||
2178 | } | |||
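The PrfOp packing above can be cross-checked with a small compile-time model (a sketch; encodePrfOp is a hypothetical helper mirroring the computation, and the expected values follow the PRFM <prfop> encoding):

// Write bit 4, instruction-cache bit 3, inverted locality in bits 2:1,
// stream bit 0 -- the same packing as PrfOp above.
constexpr unsigned encodePrfOp(bool IsWrite, unsigned Locality,
                               bool IsData) {
  return (IsWrite << 4) | (!IsData << 3) |
         ((Locality ? 3 - Locality : 0) << 1) | (Locality ? 0 : 1);
}

// prefetch(p, /*write*/0, /*locality*/3, /*data*/1) encodes PLDL1KEEP.
static_assert(encodePrfOp(false, 3, true) == 0, "PLDL1KEEP");
// prefetch(p, /*write*/1, /*locality*/0, /*data*/1) encodes PSTL1STRM.
static_assert(encodePrfOp(true, 0, true) == 0x11, "PSTL1STRM");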
2179 | ||||
2180 | SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, | |||
2181 | SelectionDAG &DAG) const { | |||
2182 | assert(Op.getValueType() == MVT::f128 && "Unexpected lowering"); | |||
2183 | ||||
2184 | RTLIB::Libcall LC; | |||
2185 | LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType()); | |||
2186 | ||||
2187 | return LowerF128Call(Op, DAG, LC); | |||
2188 | } | |||
2189 | ||||
2190 | SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op, | |||
2191 | SelectionDAG &DAG) const { | |||
2192 | if (Op.getOperand(0).getValueType() != MVT::f128) { | |||
2193 | // It's legal except when f128 is involved | |||
2194 | return Op; | |||
2195 | } | |||
2196 | ||||
2197 | RTLIB::Libcall LC; | |||
2198 | LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType()); | |||
2199 | ||||
2200 | // FP_ROUND node has a second operand indicating whether it is known to be | |||
2201 | // precise. That doesn't take part in the LibCall so we can't directly use | |||
2202 | // LowerF128Call. | |||
2203 | SDValue SrcVal = Op.getOperand(0); | |||
2204 | return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false, | |||
2205 | SDLoc(Op)).first; | |||
2206 | } | |||
2207 | ||||
2208 | static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { | |||
2209 | // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp. | |||
2210 | // Any additional optimization in this function should be recorded | |||
2211 | // in the cost tables. | |||
2212 | EVT InVT = Op.getOperand(0).getValueType(); | |||
2213 | EVT VT = Op.getValueType(); | |||
2214 | unsigned NumElts = InVT.getVectorNumElements(); | |||
2215 | ||||
2216 | // f16 vectors are promoted to f32 before a conversion. | |||
2217 | if (InVT.getVectorElementType() == MVT::f16) { | |||
2218 | MVT NewVT = MVT::getVectorVT(MVT::f32, NumElts); | |||
2219 | SDLoc dl(Op); | |||
2220 | return DAG.getNode( | |||
2221 | Op.getOpcode(), dl, Op.getValueType(), | |||
2222 | DAG.getNode(ISD::FP_EXTEND, dl, NewVT, Op.getOperand(0))); | |||
2223 | } | |||
2224 | ||||
2225 | if (VT.getSizeInBits() < InVT.getSizeInBits()) { | |||
2226 | SDLoc dl(Op); | |||
2227 | SDValue Cv = | |||
2228 | DAG.getNode(Op.getOpcode(), dl, InVT.changeVectorElementTypeToInteger(), | |||
2229 | Op.getOperand(0)); | |||
2230 | return DAG.getNode(ISD::TRUNCATE, dl, VT, Cv); | |||
2231 | } | |||
2232 | ||||
2233 | if (VT.getSizeInBits() > InVT.getSizeInBits()) { | |||
2234 | SDLoc dl(Op); | |||
2235 | MVT ExtVT = | |||
2236 | MVT::getVectorVT(MVT::getFloatingPointVT(VT.getScalarSizeInBits()), | |||
2237 | VT.getVectorNumElements()); | |||
2238 | SDValue Ext = DAG.getNode(ISD::FP_EXTEND, dl, ExtVT, Op.getOperand(0)); | |||
2239 | return DAG.getNode(Op.getOpcode(), dl, VT, Ext); | |||
2240 | } | |||
2241 | ||||
2242 | // Type changing conversions are illegal. | |||
2243 | return Op; | |||
2244 | } | |||
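A scalar model of the narrowing case handled above (a sketch; modelNarrowFPToSI is a hypothetical name): the conversion runs at the source's integer width and the result is then truncated.

#include <cstdint>

// One lane of e.g. a v4f32 -> v4i16 fp_to_sint: fcvtzs at i32 width,
// then a truncate of the integer result.
static int16_t modelNarrowFPToSI(float F) {
  int32_t Wide = static_cast<int32_t>(F);
  return static_cast<int16_t>(Wide);
}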
2245 | ||||
2246 | SDValue AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, | |||
2247 | SelectionDAG &DAG) const { | |||
2248 | if (Op.getOperand(0).getValueType().isVector()) | |||
2249 | return LowerVectorFP_TO_INT(Op, DAG); | |||
2250 | ||||
2251 | // f16 conversions are promoted to f32 when full fp16 is not supported. | |||
2252 | if (Op.getOperand(0).getValueType() == MVT::f16 && | |||
2253 | !Subtarget->hasFullFP16()) { | |||
2254 | SDLoc dl(Op); | |||
2255 | return DAG.getNode( | |||
2256 | Op.getOpcode(), dl, Op.getValueType(), | |||
2257 | DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, Op.getOperand(0))); | |||
2258 | } | |||
2259 | ||||
2260 | if (Op.getOperand(0).getValueType() != MVT::f128) { | |||
2261 | // It's legal except when f128 is involved | |||
2262 | return Op; | |||
2263 | } | |||
2264 | ||||
2265 | RTLIB::Libcall LC; | |||
2266 | if (Op.getOpcode() == ISD::FP_TO_SINT) | |||
2267 | LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType()); | |||
2268 | else | |||
2269 | LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType()); | |||
2270 | ||||
2271 | SmallVector<SDValue, 2> Ops(Op->op_begin(), Op->op_end()); | |||
2272 | return makeLibCall(DAG, LC, Op.getValueType(), Ops, false, SDLoc(Op)).first; | |||
2273 | } | |||
2274 | ||||
2275 | static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { | |||
2276 | // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp. | |||
2277 | // Any additional optimization in this function should be recorded | |||
2278 | // in the cost tables. | |||
2279 | EVT VT = Op.getValueType(); | |||
2280 | SDLoc dl(Op); | |||
2281 | SDValue In = Op.getOperand(0); | |||
2282 | EVT InVT = In.getValueType(); | |||
2283 | ||||
2284 | if (VT.getSizeInBits() < InVT.getSizeInBits()) { | |||
2285 | MVT CastVT = | |||
2286 | MVT::getVectorVT(MVT::getFloatingPointVT(InVT.getScalarSizeInBits()), | |||
2287 | InVT.getVectorNumElements()); | |||
2288 | In = DAG.getNode(Op.getOpcode(), dl, CastVT, In); | |||
2289 | return DAG.getNode(ISD::FP_ROUND, dl, VT, In, DAG.getIntPtrConstant(0, dl)); | |||
2290 | } | |||
2291 | ||||
2292 | if (VT.getSizeInBits() > InVT.getSizeInBits()) { | |||
2293 | unsigned CastOpc = | |||
2294 | Op.getOpcode() == ISD::SINT_TO_FP ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; | |||
2295 | EVT CastVT = VT.changeVectorElementTypeToInteger(); | |||
2296 | In = DAG.getNode(CastOpc, dl, CastVT, In); | |||
2297 | return DAG.getNode(Op.getOpcode(), dl, VT, In); | |||
2298 | } | |||
2299 | ||||
2300 | return Op; | |||
2301 | } | |||
2302 | ||||
2303 | SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, | |||
2304 | SelectionDAG &DAG) const { | |||
2305 | if (Op.getValueType().isVector()) | |||
2306 | return LowerVectorINT_TO_FP(Op, DAG); | |||
2307 | ||||
2308 | // f16 conversions are promoted to f32 when full fp16 is not supported. | |||
2309 | if (Op.getValueType() == MVT::f16 && | |||
2310 | !Subtarget->hasFullFP16()) { | |||
2311 | SDLoc dl(Op); | |||
2312 | return DAG.getNode( | |||
2313 | ISD::FP_ROUND, dl, MVT::f16, | |||
2314 | DAG.getNode(Op.getOpcode(), dl, MVT::f32, Op.getOperand(0)), | |||
2315 | DAG.getIntPtrConstant(0, dl)); | |||
2316 | } | |||
2317 | ||||
2318 | // i128 conversions are libcalls. | |||
2319 | if (Op.getOperand(0).getValueType() == MVT::i128) | |||
2320 | return SDValue(); | |||
2321 | ||||
2322 | // Other conversions are legal, unless it's to the completely software-based | |||
2323 | // fp128. | |||
2324 | if (Op.getValueType() != MVT::f128) | |||
2325 | return Op; | |||
2326 | ||||
2327 | RTLIB::Libcall LC; | |||
2328 | if (Op.getOpcode() == ISD::SINT_TO_FP) | |||
2329 | LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); | |||
2330 | else | |||
2331 | LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); | |||
2332 | ||||
2333 | return LowerF128Call(Op, DAG, LC); | |||
2334 | } | |||
2335 | ||||
2336 | SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op, | |||
2337 | SelectionDAG &DAG) const { | |||
2338 | // For iOS, we want to call an alternative entry point: __sincos_stret, | |||
2339 | // which returns the values in two S / D registers. | |||
2340 | SDLoc dl(Op); | |||
2341 | SDValue Arg = Op.getOperand(0); | |||
2342 | EVT ArgVT = Arg.getValueType(); | |||
2343 | Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); | |||
2344 | ||||
2345 | ArgListTy Args; | |||
2346 | ArgListEntry Entry; | |||
2347 | ||||
2348 | Entry.Node = Arg; | |||
2349 | Entry.Ty = ArgTy; | |||
2350 | Entry.IsSExt = false; | |||
2351 | Entry.IsZExt = false; | |||
2352 | Args.push_back(Entry); | |||
2353 | ||||
2354 | RTLIB::Libcall LC = ArgVT == MVT::f64 ? RTLIB::SINCOS_STRET_F64 | |||
2355 | : RTLIB::SINCOS_STRET_F32; | |||
2356 | const char *LibcallName = getLibcallName(LC); | |||
2357 | SDValue Callee = | |||
2358 | DAG.getExternalSymbol(LibcallName, getPointerTy(DAG.getDataLayout())); | |||
2359 | ||||
2360 | StructType *RetTy = StructType::get(ArgTy, ArgTy); | |||
2361 | TargetLowering::CallLoweringInfo CLI(DAG); | |||
2362 | CLI.setDebugLoc(dl) | |||
2363 | .setChain(DAG.getEntryNode()) | |||
2364 | .setLibCallee(CallingConv::Fast, RetTy, Callee, std::move(Args)); | |||
2365 | ||||
2366 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); | |||
2367 | return CallResult.first; | |||
2368 | } | |||
2369 | ||||
2370 | static SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) { | |||
2371 | if (Op.getValueType() != MVT::f16) | |||
2372 | return SDValue(); | |||
2373 | ||||
2374 | assert(Op.getOperand(0).getValueType() == MVT::i16); | |||
2375 | SDLoc DL(Op); | |||
2376 | ||||
2377 | Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op.getOperand(0)); | |||
2378 | Op = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Op); | |||
2379 | return SDValue( | |||
2380 | DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::f16, Op, | |||
2381 | DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)), | |||
2382 | 0); | |||
2383 | } | |||
2384 | ||||
2385 | static EVT getExtensionTo64Bits(const EVT &OrigVT) { | |||
2386 | if (OrigVT.getSizeInBits() >= 64) | |||
2387 | return OrigVT; | |||
2388 | ||||
2389 | assert(OrigVT.isSimple() && "Expecting a simple value type"); | |||
2390 | ||||
2391 | MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; | |||
2392 | switch (OrigSimpleTy) { | |||
2393 | default: llvm_unreachable("Unexpected Vector Type"); | |||
2394 | case MVT::v2i8: | |||
2395 | case MVT::v2i16: | |||
2396 | return MVT::v2i32; | |||
2397 | case MVT::v4i8: | |||
2398 | return MVT::v4i16; | |||
2399 | } | |||
2400 | } | |||
2401 | ||||
2402 | static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG, | |||
2403 | const EVT &OrigTy, | |||
2404 | const EVT &ExtTy, | |||
2405 | unsigned ExtOpcode) { | |||
2406 | // The vector originally had a size of OrigTy. It was then extended to ExtTy. | |||
2407 | // We expect the ExtTy to be 128-bits total. If the OrigTy is less than | |||
2408 | // 64-bits we need to insert a new extension so that it will be 64-bits. | |||
2409 | assert(ExtTy.is128BitVector() && "Unexpected extension size"); | |||
2410 | if (OrigTy.getSizeInBits() >= 64) | |||
2411 | return N; | |||
2412 | ||||
2413 | // Must extend size to at least 64 bits to be used as an operand for VMULL. | |||
2414 | EVT NewVT = getExtensionTo64Bits(OrigTy); | |||
2415 | ||||
2416 | return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N); | |||
2417 | } | |||
2418 | ||||
2419 | static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, | |||
2420 | bool isSigned) { | |||
2421 | EVT VT = N->getValueType(0); | |||
2422 | ||||
2423 | if (N->getOpcode() != ISD::BUILD_VECTOR) | |||
2424 | return false; | |||
2425 | ||||
2426 | for (const SDValue &Elt : N->op_values()) { | |||
2427 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { | |||
2428 | unsigned EltSize = VT.getScalarSizeInBits(); | |||
2429 | unsigned HalfSize = EltSize / 2; | |||
2430 | if (isSigned) { | |||
2431 | if (!isIntN(HalfSize, C->getSExtValue())) | |||
2432 | return false; | |||
2433 | } else { | |||
2434 | if (!isUIntN(HalfSize, C->getZExtValue())) | |||
2435 | return false; | |||
2436 | } | |||
2437 | continue; | |||
2438 | } | |||
2439 | return false; | |||
2440 | } | |||
2441 | ||||
2442 | return true; | |||
2443 | } | |||
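The per-element test above amounts to asking whether each constant fits in half the element width. A standalone sketch of that predicate (fitsInHalfWidth is a hypothetical helper mirroring isIntN/isUIntN):

#include <cstdint>

static bool fitsInHalfWidth(int64_t C, unsigned EltSize, bool IsSigned) {
  unsigned HalfSize = EltSize / 2; // e.g. 16 for a v4i32 element
  if (IsSigned)
    return C >= -(INT64_C(1) << (HalfSize - 1)) &&
           C < (INT64_C(1) << (HalfSize - 1));
  return static_cast<uint64_t>(C) < (UINT64_C(1) << HalfSize);
}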
2444 | ||||
2445 | static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) { | |||
2446 | if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) | |||
2447 | return addRequiredExtensionForVectorMULL(N->getOperand(0), DAG, | |||
2448 | N->getOperand(0)->getValueType(0), | |||
2449 | N->getValueType(0), | |||
2450 | N->getOpcode()); | |||
2451 | ||||
2452 | assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR")(static_cast <bool> (N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR") ? void (0) : __assert_fail ("N->getOpcode() == ISD::BUILD_VECTOR && \"expected BUILD_VECTOR\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 2452, __extension__ __PRETTY_FUNCTION__)); | |||
2453 | EVT VT = N->getValueType(0); | |||
2454 | SDLoc dl(N); | |||
2455 | unsigned EltSize = VT.getScalarSizeInBits() / 2; | |||
2456 | unsigned NumElts = VT.getVectorNumElements(); | |||
2457 | MVT TruncVT = MVT::getIntegerVT(EltSize); | |||
2458 | SmallVector<SDValue, 8> Ops; | |||
2459 | for (unsigned i = 0; i != NumElts; ++i) { | |||
2460 | ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); | |||
2461 | const APInt &CInt = C->getAPIntValue(); | |||
2462 | // Element types smaller than 32 bits are not legal, so use i32 elements. | |||
2463 | // The values are implicitly truncated so sext vs. zext doesn't matter. | |||
2464 | Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32)); | |||
2465 | } | |||
2466 | return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops); | |||
2467 | } | |||
2468 | ||||
2469 | static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { | |||
2470 | return N->getOpcode() == ISD::SIGN_EXTEND || | |||
2471 | isExtendedBUILD_VECTOR(N, DAG, true); | |||
2472 | } | |||
2473 | ||||
2474 | static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { | |||
2475 | return N->getOpcode() == ISD::ZERO_EXTEND || | |||
2476 | isExtendedBUILD_VECTOR(N, DAG, false); | |||
2477 | } | |||
2478 | ||||
2479 | static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { | |||
2480 | unsigned Opcode = N->getOpcode(); | |||
2481 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { | |||
2482 | SDNode *N0 = N->getOperand(0).getNode(); | |||
2483 | SDNode *N1 = N->getOperand(1).getNode(); | |||
2484 | return N0->hasOneUse() && N1->hasOneUse() && | |||
2485 | isSignExtended(N0, DAG) && isSignExtended(N1, DAG); | |||
2486 | } | |||
2487 | return false; | |||
2488 | } | |||
2489 | ||||
2490 | static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { | |||
2491 | unsigned Opcode = N->getOpcode(); | |||
2492 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { | |||
2493 | SDNode *N0 = N->getOperand(0).getNode(); | |||
2494 | SDNode *N1 = N->getOperand(1).getNode(); | |||
2495 | return N0->hasOneUse() && N1->hasOneUse() && | |||
2496 | isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); | |||
2497 | } | |||
2498 | return false; | |||
2499 | } | |||
2500 | ||||
2501 | SDValue AArch64TargetLowering::LowerFLT_ROUNDS_(SDValue Op, | |||
2502 | SelectionDAG &DAG) const { | |||
2503 | // The rounding mode is in bits 23:22 of the FPCR. | |||
2504 | // The AArch64 rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0. | |||
2505 | // The formula we use to implement this is ((FPCR + (1 << 22)) >> 22) & 3, | |||
2506 | // so that the shift + and get folded into a bitfield extract. | |||
2507 | SDLoc dl(Op); | |||
2508 | ||||
2509 | SDValue FPCR_64 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i64, | |||
2510 | DAG.getConstant(Intrinsic::aarch64_get_fpcr, dl, | |||
2511 | MVT::i64)); | |||
2512 | SDValue FPCR_32 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, FPCR_64); | |||
2513 | SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPCR_32, | |||
2514 | DAG.getConstant(1U << 22, dl, MVT::i32)); | |||
2515 | SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, | |||
2516 | DAG.getConstant(22, dl, MVT::i32)); | |||
2517 | return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, | |||
2518 | DAG.getConstant(3, dl, MVT::i32)); | |||
2519 | } | |||
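The 0->1, 1->2, 2->3, 3->0 mapping falls out of the +1 wraparound in the formula; a quick compile-time check (illustrative only; modelFltRounds is a hypothetical name):

constexpr unsigned modelFltRounds(unsigned FPCR) {
  return ((FPCR + (1u << 22)) >> 22) & 3;
}

static_assert(modelFltRounds(0u << 22) == 1, "RN -> to-nearest");
static_assert(modelFltRounds(1u << 22) == 2, "RP -> toward +inf");
static_assert(modelFltRounds(2u << 22) == 3, "RM -> toward -inf");
static_assert(modelFltRounds(3u << 22) == 0, "RZ -> toward zero");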
2520 | ||||
2521 | static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { | |||
2522 | // Multiplications are only custom-lowered for 128-bit vectors so that | |||
2523 | // VMULL can be detected. Otherwise v2i64 multiplications are not legal. | |||
2524 | EVT VT = Op.getValueType(); | |||
2525 | assert(VT.is128BitVector() && VT.isInteger() && | |||
2526 | "unexpected type for custom-lowering ISD::MUL"); | |||
2527 | SDNode *N0 = Op.getOperand(0).getNode(); | |||
2528 | SDNode *N1 = Op.getOperand(1).getNode(); | |||
2529 | unsigned NewOpc = 0; | |||
2530 | bool isMLA = false; | |||
2531 | bool isN0SExt = isSignExtended(N0, DAG); | |||
2532 | bool isN1SExt = isSignExtended(N1, DAG); | |||
2533 | if (isN0SExt && isN1SExt) | |||
2534 | NewOpc = AArch64ISD::SMULL; | |||
2535 | else { | |||
2536 | bool isN0ZExt = isZeroExtended(N0, DAG); | |||
2537 | bool isN1ZExt = isZeroExtended(N1, DAG); | |||
2538 | if (isN0ZExt && isN1ZExt) | |||
2539 | NewOpc = AArch64ISD::UMULL; | |||
2540 | else if (isN1SExt || isN1ZExt) { | |||
2541 | // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these | |||
2542 | // into (s/zext A * s/zext C) + (s/zext B * s/zext C) | |||
2543 | if (isN1SExt && isAddSubSExt(N0, DAG)) { | |||
2544 | NewOpc = AArch64ISD::SMULL; | |||
2545 | isMLA = true; | |||
2546 | } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { | |||
2547 | NewOpc = AArch64ISD::UMULL; | |||
2548 | isMLA = true; | |||
2549 | } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { | |||
2550 | std::swap(N0, N1); | |||
2551 | NewOpc = AArch64ISD::UMULL; | |||
2552 | isMLA = true; | |||
2553 | } | |||
2554 | } | |||
2555 | ||||
2556 | if (!NewOpc) { | |||
2557 | if (VT == MVT::v2i64) | |||
2558 | // Fall through to expand this. It is not legal. | |||
2559 | return SDValue(); | |||
2560 | else | |||
2561 | // Other vector multiplications are legal. | |||
2562 | return Op; | |||
2563 | } | |||
2564 | } | |||
2565 | ||||
2566 | // Legalize to a S/UMULL instruction | |||
2567 | SDLoc DL(Op); | |||
2568 | SDValue Op0; | |||
2569 | SDValue Op1 = skipExtensionForVectorMULL(N1, DAG); | |||
2570 | if (!isMLA) { | |||
2571 | Op0 = skipExtensionForVectorMULL(N0, DAG); | |||
2572 | assert(Op0.getValueType().is64BitVector() && | |||
2573 | Op1.getValueType().is64BitVector() && | |||
2574 | "unexpected types for extended operands to VMULL"); | |||
2575 | return DAG.getNode(NewOpc, DL, VT, Op0, Op1); | |||
2576 | } | |||
2577 | // Optimize (zext A + zext B) * C to (S/UMULL A, C) + (S/UMULL B, C) during | |||
2578 | // isel lowering to take advantage of no-stall back-to-back s/umul + s/umla. | |||
2579 | // This is true for CPUs with accumulate forwarding such as Cortex-A53/A57. | |||
2580 | SDValue N00 = skipExtensionForVectorMULL(N0->getOperand(0).getNode(), DAG); | |||
2581 | SDValue N01 = skipExtensionForVectorMULL(N0->getOperand(1).getNode(), DAG); | |||
2582 | EVT Op1VT = Op1.getValueType(); | |||
2583 | return DAG.getNode(N0->getOpcode(), DL, VT, | |||
2584 | DAG.getNode(NewOpc, DL, VT, | |||
2585 | DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), | |||
2586 | DAG.getNode(NewOpc, DL, VT, | |||
2587 | DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); | |||
2588 | } | |||
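Per lane, distributing the multiply over the extended add preserves the v2i64 result exactly; a scalar sketch of the MLA leg (modelSMLALane is a hypothetical name):

#include <cstdint>

// (sext A + sext B) * sext C == sext A * sext C + sext B * sext C
// modulo 2^64, so two SMULLs feeding an ADD match the original MUL.
static int64_t modelSMLALane(int32_t A, int32_t B, int32_t C) {
  uint64_t P0 = static_cast<uint64_t>(static_cast<int64_t>(A) * C);
  uint64_t P1 = static_cast<uint64_t>(static_cast<int64_t>(B) * C);
  return static_cast<int64_t>(P0 + P1); // wraps like the vector ADD
}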
2589 | ||||
2590 | // Lower vector multiply high (ISD::MULHS and ISD::MULHU). | |||
2591 | static SDValue LowerMULH(SDValue Op, SelectionDAG &DAG) { | |||
2592 | // Multiplications are only custom-lowered for 128-bit vectors so that | |||
2593 | // {S,U}MULL{2} can be detected. Otherwise v2i64 multiplications are not | |||
2594 | // legal. | |||
2595 | EVT VT = Op.getValueType(); | |||
2596 | assert(VT.is128BitVector() && VT.isInteger() && | |||
2597 | "unexpected type for custom-lowering ISD::MULH{U,S}"); | |||
2598 | ||||
2599 | SDValue V0 = Op.getOperand(0); | |||
2600 | SDValue V1 = Op.getOperand(1); | |||
2601 | ||||
2602 | SDLoc DL(Op); | |||
2603 | ||||
2604 | EVT ExtractVT = VT.getHalfNumVectorElementsVT(*DAG.getContext()); | |||
2605 | ||||
2606 | // We turn (V0 mulhs/mulhu V1) to: | |||
2607 | // | |||
2608 | // (uzp2 (smull (extract_subvector (ExtractVT V128:V0, (i64 0)), | |||
2609 | //              (extract_subvector (ExtractVT V128:V1, (i64 0))))), | |||
2610 | //       (smull (extract_subvector (ExtractVT V128:V0, (i64 VMull2Idx)), | |||
2611 | //              (extract_subvector (ExtractVT V128:V1, (i64 VMull2Idx)))))) | |||
2612 | // | |||
2613 | // where ExtractVT is a subvector type with half the number of elements, | |||
2614 | // and VMull2Idx is the index of the middle element (the start of the high part). | |||
2615 | // | |||
2616 | // The vector high-part extract and multiply will be matched against | |||
2617 | // {S,U}MULL{v16i8_v8i16,v8i16_v4i32,v4i32_v2i64}, which in turn will | |||
2618 | // issue a {s,u}mull2 instruction. | |||
2619 | // | |||
2620 | // This multiplies the lower subvector with '{s,u}mull', the high | |||
2621 | // subvector with '{s,u}mull2', and shuffles the high parts of both | |||
2622 | // results into the resulting vector. | |||
2623 | unsigned Mull2VectorIdx = VT.getVectorNumElements() / 2; | |||
2624 | SDValue VMullIdx = DAG.getConstant(0, DL, MVT::i64); | |||
2625 | SDValue VMull2Idx = DAG.getConstant(Mull2VectorIdx, DL, MVT::i64); | |||
2626 | ||||
2627 | SDValue VMullV0 = | |||
2628 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT, V0, VMullIdx); | |||
2629 | SDValue VMullV1 = | |||
2630 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT, V1, VMullIdx); | |||
2631 | ||||
2632 | SDValue VMull2V0 = | |||
2633 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT, V0, VMull2Idx); | |||
2634 | SDValue VMull2V1 = | |||
2635 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT, V1, VMull2Idx); | |||
2636 | ||||
2637 | unsigned MullOpc = Op.getOpcode() == ISD::MULHS ? AArch64ISD::SMULL | |||
2638 | : AArch64ISD::UMULL; | |||
2639 | ||||
2640 | EVT MullVT = ExtractVT.widenIntegerVectorElementType(*DAG.getContext()); | |||
2641 | SDValue Mull = DAG.getNode(MullOpc, DL, MullVT, VMullV0, VMullV1); | |||
2642 | SDValue Mull2 = DAG.getNode(MullOpc, DL, MullVT, VMull2V0, VMull2V1); | |||
2643 | ||||
2644 | Mull = DAG.getNode(ISD::BITCAST, DL, VT, Mull); | |||
2645 | Mull2 = DAG.getNode(ISD::BITCAST, DL, VT, Mull2); | |||
2646 | ||||
2647 | return DAG.getNode(AArch64ISD::UZP2, DL, VT, Mull, Mull2); | |||
2648 | } | |||
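Per lane the uzp2-of-smull sequence computes the arithmetic high half of the product; a scalar model for the MULHS case (modelMULHSLane is a hypothetical name):

#include <cstdint>

// smull/smull2 form the full 64-bit products; uzp2 then keeps the
// high 32-bit halves, i.e. (A * B) >> 32 for each lane.
static int32_t modelMULHSLane(int32_t A, int32_t B) {
  return static_cast<int32_t>((static_cast<int64_t>(A) * B) >> 32);
}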
2649 | ||||
2650 | SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, | |||
2651 | SelectionDAG &DAG) const { | |||
2652 | unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
2653 | SDLoc dl(Op); | |||
2654 | switch (IntNo) { | |||
2655 | default: return SDValue(); // Don't custom lower most intrinsics. | |||
2656 | case Intrinsic::thread_pointer: { | |||
2657 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2658 | return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT); | |||
2659 | } | |||
2660 | case Intrinsic::aarch64_neon_abs: | |||
2661 | return DAG.getNode(ISD::ABS, dl, Op.getValueType(), | |||
2662 | Op.getOperand(1)); | |||
2663 | case Intrinsic::aarch64_neon_smax: | |||
2664 | return DAG.getNode(ISD::SMAX, dl, Op.getValueType(), | |||
2665 | Op.getOperand(1), Op.getOperand(2)); | |||
2666 | case Intrinsic::aarch64_neon_umax: | |||
2667 | return DAG.getNode(ISD::UMAX, dl, Op.getValueType(), | |||
2668 | Op.getOperand(1), Op.getOperand(2)); | |||
2669 | case Intrinsic::aarch64_neon_smin: | |||
2670 | return DAG.getNode(ISD::SMIN, dl, Op.getValueType(), | |||
2671 | Op.getOperand(1), Op.getOperand(2)); | |||
2672 | case Intrinsic::aarch64_neon_umin: | |||
2673 | return DAG.getNode(ISD::UMIN, dl, Op.getValueType(), | |||
2674 | Op.getOperand(1), Op.getOperand(2)); | |||
2675 | } | |||
2676 | } | |||
2677 | ||||
2678 | // Custom lower trunc store for v4i8 vectors, since it is promoted to v4i16. | |||
2679 | static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST, | |||
2680 | EVT VT, EVT MemVT, | |||
2681 | SelectionDAG &DAG) { | |||
2682 | assert(VT.isVector() && "VT should be a vector type"); | |||
2683 | assert(MemVT == MVT::v4i8 && VT == MVT::v4i16); | |||
2684 | ||||
2685 | SDValue Value = ST->getValue(); | |||
2686 | ||||
2687 | // First extend the promoted v4i16 to v8i16, truncate it to v8i8, and | |||
2688 | // extract the word lane which represents the v4i8 subvector. This | |||
2689 | // optimizes the store to: | |||
2690 | // | |||
2691 | // xtn v0.8b, v0.8h | |||
2692 | // str s0, [x0] | |||
2693 | ||||
2694 | SDValue Undef = DAG.getUNDEF(MVT::i16); | |||
2695 | SDValue UndefVec = DAG.getBuildVector(MVT::v4i16, DL, | |||
2696 | {Undef, Undef, Undef, Undef}); | |||
2697 | ||||
2698 | SDValue TruncExt = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, | |||
2699 | Value, UndefVec); | |||
2700 | SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, TruncExt); | |||
2701 | ||||
2702 | Trunc = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Trunc); | |||
2703 | SDValue ExtractTrunc = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, | |||
2704 | Trunc, DAG.getConstant(0, DL, MVT::i64)); | |||
2705 | ||||
2706 | return DAG.getStore(ST->getChain(), DL, ExtractTrunc, | |||
2707 | ST->getBasePtr(), ST->getMemOperand()); | |||
2708 | } | |||
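The net effect of the store lowering above is narrowing four 16-bit lanes to bytes and writing a single 32-bit word; a scalar sketch (modelTruncStoreV4i8 is a hypothetical name):

#include <cstdint>
#include <cstring>

static void modelTruncStoreV4i8(const uint16_t Lanes[4], uint8_t *Ptr) {
  uint8_t Narrow[4];
  for (int I = 0; I != 4; ++I)
    Narrow[I] = static_cast<uint8_t>(Lanes[I]); // xtn v0.8b, v0.8h
  std::memcpy(Ptr, Narrow, 4);                  // str s0, [x0]
}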
2709 | ||||
2710 | // Custom lowering for stores, vector or scalar, truncating or not. | |||
2711 | // Currently we only custom lower truncating stores from vector v4i16 | |||
2712 | // to v4i8. | |||
2713 | SDValue AArch64TargetLowering::LowerSTORE(SDValue Op, | |||
2714 | SelectionDAG &DAG) const { | |||
2715 | SDLoc Dl(Op); | |||
2716 | StoreSDNode *StoreNode = cast<StoreSDNode>(Op); | |||
2717 | assert(StoreNode && "Can only custom lower store nodes"); | |||
2718 | ||||
2719 | SDValue Value = StoreNode->getValue(); | |||
2720 | ||||
2721 | EVT VT = Value.getValueType(); | |||
2722 | EVT MemVT = StoreNode->getMemoryVT(); | |||
2723 | ||||
2724 | assert(VT.isVector() && "Can only custom lower vector store types"); | |||
2725 | ||||
2726 | unsigned AS = StoreNode->getAddressSpace(); | |||
2727 | unsigned Align = StoreNode->getAlignment(); | |||
2728 | if (Align < MemVT.getStoreSize() && | |||
2729 | !allowsMisalignedMemoryAccesses(MemVT, AS, Align, nullptr)) { | |||
2730 | return scalarizeVectorStore(StoreNode, DAG); | |||
2731 | } | |||
2732 | ||||
2733 | if (StoreNode->isTruncatingStore()) { | |||
2734 | return LowerTruncateVectorStore(Dl, StoreNode, VT, MemVT, DAG); | |||
2735 | } | |||
2736 | ||||
2737 | return SDValue(); | |||
2738 | } | |||
2739 | ||||
2740 | SDValue AArch64TargetLowering::LowerOperation(SDValue Op, | |||
2741 | SelectionDAG &DAG) const { | |||
2742 | LLVM_DEBUG(dbgs() << "Custom lowering: "); | |||
2743 | LLVM_DEBUG(Op.dump()); | |||
2744 | ||||
2745 | switch (Op.getOpcode()) { | |||
2746 | default: | |||
2747 | llvm_unreachable("unimplemented operand")::llvm::llvm_unreachable_internal("unimplemented operand", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 2747); | |||
2748 | return SDValue(); | |||
2749 | case ISD::BITCAST: | |||
2750 | return LowerBITCAST(Op, DAG); | |||
2751 | case ISD::GlobalAddress: | |||
2752 | return LowerGlobalAddress(Op, DAG); | |||
2753 | case ISD::GlobalTLSAddress: | |||
2754 | return LowerGlobalTLSAddress(Op, DAG); | |||
2755 | case ISD::SETCC: | |||
2756 | return LowerSETCC(Op, DAG); | |||
2757 | case ISD::BR_CC: | |||
2758 | return LowerBR_CC(Op, DAG); | |||
2759 | case ISD::SELECT: | |||
2760 | return LowerSELECT(Op, DAG); | |||
2761 | case ISD::SELECT_CC: | |||
2762 | return LowerSELECT_CC(Op, DAG); | |||
2763 | case ISD::JumpTable: | |||
2764 | return LowerJumpTable(Op, DAG); | |||
2765 | case ISD::ConstantPool: | |||
2766 | return LowerConstantPool(Op, DAG); | |||
2767 | case ISD::BlockAddress: | |||
2768 | return LowerBlockAddress(Op, DAG); | |||
2769 | case ISD::VASTART: | |||
2770 | return LowerVASTART(Op, DAG); | |||
2771 | case ISD::VACOPY: | |||
2772 | return LowerVACOPY(Op, DAG); | |||
2773 | case ISD::VAARG: | |||
2774 | return LowerVAARG(Op, DAG); | |||
2775 | case ISD::ADDC: | |||
2776 | case ISD::ADDE: | |||
2777 | case ISD::SUBC: | |||
2778 | case ISD::SUBE: | |||
2779 | return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); | |||
2780 | case ISD::SADDO: | |||
2781 | case ISD::UADDO: | |||
2782 | case ISD::SSUBO: | |||
2783 | case ISD::USUBO: | |||
2784 | case ISD::SMULO: | |||
2785 | case ISD::UMULO: | |||
2786 | return LowerXALUO(Op, DAG); | |||
2787 | case ISD::FADD: | |||
2788 | return LowerF128Call(Op, DAG, RTLIB::ADD_F128); | |||
2789 | case ISD::FSUB: | |||
2790 | return LowerF128Call(Op, DAG, RTLIB::SUB_F128); | |||
2791 | case ISD::FMUL: | |||
2792 | return LowerF128Call(Op, DAG, RTLIB::MUL_F128); | |||
2793 | case ISD::FDIV: | |||
2794 | return LowerF128Call(Op, DAG, RTLIB::DIV_F128); | |||
2795 | case ISD::FP_ROUND: | |||
2796 | return LowerFP_ROUND(Op, DAG); | |||
2797 | case ISD::FP_EXTEND: | |||
2798 | return LowerFP_EXTEND(Op, DAG); | |||
2799 | case ISD::FRAMEADDR: | |||
2800 | return LowerFRAMEADDR(Op, DAG); | |||
2801 | case ISD::RETURNADDR: | |||
2802 | return LowerRETURNADDR(Op, DAG); | |||
2803 | case ISD::INSERT_VECTOR_ELT: | |||
2804 | return LowerINSERT_VECTOR_ELT(Op, DAG); | |||
2805 | case ISD::EXTRACT_VECTOR_ELT: | |||
2806 | return LowerEXTRACT_VECTOR_ELT(Op, DAG); | |||
2807 | case ISD::BUILD_VECTOR: | |||
2808 | return LowerBUILD_VECTOR(Op, DAG); | |||
2809 | case ISD::VECTOR_SHUFFLE: | |||
2810 | return LowerVECTOR_SHUFFLE(Op, DAG); | |||
2811 | case ISD::EXTRACT_SUBVECTOR: | |||
2812 | return LowerEXTRACT_SUBVECTOR(Op, DAG); | |||
2813 | case ISD::SRA: | |||
2814 | case ISD::SRL: | |||
2815 | case ISD::SHL: | |||
2816 | return LowerVectorSRA_SRL_SHL(Op, DAG); | |||
2817 | case ISD::SHL_PARTS: | |||
2818 | return LowerShiftLeftParts(Op, DAG); | |||
2819 | case ISD::SRL_PARTS: | |||
2820 | case ISD::SRA_PARTS: | |||
2821 | return LowerShiftRightParts(Op, DAG); | |||
2822 | case ISD::CTPOP: | |||
2823 | return LowerCTPOP(Op, DAG); | |||
2824 | case ISD::FCOPYSIGN: | |||
2825 | return LowerFCOPYSIGN(Op, DAG); | |||
2826 | case ISD::AND: | |||
2827 | return LowerVectorAND(Op, DAG); | |||
2828 | case ISD::OR: | |||
2829 | return LowerVectorOR(Op, DAG); | |||
2830 | case ISD::XOR: | |||
2831 | return LowerXOR(Op, DAG); | |||
2832 | case ISD::PREFETCH: | |||
2833 | return LowerPREFETCH(Op, DAG); | |||
2834 | case ISD::SINT_TO_FP: | |||
2835 | case ISD::UINT_TO_FP: | |||
2836 | return LowerINT_TO_FP(Op, DAG); | |||
2837 | case ISD::FP_TO_SINT: | |||
2838 | case ISD::FP_TO_UINT: | |||
2839 | return LowerFP_TO_INT(Op, DAG); | |||
2840 | case ISD::FSINCOS: | |||
2841 | return LowerFSINCOS(Op, DAG); | |||
2842 | case ISD::FLT_ROUNDS_: | |||
2843 | return LowerFLT_ROUNDS_(Op, DAG); | |||
2844 | case ISD::MUL: | |||
2845 | return LowerMUL(Op, DAG); | |||
2846 | case ISD::MULHS: | |||
2847 | case ISD::MULHU: | |||
2848 | return LowerMULH(Op, DAG); | |||
2849 | case ISD::INTRINSIC_WO_CHAIN: | |||
2850 | return LowerINTRINSIC_WO_CHAIN(Op, DAG); | |||
2851 | case ISD::STORE: | |||
2852 | return LowerSTORE(Op, DAG); | |||
2853 | case ISD::VECREDUCE_ADD: | |||
2854 | case ISD::VECREDUCE_SMAX: | |||
2855 | case ISD::VECREDUCE_SMIN: | |||
2856 | case ISD::VECREDUCE_UMAX: | |||
2857 | case ISD::VECREDUCE_UMIN: | |||
2858 | case ISD::VECREDUCE_FMAX: | |||
2859 | case ISD::VECREDUCE_FMIN: | |||
2860 | return LowerVECREDUCE(Op, DAG); | |||
2861 | case ISD::ATOMIC_LOAD_SUB: | |||
2862 | return LowerATOMIC_LOAD_SUB(Op, DAG); | |||
2863 | case ISD::ATOMIC_LOAD_AND: | |||
2864 | return LowerATOMIC_LOAD_AND(Op, DAG); | |||
2865 | case ISD::DYNAMIC_STACKALLOC: | |||
2866 | return LowerDYNAMIC_STACKALLOC(Op, DAG); | |||
2867 | } | |||
2868 | } | |||
2869 | ||||
2870 | //===----------------------------------------------------------------------===// | |||
2871 | // Calling Convention Implementation | |||
2872 | //===----------------------------------------------------------------------===// | |||
2873 | ||||
2874 | #include "AArch64GenCallingConv.inc" | |||
2875 | ||||
2876 | /// Selects the correct CCAssignFn for a given CallingConvention value. | |||
2877 | CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC, | |||
2878 | bool IsVarArg) const { | |||
2879 | switch (CC) { | |||
2880 | default: | |||
2881 | report_fatal_error("Unsupported calling convention."); | |||
2882 | case CallingConv::WebKit_JS: | |||
2883 | return CC_AArch64_WebKit_JS; | |||
2884 | case CallingConv::GHC: | |||
2885 | return CC_AArch64_GHC; | |||
2886 | case CallingConv::C: | |||
2887 | case CallingConv::Fast: | |||
2888 | case CallingConv::PreserveMost: | |||
2889 | case CallingConv::CXX_FAST_TLS: | |||
2890 | case CallingConv::Swift: | |||
2891 | if (Subtarget->isTargetWindows() && IsVarArg) | |||
2892 | return CC_AArch64_Win64_VarArg; | |||
2893 | if (!Subtarget->isTargetDarwin()) | |||
2894 | return CC_AArch64_AAPCS; | |||
2895 | return IsVarArg ? CC_AArch64_DarwinPCS_VarArg : CC_AArch64_DarwinPCS; | |||
2896 | case CallingConv::Win64: | |||
2897 | return IsVarArg ? CC_AArch64_Win64_VarArg : CC_AArch64_AAPCS; | |||
2898 | } | |||
2899 | } | |||
2900 | ||||
2901 | CCAssignFn * | |||
2902 | AArch64TargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const { | |||
2903 | return CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS | |||
2904 | : RetCC_AArch64_AAPCS; | |||
2905 | } | |||
2906 | ||||
2907 | SDValue AArch64TargetLowering::LowerFormalArguments( | |||
2908 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, | |||
2909 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, | |||
2910 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { | |||
2911 | MachineFunction &MF = DAG.getMachineFunction(); | |||
2912 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
2913 | bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()); | |||
2914 | ||||
2915 | // Assign locations to all of the incoming arguments. | |||
2916 | SmallVector<CCValAssign, 16> ArgLocs; | |||
2917 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, | |||
2918 | *DAG.getContext()); | |||
2919 | ||||
2920 | // At this point, Ins[].VT may already be promoted to i32. To correctly | |||
2921 | // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and | |||
2922 | // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT. | |||
2923 | // Since AnalyzeFormalArguments uses Ins[].VT for both ValVT and LocVT, here | |||
2924 | // we use a special version of AnalyzeFormalArguments to pass in ValVT and | |||
2925 | // LocVT. | |||
2926 | unsigned NumArgs = Ins.size(); | |||
2927 | Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin(); | |||
2928 | unsigned CurArgIdx = 0; | |||
2929 | for (unsigned i = 0; i != NumArgs; ++i) { | |||
2930 | MVT ValVT = Ins[i].VT; | |||
2931 | if (Ins[i].isOrigArg()) { | |||
2932 | std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx); | |||
2933 | CurArgIdx = Ins[i].getOrigArgIndex(); | |||
2934 | ||||
2935 | // Get type of the original argument. | |||
2936 | EVT ActualVT = getValueType(DAG.getDataLayout(), CurOrigArg->getType(), | |||
2937 | /*AllowUnknown*/ true); | |||
2938 | MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other; | |||
2939 | // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16. | |||
2940 | if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8) | |||
2941 | ValVT = MVT::i8; | |||
2942 | else if (ActualMVT == MVT::i16) | |||
2943 | ValVT = MVT::i16; | |||
2944 | } | |||
2945 | CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, /*IsVarArg=*/false); | |||
2946 | bool Res = | |||
2947 | AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo); | |||
2948 | assert(!Res && "Call operand has unhandled type"); | |||
2949 | (void)Res; | |||
2950 | } | |||
2951 | assert(ArgLocs.size() == Ins.size()); | |||
2952 | SmallVector<SDValue, 16> ArgValues; | |||
2953 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { | |||
2954 | CCValAssign &VA = ArgLocs[i]; | |||
2955 | ||||
2956 | if (Ins[i].Flags.isByVal()) { | |||
2957 | // Byval is used for HFAs in the PCS, but the system should work in a | |||
2958 | // non-compliant manner for larger structs. | |||
2959 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2960 | int Size = Ins[i].Flags.getByValSize(); | |||
2961 | unsigned NumRegs = (Size + 7) / 8; | |||
2962 | ||||
2963 | // FIXME: This works on big-endian for composite byvals, which are the | |||
2964 | // common case. It should also work for fundamental types. | |||
2965 | unsigned FrameIdx = | |||
2966 | MFI.CreateFixedObject(8 * NumRegs, VA.getLocMemOffset(), false); | |||
2967 | SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrVT); | |||
2968 | InVals.push_back(FrameIdxN); | |||
2969 | ||||
2970 | continue; | |||
2971 | } | |||
2972 | ||||
2973 | if (VA.isRegLoc()) { | |||
2974 | // Arguments stored in registers. | |||
2975 | EVT RegVT = VA.getLocVT(); | |||
2976 | ||||
2977 | SDValue ArgValue; | |||
2978 | const TargetRegisterClass *RC; | |||
2979 | ||||
2980 | if (RegVT == MVT::i32) | |||
2981 | RC = &AArch64::GPR32RegClass; | |||
2982 | else if (RegVT == MVT::i64) | |||
2983 | RC = &AArch64::GPR64RegClass; | |||
2984 | else if (RegVT == MVT::f16) | |||
2985 | RC = &AArch64::FPR16RegClass; | |||
2986 | else if (RegVT == MVT::f32) | |||
2987 | RC = &AArch64::FPR32RegClass; | |||
2988 | else if (RegVT == MVT::f64 || RegVT.is64BitVector()) | |||
2989 | RC = &AArch64::FPR64RegClass; | |||
2990 | else if (RegVT == MVT::f128 || RegVT.is128BitVector()) | |||
2991 | RC = &AArch64::FPR128RegClass; | |||
2992 | else | |||
2993 | llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering")::llvm::llvm_unreachable_internal("RegVT not supported by FORMAL_ARGUMENTS Lowering" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 2993); | |||
2994 | ||||
2995 | // Transform the arguments in physical registers into virtual ones. | |||
2996 | unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); | |||
2997 | ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT); | |||
2998 | ||||
2999 | // If this is an 8, 16 or 32-bit value, it is really passed promoted | |||
3000 | // to 64 bits. Insert an assert[sz]ext to capture this, then | |||
3001 | // truncate to the right size. | |||
3002 | switch (VA.getLocInfo()) { | |||
3003 | default: | |||
3004 | llvm_unreachable("Unknown loc info!")::llvm::llvm_unreachable_internal("Unknown loc info!", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 3004); | |||
3005 | case CCValAssign::Full: | |||
3006 | break; | |||
3007 | case CCValAssign::BCvt: | |||
3008 | ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), ArgValue); | |||
3009 | break; | |||
3010 | case CCValAssign::AExt: | |||
3011 | case CCValAssign::SExt: | |||
3012 | case CCValAssign::ZExt: | |||
3013 | // SelectionDAGBuilder will insert appropriate AssertZExt & AssertSExt | |||
3014 | // nodes after our lowering. | |||
3015 | assert(RegVT == Ins[i].VT && "incorrect register location selected"); | |||
3016 | break; | |||
3017 | } | |||
3018 | ||||
3019 | InVals.push_back(ArgValue); | |||
3020 | ||||
3021 | } else { // VA.isRegLoc() | |||
3022 | assert(VA.isMemLoc() && "CCValAssign is neither reg nor mem"); | |||
3023 | unsigned ArgOffset = VA.getLocMemOffset(); | |||
3024 | unsigned ArgSize = VA.getValVT().getSizeInBits() / 8; | |||
3025 | ||||
3026 | uint32_t BEAlign = 0; | |||
3027 | if (!Subtarget->isLittleEndian() && ArgSize < 8 && | |||
3028 | !Ins[i].Flags.isInConsecutiveRegs()) | |||
3029 | BEAlign = 8 - ArgSize; | |||
3030 | ||||
3031 | int FI = MFI.CreateFixedObject(ArgSize, ArgOffset + BEAlign, true); | |||
3032 | ||||
3033 | // Create load nodes to retrieve arguments from the stack. | |||
3034 | SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); | |||
3035 | SDValue ArgValue; | |||
3036 | ||||
3037 | // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT. | |||
3038 | ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; | |||
3039 | MVT MemVT = VA.getValVT(); | |||
3040 | ||||
3041 | switch (VA.getLocInfo()) { | |||
3042 | default: | |||
3043 | break; | |||
3044 | case CCValAssign::BCvt: | |||
3045 | MemVT = VA.getLocVT(); | |||
3046 | break; | |||
3047 | case CCValAssign::SExt: | |||
3048 | ExtType = ISD::SEXTLOAD; | |||
3049 | break; | |||
3050 | case CCValAssign::ZExt: | |||
3051 | ExtType = ISD::ZEXTLOAD; | |||
3052 | break; | |||
3053 | case CCValAssign::AExt: | |||
3054 | ExtType = ISD::EXTLOAD; | |||
3055 | break; | |||
3056 | } | |||
3057 | ||||
3058 | ArgValue = DAG.getExtLoad( | |||
3059 | ExtType, DL, VA.getLocVT(), Chain, FIN, | |||
3060 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), | |||
3061 | MemVT); | |||
3062 | ||||
3063 | InVals.push_back(ArgValue); | |||
3064 | } | |||
3065 | } | |||
3066 | ||||
3067 | // varargs | |||
3068 | AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); | |||
3069 | if (isVarArg) { | |||
3070 | if (!Subtarget->isTargetDarwin() || IsWin64) { | |||
3071 | // The AAPCS variadic function ABI is identical to the non-variadic | |||
3072 | // one. As a result there may be more arguments in registers and we should | |||
3073 | // save them for future reference. | |||
3074 | // Win64 variadic functions also pass arguments in registers, but all float | |||
3075 | // arguments are passed in integer registers. | |||
3076 | saveVarArgRegisters(CCInfo, DAG, DL, Chain); | |||
3077 | } | |||
3078 | ||||
3079 | // This will point to the next argument passed via stack. | |||
3080 | unsigned StackOffset = CCInfo.getNextStackOffset(); | |||
3081 | // We currently pass all varargs at 8-byte alignment. | |||
3082 | StackOffset = ((StackOffset + 7) & ~7); | |||
3083 | FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true)); | |||
3084 | } | |||
3085 | ||||
3086 | unsigned StackArgSize = CCInfo.getNextStackOffset(); | |||
3087 | bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; | |||
3088 | if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) { | |||
3089 | // This is a non-standard ABI so by fiat I say we're allowed to make full | |||
3090 | // use of the stack area to be popped, which must be aligned to 16 bytes in | |||
3091 | // any case: | |||
3092 | StackArgSize = alignTo(StackArgSize, 16); | |||
3093 | ||||
3094 | // If we're expected to restore the stack (e.g. fastcc) then we'll be adding | |||
3095 | // a multiple of 16. | |||
3096 | FuncInfo->setArgumentStackToRestore(StackArgSize); | |||
3097 | ||||
3098 | // This realignment carries over to the available bytes below. Our own | |||
3099 | // callers will guarantee the space is free by giving an aligned value to | |||
3100 | // CALLSEQ_START. | |||
3101 | } | |||
3102 | // Even if we're not expected to free up the space, it's useful to know how | |||
3103 | // much is there while considering tail calls (because we can reuse it). | |||
3104 | FuncInfo->setBytesInStackArgArea(StackArgSize); | |||
3105 | ||||
3106 | return Chain; | |||
3107 | } | |||
3108 | ||||
3109 | void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo, | |||
3110 | SelectionDAG &DAG, | |||
3111 | const SDLoc &DL, | |||
3112 | SDValue &Chain) const { | |||
3113 | MachineFunction &MF = DAG.getMachineFunction(); | |||
3114 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
3115 | AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); | |||
3116 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
3117 | bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()); | |||
3118 | ||||
3119 | SmallVector<SDValue, 8> MemOps; | |||
3120 | ||||
3121 | static const MCPhysReg GPRArgRegs[] = { AArch64::X0, AArch64::X1, AArch64::X2, | |||
3122 | AArch64::X3, AArch64::X4, AArch64::X5, | |||
3123 | AArch64::X6, AArch64::X7 }; | |||
3124 | static const unsigned NumGPRArgRegs = array_lengthof(GPRArgRegs); | |||
3125 | unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs); | |||
3126 | ||||
3127 | unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR); | |||
3128 | int GPRIdx = 0; | |||
3129 | if (GPRSaveSize != 0) { | |||
3130 | if (IsWin64) { | |||
3131 | GPRIdx = MFI.CreateFixedObject(GPRSaveSize, -(int)GPRSaveSize, false); | |||
3132 | if (GPRSaveSize & 15) | |||
3133 | // The extra size here, if triggered, will always be 8. | |||
3134 | MFI.CreateFixedObject(16 - (GPRSaveSize & 15), -(int)alignTo(GPRSaveSize, 16), false); | |||
3135 | } else | |||
3136 | GPRIdx = MFI.CreateStackObject(GPRSaveSize, 8, false); | |||
3137 | ||||
3138 | SDValue FIN = DAG.getFrameIndex(GPRIdx, PtrVT); | |||
3139 | ||||
3140 | for (unsigned i = FirstVariadicGPR; i < NumGPRArgRegs; ++i) { | |||
3141 | unsigned VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass); | |||
3142 | SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64); | |||
3143 | SDValue Store = DAG.getStore( | |||
3144 | Val.getValue(1), DL, Val, FIN, | |||
3145 | IsWin64 | |||
3146 | ? MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), | |||
3147 | GPRIdx, | |||
3148 | (i - FirstVariadicGPR) * 8) | |||
3149 | : MachinePointerInfo::getStack(DAG.getMachineFunction(), i * 8)); | |||
3150 | MemOps.push_back(Store); | |||
3151 | FIN = | |||
3152 | DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getConstant(8, DL, PtrVT)); | |||
3153 | } | |||
3154 | } | |||
3155 | FuncInfo->setVarArgsGPRIndex(GPRIdx); | |||
3156 | FuncInfo->setVarArgsGPRSize(GPRSaveSize); | |||
3157 | ||||
3158 | if (Subtarget->hasFPARMv8() && !IsWin64) { | |||
3159 | static const MCPhysReg FPRArgRegs[] = { | |||
3160 | AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, | |||
3161 | AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7}; | |||
3162 | static const unsigned NumFPRArgRegs = array_lengthof(FPRArgRegs); | |||
3163 | unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs); | |||
3164 | ||||
3165 | unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR); | |||
3166 | int FPRIdx = 0; | |||
3167 | if (FPRSaveSize != 0) { | |||
3168 | FPRIdx = MFI.CreateStackObject(FPRSaveSize, 16, false); | |||
3169 | ||||
3170 | SDValue FIN = DAG.getFrameIndex(FPRIdx, PtrVT); | |||
3171 | ||||
3172 | for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) { | |||
3173 | unsigned VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass); | |||
3174 | SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128); | |||
3175 | ||||
3176 | SDValue Store = DAG.getStore( | |||
3177 | Val.getValue(1), DL, Val, FIN, | |||
3178 | MachinePointerInfo::getStack(DAG.getMachineFunction(), i * 16)); | |||
3179 | MemOps.push_back(Store); | |||
3180 | FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, | |||
3181 | DAG.getConstant(16, DL, PtrVT)); | |||
3182 | } | |||
3183 | } | |||
3184 | FuncInfo->setVarArgsFPRIndex(FPRIdx); | |||
3185 | FuncInfo->setVarArgsFPRSize(FPRSaveSize); | |||
3186 | } | |||
3187 | ||||
3188 | if (!MemOps.empty()) { | |||
3189 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps); | |||
3190 | } | |||
3191 | } | |||
3192 | ||||
3193 | /// LowerCallResult - Lower the result values of a call into the | |||
3194 | /// appropriate copies out of appropriate physical registers. | |||
3195 | SDValue AArch64TargetLowering::LowerCallResult( | |||
3196 | SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, | |||
3197 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, | |||
3198 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn, | |||
3199 | SDValue ThisVal) const { | |||
3200 | CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS | |||
3201 | ? RetCC_AArch64_WebKit_JS | |||
3202 | : RetCC_AArch64_AAPCS; | |||
3203 | // Assign locations to each value returned by this call. | |||
3204 | SmallVector<CCValAssign, 16> RVLocs; | |||
3205 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, | |||
3206 | *DAG.getContext()); | |||
3207 | CCInfo.AnalyzeCallResult(Ins, RetCC); | |||
3208 | ||||
3209 | // Copy all of the result registers out of their specified physreg. | |||
3210 | for (unsigned i = 0; i != RVLocs.size(); ++i) { | |||
3211 | CCValAssign VA = RVLocs[i]; | |||
3212 | ||||
3213 | // Pass 'this' value directly from the argument to return value, to avoid | |||
3214 | // reg unit interference | |||
3215 | if (i == 0 && isThisReturn) { | |||
3216 | assert(!VA.needsCustom() && VA.getLocVT() == MVT::i64 && | |||
3217 | "unexpected return calling convention register assignment"); | |||
3218 | InVals.push_back(ThisVal); | |||
3219 | continue; | |||
3220 | } | |||
3221 | ||||
3222 | SDValue Val = | |||
3223 | DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag); | |||
3224 | Chain = Val.getValue(1); | |||
3225 | InFlag = Val.getValue(2); | |||
3226 | ||||
3227 | switch (VA.getLocInfo()) { | |||
3228 | default: | |||
3229 | llvm_unreachable("Unknown loc info!")::llvm::llvm_unreachable_internal("Unknown loc info!", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 3229); | |||
3230 | case CCValAssign::Full: | |||
3231 | break; | |||
3232 | case CCValAssign::BCvt: | |||
3233 | Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); | |||
3234 | break; | |||
3235 | } | |||
3236 | ||||
3237 | InVals.push_back(Val); | |||
3238 | } | |||
3239 | ||||
3240 | return Chain; | |||
3241 | } | |||
3242 | ||||
3243 | /// Return true if the calling convention is one that we can guarantee TCO for. | |||
3244 | static bool canGuaranteeTCO(CallingConv::ID CC) { | |||
3245 | return CC == CallingConv::Fast; | |||
3246 | } | |||
3247 | ||||
3248 | /// Return true if we might ever do TCO for calls with this calling convention. | |||
3249 | static bool mayTailCallThisCC(CallingConv::ID CC) { | |||
3250 | switch (CC) { | |||
3251 | case CallingConv::C: | |||
3252 | case CallingConv::PreserveMost: | |||
3253 | case CallingConv::Swift: | |||
3254 | return true; | |||
3255 | default: | |||
3256 | return canGuaranteeTCO(CC); | |||
3257 | } | |||
3258 | } | |||
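| // Illustrative sketch (hypothetical IR, assuming the conventions above): | |||
| //   define void @caller() { | |||
| //     tail call void @callee()         ; CallingConv::C: may tail call | |||
| //     tail call fastcc void @fast_fn() ; CallingConv::Fast: canGuaranteeTCO | |||
| //     ret void | |||
| //   } | |||
| // Both reach the eligibility check below; a convention outside this list, | |||
| // e.g. webkit_jscc, is rejected immediately by mayTailCallThisCC. | |||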
3259 | ||||
3260 | bool AArch64TargetLowering::isEligibleForTailCallOptimization( | |||
3261 | SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, | |||
3262 | const SmallVectorImpl<ISD::OutputArg> &Outs, | |||
3263 | const SmallVectorImpl<SDValue> &OutVals, | |||
3264 | const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { | |||
3265 | if (!mayTailCallThisCC(CalleeCC)) | |||
3266 | return false; | |||
3267 | ||||
3268 | MachineFunction &MF = DAG.getMachineFunction(); | |||
3269 | const Function &CallerF = MF.getFunction(); | |||
3270 | CallingConv::ID CallerCC = CallerF.getCallingConv(); | |||
3271 | bool CCMatch = CallerCC == CalleeCC; | |||
3272 | ||||
3273 | // Byval parameters hand the function a pointer directly into the stack area | |||
3274 | // we want to reuse during a tail call. Working around this *is* possible (see | |||
3275 | // X86) but less efficient and uglier in LowerCall. | |||
3276 | for (Function::const_arg_iterator i = CallerF.arg_begin(), | |||
3277 | e = CallerF.arg_end(); | |||
3278 | i != e; ++i) | |||
3279 | if (i->hasByValAttr()) | |||
3280 | return false; | |||
3281 | ||||
3282 | if (getTargetMachine().Options.GuaranteedTailCallOpt) | |||
3283 | return canGuaranteeTCO(CalleeCC) && CCMatch; | |||
3284 | ||||
3285 | // Externally-defined functions with weak linkage should not be | |||
3286 | // tail-called on AArch64 when the OS does not support dynamic | |||
3287 | // pre-emption of symbols, as the AAELF spec requires normal calls | |||
3288 | // to undefined weak functions to be replaced with a NOP or jump to the | |||
3289 | // next instruction. The behaviour of branch instructions in this | |||
3290 | // situation (as used for tail calls) is implementation-defined, so we | |||
3291 | // cannot rely on the linker replacing the tail call with a return. | |||
3292 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { | |||
3293 | const GlobalValue *GV = G->getGlobal(); | |||
3294 | const Triple &TT = getTargetMachine().getTargetTriple(); | |||
3295 | if (GV->hasExternalWeakLinkage() && | |||
3296 | (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO())) | |||
3297 | return false; | |||
3298 | } | |||
3299 | ||||
3300 | // Now we search for cases where we can use a tail call without changing the | |||
3301 | // ABI. Sibcall is used in some places (particularly gcc) to refer to this | |||
3302 | // concept. | |||
3303 | ||||
3304 | // I want anyone implementing a new calling convention to think long and hard | |||
3305 | // about this assert. | |||
3306 | assert((!isVarArg || CalleeCC == CallingConv::C) && | |||
3307 | "Unexpected variadic calling convention"); | |||
3308 | ||||
3309 | LLVMContext &C = *DAG.getContext(); | |||
3310 | if (isVarArg && !Outs.empty()) { | |||
3311 | // At least two cases here: if caller is fastcc then we can't have any | |||
3312 | // memory arguments (we'd be expected to clean up the stack afterwards). If | |||
3313 | // caller is C then we could potentially use its argument area. | |||
3314 | ||||
3315 | // FIXME: for now we take the most conservative of these in both cases: | |||
3316 | // disallow all variadic memory operands. | |||
3317 | SmallVector<CCValAssign, 16> ArgLocs; | |||
3318 | CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C); | |||
3319 | ||||
3320 | CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, true)); | |||
3321 | for (const CCValAssign &ArgLoc : ArgLocs) | |||
3322 | if (!ArgLoc.isRegLoc()) | |||
3323 | return false; | |||
3324 | } | |||
3325 | ||||
3326 | // Check that the call results are passed in the same way. | |||
3327 | if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, | |||
3328 | CCAssignFnForCall(CalleeCC, isVarArg), | |||
3329 | CCAssignFnForCall(CallerCC, isVarArg))) | |||
3330 | return false; | |||
3331 | // The callee has to preserve all registers the caller needs to preserve. | |||
3332 | const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
3333 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); | |||
3334 | if (!CCMatch) { | |||
3335 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); | |||
3336 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) | |||
3337 | return false; | |||
3338 | } | |||
3339 | ||||
3340 | // Nothing more to check if the callee is taking no arguments | |||
3341 | if (Outs.empty()) | |||
3342 | return true; | |||
3343 | ||||
3344 | SmallVector<CCValAssign, 16> ArgLocs; | |||
3345 | CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C); | |||
3346 | ||||
3347 | CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg)); | |||
3348 | ||||
3349 | const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); | |||
3350 | ||||
3351 | // If the stack arguments for this call do not fit into our own save area then | |||
3352 | // the call cannot be made tail. | |||
3353 | if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) | |||
3354 | return false; | |||
3355 | ||||
3356 | const MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
3357 | if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals)) | |||
3358 | return false; | |||
3359 | ||||
3360 | return true; | |||
3361 | } | |||
3362 | ||||
3363 | SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain, | |||
3364 | SelectionDAG &DAG, | |||
3365 | MachineFrameInfo &MFI, | |||
3366 | int ClobberedFI) const { | |||
3367 | SmallVector<SDValue, 8> ArgChains; | |||
3368 | int64_t FirstByte = MFI.getObjectOffset(ClobberedFI); | |||
3369 | int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1; | |||
3370 | ||||
3371 | // Include the original chain at the beginning of the list. When this is | |||
3372 | // used by target LowerCall hooks, this helps legalize find the | |||
3373 | // CALLSEQ_BEGIN node. | |||
3374 | ArgChains.push_back(Chain); | |||
3375 | ||||
3376 | // Add a chain value for each stack-argument load that overlaps the | |||
3376 | // clobbered object. | |||
3377 | for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(), | |||
3378 | UE = DAG.getEntryNode().getNode()->use_end(); | |||
3379 | U != UE; ++U) | |||
3380 | if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) | |||
3381 | if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) | |||
3382 | if (FI->getIndex() < 0) { | |||
3383 | int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex()); | |||
3384 | int64_t InLastByte = InFirstByte; | |||
3385 | InLastByte += MFI.getObjectSize(FI->getIndex()) - 1; | |||
3386 | ||||
3387 | if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) || | |||
3388 | (FirstByte <= InFirstByte && InFirstByte <= LastByte)) | |||
3389 | ArgChains.push_back(SDValue(L, 1)); | |||
3390 | } | |||
3391 | ||||
3392 | // Build a tokenfactor for all the chains. | |||
3393 | return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); | |||
3394 | } | |||
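| // Worked overlap check (illustrative numbers): a clobbered object at | |||
| // offset -16 with size 8 spans bytes [-16, -9]. An incoming-argument load | |||
| // spanning [-12, -5] fails the first test (-12 <= -16 is false) but passes | |||
| // the second (-16 <= -12 && -12 <= -9), so its chain is added above. | |||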
3395 | ||||
3396 | bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC, | |||
3397 | bool TailCallOpt) const { | |||
3398 | return CallCC == CallingConv::Fast && TailCallOpt; | |||
3399 | } | |||
3400 | ||||
3401 | /// LowerCall - Lower a call to a callseq_start + CALL + callseq_end chain, | |||
3402 | /// and add input and output parameter nodes. | |||
3403 | SDValue | |||
3404 | AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, | |||
3405 | SmallVectorImpl<SDValue> &InVals) const { | |||
3406 | SelectionDAG &DAG = CLI.DAG; | |||
3407 | SDLoc &DL = CLI.DL; | |||
3408 | SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; | |||
3409 | SmallVector<SDValue, 32> &OutVals = CLI.OutVals; | |||
3410 | SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; | |||
3411 | SDValue Chain = CLI.Chain; | |||
3412 | SDValue Callee = CLI.Callee; | |||
3413 | bool &IsTailCall = CLI.IsTailCall; | |||
3414 | CallingConv::ID CallConv = CLI.CallConv; | |||
3415 | bool IsVarArg = CLI.IsVarArg; | |||
3416 | ||||
3417 | MachineFunction &MF = DAG.getMachineFunction(); | |||
3418 | bool IsThisReturn = false; | |||
3419 | ||||
3420 | AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); | |||
3421 | bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; | |||
3422 | bool IsSibCall = false; | |||
3423 | ||||
3424 | if (IsTailCall) { | |||
3425 | // Check if it's really possible to do a tail call. | |||
3426 | IsTailCall = isEligibleForTailCallOptimization( | |||
3427 | Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG); | |||
3428 | if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) | |||
3429 | report_fatal_error("failed to perform tail call elimination on a call " | |||
3430 | "site marked musttail"); | |||
3431 | ||||
3432 | // A sibling call is one where we're under the usual C ABI and not planning | |||
3433 | // to change that but can still do a tail call: | |||
3434 | if (!TailCallOpt && IsTailCall) | |||
3435 | IsSibCall = true; | |||
3436 | ||||
3437 | if (IsTailCall) | |||
3438 | ++NumTailCalls; | |||
3439 | } | |||
3440 | ||||
3441 | // Analyze operands of the call, assigning locations to each operand. | |||
3442 | SmallVector<CCValAssign, 16> ArgLocs; | |||
3443 | CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, | |||
3444 | *DAG.getContext()); | |||
3445 | ||||
3446 | if (IsVarArg) { | |||
3447 | // Handle fixed and variable vector arguments differently. | |||
3448 | // Variable vector arguments always go into memory. | |||
3449 | unsigned NumArgs = Outs.size(); | |||
3450 | ||||
3451 | for (unsigned i = 0; i != NumArgs; ++i) { | |||
3452 | MVT ArgVT = Outs[i].VT; | |||
3453 | ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; | |||
3454 | CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, | |||
3455 | /*IsVarArg=*/ !Outs[i].IsFixed); | |||
3456 | bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo); | |||
3457 | assert(!Res && "Call operand has unhandled type"); | |||
3458 | (void)Res; | |||
3459 | } | |||
3460 | } else { | |||
3461 | // At this point, Outs[].VT may already be promoted to i32. To correctly | |||
3462 | // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and | |||
3463 | // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT. | |||
3464 | // Since AnalyzeCallOperands uses Ins[].VT for both ValVT and LocVT, here | |||
3465 | // we use a special version of AnalyzeCallOperands to pass in ValVT and | |||
3466 | // LocVT. | |||
3467 | unsigned NumArgs = Outs.size(); | |||
3468 | for (unsigned i = 0; i != NumArgs; ++i) { | |||
3469 | MVT ValVT = Outs[i].VT; | |||
3470 | // Get type of the original argument. | |||
3471 | EVT ActualVT = getValueType(DAG.getDataLayout(), | |||
3472 | CLI.getArgs()[Outs[i].OrigArgIndex].Ty, | |||
3473 | /*AllowUnknown*/ true); | |||
3474 | MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : ValVT; | |||
3475 | ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; | |||
3476 | // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16. | |||
3477 | if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8) | |||
3478 | ValVT = MVT::i8; | |||
3479 | else if (ActualMVT == MVT::i16) | |||
3480 | ValVT = MVT::i16; | |||
3481 | ||||
3482 | CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, /*IsVarArg=*/false); | |||
3483 | bool Res = AssignFn(i, ValVT, ValVT, CCValAssign::Full, ArgFlags, CCInfo); | |||
3484 | assert(!Res && "Call operand has unhandled type"); | |||
3485 | (void)Res; | |||
3486 | } | |||
3487 | } | |||
3488 | ||||
3489 | // Get a count of how many bytes are to be pushed on the stack. | |||
3490 | unsigned NumBytes = CCInfo.getNextStackOffset(); | |||
3491 | ||||
3492 | if (IsSibCall) { | |||
3493 | // Since we're not changing the ABI to make this a tail call, the memory | |||
3494 | // operands are already available in the caller's incoming argument space. | |||
3495 | NumBytes = 0; | |||
3496 | } | |||
3497 | ||||
3498 | // FPDiff is the byte offset of the call's argument area from the callee's. | |||
3499 | // Stores to callee stack arguments will be placed in FixedStackSlots offset | |||
3500 | // by this amount for a tail call. In a sibling call it must be 0 because the | |||
3501 | // caller will deallocate the entire stack and the callee still expects its | |||
3502 | // arguments to begin at SP+0. Completely unused for non-tail calls. | |||
3503 | int FPDiff = 0; | |||
3504 | ||||
3505 | if (IsTailCall && !IsSibCall) { | |||
3506 | unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea(); | |||
3507 | ||||
3508 | // Since callee will pop argument stack as a tail call, we must keep the | |||
3509 | // popped size 16-byte aligned. | |||
3510 | NumBytes = alignTo(NumBytes, 16); | |||
3511 | ||||
3512 | // FPDiff will be negative if this tail call requires more space than we | |||
3513 | // would automatically have in our incoming argument space. Positive if we | |||
3514 | // can actually shrink the stack. | |||
3515 | FPDiff = NumReusableBytes - NumBytes; | |||
3516 | ||||
3517 | // The stack pointer must be 16-byte aligned at all times it's used for a | |||
3518 | // memory operation, which in practice means at *all* times and in | |||
3519 | // particular across call boundaries. Therefore our own arguments started at | |||
3520 | // a 16-byte aligned SP and the delta applied for the tail call should | |||
3521 | // satisfy the same constraint. | |||
3522 | assert(FPDiff % 16 == 0 && "unaligned stack on tail call"); | |||
3523 | } | |||
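| // Worked example (illustrative): with NumReusableBytes == 32 and | |||
| // NumBytes == 48 after alignment, FPDiff == 32 - 48 == -16, so callee | |||
| // stack slots are created 16 bytes below where the caller's own | |||
| // arguments began. | |||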
3524 | ||||
3525 | // Adjust the stack pointer for the new arguments... | |||
3526 | // These operations are automatically eliminated by the prolog/epilog pass | |||
3527 | if (!IsSibCall) | |||
3528 | Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL); | |||
3529 | ||||
3530 | SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP, | |||
3531 | getPointerTy(DAG.getDataLayout())); | |||
3532 | ||||
3533 | SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; | |||
3534 | SmallVector<SDValue, 8> MemOpChains; | |||
3535 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
3536 | ||||
3537 | // Walk the register/memloc assignments, inserting copies/loads. | |||
3538 | for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e; | |||
3539 | ++i, ++realArgIdx) { | |||
3540 | CCValAssign &VA = ArgLocs[i]; | |||
3541 | SDValue Arg = OutVals[realArgIdx]; | |||
3542 | ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; | |||
3543 | ||||
3544 | // Promote the value if needed. | |||
3545 | switch (VA.getLocInfo()) { | |||
3546 | default: | |||
3547 | llvm_unreachable("Unknown loc info!"); | |||
3548 | case CCValAssign::Full: | |||
3549 | break; | |||
3550 | case CCValAssign::SExt: | |||
3551 | Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); | |||
3552 | break; | |||
3553 | case CCValAssign::ZExt: | |||
3554 | Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); | |||
3555 | break; | |||
3556 | case CCValAssign::AExt: | |||
3557 | if (Outs[realArgIdx].ArgVT == MVT::i1) { | |||
3558 | // AAPCS requires i1 to be zero-extended to 8-bits by the caller. | |||
3559 | Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg); | |||
3560 | Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i8, Arg); | |||
3561 | } | |||
3562 | Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); | |||
3563 | break; | |||
3564 | case CCValAssign::BCvt: | |||
3565 | Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); | |||
3566 | break; | |||
3567 | case CCValAssign::FPExt: | |||
3568 | Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg); | |||
3569 | break; | |||
3570 | } | |||
3571 | ||||
3572 | if (VA.isRegLoc()) { | |||
3573 | if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() && | |||
3574 | Outs[0].VT == MVT::i64) { | |||
3575 | assert(VA.getLocVT() == MVT::i64 && | |||
3576 | "unexpected calling convention register assignment"); | |||
3577 | assert(!Ins.empty() && Ins[0].VT == MVT::i64 && | |||
3578 | "unexpected use of 'returned'"); | |||
3579 | IsThisReturn = true; | |||
3580 | } | |||
3581 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); | |||
3582 | } else { | |||
3583 | assert(VA.isMemLoc()); | |||
3584 | ||||
3585 | SDValue DstAddr; | |||
3586 | MachinePointerInfo DstInfo; | |||
3587 | ||||
3588 | // FIXME: This works on big-endian for composite byvals, which are the | |||
3589 | // common case. It should also work for fundamental types too. | |||
3590 | uint32_t BEAlign = 0; | |||
3591 | unsigned OpSize = Flags.isByVal() ? Flags.getByValSize() * 8 | |||
3592 | : VA.getValVT().getSizeInBits(); | |||
3593 | OpSize = (OpSize + 7) / 8; | |||
3594 | if (!Subtarget->isLittleEndian() && !Flags.isByVal() && | |||
3595 | !Flags.isInConsecutiveRegs()) { | |||
3596 | if (OpSize < 8) | |||
3597 | BEAlign = 8 - OpSize; | |||
3598 | } | |||
3599 | unsigned LocMemOffset = VA.getLocMemOffset(); | |||
3600 | int32_t Offset = LocMemOffset + BEAlign; | |||
3601 | SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL); | |||
3602 | PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff); | |||
3603 | ||||
3604 | if (IsTailCall) { | |||
3605 | Offset = Offset + FPDiff; | |||
3606 | int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); | |||
3607 | ||||
3608 | DstAddr = DAG.getFrameIndex(FI, PtrVT); | |||
3609 | DstInfo = | |||
3610 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); | |||
3611 | ||||
3612 | // Make sure any stack arguments overlapping with where we're storing | |||
3613 | // are loaded before this eventual operation. Otherwise they'll be | |||
3614 | // clobbered. | |||
3615 | Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI); | |||
3616 | } else { | |||
3617 | SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL); | |||
3618 | ||||
3619 | DstAddr = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff); | |||
3620 | DstInfo = MachinePointerInfo::getStack(DAG.getMachineFunction(), | |||
3621 | LocMemOffset); | |||
3622 | } | |||
3623 | ||||
3624 | if (Outs[i].Flags.isByVal()) { | |||
3625 | SDValue SizeNode = | |||
3626 | DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i64); | |||
3627 | SDValue Cpy = DAG.getMemcpy( | |||
3628 | Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(), | |||
3629 | /*isVol = */ false, /*AlwaysInline = */ false, | |||
3630 | /*isTailCall = */ false, | |||
3631 | DstInfo, MachinePointerInfo()); | |||
3632 | ||||
3633 | MemOpChains.push_back(Cpy); | |||
3634 | } else { | |||
3635 | // Since we pass i1/i8/i16 as i1/i8/i16 on stack and Arg is already | |||
3636 | // promoted to a legal register type i32, we should truncate Arg back to | |||
3637 | // i1/i8/i16. | |||
3638 | if (VA.getValVT() == MVT::i1 || VA.getValVT() == MVT::i8 || | |||
3639 | VA.getValVT() == MVT::i16) | |||
3640 | Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg); | |||
3641 | ||||
3642 | SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo); | |||
3643 | MemOpChains.push_back(Store); | |||
3644 | } | |||
3645 | } | |||
3646 | } | |||
3647 | ||||
3648 | if (!MemOpChains.empty()) | |||
3649 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); | |||
3650 | ||||
3651 | // Build a sequence of copy-to-reg nodes chained together with token chain | |||
3652 | // and flag operands which copy the outgoing args into the appropriate regs. | |||
3653 | SDValue InFlag; | |||
3654 | for (auto &RegToPass : RegsToPass) { | |||
3655 | Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first, | |||
3656 | RegToPass.second, InFlag); | |||
3657 | InFlag = Chain.getValue(1); | |||
3658 | } | |||
3659 | ||||
3660 | // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every | |||
3661 | // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol | |||
3662 | // node so that legalize doesn't hack it. | |||
3663 | if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) { | |||
3664 | auto GV = G->getGlobal(); | |||
3665 | if (Subtarget->classifyGlobalFunctionReference(GV, getTargetMachine()) == | |||
3666 | AArch64II::MO_GOT) { | |||
3667 | Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_GOT); | |||
3668 | Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee); | |||
3669 | } else if (Subtarget->isTargetCOFF() && GV->hasDLLImportStorageClass()) { | |||
3670 | assert(Subtarget->isTargetWindows() &&(static_cast <bool> (Subtarget->isTargetWindows() && "Windows is the only supported COFF target") ? void (0) : __assert_fail ("Subtarget->isTargetWindows() && \"Windows is the only supported COFF target\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 3671, __extension__ __PRETTY_FUNCTION__)) | |||
3671 | "Windows is the only supported COFF target")(static_cast <bool> (Subtarget->isTargetWindows() && "Windows is the only supported COFF target") ? void (0) : __assert_fail ("Subtarget->isTargetWindows() && \"Windows is the only supported COFF target\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 3671, __extension__ __PRETTY_FUNCTION__)); | |||
3672 | Callee = getGOT(G, DAG, AArch64II::MO_DLLIMPORT); | |||
3673 | } else { | |||
3674 | const GlobalValue *GV = G->getGlobal(); | |||
3675 | Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0); | |||
3676 | } | |||
3677 | } else if (auto *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { | |||
3678 | if (getTargetMachine().getCodeModel() == CodeModel::Large && | |||
3679 | Subtarget->isTargetMachO()) { | |||
3680 | const char *Sym = S->getSymbol(); | |||
3681 | Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, AArch64II::MO_GOT); | |||
3682 | Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee); | |||
3683 | } else { | |||
3684 | const char *Sym = S->getSymbol(); | |||
3685 | Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0); | |||
3686 | } | |||
3687 | } | |||
3688 | ||||
3689 | // We don't usually want to end the call-sequence here because we would tidy | |||
3690 | // the frame up *after* the call, however in the ABI-changing tail-call case | |||
3691 | // we've carefully laid out the parameters so that when sp is reset they'll be | |||
3692 | // in the correct location. | |||
3693 | if (IsTailCall && !IsSibCall) { | |||
3694 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true), | |||
3695 | DAG.getIntPtrConstant(0, DL, true), InFlag, DL); | |||
3696 | InFlag = Chain.getValue(1); | |||
3697 | } | |||
3698 | ||||
3699 | std::vector<SDValue> Ops; | |||
3700 | Ops.push_back(Chain); | |||
3701 | Ops.push_back(Callee); | |||
3702 | ||||
3703 | if (IsTailCall) { | |||
3704 | // Each tail call may have to adjust the stack by a different amount, so | |||
3705 | // this information must travel along with the operation for eventual | |||
3706 | // consumption by emitEpilogue. | |||
3707 | Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32)); | |||
3708 | } | |||
3709 | ||||
3710 | // Add argument registers to the end of the list so that they are known live | |||
3711 | // into the call. | |||
3712 | for (auto &RegToPass : RegsToPass) | |||
3713 | Ops.push_back(DAG.getRegister(RegToPass.first, | |||
3714 | RegToPass.second.getValueType())); | |||
3715 | ||||
3716 | // Add a register mask operand representing the call-preserved registers. | |||
3717 | const uint32_t *Mask; | |||
3718 | const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
3719 | if (IsThisReturn) { | |||
3720 | // For 'this' returns, use the X0-preserving mask if applicable | |||
3721 | Mask = TRI->getThisReturnPreservedMask(MF, CallConv); | |||
3722 | if (!Mask) { | |||
3723 | IsThisReturn = false; | |||
3724 | Mask = TRI->getCallPreservedMask(MF, CallConv); | |||
3725 | } | |||
3726 | } else | |||
3727 | Mask = TRI->getCallPreservedMask(MF, CallConv); | |||
3728 | ||||
3729 | assert(Mask && "Missing call preserved mask for calling convention"); | |||
3730 | Ops.push_back(DAG.getRegisterMask(Mask)); | |||
3731 | ||||
3732 | if (InFlag.getNode()) | |||
3733 | Ops.push_back(InFlag); | |||
3734 | ||||
3735 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
3736 | ||||
3737 | // If we're doing a tail call, use a TC_RETURN here rather than an | |||
3738 | // actual call instruction. | |||
3739 | if (IsTailCall) { | |||
3740 | MF.getFrameInfo().setHasTailCall(); | |||
3741 | return DAG.getNode(AArch64ISD::TC_RETURN, DL, NodeTys, Ops); | |||
3742 | } | |||
3743 | ||||
3744 | // Returns a chain and a flag for retval copy to use. | |||
3745 | Chain = DAG.getNode(AArch64ISD::CALL, DL, NodeTys, Ops); | |||
3746 | InFlag = Chain.getValue(1); | |||
3747 | ||||
3748 | uint64_t CalleePopBytes = | |||
3749 | DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0; | |||
3750 | ||||
3751 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true), | |||
3752 | DAG.getIntPtrConstant(CalleePopBytes, DL, true), | |||
3753 | InFlag, DL); | |||
3754 | if (!Ins.empty()) | |||
3755 | InFlag = Chain.getValue(1); | |||
3756 | ||||
3757 | // Handle result values, copying them out of physregs into vregs that we | |||
3758 | // return. | |||
3759 | return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG, | |||
3760 | InVals, IsThisReturn, | |||
3761 | IsThisReturn ? OutVals[0] : SDValue()); | |||
3762 | } | |||
3763 | ||||
3764 | bool AArch64TargetLowering::CanLowerReturn( | |||
3765 | CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, | |||
3766 | const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { | |||
3767 | CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS | |||
3768 | ? RetCC_AArch64_WebKit_JS | |||
3769 | : RetCC_AArch64_AAPCS; | |||
3770 | SmallVector<CCValAssign, 16> RVLocs; | |||
3771 | CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); | |||
3772 | return CCInfo.CheckReturn(Outs, RetCC); | |||
3773 | } | |||
3774 | ||||
3775 | SDValue | |||
3776 | AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, | |||
3777 | bool isVarArg, | |||
3778 | const SmallVectorImpl<ISD::OutputArg> &Outs, | |||
3779 | const SmallVectorImpl<SDValue> &OutVals, | |||
3780 | const SDLoc &DL, SelectionDAG &DAG) const { | |||
3781 | CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS | |||
3782 | ? RetCC_AArch64_WebKit_JS | |||
3783 | : RetCC_AArch64_AAPCS; | |||
3784 | SmallVector<CCValAssign, 16> RVLocs; | |||
3785 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, | |||
3786 | *DAG.getContext()); | |||
3787 | CCInfo.AnalyzeReturn(Outs, RetCC); | |||
3788 | ||||
3789 | // Copy the result values into the output registers. | |||
3790 | SDValue Flag; | |||
3791 | SmallVector<SDValue, 4> RetOps(1, Chain); | |||
3792 | for (unsigned i = 0, realRVLocIdx = 0; i != RVLocs.size(); | |||
3793 | ++i, ++realRVLocIdx) { | |||
3794 | CCValAssign &VA = RVLocs[i]; | |||
3795 | assert(VA.isRegLoc() && "Can only return in registers!"); | |||
3796 | SDValue Arg = OutVals[realRVLocIdx]; | |||
3797 | ||||
3798 | switch (VA.getLocInfo()) { | |||
3799 | default: | |||
3800 | llvm_unreachable("Unknown loc info!"); | |||
3801 | case CCValAssign::Full: | |||
3802 | if (Outs[i].ArgVT == MVT::i1) { | |||
3803 | // AAPCS requires i1 to be zero-extended to i8 by the producer of the | |||
3804 | // value. This is strictly redundant on Darwin (which uses "zeroext | |||
3805 | // i1"), but will be optimised out before ISel. | |||
3806 | Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg); | |||
3807 | Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); | |||
3808 | } | |||
3809 | break; | |||
3810 | case CCValAssign::BCvt: | |||
3811 | Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); | |||
3812 | break; | |||
3813 | } | |||
3814 | ||||
3815 | Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); | |||
3816 | Flag = Chain.getValue(1); | |||
3817 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); | |||
3818 | } | |||
3819 | const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
3820 | const MCPhysReg *I = | |||
3821 | TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); | |||
3822 | if (I) { | |||
3823 | for (; *I; ++I) { | |||
3824 | if (AArch64::GPR64RegClass.contains(*I)) | |||
3825 | RetOps.push_back(DAG.getRegister(*I, MVT::i64)); | |||
3826 | else if (AArch64::FPR64RegClass.contains(*I)) | |||
3827 | RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); | |||
3828 | else | |||
3829 | llvm_unreachable("Unexpected register class in CSRsViaCopy!"); | |||
3830 | } | |||
3831 | } | |||
3832 | ||||
3833 | RetOps[0] = Chain; // Update chain. | |||
3834 | ||||
3835 | // Add the flag if we have it. | |||
3836 | if (Flag.getNode()) | |||
3837 | RetOps.push_back(Flag); | |||
3838 | ||||
3839 | return DAG.getNode(AArch64ISD::RET_FLAG, DL, MVT::Other, RetOps); | |||
3840 | } | |||
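| // For illustration (hypothetical IR): 'define i1 @f() { ret i1 true }' | |||
| // takes the CCValAssign::Full path above: the value is truncated to i1 | |||
| // and zero-extended to the i32 location type, so w0 carries 1 with the | |||
| // upper bits cleared, satisfying the producer-side AAPCS i1 rule. | |||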
3841 | ||||
3842 | //===----------------------------------------------------------------------===// | |||
3843 | // Other Lowering Code | |||
3844 | //===----------------------------------------------------------------------===// | |||
3845 | ||||
3846 | SDValue AArch64TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty, | |||
3847 | SelectionDAG &DAG, | |||
3848 | unsigned Flag) const { | |||
3849 | return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, | |||
3850 | N->getOffset(), Flag); | |||
3851 | } | |||
3852 | ||||
3853 | SDValue AArch64TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty, | |||
3854 | SelectionDAG &DAG, | |||
3855 | unsigned Flag) const { | |||
3856 | return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag); | |||
3857 | } | |||
3858 | ||||
3859 | SDValue AArch64TargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty, | |||
3860 | SelectionDAG &DAG, | |||
3861 | unsigned Flag) const { | |||
3862 | return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(), | |||
3863 | N->getOffset(), Flag); | |||
3864 | } | |||
3865 | ||||
3866 | SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode* N, EVT Ty, | |||
3867 | SelectionDAG &DAG, | |||
3868 | unsigned Flag) const { | |||
3869 | return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag); | |||
3870 | } | |||
3871 | ||||
3872 | // (loadGOT sym) | |||
3873 | template <class NodeTy> | |||
3874 | SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG, | |||
3875 | unsigned Flags) const { | |||
3876 | LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getGOT\n"); | |||
3877 | SDLoc DL(N); | |||
3878 | EVT Ty = getPointerTy(DAG.getDataLayout()); | |||
3879 | SDValue GotAddr = getTargetNode(N, Ty, DAG, AArch64II::MO_GOT | Flags); | |||
3880 | // FIXME: Once remat is capable of dealing with instructions with register | |||
3881 | // operands, expand this into two nodes instead of using a wrapper node. | |||
3882 | return DAG.getNode(AArch64ISD::LOADgot, DL, Ty, GotAddr); | |||
3883 | } | |||
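| // Typical emitted form for the LOADgot wrapper (illustrative asm): | |||
| //   adrp x0, :got:sym | |||
| //   ldr  x0, [x0, :got_lo12:sym] | |||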
3884 | ||||
3885 | // (wrapper %highest(sym), %higher(sym), %hi(sym), %lo(sym)) | |||
3886 | template <class NodeTy> | |||
3887 | SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG, | |||
3888 | unsigned Flags) const { | |||
3889 | LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrLarge\n"); | |||
3890 | SDLoc DL(N); | |||
3891 | EVT Ty = getPointerTy(DAG.getDataLayout()); | |||
3892 | const unsigned char MO_NC = AArch64II::MO_NC; | |||
3893 | return DAG.getNode( | |||
3894 | AArch64ISD::WrapperLarge, DL, Ty, | |||
3895 | getTargetNode(N, Ty, DAG, AArch64II::MO_G3 | Flags), | |||
3896 | getTargetNode(N, Ty, DAG, AArch64II::MO_G2 | MO_NC | Flags), | |||
3897 | getTargetNode(N, Ty, DAG, AArch64II::MO_G1 | MO_NC | Flags), | |||
3898 | getTargetNode(N, Ty, DAG, AArch64II::MO_G0 | MO_NC | Flags)); | |||
3899 | } | |||
3900 | ||||
3901 | // (addlow (adrp %hi(sym)) %lo(sym)) | |||
3902 | template <class NodeTy> | |||
3903 | SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG, | |||
3904 | unsigned Flags) const { | |||
3905 | LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddr\n"); | |||
3906 | SDLoc DL(N); | |||
3907 | EVT Ty = getPointerTy(DAG.getDataLayout()); | |||
3908 | SDValue Hi = getTargetNode(N, Ty, DAG, AArch64II::MO_PAGE | Flags); | |||
3909 | SDValue Lo = getTargetNode(N, Ty, DAG, | |||
3910 | AArch64II::MO_PAGEOFF | AArch64II::MO_NC | Flags); | |||
3911 | SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, Ty, Hi); | |||
3912 | return DAG.getNode(AArch64ISD::ADDlow, DL, Ty, ADRP, Lo); | |||
3913 | } | |||
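| // Typical emitted form (illustrative asm for the ADRP/ADDlow pair): | |||
| //   adrp x0, sym            // MO_PAGE | |||
| //   add  x0, x0, :lo12:sym  // MO_PAGEOFF | MO_NC | |||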
3914 | ||||
3915 | SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op, | |||
3916 | SelectionDAG &DAG) const { | |||
3917 | GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op); | |||
3918 | const GlobalValue *GV = GN->getGlobal(); | |||
3919 | const AArch64II::TOF TargetFlags = | |||
3920 | (GV->hasDLLImportStorageClass() ? AArch64II::MO_DLLIMPORT | |||
3921 | : AArch64II::MO_NO_FLAG); | |||
3922 | unsigned char OpFlags = | |||
3923 | Subtarget->ClassifyGlobalReference(GV, getTargetMachine()); | |||
3924 | ||||
3925 | if (OpFlags != AArch64II::MO_NO_FLAG) | |||
3926 | assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 &&(static_cast <bool> (cast<GlobalAddressSDNode>(Op )->getOffset() == 0 && "unexpected offset in global node" ) ? void (0) : __assert_fail ("cast<GlobalAddressSDNode>(Op)->getOffset() == 0 && \"unexpected offset in global node\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 3927, __extension__ __PRETTY_FUNCTION__)) | |||
3927 | "unexpected offset in global node")(static_cast <bool> (cast<GlobalAddressSDNode>(Op )->getOffset() == 0 && "unexpected offset in global node" ) ? void (0) : __assert_fail ("cast<GlobalAddressSDNode>(Op)->getOffset() == 0 && \"unexpected offset in global node\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 3927, __extension__ __PRETTY_FUNCTION__)); | |||
3928 | ||||
3929 | // This also catches the large code model case for Darwin. | |||
3930 | if ((OpFlags & AArch64II::MO_GOT) != 0) { | |||
3931 | return getGOT(GN, DAG, TargetFlags); | |||
3932 | } | |||
3933 | ||||
3934 | SDValue Result; | |||
3935 | if (getTargetMachine().getCodeModel() == CodeModel::Large) { | |||
3936 | Result = getAddrLarge(GN, DAG, TargetFlags); | |||
3937 | } else { | |||
3938 | Result = getAddr(GN, DAG, TargetFlags); | |||
3939 | } | |||
3940 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
3941 | SDLoc DL(GN); | |||
3942 | if (GV->hasDLLImportStorageClass()) | |||
3943 | Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, | |||
3944 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); | |||
3945 | return Result; | |||
3946 | } | |||
3947 | ||||
3948 | /// Convert a TLS address reference into the correct sequence of loads | |||
3949 | /// and calls to compute the variable's address (for Darwin, currently) and | |||
3950 | /// return an SDValue containing the final node. | |||
3951 | ||||
3952 | /// Darwin only has one TLS scheme which must be capable of dealing with the | |||
3953 | /// fully general situation, in the worst case. This means: | |||
3954 | /// + "extern __thread" declaration. | |||
3955 | /// + Defined in a possibly unknown dynamic library. | |||
3956 | /// | |||
3957 | /// The general system is that each __thread variable has a [3 x i64] descriptor | |||
3958 | /// which contains information used by the runtime to calculate the address. The | |||
3959 | /// only part of this the compiler needs to know about is the first xword, which | |||
3960 | /// contains a function pointer that must be called with the address of the | |||
3961 | /// entire descriptor in "x0". | |||
3962 | /// | |||
3963 | /// Since this descriptor may be in a different unit, in general even the | |||
3964 | /// descriptor must be accessed via an indirect load. The "ideal" code sequence | |||
3965 | /// is: | |||
3966 | /// adrp x0, _var@TLVPPAGE | |||
3967 | /// ldr x0, [x0, _var@TLVPPAGEOFF] ; x0 now contains address of descriptor | |||
3968 | /// ldr x1, [x0] ; x1 contains 1st entry of descriptor, | |||
3969 | /// ; the function pointer | |||
3970 | /// blr x1 ; Uses descriptor address in x0 | |||
3971 | /// ; Address of _var is now in x0. | |||
3972 | /// | |||
3973 | /// If the address of _var's descriptor *is* known to the linker, then it can | |||
3974 | /// change the first "ldr" instruction to an appropriate "add x0, x0, #imm" for | |||
3975 | /// a slight efficiency gain. | |||
3976 | SDValue | |||
3977 | AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op, | |||
3978 | SelectionDAG &DAG) const { | |||
3979 | assert(Subtarget->isTargetDarwin() &&(static_cast <bool> (Subtarget->isTargetDarwin() && "This function expects a Darwin target") ? void (0) : __assert_fail ("Subtarget->isTargetDarwin() && \"This function expects a Darwin target\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 3980, __extension__ __PRETTY_FUNCTION__)) | |||
3980 | "This function expects a Darwin target")(static_cast <bool> (Subtarget->isTargetDarwin() && "This function expects a Darwin target") ? void (0) : __assert_fail ("Subtarget->isTargetDarwin() && \"This function expects a Darwin target\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 3980, __extension__ __PRETTY_FUNCTION__)); | |||
3981 | ||||
3982 | SDLoc DL(Op); | |||
3983 | MVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
3984 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); | |||
3985 | ||||
3986 | SDValue TLVPAddr = | |||
3987 | DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS); | |||
3988 | SDValue DescAddr = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TLVPAddr); | |||
3989 | ||||
3990 | // The first entry in the descriptor is a function pointer that we must call | |||
3991 | // to obtain the address of the variable. | |||
3992 | SDValue Chain = DAG.getEntryNode(); | |||
3993 | SDValue FuncTLVGet = DAG.getLoad( | |||
3994 | MVT::i64, DL, Chain, DescAddr, | |||
3995 | MachinePointerInfo::getGOT(DAG.getMachineFunction()), | |||
3996 | /* Alignment = */ 8, | |||
3997 | MachineMemOperand::MONonTemporal | MachineMemOperand::MOInvariant | | |||
3998 | MachineMemOperand::MODereferenceable); | |||
3999 | Chain = FuncTLVGet.getValue(1); | |||
4000 | ||||
4001 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); | |||
4002 | MFI.setAdjustsStack(true); | |||
4003 | ||||
4004 | // TLS calls preserve all registers except those that absolutely must be | |||
4005 | // trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be | |||
4006 | // silly). | |||
4007 | const uint32_t *Mask = | |||
4008 | Subtarget->getRegisterInfo()->getTLSCallPreservedMask(); | |||
4009 | ||||
4010 | // Finally, we can make the call. This is just a degenerate version of a | |||
4011 | // normal AArch64 call node: x0 takes the address of the descriptor, and | |||
4012 | // returns the address of the variable in this thread. | |||
4013 | Chain = DAG.getCopyToReg(Chain, DL, AArch64::X0, DescAddr, SDValue()); | |||
4014 | Chain = | |||
4015 | DAG.getNode(AArch64ISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue), | |||
4016 | Chain, FuncTLVGet, DAG.getRegister(AArch64::X0, MVT::i64), | |||
4017 | DAG.getRegisterMask(Mask), Chain.getValue(1)); | |||
4018 | return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Chain.getValue(1)); | |||
4019 | } | |||
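| // For illustration, a declaration such as 'extern __thread int var;' | |||
| // reaches this path on Darwin and produces the "ideal" sequence from the | |||
| // comment above: adrp/ldr to fetch the descriptor address into x0, an | |||
| // ldr of its first xword, then 'blr' leaving the address of var in x0. | |||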
4020 | ||||
4021 | /// When accessing thread-local variables under either the general-dynamic or | |||
4022 | /// local-dynamic system, we make a "TLS-descriptor" call. The variable will | |||
4023 | /// have a descriptor, accessible via a PC-relative ADRP, and whose first entry | |||
4024 | /// is a function pointer to carry out the resolution. | |||
4025 | /// | |||
4026 | /// The sequence is: | |||
4027 | /// adrp x0, :tlsdesc:var | |||
4028 | /// ldr x1, [x0, #:tlsdesc_lo12:var] | |||
4029 | /// add x0, x0, #:tlsdesc_lo12:var | |||
4030 | /// .tlsdesccall var | |||
4031 | /// blr x1 | |||
4032 | /// (TPIDR_EL0 offset now in x0) | |||
4033 | /// | |||
4034 | /// The above sequence must be produced unscheduled, to enable the linker to | |||
4035 | /// optimize/relax this sequence. | |||
4036 | /// Therefore, a pseudo-instruction (TLSDESC_CALLSEQ) is used to represent the | |||
4037 | /// above sequence, and expanded really late in the compilation flow, to ensure | |||
4038 | /// the sequence is produced as per above. | |||
4039 | SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr, | |||
4040 | const SDLoc &DL, | |||
4041 | SelectionDAG &DAG) const { | |||
4042 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
4043 | ||||
4044 | SDValue Chain = DAG.getEntryNode(); | |||
4045 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
4046 | ||||
4047 | Chain = | |||
4048 | DAG.getNode(AArch64ISD::TLSDESC_CALLSEQ, DL, NodeTys, {Chain, SymAddr}); | |||
4049 | SDValue Glue = Chain.getValue(1); | |||
4050 | ||||
4051 | return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue); | |||
4052 | } | |||
4053 | ||||
4054 | SDValue | |||
4055 | AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op, | |||
4056 | SelectionDAG &DAG) const { | |||
4057 | assert(Subtarget->isTargetELF() && "This function expects an ELF target")(static_cast <bool> (Subtarget->isTargetELF() && "This function expects an ELF target") ? void (0) : __assert_fail ("Subtarget->isTargetELF() && \"This function expects an ELF target\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 4057, __extension__ __PRETTY_FUNCTION__)); | |||
4058 | assert(Subtarget->useSmallAddressing() &&(static_cast <bool> (Subtarget->useSmallAddressing() && "ELF TLS only supported in small memory model") ? void (0) : __assert_fail ("Subtarget->useSmallAddressing() && \"ELF TLS only supported in small memory model\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 4059, __extension__ __PRETTY_FUNCTION__)) | |||
4059 | "ELF TLS only supported in small memory model")(static_cast <bool> (Subtarget->useSmallAddressing() && "ELF TLS only supported in small memory model") ? void (0) : __assert_fail ("Subtarget->useSmallAddressing() && \"ELF TLS only supported in small memory model\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 4059, __extension__ __PRETTY_FUNCTION__)); | |||
4060 | // Different choices can be made for the maximum size of the TLS area for a | |||
4061 | // module. For the small address model, the default TLS size is 16MiB and the | |||
4062 | // maximum TLS size is 4GiB. | |||
4063 | // FIXME: add -mtls-size command line option and make it control the 16MiB | |||
4064 | // vs. 4GiB code sequence generation. | |||
4065 | const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); | |||
4066 | ||||
4067 | TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal()); | |||
4068 | ||||
4069 | if (!EnableAArch64ELFLocalDynamicTLSGeneration) { | |||
4070 | if (Model == TLSModel::LocalDynamic) | |||
4071 | Model = TLSModel::GeneralDynamic; | |||
4072 | } | |||
4073 | ||||
4074 | SDValue TPOff; | |||
4075 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
4076 | SDLoc DL(Op); | |||
4077 | const GlobalValue *GV = GA->getGlobal(); | |||
4078 | ||||
4079 | SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT); | |||
4080 | ||||
4081 | if (Model == TLSModel::LocalExec) { | |||
4082 | SDValue HiVar = DAG.getTargetGlobalAddress( | |||
4083 | GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12); | |||
4084 | SDValue LoVar = DAG.getTargetGlobalAddress( | |||
4085 | GV, DL, PtrVT, 0, | |||
4086 | AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC); | |||
4087 | ||||
4088 | SDValue TPWithOff_lo = | |||
4089 | SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase, | |||
4090 | HiVar, | |||
4091 | DAG.getTargetConstant(0, DL, MVT::i32)), | |||
4092 | 0); | |||
4093 | SDValue TPWithOff = | |||
4094 | SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPWithOff_lo, | |||
4095 | LoVar, | |||
4096 | DAG.getTargetConstant(0, DL, MVT::i32)), | |||
4097 | 0); | |||
4098 | return TPWithOff; | |||
4099 | } else if (Model == TLSModel::InitialExec) { | |||
4100 | TPOff = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS); | |||
4101 | TPOff = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TPOff); | |||
4102 | } else if (Model == TLSModel::LocalDynamic) { | |||
4103 | // Local-dynamic accesses proceed in two phases. A general-dynamic TLS | |||
4104 | // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate | |||
4105 | // the beginning of the module's TLS region, followed by a DTPREL offset | |||
4106 | // calculation. | |||
4107 | ||||
4108 | // These accesses will need deduplicating if there's more than one. | |||
4109 | AArch64FunctionInfo *MFI = | |||
4110 | DAG.getMachineFunction().getInfo<AArch64FunctionInfo>(); | |||
4111 | MFI->incNumLocalDynamicTLSAccesses(); | |||
4112 | ||||
4113 | // The call needs a relocation too for linker relaxation. It doesn't make | |||
4114 | // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy of | |||
4115 | // the address. | |||
4116 | SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT, | |||
4117 | AArch64II::MO_TLS); | |||
4118 | ||||
4119 | // Now we can calculate the offset from TPIDR_EL0 to this module's | |||
4120 | // thread-local area. | |||
4121 | TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG); | |||
4122 | ||||
4123 | // Now use :dtprel_whatever: operations to calculate this variable's offset | |||
4124 | // in its thread-storage area. | |||
4125 | SDValue HiVar = DAG.getTargetGlobalAddress( | |||
4126 | GV, DL, MVT::i64, 0, AArch64II::MO_TLS | AArch64II::MO_HI12); | |||
4127 | SDValue LoVar = DAG.getTargetGlobalAddress( | |||
4128 | GV, DL, MVT::i64, 0, | |||
4129 | AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC); | |||
4130 | ||||
4131 | TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, HiVar, | |||
4132 | DAG.getTargetConstant(0, DL, MVT::i32)), | |||
4133 | 0); | |||
4134 | TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, LoVar, | |||
4135 | DAG.getTargetConstant(0, DL, MVT::i32)), | |||
4136 | 0); | |||
4137 | } else if (Model == TLSModel::GeneralDynamic) { | |||
4138 | // The call needs a relocation too for linker relaxation. It doesn't make | |||
4139 | // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy of | |||
4140 | // the address. | |||
4141 | SDValue SymAddr = | |||
4142 | DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS); | |||
4143 | ||||
4144 | // Finally we can make a call to calculate the offset from tpidr_el0. | |||
4145 | TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG); | |||
4146 | } else | |||
4147 | llvm_unreachable("Unsupported ELF TLS access model"); | |||
4148 | ||||
4149 | return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff); | |||
4150 | } | |||
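| // Local-exec sketch (illustrative asm for the ADDXri pair above, using | |||
| // the standard tprel operators): | |||
| //   mrs x0, TPIDR_EL0                // THREAD_POINTER | |||
| //   add x0, x0, #:tprel_hi12:var     // MO_TLS | MO_HI12 | |||
| //   add x0, x0, #:tprel_lo12_nc:var  // MO_TLS | MO_PAGEOFF | MO_NC | |||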
4151 | ||||
4152 | SDValue | |||
4153 | AArch64TargetLowering::LowerWindowsGlobalTLSAddress(SDValue Op, | |||
4154 | SelectionDAG &DAG) const { | |||
4155 | assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering")(static_cast <bool> (Subtarget->isTargetWindows() && "Windows specific TLS lowering") ? void (0) : __assert_fail ( "Subtarget->isTargetWindows() && \"Windows specific TLS lowering\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 4155, __extension__ __PRETTY_FUNCTION__)); | |||
4156 | ||||
4157 | SDValue Chain = DAG.getEntryNode(); | |||
4158 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
4159 | SDLoc DL(Op); | |||
4160 | ||||
4161 | SDValue TEB = DAG.getRegister(AArch64::X18, MVT::i64); | |||
4162 | ||||
4163 | // Load the ThreadLocalStoragePointer from the TEB | |||
4164 | // A pointer to the TLS array is located at offset 0x58 from the TEB. | |||
4165 | SDValue TLSArray = | |||
4166 | DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x58, DL)); | |||
4167 | TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo()); | |||
4168 | Chain = TLSArray.getValue(1); | |||
4169 | ||||
4170 | // Load the TLS index from the C runtime; | |||
4171 | // This does the same as getAddr(), but without having a GlobalAddressSDNode. | |||
4172 | // This also does the same as LOADgot, but using a generic i32 load, | |||
4173 | // while LOADgot only loads i64. | |||
4174 | SDValue TLSIndexHi = | |||
4175 | DAG.getTargetExternalSymbol("_tls_index", PtrVT, AArch64II::MO_PAGE); | |||
4176 | SDValue TLSIndexLo = DAG.getTargetExternalSymbol( | |||
4177 | "_tls_index", PtrVT, AArch64II::MO_PAGEOFF | AArch64II::MO_NC); | |||
4178 | SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, TLSIndexHi); | |||
4179 | SDValue TLSIndex = | |||
4180 | DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, TLSIndexLo); | |||
4181 | TLSIndex = DAG.getLoad(MVT::i32, DL, Chain, TLSIndex, MachinePointerInfo()); | |||
4182 | Chain = TLSIndex.getValue(1); | |||
4183 | ||||
4184 | // The pointer to the thread's TLS data area is at the TLS Index scaled by 8 | |||
4185 | // offset into the TLSArray. | |||
4186 | TLSIndex = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TLSIndex); | |||
4187 | SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex, | |||
4188 | DAG.getConstant(3, DL, PtrVT)); | |||
4189 | SDValue TLS = DAG.getLoad(PtrVT, DL, Chain, | |||
4190 | DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot), | |||
4191 | MachinePointerInfo()); | |||
4192 | Chain = TLS.getValue(1); | |||
4193 | ||||
4194 | const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); | |||
4195 | const GlobalValue *GV = GA->getGlobal(); | |||
4196 | SDValue TGAHi = DAG.getTargetGlobalAddress( | |||
4197 | GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12); | |||
4198 | SDValue TGALo = DAG.getTargetGlobalAddress( | |||
4199 | GV, DL, PtrVT, 0, | |||
4200 | AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC); | |||
4201 | ||||
4202 | // Add the offset from the start of the .tls section (section base). | |||
4203 | SDValue Addr = | |||
4204 | SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TLS, TGAHi, | |||
4205 | DAG.getTargetConstant(0, DL, MVT::i32)), | |||
4206 | 0); | |||
4207 | Addr = DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, Addr, TGALo); | |||
4208 | return Addr; | |||
4209 | } | |||
4210 | ||||
4211 | SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op, | |||
4212 | SelectionDAG &DAG) const { | |||
4213 | const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); | |||
4214 | if (DAG.getTarget().useEmulatedTLS()) | |||
4215 | return LowerToTLSEmulatedModel(GA, DAG); | |||
4216 | ||||
4217 | if (Subtarget->isTargetDarwin()) | |||
4218 | return LowerDarwinGlobalTLSAddress(Op, DAG); | |||
4219 | if (Subtarget->isTargetELF()) | |||
4220 | return LowerELFGlobalTLSAddress(Op, DAG); | |||
4221 | if (Subtarget->isTargetWindows()) | |||
4222 | return LowerWindowsGlobalTLSAddress(Op, DAG); | |||
4223 | ||||
4224 | llvm_unreachable("Unexpected platform trying to use TLS")::llvm::llvm_unreachable_internal("Unexpected platform trying to use TLS" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 4224); | |||
4225 | } | |||
4226 | ||||
4227 | SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { | |||
4228 | SDValue Chain = Op.getOperand(0); | |||
4229 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); | |||
4230 | SDValue LHS = Op.getOperand(2); | |||
4231 | SDValue RHS = Op.getOperand(3); | |||
4232 | SDValue Dest = Op.getOperand(4); | |||
4233 | SDLoc dl(Op); | |||
4234 | ||||
4235 | // Handle f128 first, since lowering it will result in comparing the return | |||
4236 | // value of a libcall against zero, which is just what the rest of LowerBR_CC | |||
4237 | // is expecting to deal with. | |||
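// (For illustration: an f128 SETOLT becomes a call to a soft-float helper
// such as __lttf2, and its i32 result is then compared against zero; the
// exact libcall is chosen by softenSetCCOperands.)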
4238 | if (LHS.getValueType() == MVT::f128) { | |||
4239 | softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); | |||
4240 | ||||
4241 | // If softenSetCCOperands returned a scalar, we need to compare the result | |||
4242 | // against zero to select between true and false values. | |||
4243 | if (!RHS.getNode()) { | |||
4244 | RHS = DAG.getConstant(0, dl, LHS.getValueType()); | |||
4245 | CC = ISD::SETNE; | |||
4246 | } | |||
4247 | } | |||
4248 | ||||
4249 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch | |||
4250 | // instruction. | |||
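// e.g. "if (__builtin_add_overflow(a, b, &r)) goto L" can branch directly
// on the overflow flag, roughly "adds w8, w0, w1; b.vs L" (a sketch of the
// intended match for the signed-add case).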
4251 | if (isOverflowIntrOpRes(LHS) && isOneConstant(RHS) && | |||
4252 | (CC == ISD::SETEQ || CC == ISD::SETNE)) { | |||
4253 | // Only lower legal XALUO ops. | |||
4254 | if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0))) | |||
4255 | return SDValue(); | |||
4256 | ||||
4257 | // The actual operation with overflow check. | |||
4258 | AArch64CC::CondCode OFCC; | |||
4259 | SDValue Value, Overflow; | |||
4260 | std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, LHS.getValue(0), DAG); | |||
4261 | ||||
4262 | if (CC == ISD::SETNE) | |||
4263 | OFCC = getInvertedCondCode(OFCC); | |||
4264 | SDValue CCVal = DAG.getConstant(OFCC, dl, MVT::i32); | |||
4265 | ||||
4266 | return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal, | |||
4267 | Overflow); | |||
4268 | } | |||
4269 | ||||
4270 | if (LHS.getValueType().isInteger()) { | |||
4271 | assert((LHS.getValueType() == RHS.getValueType()) && | |||
4272 | (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64)); | |||
4273 | ||||
4274 | // If the RHS of the comparison is zero, we can potentially fold this | |||
4275 | // to a specialized branch. | |||
4276 | const ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS); | |||
4277 | if (RHSC && RHSC->getZExtValue() == 0) { | |||
4278 | if (CC == ISD::SETEQ) { | |||
4279 | // See if we can use a TBZ to fold in an AND as well. | |||
4280 | // TBZ has a smaller branch displacement than CBZ. If the offset is | |||
4281 | // out of bounds, a late MI-layer pass rewrites branches. | |||
4282 | // 403.gcc is an example that hits this case. | |||
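// e.g. "if ((x & 8) == 0) goto L" becomes "tbz x0, #3, L" instead of an
// "and" followed by "cbz", folding the power-of-two mask into the bit test.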
4283 | if (LHS.getOpcode() == ISD::AND && | |||
4284 | isa<ConstantSDNode>(LHS.getOperand(1)) && | |||
4285 | isPowerOf2_64(LHS.getConstantOperandVal(1))) { | |||
4286 | SDValue Test = LHS.getOperand(0); | |||
4287 | uint64_t Mask = LHS.getConstantOperandVal(1); | |||
4288 | return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, Test, | |||
4289 | DAG.getConstant(Log2_64(Mask), dl, MVT::i64), | |||
4290 | Dest); | |||
4291 | } | |||
4292 | ||||
4293 | return DAG.getNode(AArch64ISD::CBZ, dl, MVT::Other, Chain, LHS, Dest); | |||
4294 | } else if (CC == ISD::SETNE) { | |||
4295 | // See if we can use a TBZ to fold in an AND as well. | |||
4296 | // TBZ has a smaller branch displacement than CBZ. If the offset is | |||
4297 | // out of bounds, a late MI-layer pass rewrites branches. | |||
4298 | // 403.gcc is an example that hits this case. | |||
4299 | if (LHS.getOpcode() == ISD::AND && | |||
4300 | isa<ConstantSDNode>(LHS.getOperand(1)) && | |||
4301 | isPowerOf2_64(LHS.getConstantOperandVal(1))) { | |||
4302 | SDValue Test = LHS.getOperand(0); | |||
4303 | uint64_t Mask = LHS.getConstantOperandVal(1); | |||
4304 | return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, Test, | |||
4305 | DAG.getConstant(Log2_64(Mask), dl, MVT::i64), | |||
4306 | Dest); | |||
4307 | } | |||
4308 | ||||
4309 | return DAG.getNode(AArch64ISD::CBNZ, dl, MVT::Other, Chain, LHS, Dest); | |||
4310 | } else if (CC == ISD::SETLT && LHS.getOpcode() != ISD::AND) { | |||
4311 | // Don't combine AND since emitComparison converts the AND to an ANDS | |||
4312 | // (a.k.a. TST) and the test in the test bit and branch instruction | |||
4313 | // becomes redundant. This would also increase register pressure. | |||
4314 | uint64_t Mask = LHS.getValueSizeInBits() - 1; | |||
4315 | return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, LHS, | |||
4316 | DAG.getConstant(Mask, dl, MVT::i64), Dest); | |||
4317 | } | |||
4318 | } | |||
4319 | if (RHSC && RHSC->getSExtValue() == -1 && CC == ISD::SETGT && | |||
4320 | LHS.getOpcode() != ISD::AND) { | |||
4321 | // Don't combine AND since emitComparison converts the AND to an ANDS | |||
4322 | // (a.k.a. TST) and the test in the test bit and branch instruction | |||
4323 | // becomes redundant. This would also increase register pressure. | |||
4324 | uint64_t Mask = LHS.getValueSizeInBits() - 1; | |||
4325 | return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, LHS, | |||
4326 | DAG.getConstant(Mask, dl, MVT::i64), Dest); | |||
4327 | } | |||
4328 | ||||
4329 | SDValue CCVal; | |||
4330 | SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl); | |||
4331 | return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal, | |||
4332 | Cmp); | |||
4333 | } | |||
4334 | ||||
4335 | assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 || | |||
4336 | LHS.getValueType() == MVT::f64); | |||
4337 | ||||
4338 | // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally | |||
4339 | // clean. Some of them require two branches to implement. | |||
4340 | SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG); | |||
4341 | AArch64CC::CondCode CC1, CC2; | |||
4342 | changeFPCCToAArch64CC(CC, CC1, CC2); | |||
4343 | SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32); | |||
4344 | SDValue BR1 = | |||
4345 | DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CC1Val, Cmp); | |||
4346 | if (CC2 != AArch64CC::AL) { | |||
4347 | SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32); | |||
4348 | return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, BR1, Dest, CC2Val, | |||
4349 | Cmp); | |||
4350 | } | |||
4351 | ||||
4352 | return BR1; | |||
4353 | } | |||
4354 | ||||
4355 | SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op, | |||
4356 | SelectionDAG &DAG) const { | |||
4357 | EVT VT = Op.getValueType(); | |||
4358 | SDLoc DL(Op); | |||
4359 | ||||
4360 | SDValue In1 = Op.getOperand(0); | |||
4361 | SDValue In2 = Op.getOperand(1); | |||
4362 | EVT SrcVT = In2.getValueType(); | |||
4363 | ||||
4364 | if (SrcVT.bitsLT(VT)) | |||
4365 | In2 = DAG.getNode(ISD::FP_EXTEND, DL, VT, In2); | |||
4366 | else if (SrcVT.bitsGT(VT)) | |||
4367 | In2 = DAG.getNode(ISD::FP_ROUND, DL, VT, In2, DAG.getIntPtrConstant(0, DL)); | |||
4368 | ||||
4369 | EVT VecVT; | |||
4370 | uint64_t EltMask; | |||
4371 | SDValue VecVal1, VecVal2; | |||
4372 | ||||
4373 | auto setVecVal = [&] (int Idx) { | |||
4374 | if (!VT.isVector()) { | |||
4375 | VecVal1 = DAG.getTargetInsertSubreg(Idx, DL, VecVT, | |||
4376 | DAG.getUNDEF(VecVT), In1); | |||
4377 | VecVal2 = DAG.getTargetInsertSubreg(Idx, DL, VecVT, | |||
4378 | DAG.getUNDEF(VecVT), In2); | |||
4379 | } else { | |||
4380 | VecVal1 = DAG.getNode(ISD::BITCAST, DL, VecVT, In1); | |||
4381 | VecVal2 = DAG.getNode(ISD::BITCAST, DL, VecVT, In2); | |||
4382 | } | |||
4383 | }; | |||
4384 | ||||
4385 | if (VT == MVT::f32 || VT == MVT::v2f32 || VT == MVT::v4f32) { | |||
4386 | VecVT = (VT == MVT::v2f32 ? MVT::v2i32 : MVT::v4i32); | |||
4387 | EltMask = 0x80000000ULL; | |||
4388 | setVecVal(AArch64::ssub); | |||
4389 | } else if (VT == MVT::f64 || VT == MVT::v2f64) { | |||
4390 | VecVT = MVT::v2i64; | |||
4391 | ||||
4392 | // We want to materialize a mask with the high bit set, but the AdvSIMD | |||
4393 | // immediate moves cannot materialize that in a single instruction for | |||
4394 | // 64-bit elements. Instead, materialize zero and then negate it. | |||
4395 | EltMask = 0; | |||
4396 | ||||
4397 | setVecVal(AArch64::dsub); | |||
4398 | } else if (VT == MVT::f16 || VT == MVT::v4f16 || VT == MVT::v8f16) { | |||
4399 | VecVT = (VT == MVT::v4f16 ? MVT::v4i16 : MVT::v8i16); | |||
4400 | EltMask = 0x8000ULL; | |||
4401 | setVecVal(AArch64::hsub); | |||
4402 | } else { | |||
4403 | llvm_unreachable("Invalid type for copysign!")::llvm::llvm_unreachable_internal("Invalid type for copysign!" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 4403); | |||
4404 | } | |||
4405 | ||||
4406 | SDValue BuildVec = DAG.getConstant(EltMask, DL, VecVT); | |||
4407 | ||||
4408 | // If we couldn't materialize the mask above, then the mask vector will be | |||
4409 | // the zero vector, and we need to negate it here. | |||
4410 | if (VT == MVT::f64 || VT == MVT::v2f64) { | |||
4411 | BuildVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, BuildVec); | |||
4412 | BuildVec = DAG.getNode(ISD::FNEG, DL, MVT::v2f64, BuildVec); | |||
4413 | BuildVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, BuildVec); | |||
4414 | } | |||
4415 | ||||
4416 | SDValue Sel = | |||
4417 | DAG.getNode(AArch64ISD::BIT, DL, VecVT, VecVal1, VecVal2, BuildVec); | |||
4418 | ||||
4419 | if (VT == MVT::f16) | |||
4420 | return DAG.getTargetExtractSubreg(AArch64::hsub, DL, VT, Sel); | |||
4421 | if (VT == MVT::f32) | |||
4422 | return DAG.getTargetExtractSubreg(AArch64::ssub, DL, VT, Sel); | |||
4423 | else if (VT == MVT::f64) | |||
4424 | return DAG.getTargetExtractSubreg(AArch64::dsub, DL, VT, Sel); | |||
4425 | else | |||
4426 | return DAG.getNode(ISD::BITCAST, DL, VT, Sel); | |||
4427 | } | |||
4428 | ||||
4429 | SDValue AArch64TargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const { | |||
4430 | if (DAG.getMachineFunction().getFunction().hasFnAttribute( | |||
4431 | Attribute::NoImplicitFloat)) | |||
4432 | return SDValue(); | |||
4433 | ||||
4434 | if (!Subtarget->hasNEON()) | |||
4435 | return SDValue(); | |||
4436 | ||||
4437 | // While there is no integer popcount instruction, it can | |||
4438 | // be more efficiently lowered to the following sequence that uses | |||
4439 | // AdvSIMD registers/instructions as long as the copies to/from | |||
4440 | // the AdvSIMD registers are cheap. | |||
4441 | // FMOV D0, X0 // copy 64-bit int to vector, high bits zero'd | |||
4442 | // CNT V0.8B, V0.8B // 8xbyte pop-counts | |||
4443 | // ADDV B0, V0.8B // sum 8xbyte pop-counts | |||
4444 | // UMOV X0, V0.B[0] // copy byte result back to integer reg | |||
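// In DAG terms: the (possibly zero-extended) i64 input is bitcast to v8i8,
// CTPOP counts each byte, and aarch64_neon_uaddlv sums the eight counts.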
4445 | SDValue Val = Op.getOperand(0); | |||
4446 | SDLoc DL(Op); | |||
4447 | EVT VT = Op.getValueType(); | |||
4448 | ||||
4449 | if (VT == MVT::i32) | |||
4450 | Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val); | |||
4451 | Val = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Val); | |||
4452 | ||||
4453 | SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, Val); | |||
4454 | SDValue UaddLV = DAG.getNode( | |||
4455 | ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32, | |||
4456 | DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop); | |||
4457 | ||||
4458 | if (VT == MVT::i64) | |||
4459 | UaddLV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, UaddLV); | |||
4460 | return UaddLV; | |||
4461 | } | |||
4462 | ||||
4463 | SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { | |||
4464 | ||||
4465 | if (Op.getValueType().isVector()) | |||
4466 | return LowerVSETCC(Op, DAG); | |||
4467 | ||||
4468 | SDValue LHS = Op.getOperand(0); | |||
4469 | SDValue RHS = Op.getOperand(1); | |||
4470 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); | |||
4471 | SDLoc dl(Op); | |||
4472 | ||||
4473 | // We chose ZeroOrOneBooleanContents, so use zero and one. | |||
4474 | EVT VT = Op.getValueType(); | |||
4475 | SDValue TVal = DAG.getConstant(1, dl, VT); | |||
4476 | SDValue FVal = DAG.getConstant(0, dl, VT); | |||
4477 | ||||
4478 | // Handle f128 first, since one possible outcome is a normal integer | |||
4479 | // comparison which gets picked up by the next if statement. | |||
4480 | if (LHS.getValueType() == MVT::f128) { | |||
4481 | softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); | |||
4482 | ||||
4483 | // If softenSetCCOperands returned a scalar, use it. | |||
4484 | if (!RHS.getNode()) { | |||
4485 | assert(LHS.getValueType() == Op.getValueType() && | |||
4486 | "Unexpected setcc expansion!"); | |||
4487 | return LHS; | |||
4488 | } | |||
4489 | } | |||
4490 | ||||
4491 | if (LHS.getValueType().isInteger()) { | |||
4492 | SDValue CCVal; | |||
4493 | SDValue Cmp = | |||
4494 | getAArch64Cmp(LHS, RHS, ISD::getSetCCInverse(CC, true), CCVal, DAG, dl); | |||
4495 | ||||
4496 | // Note that we inverted the condition above, so we reverse the order of | |||
4497 | // the true and false operands here. This will allow the setcc to be | |||
4498 | // matched to a single CSINC instruction. | |||
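// e.g. "w0 = (a == b)" becomes "cmp a, b; cset w0, eq", where "cset w0, eq"
// is an alias of "csinc w0, wzr, wzr, ne" -- exactly the inverted-condition
// CSEL built below.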
4499 | return DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CCVal, Cmp); | |||
4500 | } | |||
4501 | ||||
4502 | // Now we know we're dealing with FP values. | |||
4503 | assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 || | |||
4504 | LHS.getValueType() == MVT::f64); | |||
4505 | ||||
4506 | // We'll need to perform an FCMP + CSEL sequence. Go ahead | |||
4507 | // and do the comparison. | |||
4508 | SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG); | |||
4509 | ||||
4510 | AArch64CC::CondCode CC1, CC2; | |||
4511 | changeFPCCToAArch64CC(CC, CC1, CC2); | |||
4512 | if (CC2 == AArch64CC::AL) { | |||
4513 | changeFPCCToAArch64CC(ISD::getSetCCInverse(CC, false), CC1, CC2); | |||
4514 | SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32); | |||
4515 | ||||
4516 | // Note that we inverted the condition above, so we reverse the order of | |||
4517 | // the true and false operands here. This will allow the setcc to be | |||
4518 | // matched to a single CSINC instruction. | |||
4519 | return DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CC1Val, Cmp); | |||
4520 | } else { | |||
4521 | // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't | |||
4522 | // totally clean. Some of them require two CSELs to implement. As in | |||
4523 | // this case, we emit the first CSEL and then emit a second using the output | |||
4524 | // of the first as the RHS. We're effectively OR'ing the two CC's together. | |||
4525 | ||||
4526 | // FIXME: It would be nice if we could match the two CSELs to two CSINCs. | |||
4527 | SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32); | |||
4528 | SDValue CS1 = | |||
4529 | DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp); | |||
4530 | ||||
4531 | SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32); | |||
4532 | return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp); | |||
4533 | } | |||
4534 | } | |||
4535 | ||||
4536 | SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, | |||
4537 | SDValue RHS, SDValue TVal, | |||
4538 | SDValue FVal, const SDLoc &dl, | |||
4539 | SelectionDAG &DAG) const { | |||
4540 | // Handle f128 first, because it will result in a comparison of some RTLIB | |||
4541 | // call result against zero. | |||
4542 | if (LHS.getValueType() == MVT::f128) { | |||
4543 | softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); | |||
4544 | ||||
4545 | // If softenSetCCOperands returned a scalar, we need to compare the result | |||
4546 | // against zero to select between true and false values. | |||
4547 | if (!RHS.getNode()) { | |||
4548 | RHS = DAG.getConstant(0, dl, LHS.getValueType()); | |||
4549 | CC = ISD::SETNE; | |||
4550 | } | |||
4551 | } | |||
4552 | ||||
4553 | // Also handle f16, for which we need to do a f32 comparison. | |||
4554 | if (LHS.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) { | |||
4555 | LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS); | |||
4556 | RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS); | |||
4557 | } | |||
4558 | ||||
4559 | // Next, handle integers. | |||
4560 | if (LHS.getValueType().isInteger()) { | |||
4561 | assert((LHS.getValueType() == RHS.getValueType()) && | |||
4562 | (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64)); | |||
4563 | ||||
4564 | unsigned Opcode = AArch64ISD::CSEL; | |||
4565 | ||||
4566 | // If both the TVal and the FVal are constants, see if we can swap them in | |||
4567 | // order to form a CSINV or CSINC out of them. | |||
4568 | ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal); | |||
4569 | ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal); | |||
4570 | ||||
4571 | if (CTVal && CFVal && CTVal->isAllOnesValue() && CFVal->isNullValue()) { | |||
4572 | std::swap(TVal, FVal); | |||
4573 | std::swap(CTVal, CFVal); | |||
4574 | CC = ISD::getSetCCInverse(CC, true); | |||
4575 | } else if (CTVal && CFVal && CTVal->isOne() && CFVal->isNullValue()) { | |||
4576 | std::swap(TVal, FVal); | |||
4577 | std::swap(CTVal, CFVal); | |||
4578 | CC = ISD::getSetCCInverse(CC, true); | |||
4579 | } else if (TVal.getOpcode() == ISD::XOR) { | |||
4580 | // If TVal is a NOT we want to swap TVal and FVal so that we can match | |||
4581 | // with a CSINV rather than a CSEL. | |||
4582 | if (isAllOnesConstant(TVal.getOperand(1))) { | |||
4583 | std::swap(TVal, FVal); | |||
4584 | std::swap(CTVal, CFVal); | |||
4585 | CC = ISD::getSetCCInverse(CC, true); | |||
4586 | } | |||
4587 | } else if (TVal.getOpcode() == ISD::SUB) { | |||
4588 | // If TVal is a negation (SUB from 0) we want to swap TVal and FVal so | |||
4589 | // that we can match with a CSNEG rather than a CSEL. | |||
4590 | if (isNullConstant(TVal.getOperand(0))) { | |||
4591 | std::swap(TVal, FVal); | |||
4592 | std::swap(CTVal, CFVal); | |||
4593 | CC = ISD::getSetCCInverse(CC, true); | |||
4594 | } | |||
4595 | } else if (CTVal && CFVal) { | |||
4596 | const int64_t TrueVal = CTVal->getSExtValue(); | |||
4597 | const int64_t FalseVal = CFVal->getSExtValue(); | |||
4598 | bool Swap = false; | |||
4599 | ||||
4600 | // If both TVal and FVal are constants, see if FVal is the | |||
4601 | // inverse/negation/increment of TVal and generate a CSINV/CSNEG/CSINC | |||
4602 | // instead of a CSEL in that case. | |||
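// e.g. (c ? 1 : -1) has TrueVal == -FalseVal and becomes a CSNEG, while
// (c ? 5 : 4) differs by one and becomes a CSINC (possibly after a swap).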
4603 | if (TrueVal == ~FalseVal) { | |||
4604 | Opcode = AArch64ISD::CSINV; | |||
4605 | } else if (TrueVal == -FalseVal) { | |||
4606 | Opcode = AArch64ISD::CSNEG; | |||
4607 | } else if (TVal.getValueType() == MVT::i32) { | |||
4608 | // If our operands are only 32-bit wide, make sure we use 32-bit | |||
4609 | // arithmetic for the check whether we can use CSINC. This ensures that | |||
4610 | // the addition in the check will wrap around properly in case there is | |||
4611 | // an overflow (which would not be the case if we do the check with | |||
4612 | // 64-bit arithmetic). | |||
4613 | const uint32_t TrueVal32 = CTVal->getZExtValue(); | |||
4614 | const uint32_t FalseVal32 = CFVal->getZExtValue(); | |||
4615 | ||||
4616 | if ((TrueVal32 == FalseVal32 + 1) || (TrueVal32 + 1 == FalseVal32)) { | |||
4617 | Opcode = AArch64ISD::CSINC; | |||
4618 | ||||
4619 | if (TrueVal32 > FalseVal32) { | |||
4620 | Swap = true; | |||
4621 | } | |||
4622 | } | |||
4623 | // 64-bit check whether we can use CSINC. | |||
4624 | } else if ((TrueVal == FalseVal + 1) || (TrueVal + 1 == FalseVal)) { | |||
4625 | Opcode = AArch64ISD::CSINC; | |||
4626 | ||||
4627 | if (TrueVal > FalseVal) { | |||
4628 | Swap = true; | |||
4629 | } | |||
4630 | } | |||
4631 | ||||
4632 | // Swap TVal and FVal if necessary. | |||
4633 | if (Swap) { | |||
4634 | std::swap(TVal, FVal); | |||
4635 | std::swap(CTVal, CFVal); | |||
4636 | CC = ISD::getSetCCInverse(CC, true); | |||
4637 | } | |||
4638 | ||||
4639 | if (Opcode != AArch64ISD::CSEL) { | |||
4640 | // Drop FVal since we can get its value by simply inverting/negating | |||
4641 | // TVal. | |||
4642 | FVal = TVal; | |||
4643 | } | |||
4644 | } | |||
4645 | ||||
4646 | // Avoid materializing a constant when possible by reusing a known value in | |||
4647 | // a register. However, don't perform this optimization if the known value | |||
4648 | // is one, zero or negative one in the case of a CSEL. We can always | |||
4649 | // materialize these values using CSINC, CSEL and CSINV with wzr/xzr as the | |||
4650 | // FVal, respectively. | |||
4651 | ConstantSDNode *RHSVal = dyn_cast<ConstantSDNode>(RHS); | |||
4652 | if (Opcode == AArch64ISD::CSEL && RHSVal && !RHSVal->isOne() && | |||
4653 | !RHSVal->isNullValue() && !RHSVal->isAllOnesValue()) { | |||
4654 | AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC); | |||
4655 | // Transform "a == C ? C : x" to "a == C ? a : x" and "a != C ? x : C" to | |||
4656 | // "a != C ? x : a" to avoid materializing C. | |||
4657 | if (CTVal && CTVal == RHSVal && AArch64CC == AArch64CC::EQ) | |||
4658 | TVal = LHS; | |||
4659 | else if (CFVal && CFVal == RHSVal && AArch64CC == AArch64CC::NE) | |||
4660 | FVal = LHS; | |||
4661 | } else if (Opcode == AArch64ISD::CSNEG && RHSVal && RHSVal->isOne()) { | |||
4662 | assert(CTVal && CFVal && "Expected constant operands for CSNEG."); | |||
4663 | // Use a CSINV to transform "a == C ? 1 : -1" to "a == C ? a : -1" to | |||
4664 | // avoid materializing C. | |||
4665 | AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC); | |||
4666 | if (CTVal == RHSVal && AArch64CC == AArch64CC::EQ) { | |||
4667 | Opcode = AArch64ISD::CSINV; | |||
4668 | TVal = LHS; | |||
4669 | FVal = DAG.getConstant(0, dl, FVal.getValueType()); | |||
4670 | } | |||
4671 | } | |||
4672 | ||||
4673 | SDValue CCVal; | |||
4674 | SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl); | |||
4675 | EVT VT = TVal.getValueType(); | |||
4676 | return DAG.getNode(Opcode, dl, VT, TVal, FVal, CCVal, Cmp); | |||
4677 | } | |||
4678 | ||||
4679 | // Now we know we're dealing with FP values. | |||
4680 | assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 || | |||
4681 | LHS.getValueType() == MVT::f64); | |||
4682 | assert(LHS.getValueType() == RHS.getValueType()); | |||
4683 | EVT VT = TVal.getValueType(); | |||
4684 | SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG); | |||
4685 | ||||
4686 | // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally | |||
4687 | // clean. Some of them require two CSELs to implement. | |||
4688 | AArch64CC::CondCode CC1, CC2; | |||
4689 | changeFPCCToAArch64CC(CC, CC1, CC2); | |||
4690 | ||||
4691 | if (DAG.getTarget().Options.UnsafeFPMath) { | |||
4692 | // Transform "a == 0.0 ? 0.0 : x" to "a == 0.0 ? a : x" and | |||
4693 | // "a != 0.0 ? x : 0.0" to "a != 0.0 ? x : a" to avoid materializing 0.0. | |||
4694 | ConstantFPSDNode *RHSVal = dyn_cast<ConstantFPSDNode>(RHS); | |||
4695 | if (RHSVal && RHSVal->isZero()) { | |||
4696 | ConstantFPSDNode *CFVal = dyn_cast<ConstantFPSDNode>(FVal); | |||
4697 | ConstantFPSDNode *CTVal = dyn_cast<ConstantFPSDNode>(TVal); | |||
4698 | ||||
4699 | if ((CC == ISD::SETEQ || CC == ISD::SETOEQ || CC == ISD::SETUEQ) && | |||
4700 | CTVal && CTVal->isZero() && TVal.getValueType() == LHS.getValueType()) | |||
4701 | TVal = LHS; | |||
4702 | else if ((CC == ISD::SETNE || CC == ISD::SETONE || CC == ISD::SETUNE) && | |||
4703 | CFVal && CFVal->isZero() && | |||
4704 | FVal.getValueType() == LHS.getValueType()) | |||
4705 | FVal = LHS; | |||
4706 | } | |||
4707 | } | |||
4708 | ||||
4709 | // Emit first, and possibly only, CSEL. | |||
4710 | SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32); | |||
4711 | SDValue CS1 = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp); | |||
4712 | ||||
4713 | // If we need a second CSEL, emit it, using the output of the first as the | |||
4714 | // RHS. We're effectively OR'ing the two CC's together. | |||
4715 | if (CC2 != AArch64CC::AL) { | |||
4716 | SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32); | |||
4717 | return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp); | |||
4718 | } | |||
4719 | ||||
4720 | // Otherwise, return the output of the first CSEL. | |||
4721 | return CS1; | |||
4722 | } | |||
4723 | ||||
4724 | SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op, | |||
4725 | SelectionDAG &DAG) const { | |||
4726 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); | |||
4727 | SDValue LHS = Op.getOperand(0); | |||
4728 | SDValue RHS = Op.getOperand(1); | |||
4729 | SDValue TVal = Op.getOperand(2); | |||
4730 | SDValue FVal = Op.getOperand(3); | |||
4731 | SDLoc DL(Op); | |||
4732 | return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG); | |||
4733 | } | |||
4734 | ||||
4735 | SDValue AArch64TargetLowering::LowerSELECT(SDValue Op, | |||
4736 | SelectionDAG &DAG) const { | |||
4737 | SDValue CCVal = Op->getOperand(0); | |||
4738 | SDValue TVal = Op->getOperand(1); | |||
4739 | SDValue FVal = Op->getOperand(2); | |||
4740 | SDLoc DL(Op); | |||
4741 | ||||
4742 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a select | |||
4743 | // instruction. | |||
4744 | if (isOverflowIntrOpRes(CCVal)) { | |||
4745 | // Only lower legal XALUO ops. | |||
4746 | if (!DAG.getTargetLoweringInfo().isTypeLegal(CCVal->getValueType(0))) | |||
4747 | return SDValue(); | |||
4748 | ||||
4749 | AArch64CC::CondCode OFCC; | |||
4750 | SDValue Value, Overflow; | |||
4751 | std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, CCVal.getValue(0), DAG); | |||
4752 | SDValue CCVal = DAG.getConstant(OFCC, DL, MVT::i32); | |||
4753 | ||||
4754 | return DAG.getNode(AArch64ISD::CSEL, DL, Op.getValueType(), TVal, FVal, | |||
4755 | CCVal, Overflow); | |||
4756 | } | |||
4757 | ||||
4758 | // Lower it the same way as we would lower a SELECT_CC node. | |||
4759 | ISD::CondCode CC; | |||
4760 | SDValue LHS, RHS; | |||
4761 | if (CCVal.getOpcode() == ISD::SETCC) { | |||
4762 | LHS = CCVal.getOperand(0); | |||
4763 | RHS = CCVal.getOperand(1); | |||
4764 | CC = cast<CondCodeSDNode>(CCVal->getOperand(2))->get(); | |||
4765 | } else { | |||
4766 | LHS = CCVal; | |||
4767 | RHS = DAG.getConstant(0, DL, CCVal.getValueType()); | |||
4768 | CC = ISD::SETNE; | |||
4769 | } | |||
4770 | return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG); | |||
4771 | } | |||
4772 | ||||
4773 | SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op, | |||
4774 | SelectionDAG &DAG) const { | |||
4775 | // Jump table entries are emitted as PC-relative offsets. No additional tweaking | |||
4776 | // is necessary here. Just get the address of the jump table. | |||
4777 | JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); | |||
4778 | ||||
4779 | if (getTargetMachine().getCodeModel() == CodeModel::Large && | |||
4780 | !Subtarget->isTargetMachO()) { | |||
4781 | return getAddrLarge(JT, DAG); | |||
4782 | } | |||
4783 | return getAddr(JT, DAG); | |||
4784 | } | |||
4785 | ||||
4786 | SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op, | |||
4787 | SelectionDAG &DAG) const { | |||
4788 | ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); | |||
4789 | ||||
4790 | if (getTargetMachine().getCodeModel() == CodeModel::Large) { | |||
4791 | // Use the GOT for the large code model on iOS. | |||
4792 | if (Subtarget->isTargetMachO()) { | |||
4793 | return getGOT(CP, DAG); | |||
4794 | } | |||
4795 | return getAddrLarge(CP, DAG); | |||
4796 | } else { | |||
4797 | return getAddr(CP, DAG); | |||
4798 | } | |||
4799 | } | |||
4800 | ||||
4801 | SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op, | |||
4802 | SelectionDAG &DAG) const { | |||
4803 | BlockAddressSDNode *BA = cast<BlockAddressSDNode>(Op); | |||
4804 | if (getTargetMachine().getCodeModel() == CodeModel::Large && | |||
4805 | !Subtarget->isTargetMachO()) { | |||
4806 | return getAddrLarge(BA, DAG); | |||
4807 | } else { | |||
4808 | return getAddr(BA, DAG); | |||
4809 | } | |||
4810 | } | |||
4811 | ||||
4812 | SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op, | |||
4813 | SelectionDAG &DAG) const { | |||
4814 | AArch64FunctionInfo *FuncInfo = | |||
4815 | DAG.getMachineFunction().getInfo<AArch64FunctionInfo>(); | |||
4816 | ||||
4817 | SDLoc DL(Op); | |||
4818 | SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(), | |||
4819 | getPointerTy(DAG.getDataLayout())); | |||
4820 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); | |||
4821 | return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), | |||
4822 | MachinePointerInfo(SV)); | |||
4823 | } | |||
4824 | ||||
4825 | SDValue AArch64TargetLowering::LowerWin64_VASTART(SDValue Op, | |||
4826 | SelectionDAG &DAG) const { | |||
4827 | AArch64FunctionInfo *FuncInfo = | |||
4828 | DAG.getMachineFunction().getInfo<AArch64FunctionInfo>(); | |||
4829 | ||||
4830 | SDLoc DL(Op); | |||
4831 | SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsGPRSize() > 0 | |||
4832 | ? FuncInfo->getVarArgsGPRIndex() | |||
4833 | : FuncInfo->getVarArgsStackIndex(), | |||
4834 | getPointerTy(DAG.getDataLayout())); | |||
4835 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); | |||
4836 | return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), | |||
4837 | MachinePointerInfo(SV)); | |||
4838 | } | |||
4839 | ||||
4840 | SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op, | |||
4841 | SelectionDAG &DAG) const { | |||
4842 | // The layout of the va_list struct is specified in the AArch64 Procedure Call | |||
4843 | // Standard, section B.3. | |||
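// In C terms, the layout being initialized is (offsets as used below):
//   typedef struct {
//     void *__stack;   // offset 0
//     void *__gr_top;  // offset 8
//     void *__vr_top;  // offset 16
//     int   __gr_offs; // offset 24
//     int   __vr_offs; // offset 28
//   } va_list;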
4844 | MachineFunction &MF = DAG.getMachineFunction(); | |||
4845 | AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); | |||
4846 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
4847 | SDLoc DL(Op); | |||
4848 | ||||
4849 | SDValue Chain = Op.getOperand(0); | |||
4850 | SDValue VAList = Op.getOperand(1); | |||
4851 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); | |||
4852 | SmallVector<SDValue, 4> MemOps; | |||
4853 | ||||
4854 | // void *__stack at offset 0 | |||
4855 | SDValue Stack = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(), PtrVT); | |||
4856 | MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList, | |||
4857 | MachinePointerInfo(SV), /* Alignment = */ 8)); | |||
4858 | ||||
4859 | // void *__gr_top at offset 8 | |||
4860 | int GPRSize = FuncInfo->getVarArgsGPRSize(); | |||
4861 | if (GPRSize > 0) { | |||
4862 | SDValue GRTop, GRTopAddr; | |||
4863 | ||||
4864 | GRTopAddr = | |||
4865 | DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getConstant(8, DL, PtrVT)); | |||
4866 | ||||
4867 | GRTop = DAG.getFrameIndex(FuncInfo->getVarArgsGPRIndex(), PtrVT); | |||
4868 | GRTop = DAG.getNode(ISD::ADD, DL, PtrVT, GRTop, | |||
4869 | DAG.getConstant(GPRSize, DL, PtrVT)); | |||
4870 | ||||
4871 | MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr, | |||
4872 | MachinePointerInfo(SV, 8), | |||
4873 | /* Alignment = */ 8)); | |||
4874 | } | |||
4875 | ||||
4876 | // void *__vr_top at offset 16 | |||
4877 | int FPRSize = FuncInfo->getVarArgsFPRSize(); | |||
4878 | if (FPRSize > 0) { | |||
4879 | SDValue VRTop, VRTopAddr; | |||
4880 | VRTopAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, | |||
4881 | DAG.getConstant(16, DL, PtrVT)); | |||
4882 | ||||
4883 | VRTop = DAG.getFrameIndex(FuncInfo->getVarArgsFPRIndex(), PtrVT); | |||
4884 | VRTop = DAG.getNode(ISD::ADD, DL, PtrVT, VRTop, | |||
4885 | DAG.getConstant(FPRSize, DL, PtrVT)); | |||
4886 | ||||
4887 | MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr, | |||
4888 | MachinePointerInfo(SV, 16), | |||
4889 | /* Alignment = */ 8)); | |||
4890 | } | |||
4891 | ||||
4892 | // int __gr_offs at offset 24 | |||
4893 | SDValue GROffsAddr = | |||
4894 | DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getConstant(24, DL, PtrVT)); | |||
4895 | MemOps.push_back(DAG.getStore( | |||
4896 | Chain, DL, DAG.getConstant(-GPRSize, DL, MVT::i32), GROffsAddr, | |||
4897 | MachinePointerInfo(SV, 24), /* Alignment = */ 4)); | |||
4898 | ||||
4899 | // int __vr_offs at offset 28 | |||
4900 | SDValue VROffsAddr = | |||
4901 | DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getConstant(28, DL, PtrVT)); | |||
4902 | MemOps.push_back(DAG.getStore( | |||
4903 | Chain, DL, DAG.getConstant(-FPRSize, DL, MVT::i32), VROffsAddr, | |||
4904 | MachinePointerInfo(SV, 28), /* Alignment = */ 4)); | |||
4905 | ||||
4906 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps); | |||
4907 | } | |||
4908 | ||||
4909 | SDValue AArch64TargetLowering::LowerVASTART(SDValue Op, | |||
4910 | SelectionDAG &DAG) const { | |||
4911 | MachineFunction &MF = DAG.getMachineFunction(); | |||
4912 | ||||
4913 | if (Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv())) | |||
4914 | return LowerWin64_VASTART(Op, DAG); | |||
4915 | else if (Subtarget->isTargetDarwin()) | |||
4916 | return LowerDarwin_VASTART(Op, DAG); | |||
4917 | else | |||
4918 | return LowerAAPCS_VASTART(Op, DAG); | |||
4919 | } | |||
4920 | ||||
4921 | SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op, | |||
4922 | SelectionDAG &DAG) const { | |||
4923 | // The AAPCS va_list is three pointers and two ints (= 32 bytes); Darwin | |||
4924 | // and Windows use a single pointer. | |||
4925 | SDLoc DL(Op); | |||
4926 | unsigned VaListSize = | |||
4927 | Subtarget->isTargetDarwin() || Subtarget->isTargetWindows() ? 8 : 32; | |||
4928 | const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); | |||
4929 | const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); | |||
4930 | ||||
4931 | return DAG.getMemcpy(Op.getOperand(0), DL, Op.getOperand(1), | |||
4932 | Op.getOperand(2), | |||
4933 | DAG.getConstant(VaListSize, DL, MVT::i32), | |||
4934 | 8, false, false, false, MachinePointerInfo(DestSV), | |||
4935 | MachinePointerInfo(SrcSV)); | |||
4936 | } | |||
4937 | ||||
4938 | SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { | |||
4939 | assert(Subtarget->isTargetDarwin() && | |||
4940 | "automatic va_arg instruction only works on Darwin"); | |||
4941 | ||||
4942 | const Value *V = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); | |||
4943 | EVT VT = Op.getValueType(); | |||
4944 | SDLoc DL(Op); | |||
4945 | SDValue Chain = Op.getOperand(0); | |||
4946 | SDValue Addr = Op.getOperand(1); | |||
4947 | unsigned Align = Op.getConstantOperandVal(3); | |||
4948 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
4949 | ||||
4950 | SDValue VAList = DAG.getLoad(PtrVT, DL, Chain, Addr, MachinePointerInfo(V)); | |||
4951 | Chain = VAList.getValue(1); | |||
4952 | ||||
4953 | if (Align > 8) { | |||
4954 | assert(((Align & (Align - 1)) == 0) && "Expected Align to be a power of 2"); | |||
4955 | VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, | |||
4956 | DAG.getConstant(Align - 1, DL, PtrVT)); | |||
4957 | VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList, | |||
4958 | DAG.getConstant(-(int64_t)Align, DL, PtrVT)); | |||
4959 | } | |||
4960 | ||||
4961 | Type *ArgTy = VT.getTypeForEVT(*DAG.getContext()); | |||
4962 | uint64_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy); | |||
4963 | ||||
4964 | // Scalar integer and FP values smaller than 64 bits are implicitly extended | |||
4965 | // up to 64 bits. At the very least, we have to increase the striding of the | |||
4966 | // vaargs list to match this, and for FP values we need to introduce | |||
4967 | // FP_ROUND nodes as well. | |||
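// e.g. for va_arg(ap, float) the slot is still 8 bytes wide, so the value
// is loaded as an f64 and rounded back down to f32 (NeedFPTrunc below).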
4968 | if (VT.isInteger() && !VT.isVector()) | |||
4969 | ArgSize = 8; | |||
4970 | bool NeedFPTrunc = false; | |||
4971 | if (VT.isFloatingPoint() && !VT.isVector() && VT != MVT::f64) { | |||
4972 | ArgSize = 8; | |||
4973 | NeedFPTrunc = true; | |||
4974 | } | |||
4975 | ||||
4976 | // Increment the pointer, VAList, to the next vaarg | |||
4977 | SDValue VANext = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, | |||
4978 | DAG.getConstant(ArgSize, DL, PtrVT)); | |||
4979 | // Store the incremented VAList to the legalized pointer | |||
4980 | SDValue APStore = | |||
4981 | DAG.getStore(Chain, DL, VANext, Addr, MachinePointerInfo(V)); | |||
4982 | ||||
4983 | // Load the actual argument out of the pointer VAList | |||
4984 | if (NeedFPTrunc) { | |||
4985 | // Load the value as an f64. | |||
4986 | SDValue WideFP = | |||
4987 | DAG.getLoad(MVT::f64, DL, APStore, VAList, MachinePointerInfo()); | |||
4988 | // Round the value down to an f32. | |||
4989 | SDValue NarrowFP = DAG.getNode(ISD::FP_ROUND, DL, VT, WideFP.getValue(0), | |||
4990 | DAG.getIntPtrConstant(1, DL)); | |||
4991 | SDValue Ops[] = { NarrowFP, WideFP.getValue(1) }; | |||
4992 | // Merge the rounded value with the chain output of the load. | |||
4993 | return DAG.getMergeValues(Ops, DL); | |||
4994 | } | |||
4995 | ||||
4996 | return DAG.getLoad(VT, DL, APStore, VAList, MachinePointerInfo()); | |||
4997 | } | |||
4998 | ||||
4999 | SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op, | |||
5000 | SelectionDAG &DAG) const { | |||
5001 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); | |||
5002 | MFI.setFrameAddressIsTaken(true); | |||
5003 | ||||
5004 | EVT VT = Op.getValueType(); | |||
5005 | SDLoc DL(Op); | |||
5006 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
5007 | SDValue FrameAddr = | |||
5008 | DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, VT); | |||
5009 | while (Depth--) | |||
5010 | FrameAddr = DAG.getLoad(VT, DL, DAG.getEntryNode(), FrameAddr, | |||
5011 | MachinePointerInfo()); | |||
5012 | return FrameAddr; | |||
5013 | } | |||
5014 | ||||
5015 | // FIXME? Maybe this could be a TableGen attribute on some registers and | |||
5016 | // this table could be generated automatically from RegInfo. | |||
5017 | unsigned AArch64TargetLowering::getRegisterByName(const char* RegName, EVT VT, | |||
5018 | SelectionDAG &DAG) const { | |||
5019 | unsigned Reg = StringSwitch<unsigned>(RegName) | |||
5020 | .Case("sp", AArch64::SP) | |||
5021 | .Case("x18", AArch64::X18) | |||
5022 | .Case("w18", AArch64::W18) | |||
5023 | .Case("x20", AArch64::X20) | |||
5024 | .Case("w20", AArch64::W20) | |||
5025 | .Default(0); | |||
5026 | if (((Reg == AArch64::X18 || Reg == AArch64::W18) && | |||
5027 | !Subtarget->isX18Reserved()) || | |||
5028 | ((Reg == AArch64::X20 || Reg == AArch64::W20) && | |||
5029 | !Subtarget->isX20Reserved())) | |||
5030 | Reg = 0; | |||
5031 | if (Reg) | |||
5032 | return Reg; | |||
5033 | report_fatal_error(Twine("Invalid register name \"" | |||
5034 | + StringRef(RegName) + "\".")); | |||
5035 | } | |||
5036 | ||||
5037 | SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op, | |||
5038 | SelectionDAG &DAG) const { | |||
5039 | MachineFunction &MF = DAG.getMachineFunction(); | |||
5040 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
5041 | MFI.setReturnAddressIsTaken(true); | |||
5042 | ||||
5043 | EVT VT = Op.getValueType(); | |||
5044 | SDLoc DL(Op); | |||
5045 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
5046 | if (Depth) { | |||
5047 | SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); | |||
5048 | SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout())); | |||
5049 | return DAG.getLoad(VT, DL, DAG.getEntryNode(), | |||
5050 | DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset), | |||
5051 | MachinePointerInfo()); | |||
5052 | } | |||
5053 | ||||
5054 | // Return LR, which contains the return address. Mark it an implicit live-in. | |||
5055 | unsigned Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass); | |||
5056 | return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT); | |||
5057 | } | |||
5058 | ||||
5059 | /// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two | |||
5060 | /// i64 values and take a 2 x i64 value to shift plus a shift amount. | |||
5061 | SDValue AArch64TargetLowering::LowerShiftRightParts(SDValue Op, | |||
5062 | SelectionDAG &DAG) const { | |||
5063 | assert(Op.getNumOperands() == 3 && "Not a double-shift!"); | |||
5064 | EVT VT = Op.getValueType(); | |||
5065 | unsigned VTBits = VT.getSizeInBits(); | |||
5066 | SDLoc dl(Op); | |||
5067 | SDValue ShOpLo = Op.getOperand(0); | |||
5068 | SDValue ShOpHi = Op.getOperand(1); | |||
5069 | SDValue ShAmt = Op.getOperand(2); | |||
5070 | unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; | |||
5071 | ||||
5072 | assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); | |||
5073 | ||||
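// A sketch of what the CSELs below compute, for (Lo, Hi) = (lo, hi) >> amt:
//   amt <  64: Lo = (lo >> amt) | (hi << (64 - amt));  Hi = hi >> amt
//   amt >= 64: Lo = hi >> (amt - 64);                   Hi = sign bits or 0
// where shifts of hi are arithmetic for SRA_PARTS and logical for SRL_PARTS.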
5074 | SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, | |||
5075 | DAG.getConstant(VTBits, dl, MVT::i64), ShAmt); | |||
5076 | SDValue HiBitsForLo = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); | |||
5077 | ||||
5078 | // Unfortunately, if ShAmt == 0, we just calculated "(SHL ShOpHi, 64)" which | |||
5079 | // is "undef". We wanted 0, so CSEL it directly. | |||
5080 | SDValue Cmp = emitComparison(ShAmt, DAG.getConstant(0, dl, MVT::i64), | |||
5081 | ISD::SETEQ, dl, DAG); | |||
5082 | SDValue CCVal = DAG.getConstant(AArch64CC::EQ, dl, MVT::i32); | |||
5083 | HiBitsForLo = | |||
5084 | DAG.getNode(AArch64ISD::CSEL, dl, VT, DAG.getConstant(0, dl, MVT::i64), | |||
5085 | HiBitsForLo, CCVal, Cmp); | |||
5086 | ||||
5087 | SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt, | |||
5088 | DAG.getConstant(VTBits, dl, MVT::i64)); | |||
5089 | ||||
5090 | SDValue LoBitsForLo = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); | |||
5091 | SDValue LoForNormalShift = | |||
5092 | DAG.getNode(ISD::OR, dl, VT, LoBitsForLo, HiBitsForLo); | |||
5093 | ||||
5094 | Cmp = emitComparison(ExtraShAmt, DAG.getConstant(0, dl, MVT::i64), ISD::SETGE, | |||
5095 | dl, DAG); | |||
5096 | CCVal = DAG.getConstant(AArch64CC::GE, dl, MVT::i32); | |||
5097 | SDValue LoForBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); | |||
5098 | SDValue Lo = DAG.getNode(AArch64ISD::CSEL, dl, VT, LoForBigShift, | |||
5099 | LoForNormalShift, CCVal, Cmp); | |||
5100 | ||||
5101 | // AArch64 shifts larger than the register width are wrapped rather than | |||
5102 | // clamped, so we can't just emit "hi >> x". | |||
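// (Variable shifts on AArch64 use the shift amount modulo the register
// width, so "hi >> 64" would behave like "hi >> 0" rather than yield 0.)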
5103 | SDValue HiForNormalShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); | |||
5104 | SDValue HiForBigShift = | |||
5105 | Opc == ISD::SRA | |||
5106 | ? DAG.getNode(Opc, dl, VT, ShOpHi, | |||
5107 | DAG.getConstant(VTBits - 1, dl, MVT::i64)) | |||
5108 | : DAG.getConstant(0, dl, VT); | |||
5109 | SDValue Hi = DAG.getNode(AArch64ISD::CSEL, dl, VT, HiForBigShift, | |||
5110 | HiForNormalShift, CCVal, Cmp); | |||
5111 | ||||
5112 | SDValue Ops[2] = { Lo, Hi }; | |||
5113 | return DAG.getMergeValues(Ops, dl); | |||
5114 | } | |||
5115 | ||||
5116 | /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two | |||
5117 | /// i64 values and takes a 2 x i64 value to shift plus a shift amount. | |||
5118 | SDValue AArch64TargetLowering::LowerShiftLeftParts(SDValue Op, | |||
5119 | SelectionDAG &DAG) const { | |||
5120 | assert(Op.getNumOperands() == 3 && "Not a double-shift!"); | |||
5121 | EVT VT = Op.getValueType(); | |||
5122 | unsigned VTBits = VT.getSizeInBits(); | |||
5123 | SDLoc dl(Op); | |||
5124 | SDValue ShOpLo = Op.getOperand(0); | |||
5125 | SDValue ShOpHi = Op.getOperand(1); | |||
5126 | SDValue ShAmt = Op.getOperand(2); | |||
5127 | ||||
5128 | assert(Op.getOpcode() == ISD::SHL_PARTS); | |||
5129 | SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, | |||
5130 | DAG.getConstant(VTBits, dl, MVT::i64), ShAmt); | |||
5131 | SDValue LoBitsForHi = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); | |||
5132 | ||||
5133 | // Unfortunately, if ShAmt == 0, we just calculated "(SRL ShOpLo, 64)" which | |||
5134 | // is "undef". We wanted 0, so CSEL it directly. | |||
5135 | SDValue Cmp = emitComparison(ShAmt, DAG.getConstant(0, dl, MVT::i64), | |||
5136 | ISD::SETEQ, dl, DAG); | |||
5137 | SDValue CCVal = DAG.getConstant(AArch64CC::EQ, dl, MVT::i32); | |||
5138 | LoBitsForHi = | |||
5139 | DAG.getNode(AArch64ISD::CSEL, dl, VT, DAG.getConstant(0, dl, MVT::i64), | |||
5140 | LoBitsForHi, CCVal, Cmp); | |||
5141 | ||||
5142 | SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt, | |||
5143 | DAG.getConstant(VTBits, dl, MVT::i64)); | |||
5144 | SDValue HiBitsForHi = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); | |||
5145 | SDValue HiForNormalShift = | |||
5146 | DAG.getNode(ISD::OR, dl, VT, LoBitsForHi, HiBitsForHi); | |||
5147 | ||||
5148 | SDValue HiForBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); | |||
5149 | ||||
5150 | Cmp = emitComparison(ExtraShAmt, DAG.getConstant(0, dl, MVT::i64), ISD::SETGE, | |||
5151 | dl, DAG); | |||
5152 | CCVal = DAG.getConstant(AArch64CC::GE, dl, MVT::i32); | |||
5153 | SDValue Hi = DAG.getNode(AArch64ISD::CSEL, dl, VT, HiForBigShift, | |||
5154 | HiForNormalShift, CCVal, Cmp); | |||
5155 | ||||
5156 | // AArch64 shifts larger than the register width are wrapped rather than | |||
5157 | // clamped, so we can't just emit "lo << a" if a is too big. | |||
5158 | SDValue LoForBigShift = DAG.getConstant(0, dl, VT); | |||
5159 | SDValue LoForNormalShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); | |||
5160 | SDValue Lo = DAG.getNode(AArch64ISD::CSEL, dl, VT, LoForBigShift, | |||
5161 | LoForNormalShift, CCVal, Cmp); | |||
5162 | ||||
5163 | SDValue Ops[2] = { Lo, Hi }; | |||
5164 | return DAG.getMergeValues(Ops, dl); | |||
5165 | } | |||
5166 | ||||
5167 | bool AArch64TargetLowering::isOffsetFoldingLegal( | |||
5168 | const GlobalAddressSDNode *GA) const { | |||
5169 | // Offsets are folded in the DAG combine rather than here so that we can | |||
5170 | // intelligently choose an offset based on the uses. | |||
5171 | return false; | |||
5172 | } | |||
5173 | ||||
5174 | bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { | |||
5175 | // We can materialize #0.0 as fmov $Rd, XZR for 64-bit and 32-bit cases. | |||
5176 | // FIXME: We should be able to handle f128 as well with a clever lowering. | |||
5177 | if (Imm.isPosZero() && (VT == MVT::f64 || VT == MVT::f32 || | |||
5178 | (VT == MVT::f16 && Subtarget->hasFullFP16()))) { | |||
5179 | LLVM_DEBUG( | |||
5180 | dbgs() << "Legal fp imm: materialize 0 using the zero register\n"); | |||
5181 | return true; | |||
5182 | } | |||
5183 | ||||
5184 | StringRef FPType; | |||
5185 | bool IsLegal = false; | |||
5186 | SmallString<128> ImmStrVal; | |||
5187 | Imm.toString(ImmStrVal); | |||
5188 | ||||
5189 | if (VT == MVT::f64) { | |||
5190 | FPType = "f64"; | |||
5191 | IsLegal = AArch64_AM::getFP64Imm(Imm) != -1; | |||
5192 | } else if (VT == MVT::f32) { | |||
5193 | FPType = "f32"; | |||
5194 | IsLegal = AArch64_AM::getFP32Imm(Imm) != -1; | |||
5195 | } else if (VT == MVT::f16 && Subtarget->hasFullFP16()) { | |||
5196 | FPType = "f16"; | |||
5197 | IsLegal = AArch64_AM::getFP16Imm(Imm) != -1; | |||
5198 | } | |||
5199 | ||||
5200 | if (IsLegal) { | |||
5201 | LLVM_DEBUG(dbgs() << "Legal " << FPType << " imm value: " << ImmStrValdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-lower")) { dbgs() << "Legal " << FPType << " imm value: " << ImmStrVal << "\n"; } } while (false) | |||
5202 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-lower")) { dbgs() << "Legal " << FPType << " imm value: " << ImmStrVal << "\n"; } } while (false); | |||
5203 | return true; | |||
5204 | } | |||
5205 | ||||
5206 | if (!FPType.empty()) | |||
5207 | LLVM_DEBUG(dbgs() << "Illegal " << FPType << " imm value: " << ImmStrValdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-lower")) { dbgs() << "Illegal " << FPType << " imm value: " << ImmStrVal << "\n"; } } while (false) | |||
5208 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-lower")) { dbgs() << "Illegal " << FPType << " imm value: " << ImmStrVal << "\n"; } } while (false); | |||
5209 | else | |||
5210 | LLVM_DEBUG(dbgs() << "Illegal fp imm " << ImmStrVal | |||
5211 | << ": unsupported fp type\n"); | |||
5212 | ||||
5213 | return false; | |||
5214 | } | |||
5215 | ||||
5216 | //===----------------------------------------------------------------------===// | |||
5217 | // AArch64 Optimization Hooks | |||
5218 | //===----------------------------------------------------------------------===// | |||
5219 | ||||
5220 | static SDValue getEstimate(const AArch64Subtarget *ST, unsigned Opcode, | |||
5221 | SDValue Operand, SelectionDAG &DAG, | |||
5222 | int &ExtraSteps) { | |||
5223 | EVT VT = Operand.getValueType(); | |||
5224 | if (ST->hasNEON() && | |||
5225 | (VT == MVT::f64 || VT == MVT::v1f64 || VT == MVT::v2f64 || | |||
5226 | VT == MVT::f32 || VT == MVT::v1f32 || | |||
5227 | VT == MVT::v2f32 || VT == MVT::v4f32)) { | |||
5228 | if (ExtraSteps == TargetLoweringBase::ReciprocalEstimate::Unspecified) | |||
5229 | // For the reciprocal estimates, convergence is quadratic, so the number | |||
5230 | // of digits is doubled after each iteration. In ARMv8, the accuracy of | |||
5231 | // the initial estimate is 2^-8. Thus the number of extra steps to refine | |||
5232 | // the result for float (23 mantissa bits) is 2 and for double (52 | |||
5233 | // mantissa bits) is 3. | |||
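// Concretely, the number of correct bits doubles per step: 8 -> 16 -> 32
// after two steps covers float's 23 mantissa bits, and 8 -> 16 -> 32 -> 64
// after three steps covers double's 52.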
5234 | ExtraSteps = VT.getScalarType() == MVT::f64 ? 3 : 2; | |||
5235 | ||||
5236 | return DAG.getNode(Opcode, SDLoc(Operand), VT, Operand); | |||
5237 | } | |||
5238 | ||||
5239 | return SDValue(); | |||
5240 | } | |||
5241 | ||||
5242 | SDValue AArch64TargetLowering::getSqrtEstimate(SDValue Operand, | |||
5243 | SelectionDAG &DAG, int Enabled, | |||
5244 | int &ExtraSteps, | |||
5245 | bool &UseOneConst, | |||
5246 | bool Reciprocal) const { | |||
5247 | if (Enabled == ReciprocalEstimate::Enabled || | |||
5248 | (Enabled == ReciprocalEstimate::Unspecified && Subtarget->useRSqrt())) | |||
5249 | if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRSQRTE, Operand, | |||
5250 | DAG, ExtraSteps)) { | |||
5251 | SDLoc DL(Operand); | |||
5252 | EVT VT = Operand.getValueType(); | |||
5253 | ||||
5254 | SDNodeFlags Flags; | |||
5255 | Flags.setAllowReassociation(true); | |||
5256 | ||||
5257 | // Newton reciprocal square root iteration: E * 0.5 * (3 - X * E^2) | |||
5258 | // AArch64 reciprocal square root iteration instruction: 0.5 * (3 - M * N) | |||
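// In the loop below, FRSQRTS(Operand, E*E) therefore produces the
// 0.5 * (3 - X * E^2) factor, and the trailing FMUL applies it to E.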
5259 | for (int i = ExtraSteps; i > 0; --i) { | |||
5260 | SDValue Step = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Estimate, | |||
5261 | Flags); | |||
5262 | Step = DAG.getNode(AArch64ISD::FRSQRTS, DL, VT, Operand, Step, Flags); | |||
5263 | Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags); | |||
5264 | } | |||
5265 | if (!Reciprocal) { | |||
5266 | EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), | |||
5267 | VT); | |||
5268 | SDValue FPZero = DAG.getConstantFP(0.0, DL, VT); | |||
5269 | SDValue Eq = DAG.getSetCC(DL, CCVT, Operand, FPZero, ISD::SETEQ); | |||
5270 | ||||
5271 | Estimate = DAG.getNode(ISD::FMUL, DL, VT, Operand, Estimate, Flags); | |||
5272 | // Correct the result if the operand is 0.0. | |||
5273 | Estimate = DAG.getNode(VT.isVector() ? ISD::VSELECT : ISD::SELECT, DL, | |||
5274 | VT, Eq, Operand, Estimate); | |||
5275 | } | |||
5276 | ||||
5277 | ExtraSteps = 0; | |||
5278 | return Estimate; | |||
5279 | } | |||
5280 | ||||
5281 | return SDValue(); | |||
5282 | } | |||
5283 | ||||
5284 | SDValue AArch64TargetLowering::getRecipEstimate(SDValue Operand, | |||
5285 | SelectionDAG &DAG, int Enabled, | |||
5286 | int &ExtraSteps) const { | |||
5287 | if (Enabled == ReciprocalEstimate::Enabled) | |||
5288 | if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRECPE, Operand, | |||
5289 | DAG, ExtraSteps)) { | |||
5290 | SDLoc DL(Operand); | |||
5291 | EVT VT = Operand.getValueType(); | |||
5292 | ||||
5293 | SDNodeFlags Flags; | |||
5294 | Flags.setAllowReassociation(true); | |||
5295 | ||||
5296 | // Newton reciprocal iteration: E * (2 - X * E) | |||
5297 | // AArch64 reciprocal iteration instruction: (2 - M * N) | |||
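// In the loop below, FRECPS(Operand, E) produces the (2 - X * E) factor,
// and the FMUL refines the estimate to E * (2 - X * E).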
5298 | for (int i = ExtraSteps; i > 0; --i) { | |||
5299 | SDValue Step = DAG.getNode(AArch64ISD::FRECPS, DL, VT, Operand, | |||
5300 | Estimate, Flags); | |||
5301 | Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags); | |||
5302 | } | |||
5303 | ||||
5304 | ExtraSteps = 0; | |||
5305 | return Estimate; | |||
5306 | } | |||
5307 | ||||
5308 | return SDValue(); | |||
5309 | } | |||
5310 | ||||
5311 | //===----------------------------------------------------------------------===// | |||
5312 | // AArch64 Inline Assembly Support | |||
5313 | //===----------------------------------------------------------------------===// | |||
5314 | ||||
5315 | // Table of Constraints | |||
5316 | // TODO: This is the current set of constraints supported by ARM for the | |||
5317 | // compiler; not all of them may make sense. | |||
5318 | // | |||
5319 | // r - A general register | |||
5320 | // w - An FP/SIMD register of some size in the range v0-v31 | |||
5321 | // x - An FP/SIMD register of some size in the range v0-v15 | |||
5322 | // I - Constant that can be used with an ADD instruction | |||
5323 | // J - Constant that can be used with a SUB instruction | |||
5324 | // K - Constant that can be used with a 32-bit logical instruction | |||
5325 | // L - Constant that can be used with a 64-bit logical instruction | |||
5326 | // M - Constant that can be used as a 32-bit MOV immediate | |||
5327 | // N - Constant that can be used as a 64-bit MOV immediate | |||
5328 | // Q - A memory reference with base register and no offset | |||
5329 | // S - A symbolic address | |||
5330 | // Y - Floating point constant zero | |||
5331 | // Z - Integer constant zero | |||
5332 | // | |||
5333 | // Note that general register operands will be output using their 64-bit x | |||
5334 | // register name, whatever the size of the variable, unless the asm operand | |||
5335 | // is prefixed by the %w modifier. Floating-point and SIMD register operands | |||
5336 | // will be output with the v prefix unless prefixed by the %b, %h, %s, %d or | |||
5337 | // %q modifier. | |||
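//
// For illustration only (hypothetical user code, not part of this file),
// these constraints are used from GCC-style inline asm such as:
//
//   asm("add %w0, %w1, #1" : "=r"(Out) : "r"(In));        // 'r', %w modifier
//   asm("fadd %s0, %s1, %s2" : "=w"(F) : "w"(A), "w"(B)); // 'w' FP register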
5338 | const char *AArch64TargetLowering::LowerXConstraint(EVT ConstraintVT) const { | |||
5339 | // At this point, we have to lower this constraint to something else, so we | |||
5340 | // lower it to an "r" or "w". However, by doing this we will force the result | |||
5341 | // to be in a register, while the X constraint is much more permissive. | |||
5342 | // | |||
5343 | // Although we are correct (we are free to emit anything, without | |||
5344 | // constraints), we might break use cases that would expect us to be more | |||
5345 | // efficient and emit something else. | |||
5346 | if (!Subtarget->hasFPARMv8()) | |||
5347 | return "r"; | |||
5348 | ||||
5349 | if (ConstraintVT.isFloatingPoint()) | |||
5350 | return "w"; | |||
5351 | ||||
5352 | if (ConstraintVT.isVector() && | |||
5353 | (ConstraintVT.getSizeInBits() == 64 || | |||
5354 | ConstraintVT.getSizeInBits() == 128)) | |||
5355 | return "w"; | |||
5356 | ||||
5357 | return "r"; | |||
5358 | } | |||
5359 | ||||
5360 | /// getConstraintType - Given a constraint letter, return the type of | |||
5361 | /// constraint it is for this target. | |||
5362 | AArch64TargetLowering::ConstraintType | |||
5363 | AArch64TargetLowering::getConstraintType(StringRef Constraint) const { | |||
5364 | if (Constraint.size() == 1) { | |||
5365 | switch (Constraint[0]) { | |||
5366 | default: | |||
5367 | break; | |||
5368 | case 'z': | |||
5369 | return C_Other; | |||
5370 | case 'x': | |||
5371 | case 'w': | |||
5372 | return C_RegisterClass; | |||
5373 | // An address with a single base register. Due to the way we | |||
5374 | // currently handle addresses it is the same as 'r'. | |||
5375 | case 'Q': | |||
5376 | return C_Memory; | |||
5377 | case 'S': // A symbolic address | |||
5378 | return C_Other; | |||
5379 | } | |||
5380 | } | |||
5381 | return TargetLowering::getConstraintType(Constraint); | |||
5382 | } | |||
5383 | ||||
5384 | /// Examine constraint type and operand type and determine a weight value. | |||
5385 | /// This object must already have been set up with the operand type | |||
5386 | /// and the current alternative constraint selected. | |||
5387 | TargetLowering::ConstraintWeight | |||
5388 | AArch64TargetLowering::getSingleConstraintMatchWeight( | |||
5389 | AsmOperandInfo &info, const char *constraint) const { | |||
5390 | ConstraintWeight weight = CW_Invalid; | |||
5391 | Value *CallOperandVal = info.CallOperandVal; | |||
5392 | // If we don't have a value, we can't do a match, | |||
5393 | // but allow it at the lowest weight. | |||
5394 | if (!CallOperandVal) | |||
5395 | return CW_Default; | |||
5396 | Type *type = CallOperandVal->getType(); | |||
5397 | // Look at the constraint type. | |||
5398 | switch (*constraint) { | |||
5399 | default: | |||
5400 | weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); | |||
5401 | break; | |||
5402 | case 'x': | |||
5403 | case 'w': | |||
5404 | if (type->isFloatingPointTy() || type->isVectorTy()) | |||
5405 | weight = CW_Register; | |||
5406 | break; | |||
5407 | case 'z': | |||
5408 | weight = CW_Constant; | |||
5409 | break; | |||
5410 | } | |||
5411 | return weight; | |||
5412 | } | |||
5413 | ||||
5414 | std::pair<unsigned, const TargetRegisterClass *> | |||
5415 | AArch64TargetLowering::getRegForInlineAsmConstraint( | |||
5416 | const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { | |||
5417 | if (Constraint.size() == 1) { | |||
5418 | switch (Constraint[0]) { | |||
5419 | case 'r': | |||
5420 | if (VT.getSizeInBits() == 64) | |||
5421 | return std::make_pair(0U, &AArch64::GPR64commonRegClass); | |||
5422 | return std::make_pair(0U, &AArch64::GPR32commonRegClass); | |||
5423 | case 'w': | |||
5424 | if (VT.getSizeInBits() == 16) | |||
5425 | return std::make_pair(0U, &AArch64::FPR16RegClass); | |||
5426 | if (VT.getSizeInBits() == 32) | |||
5427 | return std::make_pair(0U, &AArch64::FPR32RegClass); | |||
5428 | if (VT.getSizeInBits() == 64) | |||
5429 | return std::make_pair(0U, &AArch64::FPR64RegClass); | |||
5430 | if (VT.getSizeInBits() == 128) | |||
5431 | return std::make_pair(0U, &AArch64::FPR128RegClass); | |||
5432 | break; | |||
5433 | // The instructions that this constraint is designed for can | |||
5434 | // only take 128-bit registers so just use that regclass. | |||
5435 | case 'x': | |||
5436 | if (VT.getSizeInBits() == 128) | |||
5437 | return std::make_pair(0U, &AArch64::FPR128_loRegClass); | |||
5438 | break; | |||
5439 | } | |||
5440 | } | |||
5441 | if (StringRef("{cc}").equals_lower(Constraint)) | |||
5442 | return std::make_pair(unsigned(AArch64::NZCV), &AArch64::CCRRegClass); | |||
5443 | ||||
5444 | // Use the default implementation in TargetLowering to convert the register | |||
5445 | // constraint into a member of a register class. | |||
5446 | std::pair<unsigned, const TargetRegisterClass *> Res; | |||
5447 | Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); | |||
5448 | ||||
5449 | // Not found as a standard register? | |||
5450 | if (!Res.second) { | |||
5451 | unsigned Size = Constraint.size(); | |||
5452 | if ((Size == 4 || Size == 5) && Constraint[0] == '{' && | |||
5453 | tolower(Constraint[1]) == 'v' && Constraint[Size - 1] == '}') { | |||
5454 | int RegNo; | |||
5455 | bool Failed = Constraint.slice(2, Size - 1).getAsInteger(10, RegNo); | |||
5456 | if (!Failed && RegNo >= 0 && RegNo <= 31) { | |||
5457 | // v0 - v31 are aliases of q0 - q31 or d0 - d31 depending on size. | |||
5458 | // By default we'll emit v0-v31 for this; if there's a size modifier, | |||
5459 | // the correctly sized d or q register will be emitted instead. | |||
5460 | if (VT != MVT::Other && VT.getSizeInBits() == 64) { | |||
5461 | Res.first = AArch64::FPR64RegClass.getRegister(RegNo); | |||
5462 | Res.second = &AArch64::FPR64RegClass; | |||
5463 | } else { | |||
5464 | Res.first = AArch64::FPR128RegClass.getRegister(RegNo); | |||
5465 | Res.second = &AArch64::FPR128RegClass; | |||
5466 | } | |||
5467 | } | |||
5468 | } | |||
5469 | } | |||
5470 | ||||
5471 | return Res; | |||
5472 | } | |||
5473 | ||||
5474 | /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops | |||
5475 | /// vector. If it is invalid, don't add anything to Ops. | |||
5476 | void AArch64TargetLowering::LowerAsmOperandForConstraint( | |||
5477 | SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops, | |||
5478 | SelectionDAG &DAG) const { | |||
5479 | SDValue Result; | |||
5480 | ||||
5481 | // Currently only support length 1 constraints. | |||
5482 | if (Constraint.length() != 1) | |||
5483 | return; | |||
5484 | ||||
5485 | char ConstraintLetter = Constraint[0]; | |||
5486 | switch (ConstraintLetter) { | |||
5487 | default: | |||
5488 | break; | |||
5489 | ||||
5490 | // This set of constraints deals with valid constants for various instructions. | |||
5491 | // Validate and return a target constant for them if we can. | |||
5492 | case 'z': { | |||
5493 | // 'z' maps to xzr or wzr so it needs an input of 0. | |||
5494 | if (!isNullConstant(Op)) | |||
5495 | return; | |||
5496 | ||||
5497 | if (Op.getValueType() == MVT::i64) | |||
5498 | Result = DAG.getRegister(AArch64::XZR, MVT::i64); | |||
5499 | else | |||
5500 | Result = DAG.getRegister(AArch64::WZR, MVT::i32); | |||
5501 | break; | |||
5502 | } | |||
5503 | case 'S': { | |||
5504 | // An absolute symbolic address or label reference. | |||
5505 | if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) { | |||
5506 | Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op), | |||
5507 | GA->getValueType(0)); | |||
5508 | } else if (const BlockAddressSDNode *BA = | |||
5509 | dyn_cast<BlockAddressSDNode>(Op)) { | |||
5510 | Result = | |||
5511 | DAG.getTargetBlockAddress(BA->getBlockAddress(), BA->getValueType(0)); | |||
5512 | } else if (const ExternalSymbolSDNode *ES = | |||
5513 | dyn_cast<ExternalSymbolSDNode>(Op)) { | |||
5514 | Result = | |||
5515 | DAG.getTargetExternalSymbol(ES->getSymbol(), ES->getValueType(0)); | |||
5516 | } else | |||
5517 | return; | |||
5518 | break; | |||
5519 | } | |||
5520 | ||||
5521 | case 'I': | |||
5522 | case 'J': | |||
5523 | case 'K': | |||
5524 | case 'L': | |||
5525 | case 'M': | |||
5526 | case 'N': | |||
5527 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); | |||
5528 | if (!C) | |||
5529 | return; | |||
5530 | ||||
5531 | // Grab the value and do some validation. | |||
5532 | uint64_t CVal = C->getZExtValue(); | |||
5533 | switch (ConstraintLetter) { | |||
5534 | // The I constraint applies only to simple ADD or SUB immediate operands: | |||
5535 | // i.e. 0 to 4095 with optional shift by 12 | |||
5536 | // The J constraint applies only to ADD or SUB immediates that would be | |||
5537 | // valid when negated, i.e. if [an add pattern] were to be output as a SUB | |||
5538 | // instruction [or vice versa], in other words -1 to -4095 with optional | |||
5539 | // left shift by 12. | |||
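// For illustration: 4095 and 0xFFF000 both satisfy 'I', while -1 through
// -4095 (and those values shifted left by 12) satisfy 'J'.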
5540 | case 'I': | |||
5541 | if (isUInt<12>(CVal) || isShiftedUInt<12, 12>(CVal)) | |||
5542 | break; | |||
5543 | return; | |||
5544 | case 'J': { | |||
5545 | uint64_t NVal = -C->getSExtValue(); | |||
5546 | if (isUInt<12>(NVal) || isShiftedUInt<12, 12>(NVal)) { | |||
5547 | CVal = C->getSExtValue(); | |||
5548 | break; | |||
5549 | } | |||
5550 | return; | |||
5551 | } | |||
5552 | // The K and L constraints apply *only* to logical immediates, including | |||
5553 | // what used to be the MOVI alias for ORR (though the MOVI alias has now | |||
5554 | // been removed and MOV should be used). So these constraints have to | |||
5555 | // distinguish between bit patterns that are valid 32-bit or 64-bit | |||
5556 | // "bitmask immediates": for example 0xaaaaaaaa is a valid bimm32 (K), but | |||
5557 | // not a valid bimm64 (L) where 0xaaaaaaaaaaaaaaaa would be valid, and vice | |||
5558 | // versa. | |||
5559 | case 'K': | |||
5560 | if (AArch64_AM::isLogicalImmediate(CVal, 32)) | |||
5561 | break; | |||
5562 | return; | |||
5563 | case 'L': | |||
5564 | if (AArch64_AM::isLogicalImmediate(CVal, 64)) | |||
5565 | break; | |||
5566 | return; | |||
5567 | // The M and N constraints are a superset of K and L respectively, for use | |||
5568 | // with the MOV (immediate) alias. As well as the logical immediates they | |||
5569 | // also match 32 or 64-bit immediates that can be loaded either using a | |||
5570 | // *single* MOVZ or MOVN, such as 32-bit 0x12340000, 0x00001234, 0xffffedca | |||
5571 | // (M) or 64-bit 0x1234000000000000 (N) etc. | |||
5572 | // As a note, some of this code is liberally stolen from the asm parser. | |||
5573 | case 'M': { | |||
5574 | if (!isUInt<32>(CVal)) | |||
5575 | return; | |||
5576 | if (AArch64_AM::isLogicalImmediate(CVal, 32)) | |||
5577 | break; | |||
5578 | if ((CVal & 0xFFFF) == CVal) | |||
5579 | break; | |||
5580 | if ((CVal & 0xFFFF0000ULL) == CVal) | |||
5581 | break; | |||
5582 | uint64_t NCVal = ~(uint32_t)CVal; | |||
5583 | if ((NCVal & 0xFFFFULL) == NCVal) | |||
5584 | break; | |||
5585 | if ((NCVal & 0xFFFF0000ULL) == NCVal) | |||
5586 | break; | |||
5587 | return; | |||
5588 | } | |||
5589 | case 'N': { | |||
5590 | if (AArch64_AM::isLogicalImmediate(CVal, 64)) | |||
5591 | break; | |||
5592 | if ((CVal & 0xFFFFULL) == CVal) | |||
5593 | break; | |||
5594 | if ((CVal & 0xFFFF0000ULL) == CVal) | |||
5595 | break; | |||
5596 | if ((CVal & 0xFFFF00000000ULL) == CVal) | |||
5597 | break; | |||
5598 | if ((CVal & 0xFFFF000000000000ULL) == CVal) | |||
5599 | break; | |||
5600 | uint64_t NCVal = ~CVal; | |||
5601 | if ((NCVal & 0xFFFFULL) == NCVal) | |||
5602 | break; | |||
5603 | if ((NCVal & 0xFFFF0000ULL) == NCVal) | |||
5604 | break; | |||
5605 | if ((NCVal & 0xFFFF00000000ULL) == NCVal) | |||
5606 | break; | |||
5607 | if ((NCVal & 0xFFFF000000000000ULL) == NCVal) | |||
5608 | break; | |||
5609 | return; | |||
5610 | } | |||
5611 | default: | |||
5612 | return; | |||
5613 | } | |||
5614 | ||||
5615 | // All assembler immediates are 64-bit integers. | |||
5616 | Result = DAG.getTargetConstant(CVal, SDLoc(Op), MVT::i64); | |||
5617 | break; | |||
5618 | } | |||
5619 | ||||
5620 | if (Result.getNode()) { | |||
5621 | Ops.push_back(Result); | |||
5622 | return; | |||
5623 | } | |||
5624 | ||||
5625 | return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); | |||
5626 | } | |||
5627 | ||||
5628 | //===----------------------------------------------------------------------===// | |||
5629 | // AArch64 Advanced SIMD Support | |||
5630 | //===----------------------------------------------------------------------===// | |||
5631 | ||||
5632 | /// WidenVector - Given a value in the V64 register class, produce the | |||
5633 | /// equivalent value in the V128 register class. | |||
5634 | static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG) { | |||
5635 | EVT VT = V64Reg.getValueType(); | |||
5636 | unsigned NarrowSize = VT.getVectorNumElements(); | |||
5637 | MVT EltTy = VT.getVectorElementType().getSimpleVT(); | |||
5638 | MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize); | |||
5639 | SDLoc DL(V64Reg); | |||
5640 | ||||
5641 | return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideTy, DAG.getUNDEF(WideTy), | |||
5642 | V64Reg, DAG.getConstant(0, DL, MVT::i32)); | |||
5643 | } | |||
5644 | ||||
5645 | /// getExtFactor - Determine the adjustment factor for the position when | |||
5646 | /// generating an "extract from vector registers" instruction. | |||
5647 | static unsigned getExtFactor(SDValue &V) { | |||
5648 | EVT EltType = V.getValueType().getVectorElementType(); | |||
5649 | return EltType.getSizeInBits() / 8; | |||
5650 | } | |||
5651 | ||||
5652 | /// NarrowVector - Given a value in the V128 register class, produce the | |||
5653 | /// equivalent value in the V64 register class. | |||
5654 | static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) { | |||
5655 | EVT VT = V128Reg.getValueType(); | |||
5656 | unsigned WideSize = VT.getVectorNumElements(); | |||
5657 | MVT EltTy = VT.getVectorElementType().getSimpleVT(); | |||
5658 | MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2); | |||
5659 | SDLoc DL(V128Reg); | |||
5660 | ||||
5661 | return DAG.getTargetExtractSubreg(AArch64::dsub, DL, NarrowTy, V128Reg); | |||
5662 | } | |||
5663 | ||||
5664 | // Gather data to see if the operation can be modelled as a | |||
5665 | // shuffle in combination with VEXTs. | |||
5666 | SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op, | |||
5667 | SelectionDAG &DAG) const { | |||
5668 | assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); | |||
5669 | LLVM_DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n"); | |||
5670 | SDLoc dl(Op); | |||
5671 | EVT VT = Op.getValueType(); | |||
5672 | unsigned NumElts = VT.getVectorNumElements(); | |||
5673 | ||||
5674 | struct ShuffleSourceInfo { | |||
5675 | SDValue Vec; | |||
5676 | unsigned MinElt; | |||
5677 | unsigned MaxElt; | |||
5678 | ||||
5679 | // We may insert some combination of BITCASTs and VEXT nodes to force Vec to | |||
5680 | // be compatible with the shuffle we intend to construct. As a result | |||
5681 | // ShuffleVec will be some sliding window into the original Vec. | |||
5682 | SDValue ShuffleVec; | |||
5683 | ||||
5684 | // Code should guarantee that element i in Vec starts at element "WindowBase | |||
5685 | // + i * WindowScale in ShuffleVec". | |||
5686 | int WindowBase; | |||
5687 | int WindowScale; | |||
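// For example, if a v2i64 source is bitcast to v4i32 to feed a v4i32
// shuffle, WindowScale becomes 2 and original element i occupies lanes
// WindowBase + 2*i and WindowBase + 2*i + 1 of ShuffleVec.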
5688 | ||||
5689 | ShuffleSourceInfo(SDValue Vec) | |||
5690 | : Vec(Vec), MinElt(std::numeric_limits<unsigned>::max()), MaxElt(0), | |||
5691 | ShuffleVec(Vec), WindowBase(0), WindowScale(1) {} | |||
5692 | ||||
5693 | bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } | |||
5694 | }; | |||
5695 | ||||
5696 | // First gather all vectors used as an immediate source for this BUILD_VECTOR | |||
5697 | // node. | |||
5698 | SmallVector<ShuffleSourceInfo, 2> Sources; | |||
5699 | for (unsigned i = 0; i < NumElts; ++i) { | |||
5700 | SDValue V = Op.getOperand(i); | |||
5701 | if (V.isUndef()) | |||
5702 | continue; | |||
5703 | else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
5704 | !isa<ConstantSDNode>(V.getOperand(1))) { | |||
5705 | LLVM_DEBUG( | |||
5706 | dbgs() << "Reshuffle failed: " | |||
5707 | "a shuffle can only come from building a vector from " | |||
5708 | "various elements of other vectors, provided their " | |||
5709 | "indices are constant\n"); | |||
5710 | return SDValue(); | |||
5711 | } | |||
5712 | ||||
5713 | // Add this element source to the list if it's not already there. | |||
5714 | SDValue SourceVec = V.getOperand(0); | |||
5715 | auto Source = find(Sources, SourceVec); | |||
5716 | if (Source == Sources.end()) | |||
5717 | Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec)); | |||
5718 | ||||
5719 | // Update the minimum and maximum lane number seen. | |||
5720 | unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); | |||
5721 | Source->MinElt = std::min(Source->MinElt, EltNo); | |||
5722 | Source->MaxElt = std::max(Source->MaxElt, EltNo); | |||
5723 | } | |||
5724 | ||||
5725 | if (Sources.size() > 2) { | |||
5726 | LLVM_DEBUG( | |||
5727 | dbgs() << "Reshuffle failed: currently only do something sane when at " | |||
5728 | "most two source vectors are involved\n"); | |||
5729 | return SDValue(); | |||
5730 | } | |||
5731 | ||||
5732 | // Find the smallest element size among the result and the two sources, and | |||
5733 | // use it as the element size to build the shuffle_vector. | |||
5734 | EVT SmallestEltTy = VT.getVectorElementType(); | |||
5735 | for (auto &Source : Sources) { | |||
5736 | EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType(); | |||
5737 | if (SrcEltTy.bitsLT(SmallestEltTy)) { | |||
5738 | SmallestEltTy = SrcEltTy; | |||
5739 | } | |||
5740 | } | |||
5741 | unsigned ResMultiplier = | |||
5742 | VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits(); | |||
5743 | NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits(); | |||
5744 | EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts); | |||
5745 | ||||
5746 | // If the source vector is too wide or too narrow, we may nevertheless be able | |||
5747 | // to construct a compatible shuffle either by concatenating it with UNDEF or | |||
5748 | // extracting a suitable range of elements. | |||
5749 | for (auto &Src : Sources) { | |||
5750 | EVT SrcVT = Src.ShuffleVec.getValueType(); | |||
5751 | ||||
5752 | if (SrcVT.getSizeInBits() == VT.getSizeInBits()) | |||
5753 | continue; | |||
5754 | ||||
5755 | // This stage of the search produces a source with the same element type as | |||
5756 | // the original, but with a total width matching the BUILD_VECTOR output. | |||
5757 | EVT EltVT = SrcVT.getVectorElementType(); | |||
5758 | unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits(); | |||
5759 | EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts); | |||
5760 | ||||
5761 | if (SrcVT.getSizeInBits() < VT.getSizeInBits()) { | |||
5762 | assert(2 * SrcVT.getSizeInBits() == VT.getSizeInBits()); | |||
5763 | // We can pad out the smaller vector for free, so if it's part of a | |||
5764 | // shuffle... | |||
5765 | Src.ShuffleVec = | |||
5766 | DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec, | |||
5767 | DAG.getUNDEF(Src.ShuffleVec.getValueType())); | |||
5768 | continue; | |||
5769 | } | |||
5770 | ||||
5771 | assert(SrcVT.getSizeInBits() == 2 * VT.getSizeInBits()); | |||
5772 | ||||
5773 | if (Src.MaxElt - Src.MinElt >= NumSrcElts) { | |||
5774 | LLVM_DEBUG( | |||
5775 | dbgs() << "Reshuffle failed: span too large for a VEXT to cope\n"); | |||
5776 | return SDValue(); | |||
5777 | } | |||
5778 | ||||
5779 | if (Src.MinElt >= NumSrcElts) { | |||
5780 | // The extraction can just take the second half | |||
5781 | Src.ShuffleVec = | |||
5782 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, | |||
5783 | DAG.getConstant(NumSrcElts, dl, MVT::i64)); | |||
5784 | Src.WindowBase = -NumSrcElts; | |||
5785 | } else if (Src.MaxElt < NumSrcElts) { | |||
5786 | // The extraction can just take the first half | |||
5787 | Src.ShuffleVec = | |||
5788 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, | |||
5789 | DAG.getConstant(0, dl, MVT::i64)); | |||
5790 | } else { | |||
5791 | // An actual VEXT is needed | |||
5792 | SDValue VEXTSrc1 = | |||
5793 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, | |||
5794 | DAG.getConstant(0, dl, MVT::i64)); | |||
5795 | SDValue VEXTSrc2 = | |||
5796 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, | |||
5797 | DAG.getConstant(NumSrcElts, dl, MVT::i64)); | |||
5798 | unsigned Imm = Src.MinElt * getExtFactor(VEXTSrc1); | |||
5799 | ||||
5800 | Src.ShuffleVec = DAG.getNode(AArch64ISD::EXT, dl, DestVT, VEXTSrc1, | |||
5801 | VEXTSrc2, | |||
5802 | DAG.getConstant(Imm, dl, MVT::i32)); | |||
5803 | Src.WindowBase = -Src.MinElt; | |||
5804 | } | |||
5805 | } | |||
5806 | ||||
5807 | // Another possible incompatibility occurs from the vector element types. We | |||
5808 | // can fix this by bitcasting the source vectors to the same type we intend | |||
5809 | // for the shuffle. | |||
5810 | for (auto &Src : Sources) { | |||
5811 | EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType(); | |||
5812 | if (SrcEltTy == SmallestEltTy) | |||
5813 | continue; | |||
5814 | assert(ShuffleVT.getVectorElementType() == SmallestEltTy); | |||
5815 | Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec); | |||
5816 | Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits(); | |||
5817 | Src.WindowBase *= Src.WindowScale; | |||
5818 | } | |||
5819 | ||||
5820 | // Final sanity check before we try to actually produce a shuffle. | |||
5821 | LLVM_DEBUG(for (auto Src | |||
5822 | : Sources) | |||
5823 | assert(Src.ShuffleVec.getValueType() == ShuffleVT);); | |||
5824 | ||||
5825 | // The stars all align, our next step is to produce the mask for the shuffle. | |||
5826 | SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1); | |||
5827 | int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits(); | |||
5828 | for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { | |||
5829 | SDValue Entry = Op.getOperand(i); | |||
5830 | if (Entry.isUndef()) | |||
5831 | continue; | |||
5832 | ||||
5833 | auto Src = find(Sources, Entry.getOperand(0)); | |||
5834 | int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue(); | |||
5835 | ||||
5836 | // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit | |||
5837 | // trunc. So only std::min(SrcBits, DestBits) actually get defined in this | |||
5838 | // segment. | |||
5839 | EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType(); | |||
5840 | int BitsDefined = | |||
5841 | std::min(OrigEltTy.getSizeInBits(), VT.getScalarSizeInBits()); | |||
5842 | int LanesDefined = BitsDefined / BitsPerShuffleLane; | |||
5843 | ||||
5844 | // This source is expected to fill ResMultiplier lanes of the final shuffle, | |||
5845 | // starting at the appropriate offset. | |||
5846 | int *LaneMask = &Mask[i * ResMultiplier]; | |||
5847 | ||||
5848 | int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase; | |||
5849 | ExtractBase += NumElts * (Src - Sources.begin()); | |||
5850 | for (int j = 0; j < LanesDefined; ++j) | |||
5851 | LaneMask[j] = ExtractBase + j; | |||
5852 | } | |||
5853 | ||||
5854 | // Final check before we try to produce nonsense... | |||
5855 | if (!isShuffleMaskLegal(Mask, ShuffleVT)) { | |||
5856 | LLVM_DEBUG(dbgs() << "Reshuffle failed: illegal shuffle mask\n"); | |||
5857 | return SDValue(); | |||
5858 | } | |||
5859 | ||||
5860 | SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) }; | |||
5861 | for (unsigned i = 0; i < Sources.size(); ++i) | |||
5862 | ShuffleOps[i] = Sources[i].ShuffleVec; | |||
5863 | ||||
5864 | SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0], | |||
5865 | ShuffleOps[1], Mask); | |||
5866 | SDValue V = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle); | |||
5867 | ||||
5868 | LLVM_DEBUG(dbgs() << "Reshuffle, creating node: "; Shuffle.dump(); | |||
5869 | dbgs() << "Reshuffle, creating node: "; V.dump();); | |||
5870 | ||||
5871 | return V; | |||
5872 | } | |||
5873 | ||||
5874 | // Check if an EXT instruction can handle the shuffle mask when the | |||
5875 | // vector sources of the shuffle are the same. | |||
5876 | static bool isSingletonEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { | |||
5877 | unsigned NumElts = VT.getVectorNumElements(); | |||
5878 | ||||
5879 | // Assume that the first shuffle index is not UNDEF. Fail if it is. | |||
5880 | if (M[0] < 0) | |||
5881 | return false; | |||
5882 | ||||
5883 | Imm = M[0]; | |||
5884 | ||||
5885 | // If this is a VEXT shuffle, the immediate value is the index of the first | |||
5886 | // element. The other shuffle indices must be the successive elements after | |||
5887 | // the first one. | |||
5888 | unsigned ExpectedElt = Imm; | |||
5889 | for (unsigned i = 1; i < NumElts; ++i) { | |||
5890 | // Increment the expected index. If it wraps around, just follow it | |||
5891 | // back to index zero and keep going. | |||
5892 | ++ExpectedElt; | |||
5893 | if (ExpectedElt == NumElts) | |||
5894 | ExpectedElt = 0; | |||
5895 | ||||
5896 | if (M[i] < 0) | |||
5897 | continue; // ignore UNDEF indices | |||
5898 | if (ExpectedElt != static_cast<unsigned>(M[i])) | |||
5899 | return false; | |||
5900 | } | |||
5901 | ||||
5902 | return true; | |||
5903 | } | |||
5904 | ||||
5905 | // Check if an EXT instruction can handle the shuffle mask when the | |||
5906 | // vector sources of the shuffle are different. | |||
5907 | static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT, | |||
5908 | unsigned &Imm) { | |||
5909 | // Look for the first non-undef element. | |||
5910 | const int *FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; }); | |||
5911 | ||||
5912 | // Benefit from APInt to handle overflow when calculating the expected element. | |||
5913 | unsigned NumElts = VT.getVectorNumElements(); | |||
5914 | unsigned MaskBits = APInt(32, NumElts * 2).logBase2(); | |||
5915 | APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1); | |||
5916 | // The following shuffle indices must be the successive elements after the | |||
5917 | // first real element. | |||
5918 | const int *FirstWrongElt = std::find_if(FirstRealElt + 1, M.end(), | |||
5919 | [&](int Elt) {return Elt != ExpectedElt++ && Elt != -1;}); | |||
5920 | if (FirstWrongElt != M.end()) | |||
5921 | return false; | |||
5922 | ||||
5923 | // The index of an EXT is the first element if it is not UNDEF. | |||
5924 | // Watch out for the beginning UNDEFs. The EXT index should be the expected | |||
5925 | // value of the first element. E.g. | |||
5926 | // <-1, -1, 3, ...> is treated as <1, 2, 3, ...>. | |||
5927 | // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>. | |||
5928 | // ExpectedElt is the last mask index plus 1. | |||
5929 | Imm = ExpectedElt.getZExtValue(); | |||
5930 | ||||
5931 | // There are two different cases that require reversing the input vectors. | |||
5932 | // For example, for vector <4 x i32> we have the following cases, | |||
5933 | // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>) | |||
5934 | // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>) | |||
5935 | // For both cases, we finally use mask <5, 6, 7, 0>, which requires | |||
5936 | // to reverse two input vectors. | |||
5937 | if (Imm < NumElts) | |||
5938 | ReverseEXT = true; | |||
5939 | else | |||
5940 | Imm -= NumElts; | |||
5941 | ||||
5942 | return true; | |||
5943 | } | |||
5944 | ||||
5945 | /// isREVMask - Check if a vector shuffle corresponds to a REV | |||
5946 | /// instruction with the specified blocksize. (The order of the elements | |||
5947 | /// within each block of the vector is reversed.) | |||
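/// For example, on v8i16 a REV32 (BlockSize 32) corresponds to the mask
/// <1, 0, 3, 2, 5, 4, 7, 6>: the two i16 lanes inside each 32-bit block
/// are swapped.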
5948 | static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { | |||
5949 | assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) && | |||
5950 | "Only possible block sizes for REV are: 16, 32, 64"); | |||
5951 | ||||
5952 | unsigned EltSz = VT.getScalarSizeInBits(); | |||
5953 | if (EltSz == 64) | |||
5954 | return false; | |||
5955 | ||||
5956 | unsigned NumElts = VT.getVectorNumElements(); | |||
5957 | unsigned BlockElts = M[0] + 1; | |||
5958 | // If the first shuffle index is UNDEF, be optimistic. | |||
5959 | if (M[0] < 0) | |||
5960 | BlockElts = BlockSize / EltSz; | |||
5961 | ||||
5962 | if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) | |||
5963 | return false; | |||
5964 | ||||
5965 | for (unsigned i = 0; i < NumElts; ++i) { | |||
5966 | if (M[i] < 0) | |||
5967 | continue; // ignore UNDEF indices | |||
5968 | if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts)) | |||
5969 | return false; | |||
5970 | } | |||
5971 | ||||
5972 | return true; | |||
5973 | } | |||
5974 | ||||
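// A ZIP interleaves the low (ZIP1) or high (ZIP2) halves of the inputs;
// e.g. for v4i32 the masks are <0, 4, 1, 5> (WhichResult == 0) and
// <2, 6, 3, 7> (WhichResult == 1).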
5975 | static bool isZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { | |||
5976 | unsigned NumElts = VT.getVectorNumElements(); | |||
5977 | WhichResult = (M[0] == 0 ? 0 : 1); | |||
5978 | unsigned Idx = WhichResult * NumElts / 2; | |||
5979 | for (unsigned i = 0; i != NumElts; i += 2) { | |||
5980 | if ((M[i] >= 0 && (unsigned)M[i] != Idx) || | |||
5981 | (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx + NumElts)) | |||
5982 | return false; | |||
5983 | Idx += 1; | |||
5984 | } | |||
5985 | ||||
5986 | return true; | |||
5987 | } | |||
5988 | ||||
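// A UZP keeps the even (UZP1) or odd (UZP2) elements across both inputs;
// e.g. for v4i32 the masks are <0, 2, 4, 6> and <1, 3, 5, 7>.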
5989 | static bool isUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { | |||
5990 | unsigned NumElts = VT.getVectorNumElements(); | |||
5991 | WhichResult = (M[0] == 0 ? 0 : 1); | |||
5992 | for (unsigned i = 0; i != NumElts; ++i) { | |||
5993 | if (M[i] < 0) | |||
5994 | continue; // ignore UNDEF indices | |||
5995 | if ((unsigned)M[i] != 2 * i + WhichResult) | |||
5996 | return false; | |||
5997 | } | |||
5998 | ||||
5999 | return true; | |||
6000 | } | |||
6001 | ||||
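// A TRN transposes 2x2 blocks drawn from the two inputs; e.g. for v4i32
// the masks are <0, 4, 2, 6> (TRN1) and <1, 5, 3, 7> (TRN2).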
6002 | static bool isTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { | |||
6003 | unsigned NumElts = VT.getVectorNumElements(); | |||
6004 | WhichResult = (M[0] == 0 ? 0 : 1); | |||
6005 | for (unsigned i = 0; i < NumElts; i += 2) { | |||
6006 | if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) || | |||
6007 | (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + NumElts + WhichResult)) | |||
6008 | return false; | |||
6009 | } | |||
6010 | return true; | |||
6011 | } | |||
6012 | ||||
6013 | /// isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of | |||
6014 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". | |||
6015 | /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. | |||
6016 | static bool isZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { | |||
6017 | unsigned NumElts = VT.getVectorNumElements(); | |||
6018 | WhichResult = (M[0] == 0 ? 0 : 1); | |||
6019 | unsigned Idx = WhichResult * NumElts / 2; | |||
6020 | for (unsigned i = 0; i != NumElts; i += 2) { | |||
6021 | if ((M[i] >= 0 && (unsigned)M[i] != Idx) || | |||
6022 | (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx)) | |||
6023 | return false; | |||
6024 | Idx += 1; | |||
6025 | } | |||
6026 | ||||
6027 | return true; | |||
6028 | } | |||
6029 | ||||
6030 | /// isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of | |||
6031 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". | |||
6032 | /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>. | |||
6033 | static bool isUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { | |||
6034 | unsigned Half = VT.getVectorNumElements() / 2; | |||
6035 | WhichResult = (M[0] == 0 ? 0 : 1); | |||
6036 | for (unsigned j = 0; j != 2; ++j) { | |||
6037 | unsigned Idx = WhichResult; | |||
6038 | for (unsigned i = 0; i != Half; ++i) { | |||
6039 | int MIdx = M[i + j * Half]; | |||
6040 | if (MIdx >= 0 && (unsigned)MIdx != Idx) | |||
6041 | return false; | |||
6042 | Idx += 2; | |||
6043 | } | |||
6044 | } | |||
6045 | ||||
6046 | return true; | |||
6047 | } | |||
6048 | ||||
6049 | /// isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of | |||
6050 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". | |||
6051 | /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. | |||
6052 | static bool isTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { | |||
6053 | unsigned NumElts = VT.getVectorNumElements(); | |||
6054 | WhichResult = (M[0] == 0 ? 0 : 1); | |||
6055 | for (unsigned i = 0; i < NumElts; i += 2) { | |||
6056 | if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) || | |||
6057 | (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + WhichResult)) | |||
6058 | return false; | |||
6059 | } | |||
6060 | return true; | |||
6061 | } | |||
6062 | ||||
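// Matches masks that copy one input unchanged except for one anomalous
// lane, which a single INS can patch in afterwards; e.g. for v4i32,
// <0, 1, 6, 3> matches with DstIsLeft == true and Anomaly == 2.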
6063 | static bool isINSMask(ArrayRef<int> M, int NumInputElements, | |||
6064 | bool &DstIsLeft, int &Anomaly) { | |||
6065 | if (M.size() != static_cast<size_t>(NumInputElements)) | |||
6066 | return false; | |||
6067 | ||||
6068 | int NumLHSMatch = 0, NumRHSMatch = 0; | |||
6069 | int LastLHSMismatch = -1, LastRHSMismatch = -1; | |||
6070 | ||||
6071 | for (int i = 0; i < NumInputElements; ++i) { | |||
6072 | if (M[i] == -1) { | |||
6073 | ++NumLHSMatch; | |||
6074 | ++NumRHSMatch; | |||
6075 | continue; | |||
6076 | } | |||
6077 | ||||
6078 | if (M[i] == i) | |||
6079 | ++NumLHSMatch; | |||
6080 | else | |||
6081 | LastLHSMismatch = i; | |||
6082 | ||||
6083 | if (M[i] == i + NumInputElements) | |||
6084 | ++NumRHSMatch; | |||
6085 | else | |||
6086 | LastRHSMismatch = i; | |||
6087 | } | |||
6088 | ||||
6089 | if (NumLHSMatch == NumInputElements - 1) { | |||
6090 | DstIsLeft = true; | |||
6091 | Anomaly = LastLHSMismatch; | |||
6092 | return true; | |||
6093 | } else if (NumRHSMatch == NumInputElements - 1) { | |||
6094 | DstIsLeft = false; | |||
6095 | Anomaly = LastRHSMismatch; | |||
6096 | return true; | |||
6097 | } | |||
6098 | ||||
6099 | return false; | |||
6100 | } | |||
6101 | ||||
6102 | static bool isConcatMask(ArrayRef<int> Mask, EVT VT, bool SplitLHS) { | |||
6103 | if (VT.getSizeInBits() != 128) | |||
6104 | return false; | |||
6105 | ||||
6106 | unsigned NumElts = VT.getVectorNumElements(); | |||
6107 | ||||
6108 | for (int I = 0, E = NumElts / 2; I != E; I++) { | |||
6109 | if (Mask[I] != I) | |||
6110 | return false; | |||
6111 | } | |||
6112 | ||||
6113 | int Offset = NumElts / 2; | |||
6114 | for (int I = NumElts / 2, E = NumElts; I != E; I++) { | |||
6115 | if (Mask[I] != I + SplitLHS * Offset) | |||
6116 | return false; | |||
6117 | } | |||
6118 | ||||
6119 | return true; | |||
6120 | } | |||
6121 | ||||
6122 | static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) { | |||
6123 | SDLoc DL(Op); | |||
6124 | EVT VT = Op.getValueType(); | |||
6125 | SDValue V0 = Op.getOperand(0); | |||
6126 | SDValue V1 = Op.getOperand(1); | |||
6127 | ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask(); | |||
6128 | ||||
6129 | if (VT.getVectorElementType() != V0.getValueType().getVectorElementType() || | |||
6130 | VT.getVectorElementType() != V1.getValueType().getVectorElementType()) | |||
6131 | return SDValue(); | |||
6132 | ||||
6133 | bool SplitV0 = V0.getValueSizeInBits() == 128; | |||
6134 | ||||
6135 | if (!isConcatMask(Mask, VT, SplitV0)) | |||
6136 | return SDValue(); | |||
6137 | ||||
6138 | EVT CastVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), | |||
6139 | VT.getVectorNumElements() / 2); | |||
6140 | if (SplitV0) { | |||
6141 | V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0, | |||
6142 | DAG.getConstant(0, DL, MVT::i64)); | |||
6143 | } | |||
6144 | if (V1.getValueSizeInBits() == 128) { | |||
6145 | V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1, | |||
6146 | DAG.getConstant(0, DL, MVT::i64)); | |||
6147 | } | |||
6148 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1); | |||
6149 | } | |||
6150 | ||||
6151 | /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit | |||
6152 | /// the specified operations to build the shuffle. | |||
6153 | static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, | |||
6154 | SDValue RHS, SelectionDAG &DAG, | |||
6155 | const SDLoc &dl) { | |||
6156 | unsigned OpNum = (PFEntry >> 26) & 0x0F; | |||
6157 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1); | |||
6158 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1); | |||
6159 | ||||
6160 | enum { | |||
6161 | OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> | |||
6162 | OP_VREV, | |||
6163 | OP_VDUP0, | |||
6164 | OP_VDUP1, | |||
6165 | OP_VDUP2, | |||
6166 | OP_VDUP3, | |||
6167 | OP_VEXT1, | |||
6168 | OP_VEXT2, | |||
6169 | OP_VEXT3, | |||
6170 | OP_VUZPL, // VUZP, left result | |||
6171 | OP_VUZPR, // VUZP, right result | |||
6172 | OP_VZIPL, // VZIP, left result | |||
6173 | OP_VZIPR, // VZIP, right result | |||
6174 | OP_VTRNL, // VTRN, left result | |||
6175 | OP_VTRNR // VTRN, right result | |||
6176 | }; | |||
6177 | ||||
6178 | if (OpNum == OP_COPY) { | |||
6179 | if (LHSID == (1 * 9 + 2) * 9 + 3) | |||
6180 | return LHS; | |||
6181 | assert(LHSID == ((4 * 9 + 5) * 9 + 6) * 9 + 7 && "Illegal OP_COPY!"); | |||
6182 | return RHS; | |||
6183 | } | |||
6184 | ||||
6185 | SDValue OpLHS, OpRHS; | |||
6186 | OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); | |||
6187 | OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); | |||
6188 | EVT VT = OpLHS.getValueType(); | |||
6189 | ||||
6190 | switch (OpNum) { | |||
6191 | default: | |||
6192 | llvm_unreachable("Unknown shuffle opcode!"); | |||
6193 | case OP_VREV: | |||
6194 | // VREV divides the vector in half and swaps within the half. | |||
6195 | if (VT.getVectorElementType() == MVT::i32 || | |||
6196 | VT.getVectorElementType() == MVT::f32) | |||
6197 | return DAG.getNode(AArch64ISD::REV64, dl, VT, OpLHS); | |||
6198 | // vrev <4 x i16> -> REV32 | |||
6199 | if (VT.getVectorElementType() == MVT::i16 || | |||
6200 | VT.getVectorElementType() == MVT::f16) | |||
6201 | return DAG.getNode(AArch64ISD::REV32, dl, VT, OpLHS); | |||
6202 | // vrev <4 x i8> -> REV16 | |||
6203 | assert(VT.getVectorElementType() == MVT::i8); | |||
6204 | return DAG.getNode(AArch64ISD::REV16, dl, VT, OpLHS); | |||
6205 | case OP_VDUP0: | |||
6206 | case OP_VDUP1: | |||
6207 | case OP_VDUP2: | |||
6208 | case OP_VDUP3: { | |||
6209 | EVT EltTy = VT.getVectorElementType(); | |||
6210 | unsigned Opcode; | |||
6211 | if (EltTy == MVT::i8) | |||
6212 | Opcode = AArch64ISD::DUPLANE8; | |||
6213 | else if (EltTy == MVT::i16 || EltTy == MVT::f16) | |||
6214 | Opcode = AArch64ISD::DUPLANE16; | |||
6215 | else if (EltTy == MVT::i32 || EltTy == MVT::f32) | |||
6216 | Opcode = AArch64ISD::DUPLANE32; | |||
6217 | else if (EltTy == MVT::i64 || EltTy == MVT::f64) | |||
6218 | Opcode = AArch64ISD::DUPLANE64; | |||
6219 | else | |||
6220 | llvm_unreachable("Invalid vector element type?"); | |||
6221 | ||||
6222 | if (VT.getSizeInBits() == 64) | |||
6223 | OpLHS = WidenVector(OpLHS, DAG); | |||
6224 | SDValue Lane = DAG.getConstant(OpNum - OP_VDUP0, dl, MVT::i64); | |||
6225 | return DAG.getNode(Opcode, dl, VT, OpLHS, Lane); | |||
6226 | } | |||
6227 | case OP_VEXT1: | |||
6228 | case OP_VEXT2: | |||
6229 | case OP_VEXT3: { | |||
6230 | unsigned Imm = (OpNum - OP_VEXT1 + 1) * getExtFactor(OpLHS); | |||
6231 | return DAG.getNode(AArch64ISD::EXT, dl, VT, OpLHS, OpRHS, | |||
6232 | DAG.getConstant(Imm, dl, MVT::i32)); | |||
6233 | } | |||
6234 | case OP_VUZPL: | |||
6235 | return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), OpLHS, | |||
6236 | OpRHS); | |||
6237 | case OP_VUZPR: | |||
6238 | return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), OpLHS, | |||
6239 | OpRHS); | |||
6240 | case OP_VZIPL: | |||
6241 | return DAG.getNode(AArch64ISD::ZIP1, dl, DAG.getVTList(VT, VT), OpLHS, | |||
6242 | OpRHS); | |||
6243 | case OP_VZIPR: | |||
6244 | return DAG.getNode(AArch64ISD::ZIP2, dl, DAG.getVTList(VT, VT), OpLHS, | |||
6245 | OpRHS); | |||
6246 | case OP_VTRNL: | |||
6247 | return DAG.getNode(AArch64ISD::TRN1, dl, DAG.getVTList(VT, VT), OpLHS, | |||
6248 | OpRHS); | |||
6249 | case OP_VTRNR: | |||
6250 | return DAG.getNode(AArch64ISD::TRN2, dl, DAG.getVTList(VT, VT), OpLHS, | |||
6251 | OpRHS); | |||
6252 | } | |||
6253 | } | |||
6254 | ||||
6255 | static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask, | |||
6256 | SelectionDAG &DAG) { | |||
6257 | // Check to see if we can use the TBL instruction. | |||
6258 | SDValue V1 = Op.getOperand(0); | |||
6259 | SDValue V2 = Op.getOperand(1); | |||
6260 | SDLoc DL(Op); | |||
6261 | ||||
6262 | EVT EltVT = Op.getValueType().getVectorElementType(); | |||
6263 | unsigned BytesPerElt = EltVT.getSizeInBits() / 8; | |||
6264 | ||||
6265 | SmallVector<SDValue, 8> TBLMask; | |||
6266 | for (int Val : ShuffleMask) { | |||
6267 | for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) { | |||
6268 | unsigned Offset = Byte + Val * BytesPerElt; | |||
6269 | TBLMask.push_back(DAG.getConstant(Offset, DL, MVT::i32)); | |||
6270 | } | |||
6271 | } | |||
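// E.g. for a v4i16 shuffle (BytesPerElt == 2), a mask entry Val == 2
// expands to byte indices 4 and 5 above, since TBL selects individual
// bytes of the table register.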
6272 | ||||
6273 | MVT IndexVT = MVT::v8i8; | |||
6274 | unsigned IndexLen = 8; | |||
6275 | if (Op.getValueSizeInBits() == 128) { | |||
6276 | IndexVT = MVT::v16i8; | |||
6277 | IndexLen = 16; | |||
6278 | } | |||
6279 | ||||
6280 | SDValue V1Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V1); | |||
6281 | SDValue V2Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V2); | |||
6282 | ||||
6283 | SDValue Shuffle; | |||
6284 | if (V2.getNode()->isUndef()) { | |||
6285 | if (IndexLen == 8) | |||
6286 | V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V1Cst); | |||
6287 | Shuffle = DAG.getNode( | |||
6288 | ISD::INTRINSIC_WO_CHAIN, DL, IndexVT, | |||
6289 | DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst, | |||
6290 | DAG.getBuildVector(IndexVT, DL, | |||
6291 | makeArrayRef(TBLMask.data(), IndexLen))); | |||
6292 | } else { | |||
6293 | if (IndexLen == 8) { | |||
6294 | V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V2Cst); | |||
6295 | Shuffle = DAG.getNode( | |||
6296 | ISD::INTRINSIC_WO_CHAIN, DL, IndexVT, | |||
6297 | DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst, | |||
6298 | DAG.getBuildVector(IndexVT, DL, | |||
6299 | makeArrayRef(TBLMask.data(), IndexLen))); | |||
6300 | } else { | |||
6301 | // FIXME: We cannot, for the moment, emit a TBL2 instruction because we | |||
6302 | // cannot currently represent the register constraints on the input | |||
6303 | // table registers. | |||
6304 | // Shuffle = DAG.getNode(AArch64ISD::TBL2, DL, IndexVT, V1Cst, V2Cst, | |||
6305 | // DAG.getBuildVector(IndexVT, DL, &TBLMask[0], | |||
6306 | // IndexLen)); | |||
6307 | Shuffle = DAG.getNode( | |||
6308 | ISD::INTRINSIC_WO_CHAIN, DL, IndexVT, | |||
6309 | DAG.getConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32), V1Cst, | |||
6310 | V2Cst, DAG.getBuildVector(IndexVT, DL, | |||
6311 | makeArrayRef(TBLMask.data(), IndexLen))); | |||
6312 | } | |||
6313 | } | |||
6314 | return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Shuffle); | |||
6315 | } | |||
6316 | ||||
6317 | static unsigned getDUPLANEOp(EVT EltType) { | |||
6318 | if (EltType == MVT::i8) | |||
6319 | return AArch64ISD::DUPLANE8; | |||
6320 | if (EltType == MVT::i16 || EltType == MVT::f16) | |||
6321 | return AArch64ISD::DUPLANE16; | |||
6322 | if (EltType == MVT::i32 || EltType == MVT::f32) | |||
6323 | return AArch64ISD::DUPLANE32; | |||
6324 | if (EltType == MVT::i64 || EltType == MVT::f64) | |||
6325 | return AArch64ISD::DUPLANE64; | |||
6326 | ||||
6327 |   llvm_unreachable("Invalid vector element type?"); | |||
6328 | } | |||
6329 | ||||
6330 | SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, | |||
6331 | SelectionDAG &DAG) const { | |||
6332 | SDLoc dl(Op); | |||
6333 | EVT VT = Op.getValueType(); | |||
6334 | ||||
6335 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); | |||
6336 | ||||
6337 | // Convert shuffles that are directly supported on NEON to target-specific | |||
6338 | // DAG nodes, instead of keeping them as shuffles and matching them again | |||
6339 | // during code selection. This is more efficient and avoids the possibility | |||
6340 | // of inconsistencies between legalization and selection. | |||
6341 | ArrayRef<int> ShuffleMask = SVN->getMask(); | |||
6342 | ||||
6343 | SDValue V1 = Op.getOperand(0); | |||
6344 | SDValue V2 = Op.getOperand(1); | |||
6345 | ||||
6346 | if (SVN->isSplat()) { | |||
6347 | int Lane = SVN->getSplatIndex(); | |||
6348 |     // If this is an undef splat, generate it via "just" a DUP, if possible. | |||
6349 | if (Lane == -1) | |||
6350 | Lane = 0; | |||
6351 | ||||
6352 | if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) | |||
6353 | return DAG.getNode(AArch64ISD::DUP, dl, V1.getValueType(), | |||
6354 | V1.getOperand(0)); | |||
6355 | // Test if V1 is a BUILD_VECTOR and the lane being referenced is a non- | |||
6356 | // constant. If so, we can just reference the lane's definition directly. | |||
6357 | if (V1.getOpcode() == ISD::BUILD_VECTOR && | |||
6358 | !isa<ConstantSDNode>(V1.getOperand(Lane))) | |||
6359 | return DAG.getNode(AArch64ISD::DUP, dl, VT, V1.getOperand(Lane)); | |||
6360 | ||||
6361 | // Otherwise, duplicate from the lane of the input vector. | |||
6362 | unsigned Opcode = getDUPLANEOp(V1.getValueType().getVectorElementType()); | |||
6363 | ||||
6364 |     // SelectionDAGBuilder may have "helpfully" already extracted or concatenated | |||
6365 | // to make a vector of the same size as this SHUFFLE. We can ignore the | |||
6366 | // extract entirely, and canonicalise the concat using WidenVector. | |||
6367 | if (V1.getOpcode() == ISD::EXTRACT_SUBVECTOR) { | |||
6368 | Lane += cast<ConstantSDNode>(V1.getOperand(1))->getZExtValue(); | |||
6369 | V1 = V1.getOperand(0); | |||
6370 | } else if (V1.getOpcode() == ISD::CONCAT_VECTORS) { | |||
6371 | unsigned Idx = Lane >= (int)VT.getVectorNumElements() / 2; | |||
6372 | Lane -= Idx * VT.getVectorNumElements() / 2; | |||
6373 | V1 = WidenVector(V1.getOperand(Idx), DAG); | |||
6374 | } else if (VT.getSizeInBits() == 64) | |||
6375 | V1 = WidenVector(V1, DAG); | |||
6376 | ||||
6377 | return DAG.getNode(Opcode, dl, VT, V1, DAG.getConstant(Lane, dl, MVT::i64)); | |||
6378 | } | |||
6379 | ||||
6380 | if (isREVMask(ShuffleMask, VT, 64)) | |||
6381 | return DAG.getNode(AArch64ISD::REV64, dl, V1.getValueType(), V1, V2); | |||
6382 | if (isREVMask(ShuffleMask, VT, 32)) | |||
6383 | return DAG.getNode(AArch64ISD::REV32, dl, V1.getValueType(), V1, V2); | |||
6384 | if (isREVMask(ShuffleMask, VT, 16)) | |||
6385 | return DAG.getNode(AArch64ISD::REV16, dl, V1.getValueType(), V1, V2); | |||
6386 | ||||
6387 | bool ReverseEXT = false; | |||
6388 | unsigned Imm; | |||
6389 | if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) { | |||
6390 | if (ReverseEXT) | |||
6391 | std::swap(V1, V2); | |||
6392 | Imm *= getExtFactor(V1); | |||
6393 | return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2, | |||
6394 | DAG.getConstant(Imm, dl, MVT::i32)); | |||
6395 | } else if (V2->isUndef() && isSingletonEXTMask(ShuffleMask, VT, Imm)) { | |||
6396 | Imm *= getExtFactor(V1); | |||
6397 | return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1, | |||
6398 | DAG.getConstant(Imm, dl, MVT::i32)); | |||
6399 | } | |||
6400 | ||||
6401 | unsigned WhichResult; | |||
6402 | if (isZIPMask(ShuffleMask, VT, WhichResult)) { | |||
6403 | unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2; | |||
6404 | return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2); | |||
6405 | } | |||
6406 | if (isUZPMask(ShuffleMask, VT, WhichResult)) { | |||
6407 | unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2; | |||
6408 | return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2); | |||
6409 | } | |||
6410 | if (isTRNMask(ShuffleMask, VT, WhichResult)) { | |||
6411 | unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2; | |||
6412 | return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2); | |||
6413 | } | |||
6414 | ||||
6415 | if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) { | |||
6416 | unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2; | |||
6417 | return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1); | |||
6418 | } | |||
6419 | if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) { | |||
6420 | unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2; | |||
6421 | return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1); | |||
6422 | } | |||
6423 | if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) { | |||
6424 | unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2; | |||
6425 | return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1); | |||
6426 | } | |||
6427 | ||||
6428 | if (SDValue Concat = tryFormConcatFromShuffle(Op, DAG)) | |||
6429 | return Concat; | |||
6430 | ||||
6431 | bool DstIsLeft; | |||
6432 | int Anomaly; | |||
6433 | int NumInputElements = V1.getValueType().getVectorNumElements(); | |||
6434 | if (isINSMask(ShuffleMask, NumInputElements, DstIsLeft, Anomaly)) { | |||
6435 | SDValue DstVec = DstIsLeft ? V1 : V2; | |||
6436 | SDValue DstLaneV = DAG.getConstant(Anomaly, dl, MVT::i64); | |||
6437 | ||||
6438 | SDValue SrcVec = V1; | |||
6439 | int SrcLane = ShuffleMask[Anomaly]; | |||
6440 | if (SrcLane >= NumInputElements) { | |||
6441 | SrcVec = V2; | |||
6442 | SrcLane -= VT.getVectorNumElements(); | |||
6443 | } | |||
6444 | SDValue SrcLaneV = DAG.getConstant(SrcLane, dl, MVT::i64); | |||
6445 | ||||
6446 | EVT ScalarVT = VT.getVectorElementType(); | |||
6447 | ||||
6448 | if (ScalarVT.getSizeInBits() < 32 && ScalarVT.isInteger()) | |||
6449 | ScalarVT = MVT::i32; | |||
6450 | ||||
6451 | return DAG.getNode( | |||
6452 | ISD::INSERT_VECTOR_ELT, dl, VT, DstVec, | |||
6453 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, SrcVec, SrcLaneV), | |||
6454 | DstLaneV); | |||
6455 | } | |||
6456 | ||||
6457 | // If the shuffle is not directly supported and it has 4 elements, use | |||
6458 | // the PerfectShuffle-generated table to synthesize it from other shuffles. | |||
6459 | unsigned NumElts = VT.getVectorNumElements(); | |||
6460 | if (NumElts == 4) { | |||
6461 | unsigned PFIndexes[4]; | |||
6462 | for (unsigned i = 0; i != 4; ++i) { | |||
6463 | if (ShuffleMask[i] < 0) | |||
6464 | PFIndexes[i] = 8; | |||
6465 | else | |||
6466 | PFIndexes[i] = ShuffleMask[i]; | |||
6467 | } | |||
6468 | ||||
6469 | // Compute the index in the perfect shuffle table. | |||
6470 | unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 + | |||
6471 | PFIndexes[2] * 9 + PFIndexes[3]; | |||
6472 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; | |||
6473 | unsigned Cost = (PFEntry >> 30); | |||
6474 | ||||
6475 | if (Cost <= 4) | |||
6476 | return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); | |||
6477 | } | |||
6478 | ||||
6479 | return GenerateTBL(Op, ShuffleMask, DAG); | |||
6480 | } | |||
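// A worked example of the perfect-shuffle lookup above (a sketch; the table
// itself lives in AArch64PerfectShuffle.h). Each of the four mask entries is
// a base-9 digit, with 8 standing in for an undef lane, so the mask
// <0, 1, 4, 5> selects entry 0*729 + 1*81 + 4*9 + 5 = 122, and the top two
// bits of that entry hold the cost compared against the threshold of 4
// before falling back to TBL.
static unsigned perfectShuffleIndex(const int Mask[4]) {
  unsigned Index = 0;
  for (unsigned I = 0; I != 4; ++I)
    Index = Index * 9 + (Mask[I] < 0 ? 8u : (unsigned)Mask[I]);
  return Index;
}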
6481 | ||||
6482 | static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits, | |||
6483 | APInt &UndefBits) { | |||
6484 | EVT VT = BVN->getValueType(0); | |||
6485 | APInt SplatBits, SplatUndef; | |||
6486 | unsigned SplatBitSize; | |||
6487 | bool HasAnyUndefs; | |||
6488 | if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { | |||
6489 | unsigned NumSplats = VT.getSizeInBits() / SplatBitSize; | |||
6490 | ||||
6491 | for (unsigned i = 0; i < NumSplats; ++i) { | |||
6492 | CnstBits <<= SplatBitSize; | |||
6493 | UndefBits <<= SplatBitSize; | |||
6494 | CnstBits |= SplatBits.zextOrTrunc(VT.getSizeInBits()); | |||
6495 | UndefBits |= (SplatBits ^ SplatUndef).zextOrTrunc(VT.getSizeInBits()); | |||
6496 | } | |||
6497 | ||||
6498 | return true; | |||
6499 | } | |||
6500 | ||||
6501 | return false; | |||
6502 | } | |||
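// The accumulation loop above replicates the splat across the full vector
// width. A plain-integer model (sketch, assuming SplatBitSize == 32 and a
// 64-bit vector): a v2i32 splat of 0x000000FF yields
// CnstBits == 0x000000FF000000FF.
static unsigned long long replicateSplat32(unsigned SplatBits,
                                           unsigned NumSplats) {
  unsigned long long Cnst = 0;
  for (unsigned I = 0; I != NumSplats; ++I)
    Cnst = (Cnst << 32) | SplatBits; // mirrors CnstBits <<= SplatBitSize; |=
  return Cnst;
}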
6503 | ||||
6504 | // Try 64-bit splatted SIMD immediate. | |||
6505 | static SDValue tryAdvSIMDModImm64(unsigned NewOp, SDValue Op, SelectionDAG &DAG, | |||
6506 | const APInt &Bits) { | |||
6507 | if (Bits.getHiBits(64) == Bits.getLoBits(64)) { | |||
6508 | uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); | |||
6509 | EVT VT = Op.getValueType(); | |||
6510 | MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v2i64 : MVT::f64; | |||
6511 | ||||
6512 | if (AArch64_AM::isAdvSIMDModImmType10(Value)) { | |||
6513 | Value = AArch64_AM::encodeAdvSIMDModImmType10(Value); | |||
6514 | ||||
6515 | SDLoc dl(Op); | |||
6516 | SDValue Mov = DAG.getNode(NewOp, dl, MovTy, | |||
6517 | DAG.getConstant(Value, dl, MVT::i32)); | |||
6518 | return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); | |||
6519 | } | |||
6520 | } | |||
6521 | ||||
6522 | return SDValue(); | |||
6523 | } | |||
6524 | ||||
6525 | // Try 32-bit splatted SIMD immediate. | |||
6526 | static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG, | |||
6527 | const APInt &Bits, | |||
6528 | const SDValue *LHS = nullptr) { | |||
6529 | if (Bits.getHiBits(64) == Bits.getLoBits(64)) { | |||
6530 | uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); | |||
6531 | EVT VT = Op.getValueType(); | |||
6532 | MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; | |||
6533 | bool isAdvSIMDModImm = false; | |||
6534 | uint64_t Shift; | |||
6535 | ||||
6536 | if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType1(Value))) { | |||
6537 | Value = AArch64_AM::encodeAdvSIMDModImmType1(Value); | |||
6538 | Shift = 0; | |||
6539 | } | |||
6540 | else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType2(Value))) { | |||
6541 | Value = AArch64_AM::encodeAdvSIMDModImmType2(Value); | |||
6542 | Shift = 8; | |||
6543 | } | |||
6544 | else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType3(Value))) { | |||
6545 | Value = AArch64_AM::encodeAdvSIMDModImmType3(Value); | |||
6546 | Shift = 16; | |||
6547 | } | |||
6548 | else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType4(Value))) { | |||
6549 | Value = AArch64_AM::encodeAdvSIMDModImmType4(Value); | |||
6550 | Shift = 24; | |||
6551 | } | |||
6552 | ||||
6553 | if (isAdvSIMDModImm) { | |||
6554 | SDLoc dl(Op); | |||
6555 | SDValue Mov; | |||
6556 | ||||
6557 | if (LHS) | |||
6558 | Mov = DAG.getNode(NewOp, dl, MovTy, *LHS, | |||
6559 | DAG.getConstant(Value, dl, MVT::i32), | |||
6560 | DAG.getConstant(Shift, dl, MVT::i32)); | |||
6561 | else | |||
6562 | Mov = DAG.getNode(NewOp, dl, MovTy, | |||
6563 | DAG.getConstant(Value, dl, MVT::i32), | |||
6564 | DAG.getConstant(Shift, dl, MVT::i32)); | |||
6565 | ||||
6566 | return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); | |||
6567 | } | |||
6568 | } | |||
6569 | ||||
6570 | return SDValue(); | |||
6571 | } | |||
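// The Type1-Type4 checks above cover the 32-bit MOVI "shifted immediate"
// forms: a lane is representable iff at most one of its four bytes is
// non-zero, and that byte's position selects the LSL amount. For example, a
// splat of 0x00AB0000 becomes "movi v0.4s, #0xAB, lsl #16". A standalone
// model of that classification (illustrative sketch only):
static bool classifyMovi32Lane(unsigned Lane, unsigned char &Imm8,
                               unsigned &Shift) {
  for (Shift = 0; Shift != 32; Shift += 8)
    if ((Lane & ~(0xFFu << Shift)) == 0) {
      Imm8 = (unsigned char)(Lane >> Shift); // the single non-zero byte
      return true;
    }
  return false;
}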
6572 | ||||
6573 | // Try 16-bit splatted SIMD immediate. | |||
6574 | static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG, | |||
6575 | const APInt &Bits, | |||
6576 | const SDValue *LHS = nullptr) { | |||
6577 | if (Bits.getHiBits(64) == Bits.getLoBits(64)) { | |||
6578 | uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); | |||
6579 | EVT VT = Op.getValueType(); | |||
6580 | MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16; | |||
6581 | bool isAdvSIMDModImm = false; | |||
6582 | uint64_t Shift; | |||
6583 | ||||
6584 | if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType5(Value))) { | |||
6585 | Value = AArch64_AM::encodeAdvSIMDModImmType5(Value); | |||
6586 | Shift = 0; | |||
6587 | } | |||
6588 | else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType6(Value))) { | |||
6589 | Value = AArch64_AM::encodeAdvSIMDModImmType6(Value); | |||
6590 | Shift = 8; | |||
6591 | } | |||
6592 | ||||
6593 | if (isAdvSIMDModImm) { | |||
6594 | SDLoc dl(Op); | |||
6595 | SDValue Mov; | |||
6596 | ||||
6597 | if (LHS) | |||
6598 | Mov = DAG.getNode(NewOp, dl, MovTy, *LHS, | |||
6599 | DAG.getConstant(Value, dl, MVT::i32), | |||
6600 | DAG.getConstant(Shift, dl, MVT::i32)); | |||
6601 | else | |||
6602 | Mov = DAG.getNode(NewOp, dl, MovTy, | |||
6603 | DAG.getConstant(Value, dl, MVT::i32), | |||
6604 | DAG.getConstant(Shift, dl, MVT::i32)); | |||
6605 | ||||
6606 | return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); | |||
6607 | } | |||
6608 | } | |||
6609 | ||||
6610 | return SDValue(); | |||
6611 | } | |||
6612 | ||||
6613 | // Try 32-bit splatted SIMD immediate with shifted ones. | |||
6614 | static SDValue tryAdvSIMDModImm321s(unsigned NewOp, SDValue Op, | |||
6615 | SelectionDAG &DAG, const APInt &Bits) { | |||
6616 | if (Bits.getHiBits(64) == Bits.getLoBits(64)) { | |||
6617 | uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); | |||
6618 | EVT VT = Op.getValueType(); | |||
6619 | MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; | |||
6620 | bool isAdvSIMDModImm = false; | |||
6621 | uint64_t Shift; | |||
6622 | ||||
6623 | if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType7(Value))) { | |||
6624 | Value = AArch64_AM::encodeAdvSIMDModImmType7(Value); | |||
6625 | Shift = 264; | |||
6626 | } | |||
6627 | else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType8(Value))) { | |||
6628 | Value = AArch64_AM::encodeAdvSIMDModImmType8(Value); | |||
6629 | Shift = 272; | |||
6630 | } | |||
6631 | ||||
6632 | if (isAdvSIMDModImm) { | |||
6633 | SDLoc dl(Op); | |||
6634 | SDValue Mov = DAG.getNode(NewOp, dl, MovTy, | |||
6635 | DAG.getConstant(Value, dl, MVT::i32), | |||
6636 | DAG.getConstant(Shift, dl, MVT::i32)); | |||
6637 | return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); | |||
6638 | } | |||
6639 | } | |||
6640 | ||||
6641 | return SDValue(); | |||
6642 | } | |||
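// The MSL ("shifted ones") form fills the vacated low bits with ones rather
// than zeros: "movi v0.4s, #0xAB, msl #8" materializes lanes of 0x0000ABFF,
// and "msl #16" lanes of 0x00ABFFFF. The Shift values 264 and 272 above are
// 256 + 8 and 256 + 16, which appears to tag the amount as MSL rather than
// plain LSL. A per-lane model of the materialized value (sketch):
static unsigned moviMslLane(unsigned char Imm8, unsigned MslAmount /* 8|16 */) {
  return ((unsigned)Imm8 << MslAmount) | ((1u << MslAmount) - 1);
}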
6643 | ||||
6644 | // Try 8-bit splatted SIMD immediate. | |||
6645 | static SDValue tryAdvSIMDModImm8(unsigned NewOp, SDValue Op, SelectionDAG &DAG, | |||
6646 | const APInt &Bits) { | |||
6647 | if (Bits.getHiBits(64) == Bits.getLoBits(64)) { | |||
6648 | uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); | |||
6649 | EVT VT = Op.getValueType(); | |||
6650 | MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v16i8 : MVT::v8i8; | |||
6651 | ||||
6652 | if (AArch64_AM::isAdvSIMDModImmType9(Value)) { | |||
6653 | Value = AArch64_AM::encodeAdvSIMDModImmType9(Value); | |||
6654 | ||||
6655 | SDLoc dl(Op); | |||
6656 | SDValue Mov = DAG.getNode(NewOp, dl, MovTy, | |||
6657 | DAG.getConstant(Value, dl, MVT::i32)); | |||
6658 | return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); | |||
6659 | } | |||
6660 | } | |||
6661 | ||||
6662 | return SDValue(); | |||
6663 | } | |||
6664 | ||||
6665 | // Try FP splatted SIMD immediate. | |||
6666 | static SDValue tryAdvSIMDModImmFP(unsigned NewOp, SDValue Op, SelectionDAG &DAG, | |||
6667 | const APInt &Bits) { | |||
6668 | if (Bits.getHiBits(64) == Bits.getLoBits(64)) { | |||
6669 | uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); | |||
6670 | EVT VT = Op.getValueType(); | |||
6671 | bool isWide = (VT.getSizeInBits() == 128); | |||
6672 | MVT MovTy; | |||
6673 | bool isAdvSIMDModImm = false; | |||
6674 | ||||
6675 | if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType11(Value))) { | |||
6676 | Value = AArch64_AM::encodeAdvSIMDModImmType11(Value); | |||
6677 | MovTy = isWide ? MVT::v4f32 : MVT::v2f32; | |||
6678 | } | |||
6679 | else if (isWide && | |||
6680 | (isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType12(Value))) { | |||
6681 | Value = AArch64_AM::encodeAdvSIMDModImmType12(Value); | |||
6682 | MovTy = MVT::v2f64; | |||
6683 | } | |||
6684 | ||||
6685 | if (isAdvSIMDModImm) { | |||
6686 | SDLoc dl(Op); | |||
6687 | SDValue Mov = DAG.getNode(NewOp, dl, MovTy, | |||
6688 | DAG.getConstant(Value, dl, MVT::i32)); | |||
6689 | return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); | |||
6690 | } | |||
6691 | } | |||
6692 | ||||
6693 | return SDValue(); | |||
6694 | } | |||
6695 | ||||
6696 | SDValue AArch64TargetLowering::LowerVectorAND(SDValue Op, | |||
6697 | SelectionDAG &DAG) const { | |||
6698 | SDValue LHS = Op.getOperand(0); | |||
6699 | EVT VT = Op.getValueType(); | |||
6700 | ||||
6701 | BuildVectorSDNode *BVN = | |||
6702 | dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode()); | |||
6703 | if (!BVN) { | |||
6704 | // AND commutes, so try swapping the operands. | |||
6705 | LHS = Op.getOperand(1); | |||
6706 | BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode()); | |||
6707 | } | |||
6708 | if (!BVN) | |||
6709 | return Op; | |||
6710 | ||||
6711 | APInt DefBits(VT.getSizeInBits(), 0); | |||
6712 | APInt UndefBits(VT.getSizeInBits(), 0); | |||
6713 | if (resolveBuildVector(BVN, DefBits, UndefBits)) { | |||
6714 | SDValue NewOp; | |||
6715 | ||||
6716 |     // We only have a BIC vector-immediate instruction, which is an AND-NOT. | |||
6717 | DefBits = ~DefBits; | |||
6718 | if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, Op, DAG, | |||
6719 | DefBits, &LHS)) || | |||
6720 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, Op, DAG, | |||
6721 | DefBits, &LHS))) | |||
6722 | return NewOp; | |||
6723 | ||||
6724 | UndefBits = ~UndefBits; | |||
6725 | if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, Op, DAG, | |||
6726 | UndefBits, &LHS)) || | |||
6727 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, Op, DAG, | |||
6728 | UndefBits, &LHS))) | |||
6729 | return NewOp; | |||
6730 | } | |||
6731 | ||||
6732 | // We can always fall back to a non-immediate AND. | |||
6733 | return Op; | |||
6734 | } | |||
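// A worked example of the inversion above: lowering
//   and v4i32 X, splat(0xFFFFFF00)
// inverts the mask to DefBits = splat(0x000000FF), which the 32-bit
// modified-immediate check accepts, yielding "bic v0.4s, #0xFF". Per lane,
// BIC computes an AND-NOT (illustrative sketch):
static unsigned bicLane(unsigned Lane, unsigned char Imm8, unsigned Shift) {
  return Lane & ~((unsigned)Imm8 << Shift); // X & ~(imm8 << shift)
}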
6735 | ||||
6736 | // Specialized code to quickly find if PotentialBVec is a BuildVector that | |||
6737 | // consists only of the same constant int value, which is returned in the | |||
6738 | // reference arg ConstVal. | |||
6739 | static bool isAllConstantBuildVector(const SDValue &PotentialBVec, | |||
6740 | uint64_t &ConstVal) { | |||
6741 | BuildVectorSDNode *Bvec = dyn_cast<BuildVectorSDNode>(PotentialBVec); | |||
6742 | if (!Bvec) | |||
6743 | return false; | |||
6744 | ConstantSDNode *FirstElt = dyn_cast<ConstantSDNode>(Bvec->getOperand(0)); | |||
6745 | if (!FirstElt) | |||
6746 | return false; | |||
6747 | EVT VT = Bvec->getValueType(0); | |||
6748 | unsigned NumElts = VT.getVectorNumElements(); | |||
6749 | for (unsigned i = 1; i < NumElts; ++i) | |||
6750 | if (dyn_cast<ConstantSDNode>(Bvec->getOperand(i)) != FirstElt) | |||
6751 | return false; | |||
6752 | ConstVal = FirstElt->getZExtValue(); | |||
6753 | return true; | |||
6754 | } | |||
6755 | ||||
6756 | static unsigned getIntrinsicID(const SDNode *N) { | |||
6757 | unsigned Opcode = N->getOpcode(); | |||
6758 | switch (Opcode) { | |||
6759 | default: | |||
6760 | return Intrinsic::not_intrinsic; | |||
6761 | case ISD::INTRINSIC_WO_CHAIN: { | |||
6762 | unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); | |||
6763 | if (IID < Intrinsic::num_intrinsics) | |||
6764 | return IID; | |||
6765 | return Intrinsic::not_intrinsic; | |||
6766 | } | |||
6767 | } | |||
6768 | } | |||
6769 | ||||
6770 | // Attempt to form a vector S[LR]I from (or (and X, BvecC1), (lsl Y, C2)), | |||
6771 | // to (SLI X, Y, C2), where X and Y have matching vector types, BvecC1 is a | |||
6772 | // BUILD_VECTOR with constant element C1, C2 is a constant, and C1 == ~C2. | |||
6773 | // Also, logical shift right -> sri, with the same structure. | |||
6774 | static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) { | |||
6775 | EVT VT = N->getValueType(0); | |||
6776 | ||||
6777 | if (!VT.isVector()) | |||
6778 | return SDValue(); | |||
6779 | ||||
6780 | SDLoc DL(N); | |||
6781 | ||||
6782 | // Is the first op an AND? | |||
6783 | const SDValue And = N->getOperand(0); | |||
6784 | if (And.getOpcode() != ISD::AND) | |||
6785 | return SDValue(); | |||
6786 | ||||
6787 | // Is the second op an shl or lshr? | |||
6788 | SDValue Shift = N->getOperand(1); | |||
6789 | // This will have been turned into: AArch64ISD::VSHL vector, #shift | |||
6790 | // or AArch64ISD::VLSHR vector, #shift | |||
6791 | unsigned ShiftOpc = Shift.getOpcode(); | |||
6792 | if ((ShiftOpc != AArch64ISD::VSHL && ShiftOpc != AArch64ISD::VLSHR)) | |||
6793 | return SDValue(); | |||
6794 | bool IsShiftRight = ShiftOpc == AArch64ISD::VLSHR; | |||
6795 | ||||
6796 | // Is the shift amount constant? | |||
6797 | ConstantSDNode *C2node = dyn_cast<ConstantSDNode>(Shift.getOperand(1)); | |||
6798 | if (!C2node) | |||
6799 | return SDValue(); | |||
6800 | ||||
6801 | // Is the and mask vector all constant? | |||
6802 | uint64_t C1; | |||
6803 | if (!isAllConstantBuildVector(And.getOperand(1), C1)) | |||
6804 | return SDValue(); | |||
6805 | ||||
6806 | // Is C1 == ~C2, taking into account how much one can shift elements of a | |||
6807 | // particular size? | |||
6808 | uint64_t C2 = C2node->getZExtValue(); | |||
6809 | unsigned ElemSizeInBits = VT.getScalarSizeInBits(); | |||
6810 | if (C2 > ElemSizeInBits) | |||
6811 | return SDValue(); | |||
6812 | unsigned ElemMask = (1 << ElemSizeInBits) - 1; | |||
6813 | if ((C1 & ElemMask) != (~C2 & ElemMask)) | |||
6814 | return SDValue(); | |||
6815 | ||||
6816 | SDValue X = And.getOperand(0); | |||
6817 | SDValue Y = Shift.getOperand(0); | |||
6818 | ||||
6819 | unsigned Intrin = | |||
6820 | IsShiftRight ? Intrinsic::aarch64_neon_vsri : Intrinsic::aarch64_neon_vsli; | |||
6821 | SDValue ResultSLI = | |||
6822 | DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, | |||
6823 | DAG.getConstant(Intrin, DL, MVT::i32), X, Y, | |||
6824 | Shift.getOperand(1)); | |||
6825 | ||||
6826 |   LLVM_DEBUG(dbgs() << "aarch64-lower: transformed: \n"); | |||
6827 |   LLVM_DEBUG(N->dump(&DAG)); | |||
6828 |   LLVM_DEBUG(dbgs() << "into: \n"); | |||
6829 |   LLVM_DEBUG(ResultSLI->dump(&DAG)); | |||
6830 | ||||
6831 | ++NumShiftInserts; | |||
6832 | return ResultSLI; | |||
6833 | } | |||
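// Architecturally, "sli vd, vn, #n" shifts each lane of vn left by n and
// inserts the result into vd, preserving vd's low n bits; "sri" is the
// right-shift counterpart, preserving vd's high n bits. A per-lane model for
// 8-bit elements (illustrative sketch, not part of this file):
static unsigned char sliLane8(unsigned char X, unsigned char Y, unsigned N) {
  unsigned char KeptFromX = (unsigned char)((1u << N) - 1); // low N bits of X
  return (unsigned char)((unsigned)(Y << N) | (X & KeptFromX));
}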
6834 | ||||
6835 | SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op, | |||
6836 | SelectionDAG &DAG) const { | |||
6837 | // Attempt to form a vector S[LR]I from (or (and X, C1), (lsl Y, C2)) | |||
6838 | if (EnableAArch64SlrGeneration) { | |||
6839 | if (SDValue Res = tryLowerToSLI(Op.getNode(), DAG)) | |||
6840 | return Res; | |||
6841 | } | |||
6842 | ||||
6843 | EVT VT = Op.getValueType(); | |||
6844 | ||||
6845 | SDValue LHS = Op.getOperand(0); | |||
6846 | BuildVectorSDNode *BVN = | |||
6847 | dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode()); | |||
6848 | if (!BVN) { | |||
6849 | // OR commutes, so try swapping the operands. | |||
6850 | LHS = Op.getOperand(1); | |||
6851 | BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode()); | |||
6852 | } | |||
6853 | if (!BVN) | |||
6854 | return Op; | |||
6855 | ||||
6856 | APInt DefBits(VT.getSizeInBits(), 0); | |||
6857 | APInt UndefBits(VT.getSizeInBits(), 0); | |||
6858 | if (resolveBuildVector(BVN, DefBits, UndefBits)) { | |||
6859 | SDValue NewOp; | |||
6860 | ||||
6861 | if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG, | |||
6862 | DefBits, &LHS)) || | |||
6863 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG, | |||
6864 | DefBits, &LHS))) | |||
6865 | return NewOp; | |||
6866 | ||||
6867 | if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG, | |||
6868 | UndefBits, &LHS)) || | |||
6869 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG, | |||
6870 | UndefBits, &LHS))) | |||
6871 | return NewOp; | |||
6872 | } | |||
6873 | ||||
6874 | // We can always fall back to a non-immediate OR. | |||
6875 | return Op; | |||
6876 | } | |||
6877 | ||||
6878 | // Normalize the operands of BUILD_VECTOR. The value of constant operands will | |||
6879 | // be truncated to fit element width. | |||
6880 | static SDValue NormalizeBuildVector(SDValue Op, | |||
6881 | SelectionDAG &DAG) { | |||
6882 |   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); | |||
6883 | SDLoc dl(Op); | |||
6884 | EVT VT = Op.getValueType(); | |||
6885 |   EVT EltTy = VT.getVectorElementType(); | |||
6886 | ||||
6887 | if (EltTy.isFloatingPoint() || EltTy.getSizeInBits() > 16) | |||
6888 | return Op; | |||
6889 | ||||
6890 | SmallVector<SDValue, 16> Ops; | |||
6891 | for (SDValue Lane : Op->ops()) { | |||
6892 | if (auto *CstLane = dyn_cast<ConstantSDNode>(Lane)) { | |||
6893 | APInt LowBits(EltTy.getSizeInBits(), | |||
6894 | CstLane->getZExtValue()); | |||
6895 | Lane = DAG.getConstant(LowBits.getZExtValue(), dl, MVT::i32); | |||
6896 | } | |||
6897 | Ops.push_back(Lane); | |||
6898 | } | |||
6899 | return DAG.getBuildVector(VT, dl, Ops); | |||
6900 | } | |||
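// A truncation example for the loop above: in a v8i8 BUILD_VECTOR, a
// constant lane of 0x1FF keeps only its low 8 bits and is rewritten as the
// i32 constant 0xFF. A plain-integer model of the lane rewrite (sketch):
static unsigned normalizeLane(unsigned long long CstLane, unsigned EltBits) {
  unsigned long long Mask =
      EltBits >= 64 ? ~0ull : ((1ull << EltBits) - 1); // APInt LowBits
  return (unsigned)(CstLane & Mask);
}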
6901 | ||||
6902 | static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG) { | |||
6903 | EVT VT = Op.getValueType(); | |||
6904 | ||||
6905 | APInt DefBits(VT.getSizeInBits(), 0); | |||
6906 | APInt UndefBits(VT.getSizeInBits(), 0); | |||
6907 | BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); | |||
6908 | if (resolveBuildVector(BVN, DefBits, UndefBits)) { | |||
6909 | SDValue NewOp; | |||
6910 | if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) || | |||
6911 | (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) || | |||
6912 | (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) || | |||
6913 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) || | |||
6914 | (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) || | |||
6915 | (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits))) | |||
6916 | return NewOp; | |||
6917 | ||||
6918 | DefBits = ~DefBits; | |||
6919 | if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) || | |||
6920 | (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) || | |||
6921 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits))) | |||
6922 | return NewOp; | |||
6923 | ||||
6924 | DefBits = UndefBits; | |||
6925 | if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) || | |||
6926 | (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) || | |||
6927 | (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) || | |||
6928 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) || | |||
6929 | (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) || | |||
6930 | (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits))) | |||
6931 | return NewOp; | |||
6932 | ||||
6933 | DefBits = ~UndefBits; | |||
6934 | if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) || | |||
6935 | (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) || | |||
6936 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits))) | |||
6937 | return NewOp; | |||
6938 | } | |||
6939 | ||||
6940 | return SDValue(); | |||
6941 | } | |||
6942 | ||||
6943 | SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, | |||
6944 | SelectionDAG &DAG) const { | |||
6945 | EVT VT = Op.getValueType(); | |||
6946 | ||||
6947 | // Try to build a simple constant vector. | |||
6948 | Op = NormalizeBuildVector(Op, DAG); | |||
6949 | if (VT.isInteger()) { | |||
6950 | // Certain vector constants, used to express things like logical NOT and | |||
6951 | // arithmetic NEG, are passed through unmodified. This allows special | |||
6952 | // patterns for these operations to match, which will lower these constants | |||
6953 | // to whatever is proven necessary. | |||
6954 | BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); | |||
6955 | if (BVN->isConstant()) | |||
6956 | if (ConstantSDNode *Const = BVN->getConstantSplatNode()) { | |||
6957 | unsigned BitSize = VT.getVectorElementType().getSizeInBits(); | |||
6958 | APInt Val(BitSize, | |||
6959 | Const->getAPIntValue().zextOrTrunc(BitSize).getZExtValue()); | |||
6960 | if (Val.isNullValue() || Val.isAllOnesValue()) | |||
6961 | return Op; | |||
6962 | } | |||
6963 | } | |||
6964 | ||||
6965 | if (SDValue V = ConstantBuildVector(Op, DAG)) | |||
6966 | return V; | |||
6967 | ||||
6968 | // Scan through the operands to find some interesting properties we can | |||
6969 | // exploit: | |||
6970 | // 1) If only one value is used, we can use a DUP, or | |||
6971 | // 2) if only the low element is not undef, we can just insert that, or | |||
6972 | // 3) if only one constant value is used (w/ some non-constant lanes), | |||
6973 | // we can splat the constant value into the whole vector then fill | |||
6974 | // in the non-constant lanes. | |||
6975 | // 4) FIXME: If different constant values are used, but we can intelligently | |||
6976 | // select the values we'll be overwriting for the non-constant | |||
6977 | // lanes such that we can directly materialize the vector | |||
6978 | // some other way (MOVI, e.g.), we can be sneaky. | |||
6979 | // 5) if all operands are EXTRACT_VECTOR_ELT, check for VUZP. | |||
6980 | SDLoc dl(Op); | |||
6981 | unsigned NumElts = VT.getVectorNumElements(); | |||
6982 | bool isOnlyLowElement = true; | |||
6983 | bool usesOnlyOneValue = true; | |||
6984 | bool usesOnlyOneConstantValue = true; | |||
6985 | bool isConstant = true; | |||
6986 | bool AllLanesExtractElt = true; | |||
6987 | unsigned NumConstantLanes = 0; | |||
6988 | SDValue Value; | |||
6989 | SDValue ConstantValue; | |||
6990 | for (unsigned i = 0; i < NumElts; ++i) { | |||
6991 | SDValue V = Op.getOperand(i); | |||
6992 | if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) | |||
6993 | AllLanesExtractElt = false; | |||
6994 | if (V.isUndef()) | |||
6995 | continue; | |||
6996 | if (i > 0) | |||
6997 | isOnlyLowElement = false; | |||
6998 | if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) | |||
6999 | isConstant = false; | |||
7000 | ||||
7001 | if (isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V)) { | |||
7002 | ++NumConstantLanes; | |||
7003 | if (!ConstantValue.getNode()) | |||
7004 | ConstantValue = V; | |||
7005 | else if (ConstantValue != V) | |||
7006 | usesOnlyOneConstantValue = false; | |||
7007 | } | |||
7008 | ||||
7009 | if (!Value.getNode()) | |||
7010 | Value = V; | |||
7011 | else if (V != Value) | |||
7012 | usesOnlyOneValue = false; | |||
7013 | } | |||
7014 | ||||
7015 | if (!Value.getNode()) { | |||
7016 |     LLVM_DEBUG( | |||
7017 |         dbgs() << "LowerBUILD_VECTOR: value undefined, creating undef node\n"); | |||
7018 | return DAG.getUNDEF(VT); | |||
7019 | } | |||
7020 | ||||
7021 | if (isOnlyLowElement) { | |||
7022 |     LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: only low element used, creating 1 " | |||
7023 |                          "SCALAR_TO_VECTOR node\n"); | |||
7024 | return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); | |||
7025 | } | |||
7026 | ||||
7027 | if (AllLanesExtractElt) { | |||
7028 | SDNode *Vector = nullptr; | |||
7029 | bool Even = false; | |||
7030 | bool Odd = false; | |||
7031 | // Check whether the extract elements match the Even pattern <0,2,4,...> or | |||
7032 | // the Odd pattern <1,3,5,...>. | |||
7033 | for (unsigned i = 0; i < NumElts; ++i) { | |||
7034 | SDValue V = Op.getOperand(i); | |||
7035 | const SDNode *N = V.getNode(); | |||
7036 | if (!isa<ConstantSDNode>(N->getOperand(1))) | |||
7037 | break; | |||
7038 | SDValue N0 = N->getOperand(0); | |||
7039 | ||||
7040 | // All elements are extracted from the same vector. | |||
7041 | if (!Vector) { | |||
7042 | Vector = N0.getNode(); | |||
7043 | // Check that the type of EXTRACT_VECTOR_ELT matches the type of | |||
7044 | // BUILD_VECTOR. | |||
7045 | if (VT.getVectorElementType() != | |||
7046 | N0.getValueType().getVectorElementType()) | |||
7047 | break; | |||
7048 | } else if (Vector != N0.getNode()) { | |||
7049 | Odd = false; | |||
7050 | Even = false; | |||
7051 | break; | |||
7052 | } | |||
7053 | ||||
7054 | // Extracted values are either at Even indices <0,2,4,...> or at Odd | |||
7055 | // indices <1,3,5,...>. | |||
7056 | uint64_t Val = N->getConstantOperandVal(1); | |||
7057 | if (Val == 2 * i) { | |||
7058 | Even = true; | |||
7059 | continue; | |||
7060 | } | |||
7061 | if (Val - 1 == 2 * i) { | |||
7062 | Odd = true; | |||
7063 | continue; | |||
7064 | } | |||
7065 | ||||
7066 | // Something does not match: abort. | |||
7067 | Odd = false; | |||
7068 | Even = false; | |||
7069 | break; | |||
7070 | } | |||
7071 | if (Even || Odd) { | |||
7072 | SDValue LHS = | |||
7073 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0), | |||
7074 | DAG.getConstant(0, dl, MVT::i64)); | |||
7075 | SDValue RHS = | |||
7076 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0), | |||
7077 | DAG.getConstant(NumElts, dl, MVT::i64)); | |||
7078 | ||||
7079 | if (Even && !Odd) | |||
7080 | return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), LHS, | |||
7081 | RHS); | |||
7082 | if (Odd && !Even) | |||
7083 | return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), LHS, | |||
7084 | RHS); | |||
7085 | } | |||
7086 | } | |||
7087 | ||||
7088 | // Use DUP for non-constant splats. For f32 constant splats, reduce to | |||
7089 | // i32 and try again. | |||
7090 | if (usesOnlyOneValue) { | |||
7091 | if (!isConstant) { | |||
7092 | if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
7093 | Value.getValueType() != VT) { | |||
7094 |         LLVM_DEBUG( | |||
7095 |             dbgs() << "LowerBUILD_VECTOR: use DUP for non-constant splats\n"); | |||
7096 | return DAG.getNode(AArch64ISD::DUP, dl, VT, Value); | |||
7097 | } | |||
7098 | ||||
7099 |       // This is actually a DUPLANExx operation, which keeps everything in vector registers. | |||
7100 | ||||
7101 | SDValue Lane = Value.getOperand(1); | |||
7102 | Value = Value.getOperand(0); | |||
7103 | if (Value.getValueSizeInBits() == 64) { | |||
7104 |         LLVM_DEBUG( | |||
7105 |             dbgs() << "LowerBUILD_VECTOR: DUPLANE works on 128-bit vectors, " | |||
7106 |                       "widening it\n"); | |||
7107 | Value = WidenVector(Value, DAG); | |||
7108 | } | |||
7109 | ||||
7110 | unsigned Opcode = getDUPLANEOp(VT.getVectorElementType()); | |||
7111 | return DAG.getNode(Opcode, dl, VT, Value, Lane); | |||
7112 | } | |||
7113 | ||||
7114 | if (VT.getVectorElementType().isFloatingPoint()) { | |||
7115 | SmallVector<SDValue, 8> Ops; | |||
7116 | EVT EltTy = VT.getVectorElementType(); | |||
7117 |       assert((EltTy == MVT::f16 || EltTy == MVT::f32 || EltTy == MVT::f64) && | |||
7118 |              "Unsupported floating-point vector type"); | |||
7119 |       LLVM_DEBUG( | |||
7120 |           dbgs() << "LowerBUILD_VECTOR: float constant splats, creating int " | |||
7121 |                     "BITCASTS, and try again\n"); | |||
7122 | MVT NewType = MVT::getIntegerVT(EltTy.getSizeInBits()); | |||
7123 | for (unsigned i = 0; i < NumElts; ++i) | |||
7124 | Ops.push_back(DAG.getNode(ISD::BITCAST, dl, NewType, Op.getOperand(i))); | |||
7125 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), NewType, NumElts); | |||
7126 | SDValue Val = DAG.getBuildVector(VecVT, dl, Ops); | |||
7127 |       LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: trying to lower new vector: "; | |||
7128 |                  Val.dump();); | |||
7129 | Val = LowerBUILD_VECTOR(Val, DAG); | |||
7130 | if (Val.getNode()) | |||
7131 | return DAG.getNode(ISD::BITCAST, dl, VT, Val); | |||
7132 | } | |||
7133 | } | |||
7134 | ||||
7135 |   // If a single constant value was used for more than one lane, | |||
7136 | // start by splatting that value, then replace the non-constant lanes. This | |||
7137 | // is better than the default, which will perform a separate initialization | |||
7138 | // for each lane. | |||
7139 | if (NumConstantLanes > 0 && usesOnlyOneConstantValue) { | |||
7140 | // Firstly, try to materialize the splat constant. | |||
7141 | SDValue Vec = DAG.getSplatBuildVector(VT, dl, ConstantValue), | |||
7142 | Val = ConstantBuildVector(Vec, DAG); | |||
7143 | if (!Val) { | |||
7144 | // Otherwise, materialize the constant and splat it. | |||
7145 | Val = DAG.getNode(AArch64ISD::DUP, dl, VT, ConstantValue); | |||
7146 | DAG.ReplaceAllUsesWith(Vec.getNode(), &Val); | |||
7147 | } | |||
7148 | ||||
7149 | // Now insert the non-constant lanes. | |||
7150 | for (unsigned i = 0; i < NumElts; ++i) { | |||
7151 | SDValue V = Op.getOperand(i); | |||
7152 | SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64); | |||
7153 | if (!isa<ConstantSDNode>(V) && !isa<ConstantFPSDNode>(V)) | |||
7154 | // Note that type legalization likely mucked about with the VT of the | |||
7155 | // source operand, so we may have to convert it here before inserting. | |||
7156 | Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, V, LaneIdx); | |||
7157 | } | |||
7158 | return Val; | |||
7159 | } | |||
7160 | ||||
7161 | // This will generate a load from the constant pool. | |||
7162 | if (isConstant) { | |||
7163 |     LLVM_DEBUG( | |||
7164 |         dbgs() << "LowerBUILD_VECTOR: all elements are constant, use default " | |||
7165 |                   "expansion\n"); | |||
7166 | return SDValue(); | |||
7167 | } | |||
7168 | ||||
7169 | // Empirical tests suggest this is rarely worth it for vectors of length <= 2. | |||
7170 | if (NumElts >= 4) { | |||
7171 | if (SDValue shuffle = ReconstructShuffle(Op, DAG)) | |||
7172 | return shuffle; | |||
7173 | } | |||
7174 | ||||
7175 | // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we | |||
7176 | // know the default expansion would otherwise fall back on something even | |||
7177 | // worse. For a vector with one or two non-undef values, that's | |||
7178 | // scalar_to_vector for the elements followed by a shuffle (provided the | |||
7179 | // shuffle is valid for the target) and materialization element by element | |||
7180 | // on the stack followed by a load for everything else. | |||
7181 | if (!isConstant && !usesOnlyOneValue) { | |||
7182 |     LLVM_DEBUG( | |||
7183 |         dbgs() << "LowerBUILD_VECTOR: alternatives failed, creating sequence " | |||
7184 |                   "of INSERT_VECTOR_ELT\n"); | |||
7185 | ||||
7186 | SDValue Vec = DAG.getUNDEF(VT); | |||
7187 | SDValue Op0 = Op.getOperand(0); | |||
7188 | unsigned i = 0; | |||
7189 | ||||
7190 | // Use SCALAR_TO_VECTOR for lane zero to | |||
7191 | // a) Avoid a RMW dependency on the full vector register, and | |||
7192 | // b) Allow the register coalescer to fold away the copy if the | |||
7193 | // value is already in an S or D register, and we're forced to emit an | |||
7194 | // INSERT_SUBREG that we can't fold anywhere. | |||
7195 | // | |||
7196 | // We also allow types like i8 and i16 which are illegal scalar but legal | |||
7197 | // vector element types. After type-legalization the inserted value is | |||
7198 | // extended (i32) and it is safe to cast them to the vector type by ignoring | |||
7199 | // the upper bits of the lowest lane (e.g. v8i8, v4i16). | |||
7200 | if (!Op0.isUndef()) { | |||
7201 |       LLVM_DEBUG(dbgs() << "Creating node for op0, it is not undefined:\n"); | |||
7202 | Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op0); | |||
7203 | ++i; | |||
7204 | } | |||
7205 |     LLVM_DEBUG(if (i < NumElts) dbgs() | |||
7206 |                << "Creating nodes for the other vector elements:\n";); | |||
7207 | for (; i < NumElts; ++i) { | |||
7208 | SDValue V = Op.getOperand(i); | |||
7209 | if (V.isUndef()) | |||
7210 | continue; | |||
7211 | SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64); | |||
7212 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); | |||
7213 | } | |||
7214 | return Vec; | |||
7215 | } | |||
7216 | ||||
7217 |   LLVM_DEBUG( | |||
7218 |       dbgs() << "LowerBUILD_VECTOR: use default expansion, failed to find " | |||
7219 |                 "better alternative\n"); | |||
7220 | return SDValue(); | |||
7221 | } | |||
7222 | ||||
7223 | SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, | |||
7224 | SelectionDAG &DAG) const { | |||
7225 |   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!"); | |||
7226 | ||||
7227 | // Check for non-constant or out of range lane. | |||
7228 | EVT VT = Op.getOperand(0).getValueType(); | |||
7229 | ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(2)); | |||
7230 | if (!CI || CI->getZExtValue() >= VT.getVectorNumElements()) | |||
7231 | return SDValue(); | |||
7232 | ||||
7233 | ||||
7234 | // Insertion/extraction are legal for V128 types. | |||
7235 | if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 || | |||
7236 | VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 || | |||
7237 | VT == MVT::v8f16) | |||
7238 | return Op; | |||
7239 | ||||
7240 | if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 && | |||
7241 | VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16) | |||
7242 | return SDValue(); | |||
7243 | ||||
7244 | // For V64 types, we perform insertion by expanding the value | |||
7245 | // to a V128 type and perform the insertion on that. | |||
7246 | SDLoc DL(Op); | |||
7247 | SDValue WideVec = WidenVector(Op.getOperand(0), DAG); | |||
7248 | EVT WideTy = WideVec.getValueType(); | |||
7249 | ||||
7250 | SDValue Node = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideTy, WideVec, | |||
7251 | Op.getOperand(1), Op.getOperand(2)); | |||
7252 | // Re-narrow the resultant vector. | |||
7253 | return NarrowVector(Node, DAG); | |||
7254 | } | |||
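// The V64 path above amounts to (pseudocode sketch for v4i16):
//   %wide   = WidenVector(%v)                      ; v4i16 viewed as v8i16
//   %wide2  = insert_vector_elt %wide, %val, %lane ; legal on the V128 type
//   %result = NarrowVector(%wide2)                 ; back to v4i16
// so the insertion itself always happens on a 128-bit register.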
7255 | ||||
7256 | SDValue | |||
7257 | AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, | |||
7258 | SelectionDAG &DAG) const { | |||
7259 |   assert(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unknown opcode!"); | |||
7260 | ||||
7261 | // Check for non-constant or out of range lane. | |||
7262 | EVT VT = Op.getOperand(0).getValueType(); | |||
7263 | ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(1)); | |||
7264 | if (!CI || CI->getZExtValue() >= VT.getVectorNumElements()) | |||
7265 | return SDValue(); | |||
7266 | ||||
7267 | ||||
7268 | // Insertion/extraction are legal for V128 types. | |||
7269 | if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 || | |||
7270 | VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 || | |||
7271 | VT == MVT::v8f16) | |||
7272 | return Op; | |||
7273 | ||||
7274 | if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 && | |||
7275 | VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16) | |||
7276 | return SDValue(); | |||
7277 | ||||
7278 | // For V64 types, we perform extraction by expanding the value | |||
7279 | // to a V128 type and perform the extraction on that. | |||
7280 | SDLoc DL(Op); | |||
7281 | SDValue WideVec = WidenVector(Op.getOperand(0), DAG); | |||
7282 | EVT WideTy = WideVec.getValueType(); | |||
7283 | ||||
7284 | EVT ExtrTy = WideTy.getVectorElementType(); | |||
7285 | if (ExtrTy == MVT::i16 || ExtrTy == MVT::i8) | |||
7286 | ExtrTy = MVT::i32; | |||
7287 | ||||
7288 | // For extractions, we just return the result directly. | |||
7289 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtrTy, WideVec, | |||
7290 | Op.getOperand(1)); | |||
7291 | } | |||
7292 | ||||
7293 | SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, | |||
7294 | SelectionDAG &DAG) const { | |||
7295 | EVT VT = Op.getOperand(0).getValueType(); | |||
7296 | SDLoc dl(Op); | |||
7297 | // Just in case... | |||
7298 | if (!VT.isVector()) | |||
7299 | return SDValue(); | |||
7300 | ||||
7301 | ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1)); | |||
7302 | if (!Cst) | |||
7303 | return SDValue(); | |||
7304 | unsigned Val = Cst->getZExtValue(); | |||
7305 | ||||
7306 | unsigned Size = Op.getValueSizeInBits(); | |||
7307 | ||||
7308 | // This will get lowered to an appropriate EXTRACT_SUBREG in ISel. | |||
7309 | if (Val == 0) | |||
7310 | return Op; | |||
7311 | ||||
7312 | // If this is extracting the upper 64-bits of a 128-bit vector, we match | |||
7313 | // that directly. | |||
7314 | if (Size == 64 && Val * VT.getScalarSizeInBits() == 64) | |||
7315 | return Op; | |||
7316 | ||||
7317 | return SDValue(); | |||
7318 | } | |||
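// Example for the upper-half match above: extracting a v2i32 subvector from
// a v4i32 at index 2 has Size == 64 and Val * 32 == 64, so it is returned
// unchanged for ISel to match as an extraction of the high 64 bits; any
// other non-zero offset falls through and returns SDValue().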
7319 | ||||
7320 | bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { | |||
7321 | if (VT.getVectorNumElements() == 4 && | |||
7322 | (VT.is128BitVector() || VT.is64BitVector())) { | |||
7323 | unsigned PFIndexes[4]; | |||
7324 | for (unsigned i = 0; i != 4; ++i) { | |||
7325 | if (M[i] < 0) | |||
7326 | PFIndexes[i] = 8; | |||
7327 | else | |||
7328 | PFIndexes[i] = M[i]; | |||
7329 | } | |||
7330 | ||||
7331 | // Compute the index in the perfect shuffle table. | |||
7332 | unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 + | |||
7333 | PFIndexes[2] * 9 + PFIndexes[3]; | |||
7334 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; | |||
7335 | unsigned Cost = (PFEntry >> 30); | |||
7336 | ||||
7337 | if (Cost <= 4) | |||
7338 | return true; | |||
7339 | } | |||
7340 | ||||
7341 | bool DummyBool; | |||
7342 | int DummyInt; | |||
7343 | unsigned DummyUnsigned; | |||
7344 | ||||
7345 | return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) || isREVMask(M, VT, 64) || | |||
7346 | isREVMask(M, VT, 32) || isREVMask(M, VT, 16) || | |||
7347 | isEXTMask(M, VT, DummyBool, DummyUnsigned) || | |||
7348 | // isTBLMask(M, VT) || // FIXME: Port TBL support from ARM. | |||
7349 | isTRNMask(M, VT, DummyUnsigned) || isUZPMask(M, VT, DummyUnsigned) || | |||
7350 | isZIPMask(M, VT, DummyUnsigned) || | |||
7351 | isTRN_v_undef_Mask(M, VT, DummyUnsigned) || | |||
7352 | isUZP_v_undef_Mask(M, VT, DummyUnsigned) || | |||
7353 | isZIP_v_undef_Mask(M, VT, DummyUnsigned) || | |||
7354 | isINSMask(M, VT.getVectorNumElements(), DummyBool, DummyInt) || | |||
7355 | isConcatMask(M, VT, VT.getSizeInBits() == 128)); | |||
7356 | } | |||
7357 | ||||
7358 | /// getVShiftImm - Check if this is a valid build_vector for the immediate | |||
7359 | /// operand of a vector shift operation, where all the elements of the | |||
7360 | /// build_vector must have the same constant integer value. | |||
7361 | static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { | |||
7362 | // Ignore bit_converts. | |||
7363 | while (Op.getOpcode() == ISD::BITCAST) | |||
7364 | Op = Op.getOperand(0); | |||
7365 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); | |||
7366 | APInt SplatBits, SplatUndef; | |||
7367 | unsigned SplatBitSize; | |||
7368 | bool HasAnyUndefs; | |||
7369 | if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, | |||
7370 | HasAnyUndefs, ElementBits) || | |||
7371 | SplatBitSize > ElementBits) | |||
7372 | return false; | |||
7373 | Cnt = SplatBits.getSExtValue(); | |||
7374 | return true; | |||
7375 | } | |||
7376 | ||||
7377 | /// isVShiftLImm - Check if this is a valid build_vector for the immediate | |||
7378 | /// operand of a vector shift left operation. That value must be in the range: | |||
7379 | /// 0 <= Value < ElementBits for a left shift; or | |||
7380 | /// 0 <= Value <= ElementBits for a long left shift. | |||
7381 | static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { | |||
7382 |   assert(VT.isVector() && "vector shift count is not a vector type"); | |||
7383 | int64_t ElementBits = VT.getScalarSizeInBits(); | |||
7384 | if (!getVShiftImm(Op, ElementBits, Cnt)) | |||
7385 | return false; | |||
7386 | return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits); | |||
7387 | } | |||
7388 | ||||
7389 | /// isVShiftRImm - Check if this is a valid build_vector for the immediate | |||
7390 | /// operand of a vector shift right operation. The value must be in the range: | |||
7391 | ///   1 <= Value <= ElementBits for a right shift, or ElementBits/2 for a narrowing right shift. | |||
7392 | static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt) { | |||
7393 |   assert(VT.isVector() && "vector shift count is not a vector type"); | |||
7394 | int64_t ElementBits = VT.getScalarSizeInBits(); | |||
7395 | if (!getVShiftImm(Op, ElementBits, Cnt)) | |||
7396 | return false; | |||
7397 | return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits)); | |||
7398 | } | |||
7399 | ||||
7400 | SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op, | |||
7401 | SelectionDAG &DAG) const { | |||
7402 | EVT VT = Op.getValueType(); | |||
7403 | SDLoc DL(Op); | |||
7404 | int64_t Cnt; | |||
7405 | ||||
7406 | if (!Op.getOperand(1).getValueType().isVector()) | |||
7407 | return Op; | |||
7408 | unsigned EltSize = VT.getScalarSizeInBits(); | |||
7409 | ||||
7410 | switch (Op.getOpcode()) { | |||
7411 | default: | |||
7412 |     llvm_unreachable("unexpected shift opcode"); | |||
7413 | ||||
7414 | case ISD::SHL: | |||
7415 | if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) | |||
7416 | return DAG.getNode(AArch64ISD::VSHL, DL, VT, Op.getOperand(0), | |||
7417 | DAG.getConstant(Cnt, DL, MVT::i32)); | |||
7418 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, | |||
7419 | DAG.getConstant(Intrinsic::aarch64_neon_ushl, DL, | |||
7420 | MVT::i32), | |||
7421 | Op.getOperand(0), Op.getOperand(1)); | |||
7422 | case ISD::SRA: | |||
7423 | case ISD::SRL: | |||
7424 | // Right shift immediate | |||
7425 | if (isVShiftRImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) { | |||
7426 | unsigned Opc = | |||
7427 | (Op.getOpcode() == ISD::SRA) ? AArch64ISD::VASHR : AArch64ISD::VLSHR; | |||
7428 | return DAG.getNode(Opc, DL, VT, Op.getOperand(0), | |||
7429 | DAG.getConstant(Cnt, DL, MVT::i32)); | |||
7430 | } | |||
7431 | ||||
7432 | // Right shift by register. Note that there is no shift-right-by-register | |||
7433 | // instruction, but the shift-left-by-register instruction takes a signed | |||
7434 | // value, where negative amounts specify a right shift. | |||
7435 | unsigned Opc = (Op.getOpcode() == ISD::SRA) ? Intrinsic::aarch64_neon_sshl | |||
7436 | : Intrinsic::aarch64_neon_ushl; | |||
7437 | // negate the shift amount | |||
7438 | SDValue NegShift = DAG.getNode(AArch64ISD::NEG, DL, VT, Op.getOperand(1)); | |||
7439 | SDValue NegShiftLeft = | |||
7440 | DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, | |||
7441 | DAG.getConstant(Opc, DL, MVT::i32), Op.getOperand(0), | |||
7442 | NegShift); | |||
7443 | return NegShiftLeft; | |||
7444 | } | |||
7445 | ||||
7446 | return SDValue(); | |||
7447 | } | |||
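// E.g. a variable 'srl <4 x i32> %x, %amt' has no direct NEON encoding, so it
// is emitted above as ushl(%x, neg(%amt)): the shift-left-by-register
// instruction treats negative per-lane amounts as right shifts.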
7448 | ||||
7449 | static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS, | |||
7450 | AArch64CC::CondCode CC, bool NoNans, EVT VT, | |||
7451 | const SDLoc &dl, SelectionDAG &DAG) { | |||
7452 | EVT SrcVT = LHS.getValueType(); | |||
7453 | assert(VT.getSizeInBits() == SrcVT.getSizeInBits() && | |||
7454 |        "function only supposed to emit natural comparisons"); | |||
7455 | ||||
7456 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode()); | |||
7457 | APInt CnstBits(VT.getSizeInBits(), 0); | |||
7458 | APInt UndefBits(VT.getSizeInBits(), 0); | |||
7459 | bool IsCnst = BVN && resolveBuildVector(BVN, CnstBits, UndefBits); | |||
7460 | bool IsZero = IsCnst && (CnstBits == 0); | |||
7461 | ||||
7462 | if (SrcVT.getVectorElementType().isFloatingPoint()) { | |||
7463 | switch (CC) { | |||
7464 | default: | |||
7465 | return SDValue(); | |||
7466 | case AArch64CC::NE: { | |||
7467 | SDValue Fcmeq; | |||
7468 | if (IsZero) | |||
7469 | Fcmeq = DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS); | |||
7470 | else | |||
7471 | Fcmeq = DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS); | |||
7472 | return DAG.getNode(AArch64ISD::NOT, dl, VT, Fcmeq); | |||
7473 | } | |||
7474 | case AArch64CC::EQ: | |||
7475 | if (IsZero) | |||
7476 | return DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS); | |||
7477 | return DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS); | |||
7478 | case AArch64CC::GE: | |||
7479 | if (IsZero) | |||
7480 | return DAG.getNode(AArch64ISD::FCMGEz, dl, VT, LHS); | |||
7481 | return DAG.getNode(AArch64ISD::FCMGE, dl, VT, LHS, RHS); | |||
7482 | case AArch64CC::GT: | |||
7483 | if (IsZero) | |||
7484 | return DAG.getNode(AArch64ISD::FCMGTz, dl, VT, LHS); | |||
7485 | return DAG.getNode(AArch64ISD::FCMGT, dl, VT, LHS, RHS); | |||
7486 | case AArch64CC::LS: | |||
7487 | if (IsZero) | |||
7488 | return DAG.getNode(AArch64ISD::FCMLEz, dl, VT, LHS); | |||
7489 | return DAG.getNode(AArch64ISD::FCMGE, dl, VT, RHS, LHS); | |||
7490 | case AArch64CC::LT: | |||
7491 | if (!NoNans) | |||
7492 | return SDValue(); | |||
7493 | // If we ignore NaNs then we can use the MI implementation. | |||
7494 | LLVM_FALLTHROUGH; | |||
7495 | case AArch64CC::MI: | |||
7496 | if (IsZero) | |||
7497 | return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS); | |||
7498 | return DAG.getNode(AArch64ISD::FCMGT, dl, VT, RHS, LHS); | |||
7499 | } | |||
7500 | } | |||
7501 | ||||
7502 | switch (CC) { | |||
7503 | default: | |||
7504 | return SDValue(); | |||
7505 | case AArch64CC::NE: { | |||
7506 | SDValue Cmeq; | |||
7507 | if (IsZero) | |||
7508 | Cmeq = DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS); | |||
7509 | else | |||
7510 | Cmeq = DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS); | |||
7511 | return DAG.getNode(AArch64ISD::NOT, dl, VT, Cmeq); | |||
7512 | } | |||
7513 | case AArch64CC::EQ: | |||
7514 | if (IsZero) | |||
7515 | return DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS); | |||
7516 | return DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS); | |||
7517 | case AArch64CC::GE: | |||
7518 | if (IsZero) | |||
7519 | return DAG.getNode(AArch64ISD::CMGEz, dl, VT, LHS); | |||
7520 | return DAG.getNode(AArch64ISD::CMGE, dl, VT, LHS, RHS); | |||
7521 | case AArch64CC::GT: | |||
7522 | if (IsZero) | |||
7523 | return DAG.getNode(AArch64ISD::CMGTz, dl, VT, LHS); | |||
7524 | return DAG.getNode(AArch64ISD::CMGT, dl, VT, LHS, RHS); | |||
7525 | case AArch64CC::LE: | |||
7526 | if (IsZero) | |||
7527 | return DAG.getNode(AArch64ISD::CMLEz, dl, VT, LHS); | |||
7528 | return DAG.getNode(AArch64ISD::CMGE, dl, VT, RHS, LHS); | |||
7529 | case AArch64CC::LS: | |||
7530 | return DAG.getNode(AArch64ISD::CMHS, dl, VT, RHS, LHS); | |||
7531 | case AArch64CC::LO: | |||
7532 | return DAG.getNode(AArch64ISD::CMHI, dl, VT, RHS, LHS); | |||
7533 | case AArch64CC::LT: | |||
7534 | if (IsZero) | |||
7535 | return DAG.getNode(AArch64ISD::CMLTz, dl, VT, LHS); | |||
7536 | return DAG.getNode(AArch64ISD::CMGT, dl, VT, RHS, LHS); | |||
7537 | case AArch64CC::HI: | |||
7538 | return DAG.getNode(AArch64ISD::CMHI, dl, VT, LHS, RHS); | |||
7539 | case AArch64CC::HS: | |||
7540 | return DAG.getNode(AArch64ISD::CMHS, dl, VT, LHS, RHS); | |||
7541 | } | |||
7542 | } | |||
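// E.g. an integer 'seteq %v, zeroinitializer' reaches this function as
// AArch64CC::EQ with IsZero set and maps to CMEQz, using the
// compare-against-zero form instead of materializing a zero vector.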
7543 | ||||
7544 | SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op, | |||
7545 | SelectionDAG &DAG) const { | |||
7546 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); | |||
7547 | SDValue LHS = Op.getOperand(0); | |||
7548 | SDValue RHS = Op.getOperand(1); | |||
7549 | EVT CmpVT = LHS.getValueType().changeVectorElementTypeToInteger(); | |||
7550 | SDLoc dl(Op); | |||
7551 | ||||
7552 | if (LHS.getValueType().getVectorElementType().isInteger()) { | |||
7553 | assert(LHS.getValueType() == RHS.getValueType()); | |||
7554 | AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC); | |||
7555 | SDValue Cmp = | |||
7556 | EmitVectorComparison(LHS, RHS, AArch64CC, false, CmpVT, dl, DAG); | |||
7557 | return DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType()); | |||
7558 | } | |||
7559 | ||||
7560 | const bool FullFP16 = | |||
7561 | static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16(); | |||
7562 | ||||
7563 | // Make v4f16 (only) fcmp operations utilise vector instructions. | |||
7564 | // v8f16 support will be a little more complicated. | |||
7565 | if (LHS.getValueType().getVectorElementType() == MVT::f16) { | |||
7566 | if (!FullFP16 && LHS.getValueType().getVectorNumElements() == 4) { | |||
7567 | LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, LHS); | |||
7568 | RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, RHS); | |||
7569 | SDValue NewSetcc = DAG.getSetCC(dl, MVT::v4i16, LHS, RHS, CC); | |||
7570 | DAG.ReplaceAllUsesWith(Op, NewSetcc); | |||
7571 | CmpVT = MVT::v4i32; | |||
7572 | } else | |||
7573 | return SDValue(); | |||
7574 | } | |||
7575 | ||||
7576 | assert(LHS.getValueType().getVectorElementType() == MVT::f32 || | |||
7577 |        LHS.getValueType().getVectorElementType() == MVT::f64); | |||
7578 | ||||
7579 | // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally | |||
7580 | // clean. Some of them require two branches to implement. | |||
7581 | AArch64CC::CondCode CC1, CC2; | |||
7582 | bool ShouldInvert; | |||
7583 | changeVectorFPCCToAArch64CC(CC, CC1, CC2, ShouldInvert); | |||
7584 | ||||
7585 | bool NoNaNs = getTargetMachine().Options.NoNaNsFPMath; | |||
7586 | SDValue Cmp = | |||
7587 | EmitVectorComparison(LHS, RHS, CC1, NoNaNs, CmpVT, dl, DAG); | |||
7588 | if (!Cmp.getNode()) | |||
7589 | return SDValue(); | |||
7590 | ||||
7591 | if (CC2 != AArch64CC::AL) { | |||
7592 | SDValue Cmp2 = | |||
7593 | EmitVectorComparison(LHS, RHS, CC2, NoNaNs, CmpVT, dl, DAG); | |||
7594 | if (!Cmp2.getNode()) | |||
7595 | return SDValue(); | |||
7596 | ||||
7597 | Cmp = DAG.getNode(ISD::OR, dl, CmpVT, Cmp, Cmp2); | |||
7598 | } | |||
7599 | ||||
7600 | Cmp = DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType()); | |||
7601 | ||||
7602 | if (ShouldInvert) | |||
7603 | return DAG.getNOT(dl, Cmp, Cmp.getValueType()); | |||
7604 | ||||
7605 | return Cmp; | |||
7606 | } | |||
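// E.g. a 'setcc one' (ordered not-equal) has no single AArch64 vector
// condition, so it is split into two comparisons (such as MI and GT) whose
// results are ORed together above.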
7607 | ||||
7608 | static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp, | |||
7609 | SelectionDAG &DAG) { | |||
7610 | SDValue VecOp = ScalarOp.getOperand(0); | |||
7611 | auto Rdx = DAG.getNode(Op, DL, VecOp.getSimpleValueType(), VecOp); | |||
7612 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarOp.getValueType(), Rdx, | |||
7613 | DAG.getConstant(0, DL, MVT::i64)); | |||
7614 | } | |||
7615 | ||||
7616 | SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op, | |||
7617 | SelectionDAG &DAG) const { | |||
7618 | SDLoc dl(Op); | |||
7619 | switch (Op.getOpcode()) { | |||
7620 | case ISD::VECREDUCE_ADD: | |||
7621 | return getReductionSDNode(AArch64ISD::UADDV, dl, Op, DAG); | |||
7622 | case ISD::VECREDUCE_SMAX: | |||
7623 | return getReductionSDNode(AArch64ISD::SMAXV, dl, Op, DAG); | |||
7624 | case ISD::VECREDUCE_SMIN: | |||
7625 | return getReductionSDNode(AArch64ISD::SMINV, dl, Op, DAG); | |||
7626 | case ISD::VECREDUCE_UMAX: | |||
7627 | return getReductionSDNode(AArch64ISD::UMAXV, dl, Op, DAG); | |||
7628 | case ISD::VECREDUCE_UMIN: | |||
7629 | return getReductionSDNode(AArch64ISD::UMINV, dl, Op, DAG); | |||
7630 | case ISD::VECREDUCE_FMAX: { | |||
7631 | assert(Op->getFlags().hasNoNaNs() && "fmax vector reduction needs NoNaN flag"); | |||
7632 | return DAG.getNode( | |||
7633 | ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(), | |||
7634 | DAG.getConstant(Intrinsic::aarch64_neon_fmaxnmv, dl, MVT::i32), | |||
7635 | Op.getOperand(0)); | |||
7636 | } | |||
7637 | case ISD::VECREDUCE_FMIN: { | |||
7638 | assert(Op->getFlags().hasNoNaNs() && "fmin vector reduction needs NoNaN flag"); | |||
7639 | return DAG.getNode( | |||
7640 | ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(), | |||
7641 | DAG.getConstant(Intrinsic::aarch64_neon_fminnmv, dl, MVT::i32), | |||
7642 | Op.getOperand(0)); | |||
7643 | } | |||
7644 | default: | |||
7645 | llvm_unreachable("Unhandled reduction"); | |||
7646 | } | |||
7647 | } | |||
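// E.g. 'vecreduce_add(<4 x i32> %v)' becomes a UADDV over %v followed by an
// extract of lane 0, as built by getReductionSDNode above.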
7648 | ||||
7649 | SDValue AArch64TargetLowering::LowerATOMIC_LOAD_SUB(SDValue Op, | |||
7650 | SelectionDAG &DAG) const { | |||
7651 | auto &Subtarget = static_cast<const AArch64Subtarget &>(DAG.getSubtarget()); | |||
7652 | if (!Subtarget.hasLSE()) | |||
7653 | return SDValue(); | |||
7654 | ||||
7655 | // LSE has an atomic load-add instruction, but not a load-sub. | |||
7656 | SDLoc dl(Op); | |||
7657 | MVT VT = Op.getSimpleValueType(); | |||
7658 | SDValue RHS = Op.getOperand(2); | |||
7659 | AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode()); | |||
7660 | RHS = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), RHS); | |||
7661 | return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, AN->getMemoryVT(), | |||
7662 | Op.getOperand(0), Op.getOperand(1), RHS, | |||
7663 | AN->getMemOperand()); | |||
7664 | } | |||
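// E.g. 'atomicrmw sub i64* %p, i64 %v' becomes ATOMIC_LOAD_ADD of (0 - %v),
// which can then select to the LSE LDADD family.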
7665 | ||||
7666 | SDValue AArch64TargetLowering::LowerATOMIC_LOAD_AND(SDValue Op, | |||
7667 | SelectionDAG &DAG) const { | |||
7668 | auto &Subtarget = static_cast<const AArch64Subtarget &>(DAG.getSubtarget()); | |||
7669 | if (!Subtarget.hasLSE()) | |||
7670 | return SDValue(); | |||
7671 | ||||
7672 | // LSE has an atomic load-clear instruction, but not a load-and. | |||
7673 | SDLoc dl(Op); | |||
7674 | MVT VT = Op.getSimpleValueType(); | |||
7675 | SDValue RHS = Op.getOperand(2); | |||
7676 | AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode()); | |||
7677 | RHS = DAG.getNode(ISD::XOR, dl, VT, DAG.getConstant(-1ULL, dl, VT), RHS); | |||
7678 | return DAG.getAtomic(ISD::ATOMIC_LOAD_CLR, dl, AN->getMemoryVT(), | |||
7679 | Op.getOperand(0), Op.getOperand(1), RHS, | |||
7680 | AN->getMemOperand()); | |||
7681 | } | |||
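// E.g. 'atomicrmw and i32* %p, i32 %v' becomes ATOMIC_LOAD_CLR of (%v xor -1),
// since LDCLR clears exactly the bits that are set in its operand.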
7682 | ||||
7683 | SDValue AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC( | |||
7684 | SDValue Op, SDValue Chain, SDValue &Size, SelectionDAG &DAG) const { | |||
7685 | SDLoc dl(Op); | |||
7686 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
7687 | SDValue Callee = DAG.getTargetExternalSymbol("__chkstk", PtrVT, 0); | |||
7688 | ||||
7689 | const uint32_t *Mask = | |||
7690 | Subtarget->getRegisterInfo()->getWindowsStackProbePreservedMask(); | |||
7691 | ||||
7692 | Size = DAG.getNode(ISD::SRL, dl, MVT::i64, Size, | |||
7693 | DAG.getConstant(4, dl, MVT::i64)); | |||
7694 | Chain = DAG.getCopyToReg(Chain, dl, AArch64::X15, Size, SDValue()); | |||
7695 | Chain = | |||
7696 | DAG.getNode(AArch64ISD::CALL, dl, DAG.getVTList(MVT::Other, MVT::Glue), | |||
7697 | Chain, Callee, DAG.getRegister(AArch64::X15, MVT::i64), | |||
7698 | DAG.getRegisterMask(Mask), Chain.getValue(1)); | |||
7699 | // To match the actual intent better, we should read the output from X15 here | |||
7700 | // again (instead of potentially spilling it to the stack), but rereading Size | |||
7701 | // from X15 here doesn't work at -O0, since it thinks that X15 is undefined | |||
7702 | // here. | |||
7703 | ||||
7704 | Size = DAG.getNode(ISD::SHL, dl, MVT::i64, Size, | |||
7705 | DAG.getConstant(4, dl, MVT::i64)); | |||
7706 | return Chain; | |||
7707 | } | |||
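// The __chkstk protocol used above passes the allocation size in X15 in units
// of 16 bytes, which is why Size is shifted right by 4 before the call and
// left by 4 afterwards.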
7708 | ||||
7709 | SDValue | |||
7710 | AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, | |||
7711 | SelectionDAG &DAG) const { | |||
7712 | assert(Subtarget->isTargetWindows() && | |||
7713 |        "Only Windows alloca probing supported"); | |||
7714 | SDLoc dl(Op); | |||
7715 | // Get the inputs. | |||
7716 | SDNode *Node = Op.getNode(); | |||
7717 | SDValue Chain = Op.getOperand(0); | |||
7718 | SDValue Size = Op.getOperand(1); | |||
7719 | unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); | |||
7720 | EVT VT = Node->getValueType(0); | |||
7721 | ||||
7722 | if (DAG.getMachineFunction().getFunction().hasFnAttribute( | |||
7723 | "no-stack-arg-probe")) { | |||
7724 | SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64); | |||
7725 | Chain = SP.getValue(1); | |||
7726 | SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size); | |||
7727 | if (Align) | |||
7728 | SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), | |||
7729 | DAG.getConstant(-(uint64_t)Align, dl, VT)); | |||
7730 | Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP); | |||
7731 | SDValue Ops[2] = {SP, Chain}; | |||
7732 | return DAG.getMergeValues(Ops, dl); | |||
7733 | } | |||
7734 | ||||
7735 | Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl); | |||
7736 | ||||
7737 | Chain = LowerWindowsDYNAMIC_STACKALLOC(Op, Chain, Size, DAG); | |||
7738 | ||||
7739 | SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64); | |||
7740 | Chain = SP.getValue(1); | |||
7741 | SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size); | |||
7742 | if (Align) | |||
7743 | SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), | |||
7744 | DAG.getConstant(-(uint64_t)Align, dl, VT)); | |||
7745 | Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP); | |||
7746 | ||||
7747 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true), | |||
7748 | DAG.getIntPtrConstant(0, dl, true), SDValue(), dl); | |||
7749 | ||||
7750 | SDValue Ops[2] = {SP, Chain}; | |||
7751 | return DAG.getMergeValues(Ops, dl); | |||
7752 | } | |||
7753 | ||||
7754 | /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as | |||
7755 | /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment | |||
7756 | /// specified in the intrinsic calls. | |||
7757 | bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, | |||
7758 | const CallInst &I, | |||
7759 | MachineFunction &MF, | |||
7760 | unsigned Intrinsic) const { | |||
7761 | auto &DL = I.getModule()->getDataLayout(); | |||
7762 | switch (Intrinsic) { | |||
7763 | case Intrinsic::aarch64_neon_ld2: | |||
7764 | case Intrinsic::aarch64_neon_ld3: | |||
7765 | case Intrinsic::aarch64_neon_ld4: | |||
7766 | case Intrinsic::aarch64_neon_ld1x2: | |||
7767 | case Intrinsic::aarch64_neon_ld1x3: | |||
7768 | case Intrinsic::aarch64_neon_ld1x4: | |||
7769 | case Intrinsic::aarch64_neon_ld2lane: | |||
7770 | case Intrinsic::aarch64_neon_ld3lane: | |||
7771 | case Intrinsic::aarch64_neon_ld4lane: | |||
7772 | case Intrinsic::aarch64_neon_ld2r: | |||
7773 | case Intrinsic::aarch64_neon_ld3r: | |||
7774 | case Intrinsic::aarch64_neon_ld4r: { | |||
7775 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
7776 | // Conservatively set memVT to the entire set of vectors loaded. | |||
7777 | uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64; | |||
7778 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); | |||
7779 | Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1); | |||
7780 | Info.offset = 0; | |||
7781 | Info.align = 0; | |||
7782 | // volatile loads with NEON intrinsics not supported | |||
7783 | Info.flags = MachineMemOperand::MOLoad; | |||
7784 | return true; | |||
7785 | } | |||
7786 | case Intrinsic::aarch64_neon_st2: | |||
7787 | case Intrinsic::aarch64_neon_st3: | |||
7788 | case Intrinsic::aarch64_neon_st4: | |||
7789 | case Intrinsic::aarch64_neon_st1x2: | |||
7790 | case Intrinsic::aarch64_neon_st1x3: | |||
7791 | case Intrinsic::aarch64_neon_st1x4: | |||
7792 | case Intrinsic::aarch64_neon_st2lane: | |||
7793 | case Intrinsic::aarch64_neon_st3lane: | |||
7794 | case Intrinsic::aarch64_neon_st4lane: { | |||
7795 | Info.opc = ISD::INTRINSIC_VOID; | |||
7796 | // Conservatively set memVT to the entire set of vectors stored. | |||
7797 | unsigned NumElts = 0; | |||
7798 | for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { | |||
7799 | Type *ArgTy = I.getArgOperand(ArgI)->getType(); | |||
7800 | if (!ArgTy->isVectorTy()) | |||
7801 | break; | |||
7802 | NumElts += DL.getTypeSizeInBits(ArgTy) / 64; | |||
7803 | } | |||
7804 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); | |||
7805 | Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1); | |||
7806 | Info.offset = 0; | |||
7807 | Info.align = 0; | |||
7808 | // volatile stores with NEON intrinsics not supported | |||
7809 | Info.flags = MachineMemOperand::MOStore; | |||
7810 | return true; | |||
7811 | } | |||
7812 | case Intrinsic::aarch64_ldaxr: | |||
7813 | case Intrinsic::aarch64_ldxr: { | |||
7814 | PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType()); | |||
7815 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
7816 | Info.memVT = MVT::getVT(PtrTy->getElementType()); | |||
7817 | Info.ptrVal = I.getArgOperand(0); | |||
7818 | Info.offset = 0; | |||
7819 | Info.align = DL.getABITypeAlignment(PtrTy->getElementType()); | |||
7820 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; | |||
7821 | return true; | |||
7822 | } | |||
7823 | case Intrinsic::aarch64_stlxr: | |||
7824 | case Intrinsic::aarch64_stxr: { | |||
7825 | PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType()); | |||
7826 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
7827 | Info.memVT = MVT::getVT(PtrTy->getElementType()); | |||
7828 | Info.ptrVal = I.getArgOperand(1); | |||
7829 | Info.offset = 0; | |||
7830 | Info.align = DL.getABITypeAlignment(PtrTy->getElementType()); | |||
7831 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; | |||
7832 | return true; | |||
7833 | } | |||
7834 | case Intrinsic::aarch64_ldaxp: | |||
7835 | case Intrinsic::aarch64_ldxp: | |||
7836 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
7837 | Info.memVT = MVT::i128; | |||
7838 | Info.ptrVal = I.getArgOperand(0); | |||
7839 | Info.offset = 0; | |||
7840 | Info.align = 16; | |||
7841 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; | |||
7842 | return true; | |||
7843 | case Intrinsic::aarch64_stlxp: | |||
7844 | case Intrinsic::aarch64_stxp: | |||
7845 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
7846 | Info.memVT = MVT::i128; | |||
7847 | Info.ptrVal = I.getArgOperand(2); | |||
7848 | Info.offset = 0; | |||
7849 | Info.align = 16; | |||
7850 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; | |||
7851 | return true; | |||
7852 | default: | |||
7853 | break; | |||
7854 | } | |||
7855 | ||||
7856 | return false; | |||
7857 | } | |||
7858 | ||||
7859 | bool AArch64TargetLowering::shouldReduceLoadWidth(SDNode *Load, | |||
7860 | ISD::LoadExtType ExtTy, | |||
7861 | EVT NewVT) const { | |||
7862 | // If we're reducing the load width in order to avoid having to use an extra | |||
7863 | // instruction to do extension then it's probably a good idea. | |||
7864 | if (ExtTy != ISD::NON_EXTLOAD) | |||
7865 | return true; | |||
7866 | // Don't reduce load width if it would prevent us from combining a shift into | |||
7867 | // the offset. | |||
7868 | MemSDNode *Mem = dyn_cast<MemSDNode>(Load); | |||
7869 | assert(Mem); | |||
7870 | const SDValue &Base = Mem->getBasePtr(); | |||
7871 | if (Base.getOpcode() == ISD::ADD && | |||
7872 | Base.getOperand(1).getOpcode() == ISD::SHL && | |||
7873 | Base.getOperand(1).hasOneUse() && | |||
7874 | Base.getOperand(1).getOperand(1).getOpcode() == ISD::Constant) { | |||
7875 | // The shift can be combined if it matches the size of the value being | |||
7876 | // loaded (and so reducing the width would make it not match). | |||
7877 | uint64_t ShiftAmount = Base.getOperand(1).getConstantOperandVal(1); | |||
7878 | uint64_t LoadBytes = Mem->getMemoryVT().getSizeInBits()/8; | |||
7879 | if (ShiftAmount == Log2_32(LoadBytes)) | |||
7880 | return false; | |||
7881 | } | |||
7882 | // We have no reason to disallow reducing the load width, so allow it. | |||
7883 | return true; | |||
7884 | } | |||
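// E.g. for 'ldr x0, [x1, x2, lsl #3]' the lsl #3 matches the 8-byte load
// width; narrowing the load to 32 bits would stop the shift folding into the
// addressing mode, so the narrowing is rejected above.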
7885 | ||||
7886 | // Truncations from 64-bit GPR to 32-bit GPR is free. | |||
7887 | bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { | |||
7888 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) | |||
7889 | return false; | |||
7890 | unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); | |||
7891 | unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); | |||
7892 | return NumBits1 > NumBits2; | |||
7893 | } | |||
7894 | bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { | |||
7895 | if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger()) | |||
7896 | return false; | |||
7897 | unsigned NumBits1 = VT1.getSizeInBits(); | |||
7898 | unsigned NumBits2 = VT2.getSizeInBits(); | |||
7899 | return NumBits1 > NumBits2; | |||
7900 | } | |||
7901 | ||||
7902 | /// Check if it is profitable to hoist instruction in then/else to if. | |||
7903 | /// Not profitable if I and its user can form an FMA instruction | |||
7904 | /// because we prefer FMSUB/FMADD. | |||
7905 | bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const { | |||
7906 | if (I->getOpcode() != Instruction::FMul) | |||
| ||||
7907 | return true; | |||
7908 | ||||
7909 | if (!I->hasOneUse()) | |||
7910 | return true; | |||
7911 | ||||
7912 | Instruction *User = I->user_back(); | |||
7913 | ||||
7914 | if (User && | |||
7915 | !(User->getOpcode() == Instruction::FSub || | |||
7916 | User->getOpcode() == Instruction::FAdd)) | |||
7917 | return true; | |||
7918 | ||||
7919 | const TargetOptions &Options = getTargetMachine().Options; | |||
7920 | const DataLayout &DL = I->getModule()->getDataLayout(); | |||
7921 | EVT VT = getValueType(DL, User->getOperand(0)->getType()); | |||
| ||||
7922 | ||||
7923 | return !(isFMAFasterThanFMulAndFAdd(VT) && | |||
7924 | isOperationLegalOrCustom(ISD::FMA, VT) && | |||
7925 | (Options.AllowFPOpFusion == FPOpFusion::Fast || | |||
7926 | Options.UnsafeFPMath)); | |||
7927 | } | |||
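// E.g. an fmul whose single user is an fadd can later be combined into an
// FMADD; hoisting the fmul away from its user would block that, so hoisting
// is reported as unprofitable when FMA formation is allowed.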
7928 | ||||
7929 | // All 32-bit GPR operations implicitly zero the high-half of the corresponding | |||
7930 | // 64-bit GPR. | |||
7931 | bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const { | |||
7932 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) | |||
7933 | return false; | |||
7934 | unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); | |||
7935 | unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); | |||
7936 | return NumBits1 == 32 && NumBits2 == 64; | |||
7937 | } | |||
7938 | bool AArch64TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { | |||
7939 | if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger()) | |||
7940 | return false; | |||
7941 | unsigned NumBits1 = VT1.getSizeInBits(); | |||
7942 | unsigned NumBits2 = VT2.getSizeInBits(); | |||
7943 | return NumBits1 == 32 && NumBits2 == 64; | |||
7944 | } | |||
7945 | ||||
7946 | bool AArch64TargetLowering::isZExtFree(SDValue Val, EVT VT2) const { | |||
7947 | EVT VT1 = Val.getValueType(); | |||
7948 | if (isZExtFree(VT1, VT2)) { | |||
7949 | return true; | |||
7950 | } | |||
7951 | ||||
7952 | if (Val.getOpcode() != ISD::LOAD) | |||
7953 | return false; | |||
7954 | ||||
7955 | // 8-, 16-, and 32-bit integer loads all implicitly zero-extend. | |||
7956 | return (VT1.isSimple() && !VT1.isVector() && VT1.isInteger() && | |||
7957 | VT2.isSimple() && !VT2.isVector() && VT2.isInteger() && | |||
7958 | VT1.getSizeInBits() <= 32); | |||
7959 | } | |||
7960 | ||||
7961 | bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const { | |||
7962 | if (isa<FPExtInst>(Ext)) | |||
7963 | return false; | |||
7964 | ||||
7965 | // Vector types are not free. | |||
7966 | if (Ext->getType()->isVectorTy()) | |||
7967 | return false; | |||
7968 | ||||
7969 | for (const Use &U : Ext->uses()) { | |||
7970 | // The extension is free if we can fold it with a left shift in an | |||
7971 | // addressing mode or an arithmetic operation: add, sub, and cmp. | |||
7972 | ||||
7973 | // Is there a shift? | |||
7974 | const Instruction *Instr = cast<Instruction>(U.getUser()); | |||
7975 | ||||
7976 | // Is this a constant shift? | |||
7977 | switch (Instr->getOpcode()) { | |||
7978 | case Instruction::Shl: | |||
7979 | if (!isa<ConstantInt>(Instr->getOperand(1))) | |||
7980 | return false; | |||
7981 | break; | |||
7982 | case Instruction::GetElementPtr: { | |||
7983 | gep_type_iterator GTI = gep_type_begin(Instr); | |||
7984 | auto &DL = Ext->getModule()->getDataLayout(); | |||
7985 | std::advance(GTI, U.getOperandNo()-1); | |||
7986 | Type *IdxTy = GTI.getIndexedType(); | |||
7987 | // This extension will end up with a shift because of the scaling factor. | |||
7988 | // 8-bit sized types have a scaling factor of 1, thus a shift amount of 0. | |||
7989 | // Get the shift amount based on the scaling factor: | |||
7990 | // log2(sizeof(IdxTy)) - log2(8). | |||
7991 | uint64_t ShiftAmt = | |||
7992 | countTrailingZeros(DL.getTypeStoreSizeInBits(IdxTy)) - 3; | |||
7993 | // Is the constant foldable in the shift of the addressing mode? | |||
7994 | // I.e., shift amount is between 1 and 4 inclusive. | |||
7995 | if (ShiftAmt == 0 || ShiftAmt > 4) | |||
7996 | return false; | |||
7997 | break; | |||
7998 | } | |||
7999 | case Instruction::Trunc: | |||
8000 | // Check if this is a noop. | |||
8001 | // trunc(sext ty1 to ty2) to ty1. | |||
8002 | if (Instr->getType() == Ext->getOperand(0)->getType()) | |||
8003 | continue; | |||
8004 | LLVM_FALLTHROUGH; | |||
8005 | default: | |||
8006 | return false; | |||
8007 | } | |||
8008 | ||||
8009 | // At this point we can use the bfm family, so this extension is free | |||
8010 | // for that use. | |||
8011 | } | |||
8012 | return true; | |||
8013 | } | |||
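// E.g. a zext from i32 feeding a 'shl ..., 2' folds into a single UBFIZ (the
// bfm family mentioned above), or into an extended-register operand such as
// 'add x0, x1, w2, uxtw #2'.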
8014 | ||||
8015 | bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType, | |||
8016 | unsigned &RequiredAligment) const { | |||
8017 | if (!LoadedType.isSimple() || | |||
8018 | (!LoadedType.isInteger() && !LoadedType.isFloatingPoint())) | |||
8019 | return false; | |||
8020 | // Cyclone supports unaligned accesses. | |||
8021 | RequiredAligment = 0; | |||
8022 | unsigned NumBits = LoadedType.getSizeInBits(); | |||
8023 | return NumBits == 32 || NumBits == 64; | |||
8024 | } | |||
8025 | ||||
8026 | /// A helper function for determining the number of interleaved accesses we | |||
8027 | /// will generate when lowering accesses of the given type. | |||
8028 | unsigned | |||
8029 | AArch64TargetLowering::getNumInterleavedAccesses(VectorType *VecTy, | |||
8030 | const DataLayout &DL) const { | |||
8031 | return (DL.getTypeSizeInBits(VecTy) + 127) / 128; | |||
8032 | } | |||
8033 | ||||
8034 | MachineMemOperand::Flags | |||
8035 | AArch64TargetLowering::getMMOFlags(const Instruction &I) const { | |||
8036 | if (Subtarget->getProcFamily() == AArch64Subtarget::Falkor && | |||
8037 | I.getMetadata(FALKOR_STRIDED_ACCESS_MD) != nullptr) | |||
8038 | return MOStridedAccess; | |||
8039 | return MachineMemOperand::MONone; | |||
8040 | } | |||
8041 | ||||
8042 | bool AArch64TargetLowering::isLegalInterleavedAccessType( | |||
8043 | VectorType *VecTy, const DataLayout &DL) const { | |||
8044 | ||||
8045 | unsigned VecSize = DL.getTypeSizeInBits(VecTy); | |||
8046 | unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType()); | |||
8047 | ||||
8048 | // Ensure the number of vector elements is greater than 1. | |||
8049 | if (VecTy->getNumElements() < 2) | |||
8050 | return false; | |||
8051 | ||||
8052 | // Ensure the element type is legal. | |||
8053 | if (ElSize != 8 && ElSize != 16 && ElSize != 32 && ElSize != 64) | |||
8054 | return false; | |||
8055 | ||||
8056 | // Ensure the total vector size is 64 or a multiple of 128. Types larger than | |||
8057 | // 128 will be split into multiple interleaved accesses. | |||
8058 | return VecSize == 64 || VecSize % 128 == 0; | |||
8059 | } | |||
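// E.g. <4 x i16> (64 bits) and <16 x i32> (512 bits, i.e. four 128-bit
// accesses) are legal here, while <2 x i16> (32 bits) is not.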
8060 | ||||
8061 | /// Lower an interleaved load into a ldN intrinsic. | |||
8062 | /// | |||
8063 | /// E.g. Lower an interleaved load (Factor = 2): | |||
8064 | /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr | |||
8065 | /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements | |||
8066 | /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements | |||
8067 | /// | |||
8068 | /// Into: | |||
8069 | /// %ld2 = { <4 x i32>, <4 x i32> } call llvm.aarch64.neon.ld2(%ptr) | |||
8070 | /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 0 | |||
8071 | /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 1 | |||
8072 | bool AArch64TargetLowering::lowerInterleavedLoad( | |||
8073 | LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles, | |||
8074 | ArrayRef<unsigned> Indices, unsigned Factor) const { | |||
8075 | assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && | |||
8076 |        "Invalid interleave factor"); | |||
8077 | assert(!Shuffles.empty() && "Empty shufflevector input"); | |||
8078 | assert(Shuffles.size() == Indices.size() && | |||
8079 |        "Unmatched number of shufflevectors and indices"); | |||
8080 | ||||
8081 | const DataLayout &DL = LI->getModule()->getDataLayout(); | |||
8082 | ||||
8083 | VectorType *VecTy = Shuffles[0]->getType(); | |||
8084 | ||||
8085 | // Skip if we do not have NEON and skip illegal vector types. We can | |||
8086 | // "legalize" wide vector types into multiple interleaved accesses as long as | |||
8087 | // the vector types are divisible by 128. | |||
8088 | if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(VecTy, DL)) | |||
8089 | return false; | |||
8090 | ||||
8091 | unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL); | |||
8092 | ||||
8093 | // A pointer vector cannot be the return type of the ldN intrinsics, so we | |||
8094 | // load integer vectors first and then convert to pointer vectors. | |||
8095 | Type *EltTy = VecTy->getVectorElementType(); | |||
8096 | if (EltTy->isPointerTy()) | |||
8097 | VecTy = | |||
8098 | VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements()); | |||
8099 | ||||
8100 | IRBuilder<> Builder(LI); | |||
8101 | ||||
8102 | // The base address of the load. | |||
8103 | Value *BaseAddr = LI->getPointerOperand(); | |||
8104 | ||||
8105 | if (NumLoads > 1) { | |||
8106 | // If we're going to generate more than one load, reset the sub-vector type | |||
8107 | // to something legal. | |||
8108 | VecTy = VectorType::get(VecTy->getVectorElementType(), | |||
8109 | VecTy->getVectorNumElements() / NumLoads); | |||
8110 | ||||
8111 | // We will compute the pointer operand of each load from the original base | |||
8112 | // address using GEPs. Cast the base address to a pointer to the scalar | |||
8113 | // element type. | |||
8114 | BaseAddr = Builder.CreateBitCast( | |||
8115 | BaseAddr, VecTy->getVectorElementType()->getPointerTo( | |||
8116 | LI->getPointerAddressSpace())); | |||
8117 | } | |||
8118 | ||||
8119 | Type *PtrTy = VecTy->getPointerTo(LI->getPointerAddressSpace()); | |||
8120 | Type *Tys[2] = {VecTy, PtrTy}; | |||
8121 | static const Intrinsic::ID LoadInts[3] = {Intrinsic::aarch64_neon_ld2, | |||
8122 | Intrinsic::aarch64_neon_ld3, | |||
8123 | Intrinsic::aarch64_neon_ld4}; | |||
8124 | Function *LdNFunc = | |||
8125 | Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys); | |||
8126 | ||||
8127 | // Holds sub-vectors extracted from the load intrinsic return values. The | |||
8128 | // sub-vectors are associated with the shufflevector instructions they will | |||
8129 | // replace. | |||
8130 | DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs; | |||
8131 | ||||
8132 | for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) { | |||
8133 | ||||
8134 | // If we're generating more than one load, compute the base address of | |||
8135 | // subsequent loads as an offset from the previous. | |||
8136 | if (LoadCount > 0) | |||
8137 | BaseAddr = Builder.CreateConstGEP1_32( | |||
8138 | BaseAddr, VecTy->getVectorNumElements() * Factor); | |||
8139 | ||||
8140 | CallInst *LdN = Builder.CreateCall( | |||
8141 | LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy), "ldN"); | |||
8142 | ||||
8143 | // Extract and store the sub-vectors returned by the load intrinsic. | |||
8144 | for (unsigned i = 0; i < Shuffles.size(); i++) { | |||
8145 | ShuffleVectorInst *SVI = Shuffles[i]; | |||
8146 | unsigned Index = Indices[i]; | |||
8147 | ||||
8148 | Value *SubVec = Builder.CreateExtractValue(LdN, Index); | |||
8149 | ||||
8150 | // Convert the integer vector to pointer vector if the element is pointer. | |||
8151 | if (EltTy->isPointerTy()) | |||
8152 | SubVec = Builder.CreateIntToPtr( | |||
8153 | SubVec, VectorType::get(SVI->getType()->getVectorElementType(), | |||
8154 | VecTy->getVectorNumElements())); | |||
8155 | SubVecs[SVI].push_back(SubVec); | |||
8156 | } | |||
8157 | } | |||
8158 | ||||
8159 | // Replace uses of the shufflevector instructions with the sub-vectors | |||
8160 | // returned by the load intrinsic. If a shufflevector instruction is | |||
8161 | // associated with more than one sub-vector, those sub-vectors will be | |||
8162 | // concatenated into a single wide vector. | |||
8163 | for (ShuffleVectorInst *SVI : Shuffles) { | |||
8164 | auto &SubVec = SubVecs[SVI]; | |||
8165 | auto *WideVec = | |||
8166 | SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0]; | |||
8167 | SVI->replaceAllUsesWith(WideVec); | |||
8168 | } | |||
8169 | ||||
8170 | return true; | |||
8171 | } | |||
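// E.g. a factor-2 load whose shuffles produce <8 x i32> results (NumLoads == 2)
// is emitted as two ld2 calls returning <4 x i32> pairs, and each pair of
// sub-vectors is concatenated back into the original <8 x i32> values.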
8172 | ||||
8173 | /// Lower an interleaved store into a stN intrinsic. | |||
8174 | /// | |||
8175 | /// E.g. Lower an interleaved store (Factor = 3): | |||
8176 | /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, | |||
8177 | /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> | |||
8178 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr | |||
8179 | /// | |||
8180 | /// Into: | |||
8181 | /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3> | |||
8182 | /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7> | |||
8183 | /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11> | |||
8184 | /// call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr) | |||
8185 | /// | |||
8186 | /// Note that the new shufflevectors will be removed and we'll only generate one | |||
8187 | /// st3 instruction in CodeGen. | |||
8188 | /// | |||
8189 | /// Example for a more general valid mask (Factor 3). Lower: | |||
8190 | /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1, | |||
8191 | /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19> | |||
8192 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr | |||
8193 | /// | |||
8194 | /// Into: | |||
8195 | /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7> | |||
8196 | /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35> | |||
8197 | /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19> | |||
8198 | /// call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr) | |||
8199 | bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI, | |||
8200 | ShuffleVectorInst *SVI, | |||
8201 | unsigned Factor) const { | |||
8202 | assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && | |||
8203 |        "Invalid interleave factor"); | |||
8204 | ||||
8205 | VectorType *VecTy = SVI->getType(); | |||
8206 | assert(VecTy->getVectorNumElements() % Factor == 0 && | |||
8207 |        "Invalid interleaved store"); | |||
8208 | ||||
8209 | unsigned LaneLen = VecTy->getVectorNumElements() / Factor; | |||
8210 | Type *EltTy = VecTy->getVectorElementType(); | |||
8211 | VectorType *SubVecTy = VectorType::get(EltTy, LaneLen); | |||
8212 | ||||
8213 | const DataLayout &DL = SI->getModule()->getDataLayout(); | |||
8214 | ||||
8215 | // Skip if we do not have NEON and skip illegal vector types. We can | |||
8216 | // "legalize" wide vector types into multiple interleaved accesses as long as | |||
8217 | // the vector types are divisible by 128. | |||
8218 | if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(SubVecTy, DL)) | |||
8219 | return false; | |||
8220 | ||||
8221 | unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL); | |||
8222 | ||||
8223 | Value *Op0 = SVI->getOperand(0); | |||
8224 | Value *Op1 = SVI->getOperand(1); | |||
8225 | IRBuilder<> Builder(SI); | |||
8226 | ||||
8227 | // StN intrinsics don't support pointer vectors as arguments. Convert pointer | |||
8228 | // vectors to integer vectors. | |||
8229 | if (EltTy->isPointerTy()) { | |||
8230 | Type *IntTy = DL.getIntPtrType(EltTy); | |||
8231 | unsigned NumOpElts = Op0->getType()->getVectorNumElements(); | |||
8232 | ||||
8233 | // Convert to the corresponding integer vector. | |||
8234 | Type *IntVecTy = VectorType::get(IntTy, NumOpElts); | |||
8235 | Op0 = Builder.CreatePtrToInt(Op0, IntVecTy); | |||
8236 | Op1 = Builder.CreatePtrToInt(Op1, IntVecTy); | |||
8237 | ||||
8238 | SubVecTy = VectorType::get(IntTy, LaneLen); | |||
8239 | } | |||
8240 | ||||
8241 | // The base address of the store. | |||
8242 | Value *BaseAddr = SI->getPointerOperand(); | |||
8243 | ||||
8244 | if (NumStores > 1) { | |||
8245 | // If we're going to generate more than one store, reset the lane length | |||
8246 | // and sub-vector type to something legal. | |||
8247 | LaneLen /= NumStores; | |||
8248 | SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen); | |||
8249 | ||||
8250 | // We will compute the pointer operand of each store from the original base | |||
8251 | // address using GEPs. Cast the base address to a pointer to the scalar | |||
8252 | // element type. | |||
8253 | BaseAddr = Builder.CreateBitCast( | |||
8254 | BaseAddr, SubVecTy->getVectorElementType()->getPointerTo( | |||
8255 | SI->getPointerAddressSpace())); | |||
8256 | } | |||
8257 | ||||
8258 | auto Mask = SVI->getShuffleMask(); | |||
8259 | ||||
8260 | Type *PtrTy = SubVecTy->getPointerTo(SI->getPointerAddressSpace()); | |||
8261 | Type *Tys[2] = {SubVecTy, PtrTy}; | |||
8262 | static const Intrinsic::ID StoreInts[3] = {Intrinsic::aarch64_neon_st2, | |||
8263 | Intrinsic::aarch64_neon_st3, | |||
8264 | Intrinsic::aarch64_neon_st4}; | |||
8265 | Function *StNFunc = | |||
8266 | Intrinsic::getDeclaration(SI->getModule(), StoreInts[Factor - 2], Tys); | |||
8267 | ||||
8268 | for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) { | |||
8269 | ||||
8270 | SmallVector<Value *, 5> Ops; | |||
8271 | ||||
8272 | // Split the shufflevector operands into sub vectors for the new stN call. | |||
8273 | for (unsigned i = 0; i < Factor; i++) { | |||
8274 | unsigned IdxI = StoreCount * LaneLen * Factor + i; | |||
8275 | if (Mask[IdxI] >= 0) { | |||
8276 | Ops.push_back(Builder.CreateShuffleVector( | |||
8277 | Op0, Op1, createSequentialMask(Builder, Mask[IdxI], LaneLen, 0))); | |||
8278 | } else { | |||
8279 | unsigned StartMask = 0; | |||
8280 | for (unsigned j = 1; j < LaneLen; j++) { | |||
8281 | unsigned IdxJ = StoreCount * LaneLen * Factor + j; | |||
8282 | if (Mask[IdxJ * Factor + IdxI] >= 0) { | |||
8283 | StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ; | |||
8284 | break; | |||
8285 | } | |||
8286 | } | |||
8287 | // Note: Filling undef gaps with random elements is ok, since | |||
8288 | // those elements were being written anyway (with undefs). | |||
8289 | // In the case of all undefs we default to using elements from 0. | |||
8290 | // Note: StartMask cannot be negative, it's checked in | |||
8291 | // isReInterleaveMask | |||
8292 | Ops.push_back(Builder.CreateShuffleVector( | |||
8293 | Op0, Op1, createSequentialMask(Builder, StartMask, LaneLen, 0))); | |||
8294 | } | |||
8295 | } | |||
8296 | ||||
8297 | // If we're generating more than one store, we compute the base address of | |||
8298 | // subsequent stores as an offset from the previous. | |||
8299 | if (StoreCount > 0) | |||
8300 | BaseAddr = Builder.CreateConstGEP1_32(BaseAddr, LaneLen * Factor); | |||
8301 | ||||
8302 | Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy)); | |||
8303 | Builder.CreateCall(StNFunc, Ops); | |||
8304 | } | |||
8305 | return true; | |||
8306 | } | |||
8307 | ||||
8308 | static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign, | |||
8309 | unsigned AlignCheck) { | |||
8310 | return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) && | |||
8311 | (DstAlign == 0 || DstAlign % AlignCheck == 0)); | |||
8312 | } | |||
8313 | ||||
8314 | EVT AArch64TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign, | |||
8315 | unsigned SrcAlign, bool IsMemset, | |||
8316 | bool ZeroMemset, | |||
8317 | bool MemcpyStrSrc, | |||
8318 | MachineFunction &MF) const { | |||
8319 | // Don't use AdvSIMD to implement 16-byte memset. It would have taken one | |||
8320 | // instruction to materialize the v2i64 zero and one store (with restrictive | |||
8321 | // addressing mode). Just do two i64 store of zero-registers. | |||
8322 | bool Fast; | |||
8323 | const Function &F = MF.getFunction(); | |||
8324 | if (Subtarget->hasFPARMv8() && !IsMemset && Size >= 16 && | |||
8325 | !F.hasFnAttribute(Attribute::NoImplicitFloat) && | |||
8326 | (memOpAlign(SrcAlign, DstAlign, 16) || | |||
8327 | (allowsMisalignedMemoryAccesses(MVT::f128, 0, 1, &Fast) && Fast))) | |||
8328 | return MVT::f128; | |||
8329 | ||||
8330 | if (Size >= 8 && | |||
8331 | (memOpAlign(SrcAlign, DstAlign, 8) || | |||
8332 | (allowsMisalignedMemoryAccesses(MVT::i64, 0, 1, &Fast) && Fast))) | |||
8333 | return MVT::i64; | |||
8334 | ||||
8335 | if (Size >= 4 && | |||
8336 | (memOpAlign(SrcAlign, DstAlign, 4) || | |||
8337 | (allowsMisalignedMemoryAccesses(MVT::i32, 0, 1, &Fast) && Fast))) | |||
8338 | return MVT::i32; | |||
8339 | ||||
8340 | return MVT::Other; | |||
8341 | } | |||
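// E.g. a 32-byte memcpy between 16-byte-aligned buffers is lowered with f128
// (q-register) accesses, while a 16-byte memset falls through to i64 stores
// of the zero register as explained above.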
8342 | ||||
8343 | // 12-bit optionally shifted immediates are legal for adds. | |||
8344 | bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const { | |||
8345 | if (Immed == std::numeric_limits<int64_t>::min()) { | |||
8346 | LLVM_DEBUG(dbgs() << "Illegal add imm " << Immed | |||
8347 |                   << ": avoid UB for INT64_MIN\n"); | |||
8348 | return false; | |||
8349 | } | |||
8350 | // Same encoding for add/sub, just flip the sign. | |||
8351 | Immed = std::abs(Immed); | |||
8352 | bool IsLegal = ((Immed >> 12) == 0 || | |||
8353 | ((Immed & 0xfff) == 0 && Immed >> 24 == 0)); | |||
8354 | LLVM_DEBUG(dbgs() << "Is " << Immed | |||
8355 |                   << " legal add imm: " << (IsLegal ? "yes" : "no") << "\n"); | |||
8356 | return IsLegal; | |||
8357 | } | |||
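// E.g. 4095 fits in the unshifted 12-bit field and 0x7ff000 in the shifted
// one, so both are legal; 4097 sets bits in both fields and is rejected.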
8358 | ||||
8359 | // Integer comparisons are implemented with ADDS/SUBS, so the range of valid | |||
8360 | // immediates is the same as for an add or a sub. | |||
8361 | bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Immed) const { | |||
8362 | return isLegalAddImmediate(Immed); | |||
8363 | } | |||
8364 | ||||
8365 | /// isLegalAddressingMode - Return true if the addressing mode represented | |||
8366 | /// by AM is legal for this target, for a load/store of the specified type. | |||
8367 | bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL, | |||
8368 | const AddrMode &AM, Type *Ty, | |||
8369 | unsigned AS, Instruction *I) const { | |||
8370 | // AArch64 has five basic addressing modes: | |||
8371 | // reg | |||
8372 | // reg + 9-bit signed offset | |||
8373 | // reg + SIZE_IN_BYTES * 12-bit unsigned offset | |||
8374 | // reg1 + reg2 | |||
8375 | // reg + SIZE_IN_BYTES * reg | |||
8376 | ||||
8377 | // No global is ever allowed as a base. | |||
8378 | if (AM.BaseGV) | |||
8379 | return false; | |||
8380 | ||||
8381 | // No reg+reg+imm addressing. | |||
8382 | if (AM.HasBaseReg && AM.BaseOffs && AM.Scale) | |||
8383 | return false; | |||
8384 | ||||
8385 | // check reg + imm case: | |||
8386 | // i.e., reg + 0, reg + imm9, reg + SIZE_IN_BYTES * uimm12 | |||
8387 | uint64_t NumBytes = 0; | |||
8388 | if (Ty->isSized()) { | |||
8389 | uint64_t NumBits = DL.getTypeSizeInBits(Ty); | |||
8390 | NumBytes = NumBits / 8; | |||
8391 | if (!isPowerOf2_64(NumBits)) | |||
8392 | NumBytes = 0; | |||
8393 | } | |||
8394 | ||||
8395 | if (!AM.Scale) { | |||
8396 | int64_t Offset = AM.BaseOffs; | |||
8397 | ||||
8398 | // 9-bit signed offset | |||
8399 | if (isInt<9>(Offset)) | |||
8400 | return true; | |||
8401 | ||||
8402 | // 12-bit unsigned offset | |||
8403 | unsigned shift = Log2_64(NumBytes); | |||
8404 | if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 && | |||
8405 | // Must be a multiple of NumBytes (NumBytes is a power of 2) | |||
8406 | (Offset >> shift) << shift == Offset) | |||
8407 | return true; | |||
8408 | return false; | |||
8409 | } | |||
8410 | ||||
8411 | // Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2 | |||
8412 | ||||
8413 | return AM.Scale == 1 || (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes); | |||
8414 | } | |||
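// E.g. for an i64 access, [x0, #-256] is legal (9-bit signed offset) and
// [x0, #32760] is legal (4095 * 8), but [x0, #-260] fits neither form.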
8415 | ||||
8416 | bool AArch64TargetLowering::shouldConsiderGEPOffsetSplit() const { | |||
8417 | // Consider splitting large offset of struct or array. | |||
8418 | return true; | |||
8419 | } | |||
8420 | ||||
8421 | int AArch64TargetLowering::getScalingFactorCost(const DataLayout &DL, | |||
8422 | const AddrMode &AM, Type *Ty, | |||
8423 | unsigned AS) const { | |||
8424 | // Scaling factors are not free at all. | |||
8425 | // Operands | Rt Latency | |||
8426 | // ------------------------------------------- | |||
8427 | // Rt, [Xn, Xm] | 4 | |||
8428 | // ------------------------------------------- | |||
8429 | // Rt, [Xn, Xm, lsl #imm] | Rn: 4 Rm: 5 | |||
8430 | // Rt, [Xn, Wm, <extend> #imm] | | |||
8431 | if (isLegalAddressingMode(DL, AM, Ty, AS)) | |||
8432 |     // Scale represents reg2 * scale, so charge a cost of 1 when the | |||
8433 |     // scale is neither 0 nor 1. | |||
8434 | return AM.Scale != 0 && AM.Scale != 1; | |||
8435 | return -1; | |||
8436 | } | |||
8437 | ||||
8438 | bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { | |||
8439 | VT = VT.getScalarType(); | |||
8440 | ||||
8441 | if (!VT.isSimple()) | |||
8442 | return false; | |||
8443 | ||||
8444 | switch (VT.getSimpleVT().SimpleTy) { | |||
8445 | case MVT::f32: | |||
8446 | case MVT::f64: | |||
8447 | return true; | |||
8448 | default: | |||
8449 | break; | |||
8450 | } | |||
8451 | ||||
8452 | return false; | |||
8453 | } | |||
8454 | ||||
8455 | const MCPhysReg * | |||
8456 | AArch64TargetLowering::getScratchRegisters(CallingConv::ID) const { | |||
8457 | // LR is a callee-save register, but we must treat it as clobbered by any call | |||
8458 | // site. Hence we include LR in the scratch registers, which are in turn added | |||
8459 | // as implicit-defs for stackmaps and patchpoints. | |||
8460 | static const MCPhysReg ScratchRegs[] = { | |||
8461 | AArch64::X16, AArch64::X17, AArch64::LR, 0 | |||
8462 | }; | |||
8463 | return ScratchRegs; | |||
8464 | } | |||
8465 | ||||
8466 | bool | |||
8467 | AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N) const { | |||
8468 | EVT VT = N->getValueType(0); | |||
8469 | // If N is unsigned bit extraction: ((x >> C) & mask), then do not combine | |||
8470 | // it with shift to let it be lowered to UBFX. | |||
8471 | if (N->getOpcode() == ISD::AND && (VT == MVT::i32 || VT == MVT::i64) && | |||
8472 | isa<ConstantSDNode>(N->getOperand(1))) { | |||
8473 | uint64_t TruncMask = N->getConstantOperandVal(1); | |||
8474 | if (isMask_64(TruncMask) && | |||
8475 | N->getOperand(0).getOpcode() == ISD::SRL && | |||
8476 | isa<ConstantSDNode>(N->getOperand(0)->getOperand(1))) | |||
8477 | return false; | |||
8478 | } | |||
8479 | return true; | |||
8480 | } | |||
8481 | ||||
8482 | bool AArch64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, | |||
8483 | Type *Ty) const { | |||
8484 | assert(Ty->isIntegerTy()); | |||
8485 | ||||
8486 | unsigned BitSize = Ty->getPrimitiveSizeInBits(); | |||
8487 | if (BitSize == 0) | |||
8488 | return false; | |||
8489 | ||||
8490 | int64_t Val = Imm.getSExtValue(); | |||
8491 | if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, BitSize)) | |||
8492 | return true; | |||
8493 | ||||
8494 | if ((int64_t)Val < 0) | |||
8495 | Val = ~Val; | |||
8496 | if (BitSize == 32) | |||
8497 | Val &= (1LL << 32) - 1; | |||
8498 | ||||
8499 | unsigned LZ = countLeadingZeros((uint64_t)Val); | |||
8500 | unsigned Shift = (63 - LZ) / 16; | |||
8501 | // MOVZ is free so return true for one or fewer MOVK. | |||
8502 | return Shift < 3; | |||
8503 | } | |||
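// Worked example (illustrative): Imm = 0x0000123456780000 is not a logical
// immediate; its highest set bit is bit 44, so LZ = 19 and
// Shift = (63 - 19) / 16 = 2, which is < 3, so materializing via MOVZ/MOVK
// is preferred over loading the constant from memory.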
8504 | ||||
8505 | bool AArch64TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, | |||
8506 | unsigned Index) const { | |||
8507 | if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT)) | |||
8508 | return false; | |||
8509 | ||||
8510 | return (Index == 0 || Index == ResVT.getVectorNumElements()); | |||
8511 | } | |||
8512 | ||||
8513 | /// Turn vector tests of the signbit in the form of: | |||
8514 | /// xor (sra X, elt_size(X)-1), -1 | |||
8515 | /// into: | |||
8516 | /// cmge X, X, #0 | |||
8517 | static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG, | |||
8518 | const AArch64Subtarget *Subtarget) { | |||
8519 | EVT VT = N->getValueType(0); | |||
8520 | if (!Subtarget->hasNEON() || !VT.isVector()) | |||
8521 | return SDValue(); | |||
8522 | ||||
8523 | // There must be a shift right algebraic before the xor, and the xor must be a | |||
8524 | // 'not' operation. | |||
8525 | SDValue Shift = N->getOperand(0); | |||
8526 | SDValue Ones = N->getOperand(1); | |||
8527 | if (Shift.getOpcode() != AArch64ISD::VASHR || !Shift.hasOneUse() || | |||
8528 | !ISD::isBuildVectorAllOnes(Ones.getNode())) | |||
8529 | return SDValue(); | |||
8530 | ||||
8531 | // The shift should be smearing the sign bit across each vector element. | |||
8532 | auto *ShiftAmt = dyn_cast<ConstantSDNode>(Shift.getOperand(1)); | |||
8533 | EVT ShiftEltTy = Shift.getValueType().getVectorElementType(); | |||
8534 | if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1) | |||
8535 | return SDValue(); | |||
8536 | ||||
8537 | return DAG.getNode(AArch64ISD::CMGEz, SDLoc(N), VT, Shift.getOperand(0)); | |||
8538 | } | |||
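// Illustrative fold (not from the original source), for v4i32:
//   (xor (AArch64ISD::VASHR x, #31), (build_vector -1, -1, -1, -1))
//     --> (AArch64ISD::CMGEz x)
// since NOT of a smeared sign bit is exactly "lane >= 0".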
8539 | ||||
8540 | // Generate SUBS and CSEL for integer abs. | |||
8541 | static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) { | |||
8542 | EVT VT = N->getValueType(0); | |||
8543 | ||||
8544 | SDValue N0 = N->getOperand(0); | |||
8545 | SDValue N1 = N->getOperand(1); | |||
8546 | SDLoc DL(N); | |||
8547 | ||||
8548 | // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1) | |||
8549 | // and change it to SUB and CSEL. | |||
8550 | if (VT.isInteger() && N->getOpcode() == ISD::XOR && | |||
8551 | N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1 && | |||
8552 | N1.getOpcode() == ISD::SRA && N1.getOperand(0) == N0.getOperand(0)) | |||
8553 | if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1))) | |||
8554 | if (Y1C->getAPIntValue() == VT.getSizeInBits() - 1) { | |||
8555 | SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), | |||
8556 | N0.getOperand(0)); | |||
8557 | // Generate SUBS & CSEL. | |||
8558 | SDValue Cmp = | |||
8559 | DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::i32), | |||
8560 | N0.getOperand(0), DAG.getConstant(0, DL, VT)); | |||
8561 | return DAG.getNode(AArch64ISD::CSEL, DL, VT, N0.getOperand(0), Neg, | |||
8562 | DAG.getConstant(AArch64CC::PL, DL, MVT::i32), | |||
8563 | SDValue(Cmp.getNode(), 1)); | |||
8564 | } | |||
8565 | return SDValue(); | |||
8566 | } | |||
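// Illustrative result (not from the original source): for i32 abs written as
// xor(add(x, sra(x, 31)), sra(x, 31)), the combine emits
//   SUBS(x, 0) ; CSEL(x, 0 - x, PL)
// which selects x when x >= 0 (PL) and the negation otherwise.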
8567 | ||||
8568 | static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG, | |||
8569 | TargetLowering::DAGCombinerInfo &DCI, | |||
8570 | const AArch64Subtarget *Subtarget) { | |||
8571 | if (DCI.isBeforeLegalizeOps()) | |||
8572 | return SDValue(); | |||
8573 | ||||
8574 | if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget)) | |||
8575 | return Cmp; | |||
8576 | ||||
8577 | return performIntegerAbsCombine(N, DAG); | |||
8578 | } | |||
8579 | ||||
8580 | SDValue | |||
8581 | AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, | |||
8582 | SelectionDAG &DAG, | |||
8583 | std::vector<SDNode *> *Created) const { | |||
8584 | AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); | |||
8585 | if (isIntDivCheap(N->getValueType(0), Attr)) | |||
8586 | return SDValue(N,0); // Lower SDIV as SDIV | |||
8587 | ||||
8588 | // fold (sdiv X, pow2) | |||
8589 | EVT VT = N->getValueType(0); | |||
8590 | if ((VT != MVT::i32 && VT != MVT::i64) || | |||
8591 | !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) | |||
8592 | return SDValue(); | |||
8593 | ||||
8594 | SDLoc DL(N); | |||
8595 | SDValue N0 = N->getOperand(0); | |||
8596 | unsigned Lg2 = Divisor.countTrailingZeros(); | |||
8597 | SDValue Zero = DAG.getConstant(0, DL, VT); | |||
8598 | SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT); | |||
8599 | ||||
8600 | // Add (N0 < 0) ? Pow2 - 1 : 0; | |||
8601 | SDValue CCVal; | |||
8602 | SDValue Cmp = getAArch64Cmp(N0, Zero, ISD::SETLT, CCVal, DAG, DL); | |||
8603 | SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne); | |||
8604 | SDValue CSel = DAG.getNode(AArch64ISD::CSEL, DL, VT, Add, N0, CCVal, Cmp); | |||
8605 | ||||
8606 | if (Created) { | |||
8607 | Created->push_back(Cmp.getNode()); | |||
8608 | Created->push_back(Add.getNode()); | |||
8609 | Created->push_back(CSel.getNode()); | |||
8610 | } | |||
8611 | ||||
8612 | // Divide by pow2. | |||
8613 | SDValue SRA = | |||
8614 | DAG.getNode(ISD::SRA, DL, VT, CSel, DAG.getConstant(Lg2, DL, MVT::i64)); | |||
8615 | ||||
8616 | // If we're dividing by a positive value, we're done. Otherwise, we must | |||
8617 | // negate the result. | |||
8618 | if (Divisor.isNonNegative()) | |||
8619 | return SRA; | |||
8620 | ||||
8621 | if (Created) | |||
8622 | Created->push_back(SRA.getNode()); | |||
8623 | return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA); | |||
8624 | } | |||
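// Worked example (illustrative): sdiv i32 %x, 8 gives Lg2 = 3 and becomes
//   add  w8, w0, #7
//   cmp  w0, #0
//   csel w8, w8, w0, lt    ; bias only negative inputs toward zero
//   asr  w0, w8, #3
// and for a divisor of -8 the same sequence is followed by a negate.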
8625 | ||||
8626 | static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG, | |||
8627 | TargetLowering::DAGCombinerInfo &DCI, | |||
8628 | const AArch64Subtarget *Subtarget) { | |||
8629 | if (DCI.isBeforeLegalizeOps()) | |||
8630 | return SDValue(); | |||
8631 | ||||
8632 | // The below optimizations require a constant RHS. | |||
8633 | if (!isa<ConstantSDNode>(N->getOperand(1))) | |||
8634 | return SDValue(); | |||
8635 | ||||
8636 | ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(1)); | |||
8637 | const APInt &ConstValue = C->getAPIntValue(); | |||
8638 | ||||
8639 | // Multiplication of a power of two plus/minus one can be done more | |||
8640 | // cheaply as shift+add/sub. For now, this is done unconditionally. If | |||
8641 | // future CPUs have a cheaper MADD instruction, this may need to be | |||
8642 | // gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and | |||
8643 | // 64-bit is 5 cycles, so this is always a win. | |||
8644 | // More aggressively, some multiplications N0 * C can be lowered to | |||
8645 | // shift+add+shift if the constant C = A * B where A = 2^N + 1 and B = 2^M, | |||
8646 | // e.g. 6=3*2=(2+1)*2. | |||
8647 | // TODO: consider lowering more cases, e.g. C = 14, -6, -14 or even 45 | |||
8648 | // which equals (1+2)*16-(1+2). | |||
8649 | SDValue N0 = N->getOperand(0); | |||
8650 | // TrailingZeroes is used to test if the mul can be lowered to | |||
8651 | // shift+add+shift. | |||
8652 | unsigned TrailingZeroes = ConstValue.countTrailingZeros(); | |||
8653 | if (TrailingZeroes) { | |||
8654 | // Conservatively do not lower to shift+add+shift if the mul might be | |||
8655 | // folded into smul or umul. | |||
8656 | if (N0->hasOneUse() && (isSignExtended(N0.getNode(), DAG) || | |||
8657 | isZeroExtended(N0.getNode(), DAG))) | |||
8658 | return SDValue(); | |||
8659 | // Conservatively do not lower to shift+add+shift if the mul might be | |||
8660 | // folded into madd or msub. | |||
8661 | if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ADD || | |||
8662 | N->use_begin()->getOpcode() == ISD::SUB)) | |||
8663 | return SDValue(); | |||
8664 | } | |||
8665 | // Use ShiftedConstValue instead of ConstValue to support both shift+add/sub | |||
8666 | // and shift+add+shift. | |||
8667 | APInt ShiftedConstValue = ConstValue.ashr(TrailingZeroes); | |||
8668 | ||||
8669 | unsigned ShiftAmt, AddSubOpc; | |||
8670 | // Is the shifted value the LHS operand of the add/sub? | |||
8671 | bool ShiftValUseIsN0 = true; | |||
8672 | // Do we need to negate the result? | |||
8673 | bool NegateResult = false; | |||
8674 | ||||
8675 | if (ConstValue.isNonNegative()) { | |||
8676 | // (mul x, 2^N + 1) => (add (shl x, N), x) | |||
8677 | // (mul x, 2^N - 1) => (sub (shl x, N), x) | |||
8678 | // (mul x, (2^N + 1) * 2^M) => (shl (add (shl x, N), x), M) | |||
8679 | APInt SCVMinus1 = ShiftedConstValue - 1; | |||
8680 | APInt CVPlus1 = ConstValue + 1; | |||
8681 | if (SCVMinus1.isPowerOf2()) { | |||
8682 | ShiftAmt = SCVMinus1.logBase2(); | |||
8683 | AddSubOpc = ISD::ADD; | |||
8684 | } else if (CVPlus1.isPowerOf2()) { | |||
8685 | ShiftAmt = CVPlus1.logBase2(); | |||
8686 | AddSubOpc = ISD::SUB; | |||
8687 | } else | |||
8688 | return SDValue(); | |||
8689 | } else { | |||
8690 | // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) | |||
8691 | // (mul x, -(2^N + 1)) => - (add (shl x, N), x) | |||
8692 | APInt CVNegPlus1 = -ConstValue + 1; | |||
8693 | APInt CVNegMinus1 = -ConstValue - 1; | |||
8694 | if (CVNegPlus1.isPowerOf2()) { | |||
8695 | ShiftAmt = CVNegPlus1.logBase2(); | |||
8696 | AddSubOpc = ISD::SUB; | |||
8697 | ShiftValUseIsN0 = false; | |||
8698 | } else if (CVNegMinus1.isPowerOf2()) { | |||
8699 | ShiftAmt = CVNegMinus1.logBase2(); | |||
8700 | AddSubOpc = ISD::ADD; | |||
8701 | NegateResult = true; | |||
8702 | } else | |||
8703 | return SDValue(); | |||
8704 | } | |||
8705 | ||||
8706 | SDLoc DL(N); | |||
8707 | EVT VT = N->getValueType(0); | |||
8708 | SDValue ShiftedVal = DAG.getNode(ISD::SHL, DL, VT, N0, | |||
8709 | DAG.getConstant(ShiftAmt, DL, MVT::i64)); | |||
8710 | ||||
8711 | SDValue AddSubN0 = ShiftValUseIsN0 ? ShiftedVal : N0; | |||
8712 | SDValue AddSubN1 = ShiftValUseIsN0 ? N0 : ShiftedVal; | |||
8713 | SDValue Res = DAG.getNode(AddSubOpc, DL, VT, AddSubN0, AddSubN1); | |||
8714 | assert(!(NegateResult && TrailingZeroes) && | |||
8715 |        "NegateResult and TrailingZeroes cannot both be true for now."); | |||
8716 | // Negate the result. | |||
8717 | if (NegateResult) | |||
8718 | return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res); | |||
8719 | // Shift the result. | |||
8720 | if (TrailingZeroes) | |||
8721 | return DAG.getNode(ISD::SHL, DL, VT, Res, | |||
8722 | DAG.getConstant(TrailingZeroes, DL, MVT::i64)); | |||
8723 | return Res; | |||
8724 | } | |||
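// Worked examples (illustrative):
//   mul x, 3  : SCVMinus1 = 2 is a power of 2 -> (add (shl x, 1), x)
//   mul x, 6  : TrailingZeroes = 1, shifted value 3
//               -> (shl (add (shl x, 1), x), 1)
//   mul x, -7 : CVNegPlus1 = 8 is a power of 2 -> (sub x, (shl x, 3))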
8725 | ||||
8726 | static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N, | |||
8727 | SelectionDAG &DAG) { | |||
8728 | // Take advantage of vector comparisons producing 0 or -1 in each lane to | |||
8729 | // optimize away operation when it's from a constant. | |||
8730 | // | |||
8731 | // The general transformation is: | |||
8732 | // UNARYOP(AND(VECTOR_CMP(x,y), constant)) --> | |||
8733 | // AND(VECTOR_CMP(x,y), constant2) | |||
8734 | // constant2 = UNARYOP(constant) | |||
8735 | ||||
8736 | // Early exit if this isn't a vector operation, the operand of the | |||
8737 | // unary operation isn't a bitwise AND, or if the sizes of the operations | |||
8738 | // aren't the same. | |||
8739 | EVT VT = N->getValueType(0); | |||
8740 | if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND || | |||
8741 | N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC || | |||
8742 | VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits()) | |||
8743 | return SDValue(); | |||
8744 | ||||
8745 | // Now check that the other operand of the AND is a constant. We could | |||
8746 | // make the transformation for non-constant splats as well, but it's unclear | |||
8747 | // that would be a benefit as it would not eliminate any operations, just | |||
8748 | // perform one more step in scalar code before moving to the vector unit. | |||
8749 | if (BuildVectorSDNode *BV = | |||
8750 | dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) { | |||
8751 | // Bail out if the vector isn't a constant. | |||
8752 | if (!BV->isConstant()) | |||
8753 | return SDValue(); | |||
8754 | ||||
8755 | // Everything checks out. Build up the new and improved node. | |||
8756 | SDLoc DL(N); | |||
8757 | EVT IntVT = BV->getValueType(0); | |||
8758 | // Create a new constant of the appropriate type for the transformed | |||
8759 | // DAG. | |||
8760 | SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0)); | |||
8761 | // The AND node needs bitcasts to/from an integer vector type around it. | |||
8762 | SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst); | |||
8763 | SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT, | |||
8764 | N->getOperand(0)->getOperand(0), MaskConst); | |||
8765 | SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd); | |||
8766 | return Res; | |||
8767 | } | |||
8768 | ||||
8769 | return SDValue(); | |||
8770 | } | |||
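// Illustrative instance (not from the original source):
//   (v4f32 (sint_to_fp (and (setcc a, b, setlt), (build_vector 1,1,1,1))))
// becomes an AND of the compare mask with the bitcast of the converted
// constant <1.0, 1.0, 1.0, 1.0>, valid because every mask lane is 0 or -1.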
8771 | ||||
8772 | static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG, | |||
8773 | const AArch64Subtarget *Subtarget) { | |||
8774 | // First try to optimize away the conversion when it's conditionally from | |||
8775 | // a constant. Vectors only. | |||
8776 | if (SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG)) | |||
8777 | return Res; | |||
8778 | ||||
8779 | EVT VT = N->getValueType(0); | |||
8780 | if (VT != MVT::f32 && VT != MVT::f64) | |||
8781 | return SDValue(); | |||
8782 | ||||
8783 | // Only optimize when the source and destination types have the same width. | |||
8784 | if (VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits()) | |||
8785 | return SDValue(); | |||
8786 | ||||
8787 | // If the result of an integer load is only used by an integer-to-float | |||
8788 | // conversion, use an fp load and an AdvSIMD scalar {S|U}CVTF instead. | |||
8789 | // This eliminates an "integer-to-vector-move" UOP and improves throughput. | |||
8790 | SDValue N0 = N->getOperand(0); | |||
8791 | if (Subtarget->hasNEON() && ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() && | |||
8792 | // Do not change the width of a volatile load. | |||
8793 | !cast<LoadSDNode>(N0)->isVolatile()) { | |||
8794 | LoadSDNode *LN0 = cast<LoadSDNode>(N0); | |||
8795 | SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(), | |||
8796 | LN0->getPointerInfo(), LN0->getAlignment(), | |||
8797 | LN0->getMemOperand()->getFlags()); | |||
8798 | ||||
8799 | // Make sure successors of the original load stay after it by updating them | |||
8800 | // to use the new Chain. | |||
8801 | DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), Load.getValue(1)); | |||
8802 | ||||
8803 | unsigned Opcode = | |||
8804 | (N->getOpcode() == ISD::SINT_TO_FP) ? AArch64ISD::SITOF : AArch64ISD::UITOF; | |||
8805 | return DAG.getNode(Opcode, SDLoc(N), VT, Load); | |||
8806 | } | |||
8807 | ||||
8808 | return SDValue(); | |||
8809 | } | |||
8810 | ||||
8811 | /// Fold a floating-point multiply by power of two into floating-point to | |||
8812 | /// fixed-point conversion. | |||
8813 | static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG, | |||
8814 | TargetLowering::DAGCombinerInfo &DCI, | |||
8815 | const AArch64Subtarget *Subtarget) { | |||
8816 | if (!Subtarget->hasNEON()) | |||
8817 | return SDValue(); | |||
8818 | ||||
8819 | SDValue Op = N->getOperand(0); | |||
8820 | if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() || | |||
8821 | Op.getOpcode() != ISD::FMUL) | |||
8822 | return SDValue(); | |||
8823 | ||||
8824 | SDValue ConstVec = Op->getOperand(1); | |||
8825 | if (!isa<BuildVectorSDNode>(ConstVec)) | |||
8826 | return SDValue(); | |||
8827 | ||||
8828 | MVT FloatTy = Op.getSimpleValueType().getVectorElementType(); | |||
8829 | uint32_t FloatBits = FloatTy.getSizeInBits(); | |||
8830 | if (FloatBits != 32 && FloatBits != 64) | |||
8831 | return SDValue(); | |||
8832 | ||||
8833 | MVT IntTy = N->getSimpleValueType(0).getVectorElementType(); | |||
8834 | uint32_t IntBits = IntTy.getSizeInBits(); | |||
8835 | if (IntBits != 16 && IntBits != 32 && IntBits != 64) | |||
8836 | return SDValue(); | |||
8837 | ||||
8838 | // Avoid conversions where iN is larger than the float (e.g., float -> i64). | |||
8839 | if (IntBits > FloatBits) | |||
8840 | return SDValue(); | |||
8841 | ||||
8842 | BitVector UndefElements; | |||
8843 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); | |||
8844 | int32_t Bits = IntBits == 64 ? 64 : 32; | |||
8845 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, Bits + 1); | |||
8846 | if (C == -1 || C == 0 || C > Bits) | |||
8847 | return SDValue(); | |||
8848 | ||||
8849 | MVT ResTy; | |||
8850 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); | |||
8851 | switch (NumLanes) { | |||
8852 | default: | |||
8853 | return SDValue(); | |||
8854 | case 2: | |||
8855 | ResTy = FloatBits == 32 ? MVT::v2i32 : MVT::v2i64; | |||
8856 | break; | |||
8857 | case 4: | |||
8858 | ResTy = FloatBits == 32 ? MVT::v4i32 : MVT::v4i64; | |||
8859 | break; | |||
8860 | } | |||
8861 | ||||
8862 | if (ResTy == MVT::v4i64 && DCI.isBeforeLegalizeOps()) | |||
8863 | return SDValue(); | |||
8864 | ||||
8865 | assert((ResTy != MVT::v4i64 || DCI.isBeforeLegalizeOps()) && | |||
8866 |        "Illegal vector type after legalization"); | |||
8867 | ||||
8868 | SDLoc DL(N); | |||
8869 | bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT; | |||
8870 | unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfp2fxs | |||
8871 | : Intrinsic::aarch64_neon_vcvtfp2fxu; | |||
8872 | SDValue FixConv = | |||
8873 | DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, ResTy, | |||
8874 | DAG.getConstant(IntrinsicOpcode, DL, MVT::i32), | |||
8875 | Op->getOperand(0), DAG.getConstant(C, DL, MVT::i32)); | |||
8876 | // We can handle smaller integers by generating an extra trunc. | |||
8877 | if (IntBits < FloatBits) | |||
8878 | FixConv = DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), FixConv); | |||
8879 | ||||
8880 | return FixConv; | |||
8881 | } | |||
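// Illustrative fold (not from the original source):
//   (v4i32 (fp_to_sint (fmul v4f32 X, splat 16.0)))
// becomes a single @llvm.aarch64.neon.vcvtfp2fxs(X, #4) fixed-point convert,
// because 16.0 == 2^4 and 4 fits the 32-bit element width.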
8882 | ||||
8883 | /// Fold a floating-point divide by power of two into fixed-point to | |||
8884 | /// floating-point conversion. | |||
8885 | static SDValue performFDivCombine(SDNode *N, SelectionDAG &DAG, | |||
8886 | TargetLowering::DAGCombinerInfo &DCI, | |||
8887 | const AArch64Subtarget *Subtarget) { | |||
8888 | if (!Subtarget->hasNEON()) | |||
8889 | return SDValue(); | |||
8890 | ||||
8891 | SDValue Op = N->getOperand(0); | |||
8892 | unsigned Opc = Op->getOpcode(); | |||
8893 | if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() || | |||
8894 | !Op.getOperand(0).getValueType().isSimple() || | |||
8895 | (Opc != ISD::SINT_TO_FP && Opc != ISD::UINT_TO_FP)) | |||
8896 | return SDValue(); | |||
8897 | ||||
8898 | SDValue ConstVec = N->getOperand(1); | |||
8899 | if (!isa<BuildVectorSDNode>(ConstVec)) | |||
8900 | return SDValue(); | |||
8901 | ||||
8902 | MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType(); | |||
8903 | int32_t IntBits = IntTy.getSizeInBits(); | |||
8904 | if (IntBits != 16 && IntBits != 32 && IntBits != 64) | |||
8905 | return SDValue(); | |||
8906 | ||||
8907 | MVT FloatTy = N->getSimpleValueType(0).getVectorElementType(); | |||
8908 | int32_t FloatBits = FloatTy.getSizeInBits(); | |||
8909 | if (FloatBits != 32 && FloatBits != 64) | |||
8910 | return SDValue(); | |||
8911 | ||||
8912 | // Avoid conversions where iN is larger than the float (e.g., i64 -> float). | |||
8913 | if (IntBits > FloatBits) | |||
8914 | return SDValue(); | |||
8915 | ||||
8916 | BitVector UndefElements; | |||
8917 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); | |||
8918 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, FloatBits + 1); | |||
8919 | if (C == -1 || C == 0 || C > FloatBits) | |||
8920 | return SDValue(); | |||
8921 | ||||
8922 | MVT ResTy; | |||
8923 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); | |||
8924 | switch (NumLanes) { | |||
8925 | default: | |||
8926 | return SDValue(); | |||
8927 | case 2: | |||
8928 | ResTy = FloatBits == 32 ? MVT::v2i32 : MVT::v2i64; | |||
8929 | break; | |||
8930 | case 4: | |||
8931 | ResTy = FloatBits == 32 ? MVT::v4i32 : MVT::v4i64; | |||
8932 | break; | |||
8933 | } | |||
8934 | ||||
8935 | if (ResTy == MVT::v4i64 && DCI.isBeforeLegalizeOps()) | |||
8936 | return SDValue(); | |||
8937 | ||||
8938 | SDLoc DL(N); | |||
8939 | SDValue ConvInput = Op.getOperand(0); | |||
8940 | bool IsSigned = Opc == ISD::SINT_TO_FP; | |||
8941 | if (IntBits < FloatBits) | |||
8942 | ConvInput = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, | |||
8943 | ResTy, ConvInput); | |||
8944 | ||||
8945 | unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfxs2fp | |||
8946 | : Intrinsic::aarch64_neon_vcvtfxu2fp; | |||
8947 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(), | |||
8948 | DAG.getConstant(IntrinsicOpcode, DL, MVT::i32), ConvInput, | |||
8949 | DAG.getConstant(C, DL, MVT::i32)); | |||
8950 | } | |||
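// Illustrative fold (not from the original source):
//   (v2f64 (fdiv (sint_to_fp v2i64 X), splat 64.0))
// becomes @llvm.aarch64.neon.vcvtfxs2fp(X, #6), i.e. an SCVTF with six
// fractional bits, since dividing by 2^6 is the fixed-point interpretation.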
8951 | ||||
8952 | /// An EXTR instruction is made up of two shifts, ORed together. This helper | |||
8953 | /// searches for and classifies those shifts. | |||
8954 | static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount, | |||
8955 | bool &FromHi) { | |||
8956 | if (N.getOpcode() == ISD::SHL) | |||
8957 | FromHi = false; | |||
8958 | else if (N.getOpcode() == ISD::SRL) | |||
8959 | FromHi = true; | |||
8960 | else | |||
8961 | return false; | |||
8962 | ||||
8963 | if (!isa<ConstantSDNode>(N.getOperand(1))) | |||
8964 | return false; | |||
8965 | ||||
8966 | ShiftAmount = N->getConstantOperandVal(1); | |||
8967 | Src = N->getOperand(0); | |||
8968 | return true; | |||
8969 | } | |||
8970 | ||||
8971 | /// EXTR instruction extracts a contiguous chunk of bits from two existing | |||
8972 | /// registers viewed as a high/low pair. This function looks for the pattern: | |||
8973 | /// <tt>(or (shl VAL1, \#N), (srl VAL2, \#RegWidth-N))</tt> and replaces it | |||
8974 | /// with an EXTR. Can't quite be done in TableGen because the two immediates | |||
8975 | /// aren't independent. | |||
8976 | static SDValue tryCombineToEXTR(SDNode *N, | |||
8977 | TargetLowering::DAGCombinerInfo &DCI) { | |||
8978 | SelectionDAG &DAG = DCI.DAG; | |||
8979 | SDLoc DL(N); | |||
8980 | EVT VT = N->getValueType(0); | |||
8981 | ||||
8982 | assert(N->getOpcode() == ISD::OR && "Unexpected root"); | |||
8983 | ||||
8984 | if (VT != MVT::i32 && VT != MVT::i64) | |||
8985 | return SDValue(); | |||
8986 | ||||
8987 | SDValue LHS; | |||
8988 | uint32_t ShiftLHS = 0; | |||
8989 | bool LHSFromHi = false; | |||
8990 | if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi)) | |||
8991 | return SDValue(); | |||
8992 | ||||
8993 | SDValue RHS; | |||
8994 | uint32_t ShiftRHS = 0; | |||
8995 | bool RHSFromHi = false; | |||
8996 | if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi)) | |||
8997 | return SDValue(); | |||
8998 | ||||
8999 | // If they're both trying to come from the high part of the register, they're | |||
9000 | // not really an EXTR. | |||
9001 | if (LHSFromHi == RHSFromHi) | |||
9002 | return SDValue(); | |||
9003 | ||||
9004 | if (ShiftLHS + ShiftRHS != VT.getSizeInBits()) | |||
9005 | return SDValue(); | |||
9006 | ||||
9007 | if (LHSFromHi) { | |||
9008 | std::swap(LHS, RHS); | |||
9009 | std::swap(ShiftLHS, ShiftRHS); | |||
9010 | } | |||
9011 | ||||
9012 | return DAG.getNode(AArch64ISD::EXTR, DL, VT, LHS, RHS, | |||
9013 | DAG.getConstant(ShiftRHS, DL, MVT::i64)); | |||
9014 | } | |||
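// Illustrative match (not from the original source), i32:
//   (or (shl x, #8), (srl y, #24)) with 8 + 24 == 32 becomes
//   (AArch64ISD::EXTR x, y, #24)
// replacing two shifts and an OR with one EXTR.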
9015 | ||||
9016 | static SDValue tryCombineToBSL(SDNode *N, | |||
9017 | TargetLowering::DAGCombinerInfo &DCI) { | |||
9018 | EVT VT = N->getValueType(0); | |||
9019 | SelectionDAG &DAG = DCI.DAG; | |||
9020 | SDLoc DL(N); | |||
9021 | ||||
9022 | if (!VT.isVector()) | |||
9023 | return SDValue(); | |||
9024 | ||||
9025 | SDValue N0 = N->getOperand(0); | |||
9026 | if (N0.getOpcode() != ISD::AND) | |||
9027 | return SDValue(); | |||
9028 | ||||
9029 | SDValue N1 = N->getOperand(1); | |||
9030 | if (N1.getOpcode() != ISD::AND) | |||
9031 | return SDValue(); | |||
9032 | ||||
9033 | // We only have to look for constant vectors here since the general, variable | |||
9034 | // case can be handled in TableGen. | |||
9035 | unsigned Bits = VT.getScalarSizeInBits(); | |||
9036 | uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1); | |||
9037 | for (int i = 1; i >= 0; --i) | |||
9038 | for (int j = 1; j >= 0; --j) { | |||
9039 | BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(i)); | |||
9040 | BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(j)); | |||
9041 | if (!BVN0 || !BVN1) | |||
9042 | continue; | |||
9043 | ||||
9044 | bool FoundMatch = true; | |||
9045 | for (unsigned k = 0; k < VT.getVectorNumElements(); ++k) { | |||
9046 | ConstantSDNode *CN0 = dyn_cast<ConstantSDNode>(BVN0->getOperand(k)); | |||
9047 | ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(BVN1->getOperand(k)); | |||
9048 | if (!CN0 || !CN1 || | |||
9049 | CN0->getZExtValue() != (BitMask & ~CN1->getZExtValue())) { | |||
9050 | FoundMatch = false; | |||
9051 | break; | |||
9052 | } | |||
9053 | } | |||
9054 | ||||
9055 | if (FoundMatch) | |||
9056 | return DAG.getNode(AArch64ISD::BSL, DL, VT, SDValue(BVN0, 0), | |||
9057 | N0->getOperand(1 - i), N1->getOperand(1 - j)); | |||
9058 | } | |||
9059 | ||||
9060 | return SDValue(); | |||
9061 | } | |||
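// Illustrative match (not from the original source), v4i32 with
// C = splat(0x0000ffff): (or (and a, C), (and b, ~C)) becomes
// (AArch64ISD::BSL C, a, b), taking each bit from a where C is set and
// from b where it is clear.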
9062 | ||||
9063 | static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, | |||
9064 | const AArch64Subtarget *Subtarget) { | |||
9065 | // Attempt to form an EXTR from (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) | |||
9066 | SelectionDAG &DAG = DCI.DAG; | |||
9067 | EVT VT = N->getValueType(0); | |||
9068 | ||||
9069 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) | |||
9070 | return SDValue(); | |||
9071 | ||||
9072 | if (SDValue Res = tryCombineToEXTR(N, DCI)) | |||
9073 | return Res; | |||
9074 | ||||
9075 | if (SDValue Res = tryCombineToBSL(N, DCI)) | |||
9076 | return Res; | |||
9077 | ||||
9078 | return SDValue(); | |||
9079 | } | |||
9080 | ||||
9081 | static SDValue performSRLCombine(SDNode *N, | |||
9082 | TargetLowering::DAGCombinerInfo &DCI) { | |||
9083 | SelectionDAG &DAG = DCI.DAG; | |||
9084 | EVT VT = N->getValueType(0); | |||
9085 | if (VT != MVT::i32 && VT != MVT::i64) | |||
9086 | return SDValue(); | |||
9087 | ||||
9088 | // Canonicalize (srl (bswap i32 x), 16) to (rotr (bswap i32 x), 16), if the | |||
9089 | // high 16-bits of x are zero. Similarly, canonicalize (srl (bswap i64 x), 32) | |||
9090 | // to (rotr (bswap i64 x), 32), if the high 32-bits of x are zero. | |||
9091 | SDValue N0 = N->getOperand(0); | |||
9092 | if (N0.getOpcode() == ISD::BSWAP) { | |||
9093 | SDLoc DL(N); | |||
9094 | SDValue N1 = N->getOperand(1); | |||
9095 | SDValue N00 = N0.getOperand(0); | |||
9096 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { | |||
9097 | uint64_t ShiftAmt = C->getZExtValue(); | |||
9098 | if (VT == MVT::i32 && ShiftAmt == 16 && | |||
9099 | DAG.MaskedValueIsZero(N00, APInt::getHighBitsSet(32, 16))) | |||
9100 | return DAG.getNode(ISD::ROTR, DL, VT, N0, N1); | |||
9101 | if (VT == MVT::i64 && ShiftAmt == 32 && | |||
9102 | DAG.MaskedValueIsZero(N00, APInt::getHighBitsSet(64, 32))) | |||
9103 | return DAG.getNode(ISD::ROTR, DL, VT, N0, N1); | |||
9104 | } | |||
9105 | } | |||
9106 | return SDValue(); | |||
9107 | } | |||
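// Illustrative: if the top 16 bits of x are known zero, then
//   (srl (bswap i32 x), 16) --> (rotr (bswap i32 x), 16)
// which downstream patterns can select more cheaply (e.g. as a REV16-style
// byte swap) than a bswap plus a shift.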
9108 | ||||
9109 | static SDValue performBitcastCombine(SDNode *N, | |||
9110 | TargetLowering::DAGCombinerInfo &DCI, | |||
9111 | SelectionDAG &DAG) { | |||
9112 | // Wait 'til after everything is legalized to try this. That way we have | |||
9113 | // legal vector types and such. | |||
9114 | if (DCI.isBeforeLegalizeOps()) | |||
9115 | return SDValue(); | |||
9116 | ||||
9117 | // Remove extraneous bitcasts around an extract_subvector. | |||
9118 | // For example, | |||
9119 | // (v4i16 (bitconvert | |||
9120 | // (extract_subvector (v2i64 (bitconvert (v8i16 ...)), (i64 1))))) | |||
9121 | // becomes | |||
9122 | // (extract_subvector ((v8i16 ...), (i64 4))) | |||
9123 | ||||
9124 | // Only interested in 64-bit vectors as the ultimate result. | |||
9125 | EVT VT = N->getValueType(0); | |||
9126 | if (!VT.isVector()) | |||
9127 | return SDValue(); | |||
9128 | if (VT.getSimpleVT().getSizeInBits() != 64) | |||
9129 | return SDValue(); | |||
9130 | // Is the operand an extract_subvector starting at the beginning or halfway | |||
9131 | // point of the vector? A low half may also come through as an | |||
9132 | // EXTRACT_SUBREG, so look for that, too. | |||
9133 | SDValue Op0 = N->getOperand(0); | |||
9134 | if (Op0->getOpcode() != ISD::EXTRACT_SUBVECTOR && | |||
9135 | !(Op0->isMachineOpcode() && | |||
9136 | Op0->getMachineOpcode() == AArch64::EXTRACT_SUBREG)) | |||
9137 | return SDValue(); | |||
9138 | uint64_t idx = cast<ConstantSDNode>(Op0->getOperand(1))->getZExtValue(); | |||
9139 | if (Op0->getOpcode() == ISD::EXTRACT_SUBVECTOR) { | |||
9140 | if (Op0->getValueType(0).getVectorNumElements() != idx && idx != 0) | |||
9141 | return SDValue(); | |||
9142 | } else if (Op0->getMachineOpcode() == AArch64::EXTRACT_SUBREG) { | |||
9143 | if (idx != AArch64::dsub) | |||
9144 | return SDValue(); | |||
9145 | // The dsub reference is equivalent to a lane zero subvector reference. | |||
9146 | idx = 0; | |||
9147 | } | |||
9148 | // Look through the bitcast of the input to the extract. | |||
9149 | if (Op0->getOperand(0)->getOpcode() != ISD::BITCAST) | |||
9150 | return SDValue(); | |||
9151 | SDValue Source = Op0->getOperand(0)->getOperand(0); | |||
9152 | // If the source type has twice the number of elements as our destination | |||
9153 | // type, we know this is an extract of the high or low half of the vector. | |||
9154 | EVT SVT = Source->getValueType(0); | |||
9155 | if (!SVT.isVector() || | |||
9156 | SVT.getVectorNumElements() != VT.getVectorNumElements() * 2) | |||
9157 | return SDValue(); | |||
9158 | ||||
9159 | LLVM_DEBUG( | |||
9160 |     dbgs() << "aarch64-lower: bitcast extract_subvector simplification\n"); | |||
9161 | ||||
9162 | // Create the simplified form to just extract the low or high half of the | |||
9163 | // vector directly rather than bothering with the bitcasts. | |||
9164 | SDLoc dl(N); | |||
9165 | unsigned NumElements = VT.getVectorNumElements(); | |||
9166 | if (idx) { | |||
9167 | SDValue HalfIdx = DAG.getConstant(NumElements, dl, MVT::i64); | |||
9168 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Source, HalfIdx); | |||
9169 | } else { | |||
9170 | SDValue SubReg = DAG.getTargetConstant(AArch64::dsub, dl, MVT::i32); | |||
9171 | return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, VT, | |||
9172 | Source, SubReg), | |||
9173 | 0); | |||
9174 | } | |||
9175 | } | |||
9176 | ||||
9177 | static SDValue performConcatVectorsCombine(SDNode *N, | |||
9178 | TargetLowering::DAGCombinerInfo &DCI, | |||
9179 | SelectionDAG &DAG) { | |||
9180 | SDLoc dl(N); | |||
9181 | EVT VT = N->getValueType(0); | |||
9182 | SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); | |||
9183 | ||||
9184 | // Optimize concat_vectors of truncated vectors, where the intermediate | |||
9185 | // type is illegal, to avoid said illegality, e.g., | |||
9186 | // (v4i16 (concat_vectors (v2i16 (truncate (v2i64))), | |||
9187 | // (v2i16 (truncate (v2i64))))) | |||
9188 | // -> | |||
9189 | // (v4i16 (truncate (vector_shuffle (v4i32 (bitcast (v2i64))), | |||
9190 | // (v4i32 (bitcast (v2i64))), | |||
9191 | // <0, 2, 4, 6>))) | |||
9192 | // This isn't really target-specific, but ISD::TRUNCATE legality isn't keyed | |||
9193 | // on both input and result type, so we might generate worse code. | |||
9194 | // On AArch64 we know it's fine for v2i64->v4i16 and v4i32->v8i8. | |||
9195 | if (N->getNumOperands() == 2 && | |||
9196 | N0->getOpcode() == ISD::TRUNCATE && | |||
9197 | N1->getOpcode() == ISD::TRUNCATE) { | |||
9198 | SDValue N00 = N0->getOperand(0); | |||
9199 | SDValue N10 = N1->getOperand(0); | |||
9200 | EVT N00VT = N00.getValueType(); | |||
9201 | ||||
9202 | if (N00VT == N10.getValueType() && | |||
9203 | (N00VT == MVT::v2i64 || N00VT == MVT::v4i32) && | |||
9204 | N00VT.getScalarSizeInBits() == 4 * VT.getScalarSizeInBits()) { | |||
9205 | MVT MidVT = (N00VT == MVT::v2i64 ? MVT::v4i32 : MVT::v8i16); | |||
9206 | SmallVector<int, 8> Mask(MidVT.getVectorNumElements()); | |||
9207 | for (size_t i = 0; i < Mask.size(); ++i) | |||
9208 | Mask[i] = i * 2; | |||
9209 | return DAG.getNode(ISD::TRUNCATE, dl, VT, | |||
9210 | DAG.getVectorShuffle( | |||
9211 | MidVT, dl, | |||
9212 | DAG.getNode(ISD::BITCAST, dl, MidVT, N00), | |||
9213 | DAG.getNode(ISD::BITCAST, dl, MidVT, N10), Mask)); | |||
9214 | } | |||
9215 | } | |||
9216 | ||||
9217 | // Wait 'til after everything is legalized to try this. That way we have | |||
9218 | // legal vector types and such. | |||
9219 | if (DCI.isBeforeLegalizeOps()) | |||
9220 | return SDValue(); | |||
9221 | ||||
9222 | // If we see a (concat_vectors (v1x64 A), (v1x64 A)) it's really a vector | |||
9223 | // splat. The indexed instructions are going to be expecting a DUPLANE64, so | |||
9224 | // canonicalise to that. | |||
9225 | if (N0 == N1 && VT.getVectorNumElements() == 2) { | |||
9226 | assert(VT.getScalarSizeInBits() == 64); | |||
9227 | return DAG.getNode(AArch64ISD::DUPLANE64, dl, VT, WidenVector(N0, DAG), | |||
9228 | DAG.getConstant(0, dl, MVT::i64)); | |||
9229 | } | |||
9230 | ||||
9231 | // Canonicalise concat_vectors so that the right-hand vector has as few | |||
9232 | // bit-casts as possible before its real operation. The primary matching | |||
9233 | // destination for these operations will be the narrowing "2" instructions, | |||
9234 | // which depend on the operation being performed on this right-hand vector. | |||
9235 | // For example, | |||
9236 | // (concat_vectors LHS, (v1i64 (bitconvert (v4i16 RHS)))) | |||
9237 | // becomes | |||
9238 | // (bitconvert (concat_vectors (v4i16 (bitconvert LHS)), RHS)) | |||
9239 | ||||
9240 | if (N1->getOpcode() != ISD::BITCAST) | |||
9241 | return SDValue(); | |||
9242 | SDValue RHS = N1->getOperand(0); | |||
9243 | MVT RHSTy = RHS.getValueType().getSimpleVT(); | |||
9244 | // If the RHS is not a vector, this is not the pattern we're looking for. | |||
9245 | if (!RHSTy.isVector()) | |||
9246 | return SDValue(); | |||
9247 | ||||
9248 | LLVM_DEBUG( | |||
9249 |     dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n"); | |||
9250 | ||||
9251 | MVT ConcatTy = MVT::getVectorVT(RHSTy.getVectorElementType(), | |||
9252 | RHSTy.getVectorNumElements() * 2); | |||
9253 | return DAG.getNode(ISD::BITCAST, dl, VT, | |||
9254 | DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatTy, | |||
9255 | DAG.getNode(ISD::BITCAST, dl, RHSTy, N0), | |||
9256 | RHS)); | |||
9257 | } | |||
9258 | ||||
9259 | static SDValue tryCombineFixedPointConvert(SDNode *N, | |||
9260 | TargetLowering::DAGCombinerInfo &DCI, | |||
9261 | SelectionDAG &DAG) { | |||
9262 | // Wait until after everything is legalized to try this. That way we have | |||
9263 | // legal vector types and such. | |||
9264 | if (DCI.isBeforeLegalizeOps()) | |||
9265 | return SDValue(); | |||
9266 | // Transform a scalar conversion of a value from a lane extract into a | |||
9267 | // lane extract of a vector conversion. E.g., from foo1 to foo2: | |||
9268 | // double foo1(int64x2_t a) { return vcvtd_n_f64_s64(a[1], 9); } | |||
9269 | // double foo2(int64x2_t a) { return vcvtq_n_f64_s64(a, 9)[1]; } | |||
9270 | // | |||
9271 | // The second form interacts better with instruction selection and the | |||
9272 | // register allocator to avoid cross-class register copies that aren't | |||
9273 | // coalescable due to a lane reference. | |||
9274 | ||||
9275 | // Check the operand and see if it originates from a lane extract. | |||
9276 | SDValue Op1 = N->getOperand(1); | |||
9277 | if (Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { | |||
9278 | // Yep, no additional predication needed. Perform the transform. | |||
9279 | SDValue IID = N->getOperand(0); | |||
9280 | SDValue Shift = N->getOperand(2); | |||
9281 | SDValue Vec = Op1.getOperand(0); | |||
9282 | SDValue Lane = Op1.getOperand(1); | |||
9283 | EVT ResTy = N->getValueType(0); | |||
9284 | EVT VecResTy; | |||
9285 | SDLoc DL(N); | |||
9286 | ||||
9287 | // The vector width should be 128 bits by the time we get here, even | |||
9288 | // if it started as 64 bits (the extract_vector handling will have | |||
9289 | // done so). | |||
9290 | assert(Vec.getValueSizeInBits() == 128 && | |||
9291 |        "unexpected vector size on extract_vector_elt!"); | |||
9292 | if (Vec.getValueType() == MVT::v4i32) | |||
9293 | VecResTy = MVT::v4f32; | |||
9294 | else if (Vec.getValueType() == MVT::v2i64) | |||
9295 | VecResTy = MVT::v2f64; | |||
9296 | else | |||
9297 | llvm_unreachable("unexpected vector type!"); | |||
9298 | ||||
9299 | SDValue Convert = | |||
9300 | DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VecResTy, IID, Vec, Shift); | |||
9301 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResTy, Convert, Lane); | |||
9302 | } | |||
9303 | return SDValue(); | |||
9304 | } | |||
9305 | ||||
9306 | // AArch64 high-vector "long" operations are formed by performing the non-high | |||
9307 | // version on an extract_subvector of each operand which gets the high half: | |||
9308 | // | |||
9309 | // (longop2 LHS, RHS) == (longop (extract_high LHS), (extract_high RHS)) | |||
9310 | // | |||
9311 | // However, there are cases which don't have an extract_high explicitly, but | |||
9312 | // have another operation that can be made compatible with one for free. For | |||
9313 | // example: | |||
9314 | // | |||
9315 | // (dupv64 scalar) --> (extract_high (dup128 scalar)) | |||
9316 | // | |||
9317 | // This routine does the actual conversion of such DUPs, once outer routines | |||
9318 | // have determined that everything else is in order. | |||
9319 | // It also supports immediate DUP-like nodes (MOVI/MVNi), which we can fold | |||
9320 | // similarly here. | |||
9321 | static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) { | |||
9322 | switch (N.getOpcode()) { | |||
9323 | case AArch64ISD::DUP: | |||
9324 | case AArch64ISD::DUPLANE8: | |||
9325 | case AArch64ISD::DUPLANE16: | |||
9326 | case AArch64ISD::DUPLANE32: | |||
9327 | case AArch64ISD::DUPLANE64: | |||
9328 | case AArch64ISD::MOVI: | |||
9329 | case AArch64ISD::MOVIshift: | |||
9330 | case AArch64ISD::MOVIedit: | |||
9331 | case AArch64ISD::MOVImsl: | |||
9332 | case AArch64ISD::MVNIshift: | |||
9333 | case AArch64ISD::MVNImsl: | |||
9334 | break; | |||
9335 | default: | |||
9336 | // FMOV could be supported, but isn't very useful, as it would only occur | |||
9337 | // if you passed a bitcast'd floating-point immediate to an eligible long | |||
9338 | // integer op (addl, smull, ...). | |||
9339 | return SDValue(); | |||
9340 | } | |||
9341 | ||||
9342 | MVT NarrowTy = N.getSimpleValueType(); | |||
9343 | if (!NarrowTy.is64BitVector()) | |||
9344 | return SDValue(); | |||
9345 | ||||
9346 | MVT ElementTy = NarrowTy.getVectorElementType(); | |||
9347 | unsigned NumElems = NarrowTy.getVectorNumElements(); | |||
9348 | MVT NewVT = MVT::getVectorVT(ElementTy, NumElems * 2); | |||
9349 | ||||
9350 | SDLoc dl(N); | |||
9351 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NarrowTy, | |||
9352 | DAG.getNode(N->getOpcode(), dl, NewVT, N->ops()), | |||
9353 | DAG.getConstant(NumElems, dl, MVT::i64)); | |||
9354 | } | |||
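// Illustrative conversion (not from the original source): a v4i16
// (AArch64ISD::DUP s) is rebuilt as
//   (extract_subvector (v8i16 (DUP s)), #4)
// so the long-operation patterns see an extract of the high half.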
9355 | ||||
9356 | static bool isEssentiallyExtractSubvector(SDValue N) { | |||
9357 | if (N.getOpcode() == ISD::EXTRACT_SUBVECTOR) | |||
9358 | return true; | |||
9359 | ||||
9360 | return N.getOpcode() == ISD::BITCAST && | |||
9361 | N.getOperand(0).getOpcode() == ISD::EXTRACT_SUBVECTOR; | |||
9362 | } | |||
9363 | ||||
9364 | /// Helper structure to keep track of ISD::SET_CC operands. | |||
9365 | struct GenericSetCCInfo { | |||
9366 | const SDValue *Opnd0; | |||
9367 | const SDValue *Opnd1; | |||
9368 | ISD::CondCode CC; | |||
9369 | }; | |||
9370 | ||||
9371 | /// Helper structure to keep track of a SET_CC lowered into AArch64 code. | |||
9372 | struct AArch64SetCCInfo { | |||
9373 | const SDValue *Cmp; | |||
9374 | AArch64CC::CondCode CC; | |||
9375 | }; | |||
9376 | ||||
9377 | /// Helper structure to keep track of SetCC information. | |||
9378 | union SetCCInfo { | |||
9379 | GenericSetCCInfo Generic; | |||
9380 | AArch64SetCCInfo AArch64; | |||
9381 | }; | |||
9382 | ||||
9383 | /// Helper structure to be able to read SetCC information. If set to | |||
9384 | /// true, IsAArch64 field, Info is a AArch64SetCCInfo, otherwise Info is a | |||
9385 | /// GenericSetCCInfo. | |||
9386 | struct SetCCInfoAndKind { | |||
9387 | SetCCInfo Info; | |||
9388 | bool IsAArch64; | |||
9389 | }; | |||
9390 | ||||
9391 | /// Check whether or not \p Op is a SET_CC operation, either a generic or | |||
9392 | /// an AArch64 lowered one. | |||
9393 | /// \p SetCCInfo is filled accordingly. | |||
9394 | /// \post SetCCInfo is meaningful only when this function returns true. | |||
9396 | /// \return True when Op is a kind of SET_CC operation. | |||
9397 | static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo) { | |||
9398 | // If this is a setcc, this is straightforward. | |||
9399 | if (Op.getOpcode() == ISD::SETCC) { | |||
9400 | SetCCInfo.Info.Generic.Opnd0 = &Op.getOperand(0); | |||
9401 | SetCCInfo.Info.Generic.Opnd1 = &Op.getOperand(1); | |||
9402 | SetCCInfo.Info.Generic.CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); | |||
9403 | SetCCInfo.IsAArch64 = false; | |||
9404 | return true; | |||
9405 | } | |||
9406 | // Otherwise, check if this is a matching csel instruction. | |||
9407 | // In other words: | |||
9408 | // - csel 1, 0, cc | |||
9409 | // - csel 0, 1, !cc | |||
9410 | if (Op.getOpcode() != AArch64ISD::CSEL) | |||
9411 | return false; | |||
9412 | // Set the information about the operands. | |||
9413 | // TODO: we want the operands of the Cmp, not the csel. | |||
9414 | SetCCInfo.Info.AArch64.Cmp = &Op.getOperand(3); | |||
9415 | SetCCInfo.IsAArch64 = true; | |||
9416 | SetCCInfo.Info.AArch64.CC = static_cast<AArch64CC::CondCode>( | |||
9417 | cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); | |||
9418 | ||||
9419 | // Check that the operands match the constraints: | |||
9420 | // (1) Both operands must be constants. | |||
9421 | // (2) One must be 1 and the other must be 0. | |||
9422 | ConstantSDNode *TValue = dyn_cast<ConstantSDNode>(Op.getOperand(0)); | |||
9423 | ConstantSDNode *FValue = dyn_cast<ConstantSDNode>(Op.getOperand(1)); | |||
9424 | ||||
9425 | // Check (1). | |||
9426 | if (!TValue || !FValue) | |||
9427 | return false; | |||
9428 | ||||
9429 | // Check (2). | |||
9430 | if (!TValue->isOne()) { | |||
9431 | // Update the comparison when we are interested in !cc. | |||
9432 | std::swap(TValue, FValue); | |||
9433 | SetCCInfo.Info.AArch64.CC = | |||
9434 | AArch64CC::getInvertedCondCode(SetCCInfo.Info.AArch64.CC); | |||
9435 | } | |||
9436 | return TValue->isOne() && FValue->isNullValue(); | |||
9437 | } | |||
9438 | ||||
9439 | // Returns true if Op is setcc or zext of setcc. | |||
9440 | static bool isSetCCOrZExtSetCC(const SDValue& Op, SetCCInfoAndKind &Info) { | |||
9441 | if (isSetCC(Op, Info)) | |||
9442 | return true; | |||
9443 | return ((Op.getOpcode() == ISD::ZERO_EXTEND) && | |||
9444 | isSetCC(Op->getOperand(0), Info)); | |||
9445 | } | |||
9446 | ||||
9447 | // The folding we want to perform is: | |||
9448 | // (add x, [zext] (setcc cc ...) ) | |||
9449 | // --> | |||
9450 | // (csel x, (add x, 1), !cc ...) | |||
9451 | // | |||
9452 | // The latter will get matched to a CSINC instruction. | |||
9453 | static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG) { | |||
9454 | assert(Op && Op->getOpcode() == ISD::ADD && "Unexpected operation!"); | |||
9455 | SDValue LHS = Op->getOperand(0); | |||
9456 | SDValue RHS = Op->getOperand(1); | |||
9457 | SetCCInfoAndKind InfoAndKind; | |||
9458 | ||||
9459 | // If neither operand is a SET_CC, give up. | |||
9460 | if (!isSetCCOrZExtSetCC(LHS, InfoAndKind)) { | |||
9461 | std::swap(LHS, RHS); | |||
9462 | if (!isSetCCOrZExtSetCC(LHS, InfoAndKind)) | |||
9463 | return SDValue(); | |||
9464 | } | |||
9465 | ||||
9466 | // FIXME: This could be generalized to work for FP comparisons. | |||
9467 | EVT CmpVT = InfoAndKind.IsAArch64 | |||
9468 | ? InfoAndKind.Info.AArch64.Cmp->getOperand(0).getValueType() | |||
9469 | : InfoAndKind.Info.Generic.Opnd0->getValueType(); | |||
9470 | if (CmpVT != MVT::i32 && CmpVT != MVT::i64) | |||
9471 | return SDValue(); | |||
9472 | ||||
9473 | SDValue CCVal; | |||
9474 | SDValue Cmp; | |||
9475 | SDLoc dl(Op); | |||
9476 | if (InfoAndKind.IsAArch64) { | |||
9477 | CCVal = DAG.getConstant( | |||
9478 | AArch64CC::getInvertedCondCode(InfoAndKind.Info.AArch64.CC), dl, | |||
9479 | MVT::i32); | |||
9480 | Cmp = *InfoAndKind.Info.AArch64.Cmp; | |||
9481 | } else | |||
9482 | Cmp = getAArch64Cmp(*InfoAndKind.Info.Generic.Opnd0, | |||
9483 | *InfoAndKind.Info.Generic.Opnd1, | |||
9484 | ISD::getSetCCInverse(InfoAndKind.Info.Generic.CC, true), | |||
9485 | CCVal, DAG, dl); | |||
9486 | ||||
9487 | EVT VT = Op->getValueType(0); | |||
9488 | LHS = DAG.getNode(ISD::ADD, dl, VT, RHS, DAG.getConstant(1, dl, VT)); | |||
9489 | return DAG.getNode(AArch64ISD::CSEL, dl, VT, RHS, LHS, CCVal, Cmp); | |||
9490 | } | |||
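// Illustrative fold (not from the original source):
//   (add x, (zext (setcc a, b, seteq)))
// becomes (csel x, (add x, 1), ne, (cmp a, b)), which instruction selection
// matches as CSINC: x when a != b, x + 1 when a == b.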
9491 | ||||
9492 | // The basic add/sub long vector instructions have variants with "2" on the end | |||
9493 | // which act on the high-half of their inputs. They are normally matched by | |||
9494 | // patterns like: | |||
9495 | // | |||
9496 | // (add (zeroext (extract_high LHS)), | |||
9497 | // (zeroext (extract_high RHS))) | |||
9498 | // -> uaddl2 vD, vN, vM | |||
9499 | // | |||
9500 | // However, if one of the extracts is something like a duplicate, this | |||
9501 | // instruction can still be used profitably. This function puts the DAG into a | |||
9502 | // more appropriate form for those patterns to trigger. | |||
9503 | static SDValue performAddSubLongCombine(SDNode *N, | |||
9504 | TargetLowering::DAGCombinerInfo &DCI, | |||
9505 | SelectionDAG &DAG) { | |||
9506 | if (DCI.isBeforeLegalizeOps()) | |||
9507 | return SDValue(); | |||
9508 | ||||
9509 | MVT VT = N->getSimpleValueType(0); | |||
9510 | if (!VT.is128BitVector()) { | |||
9511 | if (N->getOpcode() == ISD::ADD) | |||
9512 | return performSetccAddFolding(N, DAG); | |||
9513 | return SDValue(); | |||
9514 | } | |||
9515 | ||||
9516 | // Make sure both branches are extended in the same way. | |||
9517 | SDValue LHS = N->getOperand(0); | |||
9518 | SDValue RHS = N->getOperand(1); | |||
9519 | if ((LHS.getOpcode() != ISD::ZERO_EXTEND && | |||
9520 | LHS.getOpcode() != ISD::SIGN_EXTEND) || | |||
9521 | LHS.getOpcode() != RHS.getOpcode()) | |||
9522 | return SDValue(); | |||
9523 | ||||
9524 | unsigned ExtType = LHS.getOpcode(); | |||
9525 | ||||
9526 | // It's not worth doing if at least one of the inputs isn't already an | |||
9527 | // extract, but we don't know which it'll be so we have to try both. | |||
9528 | if (isEssentiallyExtractSubvector(LHS.getOperand(0))) { | |||
9529 | RHS = tryExtendDUPToExtractHigh(RHS.getOperand(0), DAG); | |||
9530 | if (!RHS.getNode()) | |||
9531 | return SDValue(); | |||
9532 | ||||
9533 | RHS = DAG.getNode(ExtType, SDLoc(N), VT, RHS); | |||
9534 | } else if (isEssentiallyExtractSubvector(RHS.getOperand(0))) { | |||
9535 | LHS = tryExtendDUPToExtractHigh(LHS.getOperand(0), DAG); | |||
9536 | if (!LHS.getNode()) | |||
9537 | return SDValue(); | |||
9538 | ||||
9539 | LHS = DAG.getNode(ExtType, SDLoc(N), VT, LHS); | |||
9540 | } | |||
9541 | ||||
9542 | return DAG.getNode(N->getOpcode(), SDLoc(N), VT, LHS, RHS); | |||
9543 | } | |||
9544 | ||||
9545 | // Massage DAGs which we can use the high-half "long" operations on into | |||
9546 | // something isel will recognize better. E.g. | |||
9547 | // | |||
9548 | // (aarch64_neon_umull (extract_high vec) (dupv64 scalar)) --> | |||
9549 | // (aarch64_neon_umull (extract_high (v2i64 vec))) | |||
9550 | // (extract_high (v2i64 (dup128 scalar))))) | |||
9551 | // | |||
9552 | static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N, | |||
9553 | TargetLowering::DAGCombinerInfo &DCI, | |||
9554 | SelectionDAG &DAG) { | |||
9555 | if (DCI.isBeforeLegalizeOps()) | |||
9556 | return SDValue(); | |||
9557 | ||||
9558 | SDValue LHS = N->getOperand(1); | |||
9559 | SDValue RHS = N->getOperand(2); | |||
9560 | assert(LHS.getValueType().is64BitVector() && | |||
9561 |        RHS.getValueType().is64BitVector() && | |||
9562 |        "unexpected shape for long operation"); | |||
9563 | ||||
9564 | // Either node could be a DUP, but it's not worth doing both of them (you'd | |||
9565 | // just as well use the non-high version) so look for a corresponding extract | |||
9566 | // operation on the other "wing". | |||
9567 | if (isEssentiallyExtractSubvector(LHS)) { | |||
9568 | RHS = tryExtendDUPToExtractHigh(RHS, DAG); | |||
9569 | if (!RHS.getNode()) | |||
9570 | return SDValue(); | |||
9571 | } else if (isEssentiallyExtractSubvector(RHS)) { | |||
9572 | LHS = tryExtendDUPToExtractHigh(LHS, DAG); | |||
9573 | if (!LHS.getNode()) | |||
9574 | return SDValue(); | |||
9575 | } | |||
9576 | ||||
9577 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), N->getValueType(0), | |||
9578 | N->getOperand(0), LHS, RHS); | |||
9579 | } | |||
9580 | ||||
9581 | static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) { | |||
9582 | MVT ElemTy = N->getSimpleValueType(0).getScalarType(); | |||
9583 | unsigned ElemBits = ElemTy.getSizeInBits(); | |||
9584 | ||||
9585 | int64_t ShiftAmount; | |||
9586 | if (BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(2))) { | |||
9587 | APInt SplatValue, SplatUndef; | |||
9588 | unsigned SplatBitSize; | |||
9589 | bool HasAnyUndefs; | |||
9590 | if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, | |||
9591 | HasAnyUndefs, ElemBits) || | |||
9592 | SplatBitSize != ElemBits) | |||
9593 | return SDValue(); | |||
9594 | ||||
9595 | ShiftAmount = SplatValue.getSExtValue(); | |||
9596 | } else if (ConstantSDNode *CVN = dyn_cast<ConstantSDNode>(N->getOperand(2))) { | |||
9597 | ShiftAmount = CVN->getSExtValue(); | |||
9598 | } else | |||
9599 | return SDValue(); | |||
9600 | ||||
9601 | unsigned Opcode; | |||
9602 | bool IsRightShift; | |||
9603 | switch (IID) { | |||
9604 | default: | |||
9605 | llvm_unreachable("Unknown shift intrinsic"); | |||
9606 | case Intrinsic::aarch64_neon_sqshl: | |||
9607 | Opcode = AArch64ISD::SQSHL_I; | |||
9608 | IsRightShift = false; | |||
9609 | break; | |||
9610 | case Intrinsic::aarch64_neon_uqshl: | |||
9611 | Opcode = AArch64ISD::UQSHL_I; | |||
9612 | IsRightShift = false; | |||
9613 | break; | |||
9614 | case Intrinsic::aarch64_neon_srshl: | |||
9615 | Opcode = AArch64ISD::SRSHR_I; | |||
9616 | IsRightShift = true; | |||
9617 | break; | |||
9618 | case Intrinsic::aarch64_neon_urshl: | |||
9619 | Opcode = AArch64ISD::URSHR_I; | |||
9620 | IsRightShift = true; | |||
9621 | break; | |||
9622 | case Intrinsic::aarch64_neon_sqshlu: | |||
9623 | Opcode = AArch64ISD::SQSHLU_I; | |||
9624 | IsRightShift = false; | |||
9625 | break; | |||
9626 | } | |||
9627 | ||||
9628 | if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits) { | |||
9629 | SDLoc dl(N); | |||
9630 | return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1), | |||
9631 | DAG.getConstant(-ShiftAmount, dl, MVT::i32)); | |||
9632 | } else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits) { | |||
9633 | SDLoc dl(N); | |||
9634 | return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1), | |||
9635 | DAG.getConstant(ShiftAmount, dl, MVT::i32)); | |||
9636 | } | |||
9637 | ||||
9638 | return SDValue(); | |||
9639 | } | |||
9640 | ||||
9641 | // The CRC32[BH] instructions ignore the high bits of their data operand. Since | |||
9642 | // the intrinsics must be legal and take an i32, this means there's almost | |||
9643 | // certainly going to be a zext in the DAG which we can eliminate. | |||
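// For example (a sketch):
//   (int_aarch64_crc32b CRC, (and DATA, 0xff)) --> (int_aarch64_crc32b CRC, DATA)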
9644 | static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG) { | |||
9645 | SDValue AndN = N->getOperand(2); | |||
9646 | if (AndN.getOpcode() != ISD::AND) | |||
9647 | return SDValue(); | |||
9648 | ||||
9649 | ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(AndN.getOperand(1)); | |||
9650 | if (!CMask || CMask->getZExtValue() != Mask) | |||
9651 | return SDValue(); | |||
9652 | ||||
9653 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), MVT::i32, | |||
9654 | N->getOperand(0), N->getOperand(1), AndN.getOperand(0)); | |||
9655 | } | |||
9656 | ||||
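// Lower an across-lanes intrinsic to the corresponding AArch64ISD node, which
// produces its result in a vector, and read the scalar back from lane 0, e.g.:
//   i32 (int_aarch64_neon_uaddv v4i32:V) --> (extract_vector_elt (UADDV V), 0)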
9657 | static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N, | |||
9658 | SelectionDAG &DAG) { | |||
9659 | SDLoc dl(N); | |||
9660 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), | |||
9661 | DAG.getNode(Opc, dl, | |||
9662 | N->getOperand(1).getSimpleValueType(), | |||
9663 | N->getOperand(1)), | |||
9664 | DAG.getConstant(0, dl, MVT::i64)); | |||
9665 | } | |||
9666 | ||||
9667 | static SDValue performIntrinsicCombine(SDNode *N, | |||
9668 | TargetLowering::DAGCombinerInfo &DCI, | |||
9669 | const AArch64Subtarget *Subtarget) { | |||
9670 | SelectionDAG &DAG = DCI.DAG; | |||
9671 | unsigned IID = getIntrinsicID(N); | |||
9672 | switch (IID) { | |||
9673 | default: | |||
9674 | break; | |||
9675 | case Intrinsic::aarch64_neon_vcvtfxs2fp: | |||
9676 | case Intrinsic::aarch64_neon_vcvtfxu2fp: | |||
9677 | return tryCombineFixedPointConvert(N, DCI, DAG); | |||
9678 | case Intrinsic::aarch64_neon_saddv: | |||
9679 | return combineAcrossLanesIntrinsic(AArch64ISD::SADDV, N, DAG); | |||
9680 | case Intrinsic::aarch64_neon_uaddv: | |||
9681 | return combineAcrossLanesIntrinsic(AArch64ISD::UADDV, N, DAG); | |||
9682 | case Intrinsic::aarch64_neon_sminv: | |||
9683 | return combineAcrossLanesIntrinsic(AArch64ISD::SMINV, N, DAG); | |||
9684 | case Intrinsic::aarch64_neon_uminv: | |||
9685 | return combineAcrossLanesIntrinsic(AArch64ISD::UMINV, N, DAG); | |||
9686 | case Intrinsic::aarch64_neon_smaxv: | |||
9687 | return combineAcrossLanesIntrinsic(AArch64ISD::SMAXV, N, DAG); | |||
9688 | case Intrinsic::aarch64_neon_umaxv: | |||
9689 | return combineAcrossLanesIntrinsic(AArch64ISD::UMAXV, N, DAG); | |||
9690 | case Intrinsic::aarch64_neon_fmax: | |||
9691 | return DAG.getNode(ISD::FMAXNAN, SDLoc(N), N->getValueType(0), | |||
9692 | N->getOperand(1), N->getOperand(2)); | |||
9693 | case Intrinsic::aarch64_neon_fmin: | |||
9694 | return DAG.getNode(ISD::FMINNAN, SDLoc(N), N->getValueType(0), | |||
9695 | N->getOperand(1), N->getOperand(2)); | |||
9696 | case Intrinsic::aarch64_neon_fmaxnm: | |||
9697 | return DAG.getNode(ISD::FMAXNUM, SDLoc(N), N->getValueType(0), | |||
9698 | N->getOperand(1), N->getOperand(2)); | |||
9699 | case Intrinsic::aarch64_neon_fminnm: | |||
9700 | return DAG.getNode(ISD::FMINNUM, SDLoc(N), N->getValueType(0), | |||
9701 | N->getOperand(1), N->getOperand(2)); | |||
9702 | case Intrinsic::aarch64_neon_smull: | |||
9703 | case Intrinsic::aarch64_neon_umull: | |||
9704 | case Intrinsic::aarch64_neon_pmull: | |||
9705 | case Intrinsic::aarch64_neon_sqdmull: | |||
9706 | return tryCombineLongOpWithDup(IID, N, DCI, DAG); | |||
9707 | case Intrinsic::aarch64_neon_sqshl: | |||
9708 | case Intrinsic::aarch64_neon_uqshl: | |||
9709 | case Intrinsic::aarch64_neon_sqshlu: | |||
9710 | case Intrinsic::aarch64_neon_srshl: | |||
9711 | case Intrinsic::aarch64_neon_urshl: | |||
9712 | return tryCombineShiftImm(IID, N, DAG); | |||
9713 | case Intrinsic::aarch64_crc32b: | |||
9714 | case Intrinsic::aarch64_crc32cb: | |||
9715 | return tryCombineCRC32(0xff, N, DAG); | |||
9716 | case Intrinsic::aarch64_crc32h: | |||
9717 | case Intrinsic::aarch64_crc32ch: | |||
9718 | return tryCombineCRC32(0xffff, N, DAG); | |||
9719 | } | |||
9720 | return SDValue(); | |||
9721 | } | |||
9722 | ||||
9723 | static SDValue performExtendCombine(SDNode *N, | |||
9724 | TargetLowering::DAGCombinerInfo &DCI, | |||
9725 | SelectionDAG &DAG) { | |||
9726 | // If we see something like (zext (sabd (extract_high ...), (DUP ...))) then | |||
9727 | // we can convert that DUP into another extract_high (of a bigger DUP), which | |||
9728 | // helps the backend to decide that an sabdl2 would be useful, saving a real | |||
9729 | // extract_high operation. | |||
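// A sketch of the shapes involved:
//   (zext (int_aarch64_neon_sabd (extract_high V), (dup X)))
//     --> (zext (int_aarch64_neon_sabd (extract_high V),
//                                      (extract_high (bigger dup X))))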
9730 | if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ZERO_EXTEND && | |||
9731 | N->getOperand(0).getOpcode() == ISD::INTRINSIC_WO_CHAIN) { | |||
9732 | SDNode *ABDNode = N->getOperand(0).getNode(); | |||
9733 | unsigned IID = getIntrinsicID(ABDNode); | |||
9734 | if (IID == Intrinsic::aarch64_neon_sabd || | |||
9735 | IID == Intrinsic::aarch64_neon_uabd) { | |||
9736 | SDValue NewABD = tryCombineLongOpWithDup(IID, ABDNode, DCI, DAG); | |||
9737 | if (!NewABD.getNode()) | |||
9738 | return SDValue(); | |||
9739 | ||||
9740 | return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), | |||
9741 | NewABD); | |||
9742 | } | |||
9743 | } | |||
9744 | ||||
9745 | // This is effectively a custom type legalization for AArch64. | |||
9746 | // | |||
9747 | // Type legalization will split an extend of a small, legal, type to a larger | |||
9748 | // illegal type by first splitting the destination type, often creating | |||
9749 | // illegal source types, which then get legalized in isel-confusing ways, | |||
9750 | // leading to really terrible codegen. E.g., | |||
9751 | // %result = v8i32 sext v8i8 %value | |||
9752 | // becomes | |||
9753 | // %losrc = extract_subreg %value, ... | |||
9754 | // %hisrc = extract_subreg %value, ... | |||
9755 | // %lo = v4i32 sext v4i8 %losrc | |||
9756 | // %hi = v4i32 sext v4i8 %hisrc | |||
9757 | // Things go rapidly downhill from there. | |||
9758 | // | |||
9759 | // For AArch64, the [sz]ext vector instructions can only go up one element | |||
9760 | // size, so we can, e.g., extend from i8 to i16, but going from i8 to i32
9761 | // takes two instructions.
9762 | // | |||
9763 | // This implies that the most efficient way to do the extend from v8i8 | |||
9764 | // to two v4i32 values is to first extend the v8i8 to v8i16, then let
9765 | // the normal splitting happen for the v8i16->v8i32.
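//
// I.e., for %result = v8i32 sext v8i8 %value, prefer:
//   %ext = v8i16 sext v8i8 %value
//   %lo  = v4i32 sext (v4i16 extract_subvector %ext, 0)
//   %hi  = v4i32 sext (v4i16 extract_subvector %ext, 4)
//   %result = concat_vectors %lo, %hi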
9766 | ||||
9767 | // This is pre-legalization to catch some cases where the default | |||
9768 | // type legalization will create ill-tempered code. | |||
9769 | if (!DCI.isBeforeLegalizeOps()) | |||
9770 | return SDValue(); | |||
9771 | ||||
9772 | // We're only interested in cleaning things up for non-legal vector types | |||
9773 | // here. If both the source and destination are legal, things will just | |||
9774 | // work naturally without any fiddling. | |||
9775 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
9776 | EVT ResVT = N->getValueType(0); | |||
9777 | if (!ResVT.isVector() || TLI.isTypeLegal(ResVT)) | |||
9778 | return SDValue(); | |||
9779 | // If the vector type isn't a simple VT, it's beyond the scope of what | |||
9780 | // we're worried about here. Let legalization do its thing and hope for | |||
9781 | // the best. | |||
9782 | SDValue Src = N->getOperand(0); | |||
9783 | EVT SrcVT = Src->getValueType(0); | |||
9784 | if (!ResVT.isSimple() || !SrcVT.isSimple()) | |||
9785 | return SDValue(); | |||
9786 | ||||
9787 | // If the source VT is a 64-bit vector, we can play games and get the | |||
9788 | // better results we want. | |||
9789 | if (SrcVT.getSizeInBits() != 64) | |||
9790 | return SDValue(); | |||
9791 | ||||
9792 | unsigned SrcEltSize = SrcVT.getScalarSizeInBits(); | |||
9793 | unsigned ElementCount = SrcVT.getVectorNumElements(); | |||
9794 | SrcVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize * 2), ElementCount); | |||
9795 | SDLoc DL(N); | |||
9796 | Src = DAG.getNode(N->getOpcode(), DL, SrcVT, Src); | |||
9797 | ||||
9798 | // Now split the rest of the operation into two halves, each with a 64 | |||
9799 | // bit source. | |||
9800 | EVT LoVT, HiVT; | |||
9801 | SDValue Lo, Hi; | |||
9802 | unsigned NumElements = ResVT.getVectorNumElements(); | |||
9803 | assert(!(NumElements & 1) && "Splitting vector, but not in half!");
9804 | LoVT = HiVT = EVT::getVectorVT(*DAG.getContext(), | |||
9805 | ResVT.getVectorElementType(), NumElements / 2); | |||
9806 | ||||
9807 | EVT InNVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getVectorElementType(), | |||
9808 | LoVT.getVectorNumElements()); | |||
9809 | Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, Src, | |||
9810 | DAG.getConstant(0, DL, MVT::i64)); | |||
9811 | Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, Src, | |||
9812 | DAG.getConstant(InNVT.getVectorNumElements(), DL, MVT::i64)); | |||
9813 | Lo = DAG.getNode(N->getOpcode(), DL, LoVT, Lo); | |||
9814 | Hi = DAG.getNode(N->getOpcode(), DL, HiVT, Hi); | |||
9815 | ||||
9816 | // Now combine the parts back together so we still have a single result | |||
9817 | // like the combiner expects. | |||
9818 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Lo, Hi); | |||
9819 | } | |||
9820 | ||||
9821 | static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St, | |||
9822 | SDValue SplatVal, unsigned NumVecElts) { | |||
9823 | unsigned OrigAlignment = St.getAlignment(); | |||
9824 | unsigned EltOffset = SplatVal.getValueType().getSizeInBits() / 8; | |||
9825 | ||||
9826 | // Create scalar stores. This is at least as good as the code sequence for a | |||
9827 | // split unaligned store which is a dup.s, ext.b, and two stores. | |||
9828 | // Most of the time the three stores should be replaced by store pair | |||
9829 | // instructions (stp). | |||
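// For example, a v4i32 splat of w1 stored at x0 becomes, roughly:
//   str w1, [x0]; str w1, [x0, #4]; str w1, [x0, #8]; str w1, [x0, #12]
// which the load/store optimizer can then merge into two stp instructions.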
9830 | SDLoc DL(&St); | |||
9831 | SDValue BasePtr = St.getBasePtr(); | |||
9832 | uint64_t BaseOffset = 0; | |||
9833 | ||||
9834 | const MachinePointerInfo &PtrInfo = St.getPointerInfo(); | |||
9835 | SDValue NewST1 = | |||
9836 | DAG.getStore(St.getChain(), DL, SplatVal, BasePtr, PtrInfo, | |||
9837 | OrigAlignment, St.getMemOperand()->getFlags()); | |||
9838 | ||||
9839 | // As this is in ISel, we will not merge this add, which may degrade results.
9840 | if (BasePtr->getOpcode() == ISD::ADD && | |||
9841 | isa<ConstantSDNode>(BasePtr->getOperand(1))) { | |||
9842 | BaseOffset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue(); | |||
9843 | BasePtr = BasePtr->getOperand(0); | |||
9844 | } | |||
9845 | ||||
9846 | unsigned Offset = EltOffset; | |||
9847 | while (--NumVecElts) { | |||
9848 | unsigned Alignment = MinAlign(OrigAlignment, Offset); | |||
9849 | SDValue OffsetPtr = | |||
9850 | DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, | |||
9851 | DAG.getConstant(BaseOffset + Offset, DL, MVT::i64)); | |||
9852 | NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr, | |||
9853 | PtrInfo.getWithOffset(Offset), Alignment, | |||
9854 | St.getMemOperand()->getFlags()); | |||
9855 | Offset += EltOffset; | |||
9856 | } | |||
9857 | return NewST1; | |||
9858 | } | |||
9859 | ||||
9860 | /// Replace a splat of zeros stored to a vector by scalar stores of WZR/XZR. The
9861 | /// load store optimizer pass will merge them into store pair instructions. This
9862 | /// should be better than a movi to create the vector zero followed by a vector
9863 | /// store if the zero constant is not re-used, since one instruction and one
9864 | /// register live range will be removed.
9865 | /// | |||
9866 | /// For example, the final generated code should be: | |||
9867 | /// | |||
9868 | /// stp xzr, xzr, [x0] | |||
9869 | /// | |||
9870 | /// instead of: | |||
9871 | /// | |||
9872 | /// movi v0.2d, #0 | |||
9873 | /// str q0, [x0] | |||
9874 | /// | |||
9875 | static SDValue replaceZeroVectorStore(SelectionDAG &DAG, StoreSDNode &St) { | |||
9876 | SDValue StVal = St.getValue(); | |||
9877 | EVT VT = StVal.getValueType(); | |||
9878 | ||||
9879 | // It is beneficial to scalarize a zero splat store for 2 or 3 i64 elements or | |||
9880 | // 2, 3 or 4 i32 elements. | |||
9881 | int NumVecElts = VT.getVectorNumElements(); | |||
9882 | if (!(((NumVecElts == 2 || NumVecElts == 3) && | |||
9883 | VT.getVectorElementType().getSizeInBits() == 64) || | |||
9884 | ((NumVecElts == 2 || NumVecElts == 3 || NumVecElts == 4) && | |||
9885 | VT.getVectorElementType().getSizeInBits() == 32))) | |||
9886 | return SDValue(); | |||
9887 | ||||
9888 | if (StVal.getOpcode() != ISD::BUILD_VECTOR) | |||
9889 | return SDValue(); | |||
9890 | ||||
9891 | // If the zero constant has more than one use then the vector store could be | |||
9892 | // better since the constant mov will be amortized and stp q instructions | |||
9893 | // should be able to be formed. | |||
9894 | if (!StVal.hasOneUse()) | |||
9895 | return SDValue(); | |||
9896 | ||||
9897 | // If the immediate offset of the address operand is too large for the stp | |||
9898 | // instruction, then bail out. | |||
9899 | if (DAG.isBaseWithConstantOffset(St.getBasePtr())) { | |||
9900 | int64_t Offset = St.getBasePtr()->getConstantOperandVal(1); | |||
9901 | if (Offset < -512 || Offset > 504) | |||
9902 | return SDValue(); | |||
9903 | } | |||
9904 | ||||
9905 | for (int I = 0; I < NumVecElts; ++I) { | |||
9906 | SDValue EltVal = StVal.getOperand(I); | |||
9907 | if (!isNullConstant(EltVal) && !isNullFPConstant(EltVal)) | |||
9908 | return SDValue(); | |||
9909 | } | |||
9910 | ||||
9911 | // Use a CopyFromReg WZR/XZR here to prevent | |||
9912 | // DAGCombiner::MergeConsecutiveStores from undoing this transformation. | |||
9913 | SDLoc DL(&St); | |||
9914 | unsigned ZeroReg; | |||
9915 | EVT ZeroVT; | |||
9916 | if (VT.getVectorElementType().getSizeInBits() == 32) { | |||
9917 | ZeroReg = AArch64::WZR; | |||
9918 | ZeroVT = MVT::i32; | |||
9919 | } else { | |||
9920 | ZeroReg = AArch64::XZR; | |||
9921 | ZeroVT = MVT::i64; | |||
9922 | } | |||
9923 | SDValue SplatVal = | |||
9924 | DAG.getCopyFromReg(DAG.getEntryNode(), DL, ZeroReg, ZeroVT); | |||
9925 | return splitStoreSplat(DAG, St, SplatVal, NumVecElts); | |||
9926 | } | |||
9927 | ||||
9928 | /// Replace a splatted scalar stored to a vector by scalar stores of the scalar
9929 | /// value. The load store optimizer pass will merge them into store pair
9930 | /// instructions. This has better performance than a splat of the scalar followed
9931 | /// by a split vector store. Even if the stores are not merged, it is four stores
9932 | /// vs. a dup followed by an ext.b and two stores.
9933 | static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode &St) { | |||
9934 | SDValue StVal = St.getValue(); | |||
9935 | EVT VT = StVal.getValueType(); | |||
9936 | ||||
9937 | // Don't replace floating point stores, they possibly won't be transformed to | |||
9938 | // stp because of the store pair suppress pass. | |||
9939 | if (VT.isFloatingPoint()) | |||
9940 | return SDValue(); | |||
9941 | ||||
9942 | // We can express a splat as store pair(s) for 2 or 4 elements. | |||
9943 | unsigned NumVecElts = VT.getVectorNumElements(); | |||
9944 | if (NumVecElts != 4 && NumVecElts != 2) | |||
9945 | return SDValue(); | |||
9946 | ||||
9947 | // Check that this is a splat. | |||
9948 | // Make sure that each of the relevant vector element locations are inserted | |||
9949 | // to, i.e. 0 and 1 for v2i64 and 0, 1, 2, 3 for v4i32. | |||
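// E.g., for a v2i64 splat we expect a chain like (a sketch):
//   (insert_vector_elt (insert_vector_elt undef, %s, 0), %s, 1)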
9950 | std::bitset<4> IndexNotInserted((1 << NumVecElts) - 1); | |||
9951 | SDValue SplatVal; | |||
9952 | for (unsigned I = 0; I < NumVecElts; ++I) { | |||
9953 | // Check for insert vector elements. | |||
9954 | if (StVal.getOpcode() != ISD::INSERT_VECTOR_ELT) | |||
9955 | return SDValue(); | |||
9956 | ||||
9957 | // Check that same value is inserted at each vector element. | |||
9958 | if (I == 0) | |||
9959 | SplatVal = StVal.getOperand(1); | |||
9960 | else if (StVal.getOperand(1) != SplatVal) | |||
9961 | return SDValue(); | |||
9962 | ||||
9963 | // Check insert element index. | |||
9964 | ConstantSDNode *CIndex = dyn_cast<ConstantSDNode>(StVal.getOperand(2)); | |||
9965 | if (!CIndex) | |||
9966 | return SDValue(); | |||
9967 | uint64_t IndexVal = CIndex->getZExtValue(); | |||
9968 | if (IndexVal >= NumVecElts) | |||
9969 | return SDValue(); | |||
9970 | IndexNotInserted.reset(IndexVal); | |||
9971 | ||||
9972 | StVal = StVal.getOperand(0); | |||
9973 | } | |||
9974 | // Check that all vector element locations were inserted to. | |||
9975 | if (IndexNotInserted.any()) | |||
9976 | return SDValue(); | |||
9977 | ||||
9978 | return splitStoreSplat(DAG, St, SplatVal, NumVecElts); | |||
9979 | } | |||
9980 | ||||
9981 | static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, | |||
9982 | SelectionDAG &DAG, | |||
9983 | const AArch64Subtarget *Subtarget) { | |||
9984 | ||||
9985 | StoreSDNode *S = cast<StoreSDNode>(N); | |||
9986 | if (S->isVolatile() || S->isIndexed()) | |||
9987 | return SDValue(); | |||
9988 | ||||
9989 | SDValue StVal = S->getValue(); | |||
9990 | EVT VT = StVal.getValueType(); | |||
9991 | if (!VT.isVector()) | |||
9992 | return SDValue(); | |||
9993 | ||||
9994 | // If we get a splat of zeros, convert this vector store to a store of | |||
9995 | // scalars. They will be merged into store pairs of xzr thereby removing one | |||
9996 | // instruction and one register. | |||
9997 | if (SDValue ReplacedZeroSplat = replaceZeroVectorStore(DAG, *S)) | |||
9998 | return ReplacedZeroSplat; | |||
9999 | ||||
10000 | // FIXME: The logic for deciding if an unaligned store should be split should | |||
10001 | // be included in TLI.allowsMisalignedMemoryAccesses(), and there should be | |||
10002 | // a call to that function here. | |||
10003 | ||||
10004 | if (!Subtarget->isMisaligned128StoreSlow()) | |||
10005 | return SDValue(); | |||
10006 | ||||
10007 | // Don't split at -Oz. | |||
10008 | if (DAG.getMachineFunction().getFunction().optForMinSize()) | |||
10009 | return SDValue(); | |||
10010 | ||||
10011 | // Don't split v2i64 vectors. Memcpy lowering produces those and splitting | |||
10012 | // those up regresses performance on micro-benchmarks and olden/bh. | |||
10013 | if (VT.getVectorNumElements() < 2 || VT == MVT::v2i64) | |||
10014 | return SDValue(); | |||
10015 | ||||
10016 | // Split unaligned 16B stores. They are terrible for performance. | |||
10017 | // Don't split stores with alignment of 1 or 2. Code that uses clang vector | |||
10018 | // extensions can use this to mark that it does not want splitting to happen | |||
10019 | // (by underspecifying alignment to be 1 or 2). Furthermore, the chance of | |||
10020 | // eliminating alignment hazards is only 1 in 8 for alignment of 2. | |||
10021 | if (VT.getSizeInBits() != 128 || S->getAlignment() >= 16 || | |||
10022 | S->getAlignment() <= 2) | |||
10023 | return SDValue(); | |||
10024 | ||||
10025 | // If we get a splat of a scalar convert this vector store to a store of | |||
10026 | // scalars. They will be merged into store pairs thereby removing two | |||
10027 | // instructions. | |||
10028 | if (SDValue ReplacedSplat = replaceSplatVectorStore(DAG, *S)) | |||
10029 | return ReplacedSplat; | |||
10030 | ||||
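// Otherwise split the 128-bit store into two 64-bit stores of the low and
// high halves, placed at offsets #0 and #8 from the base address.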
10031 | SDLoc DL(S); | |||
10032 | unsigned NumElts = VT.getVectorNumElements() / 2; | |||
10033 | // Split VT into two. | |||
10034 | EVT HalfVT = | |||
10035 | EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), NumElts); | |||
10036 | SDValue SubVector0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal, | |||
10037 | DAG.getConstant(0, DL, MVT::i64)); | |||
10038 | SDValue SubVector1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal, | |||
10039 | DAG.getConstant(NumElts, DL, MVT::i64)); | |||
10040 | SDValue BasePtr = S->getBasePtr(); | |||
10041 | SDValue NewST1 = | |||
10042 | DAG.getStore(S->getChain(), DL, SubVector0, BasePtr, S->getPointerInfo(), | |||
10043 | S->getAlignment(), S->getMemOperand()->getFlags()); | |||
10044 | SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, | |||
10045 | DAG.getConstant(8, DL, MVT::i64)); | |||
10046 | return DAG.getStore(NewST1.getValue(0), DL, SubVector1, OffsetPtr, | |||
10047 | S->getPointerInfo(), S->getAlignment(), | |||
10048 | S->getMemOperand()->getFlags()); | |||
10049 | } | |||
10050 | ||||
10051 | /// Target-specific DAG combine function for post-increment LD1 (lane) and | |||
10052 | /// post-increment LD1R. | |||
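/// A sketch of the pattern being merged, for an i32 element:
///   %ld  = load i32, %addr
///   %vec = insert_vector_elt %v, %ld, lane   ; or a dup of %ld for LD1R
///   %inc = add %addr, #4
/// becomes a single LD1LANEpost (or LD1DUPpost) node that produces both the
/// updated vector and the incremented address.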
10053 | static SDValue performPostLD1Combine(SDNode *N, | |||
10054 | TargetLowering::DAGCombinerInfo &DCI, | |||
10055 | bool IsLaneOp) { | |||
10056 | if (DCI.isBeforeLegalizeOps()) | |||
10057 | return SDValue(); | |||
10058 | ||||
10059 | SelectionDAG &DAG = DCI.DAG; | |||
10060 | EVT VT = N->getValueType(0); | |||
10061 | ||||
10062 | unsigned LoadIdx = IsLaneOp ? 1 : 0; | |||
10063 | SDNode *LD = N->getOperand(LoadIdx).getNode(); | |||
10064 | // If it is not a LOAD, we cannot do this combine.
10065 | if (LD->getOpcode() != ISD::LOAD) | |||
10066 | return SDValue(); | |||
10067 | ||||
10068 | // The vector lane must be a constant in the LD1LANE opcode. | |||
10069 | SDValue Lane; | |||
10070 | if (IsLaneOp) { | |||
10071 | Lane = N->getOperand(2); | |||
10072 | auto *LaneC = dyn_cast<ConstantSDNode>(Lane); | |||
10073 | if (!LaneC || LaneC->getZExtValue() >= VT.getVectorNumElements()) | |||
10074 | return SDValue(); | |||
10075 | } | |||
10076 | ||||
10077 | LoadSDNode *LoadSDN = cast<LoadSDNode>(LD); | |||
10078 | EVT MemVT = LoadSDN->getMemoryVT(); | |||
10079 | // Check if memory operand is the same type as the vector element. | |||
10080 | if (MemVT != VT.getVectorElementType()) | |||
10081 | return SDValue(); | |||
10082 | ||||
10083 | // Check if there are other uses. If so, do not combine as it will introduce | |||
10084 | // an extra load. | |||
10085 | for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end(); UI != UE; | |||
10086 | ++UI) { | |||
10087 | if (UI.getUse().getResNo() == 1) // Ignore uses of the chain result. | |||
10088 | continue; | |||
10089 | if (*UI != N) | |||
10090 | return SDValue(); | |||
10091 | } | |||
10092 | ||||
10093 | SDValue Addr = LD->getOperand(1); | |||
10094 | SDValue Vector = N->getOperand(0); | |||
10095 | // Search for a use of the address operand that is an increment. | |||
10096 | for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), UE = | |||
10097 | Addr.getNode()->use_end(); UI != UE; ++UI) { | |||
10098 | SDNode *User = *UI; | |||
10099 | if (User->getOpcode() != ISD::ADD | |||
10100 | || UI.getUse().getResNo() != Addr.getResNo()) | |||
10101 | continue; | |||
10102 | ||||
10103 | // Check that the add is independent of the load. Otherwise, folding it | |||
10104 | // would create a cycle. | |||
10105 | if (User->isPredecessorOf(LD) || LD->isPredecessorOf(User)) | |||
10106 | continue; | |||
10107 | // Also check that add is not used in the vector operand. This would also | |||
10108 | // create a cycle. | |||
10109 | if (User->isPredecessorOf(Vector.getNode())) | |||
10110 | continue; | |||
10111 | ||||
10112 | // If the increment is a constant, it must match the memory ref size. | |||
10113 | SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); | |||
10114 | if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { | |||
10115 | uint32_t IncVal = CInc->getZExtValue(); | |||
10116 | unsigned NumBytes = VT.getScalarSizeInBits() / 8; | |||
10117 | if (IncVal != NumBytes) | |||
10118 | continue; | |||
10119 | Inc = DAG.getRegister(AArch64::XZR, MVT::i64); | |||
10120 | } | |||
10121 | ||||
10122 | // Finally, check that the vector doesn't depend on the load. | |||
10123 | // Again, this would create a cycle. | |||
10124 | // The load depending on the vector is fine, as that's the case for the | |||
10125 | // LD1*post we'll eventually generate anyway. | |||
10126 | if (LoadSDN->isPredecessorOf(Vector.getNode())) | |||
10127 | continue; | |||
10128 | ||||
10129 | SmallVector<SDValue, 8> Ops; | |||
10130 | Ops.push_back(LD->getOperand(0)); // Chain | |||
10131 | if (IsLaneOp) { | |||
10132 | Ops.push_back(Vector); // The vector to be inserted | |||
10133 | Ops.push_back(Lane); // The lane to be inserted in the vector | |||
10134 | } | |||
10135 | Ops.push_back(Addr); | |||
10136 | Ops.push_back(Inc); | |||
10137 | ||||
10138 | EVT Tys[3] = { VT, MVT::i64, MVT::Other }; | |||
10139 | SDVTList SDTys = DAG.getVTList(Tys); | |||
10140 | unsigned NewOp = IsLaneOp ? AArch64ISD::LD1LANEpost : AArch64ISD::LD1DUPpost; | |||
10141 | SDValue UpdN = DAG.getMemIntrinsicNode(NewOp, SDLoc(N), SDTys, Ops, | |||
10142 | MemVT, | |||
10143 | LoadSDN->getMemOperand()); | |||
10144 | ||||
10145 | // Update the uses. | |||
10146 | SDValue NewResults[] = { | |||
10147 | SDValue(LD, 0), // The result of load | |||
10148 | SDValue(UpdN.getNode(), 2) // Chain | |||
10149 | }; | |||
10150 | DCI.CombineTo(LD, NewResults); | |||
10151 | DCI.CombineTo(N, SDValue(UpdN.getNode(), 0)); // Dup/Inserted Result | |||
10152 | DCI.CombineTo(User, SDValue(UpdN.getNode(), 1)); // Write back register | |||
10153 | ||||
10154 | break; | |||
10155 | } | |||
10156 | return SDValue(); | |||
10157 | } | |||
10158 | ||||
10159 | /// Simplify ``Addr`` given that the top byte of it is ignored by HW during | |||
10160 | /// address translation. | |||
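/// E.g., with TBI enabled an address masked as
///   (and %addr, 0x00FFFFFFFFFFFFFF)
/// can be simplified to plain %addr, since only the low 56 bits are demanded.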
10161 | static bool performTBISimplification(SDValue Addr, | |||
10162 | TargetLowering::DAGCombinerInfo &DCI, | |||
10163 | SelectionDAG &DAG) { | |||
10164 | APInt DemandedMask = APInt::getLowBitsSet(64, 56); | |||
10165 | KnownBits Known; | |||
10166 | TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), | |||
10167 | !DCI.isBeforeLegalizeOps()); | |||
10168 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
10169 | if (TLI.SimplifyDemandedBits(Addr, DemandedMask, Known, TLO)) { | |||
10170 | DCI.CommitTargetLoweringOpt(TLO); | |||
10171 | return true; | |||
10172 | } | |||
10173 | return false; | |||
10174 | } | |||
10175 | ||||
10176 | static SDValue performSTORECombine(SDNode *N, | |||
10177 | TargetLowering::DAGCombinerInfo &DCI, | |||
10178 | SelectionDAG &DAG, | |||
10179 | const AArch64Subtarget *Subtarget) { | |||
10180 | if (SDValue Split = splitStores(N, DCI, DAG, Subtarget)) | |||
10181 | return Split; | |||
10182 | ||||
10183 | if (Subtarget->supportsAddressTopByteIgnored() && | |||
10184 | performTBISimplification(N->getOperand(2), DCI, DAG)) | |||
10185 | return SDValue(N, 0); | |||
10186 | ||||
10187 | return SDValue(); | |||
10188 | } | |||
10189 | ||||
10190 | ||||
10191 | /// Target-specific DAG combine function for NEON load/store intrinsics | |||
10192 | /// to merge base address updates. | |||
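/// A sketch of the rewrite, for an ld2 of two 128-bit vectors:
///   (int_aarch64_neon_ld2 %addr), (add %addr, #32) --> (LD2post %addr, #32)
/// where the post-incremented address becomes an extra result of the node.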
10193 | static SDValue performNEONPostLDSTCombine(SDNode *N, | |||
10194 | TargetLowering::DAGCombinerInfo &DCI, | |||
10195 | SelectionDAG &DAG) { | |||
10196 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) | |||
10197 | return SDValue(); | |||
10198 | ||||
10199 | unsigned AddrOpIdx = N->getNumOperands() - 1; | |||
10200 | SDValue Addr = N->getOperand(AddrOpIdx); | |||
10201 | ||||
10202 | // Search for a use of the address operand that is an increment. | |||
10203 | for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), | |||
10204 | UE = Addr.getNode()->use_end(); UI != UE; ++UI) { | |||
10205 | SDNode *User = *UI; | |||
10206 | if (User->getOpcode() != ISD::ADD || | |||
10207 | UI.getUse().getResNo() != Addr.getResNo()) | |||
10208 | continue; | |||
10209 | ||||
10210 | // Check that the add is independent of the load/store. Otherwise, folding | |||
10211 | // it would create a cycle. | |||
10212 | if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) | |||
10213 | continue; | |||
10214 | ||||
10215 | // Find the new opcode for the updating load/store. | |||
10216 | bool IsStore = false; | |||
10217 | bool IsLaneOp = false; | |||
10218 | bool IsDupOp = false; | |||
10219 | unsigned NewOpc = 0; | |||
10220 | unsigned NumVecs = 0; | |||
10221 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); | |||
10222 | switch (IntNo) { | |||
10223 | default: llvm_unreachable("unexpected intrinsic for Neon base update");
10224 | case Intrinsic::aarch64_neon_ld2: NewOpc = AArch64ISD::LD2post; | |||
10225 | NumVecs = 2; break; | |||
10226 | case Intrinsic::aarch64_neon_ld3: NewOpc = AArch64ISD::LD3post; | |||
10227 | NumVecs = 3; break; | |||
10228 | case Intrinsic::aarch64_neon_ld4: NewOpc = AArch64ISD::LD4post; | |||
10229 | NumVecs = 4; break; | |||
10230 | case Intrinsic::aarch64_neon_st2: NewOpc = AArch64ISD::ST2post; | |||
10231 | NumVecs = 2; IsStore = true; break; | |||
10232 | case Intrinsic::aarch64_neon_st3: NewOpc = AArch64ISD::ST3post; | |||
10233 | NumVecs = 3; IsStore = true; break; | |||
10234 | case Intrinsic::aarch64_neon_st4: NewOpc = AArch64ISD::ST4post; | |||
10235 | NumVecs = 4; IsStore = true; break; | |||
10236 | case Intrinsic::aarch64_neon_ld1x2: NewOpc = AArch64ISD::LD1x2post; | |||
10237 | NumVecs = 2; break; | |||
10238 | case Intrinsic::aarch64_neon_ld1x3: NewOpc = AArch64ISD::LD1x3post; | |||
10239 | NumVecs = 3; break; | |||
10240 | case Intrinsic::aarch64_neon_ld1x4: NewOpc = AArch64ISD::LD1x4post; | |||
10241 | NumVecs = 4; break; | |||
10242 | case Intrinsic::aarch64_neon_st1x2: NewOpc = AArch64ISD::ST1x2post; | |||
10243 | NumVecs = 2; IsStore = true; break; | |||
10244 | case Intrinsic::aarch64_neon_st1x3: NewOpc = AArch64ISD::ST1x3post; | |||
10245 | NumVecs = 3; IsStore = true; break; | |||
10246 | case Intrinsic::aarch64_neon_st1x4: NewOpc = AArch64ISD::ST1x4post; | |||
10247 | NumVecs = 4; IsStore = true; break; | |||
10248 | case Intrinsic::aarch64_neon_ld2r: NewOpc = AArch64ISD::LD2DUPpost; | |||
10249 | NumVecs = 2; IsDupOp = true; break; | |||
10250 | case Intrinsic::aarch64_neon_ld3r: NewOpc = AArch64ISD::LD3DUPpost; | |||
10251 | NumVecs = 3; IsDupOp = true; break; | |||
10252 | case Intrinsic::aarch64_neon_ld4r: NewOpc = AArch64ISD::LD4DUPpost; | |||
10253 | NumVecs = 4; IsDupOp = true; break; | |||
10254 | case Intrinsic::aarch64_neon_ld2lane: NewOpc = AArch64ISD::LD2LANEpost; | |||
10255 | NumVecs = 2; IsLaneOp = true; break; | |||
10256 | case Intrinsic::aarch64_neon_ld3lane: NewOpc = AArch64ISD::LD3LANEpost; | |||
10257 | NumVecs = 3; IsLaneOp = true; break; | |||
10258 | case Intrinsic::aarch64_neon_ld4lane: NewOpc = AArch64ISD::LD4LANEpost; | |||
10259 | NumVecs = 4; IsLaneOp = true; break; | |||
10260 | case Intrinsic::aarch64_neon_st2lane: NewOpc = AArch64ISD::ST2LANEpost; | |||
10261 | NumVecs = 2; IsStore = true; IsLaneOp = true; break; | |||
10262 | case Intrinsic::aarch64_neon_st3lane: NewOpc = AArch64ISD::ST3LANEpost; | |||
10263 | NumVecs = 3; IsStore = true; IsLaneOp = true; break; | |||
10264 | case Intrinsic::aarch64_neon_st4lane: NewOpc = AArch64ISD::ST4LANEpost; | |||
10265 | NumVecs = 4; IsStore = true; IsLaneOp = true; break; | |||
10266 | } | |||
10267 | ||||
10268 | EVT VecTy; | |||
10269 | if (IsStore) | |||
10270 | VecTy = N->getOperand(2).getValueType(); | |||
10271 | else | |||
10272 | VecTy = N->getValueType(0); | |||
10273 | ||||
10274 | // If the increment is a constant, it must match the memory ref size. | |||
10275 | SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); | |||
10276 | if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { | |||
10277 | uint32_t IncVal = CInc->getZExtValue(); | |||
10278 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; | |||
10279 | if (IsLaneOp || IsDupOp) | |||
10280 | NumBytes /= VecTy.getVectorNumElements(); | |||
10281 | if (IncVal != NumBytes) | |||
10282 | continue; | |||
10283 | Inc = DAG.getRegister(AArch64::XZR, MVT::i64); | |||
10284 | } | |||
10285 | SmallVector<SDValue, 8> Ops; | |||
10286 | Ops.push_back(N->getOperand(0)); // Incoming chain | |||
10287 | // Load lane and store have vector list as input. | |||
10288 | if (IsLaneOp || IsStore) | |||
10289 | for (unsigned i = 2; i < AddrOpIdx; ++i) | |||
10290 | Ops.push_back(N->getOperand(i)); | |||
10291 | Ops.push_back(Addr); // Base register | |||
10292 | Ops.push_back(Inc); | |||
10293 | ||||
10294 | // Return Types. | |||
10295 | EVT Tys[6]; | |||
10296 | unsigned NumResultVecs = (IsStore ? 0 : NumVecs); | |||
10297 | unsigned n; | |||
10298 | for (n = 0; n < NumResultVecs; ++n) | |||
10299 | Tys[n] = VecTy; | |||
10300 | Tys[n++] = MVT::i64; // Type of write back register | |||
10301 | Tys[n] = MVT::Other; // Type of the chain | |||
10302 | SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2)); | |||
10303 | ||||
10304 | MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); | |||
10305 | SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, Ops, | |||
10306 | MemInt->getMemoryVT(), | |||
10307 | MemInt->getMemOperand()); | |||
10308 | ||||
10309 | // Update the uses. | |||
10310 | std::vector<SDValue> NewResults; | |||
10311 | for (unsigned i = 0; i < NumResultVecs; ++i) { | |||
10312 | NewResults.push_back(SDValue(UpdN.getNode(), i)); | |||
10313 | } | |||
10314 | NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); | |||
10315 | DCI.CombineTo(N, NewResults); | |||
10316 | DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); | |||
10317 | ||||
10318 | break; | |||
10319 | } | |||
10320 | return SDValue(); | |||
10321 | } | |||
10322 | ||||
10323 | // Checks to see if the value is the prescribed width and returns information | |||
10324 | // about its extension mode. | |||
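// E.g., an extending i8 load, an AssertSext/AssertZext to i8, or a constant
// that fits in 8 bits all qualify as being "8 bits wide" here.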
10325 | static | |||
10326 | bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType) { | |||
10327 | ExtType = ISD::NON_EXTLOAD; | |||
10328 | switch(V.getNode()->getOpcode()) { | |||
10329 | default: | |||
10330 | return false; | |||
10331 | case ISD::LOAD: { | |||
10332 | LoadSDNode *LoadNode = cast<LoadSDNode>(V.getNode()); | |||
10333 | if ((LoadNode->getMemoryVT() == MVT::i8 && width == 8) | |||
10334 | || (LoadNode->getMemoryVT() == MVT::i16 && width == 16)) { | |||
10335 | ExtType = LoadNode->getExtensionType(); | |||
10336 | return true; | |||
10337 | } | |||
10338 | return false; | |||
10339 | } | |||
10340 | case ISD::AssertSext: { | |||
10341 | VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1)); | |||
10342 | if ((TypeNode->getVT() == MVT::i8 && width == 8) | |||
10343 | || (TypeNode->getVT() == MVT::i16 && width == 16)) { | |||
10344 | ExtType = ISD::SEXTLOAD; | |||
10345 | return true; | |||
10346 | } | |||
10347 | return false; | |||
10348 | } | |||
10349 | case ISD::AssertZext: { | |||
10350 | VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1)); | |||
10351 | if ((TypeNode->getVT() == MVT::i8 && width == 8) | |||
10352 | || (TypeNode->getVT() == MVT::i16 && width == 16)) { | |||
10353 | ExtType = ISD::ZEXTLOAD; | |||
10354 | return true; | |||
10355 | } | |||
10356 | return false; | |||
10357 | } | |||
10358 | case ISD::Constant: | |||
10359 | case ISD::TargetConstant: { | |||
10360 | return std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) < | |||
10361 | 1LL << (width - 1); | |||
10362 | } | |||
10363 | } | |||
10364 | ||||
10365 | return true; | |||
10366 | } | |||
10367 | ||||
10368 | // This function does a whole lot of voodoo to determine if the tests are | |||
10369 | // equivalent without and with a mask. Essentially what happens is that given a | |||
10370 | // DAG resembling: | |||
10371 | // | |||
10372 | // +-------------+ +-------------+ +-------------+ +-------------+ | |||
10373 | // | Input | | AddConstant | | CompConstant| | CC | | |||
10374 | // +-------------+ +-------------+ +-------------+ +-------------+ | |||
10375 | // | | | | | |||
10376 | // V V | +----------+ | |||
10377 | // +-------------+ +----+ | | | |||
10378 | // | ADD | |0xff| | | | |||
10379 | // +-------------+ +----+ | | | |||
10380 | // | | | | | |||
10381 | // V V | | | |||
10382 | // +-------------+ | | | |||
10383 | // | AND | | | | |||
10384 | // +-------------+ | | | |||
10385 | // | | | | |||
10386 | // +-----+ | | | |||
10387 | // | | | | |||
10388 | // V V V | |||
10389 | // +-------------+ | |||
10390 | // | CMP | | |||
10391 | // +-------------+ | |||
10392 | // | |||
10393 | // The AND node may be safely removed for some combinations of inputs. In | |||
10394 | // particular we need to take into account the extension type of the Input, | |||
10395 | // the exact values of AddConstant, CompConstant, and CC, along with the nominal | |||
10396 | // width of the input (this can work for any width inputs, the above graph is | |||
10397 | // specific to 8 bits).
10398 | // | |||
10399 | // The specific equations were worked out by generating output tables for each | |||
10400 | // AArch64CC value in terms of AddConstant (w1) and CompConstant (w2). The
10401 | // problem was simplified by working with 4 bit inputs, which means we only | |||
10402 | // needed to reason about 24 distinct bit patterns: 8 patterns unique to zero | |||
10403 | // extension (8,15), 8 patterns unique to sign extensions (-8,-1), and 8 | |||
10404 | // patterns present in both extensions (0,7). For every distinct set of | |||
10405 | // AddConstant and CompConstant bit patterns we can consider the masked and
10406 | // unmasked versions to be equivalent if the result of this function is true
10407 | // for all 16 distinct bit patterns of the current extension type of Input (w0).
10408 | // | |||
10409 | // sub w8, w0, w1 | |||
10410 | // and w10, w8, #0x0f | |||
10411 | // cmp w8, w2 | |||
10412 | // cset w9, AArch64CC | |||
10413 | // cmp w10, w2 | |||
10414 | // cset w11, AArch64CC | |||
10415 | // cmp w9, w11 | |||
10416 | // cset w0, eq | |||
10417 | // ret | |||
10418 | // | |||
10419 | // Since the above function shows when the outputs are equivalent it defines | |||
10420 | // when it is safe to remove the AND. Unfortunately it only runs on AArch64 and | |||
10421 | // would be expensive to run during compiles. The equations below were written | |||
10422 | // in a test harness that confirmed they gave outputs equivalent to the above
10423 | // function for all inputs, so they can be used to determine if the removal is
10424 | // legal instead.
10425 | // | |||
10426 | // isEquivalentMaskless() is the code for testing if the AND can be removed,
10427 | // factored out of the DAG recognition because the DAG can take several forms.
10428 | ||||
10429 | static bool isEquivalentMaskless(unsigned CC, unsigned width, | |||
10430 | ISD::LoadExtType ExtType, int AddConstant, | |||
10431 | int CompConstant) { | |||
10432 | // By being careful about our equations and only writing them in terms of
10433 | // symbolic values and well known constants (0, 1, -1, MaxUInt) we can
10434 | // make them generally applicable to all bit widths. | |||
10435 | int MaxUInt = (1 << width); | |||
10436 | ||||
10437 | // For the purposes of these comparisons sign extending the type is | |||
10438 | // equivalent to zero extending the add and displacing it by half the integer | |||
10439 | // width. Provided we are careful and make sure our equations are valid over | |||
10440 | // the whole range we can just adjust the input and avoid writing equations | |||
10441 | // for sign extended inputs. | |||
10442 | if (ExtType == ISD::SEXTLOAD) | |||
10443 | AddConstant -= (1 << (width-1)); | |||
10444 | ||||
10445 | switch(CC) { | |||
10446 | case AArch64CC::LE: | |||
10447 | case AArch64CC::GT: | |||
10448 | if ((AddConstant == 0) || | |||
10449 | (CompConstant == MaxUInt - 1 && AddConstant < 0) || | |||
10450 | (AddConstant >= 0 && CompConstant < 0) || | |||
10451 | (AddConstant <= 0 && CompConstant <= 0 && CompConstant < AddConstant)) | |||
10452 | return true; | |||
10453 | break; | |||
10454 | case AArch64CC::LT: | |||
10455 | case AArch64CC::GE: | |||
10456 | if ((AddConstant == 0) || | |||
10457 | (AddConstant >= 0 && CompConstant <= 0) || | |||
10458 | (AddConstant <= 0 && CompConstant <= 0 && CompConstant <= AddConstant)) | |||
10459 | return true; | |||
10460 | break; | |||
10461 | case AArch64CC::HI: | |||
10462 | case AArch64CC::LS: | |||
10463 | if ((AddConstant >= 0 && CompConstant < 0) || | |||
10464 | (AddConstant <= 0 && CompConstant >= -1 && | |||
10465 | CompConstant < AddConstant + MaxUInt)) | |||
10466 | return true; | |||
10467 | break; | |||
10468 | case AArch64CC::PL: | |||
10469 | case AArch64CC::MI: | |||
10470 | if ((AddConstant == 0) || | |||
10471 | (AddConstant > 0 && CompConstant <= 0) || | |||
10472 | (AddConstant < 0 && CompConstant <= AddConstant)) | |||
10473 | return true; | |||
10474 | break; | |||
10475 | case AArch64CC::LO: | |||
10476 | case AArch64CC::HS: | |||
10477 | if ((AddConstant >= 0 && CompConstant <= 0) || | |||
10478 | (AddConstant <= 0 && CompConstant >= 0 && | |||
10479 | CompConstant <= AddConstant + MaxUInt)) | |||
10480 | return true; | |||
10481 | break; | |||
10482 | case AArch64CC::EQ: | |||
10483 | case AArch64CC::NE: | |||
10484 | if ((AddConstant > 0 && CompConstant < 0) || | |||
10485 | (AddConstant < 0 && CompConstant >= 0 && | |||
10486 | CompConstant < AddConstant + MaxUInt) || | |||
10487 | (AddConstant >= 0 && CompConstant >= 0 && | |||
10488 | CompConstant >= AddConstant) || | |||
10489 | (AddConstant <= 0 && CompConstant < 0 && CompConstant < AddConstant)) | |||
10490 | return true; | |||
10491 | break; | |||
10492 | case AArch64CC::VS: | |||
10493 | case AArch64CC::VC: | |||
10494 | case AArch64CC::AL: | |||
10495 | case AArch64CC::NV: | |||
10496 | return true; | |||
10497 | case AArch64CC::Invalid: | |||
10498 | break; | |||
10499 | } | |||
10500 | ||||
10501 | return false; | |||
10502 | } | |||
10503 | ||||
10504 | static | |||
10505 | SDValue performCONDCombine(SDNode *N, | |||
10506 | TargetLowering::DAGCombinerInfo &DCI, | |||
10507 | SelectionDAG &DAG, unsigned CCIndex, | |||
10508 | unsigned CmpIndex) { | |||
10509 | unsigned CC = cast<ConstantSDNode>(N->getOperand(CCIndex))->getSExtValue(); | |||
10510 | SDNode *SubsNode = N->getOperand(CmpIndex).getNode(); | |||
10511 | unsigned CondOpcode = SubsNode->getOpcode(); | |||
10512 | ||||
10513 | if (CondOpcode != AArch64ISD::SUBS) | |||
10514 | return SDValue(); | |||
10515 | ||||
10516 | // There is a SUBS feeding this condition. Is it fed by a mask we can | |||
10517 | // use? | |||
10518 | ||||
10519 | SDNode *AndNode = SubsNode->getOperand(0).getNode(); | |||
10520 | unsigned MaskBits = 0; | |||
10521 | ||||
10522 | if (AndNode->getOpcode() != ISD::AND) | |||
10523 | return SDValue(); | |||
10524 | ||||
10525 | if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(AndNode->getOperand(1))) { | |||
10526 | uint32_t CNV = CN->getZExtValue(); | |||
10527 | if (CNV == 255) | |||
10528 | MaskBits = 8; | |||
10529 | else if (CNV == 65535) | |||
10530 | MaskBits = 16; | |||
10531 | } | |||
10532 | ||||
10533 | if (!MaskBits) | |||
10534 | return SDValue(); | |||
10535 | ||||
10536 | SDValue AddValue = AndNode->getOperand(0); | |||
10537 | ||||
10538 | if (AddValue.getOpcode() != ISD::ADD) | |||
10539 | return SDValue(); | |||
10540 | ||||
10541 | // The basic dag structure is correct, grab the inputs and validate them. | |||
10542 | ||||
10543 | SDValue AddInputValue1 = AddValue.getNode()->getOperand(0); | |||
10544 | SDValue AddInputValue2 = AddValue.getNode()->getOperand(1); | |||
10545 | SDValue SubsInputValue = SubsNode->getOperand(1); | |||
10546 | ||||
10547 | // The mask is present and the provenance of all the values is a smaller type;
10548 | // let's see if the mask is superfluous.
10549 | ||||
10550 | if (!isa<ConstantSDNode>(AddInputValue2.getNode()) || | |||
10551 | !isa<ConstantSDNode>(SubsInputValue.getNode())) | |||
10552 | return SDValue(); | |||
10553 | ||||
10554 | ISD::LoadExtType ExtType; | |||
10555 | ||||
10556 | if (!checkValueWidth(SubsInputValue, MaskBits, ExtType) || | |||
10557 | !checkValueWidth(AddInputValue2, MaskBits, ExtType) || | |||
10558 | !checkValueWidth(AddInputValue1, MaskBits, ExtType) ) | |||
10559 | return SDValue(); | |||
10560 | ||||
10561 | if(!isEquivalentMaskless(CC, MaskBits, ExtType, | |||
10562 | cast<ConstantSDNode>(AddInputValue2.getNode())->getSExtValue(), | |||
10563 | cast<ConstantSDNode>(SubsInputValue.getNode())->getSExtValue())) | |||
10564 | return SDValue(); | |||
10565 | ||||
10566 | // The AND is not necessary, remove it. | |||
10567 | ||||
10568 | SDVTList VTs = DAG.getVTList(SubsNode->getValueType(0), | |||
10569 | SubsNode->getValueType(1)); | |||
10570 | SDValue Ops[] = { AddValue, SubsNode->getOperand(1) }; | |||
10571 | ||||
10572 | SDValue NewValue = DAG.getNode(CondOpcode, SDLoc(SubsNode), VTs, Ops); | |||
10573 | DAG.ReplaceAllUsesWith(SubsNode, NewValue.getNode()); | |||
10574 | ||||
10575 | return SDValue(N, 0); | |||
10576 | } | |||
10577 | ||||
10578 | // Optimize compare with zero and branch. | |||
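// E.g. (a sketch): (brcond EQ, (SUBS x, 0), dest) --> (CBZ x, dest), and
// likewise NE --> CBNZ, provided only the flag result of the SUBS is used.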
10579 | static SDValue performBRCONDCombine(SDNode *N, | |||
10580 | TargetLowering::DAGCombinerInfo &DCI, | |||
10581 | SelectionDAG &DAG) { | |||
10582 | if (SDValue NV = performCONDCombine(N, DCI, DAG, 2, 3)) | |||
10583 | N = NV.getNode(); | |||
10584 | SDValue Chain = N->getOperand(0); | |||
10585 | SDValue Dest = N->getOperand(1); | |||
10586 | SDValue CCVal = N->getOperand(2); | |||
10587 | SDValue Cmp = N->getOperand(3); | |||
10588 | ||||
10589 | assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!");
10590 | unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue(); | |||
10591 | if (CC != AArch64CC::EQ && CC != AArch64CC::NE) | |||
10592 | return SDValue(); | |||
10593 | ||||
10594 | unsigned CmpOpc = Cmp.getOpcode(); | |||
10595 | if (CmpOpc != AArch64ISD::ADDS && CmpOpc != AArch64ISD::SUBS) | |||
10596 | return SDValue(); | |||
10597 | ||||
10598 | // Only attempt folding if there is only one use of the flag and no use of the | |||
10599 | // value. | |||
10600 | if (!Cmp->hasNUsesOfValue(0, 0) || !Cmp->hasNUsesOfValue(1, 1)) | |||
10601 | return SDValue(); | |||
10602 | ||||
10603 | SDValue LHS = Cmp.getOperand(0); | |||
10604 | SDValue RHS = Cmp.getOperand(1); | |||
10605 | ||||
10606 | assert(LHS.getValueType() == RHS.getValueType() &&
10607 |        "Expected the value type to be the same for both operands!");
10608 | if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64) | |||
10609 | return SDValue(); | |||
10610 | ||||
10611 | if (isNullConstant(LHS)) | |||
10612 | std::swap(LHS, RHS); | |||
10613 | ||||
10614 | if (!isNullConstant(RHS)) | |||
10615 | return SDValue(); | |||
10616 | ||||
10617 | if (LHS.getOpcode() == ISD::SHL || LHS.getOpcode() == ISD::SRA || | |||
10618 | LHS.getOpcode() == ISD::SRL) | |||
10619 | return SDValue(); | |||
10620 | ||||
10621 | // Fold the compare into the branch instruction. | |||
10622 | SDValue BR; | |||
10623 | if (CC == AArch64CC::EQ) | |||
10624 | BR = DAG.getNode(AArch64ISD::CBZ, SDLoc(N), MVT::Other, Chain, LHS, Dest); | |||
10625 | else | |||
10626 | BR = DAG.getNode(AArch64ISD::CBNZ, SDLoc(N), MVT::Other, Chain, LHS, Dest); | |||
10627 | ||||
10628 | // Do not add new nodes to DAG combiner worklist. | |||
10629 | DCI.CombineTo(N, BR, false); | |||
10630 | ||||
10631 | return SDValue(); | |||
10632 | } | |||
10633 | ||||
10634 | // Optimize some simple tbz/tbnz cases. Returns the new operand and bit to test | |||
10635 | // as well as whether the test should be inverted. This code is required to | |||
10636 | // catch these cases (as opposed to standard dag combines) because | |||
10637 | // AArch64ISD::TBZ is matched during legalization. | |||
10638 | static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert, | |||
10639 | SelectionDAG &DAG) { | |||
10640 | ||||
10641 | if (!Op->hasOneUse()) | |||
10642 | return Op; | |||
10643 | ||||
10644 | // We don't handle undef/constant-fold cases below, as they should have | |||
10645 | // already been taken care of (e.g. and of 0, test of undefined shifted bits, | |||
10646 | // etc.) | |||
10647 | ||||
10648 | // (tbz (trunc x), b) -> (tbz x, b) | |||
10649 | // This case is just here to enable more of the below cases to be caught. | |||
10650 | if (Op->getOpcode() == ISD::TRUNCATE && | |||
10651 | Bit < Op->getValueType(0).getSizeInBits()) { | |||
10652 | return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); | |||
10653 | } | |||
10654 | ||||
10655 | if (Op->getNumOperands() != 2) | |||
10656 | return Op; | |||
10657 | ||||
10658 | auto *C = dyn_cast<ConstantSDNode>(Op->getOperand(1)); | |||
10659 | if (!C) | |||
10660 | return Op; | |||
10661 | ||||
10662 | switch (Op->getOpcode()) { | |||
10663 | default: | |||
10664 | return Op; | |||
10665 | ||||
10666 | // (tbz (and x, m), b) -> (tbz x, b) | |||
10667 | case ISD::AND: | |||
10668 | if ((C->getZExtValue() >> Bit) & 1) | |||
10669 | return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); | |||
10670 | return Op; | |||
10671 | ||||
10672 | // (tbz (shl x, c), b) -> (tbz x, b-c) | |||
10673 | case ISD::SHL: | |||
10674 | if (C->getZExtValue() <= Bit && | |||
10675 | (Bit - C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) { | |||
10676 | Bit = Bit - C->getZExtValue(); | |||
10677 | return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); | |||
10678 | } | |||
10679 | return Op; | |||
10680 | ||||
10681 | // (tbz (sra x, c), b) -> (tbz x, b+c) or (tbz x, msb) if b+c is > # bits in x | |||
10682 | case ISD::SRA: | |||
10683 | Bit = Bit + C->getZExtValue(); | |||
10684 | if (Bit >= Op->getValueType(0).getSizeInBits()) | |||
10685 | Bit = Op->getValueType(0).getSizeInBits() - 1; | |||
10686 | return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); | |||
10687 | ||||
10688 | // (tbz (srl x, c), b) -> (tbz x, b+c) | |||
10689 | case ISD::SRL: | |||
10690 | if ((Bit + C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) { | |||
10691 | Bit = Bit + C->getZExtValue(); | |||
10692 | return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); | |||
10693 | } | |||
10694 | return Op; | |||
10695 | ||||
10696 | // (tbz (xor x, -1), b) -> (tbnz x, b) | |||
10697 | case ISD::XOR: | |||
10698 | if ((C->getZExtValue() >> Bit) & 1) | |||
10699 | Invert = !Invert; | |||
10700 | return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); | |||
10701 | } | |||
10702 | } | |||
10703 | ||||
10704 | // Optimize test single bit zero/non-zero and branch. | |||
10705 | static SDValue performTBZCombine(SDNode *N, | |||
10706 | TargetLowering::DAGCombinerInfo &DCI, | |||
10707 | SelectionDAG &DAG) { | |||
10708 | unsigned Bit = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); | |||
10709 | bool Invert = false; | |||
10710 | SDValue TestSrc = N->getOperand(1); | |||
10711 | SDValue NewTestSrc = getTestBitOperand(TestSrc, Bit, Invert, DAG); | |||
10712 | ||||
10713 | if (TestSrc == NewTestSrc) | |||
10714 | return SDValue(); | |||
10715 | ||||
10716 | unsigned NewOpc = N->getOpcode(); | |||
10717 | if (Invert) { | |||
10718 | if (NewOpc == AArch64ISD::TBZ) | |||
10719 | NewOpc = AArch64ISD::TBNZ; | |||
10720 | else { | |||
10721 | assert(NewOpc == AArch64ISD::TBNZ);
10722 | NewOpc = AArch64ISD::TBZ; | |||
10723 | } | |||
10724 | } | |||
10725 | ||||
10726 | SDLoc DL(N); | |||
10727 | return DAG.getNode(NewOpc, DL, MVT::Other, N->getOperand(0), NewTestSrc, | |||
10728 | DAG.getConstant(Bit, DL, MVT::i64), N->getOperand(3)); | |||
10729 | } | |||
10730 | ||||
10731 | // vselect (v1i1 setcc) -> | |||
10732 | // vselect (v1iXX setcc) (XX is the size of the compared operand type) | |||
10733 | // FIXME: Currently the type legalizer can't handle VSELECT having v1i1 as | |||
10734 | // condition. If it can legalize "VSELECT v1i1" correctly, no need to combine | |||
10735 | // such VSELECT. | |||
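// E.g. (a sketch):
//   (vselect (v1i1 (setcc v1f64 a, b, lt)), t, f)
//     --> (vselect (v1i64 (setcc v1f64 a, b, lt)), t, f)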
10736 | static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG) { | |||
10737 | SDValue N0 = N->getOperand(0); | |||
10738 | EVT CCVT = N0.getValueType(); | |||
10739 | ||||
10740 | if (N0.getOpcode() != ISD::SETCC || CCVT.getVectorNumElements() != 1 || | |||
10741 | CCVT.getVectorElementType() != MVT::i1) | |||
10742 | return SDValue(); | |||
10743 | ||||
10744 | EVT ResVT = N->getValueType(0); | |||
10745 | EVT CmpVT = N0.getOperand(0).getValueType(); | |||
10746 | // Only combine when the result type is of the same size as the compared | |||
10747 | // operands. | |||
10748 | if (ResVT.getSizeInBits() != CmpVT.getSizeInBits()) | |||
10749 | return SDValue(); | |||
10750 | ||||
10751 | SDValue IfTrue = N->getOperand(1); | |||
10752 | SDValue IfFalse = N->getOperand(2); | |||
10753 | SDValue SetCC = | |||
10754 | DAG.getSetCC(SDLoc(N), CmpVT.changeVectorElementTypeToInteger(), | |||
10755 | N0.getOperand(0), N0.getOperand(1), | |||
10756 | cast<CondCodeSDNode>(N0.getOperand(2))->get()); | |||
10757 | return DAG.getNode(ISD::VSELECT, SDLoc(N), ResVT, SetCC, | |||
10758 | IfTrue, IfFalse); | |||
10759 | } | |||
10760 | ||||
10761 | /// A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with | |||
10762 | /// the compare-mask instructions rather than going via NZCV, even if LHS and | |||
10763 | /// RHS are really scalar. This replaces any scalar setcc in the above pattern | |||
10764 | /// with a vector one followed by a DUP shuffle on the result. | |||
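/// E.g. (a sketch), for a v2f64 result fed by a scalar f64 compare:
///   (select (setcc f64 a, b, lt), v2f64 t, v2f64 f)
/// becomes a v2i64 setcc on (scalar_to_vector a) and (scalar_to_vector b),
/// a DUP shuffle of lane 0 across the mask, and (vselect mask, t, f).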
10765 | static SDValue performSelectCombine(SDNode *N, | |||
10766 | TargetLowering::DAGCombinerInfo &DCI) { | |||
10767 | SelectionDAG &DAG = DCI.DAG; | |||
10768 | SDValue N0 = N->getOperand(0); | |||
10769 | EVT ResVT = N->getValueType(0); | |||
10770 | ||||
10771 | if (N0.getOpcode() != ISD::SETCC) | |||
10772 | return SDValue(); | |||
10773 | ||||
10774 | // Make sure the SETCC result is either i1 (initial DAG), or i32, the lowered | |||
10775 | // scalar SetCCResultType. We also don't expect vectors, because we assume | |||
10776 | // that selects fed by vector SETCCs are canonicalized to VSELECT. | |||
10777 | assert((N0.getValueType() == MVT::i1 || N0.getValueType() == MVT::i32) &&
10778 |        "Scalar-SETCC feeding SELECT has unexpected result type!");
10779 | ||||
10780 | // If NumMaskElts == 0, the comparison is larger than the select result. The | |||
10781 | // largest real NEON comparison is 64 bits per lane, which means the result is | |||
10782 | // at most 32 bits and an illegal vector. Just bail out for now. | |||
10783 | EVT SrcVT = N0.getOperand(0).getValueType(); | |||
10784 | ||||
10785 | // Don't try to do this optimization when the setcc itself has i1 operands. | |||
10786 | // There are no legal vectors of i1, so this would be pointless. | |||
10787 | if (SrcVT == MVT::i1) | |||
10788 | return SDValue(); | |||
10789 | ||||
10790 | int NumMaskElts = ResVT.getSizeInBits() / SrcVT.getSizeInBits(); | |||
10791 | if (!ResVT.isVector() || NumMaskElts == 0) | |||
10792 | return SDValue(); | |||
10793 | ||||
10794 | SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumMaskElts); | |||
10795 | EVT CCVT = SrcVT.changeVectorElementTypeToInteger(); | |||
10796 | ||||
10797 | // Also bail out if the vector CCVT isn't the same size as ResVT. | |||
10798 | // This can happen if the SETCC operand size doesn't divide the ResVT size | |||
10799 | // (e.g., f64 vs v3f32). | |||
10800 | if (CCVT.getSizeInBits() != ResVT.getSizeInBits()) | |||
10801 | return SDValue(); | |||
10802 | ||||
10803 | // Make sure we didn't create illegal types, if we're not supposed to. | |||
10804 | assert(DCI.isBeforeLegalize() || | |||
10805 | DAG.getTargetLoweringInfo().isTypeLegal(SrcVT)); | |||
10806 | ||||
10807 | // First perform a vector comparison, where lane 0 is the one we're interested | |||
10808 | // in. | |||
10809 | SDLoc DL(N0); | |||
10810 | SDValue LHS = | |||
10811 | DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(0)); | |||
10812 | SDValue RHS = | |||
10813 | DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(1)); | |||
10814 | SDValue SetCC = DAG.getNode(ISD::SETCC, DL, CCVT, LHS, RHS, N0.getOperand(2)); | |||
10815 | ||||
10816 | // Now duplicate the comparison mask we want across all other lanes. | |||
10817 | SmallVector<int, 8> DUPMask(CCVT.getVectorNumElements(), 0); | |||
10818 | SDValue Mask = DAG.getVectorShuffle(CCVT, DL, SetCC, SetCC, DUPMask); | |||
10819 | Mask = DAG.getNode(ISD::BITCAST, DL, | |||
10820 | ResVT.changeVectorElementTypeToInteger(), Mask); | |||
10821 | ||||
10822 | return DAG.getSelect(DL, ResVT, Mask, N->getOperand(1), N->getOperand(2)); | |||
10823 | } | |||
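// [Editor's sketch, illustrative types] For a scalar compare feeding a vector
// select, e.g.
//   (select (setcc i32:a, i32:b, setlt), v4i32:t, v4i32:f)
// the code above builds
//   va   = (scalar_to_vector v4i32, a)
//   vb   = (scalar_to_vector v4i32, b)
//   cc   = (setcc v4i32, va, vb, setlt)        // lane 0 holds the result
//   mask = (vector_shuffle cc, cc, <0,0,0,0>)  // DUP lane 0 everywhere
//   (vselect mask, t, f)
// keeping the whole select in the vector pipeline as a compare-mask select.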
10824 | ||||
10825 | /// Get rid of unnecessary NVCASTs (that don't change the type). | |||
10826 | static SDValue performNVCASTCombine(SDNode *N) { | |||
10827 | if (N->getValueType(0) == N->getOperand(0).getValueType()) | |||
10828 | return N->getOperand(0); | |||
10829 | ||||
10830 | return SDValue(); | |||
10831 | } | |||
10832 | ||||
10833 | // If all users of the globaladdr are of the form (globaladdr + constant), find | |||
10834 | // the smallest constant, fold it into the globaladdr's offset and rewrite the | |||
10835 | // globaladdr as (globaladdr + constant) - constant. | |||
10836 | static SDValue performGlobalAddressCombine(SDNode *N, SelectionDAG &DAG, | |||
10837 | const AArch64Subtarget *Subtarget, | |||
10838 | const TargetMachine &TM) { | |||
10839 | auto *GN = dyn_cast<GlobalAddressSDNode>(N); | |||
10840 | if (!GN || Subtarget->ClassifyGlobalReference(GN->getGlobal(), TM) != | |||
10841 | AArch64II::MO_NO_FLAG) | |||
10842 | return SDValue(); | |||
10843 | ||||
10844 | uint64_t MinOffset = -1ull; | |||
10845 | for (SDNode *Use : GN->uses()) { | |||
10846 | if (Use->getOpcode() != ISD::ADD) | |||
10847 | return SDValue(); | |||
10848 | auto *C = dyn_cast<ConstantSDNode>(Use->getOperand(0)); | |||
10849 | if (!C) | |||
10850 | C = dyn_cast<ConstantSDNode>(Use->getOperand(1)); | |||
10851 | if (!C) | |||
10852 | return SDValue(); | |||
10853 | MinOffset = std::min(MinOffset, C->getZExtValue()); | |||
10854 | } | |||
10855 | uint64_t Offset = MinOffset + GN->getOffset(); | |||
10856 | ||||
10857 | // Require that the new offset is larger than the existing one. Otherwise, we | |||
10858 | // can end up oscillating between two possible DAGs, for example, | |||
10859 | // (add (add globaladdr + 10, -1), 1) and (add globaladdr + 9, 1). | |||
10860 | if (Offset <= uint64_t(GN->getOffset())) | |||
10861 | return SDValue(); | |||
10862 | ||||
10863 | // Check whether folding this offset is legal. It must not go out of bounds of | |||
10864 | // the referenced object to avoid violating the code model, and must be | |||
10865 | // smaller than 2^21 because this is the largest offset expressible in all | |||
10866 | // object formats. | |||
10867 | // | |||
10868 | // This check also prevents us from folding negative offsets, which will end | |||
10869 | // up being treated in the same way as large positive ones. They could also | |||
10870 | // cause code model violations, and aren't really common enough to matter. | |||
10871 | if (Offset >= (1 << 21)) | |||
10872 | return SDValue(); | |||
10873 | ||||
10874 | const GlobalValue *GV = GN->getGlobal(); | |||
10875 | Type *T = GV->getValueType(); | |||
10876 | if (!T->isSized() || | |||
10877 | Offset > GV->getParent()->getDataLayout().getTypeAllocSize(T)) | |||
10878 | return SDValue(); | |||
10879 | ||||
10880 | SDLoc DL(GN); | |||
10881 | SDValue Result = DAG.getGlobalAddress(GV, DL, MVT::i64, Offset); | |||
10882 | return DAG.getNode(ISD::SUB, DL, MVT::i64, Result, | |||
10883 | DAG.getConstant(MinOffset, DL, MVT::i64)); | |||
10884 | } | |||
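// [Editor's worked example, hypothetical global @g] If the only users are
//   (add (globaladdr @g), 8) and (add (globaladdr @g), 24)
// then MinOffset = 8 and @g is rewritten as (sub (globaladdr @g + 8), 8).
// Once the adds fold their constants, the users become
//   (globaladdr @g + 8) and (add (globaladdr @g + 8), 16)
// so the smallest-offset access uses the folded global directly.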
10885 | ||||
10886 | SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N, | |||
10887 | DAGCombinerInfo &DCI) const { | |||
10888 | SelectionDAG &DAG = DCI.DAG; | |||
10889 | switch (N->getOpcode()) { | |||
10890 | default: | |||
10891 | LLVM_DEBUG(dbgs() << "Custom combining: skipping\n"); | |||
10892 | break; | |||
10893 | case ISD::ADD: | |||
10894 | case ISD::SUB: | |||
10895 | return performAddSubLongCombine(N, DCI, DAG); | |||
10896 | case ISD::XOR: | |||
10897 | return performXorCombine(N, DAG, DCI, Subtarget); | |||
10898 | case ISD::MUL: | |||
10899 | return performMulCombine(N, DAG, DCI, Subtarget); | |||
10900 | case ISD::SINT_TO_FP: | |||
10901 | case ISD::UINT_TO_FP: | |||
10902 | return performIntToFpCombine(N, DAG, Subtarget); | |||
10903 | case ISD::FP_TO_SINT: | |||
10904 | case ISD::FP_TO_UINT: | |||
10905 | return performFpToIntCombine(N, DAG, DCI, Subtarget); | |||
10906 | case ISD::FDIV: | |||
10907 | return performFDivCombine(N, DAG, DCI, Subtarget); | |||
10908 | case ISD::OR: | |||
10909 | return performORCombine(N, DCI, Subtarget); | |||
10910 | case ISD::SRL: | |||
10911 | return performSRLCombine(N, DCI); | |||
10912 | case ISD::INTRINSIC_WO_CHAIN: | |||
10913 | return performIntrinsicCombine(N, DCI, Subtarget); | |||
10914 | case ISD::ANY_EXTEND: | |||
10915 | case ISD::ZERO_EXTEND: | |||
10916 | case ISD::SIGN_EXTEND: | |||
10917 | return performExtendCombine(N, DCI, DAG); | |||
10918 | case ISD::BITCAST: | |||
10919 | return performBitcastCombine(N, DCI, DAG); | |||
10920 | case ISD::CONCAT_VECTORS: | |||
10921 | return performConcatVectorsCombine(N, DCI, DAG); | |||
10922 | case ISD::SELECT: | |||
10923 | return performSelectCombine(N, DCI); | |||
10924 | case ISD::VSELECT: | |||
10925 | return performVSelectCombine(N, DCI.DAG); | |||
10926 | case ISD::LOAD: | |||
10927 | if (performTBISimplification(N->getOperand(1), DCI, DAG)) | |||
10928 | return SDValue(N, 0); | |||
10929 | break; | |||
10930 | case ISD::STORE: | |||
10931 | return performSTORECombine(N, DCI, DAG, Subtarget); | |||
10932 | case AArch64ISD::BRCOND: | |||
10933 | return performBRCONDCombine(N, DCI, DAG); | |||
10934 | case AArch64ISD::TBNZ: | |||
10935 | case AArch64ISD::TBZ: | |||
10936 | return performTBZCombine(N, DCI, DAG); | |||
10937 | case AArch64ISD::CSEL: | |||
10938 | return performCONDCombine(N, DCI, DAG, 2, 3); | |||
10939 | case AArch64ISD::DUP: | |||
10940 | return performPostLD1Combine(N, DCI, false); | |||
10941 | case AArch64ISD::NVCAST: | |||
10942 | return performNVCASTCombine(N); | |||
10943 | case ISD::INSERT_VECTOR_ELT: | |||
10944 | return performPostLD1Combine(N, DCI, true); | |||
10945 | case ISD::INTRINSIC_VOID: | |||
10946 | case ISD::INTRINSIC_W_CHAIN: | |||
10947 | switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { | |||
10948 | case Intrinsic::aarch64_neon_ld2: | |||
10949 | case Intrinsic::aarch64_neon_ld3: | |||
10950 | case Intrinsic::aarch64_neon_ld4: | |||
10951 | case Intrinsic::aarch64_neon_ld1x2: | |||
10952 | case Intrinsic::aarch64_neon_ld1x3: | |||
10953 | case Intrinsic::aarch64_neon_ld1x4: | |||
10954 | case Intrinsic::aarch64_neon_ld2lane: | |||
10955 | case Intrinsic::aarch64_neon_ld3lane: | |||
10956 | case Intrinsic::aarch64_neon_ld4lane: | |||
10957 | case Intrinsic::aarch64_neon_ld2r: | |||
10958 | case Intrinsic::aarch64_neon_ld3r: | |||
10959 | case Intrinsic::aarch64_neon_ld4r: | |||
10960 | case Intrinsic::aarch64_neon_st2: | |||
10961 | case Intrinsic::aarch64_neon_st3: | |||
10962 | case Intrinsic::aarch64_neon_st4: | |||
10963 | case Intrinsic::aarch64_neon_st1x2: | |||
10964 | case Intrinsic::aarch64_neon_st1x3: | |||
10965 | case Intrinsic::aarch64_neon_st1x4: | |||
10966 | case Intrinsic::aarch64_neon_st2lane: | |||
10967 | case Intrinsic::aarch64_neon_st3lane: | |||
10968 | case Intrinsic::aarch64_neon_st4lane: | |||
10969 | return performNEONPostLDSTCombine(N, DCI, DAG); | |||
10970 | default: | |||
10971 | break; | |||
10972 | } break; // Don't fall through into the GlobalAddress case below. | |||
10973 | case ISD::GlobalAddress: | |||
10974 | return performGlobalAddressCombine(N, DAG, Subtarget, getTargetMachine()); | |||
10975 | } | |||
10976 | return SDValue(); | |||
10977 | } | |||
10978 | ||||
10979 | // Check if the return value is used as only a return value, as otherwise | |||
10980 | // we can't perform a tail-call. In particular, we need to check for | |||
10981 | // target ISD nodes that are returns and any other "odd" constructs | |||
10982 | // that the generic analysis code won't necessarily catch. | |||
10983 | bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N, | |||
10984 | SDValue &Chain) const { | |||
10985 | if (N->getNumValues() != 1) | |||
10986 | return false; | |||
10987 | if (!N->hasNUsesOfValue(1, 0)) | |||
10988 | return false; | |||
10989 | ||||
10990 | SDValue TCChain = Chain; | |||
10991 | SDNode *Copy = *N->use_begin(); | |||
10992 | if (Copy->getOpcode() == ISD::CopyToReg) { | |||
10993 | // If the copy has a glue operand, we conservatively assume it isn't safe to | |||
10994 | // perform a tail call. | |||
10995 | if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() == | |||
10996 | MVT::Glue) | |||
10997 | return false; | |||
10998 | TCChain = Copy->getOperand(0); | |||
10999 | } else if (Copy->getOpcode() != ISD::FP_EXTEND) | |||
11000 | return false; | |||
11001 | ||||
11002 | bool HasRet = false; | |||
11003 | for (SDNode *Node : Copy->uses()) { | |||
11004 | if (Node->getOpcode() != AArch64ISD::RET_FLAG) | |||
11005 | return false; | |||
11006 | HasRet = true; | |||
11007 | } | |||
11008 | ||||
11009 | if (!HasRet) | |||
11010 | return false; | |||
11011 | ||||
11012 | Chain = TCChain; | |||
11013 | return true; | |||
11014 | } | |||
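// [Editor's note] Schematically, the accepted DAG shape is
//   result -> (CopyToReg chain, physreg, result) -> (AArch64ISD::RET_FLAG)
// or result -> (FP_EXTEND) -> (RET_FLAG), with no glue operand on the copy;
// any other use conservatively disables the tail call.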
11015 | ||||
11016 | // Return whether an instruction can potentially be optimized to a tail | |||
11017 | // call. This will cause the optimizers to attempt to move, or duplicate, | |||
11018 | // return instructions to help enable tail call optimizations for this | |||
11019 | // instruction. | |||
11020 | bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { | |||
11021 | return CI->isTailCall(); | |||
11022 | } | |||
11023 | ||||
11024 | bool AArch64TargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base, | |||
11025 | SDValue &Offset, | |||
11026 | ISD::MemIndexedMode &AM, | |||
11027 | bool &IsInc, | |||
11028 | SelectionDAG &DAG) const { | |||
11029 | if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) | |||
11030 | return false; | |||
11031 | ||||
11032 | Base = Op->getOperand(0); | |||
11033 | // All of the indexed addressing mode instructions take a signed | |||
11034 | // 9 bit immediate offset. | |||
11035 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) { | |||
11036 | int64_t RHSC = RHS->getSExtValue(); | |||
11037 | if (Op->getOpcode() == ISD::SUB) | |||
11038 | RHSC = -(uint64_t)RHSC; | |||
11039 | if (!isInt<9>(RHSC)) | |||
11040 | return false; | |||
11041 | IsInc = (Op->getOpcode() == ISD::ADD); | |||
11042 | Offset = Op->getOperand(1); | |||
11043 | return true; | |||
11044 | } | |||
11045 | return false; | |||
11046 | } | |||
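// [Editor's sketch] For a pointer update like (add x1, 16) this returns
// Base = x1, Offset = 16, IsInc = true; 16 fits the signed 9-bit window
// [-256, 255], so the update can later fold into, e.g.,
//   ldr w0, [x1, #16]!   // pre-indexed
//   ldr w0, [x1], #16    // post-indexed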
11047 | ||||
11048 | bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, | |||
11049 | SDValue &Offset, | |||
11050 | ISD::MemIndexedMode &AM, | |||
11051 | SelectionDAG &DAG) const { | |||
11052 | EVT VT; | |||
11053 | SDValue Ptr; | |||
11054 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { | |||
11055 | VT = LD->getMemoryVT(); | |||
11056 | Ptr = LD->getBasePtr(); | |||
11057 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { | |||
11058 | VT = ST->getMemoryVT(); | |||
11059 | Ptr = ST->getBasePtr(); | |||
11060 | } else | |||
11061 | return false; | |||
11062 | ||||
11063 | bool IsInc; | |||
11064 | if (!getIndexedAddressParts(Ptr.getNode(), Base, Offset, AM, IsInc, DAG)) | |||
11065 | return false; | |||
11066 | AM = IsInc ? ISD::PRE_INC : ISD::PRE_DEC; | |||
11067 | return true; | |||
11068 | } | |||
11069 | ||||
11070 | bool AArch64TargetLowering::getPostIndexedAddressParts( | |||
11071 | SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, | |||
11072 | ISD::MemIndexedMode &AM, SelectionDAG &DAG) const { | |||
11073 | EVT VT; | |||
11074 | SDValue Ptr; | |||
11075 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { | |||
11076 | VT = LD->getMemoryVT(); | |||
11077 | Ptr = LD->getBasePtr(); | |||
11078 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { | |||
11079 | VT = ST->getMemoryVT(); | |||
11080 | Ptr = ST->getBasePtr(); | |||
11081 | } else | |||
11082 | return false; | |||
11083 | ||||
11084 | bool IsInc; | |||
11085 | if (!getIndexedAddressParts(Op, Base, Offset, AM, IsInc, DAG)) | |||
11086 | return false; | |||
11087 | // Post-indexing updates the base, so it's not a valid transform | |||
11088 | // if that's not the same as the load's pointer. | |||
11089 | if (Ptr != Base) | |||
11090 | return false; | |||
11091 | AM = IsInc ? ISD::POST_INC : ISD::POST_DEC; | |||
11092 | return true; | |||
11093 | } | |||
11094 | ||||
11095 | static void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl<SDValue> &Results, | |||
11096 | SelectionDAG &DAG) { | |||
11097 | SDLoc DL(N); | |||
11098 | SDValue Op = N->getOperand(0); | |||
11099 | ||||
11100 | if (N->getValueType(0) != MVT::i16 || Op.getValueType() != MVT::f16) | |||
11101 | return; | |||
11102 | ||||
11103 | Op = SDValue( | |||
11104 | DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32, | |||
11105 | DAG.getUNDEF(MVT::i32), Op, | |||
11106 | DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)), | |||
11107 | 0); | |||
11108 | Op = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op); | |||
11109 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Op)); | |||
11110 | } | |||
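// [Editor's sketch] The expansion above turns (i16 (bitcast f16:h)) into
//   wide = INSERT_SUBREG(undef, h, hsub)   // h placed in the low 16 bits
//   bits = (i32 (bitcast wide))
//   (i16 (truncate bits))
// Since i16 is not a legal register type, the value travels via a 32-bit
// FPR/GPR pair.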
11111 | ||||
11112 | static void ReplaceReductionResults(SDNode *N, | |||
11113 | SmallVectorImpl<SDValue> &Results, | |||
11114 | SelectionDAG &DAG, unsigned InterOp, | |||
11115 | unsigned AcrossOp) { | |||
11116 | EVT LoVT, HiVT; | |||
11117 | SDValue Lo, Hi; | |||
11118 | SDLoc dl(N); | |||
11119 | std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); | |||
11120 | std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0); | |||
11121 | SDValue InterVal = DAG.getNode(InterOp, dl, LoVT, Lo, Hi); | |||
11122 | SDValue SplitVal = DAG.getNode(AcrossOp, dl, LoVT, InterVal); | |||
11123 | Results.push_back(SplitVal); | |||
11124 | } | |||
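// [Editor's sketch] The halving step above implements, schematically,
//   (AcrossOp wide)  ==>  (AcrossOp (InterOp lo-half, hi-half))
// e.g. an ADD-across of a wide vector becomes an element-wise ADD of its two
// halves followed by an ADD-across of the half-width vector.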
11125 | ||||
11126 | static std::pair<SDValue, SDValue> splitInt128(SDValue N, SelectionDAG &DAG) { | |||
11127 | SDLoc DL(N); | |||
11128 | SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, N); | |||
11129 | SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, | |||
11130 | DAG.getNode(ISD::SRL, DL, MVT::i128, N, | |||
11131 | DAG.getConstant(64, DL, MVT::i64))); | |||
11132 | return std::make_pair(Lo, Hi); | |||
11133 | } | |||
11134 | ||||
11135 | // Create an even/odd pair of X registers holding integer value V. | |||
11136 | static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) { | |||
11137 | SDLoc dl(V.getNode()); | |||
11138 | SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i64); | |||
11139 | SDValue VHi = DAG.getAnyExtOrTrunc( | |||
11140 | DAG.getNode(ISD::SRL, dl, MVT::i128, V, DAG.getConstant(64, dl, MVT::i64)), | |||
11141 | dl, MVT::i64); | |||
11142 | if (DAG.getDataLayout().isBigEndian()) | |||
11143 | std::swap(VLo, VHi); | |||
11144 | SDValue RegClass = | |||
11145 | DAG.getTargetConstant(AArch64::XSeqPairsClassRegClassID, dl, MVT::i32); | |||
11146 | SDValue SubReg0 = DAG.getTargetConstant(AArch64::sube64, dl, MVT::i32); | |||
11147 | SDValue SubReg1 = DAG.getTargetConstant(AArch64::subo64, dl, MVT::i32); | |||
11148 | const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 }; | |||
11149 | return SDValue( | |||
11150 | DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0); | |||
11151 | } | |||
11152 | ||||
11153 | static void ReplaceCMP_SWAP_128Results(SDNode *N, | |||
11154 | SmallVectorImpl<SDValue> &Results, | |||
11155 | SelectionDAG &DAG, | |||
11156 | const AArch64Subtarget *Subtarget) { | |||
11157 | assert(N->getValueType(0) == MVT::i128 && | |||
11158 | "AtomicCmpSwap on types less than 128 should be legal"); | |||
11159 | ||||
11160 | if (Subtarget->hasLSE()) { | |||
11161 | // LSE has a 128-bit compare and swap (CASP), but i128 is not a legal type, | |||
11162 | // so lower it here, wrapped in REG_SEQUENCE and EXTRACT_SUBREG. | |||
11163 | SDValue Ops[] = { | |||
11164 | createGPRPairNode(DAG, N->getOperand(2)), // Compare value | |||
11165 | createGPRPairNode(DAG, N->getOperand(3)), // Store value | |||
11166 | N->getOperand(1), // Ptr | |||
11167 | N->getOperand(0), // Chain in | |||
11168 | }; | |||
11169 | ||||
11170 | MachineFunction &MF = DAG.getMachineFunction(); | |||
11171 | MachineSDNode::mmo_iterator MemOp = MF.allocateMemRefsArray(1); | |||
11172 | MemOp[0] = cast<MemSDNode>(N)->getMemOperand(); | |||
11173 | ||||
11174 | unsigned Opcode; | |||
11175 | switch (MemOp[0]->getOrdering()) { | |||
11176 | case AtomicOrdering::Monotonic: | |||
11177 | Opcode = AArch64::CASPX; | |||
11178 | break; | |||
11179 | case AtomicOrdering::Acquire: | |||
11180 | Opcode = AArch64::CASPAX; | |||
11181 | break; | |||
11182 | case AtomicOrdering::Release: | |||
11183 | Opcode = AArch64::CASPLX; | |||
11184 | break; | |||
11185 | case AtomicOrdering::AcquireRelease: | |||
11186 | case AtomicOrdering::SequentiallyConsistent: | |||
11187 | Opcode = AArch64::CASPALX; | |||
11188 | break; | |||
11189 | default: | |||
11190 | llvm_unreachable("Unexpected ordering!")::llvm::llvm_unreachable_internal("Unexpected ordering!", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 11190); | |||
11191 | } | |||
11192 | ||||
11193 | MachineSDNode *CmpSwap = DAG.getMachineNode( | |||
11194 | Opcode, SDLoc(N), DAG.getVTList(MVT::Untyped, MVT::Other), Ops); | |||
11195 | CmpSwap->setMemRefs(MemOp, MemOp + 1); | |||
11196 | ||||
11197 | unsigned SubReg1 = AArch64::sube64, SubReg2 = AArch64::subo64; | |||
11198 | if (DAG.getDataLayout().isBigEndian()) | |||
11199 | std::swap(SubReg1, SubReg2); | |||
11200 | Results.push_back(DAG.getTargetExtractSubreg(SubReg1, SDLoc(N), MVT::i64, | |||
11201 | SDValue(CmpSwap, 0))); | |||
11202 | Results.push_back(DAG.getTargetExtractSubreg(SubReg2, SDLoc(N), MVT::i64, | |||
11203 | SDValue(CmpSwap, 0))); | |||
11204 | Results.push_back(SDValue(CmpSwap, 1)); // Chain out | |||
11205 | return; | |||
11206 | } | |||
11207 | ||||
11208 | auto Desired = splitInt128(N->getOperand(2), DAG); | |||
11209 | auto New = splitInt128(N->getOperand(3), DAG); | |||
11210 | SDValue Ops[] = {N->getOperand(1), Desired.first, Desired.second, | |||
11211 | New.first, New.second, N->getOperand(0)}; | |||
11212 | SDNode *CmpSwap = DAG.getMachineNode( | |||
11213 | AArch64::CMP_SWAP_128, SDLoc(N), | |||
11214 | DAG.getVTList(MVT::i64, MVT::i64, MVT::i32, MVT::Other), Ops); | |||
11215 | ||||
11216 | MachineFunction &MF = DAG.getMachineFunction(); | |||
11217 | MachineSDNode::mmo_iterator MemOp = MF.allocateMemRefsArray(1); | |||
11218 | MemOp[0] = cast<MemSDNode>(N)->getMemOperand(); | |||
11219 | cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1); | |||
11220 | ||||
11221 | Results.push_back(SDValue(CmpSwap, 0)); | |||
11222 | Results.push_back(SDValue(CmpSwap, 1)); | |||
11223 | Results.push_back(SDValue(CmpSwap, 3)); | |||
11224 | } | |||
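// [Editor's note, registers illustrative] In machine code the two paths above
// end up as roughly
//   caspal x0, x1, x2, x3, [x4]    // LSE, seq_cst; even/odd register pairs
// versus an LL/SC retry loop expanded late from the CMP_SWAP_128 pseudo:
//   1: ldaxp  x8, x9, [x4]         // load-exclusive current 128-bit value
//      ...compare with the expected pair, branch out on mismatch...
//      stlxp  w10, x6, x7, [x4]    // try to store the new value
//      cbnz   w10, 1b              // retry if the exclusive monitor was lost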
11225 | ||||
11226 | void AArch64TargetLowering::ReplaceNodeResults( | |||
11227 | SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const { | |||
11228 | switch (N->getOpcode()) { | |||
11229 | default: | |||
11230 | llvm_unreachable("Don't know how to custom expand this")::llvm::llvm_unreachable_internal("Don't know how to custom expand this" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 11230); | |||
11231 | case ISD::BITCAST: | |||
11232 | ReplaceBITCASTResults(N, Results, DAG); | |||
11233 | return; | |||
11234 | case ISD::VECREDUCE_ADD: | |||
11235 | case ISD::VECREDUCE_SMAX: | |||
11236 | case ISD::VECREDUCE_SMIN: | |||
11237 | case ISD::VECREDUCE_UMAX: | |||
11238 | case ISD::VECREDUCE_UMIN: | |||
11239 | Results.push_back(LowerVECREDUCE(SDValue(N, 0), DAG)); | |||
11240 | return; | |||
11241 | ||||
11242 | case AArch64ISD::SADDV: | |||
11243 | ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::SADDV); | |||
11244 | return; | |||
11245 | case AArch64ISD::UADDV: | |||
11246 | ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::UADDV); | |||
11247 | return; | |||
11248 | case AArch64ISD::SMINV: | |||
11249 | ReplaceReductionResults(N, Results, DAG, ISD::SMIN, AArch64ISD::SMINV); | |||
11250 | return; | |||
11251 | case AArch64ISD::UMINV: | |||
11252 | ReplaceReductionResults(N, Results, DAG, ISD::UMIN, AArch64ISD::UMINV); | |||
11253 | return; | |||
11254 | case AArch64ISD::SMAXV: | |||
11255 | ReplaceReductionResults(N, Results, DAG, ISD::SMAX, AArch64ISD::SMAXV); | |||
11256 | return; | |||
11257 | case AArch64ISD::UMAXV: | |||
11258 | ReplaceReductionResults(N, Results, DAG, ISD::UMAX, AArch64ISD::UMAXV); | |||
11259 | return; | |||
11260 | case ISD::FP_TO_UINT: | |||
11261 | case ISD::FP_TO_SINT: | |||
11262 | assert(N->getValueType(0) == MVT::i128 && "unexpected illegal conversion"); | |||
11263 | // Let normal code take care of it by not adding anything to Results. | |||
11264 | return; | |||
11265 | case ISD::ATOMIC_CMP_SWAP: | |||
11266 | ReplaceCMP_SWAP_128Results(N, Results, DAG, Subtarget); | |||
11267 | return; | |||
11268 | } | |||
11269 | } | |||
11270 | ||||
11271 | bool AArch64TargetLowering::useLoadStackGuardNode() const { | |||
11272 | if (Subtarget->isTargetAndroid() || Subtarget->isTargetFuchsia()) | |||
11273 | return TargetLowering::useLoadStackGuardNode(); | |||
11274 | return true; | |||
11275 | } | |||
11276 | ||||
11277 | unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const { | |||
11278 | // Combine multiple FDIVs with the same divisor into multiple FMULs by the | |||
11279 | // reciprocal if there are three or more FDIVs. | |||
11280 | return 3; | |||
11281 | } | |||
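// [Editor's sketch] With three or more divisions by the same value,
//   a/b; c/b; d/b   ==>   r = 1.0/b; a*r; c*r; d*r
// i.e. one fdiv plus three cheaper fmuls instead of three fdivs (the combine
// only fires under fast-math flags that permit the reciprocal transform).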
11282 | ||||
11283 | TargetLoweringBase::LegalizeTypeAction | |||
11284 | AArch64TargetLowering::getPreferredVectorAction(EVT VT) const { | |||
11285 | MVT SVT = VT.getSimpleVT(); | |||
11286 | // During type legalization, we prefer to widen v1i8, v1i16, v1i32 and v1f32 | |||
11287 | // to v8i8, v4i16, v2i32 and v2f32 instead of promoting them. | |||
11288 | if (SVT == MVT::v1i8 || SVT == MVT::v1i16 || SVT == MVT::v1i32 | |||
11289 | || SVT == MVT::v1f32) | |||
11290 | return TypeWidenVector; | |||
11291 | ||||
11292 | return TargetLoweringBase::getPreferredVectorAction(VT); | |||
11293 | } | |||
11294 | ||||
11295 | // Loads and stores less than 128 bits are already atomic; ones above that | |||
11296 | // are doomed anyway, so defer to the default libcall and blame the OS when | |||
11297 | // things go wrong. | |||
11298 | bool AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { | |||
11299 | unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); | |||
11300 | return Size == 128; | |||
11301 | } | |||
11302 | ||||
11303 | // Loads and stores less than 128 bits are already atomic; ones above that | |||
11304 | // are doomed anyway, so defer to the default libcall and blame the OS when | |||
11305 | // things go wrong. | |||
11306 | TargetLowering::AtomicExpansionKind | |||
11307 | AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { | |||
11308 | unsigned Size = LI->getType()->getPrimitiveSizeInBits(); | |||
11309 | return Size == 128 ? AtomicExpansionKind::LLSC : AtomicExpansionKind::None; | |||
11310 | } | |||
11311 | ||||
11312 | // For the real atomic operations, we have ldxr/stxr up to 128 bits. | |||
11313 | TargetLowering::AtomicExpansionKind | |||
11314 | AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { | |||
11315 | unsigned Size = AI->getType()->getPrimitiveSizeInBits(); | |||
11316 | if (Size > 128) return AtomicExpansionKind::None; | |||
11317 | // Nand not supported in LSE. | |||
11318 | if (AI->getOperation() == AtomicRMWInst::Nand) return AtomicExpansionKind::LLSC; | |||
11319 | // Leave 128 bits to LLSC. | |||
11320 | return (Subtarget->hasLSE() && Size < 128) ? AtomicExpansionKind::None : AtomicExpansionKind::LLSC; | |||
11321 | } | |||
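// [Editor's summary of the policy above]
//   Size > 128                 -> None  (deferred to the atomic libcalls)
//   Nand                       -> LLSC  (LSE has no NAND encoding)
//   hasLSE() && Size < 128     -> None  (native LD<op>/SWP/CAS instructions)
//   otherwise                  -> LLSC  (ldxr/stxr loop emitted in IR)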
11322 | ||||
11323 | bool AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR( | |||
11324 | AtomicCmpXchgInst *AI) const { | |||
11325 | // If subtarget has LSE, leave cmpxchg intact for codegen. | |||
11326 | if (Subtarget->hasLSE()) return false; | |||
11327 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to | |||
11328 | // implement cmpxchg without spilling. If the address being exchanged is also | |||
11329 | // on the stack and close enough to the spill slot, this can lead to a | |||
11330 | // situation where the monitor always gets cleared and the atomic operation | |||
11331 | // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead. | |||
11332 | return getTargetMachine().getOptLevel() != 0; | |||
11333 | } | |||
11334 | ||||
11335 | Value *AArch64TargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr, | |||
11336 | AtomicOrdering Ord) const { | |||
11337 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); | |||
11338 | Type *ValTy = cast<PointerType>(Addr->getType())->getElementType(); | |||
11339 | bool IsAcquire = isAcquireOrStronger(Ord); | |||
11340 | ||||
11341 | // Since i128 isn't legal and intrinsics don't get type-lowered, the ldxp | |||
11342 | // intrinsic must return {i64, i64} and we have to recombine them into a | |||
11343 | // single i128 here. | |||
11344 | if (ValTy->getPrimitiveSizeInBits() == 128) { | |||
11345 | Intrinsic::ID Int = | |||
11346 | IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp; | |||
11347 | Function *Ldxr = Intrinsic::getDeclaration(M, Int); | |||
11348 | ||||
11349 | Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); | |||
11350 | Value *LoHi = Builder.CreateCall(Ldxr, Addr, "lohi"); | |||
11351 | ||||
11352 | Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo"); | |||
11353 | Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi"); | |||
11354 | Lo = Builder.CreateZExt(Lo, ValTy, "lo64"); | |||
11355 | Hi = Builder.CreateZExt(Hi, ValTy, "hi64"); | |||
11356 | return Builder.CreateOr( | |||
11357 | Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64"); | |||
11358 | } | |||
11359 | ||||
11360 | Type *Tys[] = { Addr->getType() }; | |||
11361 | Intrinsic::ID Int = | |||
11362 | IsAcquire ? Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr; | |||
11363 | Function *Ldxr = Intrinsic::getDeclaration(M, Int, Tys); | |||
11364 | ||||
11365 | return Builder.CreateTruncOrBitCast( | |||
11366 | Builder.CreateCall(Ldxr, Addr), | |||
11367 | cast<PointerType>(Addr->getType())->getElementType()); | |||
11368 | } | |||
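// [Editor's sketch, illustrative value names] For a 128-bit acquire load the
// builder above emits IR like:
//   %lohi = call { i64, i64 } @llvm.aarch64.ldaxp(i8* %addr)
//   %lo   = extractvalue { i64, i64 } %lohi, 0
//   %hi   = extractvalue { i64, i64 } %lohi, 1
//   %lo64 = zext i64 %lo to i128
//   %hi64 = zext i64 %hi to i128
//   %shl  = shl i128 %hi64, 64
//   %val  = or i128 %lo64, %shl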
11369 | ||||
11370 | void AArch64TargetLowering::emitAtomicCmpXchgNoStoreLLBalance( | |||
11371 | IRBuilder<> &Builder) const { | |||
11372 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); | |||
11373 | Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::aarch64_clrex)); | |||
11374 | } | |||
11375 | ||||
11376 | Value *AArch64TargetLowering::emitStoreConditional(IRBuilder<> &Builder, | |||
11377 | Value *Val, Value *Addr, | |||
11378 | AtomicOrdering Ord) const { | |||
11379 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); | |||
11380 | bool IsRelease = isReleaseOrStronger(Ord); | |||
11381 | ||||
11382 | // Since the intrinsics must have legal type, the i128 intrinsics take two | |||
11383 | // parameters: "i64, i64". We must marshal Val into the appropriate form | |||
11384 | // before the call. | |||
11385 | if (Val->getType()->getPrimitiveSizeInBits() == 128) { | |||
11386 | Intrinsic::ID Int = | |||
11387 | IsRelease ? Intrinsic::aarch64_stlxp : Intrinsic::aarch64_stxp; | |||
11388 | Function *Stxr = Intrinsic::getDeclaration(M, Int); | |||
11389 | Type *Int64Ty = Type::getInt64Ty(M->getContext()); | |||
11390 | ||||
11391 | Value *Lo = Builder.CreateTrunc(Val, Int64Ty, "lo"); | |||
11392 | Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 64), Int64Ty, "hi"); | |||
11393 | Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); | |||
11394 | return Builder.CreateCall(Stxr, {Lo, Hi, Addr}); | |||
11395 | } | |||
11396 | ||||
11397 | Intrinsic::ID Int = | |||
11398 | IsRelease ? Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr; | |||
11399 | Type *Tys[] = { Addr->getType() }; | |||
11400 | Function *Stxr = Intrinsic::getDeclaration(M, Int, Tys); | |||
11401 | ||||
11402 | return Builder.CreateCall(Stxr, | |||
11403 | {Builder.CreateZExtOrBitCast( | |||
11404 | Val, Stxr->getFunctionType()->getParamType(0)), | |||
11405 | Addr}); | |||
11406 | } | |||
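// [Editor's sketch, illustrative value names] The matching 128-bit release
// store-conditional becomes:
//   %lo  = trunc i128 %val to i64
//   %shr = lshr i128 %val, 64
//   %hi  = trunc i128 %shr to i64
//   %ok  = call i32 @llvm.aarch64.stlxp(i64 %lo, i64 %hi, i8* %addr)
// where %ok is 0 on success and 1 if the exclusive monitor was lost.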
11407 | ||||
11408 | bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters( | |||
11409 | Type *Ty, CallingConv::ID CallConv, bool isVarArg) const { | |||
11410 | return Ty->isArrayTy(); | |||
11411 | } | |||
11412 | ||||
11413 | bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &, | |||
11414 | EVT) const { | |||
11415 | return false; | |||
11416 | } | |||
11417 | ||||
11418 | static Value *UseTlsOffset(IRBuilder<> &IRB, unsigned Offset) { | |||
11419 | Module *M = IRB.GetInsertBlock()->getParent()->getParent(); | |||
11420 | Function *ThreadPointerFunc = | |||
11421 | Intrinsic::getDeclaration(M, Intrinsic::thread_pointer); | |||
11422 | return IRB.CreatePointerCast( | |||
11423 | IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), Offset), | |||
11424 | Type::getInt8PtrTy(IRB.getContext())->getPointerTo(0)); | |||
11425 | } | |||
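// [Editor's sketch] UseTlsOffset(IRB, 0x28) produces roughly
//   %tp   = call i8* @llvm.thread.pointer()
//   %slot = getelementptr i8, i8* %tp, i32 40
//   %loc  = bitcast i8* %slot to i8**
// i.e. an i8** slot located 0x28 bytes past the thread pointer (TPIDR_EL0).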
11426 | ||||
11427 | Value *AArch64TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const { | |||
11428 | // Android provides a fixed TLS slot for the stack cookie. See the definition | |||
11429 | // of TLS_SLOT_STACK_GUARD in | |||
11430 | // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h | |||
11431 | if (Subtarget->isTargetAndroid()) | |||
11432 | return UseTlsOffset(IRB, 0x28); | |||
11433 | ||||
11434 | // Fuchsia is similar. | |||
11435 | // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value. | |||
11436 | if (Subtarget->isTargetFuchsia()) | |||
11437 | return UseTlsOffset(IRB, -0x10); | |||
11438 | ||||
11439 | return TargetLowering::getIRStackGuard(IRB); | |||
11440 | } | |||
11441 | ||||
11442 | Value *AArch64TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const { | |||
11443 | // Android provides a fixed TLS slot for the SafeStack pointer. See the | |||
11444 | // definition of TLS_SLOT_SAFESTACK in | |||
11445 | // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h | |||
11446 | if (Subtarget->isTargetAndroid()) | |||
11447 | return UseTlsOffset(IRB, 0x48); | |||
11448 | ||||
11449 | // Fuchsia is similar. | |||
11450 | // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value. | |||
11451 | if (Subtarget->isTargetFuchsia()) | |||
11452 | return UseTlsOffset(IRB, -0x8); | |||
11453 | ||||
11454 | return TargetLowering::getSafeStackPointerLocation(IRB); | |||
11455 | } | |||
11456 | ||||
11457 | bool AArch64TargetLowering::isMaskAndCmp0FoldingBeneficial( | |||
11458 | const Instruction &AndI) const { | |||
11459 | // Only sink 'and' mask to cmp use block if it is masking a single bit, since | |||
11460 | // this is likely to allow folding the and/cmp/br into a single tbz instruction. It | |||
11461 | // may be beneficial to sink in other cases, but we would have to check that | |||
11462 | // the cmp would not get folded into the br to form a cbz for these to be | |||
11463 | // beneficial. | |||
11464 | ConstantInt* Mask = dyn_cast<ConstantInt>(AndI.getOperand(1)); | |||
11465 | if (!Mask) | |||
11466 | return false; | |||
11467 | return Mask->getValue().isPowerOf2(); | |||
11468 | } | |||
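// [Editor's sketch] The single-bit pattern this aims at:
//   %m = and i64 %x, 16            ; power-of-two mask (bit 4)
//   %c = icmp eq i64 %m, 0
//   br i1 %c, label %a, label %b
// Once the 'and' is sunk next to its compare, the trio can be selected as a
// single "tbz x0, #4, ..." instruction.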
11469 | ||||
11470 | void AArch64TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { | |||
11471 | // Update IsSplitCSR in AArch64FunctionInfo. | |||
11472 | AArch64FunctionInfo *AFI = Entry->getParent()->getInfo<AArch64FunctionInfo>(); | |||
11473 | AFI->setIsSplitCSR(true); | |||
11474 | } | |||
11475 | ||||
11476 | void AArch64TargetLowering::insertCopiesSplitCSR( | |||
11477 | MachineBasicBlock *Entry, | |||
11478 | const SmallVectorImpl<MachineBasicBlock *> &Exits) const { | |||
11479 | const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
11480 | const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); | |||
11481 | if (!IStart) | |||
11482 | return; | |||
11483 | ||||
11484 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | |||
11485 | MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); | |||
11486 | MachineBasicBlock::iterator MBBI = Entry->begin(); | |||
11487 | for (const MCPhysReg *I = IStart; *I; ++I) { | |||
11488 | const TargetRegisterClass *RC = nullptr; | |||
11489 | if (AArch64::GPR64RegClass.contains(*I)) | |||
11490 | RC = &AArch64::GPR64RegClass; | |||
11491 | else if (AArch64::FPR64RegClass.contains(*I)) | |||
11492 | RC = &AArch64::FPR64RegClass; | |||
11493 | else | |||
11494 | llvm_unreachable("Unexpected register class in CSRsViaCopy!")::llvm::llvm_unreachable_internal("Unexpected register class in CSRsViaCopy!" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AArch64ISelLowering.cpp" , 11494); | |||
11495 | ||||
11496 | unsigned NewVR = MRI->createVirtualRegister(RC); | |||
11497 | // Create copy from CSR to a virtual register. | |||
11498 | // FIXME: this currently does not emit CFI pseudo-instructions, it works | |||
11499 | // fine for CXX_FAST_TLS since the C++-style TLS access functions should be | |||
11500 | // nounwind. If we want to generalize this later, we may need to emit | |||
11501 | // CFI pseudo-instructions. | |||
11502 | assert(Entry->getParent()->getFunction().hasFnAttribute( | |||
11503 | Attribute::NoUnwind) && | |||
11504 | "Function should be nounwind in insertCopiesSplitCSR!"); | |||
11505 | Entry->addLiveIn(*I); | |||
11506 | BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) | |||
11507 | .addReg(*I); | |||
11508 | ||||
11509 | // Insert the copy-back instructions right before the terminator. | |||
11510 | for (auto *Exit : Exits) | |||
11511 | BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), | |||
11512 | TII->get(TargetOpcode::COPY), *I) | |||
11513 | .addReg(NewVR); | |||
11514 | } | |||
11515 | } | |||
11516 | ||||
11517 | bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const { | |||
11518 | // Integer division on AArch64 is expensive. However, when aggressively | |||
11519 | // optimizing for code size, we prefer to use a div instruction, as it is | |||
11520 | // usually smaller than the alternative sequence. | |||
11521 | // The exception to this is vector division. Since AArch64 doesn't have vector | |||
11522 | // integer division, leaving the division as-is is a loss even in terms of | |||
11523 | // size, because it will have to be scalarized, while the alternative code | |||
11524 | // sequence can be performed in vector form. | |||
11525 | bool OptSize = | |||
11526 | Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize); | |||
11527 | return OptSize && !VT.isVector(); | |||
11528 | } | |||
11529 | ||||
11530 | bool AArch64TargetLowering::enableAggressiveFMAFusion(EVT VT) const { | |||
11531 | return Subtarget->hasAggressiveFMA() && VT.isFloatingPoint(); | |||
11532 | } | |||
11533 | ||||
11534 | unsigned | |||
11535 | AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const { | |||
11536 | if (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows()) | |||
11537 | return getPointerTy(DL).getSizeInBits(); | |||
11538 | ||||
11539 | return 3 * getPointerTy(DL).getSizeInBits() + 2 * 32; | |||
11540 | } | |||
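// [Editor's note] The 3 * ptr + 2 * i32 figure matches the AAPCS64 va_list:
//   struct va_list { void *__stack, *__gr_top, *__vr_top;
//                    int __gr_offs, __vr_offs; };   // 32 bytes on LP64
// whereas Darwin and Windows use a plain char* cursor, hence pointer-sized.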
11541 | ||||
11542 | void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const { | |||
11543 | MF.getFrameInfo().computeMaxCallFrameSize(MF); | |||
11544 | TargetLoweringBase::finalizeLowering(MF); | |||
11545 | } |