File: lib/Target/SystemZ/SystemZISelLowering.cpp
Warning: line 4567, column 41: The result of the left shift is undefined due to shifting by '18446744073709551615', which is greater or equal to the width of type 'uint64_t'
1 | //===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===// | |||
2 | // | |||
3 | // The LLVM Compiler Infrastructure | |||
4 | // | |||
5 | // This file is distributed under the University of Illinois Open Source | |||
6 | // License. See LICENSE.TXT for details. | |||
7 | // | |||
8 | //===----------------------------------------------------------------------===// | |||
9 | // | |||
10 | // This file implements the SystemZTargetLowering class. | |||
11 | // | |||
12 | //===----------------------------------------------------------------------===// | |||
13 | ||||
14 | #include "SystemZISelLowering.h" | |||
15 | #include "SystemZCallingConv.h" | |||
16 | #include "SystemZConstantPoolValue.h" | |||
17 | #include "SystemZMachineFunctionInfo.h" | |||
18 | #include "SystemZTargetMachine.h" | |||
19 | #include "llvm/CodeGen/CallingConvLower.h" | |||
20 | #include "llvm/CodeGen/MachineInstrBuilder.h" | |||
21 | #include "llvm/CodeGen/MachineRegisterInfo.h" | |||
22 | #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" | |||
23 | #include "llvm/IR/Intrinsics.h" | |||
24 | #include "llvm/IR/IntrinsicInst.h" | |||
25 | #include "llvm/Support/CommandLine.h" | |||
26 | #include "llvm/Support/KnownBits.h" | |||
27 | #include <cctype> | |||
28 | ||||
29 | using namespace llvm; | |||
30 | ||||
#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
    : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value. Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace
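
// Illustrative note on Comparison (roughly speaking): SystemZ condition codes
// are 0-3 and the masks here carry one bit per CC value. For an integer
// "equal" test, CCValid would be the mask of the CC values an integer compare
// can set and CCMask just the "equal" bit; the corresponding mask constants
// (e.g. SystemZ::CCMASK_CMP_EQ) live in SystemZ.h.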

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                                             const SystemZSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
  } else {
    addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  }
  if (Subtarget.hasVectorEnhancements1())
    addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
  else
    addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);
  // For performance reasons we prefer 16-byte alignment.
  setPrefFunctionAlignment(4);
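  // (The alignment arguments above are log2 values in this version of LLVM,
  // so the 4 requests 2^4 = 16-byte alignment, matching the comment.)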

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Support addition/subtraction with overflow.
      setOperationAction(ISD::SADDO, VT, Custom);
      setOperationAction(ISD::SSUBO, VT, Custom);

      // Support addition/subtraction with carry.
      setOperationAction(ISD::UADDO, VT, Custom);
      setOperationAction(ISD::USUBO, VT, Custom);

      // Support carry in as value rather than glue.
      setOperationAction(ISD::ADDCARRY, VT, Custom);
      setOperationAction(ISD::SUBCARRY, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // Use POPCNT on z196 and above.
      if (Subtarget.hasPopulationCount())
        setOperationAction(ISD::CTPOP, VT, Custom);
      else
        setOperationAction(ISD::CTPOP, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      // On z10, promoting to i64 doesn't generate an inexact condition for
      // values that are outside the i32 range but in the i64 range, so use
      // the default expansion.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);

  // Even though i128 is not a legal type, we still need to custom lower
  // the atomic operations in order to exploit SystemZ instructions.
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);

  // We can use the CC result of compare-and-swap to implement
  // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Traps are legal, as we will convert them to "j .+2".
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  }
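
  // Note: the Promote above widens the unsigned i32 operand to i64, and since
  // a zero-extended 32-bit value is always non-negative as an i64, the signed
  // 64-bit conversion then yields the exact result.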

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
  }

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  for (MVT VT : MVT::vector_valuetypes()) {
    // Assume by default that all vector operations need to be expanded.
    for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
      if (getOperationAction(Opcode, VT) == Legal)
        setOperationAction(Opcode, VT, Expand);

    // Likewise all truncating stores and extending loads.
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
    }

    if (isTypeLegal(VT)) {
      // These operations are legal for anything that can be stored in a
      // vector register, even if there is no native support for the format
      // as such. In particular, we can do these for v4f32 even though there
      // are no specific instructions for that format.
      setOperationAction(ISD::LOAD, VT, Legal);
      setOperationAction(ISD::STORE, VT, Legal);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::BITCAST, VT, Legal);
      setOperationAction(ISD::UNDEF, VT, Legal);

      // Likewise, except that we need to replace the nodes with something
      // more specific.
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    }
  }

  // Handle integer vector types.
  for (MVT VT : MVT::integer_vector_valuetypes()) {
    if (isTypeLegal(VT)) {
      // These operations have direct equivalents.
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);
      if (VT != MVT::v2i64)
        setOperationAction(ISD::MUL, VT, Legal);
      setOperationAction(ISD::AND, VT, Legal);
      setOperationAction(ISD::OR, VT, Legal);
      setOperationAction(ISD::XOR, VT, Legal);
      if (Subtarget.hasVectorEnhancements1())
        setOperationAction(ISD::CTPOP, VT, Legal);
      else
        setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Legal);
      setOperationAction(ISD::CTLZ, VT, Legal);

      // Convert a GPR scalar to a vector by inserting it into element 0.
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);

      // Use a series of unpacks for extensions.
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);

      // Detect shifts by a scalar amount and convert them into
      // V*_BY_SCALAR.
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);

      // At present ROTL isn't matched by DAGCombiner. ROTR should be
      // converted into ROTL.
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
      // and inverting the result as necessary.
      setOperationAction(ISD::SETCC, VT, Custom);
    }
  }

  if (Subtarget.hasVector()) {
    // There should be no need to check for float types other than v2f64
    // since <2 x f32> isn't a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);
  }

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
    }
  }

  // Handle floating-point vector types.
  if (Subtarget.hasVector()) {
    // Scalar-to-vector conversion is just a subreg.
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

    // Some insertions and extractions can be done directly but others
    // need to go via integers.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    // These operations have direct equivalents.
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FABS, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
  }

  // The vector enhancements facility 1 has instructions for these.
  if (Subtarget.hasVectorEnhancements1()) {
    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v2f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f128, Legal);
  }

  // We have fused multiply-addition for f32 and f64, and for f128 only
  // when the vector enhancements facility 1 is available.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FMA, MVT::f128, Legal);
  else
    setOperationAction(ISD::FMA, MVT::f128, Expand);

  // We don't have a copysign instruction on vector registers.
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of an f80 constant (in cases where the constant
  // would fit in an f80).
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);

  // We don't have extending load instructions on vector registers.
  if (Subtarget.hasVectorEnhancements1()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
  }

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  if (!Subtarget.hasVector()) {
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f32, Custom);
  }
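
  // (Background note: without vector support, an f32 value lives in the upper
  // half of a 64-bit FPR, so an i32<->f32 bitcast is more than a plain
  // register move; the Custom lowering above does the extra repositioning.)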

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::FP_EXTEND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::SDIV);
  setTargetDAGCombine(ISD::UDIV);
  setTargetDAGCombine(ISD::SREM);
  setTargetDAGCombine(ISD::UREM);

  // Handle intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC". Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return Subtarget.hasVectorEnhancements1();
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // We can use CGFI or CLGFI.
  return isInt<32>(Imm) || isUInt<32>(Imm);
}
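
// For example, a compare with 0xFFFFFFFF is still legal via the unsigned
// CLGFI form even though that value does not fit in a signed 32-bit field.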

bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // We can use ALGFI or SLGFI.
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
}
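
// For example, adding -1 is legal because it can be done as an SLGFI that
// subtracts the unsigned 32-bit value 1.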

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                           unsigned,
                                                           unsigned,
                                                           bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

// Information about the addressing mode for a memory access.
struct AddressingMode {
  // True if a long displacement is supported.
  bool LongDisplacement;

  // True if use of index register is supported.
  bool IndexReg;

  AddressingMode(bool LongDispl, bool IdxReg) :
    LongDisplacement(LongDispl), IndexReg(IdxReg) {}
};

// Return the desired addressing mode for a Load whose sole use (in the
// same block) is a Store.
static AddressingMode getLoadStoreAddrMode(bool HasVector,
                                           Type *Ty) {
  // With vector support a Load->Store combination may be combined to either
  // an MVC or vector operations and it seems to work best to allow the
  // vector addressing mode.
  if (HasVector)
    return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);

  // Otherwise only the MVC case is special.
  bool MVC = Ty->isIntegerTy(8);
  return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/);
}
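
// Illustrative example: an i8 load whose only user is a store is expected to
// become an MVC, which has only a base + 12-bit-displacement operand form, so
// neither a long displacement nor an index register is allowed for it.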

// Return the addressing mode which seems most desirable given an LLVM
// Instruction pointer.
static AddressingMode
supportedAddressingMode(Instruction *I, bool HasVector) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
    }
  }

  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = dyn_cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
            // Comparison of memory with 16 bit signed / unsigned immediate
            return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
      } else if (isa<StoreInst>(SingleUser))
        // Load->Store
        return getLoadStoreAddrMode(HasVector, I->getType());
    }
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
        // Load->Store
        return getLoadStoreAddrMode(HasVector, LoadI->getType());
  }

  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {

    // * Use LDE instead of LE/LEY for z13 to avoid partial register
    //   dependencies (LDE only supports small offsets).
    // * Utilize the vector registers to hold floating point
    //   values (vector load / store instructions only support small
    //   offsets).

    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());
    bool IsFPAccess = MemAccessTy->isFloatingPointTy();
    bool IsVectorAccess = MemAccessTy->isVectorTy();

    // A store of an extracted vector element will be combined into a VSTE type
    // instruction.
    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;
    }

    // A load which gets inserted into a vector element will be combined into a
    // VLE type instruction.
    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;
    }

    if (IsFPAccess || IsVectorAccess)
      return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
  }

  return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
}

bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
       const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  AddressingMode SupportedAM(true, true);
  if (I != nullptr)
    SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());

  if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
    return false;

  if (!SupportedAM.IndexReg)
    // No indexing allowed.
    return AM.Scale == 0;
  else
    // Indexing is OK but no scale factor can be applied.
    return AM.Scale == 0 || AM.Scale == 1;
}
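
// For example, "reg + 524287" is accepted in the general case (it fits in the
// signed 20-bit displacement) but rejected when only short displacements are
// supported, and any mode with a scale factor of 2 or more is never accepted.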

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
    case 'v': // Vector register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'v': // Vector register
    if ((type->isVectorTy() || type->isFloatingPointTy()) &&
        Subtarget.hasVector())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified. RC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    unsigned Index;
    bool Failed =
      Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
    if (!Failed && Index < Size && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}
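
// Illustrative example: for the constraint "{r5}" with VT == MVT::i32, the
// caller passes SystemZMC::GR32Regs, so index 5 resolves to the low 32-bit
// half of r5 (R5L in this backend's internal register naming).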

std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);

    case 'v': // Vector register
      if (Subtarget.hasVector()) {
        if (VT == MVT::f32)
          return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        if (VT == MVT::f64)
          return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);
      }
      break;
    }
  }
  if (Constraint.size() > 0 && Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT. The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs, 16);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs, 16);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs, 16);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs, 16);
    }
    if (Constraint[1] == 'v') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::VR32BitRegClass,
                                   SystemZMC::VR32Regs, 32);
      if (VT == MVT::f64)
        return parseRegisterNumber(Constraint, &SystemZ::VR64BitRegClass,
                                   SystemZMC::VR64Regs, 32);
      return parseRegisterNumber(Constraint, &SystemZ::VR128BitRegClass,
                                 SystemZMC::VR128Regs, 32);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

const MCPhysReg *SystemZTargetLowering::getScratchRegisters(
    CallingConv::ID) const {
  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
                                           SystemZ::R14D, 0 };
  return ScratchRegs;
}

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return CI->isTailCall();
}

// We do not yet support 128-bit single-element vector types. If the user
// attempts to use such types as function argument or return type, prefer
// to error out instead of emitting code violating the ABI.
static void VerifyVectorType(MVT VT, EVT ArgVT) {
  if (ArgVT.isVector() && !VT.isVector())
    report_fatal_error("Unsupported vector argument or return type");
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
  for (unsigned i = 0; i < Ins.size(); ++i)
    VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  for (unsigned i = 0; i < Outs.size(); ++i)
    VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::BCvt) {
    // If this is a short vector argument loaded from the stack,
    // extend from i64 to full vector size and then bitcast.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
    Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
  } else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA. Return a copy of Value converted to
// VA.getLocVT(). The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::BCvt:
    // If this is a short vector argument to be stored to the stack,
    // bitcast to v2i64 and then extract first element.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
                       DAG.getConstant(0, DL, MVT::i32));
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}
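
// Note on the BCvt cases above: a short vector (narrower than 16 bytes) is
// passed in an i64 slot, and both helpers round-trip it through element 0 of
// a v2i64, which on this big-endian target is the leftmost eight bytes.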
1091 | ||||
1092 | SDValue SystemZTargetLowering::LowerFormalArguments( | |||
1093 | SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, | |||
1094 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, | |||
1095 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { | |||
1096 | MachineFunction &MF = DAG.getMachineFunction(); | |||
1097 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
1098 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
1099 | SystemZMachineFunctionInfo *FuncInfo = | |||
1100 | MF.getInfo<SystemZMachineFunctionInfo>(); | |||
1101 | auto *TFL = | |||
1102 | static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering()); | |||
1103 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
1104 | ||||
1105 | // Detect unsupported vector argument types. | |||
1106 | if (Subtarget.hasVector()) | |||
1107 | VerifyVectorTypes(Ins); | |||
1108 | ||||
1109 | // Assign locations to all of the incoming arguments. | |||
1110 | SmallVector<CCValAssign, 16> ArgLocs; | |||
1111 | SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); | |||
1112 | CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ); | |||
1113 | ||||
1114 | unsigned NumFixedGPRs = 0; | |||
1115 | unsigned NumFixedFPRs = 0; | |||
1116 | for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) { | |||
1117 | SDValue ArgValue; | |||
1118 | CCValAssign &VA = ArgLocs[I]; | |||
1119 | EVT LocVT = VA.getLocVT(); | |||
1120 | if (VA.isRegLoc()) { | |||
1121 | // Arguments passed in registers | |||
1122 | const TargetRegisterClass *RC; | |||
1123 | switch (LocVT.getSimpleVT().SimpleTy) { | |||
1124 | default: | |||
1125 | // Integers smaller than i64 should be promoted to i64. | |||
1126 | llvm_unreachable("Unexpected argument type")::llvm::llvm_unreachable_internal("Unexpected argument type", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/SystemZ/SystemZISelLowering.cpp" , 1126); | |||
1127 | case MVT::i32: | |||
1128 | NumFixedGPRs += 1; | |||
1129 | RC = &SystemZ::GR32BitRegClass; | |||
1130 | break; | |||
1131 | case MVT::i64: | |||
1132 | NumFixedGPRs += 1; | |||
1133 | RC = &SystemZ::GR64BitRegClass; | |||
1134 | break; | |||
1135 | case MVT::f32: | |||
1136 | NumFixedFPRs += 1; | |||
1137 | RC = &SystemZ::FP32BitRegClass; | |||
1138 | break; | |||
1139 | case MVT::f64: | |||
1140 | NumFixedFPRs += 1; | |||
1141 | RC = &SystemZ::FP64BitRegClass; | |||
1142 | break; | |||
1143 | case MVT::v16i8: | |||
1144 | case MVT::v8i16: | |||
1145 | case MVT::v4i32: | |||
1146 | case MVT::v2i64: | |||
1147 | case MVT::v4f32: | |||
1148 | case MVT::v2f64: | |||
1149 | RC = &SystemZ::VR128BitRegClass; | |||
1150 | break; | |||
1151 | } | |||
1152 | ||||
1153 | unsigned VReg = MRI.createVirtualRegister(RC); | |||
1154 | MRI.addLiveIn(VA.getLocReg(), VReg); | |||
1155 | ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); | |||
1156 | } else { | |||
1157 | assert(VA.isMemLoc() && "Argument not register or memory")((VA.isMemLoc() && "Argument not register or memory") ? static_cast<void> (0) : __assert_fail ("VA.isMemLoc() && \"Argument not register or memory\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/SystemZ/SystemZISelLowering.cpp" , 1157, __PRETTY_FUNCTION__)); | |||
1158 | ||||
1159 | // Create the frame index object for this incoming parameter. | |||
1160 | int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8, | |||
1161 | VA.getLocMemOffset(), true); | |||
1162 | ||||
1163 | // Create the SelectionDAG nodes corresponding to a load | |||
1164 | // from this parameter. Unpromoted ints and floats are | |||
1165 | // passed as right-justified 8-byte values. | |||
1166 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); | |||
1167 | if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32) | |||
1168 | FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, | |||
1169 | DAG.getIntPtrConstant(4, DL)); | |||
1170 | ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN, | |||
1171 | MachinePointerInfo::getFixedStack(MF, FI)); | |||
1172 | } | |||
1173 | ||||
1174 | // Convert the value of the argument register into the value that's | |||
1175 | // being passed. | |||
1176 | if (VA.getLocInfo() == CCValAssign::Indirect) { | |||
1177 | InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue, | |||
1178 | MachinePointerInfo())); | |||
1179 | // If the original argument was split (e.g. i128), we need | |||
1180 | // to load all parts of it here (using the same address). | |||
1181 | unsigned ArgIndex = Ins[I].OrigArgIndex; | |||
1182 | assert(Ins[I].PartOffset == 0); | |||
1183 | while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) { | |||
1184 | CCValAssign &PartVA = ArgLocs[I + 1]; | |||
1185 | unsigned PartOffset = Ins[I + 1].PartOffset; | |||
1186 | SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, | |||
1187 | DAG.getIntPtrConstant(PartOffset, DL)); | |||
1188 | InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address, | |||
1189 | MachinePointerInfo())); | |||
1190 | ++I; | |||
1191 | } | |||
1192 | } else | |||
1193 | InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue)); | |||
1194 | } | |||
1195 | ||||
1196 | if (IsVarArg) { | |||
1197 | // Save the number of non-varargs registers for later use by va_start, etc. | |||
1198 | FuncInfo->setVarArgsFirstGPR(NumFixedGPRs); | |||
1199 | FuncInfo->setVarArgsFirstFPR(NumFixedFPRs); | |||
1200 | ||||
1201 | // Likewise the address (in the form of a frame index) of where the | |||
1202 | // first stack vararg would be. The 1-byte size here is arbitrary. | |||
1203 | int64_t StackSize = CCInfo.getNextStackOffset(); | |||
1204 | FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true)); | |||
1205 | ||||
1206 | // ...and a similar frame index for the caller-allocated save area | |||
1207 | // that will be used to store the incoming registers. | |||
1208 | int64_t RegSaveOffset = TFL->getOffsetOfLocalArea(); | |||
1209 | unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true); | |||
1210 | FuncInfo->setRegSaveFrameIndex(RegSaveIndex); | |||
1211 | ||||
1212 | // Store the FPR varargs in the reserved frame slots. (We store the | |||
1213 | // GPRs as part of the prologue.) | |||
1214 | if (NumFixedFPRs < SystemZ::NumArgFPRs) { | |||
1215 | SDValue MemOps[SystemZ::NumArgFPRs]; | |||
1216 | for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) { | |||
1217 | unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]); | |||
1218 | int FI = MFI.CreateFixedObject(8, RegSaveOffset + Offset, true); | |||
1219 | SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); | |||
1220 | unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I], | |||
1221 | &SystemZ::FP64BitRegClass); | |||
1222 | SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64); | |||
1223 | MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN, | |||
1224 | MachinePointerInfo::getFixedStack(MF, FI)); | |||
1225 | } | |||
1226 | // Join the stores, which are independent of one another. | |||
1227 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, | |||
1228 | makeArrayRef(&MemOps[NumFixedFPRs], | |||
1229 | SystemZ::NumArgFPRs-NumFixedFPRs)); | |||
1230 | } | |||
1231 | } | |||
1232 | ||||
1233 | return Chain; | |||
1234 | } | |||
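// As an illustration of the CCValAssign::Indirect path above: an i128
// argument is split by common code into two i64 parts sharing one
// OrigArgIndex. Both parts are reloaded from the same indirectly-passed
// address, the first at PartOffset 0 and the second (assuming the usual
// 8-byte split) at PartOffset 8, which is what the inner while loop does.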
1235 | ||||
1236 | static bool canUseSiblingCall(const CCState &ArgCCInfo, | |||
1237 | SmallVectorImpl<CCValAssign> &ArgLocs, | |||
1238 | SmallVectorImpl<ISD::OutputArg> &Outs) { | |||
1239 | // Punt if there are any indirect or stack arguments, or if the call | |||
1240 | // needs the callee-saved argument register R6, or if the call uses | |||
1241 | // the callee-saved register arguments SwiftSelf and SwiftError. | |||
1242 | for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) { | |||
1243 | CCValAssign &VA = ArgLocs[I]; | |||
1244 | if (VA.getLocInfo() == CCValAssign::Indirect) | |||
1245 | return false; | |||
1246 | if (!VA.isRegLoc()) | |||
1247 | return false; | |||
1248 | unsigned Reg = VA.getLocReg(); | |||
1249 | if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D) | |||
1250 | return false; | |||
1251 | if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError()) | |||
1252 | return false; | |||
1253 | } | |||
1254 | return true; | |||
1255 | } | |||
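// A sketch of the R6 restriction above (assuming the usual ELF ABI
// assignment, where the first five integer arguments go in r2 through r6):
//
//   extern long f(long a, long b, long c, long d, long e); // 'e' -> r6
//
// A call to f cannot become a sibling call because r6 is callee-saved,
// whereas the same call with only four integer arguments typically can.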
1256 | ||||
1257 | SDValue | |||
1258 | SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI, | |||
1259 | SmallVectorImpl<SDValue> &InVals) const { | |||
1260 | SelectionDAG &DAG = CLI.DAG; | |||
1261 | SDLoc &DL = CLI.DL; | |||
1262 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; | |||
1263 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; | |||
1264 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; | |||
1265 | SDValue Chain = CLI.Chain; | |||
1266 | SDValue Callee = CLI.Callee; | |||
1267 | bool &IsTailCall = CLI.IsTailCall; | |||
1268 | CallingConv::ID CallConv = CLI.CallConv; | |||
1269 | bool IsVarArg = CLI.IsVarArg; | |||
1270 | MachineFunction &MF = DAG.getMachineFunction(); | |||
1271 | EVT PtrVT = getPointerTy(MF.getDataLayout()); | |||
1272 | ||||
1273 | // Detect unsupported vector argument and return types. | |||
1274 | if (Subtarget.hasVector()) { | |||
1275 | VerifyVectorTypes(Outs); | |||
1276 | VerifyVectorTypes(Ins); | |||
1277 | } | |||
1278 | ||||
1279 | // Analyze the operands of the call, assigning locations to each operand. | |||
1280 | SmallVector<CCValAssign, 16> ArgLocs; | |||
1281 | SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); | |||
1282 | ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ); | |||
1283 | ||||
1284 | // We don't support GuaranteedTailCallOpt, only automatically-detected | |||
1285 | // sibling calls. | |||
1286 | if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs)) | |||
1287 | IsTailCall = false; | |||
1288 | ||||
1289 | // Get a count of how many bytes are to be pushed on the stack. | |||
1290 | unsigned NumBytes = ArgCCInfo.getNextStackOffset(); | |||
1291 | ||||
1292 | // Mark the start of the call. | |||
1293 | if (!IsTailCall) | |||
1294 | Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL); | |||
1295 | ||||
1296 | // Copy argument values to their designated locations. | |||
1297 | SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass; | |||
1298 | SmallVector<SDValue, 8> MemOpChains; | |||
1299 | SDValue StackPtr; | |||
1300 | for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) { | |||
1301 | CCValAssign &VA = ArgLocs[I]; | |||
1302 | SDValue ArgValue = OutVals[I]; | |||
1303 | ||||
1304 | if (VA.getLocInfo() == CCValAssign::Indirect) { | |||
1305 | // Store the argument in a stack slot and pass its address. | |||
1306 | SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT); | |||
1307 | int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); | |||
1308 | MemOpChains.push_back( | |||
1309 | DAG.getStore(Chain, DL, ArgValue, SpillSlot, | |||
1310 | MachinePointerInfo::getFixedStack(MF, FI))); | |||
1311 | // If the original argument was split (e.g. i128), we need | |||
1312 | // to store all parts of it here (and pass just one address). | |||
1313 | unsigned ArgIndex = Outs[I].OrigArgIndex; | |||
1314 | assert(Outs[I].PartOffset == 0); | |||
1315 | while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) { | |||
1316 | SDValue PartValue = OutVals[I + 1]; | |||
1317 | unsigned PartOffset = Outs[I + 1].PartOffset; | |||
1318 | SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, | |||
1319 | DAG.getIntPtrConstant(PartOffset, DL)); | |||
1320 | MemOpChains.push_back( | |||
1321 | DAG.getStore(Chain, DL, PartValue, Address, | |||
1322 | MachinePointerInfo::getFixedStack(MF, FI))); | |||
1323 | ++I; | |||
1324 | } | |||
1325 | ArgValue = SpillSlot; | |||
1326 | } else | |||
1327 | ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue); | |||
1328 | ||||
1329 | if (VA.isRegLoc()) | |||
1330 | // Queue up the argument copies and emit them at the end. | |||
1331 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue)); | |||
1332 | else { | |||
1333 | assert(VA.isMemLoc() && "Argument not register or memory"); | |||
1334 | ||||
1335 | // Work out the address of the stack slot. Unpromoted ints and | |||
1336 | // floats are passed as right-justified 8-byte values. | |||
1337 | if (!StackPtr.getNode()) | |||
1338 | StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT); | |||
1339 | unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset(); | |||
1340 | if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32) | |||
1341 | Offset += 4; | |||
1342 | SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, | |||
1343 | DAG.getIntPtrConstant(Offset, DL)); | |||
1344 | ||||
1345 | // Emit the store. | |||
1346 | MemOpChains.push_back( | |||
1347 | DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo())); | |||
1348 | } | |||
1349 | } | |||
1350 | ||||
1351 | // Join the stores, which are independent of one another. | |||
1352 | if (!MemOpChains.empty()) | |||
1353 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); | |||
1354 | ||||
1355 | // Accept direct calls by converting symbolic call addresses to the | |||
1356 | // associated Target* opcodes. Force %r1 to be used for indirect | |||
1357 | // tail calls. | |||
1358 | SDValue Glue; | |||
1359 | if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) { | |||
1360 | Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT); | |||
1361 | Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee); | |||
1362 | } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) { | |||
1363 | Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT); | |||
1364 | Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee); | |||
1365 | } else if (IsTailCall) { | |||
1366 | Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue); | |||
1367 | Glue = Chain.getValue(1); | |||
1368 | Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType()); | |||
1369 | } | |||
1370 | ||||
1371 | // Build a sequence of copy-to-reg nodes, chained and glued together. | |||
1372 | for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) { | |||
1373 | Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first, | |||
1374 | RegsToPass[I].second, Glue); | |||
1375 | Glue = Chain.getValue(1); | |||
1376 | } | |||
1377 | ||||
1378 | // The first call operand is the chain and the second is the target address. | |||
1379 | SmallVector<SDValue, 8> Ops; | |||
1380 | Ops.push_back(Chain); | |||
1381 | Ops.push_back(Callee); | |||
1382 | ||||
1383 | // Add argument registers to the end of the list so that they are | |||
1384 | // known live into the call. | |||
1385 | for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) | |||
1386 | Ops.push_back(DAG.getRegister(RegsToPass[I].first, | |||
1387 | RegsToPass[I].second.getValueType())); | |||
1388 | ||||
1389 | // Add a register mask operand representing the call-preserved registers. | |||
1390 | const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
1391 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); | |||
1392 | assert(Mask && "Missing call preserved mask for calling convention"); | |||
1393 | Ops.push_back(DAG.getRegisterMask(Mask)); | |||
1394 | ||||
1395 | // Glue the call to the argument copies, if any. | |||
1396 | if (Glue.getNode()) | |||
1397 | Ops.push_back(Glue); | |||
1398 | ||||
1399 | // Emit the call. | |||
1400 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
1401 | if (IsTailCall) | |||
1402 | return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops); | |||
1403 | Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops); | |||
1404 | Glue = Chain.getValue(1); | |||
1405 | ||||
1406 | // Mark the end of the call, which is glued to the call itself. | |||
1407 | Chain = DAG.getCALLSEQ_END(Chain, | |||
1408 | DAG.getConstant(NumBytes, DL, PtrVT, true), | |||
1409 | DAG.getConstant(0, DL, PtrVT, true), | |||
1410 | Glue, DL); | |||
1411 | Glue = Chain.getValue(1); | |||
1412 | ||||
1413 | // Assign locations to each value returned by this call. | |||
1414 | SmallVector<CCValAssign, 16> RetLocs; | |||
1415 | CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext()); | |||
1416 | RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ); | |||
1417 | ||||
1418 | // Copy all of the result registers out of their specified physreg. | |||
1419 | for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) { | |||
1420 | CCValAssign &VA = RetLocs[I]; | |||
1421 | ||||
1422 | // Copy the value out, gluing the copy to the end of the call sequence. | |||
1423 | SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), | |||
1424 | VA.getLocVT(), Glue); | |||
1425 | Chain = RetValue.getValue(1); | |||
1426 | Glue = RetValue.getValue(2); | |||
1427 | ||||
1428 | // Convert the value of the return register into the value that's | |||
1429 | // being returned. | |||
1430 | InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue)); | |||
1431 | } | |||
1432 | ||||
1433 | return Chain; | |||
1434 | } | |||
1435 | ||||
1436 | bool SystemZTargetLowering:: | |||
1437 | CanLowerReturn(CallingConv::ID CallConv, | |||
1438 | MachineFunction &MF, bool isVarArg, | |||
1439 | const SmallVectorImpl<ISD::OutputArg> &Outs, | |||
1440 | LLVMContext &Context) const { | |||
1441 | // Detect unsupported vector return types. | |||
1442 | if (Subtarget.hasVector()) | |||
1443 | VerifyVectorTypes(Outs); | |||
1444 | ||||
1445 | // Special case that we cannot easily detect in RetCC_SystemZ since | |||
1446 | // i128 is not a legal type. | |||
1447 | for (auto &Out : Outs) | |||
1448 | if (Out.ArgVT == MVT::i128) | |||
1449 | return false; | |||
1450 | ||||
1451 | SmallVector<CCValAssign, 16> RetLocs; | |||
1452 | CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context); | |||
1453 | return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ); | |||
1454 | } | |||
1455 | ||||
1456 | SDValue | |||
1457 | SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, | |||
1458 | bool IsVarArg, | |||
1459 | const SmallVectorImpl<ISD::OutputArg> &Outs, | |||
1460 | const SmallVectorImpl<SDValue> &OutVals, | |||
1461 | const SDLoc &DL, SelectionDAG &DAG) const { | |||
1462 | MachineFunction &MF = DAG.getMachineFunction(); | |||
1463 | ||||
1464 | // Detect unsupported vector return types. | |||
1465 | if (Subtarget.hasVector()) | |||
1466 | VerifyVectorTypes(Outs); | |||
1467 | ||||
1468 | // Assign locations to each returned value. | |||
1469 | SmallVector<CCValAssign, 16> RetLocs; | |||
1470 | CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext()); | |||
1471 | RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ); | |||
1472 | ||||
1473 | // Quick exit for void returns | |||
1474 | if (RetLocs.empty()) | |||
1475 | return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain); | |||
1476 | ||||
1477 | // Copy the result values into the output registers. | |||
1478 | SDValue Glue; | |||
1479 | SmallVector<SDValue, 4> RetOps; | |||
1480 | RetOps.push_back(Chain); | |||
1481 | for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) { | |||
1482 | CCValAssign &VA = RetLocs[I]; | |||
1483 | SDValue RetValue = OutVals[I]; | |||
1484 | ||||
1485 | // Make the return register live on exit. | |||
1486 | assert(VA.isRegLoc() && "Can only return in registers!"); | |||
1487 | ||||
1488 | // Promote the value as required. | |||
1489 | RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue); | |||
1490 | ||||
1491 | // Chain and glue the copies together. | |||
1492 | unsigned Reg = VA.getLocReg(); | |||
1493 | Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue); | |||
1494 | Glue = Chain.getValue(1); | |||
1495 | RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT())); | |||
1496 | } | |||
1497 | ||||
1498 | // Update chain and glue. | |||
1499 | RetOps[0] = Chain; | |||
1500 | if (Glue.getNode()) | |||
1501 | RetOps.push_back(Glue); | |||
1502 | ||||
1503 | return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps); | |||
1504 | } | |||
1505 | ||||
1506 | // Return true if Op is an intrinsic node with chain that returns the CC value | |||
1507 | // as its only (other) argument. Provide the associated SystemZISD opcode and | |||
1508 | // the mask of valid CC values if so. | |||
1509 | static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, | |||
1510 | unsigned &CCValid) { | |||
1511 | unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); | |||
1512 | switch (Id) { | |||
1513 | case Intrinsic::s390_tbegin: | |||
1514 | Opcode = SystemZISD::TBEGIN; | |||
1515 | CCValid = SystemZ::CCMASK_TBEGIN; | |||
1516 | return true; | |||
1517 | ||||
1518 | case Intrinsic::s390_tbegin_nofloat: | |||
1519 | Opcode = SystemZISD::TBEGIN_NOFLOAT; | |||
1520 | CCValid = SystemZ::CCMASK_TBEGIN; | |||
1521 | return true; | |||
1522 | ||||
1523 | case Intrinsic::s390_tend: | |||
1524 | Opcode = SystemZISD::TEND; | |||
1525 | CCValid = SystemZ::CCMASK_TEND; | |||
1526 | return true; | |||
1527 | ||||
1528 | default: | |||
1529 | return false; | |||
1530 | } | |||
1531 | } | |||
1532 | ||||
1533 | // Return true if Op is an intrinsic node without chain that returns the | |||
1534 | // CC value as its final argument. Provide the associated SystemZISD | |||
1535 | // opcode and the mask of valid CC values if so. | |||
1536 | static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) { | |||
1537 | unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
1538 | switch (Id) { | |||
1539 | case Intrinsic::s390_vpkshs: | |||
1540 | case Intrinsic::s390_vpksfs: | |||
1541 | case Intrinsic::s390_vpksgs: | |||
1542 | Opcode = SystemZISD::PACKS_CC; | |||
1543 | CCValid = SystemZ::CCMASK_VCMP; | |||
1544 | return true; | |||
1545 | ||||
1546 | case Intrinsic::s390_vpklshs: | |||
1547 | case Intrinsic::s390_vpklsfs: | |||
1548 | case Intrinsic::s390_vpklsgs: | |||
1549 | Opcode = SystemZISD::PACKLS_CC; | |||
1550 | CCValid = SystemZ::CCMASK_VCMP; | |||
1551 | return true; | |||
1552 | ||||
1553 | case Intrinsic::s390_vceqbs: | |||
1554 | case Intrinsic::s390_vceqhs: | |||
1555 | case Intrinsic::s390_vceqfs: | |||
1556 | case Intrinsic::s390_vceqgs: | |||
1557 | Opcode = SystemZISD::VICMPES; | |||
1558 | CCValid = SystemZ::CCMASK_VCMP; | |||
1559 | return true; | |||
1560 | ||||
1561 | case Intrinsic::s390_vchbs: | |||
1562 | case Intrinsic::s390_vchhs: | |||
1563 | case Intrinsic::s390_vchfs: | |||
1564 | case Intrinsic::s390_vchgs: | |||
1565 | Opcode = SystemZISD::VICMPHS; | |||
1566 | CCValid = SystemZ::CCMASK_VCMP; | |||
1567 | return true; | |||
1568 | ||||
1569 | case Intrinsic::s390_vchlbs: | |||
1570 | case Intrinsic::s390_vchlhs: | |||
1571 | case Intrinsic::s390_vchlfs: | |||
1572 | case Intrinsic::s390_vchlgs: | |||
1573 | Opcode = SystemZISD::VICMPHLS; | |||
1574 | CCValid = SystemZ::CCMASK_VCMP; | |||
1575 | return true; | |||
1576 | ||||
1577 | case Intrinsic::s390_vtm: | |||
1578 | Opcode = SystemZISD::VTM; | |||
1579 | CCValid = SystemZ::CCMASK_VCMP; | |||
1580 | return true; | |||
1581 | ||||
1582 | case Intrinsic::s390_vfaebs: | |||
1583 | case Intrinsic::s390_vfaehs: | |||
1584 | case Intrinsic::s390_vfaefs: | |||
1585 | Opcode = SystemZISD::VFAE_CC; | |||
1586 | CCValid = SystemZ::CCMASK_ANY; | |||
1587 | return true; | |||
1588 | ||||
1589 | case Intrinsic::s390_vfaezbs: | |||
1590 | case Intrinsic::s390_vfaezhs: | |||
1591 | case Intrinsic::s390_vfaezfs: | |||
1592 | Opcode = SystemZISD::VFAEZ_CC; | |||
1593 | CCValid = SystemZ::CCMASK_ANY; | |||
1594 | return true; | |||
1595 | ||||
1596 | case Intrinsic::s390_vfeebs: | |||
1597 | case Intrinsic::s390_vfeehs: | |||
1598 | case Intrinsic::s390_vfeefs: | |||
1599 | Opcode = SystemZISD::VFEE_CC; | |||
1600 | CCValid = SystemZ::CCMASK_ANY; | |||
1601 | return true; | |||
1602 | ||||
1603 | case Intrinsic::s390_vfeezbs: | |||
1604 | case Intrinsic::s390_vfeezhs: | |||
1605 | case Intrinsic::s390_vfeezfs: | |||
1606 | Opcode = SystemZISD::VFEEZ_CC; | |||
1607 | CCValid = SystemZ::CCMASK_ANY; | |||
1608 | return true; | |||
1609 | ||||
1610 | case Intrinsic::s390_vfenebs: | |||
1611 | case Intrinsic::s390_vfenehs: | |||
1612 | case Intrinsic::s390_vfenefs: | |||
1613 | Opcode = SystemZISD::VFENE_CC; | |||
1614 | CCValid = SystemZ::CCMASK_ANY; | |||
1615 | return true; | |||
1616 | ||||
1617 | case Intrinsic::s390_vfenezbs: | |||
1618 | case Intrinsic::s390_vfenezhs: | |||
1619 | case Intrinsic::s390_vfenezfs: | |||
1620 | Opcode = SystemZISD::VFENEZ_CC; | |||
1621 | CCValid = SystemZ::CCMASK_ANY; | |||
1622 | return true; | |||
1623 | ||||
1624 | case Intrinsic::s390_vistrbs: | |||
1625 | case Intrinsic::s390_vistrhs: | |||
1626 | case Intrinsic::s390_vistrfs: | |||
1627 | Opcode = SystemZISD::VISTR_CC; | |||
1628 | CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3; | |||
1629 | return true; | |||
1630 | ||||
1631 | case Intrinsic::s390_vstrcbs: | |||
1632 | case Intrinsic::s390_vstrchs: | |||
1633 | case Intrinsic::s390_vstrcfs: | |||
1634 | Opcode = SystemZISD::VSTRC_CC; | |||
1635 | CCValid = SystemZ::CCMASK_ANY; | |||
1636 | return true; | |||
1637 | ||||
1638 | case Intrinsic::s390_vstrczbs: | |||
1639 | case Intrinsic::s390_vstrczhs: | |||
1640 | case Intrinsic::s390_vstrczfs: | |||
1641 | Opcode = SystemZISD::VSTRCZ_CC; | |||
1642 | CCValid = SystemZ::CCMASK_ANY; | |||
1643 | return true; | |||
1644 | ||||
1645 | case Intrinsic::s390_vfcedbs: | |||
1646 | case Intrinsic::s390_vfcesbs: | |||
1647 | Opcode = SystemZISD::VFCMPES; | |||
1648 | CCValid = SystemZ::CCMASK_VCMP; | |||
1649 | return true; | |||
1650 | ||||
1651 | case Intrinsic::s390_vfchdbs: | |||
1652 | case Intrinsic::s390_vfchsbs: | |||
1653 | Opcode = SystemZISD::VFCMPHS; | |||
1654 | CCValid = SystemZ::CCMASK_VCMP; | |||
1655 | return true; | |||
1656 | ||||
1657 | case Intrinsic::s390_vfchedbs: | |||
1658 | case Intrinsic::s390_vfchesbs: | |||
1659 | Opcode = SystemZISD::VFCMPHES; | |||
1660 | CCValid = SystemZ::CCMASK_VCMP; | |||
1661 | return true; | |||
1662 | ||||
1663 | case Intrinsic::s390_vftcidb: | |||
1664 | case Intrinsic::s390_vftcisb: | |||
1665 | Opcode = SystemZISD::VFTCI; | |||
1666 | CCValid = SystemZ::CCMASK_VCMP; | |||
1667 | return true; | |||
1668 | ||||
1669 | case Intrinsic::s390_tdc: | |||
1670 | Opcode = SystemZISD::TDC; | |||
1671 | CCValid = SystemZ::CCMASK_TDC; | |||
1672 | return true; | |||
1673 | ||||
1674 | default: | |||
1675 | return false; | |||
1676 | } | |||
1677 | } | |||
1678 | ||||
1679 | // Emit an intrinsic with chain and an explicit CC register result. | |||
1680 | static SDNode *emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, | |||
1681 | unsigned Opcode) { | |||
1682 | // Copy all operands except the intrinsic ID. | |||
1683 | unsigned NumOps = Op.getNumOperands(); | |||
1684 | SmallVector<SDValue, 6> Ops; | |||
1685 | Ops.reserve(NumOps - 1); | |||
1686 | Ops.push_back(Op.getOperand(0)); | |||
1687 | for (unsigned I = 2; I < NumOps; ++I) | |||
1688 | Ops.push_back(Op.getOperand(I)); | |||
1689 | ||||
1690 | assert(Op->getNumValues() == 2 && "Expected only CC result and chain"); | |||
1691 | SDVTList RawVTs = DAG.getVTList(MVT::i32, MVT::Other); | |||
1692 | SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops); | |||
1693 | SDValue OldChain = SDValue(Op.getNode(), 1); | |||
1694 | SDValue NewChain = SDValue(Intr.getNode(), 1); | |||
1695 | DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain); | |||
1696 | return Intr.getNode(); | |||
1697 | } | |||
1698 | ||||
1699 | // Emit an intrinsic with an explicit CC register result. | |||
1700 | static SDNode *emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, | |||
1701 | unsigned Opcode) { | |||
1702 | // Copy all operands except the intrinsic ID. | |||
1703 | unsigned NumOps = Op.getNumOperands(); | |||
1704 | SmallVector<SDValue, 6> Ops; | |||
1705 | Ops.reserve(NumOps - 1); | |||
1706 | for (unsigned I = 1; I < NumOps; ++I) | |||
1707 | Ops.push_back(Op.getOperand(I)); | |||
1708 | ||||
1709 | SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), Op->getVTList(), Ops); | |||
1710 | return Intr.getNode(); | |||
1711 | } | |||
1712 | ||||
1713 | // CC is a comparison that will be implemented using an integer or | |||
1714 | // floating-point comparison. Return the condition code mask for | |||
1715 | // a branch on true. In the integer case, CCMASK_CMP_UO is set for | |||
1716 | // unsigned comparisons and clear for signed ones. In the floating-point | |||
1717 | // case, CCMASK_CMP_UO has its normal mask meaning (unordered). | |||
1718 | static unsigned CCMaskForCondCode(ISD::CondCode CC) { | |||
1719 | #define CONV(X) \ | |||
1720 | case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \ | |||
1721 | case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \ | |||
1722 | case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X | |||
1723 | ||||
1724 | switch (CC) { | |||
1725 | default: | |||
1726 | llvm_unreachable("Invalid integer condition!"); | |||
1727 | ||||
1728 | CONV(EQ); | |||
1729 | CONV(NE); | |||
1730 | CONV(GT); | |||
1731 | CONV(GE); | |||
1732 | CONV(LT); | |||
1733 | CONV(LE); | |||
1734 | ||||
1735 | case ISD::SETO: return SystemZ::CCMASK_CMP_O; | |||
1736 | case ISD::SETUO: return SystemZ::CCMASK_CMP_UO; | |||
1737 | } | |||
1738 | #undef CONV | |||
1739 | } | |||
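// For reference, a single CONV(GT) above expands to:
//
//   case ISD::SETGT:  return SystemZ::CCMASK_CMP_GT;
//   case ISD::SETOGT: return SystemZ::CCMASK_CMP_GT;
//   case ISD::SETUGT: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_GT;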
1740 | ||||
1741 | // If C can be converted to a comparison against zero, adjust the operands | |||
1742 | // as necessary. | |||
1743 | static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) { | |||
1744 | if (C.ICmpType == SystemZICMP::UnsignedOnly) | |||
1745 | return; | |||
1746 | ||||
1747 | auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode()); | |||
1748 | if (!ConstOp1) | |||
1749 | return; | |||
1750 | ||||
1751 | int64_t Value = ConstOp1->getSExtValue(); | |||
1752 | if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) || | |||
1753 | (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) || | |||
1754 | (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) || | |||
1755 | (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) { | |||
1756 | C.CCMask ^= SystemZ::CCMASK_CMP_EQ; | |||
1757 | C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType()); | |||
1758 | } | |||
1759 | } | |||
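// Worked example: a signed "x > -1" arrives with Value == -1 and
// CCMask == CCMASK_CMP_GT; XOR-ing in CCMASK_CMP_EQ gives CCMASK_CMP_GE
// against a new constant 0, i.e. "x >= 0", which later lowering can
// implement as a comparison against zero (for instance via LOAD AND TEST).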
1760 | ||||
1761 | // If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI, | |||
1762 | // adjust the operands as necessary. | |||
1763 | static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, | |||
1764 | Comparison &C) { | |||
1765 | // For us to make any changes, it must be a comparison between a single-use | |||
1766 | // load and a constant. | |||
1767 | if (!C.Op0.hasOneUse() || | |||
1768 | C.Op0.getOpcode() != ISD::LOAD || | |||
1769 | C.Op1.getOpcode() != ISD::Constant) | |||
1770 | return; | |||
1771 | ||||
1772 | // We must have an 8- or 16-bit load. | |||
1773 | auto *Load = cast<LoadSDNode>(C.Op0); | |||
1774 | unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits(); | |||
1775 | if (NumBits != 8 && NumBits != 16) | |||
1776 | return; | |||
1777 | ||||
1778 | // The load must be an extending one and the constant must be within the | |||
1779 | // range of the unextended value. | |||
1780 | auto *ConstOp1 = cast<ConstantSDNode>(C.Op1); | |||
1781 | uint64_t Value = ConstOp1->getZExtValue(); | |||
1782 | uint64_t Mask = (1 << NumBits) - 1; | |||
1783 | if (Load->getExtensionType() == ISD::SEXTLOAD) { | |||
1784 | // Make sure that ConstOp1 is in range of C.Op0. | |||
1785 | int64_t SignedValue = ConstOp1->getSExtValue(); | |||
1786 | if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask) | |||
1787 | return; | |||
1788 | if (C.ICmpType != SystemZICMP::SignedOnly) { | |||
1789 | // Unsigned comparison between two sign-extended values is equivalent | |||
1790 | // to unsigned comparison between two zero-extended values. | |||
1791 | Value &= Mask; | |||
1792 | } else if (NumBits == 8) { | |||
1793 | // Try to treat the comparison as unsigned, so that we can use CLI. | |||
1794 | // Adjust CCMask and Value as necessary. | |||
1795 | if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT) | |||
1796 | // Test whether the high bit of the byte is set. | |||
1797 | Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT; | |||
1798 | else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE) | |||
1799 | // Test whether the high bit of the byte is clear. | |||
1800 | Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT; | |||
1801 | else | |||
1802 | // No instruction exists for this combination. | |||
1803 | return; | |||
1804 | C.ICmpType = SystemZICMP::UnsignedOnly; | |||
1805 | } | |||
1806 | } else if (Load->getExtensionType() == ISD::ZEXTLOAD) { | |||
1807 | if (Value > Mask) | |||
1808 | return; | |||
1809 | // If the constant is in range, we can use any comparison. | |||
1810 | C.ICmpType = SystemZICMP::Any; | |||
1811 | } else | |||
1812 | return; | |||
1813 | ||||
1814 | // Make sure that the first operand is an i32 of the right extension type. | |||
1815 | ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ? | |||
1816 | ISD::SEXTLOAD : | |||
1817 | ISD::ZEXTLOAD); | |||
1818 | if (C.Op0.getValueType() != MVT::i32 || | |||
1819 | Load->getExtensionType() != ExtType) { | |||
1820 | C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(), | |||
1821 | Load->getBasePtr(), Load->getPointerInfo(), | |||
1822 | Load->getMemoryVT(), Load->getAlignment(), | |||
1823 | Load->getMemOperand()->getFlags()); | |||
1824 | // Update the chain uses. | |||
1825 | DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1)); | |||
1826 | } | |||
1827 | ||||
1828 | // Make sure that the second operand is an i32 with the right value. | |||
1829 | if (C.Op1.getValueType() != MVT::i32 || | |||
1830 | Value != ConstOp1->getZExtValue()) | |||
1831 | C.Op1 = DAG.getConstant(Value, DL, MVT::i32); | |||
1832 | } | |||
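// Worked example of the CLI rewrite: for a sign-extending i8 load L
// compared as signed "L < 0", Value is 0 and CCMask is CCMASK_CMP_LT;
// the code above retargets this to the unsigned test "L > 127"
// (Value = 127, CCMask = CCMASK_CMP_GT), which checks whether the high
// bit of the byte is set and fits the unsigned CLI instruction.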
1833 | ||||
1834 | // Return true if Op is either an unextended load, or a load suitable | |||
1835 | // for integer register-memory comparisons of type ICmpType. | |||
1836 | static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) { | |||
1837 | auto *Load = dyn_cast<LoadSDNode>(Op.getNode()); | |||
1838 | if (Load) { | |||
1839 | // There are no instructions to compare a register with a memory byte. | |||
1840 | if (Load->getMemoryVT() == MVT::i8) | |||
1841 | return false; | |||
1842 | // Otherwise decide on extension type. | |||
1843 | switch (Load->getExtensionType()) { | |||
1844 | case ISD::NON_EXTLOAD: | |||
1845 | return true; | |||
1846 | case ISD::SEXTLOAD: | |||
1847 | return ICmpType != SystemZICMP::UnsignedOnly; | |||
1848 | case ISD::ZEXTLOAD: | |||
1849 | return ICmpType != SystemZICMP::SignedOnly; | |||
1850 | default: | |||
1851 | break; | |||
1852 | } | |||
1853 | } | |||
1854 | return false; | |||
1855 | } | |||
1856 | ||||
1857 | // Return true if it is better to swap the operands of C. | |||
1858 | static bool shouldSwapCmpOperands(const Comparison &C) { | |||
1859 | // Leave f128 comparisons alone, since they have no memory forms. | |||
1860 | if (C.Op0.getValueType() == MVT::f128) | |||
1861 | return false; | |||
1862 | ||||
1863 | // Always keep a floating-point constant second, since comparisons with | |||
1864 | // zero can use LOAD TEST and comparisons with other constants make a | |||
1865 | // natural memory operand. | |||
1866 | if (isa<ConstantFPSDNode>(C.Op1)) | |||
1867 | return false; | |||
1868 | ||||
1869 | // Never swap comparisons with zero since there are many ways to optimize | |||
1870 | // those later. | |||
1871 | auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1); | |||
1872 | if (ConstOp1 && ConstOp1->getZExtValue() == 0) | |||
1873 | return false; | |||
1874 | ||||
1875 | // Also keep natural memory operands second if the loaded value is | |||
1876 | // only used here. Several comparisons have memory forms. | |||
1877 | if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse()) | |||
1878 | return false; | |||
1879 | ||||
1880 | // Look for cases where C.Op0 is a single-use load and C.Op1 isn't. | |||
1881 | // In that case we generally prefer the memory to be second. | |||
1882 | if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) { | |||
1883 | // The only exceptions are when the second operand is a constant and | |||
1884 | // we can use things like CHHSI. | |||
1885 | if (!ConstOp1) | |||
1886 | return true; | |||
1887 | // The unsigned memory-immediate instructions can handle 16-bit | |||
1888 | // unsigned integers. | |||
1889 | if (C.ICmpType != SystemZICMP::SignedOnly && | |||
1890 | isUInt<16>(ConstOp1->getZExtValue())) | |||
1891 | return false; | |||
1892 | // The signed memory-immediate instructions can handle 16-bit | |||
1893 | // signed integers. | |||
1894 | if (C.ICmpType != SystemZICMP::UnsignedOnly && | |||
1895 | isInt<16>(ConstOp1->getSExtValue())) | |||
1896 | return false; | |||
1897 | return true; | |||
1898 | } | |||
1899 | ||||
1900 | // Try to promote the use of CGFR and CLGFR. | |||
1901 | unsigned Opcode0 = C.Op0.getOpcode(); | |||
1902 | if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND) | |||
1903 | return true; | |||
1904 | if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND) | |||
1905 | return true; | |||
1906 | if (C.ICmpType != SystemZICMP::SignedOnly && | |||
1907 | Opcode0 == ISD::AND && | |||
1908 | C.Op0.getOperand(1).getOpcode() == ISD::Constant && | |||
1909 | cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff) | |||
1910 | return true; | |||
1911 | ||||
1912 | return false; | |||
1913 | } | |||
1914 | ||||
1915 | // Return a version of comparison CC mask CCMask in which the LT and GT | |||
1916 | // actions are swapped. | |||
1917 | static unsigned reverseCCMask(unsigned CCMask) { | |||
1918 | return ((CCMask & SystemZ::CCMASK_CMP_EQ) | | |||
1919 | (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) | | |||
1920 | (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) | | |||
1921 | (CCMask & SystemZ::CCMASK_CMP_UO)); | |||
1922 | } | |||
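// This is what makes operand swapping sound: for example, "a < b"
// (CCMASK_CMP_LT) becomes "b > a" (CCMASK_CMP_GT) after
// std::swap(C.Op0, C.Op1), while the EQ and UO bits pass through unchanged.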
1923 | ||||
1924 | // Check whether C tests for equality between X and Y and whether X - Y | |||
1925 | // or Y - X is also computed. In that case it's better to compare the | |||
1926 | // result of the subtraction against zero. | |||
1927 | static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, | |||
1928 | Comparison &C) { | |||
1929 | if (C.CCMask == SystemZ::CCMASK_CMP_EQ || | |||
1930 | C.CCMask == SystemZ::CCMASK_CMP_NE) { | |||
1931 | for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) { | |||
1932 | SDNode *N = *I; | |||
1933 | if (N->getOpcode() == ISD::SUB && | |||
1934 | ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) || | |||
1935 | (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) { | |||
1936 | C.Op0 = SDValue(N, 0); | |||
1937 | C.Op1 = DAG.getConstant(0, DL, N->getValueType(0)); | |||
1938 | return; | |||
1939 | } | |||
1940 | } | |||
1941 | } | |||
1942 | } | |||
1943 | ||||
1944 | // Check whether C compares a floating-point value with zero and if that | |||
1945 | // floating-point value is also negated. In this case we can use the | |||
1946 | // negation to set CC, so avoiding separate LOAD AND TEST and | |||
1947 | // LOAD (NEGATIVE/COMPLEMENT) instructions. | |||
1948 | static void adjustForFNeg(Comparison &C) { | |||
1949 | auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1); | |||
1950 | if (C1 && C1->isZero()) { | |||
1951 | for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) { | |||
1952 | SDNode *N = *I; | |||
1953 | if (N->getOpcode() == ISD::FNEG) { | |||
1954 | C.Op0 = SDValue(N, 0); | |||
1955 | C.CCMask = reverseCCMask(C.CCMask); | |||
1956 | return; | |||
1957 | } | |||
1958 | } | |||
1959 | } | |||
1960 | } | |||
1961 | ||||
1962 | // Check whether C compares (shl X, 32) with 0 and whether X is | |||
1963 | // also sign-extended. In that case it is better to test the result | |||
1964 | // of the sign extension using LTGFR. | |||
1965 | // | |||
1966 | // This case is important because InstCombine transforms a comparison | |||
1967 | // with (sext (trunc X)) into a comparison with (shl X, 32). | |||
1968 | static void adjustForLTGFR(Comparison &C) { | |||
1969 | // Check for a comparison between (shl X, 32) and 0. | |||
1970 | if (C.Op0.getOpcode() == ISD::SHL && | |||
1971 | C.Op0.getValueType() == MVT::i64 && | |||
1972 | C.Op1.getOpcode() == ISD::Constant && | |||
1973 | cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { | |||
1974 | auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1)); | |||
1975 | if (C1 && C1->getZExtValue() == 32) { | |||
1976 | SDValue ShlOp0 = C.Op0.getOperand(0); | |||
1977 | // See whether X has any SIGN_EXTEND_INREG uses. | |||
1978 | for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) { | |||
1979 | SDNode *N = *I; | |||
1980 | if (N->getOpcode() == ISD::SIGN_EXTEND_INREG && | |||
1981 | cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) { | |||
1982 | C.Op0 = SDValue(N, 0); | |||
1983 | return; | |||
1984 | } | |||
1985 | } | |||
1986 | } | |||
1987 | } | |||
1988 | } | |||
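// Concretely: InstCombine canonicalizes a comparison of (sext (trunc X))
// with 0 into a comparison of (shl X, 32) with 0. If X also feeds a
// (sign_extend_inreg X, i32) node, the code above retargets the
// comparison to that node, so a single LTGFR can both perform the
// extension and set CC.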
1989 | ||||
1990 | // If C compares the truncation of an extending load, try to compare | |||
1991 | // the untruncated value instead. This exposes more opportunities to | |||
1992 | // reuse CC. | |||
1993 | static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, | |||
1994 | Comparison &C) { | |||
1995 | if (C.Op0.getOpcode() == ISD::TRUNCATE && | |||
1996 | C.Op0.getOperand(0).getOpcode() == ISD::LOAD && | |||
1997 | C.Op1.getOpcode() == ISD::Constant && | |||
1998 | cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { | |||
1999 | auto *L = cast<LoadSDNode>(C.Op0.getOperand(0)); | |||
2000 | if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) { | |||
2001 | unsigned Type = L->getExtensionType(); | |||
2002 | if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) || | |||
2003 | (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) { | |||
2004 | C.Op0 = C.Op0.getOperand(0); | |||
2005 | C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType()); | |||
2006 | } | |||
2007 | } | |||
2008 | } | |||
2009 | } | |||
2010 | ||||
2011 | // Return true if shift operation N has an in-range constant shift value. | |||
2012 | // Store it in ShiftVal if so. | |||
2013 | static bool isSimpleShift(SDValue N, unsigned &ShiftVal) { | |||
2014 | auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1)); | |||
2015 | if (!Shift) | |||
2016 | return false; | |||
2017 | ||||
2018 | uint64_t Amount = Shift->getZExtValue(); | |||
2019 | if (Amount >= N.getValueSizeInBits()) | |||
2020 | return false; | |||
2021 | ||||
2022 | ShiftVal = Amount; | |||
2023 | return true; | |||
2024 | } | |||
2025 | ||||
2026 | // Check whether an AND with Mask is suitable for a TEST UNDER MASK | |||
2027 | // instruction and whether the CC value is descriptive enough to handle | |||
2028 | // a comparison of type ICmpType between the AND result and CmpVal. | |||
2029 | // CCMask says which comparison result is being tested and BitSize is | |||
2030 | // the number of bits in the operands. If TEST UNDER MASK can be used, | |||
2031 | // return the corresponding CC mask, otherwise return 0. | |||
2032 | static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, | |||
2033 | uint64_t Mask, uint64_t CmpVal, | |||
2034 | unsigned ICmpType) { | |||
2035 | assert(Mask != 0 && "ANDs with zero should have been removed by now"); | |||
2036 | ||||
2037 | // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL. | |||
2038 | if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) && | |||
2039 | !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask)) | |||
2040 | return 0; | |||
2041 | ||||
2042 | // Work out the masks for the lowest and highest bits. | |||
2043 | unsigned HighShift = 63 - countLeadingZeros(Mask); | |||
2044 | uint64_t High = uint64_t(1) << HighShift; | |||
2045 | uint64_t Low = uint64_t(1) << countTrailingZeros(Mask); | |||
2046 | ||||
2047 | // Signed ordered comparisons are effectively unsigned if the sign | |||
2048 | // bit is dropped. | |||
2049 | bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly); | |||
2050 | ||||
2051 | // Check for equality comparisons with 0, or the equivalent. | |||
2052 | if (CmpVal == 0) { | |||
2053 | if (CCMask == SystemZ::CCMASK_CMP_EQ) | |||
2054 | return SystemZ::CCMASK_TM_ALL_0; | |||
2055 | if (CCMask == SystemZ::CCMASK_CMP_NE) | |||
2056 | return SystemZ::CCMASK_TM_SOME_1; | |||
2057 | } | |||
2058 | if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) { | |||
2059 | if (CCMask == SystemZ::CCMASK_CMP_LT) | |||
2060 | return SystemZ::CCMASK_TM_ALL_0; | |||
2061 | if (CCMask == SystemZ::CCMASK_CMP_GE) | |||
2062 | return SystemZ::CCMASK_TM_SOME_1; | |||
2063 | } | |||
2064 | if (EffectivelyUnsigned && CmpVal < Low) { | |||
2065 | if (CCMask == SystemZ::CCMASK_CMP_LE) | |||
2066 | return SystemZ::CCMASK_TM_ALL_0; | |||
2067 | if (CCMask == SystemZ::CCMASK_CMP_GT) | |||
2068 | return SystemZ::CCMASK_TM_SOME_1; | |||
2069 | } | |||
2070 | ||||
2071 | // Check for equality comparisons with the mask, or the equivalent. | |||
2072 | if (CmpVal == Mask) { | |||
2073 | if (CCMask == SystemZ::CCMASK_CMP_EQ) | |||
2074 | return SystemZ::CCMASK_TM_ALL_1; | |||
2075 | if (CCMask == SystemZ::CCMASK_CMP_NE) | |||
2076 | return SystemZ::CCMASK_TM_SOME_0; | |||
2077 | } | |||
2078 | if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) { | |||
2079 | if (CCMask == SystemZ::CCMASK_CMP_GT) | |||
2080 | return SystemZ::CCMASK_TM_ALL_1; | |||
2081 | if (CCMask == SystemZ::CCMASK_CMP_LE) | |||
2082 | return SystemZ::CCMASK_TM_SOME_0; | |||
2083 | } | |||
2084 | if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) { | |||
2085 | if (CCMask == SystemZ::CCMASK_CMP_GE) | |||
2086 | return SystemZ::CCMASK_TM_ALL_1; | |||
2087 | if (CCMask == SystemZ::CCMASK_CMP_LT) | |||
2088 | return SystemZ::CCMASK_TM_SOME_0; | |||
2089 | } | |||
2090 | ||||
2091 | // Check for ordered comparisons with the top bit. | |||
2092 | if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) { | |||
2093 | if (CCMask == SystemZ::CCMASK_CMP_LE) | |||
2094 | return SystemZ::CCMASK_TM_MSB_0; | |||
2095 | if (CCMask == SystemZ::CCMASK_CMP_GT) | |||
2096 | return SystemZ::CCMASK_TM_MSB_1; | |||
2097 | } | |||
2098 | if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) { | |||
2099 | if (CCMask == SystemZ::CCMASK_CMP_LT) | |||
2100 | return SystemZ::CCMASK_TM_MSB_0; | |||
2101 | if (CCMask == SystemZ::CCMASK_CMP_GE) | |||
2102 | return SystemZ::CCMASK_TM_MSB_1; | |||
2103 | } | |||
2104 | ||||
2105 | // If there are just two bits, we can do equality checks for Low and High | |||
2106 | // as well. | |||
2107 | if (Mask == Low + High) { | |||
2108 | if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low) | |||
2109 | return SystemZ::CCMASK_TM_MIXED_MSB_0; | |||
2110 | if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low) | |||
2111 | return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY; | |||
2112 | if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High) | |||
2113 | return SystemZ::CCMASK_TM_MIXED_MSB_1; | |||
2114 | if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High) | |||
2115 | return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY; | |||
2116 | } | |||
2117 | ||||
2118 | // Looks like we've exhausted our options. | |||
2119 | return 0; | |||
2120 | } | |||
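// Worked example of the mask arithmetic: for Mask == 0x28, Low == 0x08
// and High == 0x20. An effectively-unsigned "(x & 0x28) < 8" satisfies
// "CmpVal > 0 && CmpVal <= Low" with CCMASK_CMP_LT and so maps to
// CCMASK_TM_ALL_0 (all masked bits zero), while "(x & 0x28) == 0x08"
// hits the two-bit case (Mask == Low + High) and maps to
// CCMASK_TM_MIXED_MSB_0.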
2121 | ||||
2122 | // See whether C can be implemented as a TEST UNDER MASK instruction. | |||
2123 | // Update the arguments with the TM version if so. | |||
2124 | static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, | |||
2125 | Comparison &C) { | |||
2126 | // Check that we have a comparison with a constant. | |||
2127 | auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1); | |||
2128 | if (!ConstOp1) | |||
2129 | return; | |||
2130 | uint64_t CmpVal = ConstOp1->getZExtValue(); | |||
2131 | ||||
2132 | // Check whether the nonconstant input is an AND with a constant mask. | |||
2133 | Comparison NewC(C); | |||
2134 | uint64_t MaskVal; | |||
2135 | ConstantSDNode *Mask = nullptr; | |||
2136 | if (C.Op0.getOpcode() == ISD::AND) { | |||
2137 | NewC.Op0 = C.Op0.getOperand(0); | |||
2138 | NewC.Op1 = C.Op0.getOperand(1); | |||
2139 | Mask = dyn_cast<ConstantSDNode>(NewC.Op1); | |||
2140 | if (!Mask) | |||
2141 | return; | |||
2142 | MaskVal = Mask->getZExtValue(); | |||
2143 | } else { | |||
2144 | // There is no instruction to compare with a 64-bit immediate, | |||
2145 | // so use TMHH instead if possible. We need an unsigned ordered | |||
2146 | // comparison with an i64 immediate. | |||
2147 | if (NewC.Op0.getValueType() != MVT::i64 || | |||
2148 | NewC.CCMask == SystemZ::CCMASK_CMP_EQ || | |||
2149 | NewC.CCMask == SystemZ::CCMASK_CMP_NE || | |||
2150 | NewC.ICmpType == SystemZICMP::SignedOnly) | |||
2151 | return; | |||
2152 | // Convert LE and GT comparisons into LT and GE. | |||
2153 | if (NewC.CCMask == SystemZ::CCMASK_CMP_LE || | |||
2154 | NewC.CCMask == SystemZ::CCMASK_CMP_GT) { | |||
2155 | if (CmpVal == uint64_t(-1)) | |||
2156 | return; | |||
2157 | CmpVal += 1; | |||
2158 | NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ; | |||
2159 | } | |||
2160 | // If the low N bits of Op1 are zero, then the low N bits of Op0 can | |||
2161 | // be masked off without changing the result. | |||
2162 | MaskVal = -(CmpVal & -CmpVal); | |||
2163 | NewC.ICmpType = SystemZICMP::UnsignedOnly; | |||
2164 | } | |||
2165 | if (!MaskVal) | |||
2166 | return; | |||
2167 | ||||
2168 | // Check whether the combination of mask, comparison value and comparison | |||
2169 | // type are suitable. | |||
2170 | unsigned BitSize = NewC.Op0.getValueSizeInBits(); | |||
2171 | unsigned NewCCMask, ShiftVal; | |||
2172 | if (NewC.ICmpType != SystemZICMP::SignedOnly && | |||
2173 | NewC.Op0.getOpcode() == ISD::SHL && | |||
2174 | isSimpleShift(NewC.Op0, ShiftVal) && | |||
2175 | (MaskVal >> ShiftVal != 0) && | |||
2176 | ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal && | |||
2177 | (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, | |||
2178 | MaskVal >> ShiftVal, | |||
2179 | CmpVal >> ShiftVal, | |||
2180 | SystemZICMP::Any))) { | |||
2181 | NewC.Op0 = NewC.Op0.getOperand(0); | |||
2182 | MaskVal >>= ShiftVal; | |||
2183 | } else if (NewC.ICmpType != SystemZICMP::SignedOnly && | |||
2184 | NewC.Op0.getOpcode() == ISD::SRL && | |||
2185 | isSimpleShift(NewC.Op0, ShiftVal) && | |||
2186 | (MaskVal << ShiftVal != 0) && | |||
2187 | ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal && | |||
2188 | (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, | |||
2189 | MaskVal << ShiftVal, | |||
2190 | CmpVal << ShiftVal, | |||
2191 | SystemZICMP::UnsignedOnly))) { | |||
2192 | NewC.Op0 = NewC.Op0.getOperand(0); | |||
2193 | MaskVal <<= ShiftVal; | |||
2194 | } else { | |||
2195 | NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal, | |||
2196 | NewC.ICmpType); | |||
2197 | if (!NewCCMask) | |||
2198 | return; | |||
2199 | } | |||
2200 | ||||
2201 | // Go ahead and make the change. | |||
2202 | C.Opcode = SystemZISD::TM; | |||
2203 | C.Op0 = NewC.Op0; | |||
2204 | if (Mask && Mask->getZExtValue() == MaskVal) | |||
2205 | C.Op1 = SDValue(Mask, 0); | |||
2206 | else | |||
2207 | C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType()); | |||
2208 | C.CCValid = SystemZ::CCMASK_TM; | |||
2209 | C.CCMask = NewCCMask; | |||
2210 | } | |||
2211 | ||||
2212 | // See whether the comparison argument contains a redundant AND | |||
2213 | // and remove it if so. This sometimes happens due to the generic | |||
2214 | // BRCOND expansion. | |||
2215 | static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL, | |||
2216 | Comparison &C) { | |||
2217 | if (C.Op0.getOpcode() != ISD::AND) | |||
2218 | return; | |||
2219 | auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1)); | |||
2220 | if (!Mask) | |||
2221 | return; | |||
2222 | KnownBits Known = DAG.computeKnownBits(C.Op0.getOperand(0)); | |||
2223 | if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue()) | |||
2224 | return; | |||
2225 | ||||
2226 | C.Op0 = C.Op0.getOperand(0); | |||
2227 | } | |||
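// The typical pattern here is an i1 condition materialized by the
// generic BRCOND expansion as (and X, 1) compared against a constant.
// When computeKnownBits proves X has no set bits outside the mask, the
// AND contributes nothing to the comparison and can be looked through.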
2228 | ||||
2229 | // Return a Comparison that tests the condition-code result of intrinsic | |||
2230 | // node Call against constant integer CC using comparison code Cond. | |||
2231 | // Opcode is the opcode of the SystemZISD operation for the intrinsic | |||
2232 | // and CCValid is the set of possible condition-code results. | |||
2233 | static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, | |||
2234 | SDValue Call, unsigned CCValid, uint64_t CC, | |||
2235 | ISD::CondCode Cond) { | |||
2236 | Comparison C(Call, SDValue()); | |||
2237 | C.Opcode = Opcode; | |||
2238 | C.CCValid = CCValid; | |||
2239 | if (Cond == ISD::SETEQ) | |||
2240 | // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3. | |||
2241 | C.CCMask = CC < 4 ? 1 << (3 - CC) : 0; | |||
2242 | else if (Cond == ISD::SETNE) | |||
2243 | // ...and the inverse of that. | |||
2244 | C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1; | |||
2245 | else if (Cond == ISD::SETLT || Cond == ISD::SETULT) | |||
2246 | // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3, | |||
2247 | // always true for CC>3. | |||
2248 | C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1; | |||
2249 | else if (Cond == ISD::SETGE || Cond == ISD::SETUGE) | |||
2250 | // ...and the inverse of that. | |||
2251 | C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0; | |||
2252 | else if (Cond == ISD::SETLE || Cond == ISD::SETULE) | |||
2253 | // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true), | |||
2254 | // always true for CC>3. | |||
2255 | C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1; | |||
2256 | else if (Cond == ISD::SETGT || Cond == ISD::SETUGT) | |||
2257 | // ...and the inverse of that. | |||
2258 | C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0; | |||
2259 | else | |||
2260 | llvm_unreachable("Unexpected integer comparison type"); | |||
2261 | C.CCMask &= CCValid; | |||
2262 | return C; | |||
2263 | } | |||
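// The CC mask convention: bit 3 corresponds to CC==0 and bit 0 to CC==3.
// Worked example: testing "tbegin result == 0" arrives as SETEQ with
// CC == 0 and yields CCMask == 1 << 3 == 8; "result != 2" (SETNE,
// CC == 2) yields ~(1 << 1), which the final "&= CCValid" trims to the
// representable CC values.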
2264 | ||||
2265 | // Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1. | |||
2266 | static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, | |||
2267 | ISD::CondCode Cond, const SDLoc &DL) { | |||
2268 | if (CmpOp1.getOpcode() == ISD::Constant) { | |||
2269 | uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue(); | |||
2270 | unsigned Opcode, CCValid; | |||
2271 | if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN && | |||
2272 | CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) && | |||
2273 | isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid)) | |||
2274 | return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond); | |||
2275 | if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN && | |||
2276 | CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 && | |||
2277 | isIntrinsicWithCC(CmpOp0, Opcode, CCValid)) | |||
2278 | return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond); | |||
2279 | } | |||
2280 | Comparison C(CmpOp0, CmpOp1); | |||
2281 | C.CCMask = CCMaskForCondCode(Cond); | |||
2282 | if (C.Op0.getValueType().isFloatingPoint()) { | |||
2283 | C.CCValid = SystemZ::CCMASK_FCMP; | |||
2284 | C.Opcode = SystemZISD::FCMP; | |||
2285 | adjustForFNeg(C); | |||
2286 | } else { | |||
2287 | C.CCValid = SystemZ::CCMASK_ICMP; | |||
2288 | C.Opcode = SystemZISD::ICMP; | |||
2289 | // Choose the type of comparison. Equality and inequality tests can | |||
2290 | // use either signed or unsigned comparisons. The choice also doesn't | |||
2291 | // matter if both sign bits are known to be clear. In those cases we | |||
2292 | // want to give the main isel code the freedom to choose whichever | |||
2293 | // form fits best. | |||
2294 | if (C.CCMask == SystemZ::CCMASK_CMP_EQ || | |||
2295 | C.CCMask == SystemZ::CCMASK_CMP_NE || | |||
2296 | (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1))) | |||
2297 | C.ICmpType = SystemZICMP::Any; | |||
2298 | else if (C.CCMask & SystemZ::CCMASK_CMP_UO) | |||
2299 | C.ICmpType = SystemZICMP::UnsignedOnly; | |||
2300 | else | |||
2301 | C.ICmpType = SystemZICMP::SignedOnly; | |||
2302 | C.CCMask &= ~SystemZ::CCMASK_CMP_UO; | |||
2303 | adjustForRedundantAnd(DAG, DL, C); | |||
2304 | adjustZeroCmp(DAG, DL, C); | |||
2305 | adjustSubwordCmp(DAG, DL, C); | |||
2306 | adjustForSubtraction(DAG, DL, C); | |||
2307 | adjustForLTGFR(C); | |||
2308 | adjustICmpTruncate(DAG, DL, C); | |||
2309 | } | |||
2310 | ||||
2311 | if (shouldSwapCmpOperands(C)) { | |||
2312 | std::swap(C.Op0, C.Op1); | |||
2313 | C.CCMask = reverseCCMask(C.CCMask); | |||
2314 | } | |||
2315 | ||||
2316 | adjustForTestUnderMask(DAG, DL, C); | |||
2317 | return C; | |||
2318 | } | |||
2319 | ||||
2320 | // Emit the comparison instruction described by C. | |||
2321 | static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) { | |||
2322 | if (!C.Op1.getNode()) { | |||
2323 | SDNode *Node; | |||
2324 | switch (C.Op0.getOpcode()) { | |||
2325 | case ISD::INTRINSIC_W_CHAIN: | |||
2326 | Node = emitIntrinsicWithCCAndChain(DAG, C.Op0, C.Opcode); | |||
2327 | return SDValue(Node, 0); | |||
2328 | case ISD::INTRINSIC_WO_CHAIN: | |||
2329 | Node = emitIntrinsicWithCC(DAG, C.Op0, C.Opcode); | |||
2330 | return SDValue(Node, Node->getNumValues() - 1); | |||
2331 | default: | |||
2332 | llvm_unreachable("Invalid comparison operands"); | |||
2333 | } | |||
2334 | } | |||
2335 | if (C.Opcode == SystemZISD::ICMP) | |||
2336 | return DAG.getNode(SystemZISD::ICMP, DL, MVT::i32, C.Op0, C.Op1, | |||
2337 | DAG.getConstant(C.ICmpType, DL, MVT::i32)); | |||
2338 | if (C.Opcode == SystemZISD::TM) { | |||
2339 | bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) != | |||
2340 | bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1)); | |||
2341 | return DAG.getNode(SystemZISD::TM, DL, MVT::i32, C.Op0, C.Op1, | |||
2342 | DAG.getConstant(RegisterOnly, DL, MVT::i32)); | |||
2343 | } | |||
2344 | return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1); | |||
2345 | } | |||
2346 | ||||
2347 | // Implement a 32-bit *MUL_LOHI operation by extending both operands to | |||
2348 | // 64 bits. Extend is the extension type to use. Store the high part | |||
2349 | // in Hi and the low part in Lo. | |||
2350 | static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, | |||
2351 | SDValue Op0, SDValue Op1, SDValue &Hi, | |||
2352 | SDValue &Lo) { | |||
2353 | Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0); | |||
2354 | Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1); | |||
2355 | SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1); | |||
2356 | Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, | |||
2357 | DAG.getConstant(32, DL, MVT::i64)); | |||
2358 | Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi); | |||
2359 | Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul); | |||
2360 | } | |||
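// Worked example (illustrative): extending 0xffffffff and 0x00000002 with
// ISD::ZERO_EXTEND gives the i64 product 0x00000001fffffffe, so Hi becomes
// 0x00000001 and Lo becomes 0xfffffffe.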
2361 | ||||
2362 | // Lower a binary operation that produces two VT results, one in each | |||
2363 | // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation, | |||
2364 | // and Opcode performs the GR128 operation. Store the even register result | |||
2365 | // in Even and the odd register result in Odd. | |||
2366 | static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, | |||
2367 | unsigned Opcode, SDValue Op0, SDValue Op1, | |||
2368 | SDValue &Even, SDValue &Odd) { | |||
2369 | SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1); | |||
2370 | bool Is32Bit = is32Bit(VT); | |||
2371 | Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result); | |||
2372 | Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result); | |||
2373 | } | |||
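// For instance, a 64-bit SystemZISD::UDIVREM produces an even/odd GR64 pair
// inside one GR128 value; even128()/odd128() pick the matching subregister
// indices so that Even receives the remainder and Odd the quotient (see the
// DL(G) comment in lowerUDIVREM below).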
2374 | ||||
2375 | // Return an i32 value that is 1 if the CC value produced by CCReg is | |||
2376 | // in the mask CCMask and 0 otherwise. CC is known to have a value | |||
2377 | // in CCValid, so other values can be ignored. | |||
2378 | static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg, | |||
2379 | unsigned CCValid, unsigned CCMask) { | |||
2380 | SDValue Ops[] = { DAG.getConstant(1, DL, MVT::i32), | |||
2381 | DAG.getConstant(0, DL, MVT::i32), | |||
2382 | DAG.getConstant(CCValid, DL, MVT::i32), | |||
2383 | DAG.getConstant(CCMask, DL, MVT::i32), CCReg }; | |||
2384 | return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, MVT::i32, Ops); | |||
2385 | } | |||
2386 | ||||
2387 | // Return the SystemZISD vector comparison operation for CC, or 0 if it cannot | |||
2388 | // be done directly. IsFP is true if CC is for a floating-point rather than | |||
2389 | // integer comparison. | |||
2390 | static unsigned getVectorComparison(ISD::CondCode CC, bool IsFP) { | |||
2391 | switch (CC) { | |||
2392 | case ISD::SETOEQ: | |||
2393 | case ISD::SETEQ: | |||
2394 | return IsFP ? SystemZISD::VFCMPE : SystemZISD::VICMPE; | |||
2395 | ||||
2396 | case ISD::SETOGE: | |||
2397 | case ISD::SETGE: | |||
2398 | return IsFP ? SystemZISD::VFCMPHE : static_cast<SystemZISD::NodeType>(0); | |||
2399 | ||||
2400 | case ISD::SETOGT: | |||
2401 | case ISD::SETGT: | |||
2402 | return IsFP ? SystemZISD::VFCMPH : SystemZISD::VICMPH; | |||
2403 | ||||
2404 | case ISD::SETUGT: | |||
2405 | return IsFP ? static_cast<SystemZISD::NodeType>(0) : SystemZISD::VICMPHL; | |||
2406 | ||||
2407 | default: | |||
2408 | return 0; | |||
2409 | } | |||
2410 | } | |||
2411 | ||||
2412 | // Return the SystemZISD vector comparison operation for CC or its inverse, | |||
2413 | // or 0 if neither can be done directly. Indicate in Invert whether the | |||
2414 | // result is for the inverse of CC. IsFP is true if CC is for a | |||
2415 | // floating-point rather than integer comparison. | |||
2416 | static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP, | |||
2417 | bool &Invert) { | |||
2418 | if (unsigned Opcode = getVectorComparison(CC, IsFP)) { | |||
2419 | Invert = false; | |||
2420 | return Opcode; | |||
2421 | } | |||
2422 | ||||
2423 | CC = ISD::getSetCCInverse(CC, !IsFP); | |||
2424 | if (unsigned Opcode = getVectorComparison(CC, IsFP)) { | |||
2425 | Invert = true; | |||
2426 | return Opcode; | |||
2427 | } | |||
2428 | ||||
2429 | return 0; | |||
2430 | } | |||
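// Example (illustrative): floating-point SETULE has no direct instruction,
// but its inverse SETOGT maps to VFCMPH, so VFCMPH is returned with Invert
// set and the caller flips the resulting mask.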
2431 | ||||
2432 | // Return a v2f64 that contains the extended form of elements Start and Start+1 | |||
2433 | // of v4f32 value Op. | |||
2434 | static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, | |||
2435 | SDValue Op) { | |||
2436 | int Mask[] = { Start, -1, Start + 1, -1 }; | |||
2437 | Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask); | |||
2438 | return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op); | |||
2439 | } | |||
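// E.g. with Start == 2 the mask is { 2, -1, 3, -1 }, placing elements 2 and
// 3 of Op into the even lanes that VEXTEND widens to f64 (this assumes
// VEXTEND, like VLDEB, reads the even-indexed source elements).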
2440 | ||||
2441 | // Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode, | |||
2442 | // producing a result of type VT. | |||
2443 | SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode, | |||
2444 | const SDLoc &DL, EVT VT, | |||
2445 | SDValue CmpOp0, | |||
2446 | SDValue CmpOp1) const { | |||
2447 | // There is no hardware support for v4f32 (unless we have the vector | |||
2448 | // enhancements facility 1), so extend the vector into two v2f64s | |||
2449 | // and compare those. | |||
2450 | if (CmpOp0.getValueType() == MVT::v4f32 && | |||
2451 | !Subtarget.hasVectorEnhancements1()) { | |||
2452 | SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0); | |||
2453 | SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0); | |||
2454 | SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1); | |||
2455 | SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1); | |||
2456 | SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1); | |||
2457 | SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1); | |||
2458 | return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes); | |||
2459 | } | |||
2460 | return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1); | |||
2461 | } | |||
2462 | ||||
2463 | // Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing | |||
2464 | // an integer mask of type VT. | |||
2465 | SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG, | |||
2466 | const SDLoc &DL, EVT VT, | |||
2467 | ISD::CondCode CC, | |||
2468 | SDValue CmpOp0, | |||
2469 | SDValue CmpOp1) const { | |||
2470 | bool IsFP = CmpOp0.getValueType().isFloatingPoint(); | |||
2471 | bool Invert = false; | |||
2472 | SDValue Cmp; | |||
2473 | switch (CC) { | |||
2474 | // Handle tests for order using (or (ogt y x) (oge x y)). | |||
2475 | case ISD::SETUO: | |||
2476 | Invert = true; | |||
2477 | LLVM_FALLTHROUGH; | |||
2478 | case ISD::SETO: { | |||
2479 | assert(IsFP && "Unexpected integer comparison"); | |||
2480 | SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0); | |||
2481 | SDValue GE = getVectorCmp(DAG, SystemZISD::VFCMPHE, DL, VT, CmpOp0, CmpOp1); | |||
2482 | Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE); | |||
2483 | break; | |||
2484 | } | |||
2485 | ||||
2486 | // Handle <> tests using (or (ogt y x) (ogt x y)). | |||
2487 | case ISD::SETUEQ: | |||
2488 | Invert = true; | |||
2489 | LLVM_FALLTHROUGH; | |||
2490 | case ISD::SETONE: { | |||
2491 | assert(IsFP && "Unexpected integer comparison"); | |||
2492 | SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0); | |||
2493 | SDValue GT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp0, CmpOp1); | |||
2494 | Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT); | |||
2495 | break; | |||
2496 | } | |||
2497 | ||||
2498 | // Otherwise a single comparison is enough. It doesn't really | |||
2499 | // matter whether we try the inversion or the swap first, since | |||
2500 | // there are no cases where both work. | |||
2501 | default: | |||
2502 | if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert)) | |||
2503 | Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1); | |||
2504 | else { | |||
2505 | CC = ISD::getSetCCSwappedOperands(CC); | |||
2506 | if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert)) | |||
2507 | Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0); | |||
2508 | else | |||
2509 | llvm_unreachable("Unhandled comparison"); | |||
2510 | } | |||
2511 | break; | |||
2512 | } | |||
2513 | if (Invert) { | |||
2514 | SDValue Mask = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8, | |||
2515 | DAG.getConstant(65535, DL, MVT::i32)); | |||
2516 | Mask = DAG.getNode(ISD::BITCAST, DL, VT, Mask); | |||
2517 | Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask); | |||
2518 | } | |||
2519 | return Cmp; | |||
2520 | } | |||
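// Note on the Invert step above (illustrative): BYTE_MASK with the value
// 65535 sets all 16 mask bits, materializing an all-ones v16i8, so the XOR
// flips every lane of the comparison result.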
2521 | ||||
2522 | SDValue SystemZTargetLowering::lowerSETCC(SDValue Op, | |||
2523 | SelectionDAG &DAG) const { | |||
2524 | SDValue CmpOp0 = Op.getOperand(0); | |||
2525 | SDValue CmpOp1 = Op.getOperand(1); | |||
2526 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); | |||
2527 | SDLoc DL(Op); | |||
2528 | EVT VT = Op.getValueType(); | |||
2529 | if (VT.isVector()) | |||
2530 | return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1); | |||
2531 | ||||
2532 | Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); | |||
2533 | SDValue CCReg = emitCmp(DAG, DL, C); | |||
2534 | return emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask); | |||
2535 | } | |||
2536 | ||||
2537 | SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { | |||
2538 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); | |||
2539 | SDValue CmpOp0 = Op.getOperand(2); | |||
2540 | SDValue CmpOp1 = Op.getOperand(3); | |||
2541 | SDValue Dest = Op.getOperand(4); | |||
2542 | SDLoc DL(Op); | |||
2543 | ||||
2544 | Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); | |||
2545 | SDValue CCReg = emitCmp(DAG, DL, C); | |||
2546 | return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(), | |||
2547 | Op.getOperand(0), DAG.getConstant(C.CCValid, DL, MVT::i32), | |||
2548 | DAG.getConstant(C.CCMask, DL, MVT::i32), Dest, CCReg); | |||
2549 | } | |||
2550 | ||||
2551 | // Return true if Pos is CmpOp and Neg is the negative of CmpOp, | |||
2552 | // allowing Pos and Neg to be wider than CmpOp. | |||
2553 | static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) { | |||
2554 | return (Neg.getOpcode() == ISD::SUB && | |||
2555 | Neg.getOperand(0).getOpcode() == ISD::Constant && | |||
2556 | cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 && | |||
2557 | Neg.getOperand(1) == Pos && | |||
2558 | (Pos == CmpOp || | |||
2559 | (Pos.getOpcode() == ISD::SIGN_EXTEND && | |||
2560 | Pos.getOperand(0) == CmpOp))); | |||
2561 | } | |||
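// Illustrative match: with CmpOp == x, Pos == sign_extend(x) and
// Neg == (sub 0, sign_extend(x)) satisfy this predicate, which is the shape
// produced by a 64-bit absolute value of a sign-extended i32.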
2562 | ||||
2563 | // Return the absolute or negative absolute of Op; IsNegative decides which. | |||
2564 | static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, | |||
2565 | bool IsNegative) { | |||
2566 | Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op); | |||
2567 | if (IsNegative) | |||
2568 | Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(), | |||
2569 | DAG.getConstant(0, DL, Op.getValueType()), Op); | |||
2570 | return Op; | |||
2571 | } | |||
2572 | ||||
2573 | SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, | |||
2574 | SelectionDAG &DAG) const { | |||
2575 | SDValue CmpOp0 = Op.getOperand(0); | |||
2576 | SDValue CmpOp1 = Op.getOperand(1); | |||
2577 | SDValue TrueOp = Op.getOperand(2); | |||
2578 | SDValue FalseOp = Op.getOperand(3); | |||
2579 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); | |||
2580 | SDLoc DL(Op); | |||
2581 | ||||
2582 | Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); | |||
2583 | ||||
2584 | // Check for absolute and negative-absolute selections, including those | |||
2585 | // where the comparison value is sign-extended (for LPGFR and LNGFR). | |||
2586 | // This check supplements the one in DAGCombiner. | |||
2587 | if (C.Opcode == SystemZISD::ICMP && | |||
2588 | C.CCMask != SystemZ::CCMASK_CMP_EQ && | |||
2589 | C.CCMask != SystemZ::CCMASK_CMP_NE && | |||
2590 | C.Op1.getOpcode() == ISD::Constant && | |||
2591 | cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { | |||
2592 | if (isAbsolute(C.Op0, TrueOp, FalseOp)) | |||
2593 | return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT); | |||
2594 | if (isAbsolute(C.Op0, FalseOp, TrueOp)) | |||
2595 | return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT); | |||
2596 | } | |||
2597 | ||||
2598 | SDValue CCReg = emitCmp(DAG, DL, C); | |||
2599 | SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, DL, MVT::i32), | |||
2600 | DAG.getConstant(C.CCMask, DL, MVT::i32), CCReg}; | |||
2601 | ||||
2602 | return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, Op.getValueType(), Ops); | |||
2603 | } | |||
2604 | ||||
2605 | SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node, | |||
2606 | SelectionDAG &DAG) const { | |||
2607 | SDLoc DL(Node); | |||
2608 | const GlobalValue *GV = Node->getGlobal(); | |||
2609 | int64_t Offset = Node->getOffset(); | |||
2610 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2611 | CodeModel::Model CM = DAG.getTarget().getCodeModel(); | |||
2612 | ||||
2613 | SDValue Result; | |||
2614 | if (Subtarget.isPC32DBLSymbol(GV, CM)) { | |||
2615 | // Assign anchors at 1<<12 byte boundaries. | |||
2616 | uint64_t Anchor = Offset & ~uint64_t(0xfff); | |||
2617 | Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor); | |||
2618 | Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); | |||
2619 | ||||
2620 | // The offset can be folded into the address if it is aligned to a halfword. | |||
2621 | Offset -= Anchor; | |||
2622 | if (Offset != 0 && (Offset & 1) == 0) { | |||
2623 | SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset); | |||
2624 | Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result); | |||
2625 | Offset = 0; | |||
2626 | } | |||
2627 | } else { | |||
2628 | Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT); | |||
2629 | Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); | |||
2630 | Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, | |||
2631 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); | |||
2632 | } | |||
2633 | ||||
2634 | // If there was a non-zero offset that we didn't fold, create an explicit | |||
2635 | // addition for it. | |||
2636 | if (Offset != 0) | |||
2637 | Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result, | |||
2638 | DAG.getConstant(Offset, DL, PtrVT)); | |||
2639 | ||||
2640 | return Result; | |||
2641 | } | |||
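// Worked example (illustrative): for Offset == 0x1235 the anchor is 0x1000;
// the remaining 0x235 is odd, so it cannot be folded into the PC-relative
// address and is materialized by the explicit ADD above.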
2642 | ||||
2643 | SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node, | |||
2644 | SelectionDAG &DAG, | |||
2645 | unsigned Opcode, | |||
2646 | SDValue GOTOffset) const { | |||
2647 | SDLoc DL(Node); | |||
2648 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2649 | SDValue Chain = DAG.getEntryNode(); | |||
2650 | SDValue Glue; | |||
2651 | ||||
2652 | // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12. | |||
2653 | SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); | |||
2654 | Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue); | |||
2655 | Glue = Chain.getValue(1); | |||
2656 | Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue); | |||
2657 | Glue = Chain.getValue(1); | |||
2658 | ||||
2659 | // The first call operand is the chain and the second is the TLS symbol. | |||
2660 | SmallVector<SDValue, 8> Ops; | |||
2661 | Ops.push_back(Chain); | |||
2662 | Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL, | |||
2663 | Node->getValueType(0), | |||
2664 | 0, 0)); | |||
2665 | ||||
2666 | // Add argument registers to the end of the list so that they are | |||
2667 | // known live into the call. | |||
2668 | Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT)); | |||
2669 | Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT)); | |||
2670 | ||||
2671 | // Add a register mask operand representing the call-preserved registers. | |||
2672 | const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
2673 | const uint32_t *Mask = | |||
2674 | TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C); | |||
2675 | assert(Mask && "Missing call preserved mask for calling convention"); | |||
2676 | Ops.push_back(DAG.getRegisterMask(Mask)); | |||
2677 | ||||
2678 | // Glue the call to the argument copies. | |||
2679 | Ops.push_back(Glue); | |||
2680 | ||||
2681 | // Emit the call. | |||
2682 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
2683 | Chain = DAG.getNode(Opcode, DL, NodeTys, Ops); | |||
2684 | Glue = Chain.getValue(1); | |||
2685 | ||||
2686 | // Copy the return value from %r2. | |||
2687 | return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue); | |||
2688 | } | |||
2689 | ||||
2690 | SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL, | |||
2691 | SelectionDAG &DAG) const { | |||
2692 | SDValue Chain = DAG.getEntryNode(); | |||
2693 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2694 | ||||
2695 | // The high part of the thread pointer is in access register 0. | |||
2696 | SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32); | |||
2697 | TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi); | |||
2698 | ||||
2699 | // The low part of the thread pointer is in access register 1. | |||
2700 | SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32); | |||
2701 | TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo); | |||
2702 | ||||
2703 | // Merge them into a single 64-bit address. | |||
2704 | SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi, | |||
2705 | DAG.getConstant(32, DL, PtrVT)); | |||
2706 | return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo); | |||
2707 | } | |||
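// E.g. (illustrative values) if %a0 holds 0x00000001 and %a1 holds
// 0x00002000, the thread pointer computed here is
// (0x00000001 << 32) | 0x00002000 == 0x0000000100002000.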
2708 | ||||
2709 | SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node, | |||
2710 | SelectionDAG &DAG) const { | |||
2711 | if (DAG.getTarget().useEmulatedTLS()) | |||
2712 | return LowerToTLSEmulatedModel(Node, DAG); | |||
2713 | SDLoc DL(Node); | |||
2714 | const GlobalValue *GV = Node->getGlobal(); | |||
2715 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2716 | TLSModel::Model model = DAG.getTarget().getTLSModel(GV); | |||
2717 | ||||
2718 | SDValue TP = lowerThreadPointer(DL, DAG); | |||
2719 | ||||
2720 | // Get the offset of GA from the thread pointer, based on the TLS model. | |||
2721 | SDValue Offset; | |||
2722 | switch (model) { | |||
2723 | case TLSModel::GeneralDynamic: { | |||
2724 | // Load the GOT offset of the tls_index (module ID / per-symbol offset). | |||
2725 | SystemZConstantPoolValue *CPV = | |||
2726 | SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD); | |||
2727 | ||||
2728 | Offset = DAG.getConstantPool(CPV, PtrVT, 8); | |||
2729 | Offset = DAG.getLoad( | |||
2730 | PtrVT, DL, DAG.getEntryNode(), Offset, | |||
2731 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
2732 | ||||
2733 | // Call __tls_get_offset to retrieve the offset. | |||
2734 | Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset); | |||
2735 | break; | |||
2736 | } | |||
2737 | ||||
2738 | case TLSModel::LocalDynamic: { | |||
2739 | // Load the GOT offset of the module ID. | |||
2740 | SystemZConstantPoolValue *CPV = | |||
2741 | SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM); | |||
2742 | ||||
2743 | Offset = DAG.getConstantPool(CPV, PtrVT, 8); | |||
2744 | Offset = DAG.getLoad( | |||
2745 | PtrVT, DL, DAG.getEntryNode(), Offset, | |||
2746 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
2747 | ||||
2748 | // Call __tls_get_offset to retrieve the module base offset. | |||
2749 | Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset); | |||
2750 | ||||
2751 | // Note: The SystemZLDCleanupPass will remove redundant computations | |||
2752 | // of the module base offset. Count total number of local-dynamic | |||
2753 | // accesses to trigger execution of that pass. | |||
2754 | SystemZMachineFunctionInfo *MFI = | |||
2755 | DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>(); | |||
2756 | MFI->incNumLocalDynamicTLSAccesses(); | |||
2757 | ||||
2758 | // Add the per-symbol offset. | |||
2759 | CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF); | |||
2760 | ||||
2761 | SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8); | |||
2762 | DTPOffset = DAG.getLoad( | |||
2763 | PtrVT, DL, DAG.getEntryNode(), DTPOffset, | |||
2764 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
2765 | ||||
2766 | Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset); | |||
2767 | break; | |||
2768 | } | |||
2769 | ||||
2770 | case TLSModel::InitialExec: { | |||
2771 | // Load the offset from the GOT. | |||
2772 | Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, | |||
2773 | SystemZII::MO_INDNTPOFF); | |||
2774 | Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset); | |||
2775 | Offset = | |||
2776 | DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset, | |||
2777 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); | |||
2778 | break; | |||
2779 | } | |||
2780 | ||||
2781 | case TLSModel::LocalExec: { | |||
2782 | // Force the offset into the constant pool and load it from there. | |||
2783 | SystemZConstantPoolValue *CPV = | |||
2784 | SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF); | |||
2785 | ||||
2786 | Offset = DAG.getConstantPool(CPV, PtrVT, 8); | |||
2787 | Offset = DAG.getLoad( | |||
2788 | PtrVT, DL, DAG.getEntryNode(), Offset, | |||
2789 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
2790 | break; | |||
2791 | } | |||
2792 | } | |||
2793 | ||||
2794 | // Add the base and offset together. | |||
2795 | return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset); | |||
2796 | } | |||
2797 | ||||
2798 | SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node, | |||
2799 | SelectionDAG &DAG) const { | |||
2800 | SDLoc DL(Node); | |||
2801 | const BlockAddress *BA = Node->getBlockAddress(); | |||
2802 | int64_t Offset = Node->getOffset(); | |||
2803 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2804 | ||||
2805 | SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset); | |||
2806 | Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); | |||
2807 | return Result; | |||
2808 | } | |||
2809 | ||||
2810 | SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT, | |||
2811 | SelectionDAG &DAG) const { | |||
2812 | SDLoc DL(JT); | |||
2813 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2814 | SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); | |||
2815 | ||||
2816 | // Use LARL to load the address of the table. | |||
2817 | return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); | |||
2818 | } | |||
2819 | ||||
2820 | SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, | |||
2821 | SelectionDAG &DAG) const { | |||
2822 | SDLoc DL(CP); | |||
2823 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2824 | ||||
2825 | SDValue Result; | |||
2826 | if (CP->isMachineConstantPoolEntry()) | |||
2827 | Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, | |||
2828 | CP->getAlignment()); | |||
2829 | else | |||
2830 | Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, | |||
2831 | CP->getAlignment(), CP->getOffset()); | |||
2832 | ||||
2833 | // Use LARL to load the address of the constant pool entry. | |||
2834 | return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); | |||
2835 | } | |||
2836 | ||||
2837 | SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op, | |||
2838 | SelectionDAG &DAG) const { | |||
2839 | MachineFunction &MF = DAG.getMachineFunction(); | |||
2840 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
2841 | MFI.setFrameAddressIsTaken(true); | |||
2842 | ||||
2843 | SDLoc DL(Op); | |||
2844 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
2845 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2846 | ||||
2847 | // If the back chain frame index has not been allocated yet, do so. | |||
2848 | SystemZMachineFunctionInfo *FI = MF.getInfo<SystemZMachineFunctionInfo>(); | |||
2849 | int BackChainIdx = FI->getFramePointerSaveIndex(); | |||
2850 | if (!BackChainIdx) { | |||
2851 | // By definition, the frame address is the address of the back chain. | |||
2852 | BackChainIdx = MFI.CreateFixedObject(8, -SystemZMC::CallFrameSize, false); | |||
2853 | FI->setFramePointerSaveIndex(BackChainIdx); | |||
2854 | } | |||
2855 | SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT); | |||
2856 | ||||
2857 | // FIXME The frontend should detect this case. | |||
2858 | if (Depth > 0) { | |||
2859 | report_fatal_error("Unsupported stack frame traversal count"); | |||
2860 | } | |||
2861 | ||||
2862 | return BackChain; | |||
2863 | } | |||
2864 | ||||
2865 | SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op, | |||
2866 | SelectionDAG &DAG) const { | |||
2867 | MachineFunction &MF = DAG.getMachineFunction(); | |||
2868 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
2869 | MFI.setReturnAddressIsTaken(true); | |||
2870 | ||||
2871 | if (verifyReturnAddressArgumentIsConstant(Op, DAG)) | |||
2872 | return SDValue(); | |||
2873 | ||||
2874 | SDLoc DL(Op); | |||
2875 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
2876 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2877 | ||||
2878 | // FIXME The frontend should detect this case. | |||
2879 | if (Depth > 0) { | |||
2880 | report_fatal_error("Unsupported stack frame traversal count"); | |||
2881 | } | |||
2882 | ||||
2883 | // Return R14D, which has the return address. Mark it an implicit live-in. | |||
2884 | unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass); | |||
2885 | return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT); | |||
2886 | } | |||
2887 | ||||
2888 | SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, | |||
2889 | SelectionDAG &DAG) const { | |||
2890 | SDLoc DL(Op); | |||
2891 | SDValue In = Op.getOperand(0); | |||
2892 | EVT InVT = In.getValueType(); | |||
2893 | EVT ResVT = Op.getValueType(); | |||
2894 | ||||
2895 | // Convert loads directly. This is normally done by DAGCombiner, | |||
2896 | // but we need this case for bitcasts that are created during lowering | |||
2897 | // and which are then lowered themselves. | |||
2898 | if (auto *LoadN = dyn_cast<LoadSDNode>(In)) | |||
2899 | if (ISD::isNormalLoad(LoadN)) { | |||
2900 | SDValue NewLoad = DAG.getLoad(ResVT, DL, LoadN->getChain(), | |||
2901 | LoadN->getBasePtr(), LoadN->getMemOperand()); | |||
2902 | // Update the chain uses. | |||
2903 | DAG.ReplaceAllUsesOfValueWith(SDValue(LoadN, 1), NewLoad.getValue(1)); | |||
2904 | return NewLoad; | |||
2905 | } | |||
2906 | ||||
2907 | if (InVT == MVT::i32 && ResVT == MVT::f32) { | |||
2908 | SDValue In64; | |||
2909 | if (Subtarget.hasHighWord()) { | |||
2910 | SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, | |||
2911 | MVT::i64); | |||
2912 | In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, | |||
2913 | MVT::i64, SDValue(U64, 0), In); | |||
2914 | } else { | |||
2915 | In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In); | |||
2916 | In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, | |||
2917 | DAG.getConstant(32, DL, MVT::i64)); | |||
2918 | } | |||
2919 | SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64); | |||
2920 | return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, | |||
2921 | DL, MVT::f32, Out64); | |||
2922 | } | |||
2923 | if (InVT == MVT::f32 && ResVT == MVT::i32) { | |||
2924 | SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64); | |||
2925 | SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, | |||
2926 | MVT::f64, SDValue(U64, 0), In); | |||
2927 | SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64); | |||
2928 | if (Subtarget.hasHighWord()) | |||
2929 | return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL, | |||
2930 | MVT::i32, Out64); | |||
2931 | SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, | |||
2932 | DAG.getConstant(32, DL, MVT::i64)); | |||
2933 | return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift); | |||
2934 | } | |||
2935 | llvm_unreachable("Unexpected bitcast combination"); | |||
2936 | } | |||
2937 | ||||
2938 | SDValue SystemZTargetLowering::lowerVASTART(SDValue Op, | |||
2939 | SelectionDAG &DAG) const { | |||
2940 | MachineFunction &MF = DAG.getMachineFunction(); | |||
2941 | SystemZMachineFunctionInfo *FuncInfo = | |||
2942 | MF.getInfo<SystemZMachineFunctionInfo>(); | |||
2943 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2944 | ||||
2945 | SDValue Chain = Op.getOperand(0); | |||
2946 | SDValue Addr = Op.getOperand(1); | |||
2947 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); | |||
2948 | SDLoc DL(Op); | |||
2949 | ||||
2950 | // The initial values of each field. | |||
2951 | const unsigned NumFields = 4; | |||
2952 | SDValue Fields[NumFields] = { | |||
2953 | DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT), | |||
2954 | DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT), | |||
2955 | DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT), | |||
2956 | DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT) | |||
2957 | }; | |||
2958 | ||||
2959 | // Store each field into its respective slot. | |||
2960 | SDValue MemOps[NumFields]; | |||
2961 | unsigned Offset = 0; | |||
2962 | for (unsigned I = 0; I < NumFields; ++I) { | |||
2963 | SDValue FieldAddr = Addr; | |||
2964 | if (Offset != 0) | |||
2965 | FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr, | |||
2966 | DAG.getIntPtrConstant(Offset, DL)); | |||
2967 | MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr, | |||
2968 | MachinePointerInfo(SV, Offset)); | |||
2969 | Offset += 8; | |||
2970 | } | |||
2971 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps); | |||
2972 | } | |||
2973 | ||||
2974 | SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op, | |||
2975 | SelectionDAG &DAG) const { | |||
2976 | SDValue Chain = Op.getOperand(0); | |||
2977 | SDValue DstPtr = Op.getOperand(1); | |||
2978 | SDValue SrcPtr = Op.getOperand(2); | |||
2979 | const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); | |||
2980 | const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); | |||
2981 | SDLoc DL(Op); | |||
2982 | ||||
2983 | return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL), | |||
2984 | /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false, | |||
2985 | /*isTailCall*/false, | |||
2986 | MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); | |||
2987 | } | |||
2988 | ||||
2989 | SDValue SystemZTargetLowering:: | |||
2990 | lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { | |||
2991 | const TargetFrameLowering *TFI = Subtarget.getFrameLowering(); | |||
2992 | MachineFunction &MF = DAG.getMachineFunction(); | |||
2993 | bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack"); | |||
2994 | bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain"); | |||
2995 | ||||
2996 | SDValue Chain = Op.getOperand(0); | |||
2997 | SDValue Size = Op.getOperand(1); | |||
2998 | SDValue Align = Op.getOperand(2); | |||
2999 | SDLoc DL(Op); | |||
3000 | ||||
3001 | // If the user has set the no-realign-stack function attribute, ignore | |||
3002 | // alloca alignments. | |||
3003 | uint64_t AlignVal = (RealignOpt ? | |||
3004 | cast<ConstantSDNode>(Align)->getZExtValue() : 0); | |||
3005 | ||||
3006 | uint64_t StackAlign = TFI->getStackAlignment(); | |||
3007 | uint64_t RequiredAlign = std::max(AlignVal, StackAlign); | |||
3008 | uint64_t ExtraAlignSpace = RequiredAlign - StackAlign; | |||
3009 | ||||
3010 | unsigned SPReg = getStackPointerRegisterToSaveRestore(); | |||
3011 | SDValue NeededSpace = Size; | |||
3012 | ||||
3013 | // Get a reference to the stack pointer. | |||
3014 | SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64); | |||
3015 | ||||
3016 | // If we need a backchain, save it now. | |||
3017 | SDValue Backchain; | |||
3018 | if (StoreBackchain) | |||
3019 | Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo()); | |||
3020 | ||||
3021 | // Add extra space for alignment if needed. | |||
3022 | if (ExtraAlignSpace) | |||
3023 | NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace, | |||
3024 | DAG.getConstant(ExtraAlignSpace, DL, MVT::i64)); | |||
3025 | ||||
3026 | // Get the new stack pointer value. | |||
3027 | SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace); | |||
3028 | ||||
3029 | // Copy the new stack pointer back. | |||
3030 | Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP); | |||
3031 | ||||
3032 | // The allocated data lives above the 160 bytes allocated for the standard | |||
3033 | // frame, plus any outgoing stack arguments. We don't know how much that | |||
3034 | // amounts to yet, so emit a special ADJDYNALLOC placeholder. | |||
3035 | SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64); | |||
3036 | SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust); | |||
3037 | ||||
3038 | // Dynamically realign if needed. | |||
3039 | if (RequiredAlign > StackAlign) { | |||
3040 | Result = | |||
3041 | DAG.getNode(ISD::ADD, DL, MVT::i64, Result, | |||
3042 | DAG.getConstant(ExtraAlignSpace, DL, MVT::i64)); | |||
3043 | Result = | |||
3044 | DAG.getNode(ISD::AND, DL, MVT::i64, Result, | |||
3045 | DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64)); | |||
3046 | } | |||
3047 | ||||
3048 | if (StoreBackchain) | |||
3049 | Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo()); | |||
3050 | ||||
3051 | SDValue Ops[2] = { Result, Chain }; | |||
3052 | return DAG.getMergeValues(Ops, DL); | |||
3053 | } | |||
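// Worked example (illustrative): an alloca requesting 64-byte alignment on
// an 8-byte-aligned stack gives ExtraAlignSpace == 56; 56 extra bytes are
// allocated and the realignment computes (Result + 56) & ~63 to reach the
// next 64-byte boundary.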
3054 | ||||
3055 | SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET( | |||
3056 | SDValue Op, SelectionDAG &DAG) const { | |||
3057 | SDLoc DL(Op); | |||
3058 | ||||
3059 | return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64); | |||
3060 | } | |||
3061 | ||||
3062 | SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op, | |||
3063 | SelectionDAG &DAG) const { | |||
3064 | EVT VT = Op.getValueType(); | |||
3065 | SDLoc DL(Op); | |||
3066 | SDValue Ops[2]; | |||
3067 | if (is32Bit(VT)) | |||
3068 | // Just do a normal 64-bit multiplication and extract the results. | |||
3069 | // We define this so that it can be used for constant division. | |||
3070 | lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0), | |||
3071 | Op.getOperand(1), Ops[1], Ops[0]); | |||
3072 | else if (Subtarget.hasMiscellaneousExtensions2()) | |||
3073 | // SystemZISD::SMUL_LOHI returns the low result in the odd register and | |||
3074 | // the high result in the even register. ISD::SMUL_LOHI is defined to | |||
3075 | // return the low half first, so the results are in reverse order. | |||
3076 | lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI, | |||
3077 | Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); | |||
3078 | else { | |||
3079 | // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI: | |||
3080 | // | |||
3081 | // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64) | |||
3082 | // | |||
3083 | // but using the fact that the upper halves are either all zeros | |||
3084 | // or all ones: | |||
3085 | // | |||
3086 | // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64) | |||
3087 | // | |||
3088 | // and grouping the right terms together since they are quicker than the | |||
3089 | // multiplication: | |||
3090 | // | |||
3091 | // (ll * rl) - (((lh & rl) + (ll & rh)) << 64) | |||
3092 | SDValue C63 = DAG.getConstant(63, DL, MVT::i64); | |||
3093 | SDValue LL = Op.getOperand(0); | |||
3094 | SDValue RL = Op.getOperand(1); | |||
3095 | SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63); | |||
3096 | SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63); | |||
3097 | // SystemZISD::UMUL_LOHI returns the low result in the odd register and | |||
3098 | // the high result in the even register. ISD::SMUL_LOHI is defined to | |||
3099 | // return the low half first, so the results are in reverse order. | |||
3100 | lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI, | |||
3101 | LL, RL, Ops[1], Ops[0]); | |||
3102 | SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH); | |||
3103 | SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL); | |||
3104 | SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL); | |||
3105 | Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum); | |||
3106 | } | |||
3107 | return DAG.getMergeValues(Ops, DL); | |||
3108 | } | |||
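// Worked example for the UMUL_LOHI-based path above (illustrative): with
// ll == -1 and rl == 2, lh == -1 and rh == 0, so UMUL_LOHI yields
// high == 1, low == 0xfffffffffffffffe; the correction subtracts
// ((lh & rl) + (ll & rh)) == 2 from the high part, giving high == -1 and
// low == -2, the correct 128-bit signed product of -1 * 2.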
3109 | ||||
3110 | SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, | |||
3111 | SelectionDAG &DAG) const { | |||
3112 | EVT VT = Op.getValueType(); | |||
3113 | SDLoc DL(Op); | |||
3114 | SDValue Ops[2]; | |||
3115 | if (is32Bit(VT)) | |||
3116 | // Just do a normal 64-bit multiplication and extract the results. | |||
3117 | // We define this so that it can be used for constant division. | |||
3118 | lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0), | |||
3119 | Op.getOperand(1), Ops[1], Ops[0]); | |||
3120 | else | |||
3121 | // SystemZISD::UMUL_LOHI returns the low result in the odd register and | |||
3122 | // the high result in the even register. ISD::UMUL_LOHI is defined to | |||
3123 | // return the low half first, so the results are in reverse order. | |||
3124 | lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI, | |||
3125 | Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); | |||
3126 | return DAG.getMergeValues(Ops, DL); | |||
3127 | } | |||
3128 | ||||
3129 | SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, | |||
3130 | SelectionDAG &DAG) const { | |||
3131 | SDValue Op0 = Op.getOperand(0); | |||
3132 | SDValue Op1 = Op.getOperand(1); | |||
3133 | EVT VT = Op.getValueType(); | |||
3134 | SDLoc DL(Op); | |||
3135 | ||||
3136 | // We use DSGF for 32-bit division. This means the first operand must | |||
3137 | // always be 64-bit, and the second operand should be 32-bit whenever | |||
3138 | // that is possible, to improve performance. | |||
3139 | if (is32Bit(VT)) | |||
3140 | Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0); | |||
3141 | else if (DAG.ComputeNumSignBits(Op1) > 32) | |||
3142 | Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1); | |||
3143 | ||||
3144 | // DSG(F) returns the remainder in the even register and the | |||
3145 | // quotient in the odd register. | |||
3146 | SDValue Ops[2]; | |||
3147 | lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]); | |||
3148 | return DAG.getMergeValues(Ops, DL); | |||
3149 | } | |||
3150 | ||||
3151 | SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op, | |||
3152 | SelectionDAG &DAG) const { | |||
3153 | EVT VT = Op.getValueType(); | |||
3154 | SDLoc DL(Op); | |||
3155 | ||||
3156 | // DL(G) returns the remainder in the even register and the | |||
3157 | // quotient in the odd register. | |||
3158 | SDValue Ops[2]; | |||
3159 | lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM, | |||
3160 | Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); | |||
3161 | return DAG.getMergeValues(Ops, DL); | |||
3162 | } | |||
3163 | ||||
3164 | SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { | |||
3165 | assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation"); | |||
3166 | ||||
3167 | // Get the known-zero masks for each operand. | |||
3168 | SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)}; | |||
3169 | KnownBits Known[2] = {DAG.computeKnownBits(Ops[0]), | |||
3170 | DAG.computeKnownBits(Ops[1])}; | |||
3171 | ||||
3172 | // See if the upper 32 bits of one operand and the lower 32 bits of the | |||
3173 | // other are known zero. They are the low and high operands respectively. | |||
3174 | uint64_t Masks[] = { Known[0].Zero.getZExtValue(), | |||
3175 | Known[1].Zero.getZExtValue() }; | |||
3176 | unsigned High, Low; | |||
3177 | if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff) | |||
3178 | High = 1, Low = 0; | |||
3179 | else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff) | |||
3180 | High = 0, Low = 1; | |||
3181 | else | |||
3182 | return Op; | |||
3183 | ||||
3184 | SDValue LowOp = Ops[Low]; | |||
3185 | SDValue HighOp = Ops[High]; | |||
3186 | ||||
3187 | // If the high part is a constant, we're better off using IILH. | |||
3188 | if (HighOp.getOpcode() == ISD::Constant) | |||
3189 | return Op; | |||
3190 | ||||
3191 | // If the low part is a constant that is outside the range of LHI, | |||
3192 | // then we're better off using IILF. | |||
3193 | if (LowOp.getOpcode() == ISD::Constant) { | |||
3194 | int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue()); | |||
3195 | if (!isInt<16>(Value)) | |||
3196 | return Op; | |||
3197 | } | |||
3198 | ||||
3199 | // Check whether the high part is an AND that doesn't change the | |||
3200 | // high 32 bits and just masks out low bits. We can skip it if so. | |||
3201 | if (HighOp.getOpcode() == ISD::AND && | |||
3202 | HighOp.getOperand(1).getOpcode() == ISD::Constant) { | |||
3203 | SDValue HighOp0 = HighOp.getOperand(0); | |||
3204 | uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue(); | |||
3205 | if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff)))) | |||
3206 | HighOp = HighOp0; | |||
3207 | } | |||
3208 | ||||
3209 | // Take advantage of the fact that all GR32 operations only change the | |||
3210 | // low 32 bits by truncating Low to an i32 and inserting it directly | |||
3211 | // using a subreg. The interesting cases are those where the truncation | |||
3212 | // can be folded. | |||
3213 | SDLoc DL(Op); | |||
3214 | SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp); | |||
3215 | return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL, | |||
3216 | MVT::i64, HighOp, Low32); | |||
3217 | } | |||
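// Illustrative case: for (or (and %x, 0xffffffff00000000), 42) the
// known-zero masks classify the AND as HighOp and 42 as LowOp; 42 fits the
// LHI range, the AND is recognized as redundant, and the result is an
// insertion of the i32 value 42 into the low subreg of %x.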
3218 | ||||
3219 | // Lower SADDO/SSUBO/UADDO/USUBO nodes. | |||
3220 | SDValue SystemZTargetLowering::lowerXALUO(SDValue Op, | |||
3221 | SelectionDAG &DAG) const { | |||
3222 | SDNode *N = Op.getNode(); | |||
3223 | SDValue LHS = N->getOperand(0); | |||
3224 | SDValue RHS = N->getOperand(1); | |||
3225 | SDLoc DL(N); | |||
3226 | unsigned BaseOp = 0; | |||
3227 | unsigned CCValid = 0; | |||
3228 | unsigned CCMask = 0; | |||
3229 | ||||
3230 | switch (Op.getOpcode()) { | |||
3231 | default: llvm_unreachable("Unknown instruction!"); | |||
3232 | case ISD::SADDO: | |||
3233 | BaseOp = SystemZISD::SADDO; | |||
3234 | CCValid = SystemZ::CCMASK_ARITH; | |||
3235 | CCMask = SystemZ::CCMASK_ARITH_OVERFLOW; | |||
3236 | break; | |||
3237 | case ISD::SSUBO: | |||
3238 | BaseOp = SystemZISD::SSUBO; | |||
3239 | CCValid = SystemZ::CCMASK_ARITH; | |||
3240 | CCMask = SystemZ::CCMASK_ARITH_OVERFLOW; | |||
3241 | break; | |||
3242 | case ISD::UADDO: | |||
3243 | BaseOp = SystemZISD::UADDO; | |||
3244 | CCValid = SystemZ::CCMASK_LOGICAL; | |||
3245 | CCMask = SystemZ::CCMASK_LOGICAL_CARRY; | |||
3246 | break; | |||
3247 | case ISD::USUBO: | |||
3248 | BaseOp = SystemZISD::USUBO; | |||
3249 | CCValid = SystemZ::CCMASK_LOGICAL; | |||
3250 | CCMask = SystemZ::CCMASK_LOGICAL_BORROW; | |||
3251 | break; | |||
3252 | } | |||
3253 | ||||
3254 | SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32); | |||
3255 | SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS); | |||
3256 | ||||
3257 | SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask); | |||
3258 | if (N->getValueType(1) == MVT::i1) | |||
3259 | SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC); | |||
3260 | ||||
3261 | return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC); | |||
3262 | } | |||
3263 | ||||
3264 | // Lower ADDCARRY/SUBCARRY nodes. | |||
3265 | SDValue SystemZTargetLowering::lowerADDSUBCARRY(SDValue Op, | |||
3266 | SelectionDAG &DAG) const { | |||
3267 | ||||
3268 | SDNode *N = Op.getNode(); | |||
3269 | MVT VT = N->getSimpleValueType(0); | |||
3270 | ||||
3271 | // Let legalize expand this if it isn't a legal type yet. | |||
3272 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) | |||
3273 | return SDValue(); | |||
3274 | ||||
3275 | SDValue LHS = N->getOperand(0); | |||
3276 | SDValue RHS = N->getOperand(1); | |||
3277 | SDValue Carry = Op.getOperand(2); | |||
3278 | SDLoc DL(N); | |||
3279 | unsigned BaseOp = 0; | |||
3280 | unsigned CCValid = 0; | |||
3281 | unsigned CCMask = 0; | |||
3282 | ||||
3283 | switch (Op.getOpcode()) { | |||
3284 | default: llvm_unreachable("Unknown instruction!"); | |||
3285 | case ISD::ADDCARRY: | |||
3286 | BaseOp = SystemZISD::ADDCARRY; | |||
3287 | CCValid = SystemZ::CCMASK_LOGICAL; | |||
3288 | CCMask = SystemZ::CCMASK_LOGICAL_CARRY; | |||
3289 | break; | |||
3290 | case ISD::SUBCARRY: | |||
3291 | BaseOp = SystemZISD::SUBCARRY; | |||
3292 | CCValid = SystemZ::CCMASK_LOGICAL; | |||
3293 | CCMask = SystemZ::CCMASK_LOGICAL_BORROW; | |||
3294 | break; | |||
3295 | } | |||
3296 | ||||
3297 | // Set the condition code from the carry flag. | |||
3298 | Carry = DAG.getNode(SystemZISD::GET_CCMASK, DL, MVT::i32, Carry, | |||
3299 | DAG.getConstant(CCValid, DL, MVT::i32), | |||
3300 | DAG.getConstant(CCMask, DL, MVT::i32)); | |||
3301 | ||||
3302 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); | |||
3303 | SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS, Carry); | |||
3304 | ||||
3305 | SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask); | |||
3306 | if (N->getValueType(1) == MVT::i1) | |||
3307 | SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC); | |||
3308 | ||||
3309 | return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC); | |||
3310 | } | |||
3311 | ||||
3312 | SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op, | |||
3313 | SelectionDAG &DAG) const { | |||
3314 | EVT VT = Op.getValueType(); | |||
3315 | SDLoc DL(Op); | |||
3316 | Op = Op.getOperand(0); | |||
3317 | ||||
3318 | // Handle vector types via VPOPCT. | |||
3319 | if (VT.isVector()) { | |||
3320 | Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op); | |||
3321 | Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op); | |||
3322 | switch (VT.getScalarSizeInBits()) { | |||
3323 | case 8: | |||
3324 | break; | |||
3325 | case 16: { | |||
3326 | Op = DAG.getNode(ISD::BITCAST, DL, VT, Op); | |||
3327 | SDValue Shift = DAG.getConstant(8, DL, MVT::i32); | |||
3328 | SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift); | |||
3329 | Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp); | |||
3330 | Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift); | |||
3331 | break; | |||
3332 | } | |||
3333 | case 32: { | |||
3334 | SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8, | |||
3335 | DAG.getConstant(0, DL, MVT::i32)); | |||
3336 | Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp); | |||
3337 | break; | |||
3338 | } | |||
3339 | case 64: { | |||
3340 | SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8, | |||
3341 | DAG.getConstant(0, DL, MVT::i32)); | |||
3342 | Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp); | |||
3343 | Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp); | |||
3344 | break; | |||
3345 | } | |||
3346 | default: | |||
3347 | llvm_unreachable("Unexpected type"); | |||
3348 | } | |||
3349 | return Op; | |||
3350 | } | |||
3351 | ||||
3352 | // Get the known-zero mask for the operand. | |||
3353 | KnownBits Known = DAG.computeKnownBits(Op); | |||
3354 | unsigned NumSignificantBits = (~Known.Zero).getActiveBits(); | |||
3355 | if (NumSignificantBits == 0) | |||
3356 | return DAG.getConstant(0, DL, VT); | |||
3357 | ||||
3358 | // Skip known-zero high parts of the operand. | |||
3359 | int64_t OrigBitSize = VT.getSizeInBits(); | |||
3360 | int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits); | |||
3361 | BitSize = std::min(BitSize, OrigBitSize); | |||
3362 | ||||
3363 | // The POPCNT instruction counts the number of bits in each byte. | |||
3364 | Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op); | |||
3365 | Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op); | |||
3366 | Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op); | |||
3367 | ||||
3368 | // Add up per-byte counts in a binary tree. All bits of Op at | |||
3369 | // position larger than BitSize remain zero throughout. | |||
3370 | for (int64_t I = BitSize / 2; I >= 8; I = I / 2) { | |||
3371 | SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT)); | |||
3372 | if (BitSize != OrigBitSize) | |||
3373 | Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp, | |||
3374 | DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT)); | |||
3375 | Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp); | |||
3376 | } | |||
3377 | ||||
3378 | // Extract overall result from high byte. | |||
3379 | if (BitSize > 8) | |||
3380 | Op = DAG.getNode(ISD::SRL, DL, VT, Op, | |||
3381 | DAG.getConstant(BitSize - 8, DL, VT)); | |||
3382 | ||||
3383 | return Op; | |||
3384 | } | |||
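// Worked example (illustrative): for an i32 with no known-zero bits,
// BitSize == 32 and POPCNT leaves one per-byte count in each byte; the loop
// computes Op += Op << 16 and then Op += Op << 8, after which the top byte
// holds the sum of all four counts, extracted by the final SRL of 24.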
3385 | ||||
3386 | SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op, | |||
3387 | SelectionDAG &DAG) const { | |||
3388 | SDLoc DL(Op); | |||
3389 | AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( | |||
3390 | cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); | |||
3391 | SyncScope::ID FenceSSID = static_cast<SyncScope::ID>( | |||
3392 | cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); | |||
3393 | ||||
3394 | // The only fence that needs an instruction is a sequentially-consistent | |||
3395 | // cross-thread fence. | |||
3396 | if (FenceOrdering == AtomicOrdering::SequentiallyConsistent && | |||
3397 | FenceSSID == SyncScope::System) { | |||
3398 | return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other, | |||
3399 | Op.getOperand(0)), | |||
3400 | 0); | |||
3401 | } | |||
3402 | ||||
3403 | // MEMBARRIER is a compiler barrier; it codegens to a no-op. | |||
3404 | return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0)); | |||
3405 | } | |||
3406 | ||||
3407 | // Op is an atomic load. Lower it into a normal volatile load. | |||
3408 | SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op, | |||
3409 | SelectionDAG &DAG) const { | |||
3410 | auto *Node = cast<AtomicSDNode>(Op.getNode()); | |||
3411 | return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(), | |||
3412 | Node->getChain(), Node->getBasePtr(), | |||
3413 | Node->getMemoryVT(), Node->getMemOperand()); | |||
3414 | } | |||
3415 | ||||
3416 | // Op is an atomic store. Lower it into a normal volatile store. | |||
3417 | SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op, | |||
3418 | SelectionDAG &DAG) const { | |||
3419 | auto *Node = cast<AtomicSDNode>(Op.getNode()); | |||
3420 | SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(), | |||
3421 | Node->getBasePtr(), Node->getMemoryVT(), | |||
3422 | Node->getMemOperand()); | |||
3423 | // We have to enforce sequential consistency by performing a | |||
3424 | // serialization operation after the store. | |||
3425 | if (Node->getOrdering() == AtomicOrdering::SequentiallyConsistent) | |||
3426 | Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), | |||
3427 | MVT::Other, Chain), 0); | |||
3428 | return Chain; | |||
3429 | } | |||
3430 | ||||
3431 | // Op is an 8-, 16-, or 32-bit ATOMIC_LOAD_* operation. Lower the first | |||
3432 | // two into the fullword ATOMIC_LOADW_* operation given by Opcode. | |||
3433 | SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op, | |||
3434 | SelectionDAG &DAG, | |||
3435 | unsigned Opcode) const { | |||
3436 | auto *Node = cast<AtomicSDNode>(Op.getNode()); | |||
3437 | ||||
3438 | // 32-bit operations need no code outside the main loop. | |||
3439 | EVT NarrowVT = Node->getMemoryVT(); | |||
3440 | EVT WideVT = MVT::i32; | |||
3441 | if (NarrowVT == WideVT) | |||
3442 | return Op; | |||
3443 | ||||
3444 | int64_t BitSize = NarrowVT.getSizeInBits(); | |||
3445 | SDValue ChainIn = Node->getChain(); | |||
3446 | SDValue Addr = Node->getBasePtr(); | |||
3447 | SDValue Src2 = Node->getVal(); | |||
3448 | MachineMemOperand *MMO = Node->getMemOperand(); | |||
3449 | SDLoc DL(Node); | |||
3450 | EVT PtrVT = Addr.getValueType(); | |||
3451 | ||||
3452 | // Convert atomic subtracts of constants into additions. | |||
3453 | if (Opcode == SystemZISD::ATOMIC_LOADW_SUB) | |||
3454 | if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) { | |||
3455 | Opcode = SystemZISD::ATOMIC_LOADW_ADD; | |||
3456 | Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType()); | |||
3457 | } | |||
3458 | ||||
3459 | // Get the address of the containing word. | |||
3460 | SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, | |||
3461 | DAG.getConstant(-4, DL, PtrVT)); | |||
3462 | ||||
3463 | // Get the number of bits that the word must be rotated left in order | |||
3464 | // to bring the field to the top bits of a GR32. | |||
3465 | SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, | |||
3466 | DAG.getConstant(3, DL, PtrVT)); | |||
3467 | BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); | |||
3468 | ||||
3469 | // Get the complementing shift amount, for rotating a field in the top | |||
3470 | // bits back to its proper position. | |||
3471 | SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, | |||
3472 | DAG.getConstant(0, DL, WideVT), BitShift); | |||
3473 | ||||
3474 | // Extend the source operand to 32 bits and prepare it for the inner loop. | |||
3475 | // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other | |||
3476 | // operations require the source to be shifted in advance. (This shift | |||
3477 | // can be folded if the source is constant.) For AND and NAND, the lower | |||
3478 | // bits must be set, while for other opcodes they should be left clear. | |||
3479 | if (Opcode != SystemZISD::ATOMIC_SWAPW) | |||
3480 | Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2, | |||
3481 | DAG.getConstant(32 - BitSize, DL, WideVT)); | |||
3482 | if (Opcode == SystemZISD::ATOMIC_LOADW_AND || | |||
3483 | Opcode == SystemZISD::ATOMIC_LOADW_NAND) | |||
3484 | Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2, | |||
3485 | DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT)); | |||
3486 | ||||
3487 | // Construct the ATOMIC_LOADW_* node. | |||
3488 | SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); | |||
3489 | SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift, | |||
3490 | DAG.getConstant(BitSize, DL, WideVT) }; | |||
3491 | SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops, | |||
3492 | NarrowVT, MMO); | |||
3493 | ||||
3494 | // Rotate the result of the final CS so that the field is in the lower | |||
3495 | // bits of a GR32, then truncate it. | |||
3496 | SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift, | |||
3497 | DAG.getConstant(BitSize, DL, WideVT)); | |||
3498 | SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift); | |||
3499 | ||||
3500 | SDValue RetOps[2] = { Result, AtomicOp.getValue(1) }; | |||
3501 | return DAG.getMergeValues(RetOps, DL); | |||
3502 | } | |||
3503 | ||||
3504 | // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations | |||
3505 | // into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit | |||
3506 | // operations into additions. | |||
3507 | SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op, | |||
3508 | SelectionDAG &DAG) const { | |||
3509 | auto *Node = cast<AtomicSDNode>(Op.getNode()); | |||
3510 | EVT MemVT = Node->getMemoryVT(); | |||
3511 | if (MemVT == MVT::i32 || MemVT == MVT::i64) { | |||
3512 | // A full-width operation. | |||
3513 | assert(Op.getValueType() == MemVT && "Mismatched VTs"); | |||
3514 | SDValue Src2 = Node->getVal(); | |||
3515 | SDValue NegSrc2; | |||
3516 | SDLoc DL(Src2); | |||
3517 | ||||
3518 | if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) { | |||
3519 | // Use an addition if the operand is constant and either LAA(G) is | |||
3520 | // available or the negative value is in the range of A(G)FHI. | |||
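// (APInt negation wraps, so even INT64_MIN converts to an addition with | |||
// the same modular result.) | |||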
3521 | int64_t Value = (-Op2->getAPIntValue()).getSExtValue(); | |||
3522 | if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1()) | |||
3523 | NegSrc2 = DAG.getConstant(Value, DL, MemVT); | |||
3524 | } else if (Subtarget.hasInterlockedAccess1()) | |||
3525 | // Use LAA(G) if available. | |||
3526 | NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT), | |||
3527 | Src2); | |||
3528 | ||||
3529 | if (NegSrc2.getNode()) | |||
3530 | return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT, | |||
3531 | Node->getChain(), Node->getBasePtr(), NegSrc2, | |||
3532 | Node->getMemOperand()); | |||
3533 | ||||
3534 | // Use the node as-is. | |||
3535 | return Op; | |||
3536 | } | |||
3537 | ||||
3538 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB); | |||
3539 | } | |||
3540 | ||||
3541 | // Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node. | |||
3542 | SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op, | |||
3543 | SelectionDAG &DAG) const { | |||
3544 | auto *Node = cast<AtomicSDNode>(Op.getNode()); | |||
3545 | SDValue ChainIn = Node->getOperand(0); | |||
3546 | SDValue Addr = Node->getOperand(1); | |||
3547 | SDValue CmpVal = Node->getOperand(2); | |||
3548 | SDValue SwapVal = Node->getOperand(3); | |||
3549 | MachineMemOperand *MMO = Node->getMemOperand(); | |||
3550 | SDLoc DL(Node); | |||
3551 | ||||
3552 | // We have native support for 32-bit and 64-bit compare and swap, but we | |||
3553 | // still need to expand extracting the "success" result from the CC. | |||
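// CS sets CC 0 if the swap was performed and CC 1 if the comparison | |||
// failed, which is what CCMASK_CS_EQ tests below. | |||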
3554 | EVT NarrowVT = Node->getMemoryVT(); | |||
3555 | EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32; | |||
3556 | if (NarrowVT == WideVT) { | |||
3557 | SDVTList Tys = DAG.getVTList(WideVT, MVT::i32, MVT::Other); | |||
3558 | SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal }; | |||
3559 | SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP, | |||
3560 | DL, Tys, Ops, NarrowVT, MMO); | |||
3561 | SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1), | |||
3562 | SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ); | |||
3563 | ||||
3564 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0)); | |||
3565 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success); | |||
3566 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2)); | |||
3567 | return SDValue(); | |||
3568 | } | |||
3569 | ||||
3570 | // Convert 8-bit and 16-bit compare and swap to a loop, implemented | |||
3571 | // via a fullword ATOMIC_CMP_SWAPW operation. | |||
3572 | int64_t BitSize = NarrowVT.getSizeInBits(); | |||
3573 | EVT PtrVT = Addr.getValueType(); | |||
3574 | ||||
3575 | // Get the address of the containing word. | |||
3576 | SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, | |||
3577 | DAG.getConstant(-4, DL, PtrVT)); | |||
3578 | ||||
3579 | // Get the number of bits that the word must be rotated left in order | |||
3580 | // to bring the field to the top bits of a GR32. | |||
3581 | SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, | |||
3582 | DAG.getConstant(3, DL, PtrVT)); | |||
3583 | BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); | |||
3584 | ||||
3585 | // Get the complementing shift amount, for rotating a field in the top | |||
3586 | // bits back to its proper position. | |||
3587 | SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, | |||
3588 | DAG.getConstant(0, DL, WideVT), BitShift); | |||
3589 | ||||
3590 | // Construct the ATOMIC_CMP_SWAPW node. | |||
3591 | SDVTList VTList = DAG.getVTList(WideVT, MVT::i32, MVT::Other); | |||
3592 | SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift, | |||
3593 | NegBitShift, DAG.getConstant(BitSize, DL, WideVT) }; | |||
3594 | SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL, | |||
3595 | VTList, Ops, NarrowVT, MMO); | |||
3596 | SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1), | |||
3597 | SystemZ::CCMASK_ICMP, SystemZ::CCMASK_CMP_EQ); | |||
3598 | ||||
3599 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0)); | |||
3600 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success); | |||
3601 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2)); | |||
3602 | return SDValue(); | |||
3603 | } | |||
3604 | ||||
3605 | SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op, | |||
3606 | SelectionDAG &DAG) const { | |||
3607 | MachineFunction &MF = DAG.getMachineFunction(); | |||
3608 | MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); | |||
3609 | return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), | |||
3610 | SystemZ::R15D, Op.getValueType()); | |||
3611 | } | |||
3612 | ||||
3613 | SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, | |||
3614 | SelectionDAG &DAG) const { | |||
3615 | MachineFunction &MF = DAG.getMachineFunction(); | |||
3616 | MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); | |||
3617 | bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain"); | |||
3618 | ||||
3619 | SDValue Chain = Op.getOperand(0); | |||
3620 | SDValue NewSP = Op.getOperand(1); | |||
3621 | SDValue Backchain; | |||
3622 | SDLoc DL(Op); | |||
3623 | ||||
3624 | if (StoreBackchain) { | |||
3625 | SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64); | |||
3626 | Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo()); | |||
3627 | } | |||
3628 | ||||
3629 | Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP); | |||
3630 | ||||
3631 | if (StoreBackchain) | |||
3632 | Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo()); | |||
3633 | ||||
3634 | return Chain; | |||
3635 | } | |||
3636 | ||||
3637 | SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op, | |||
3638 | SelectionDAG &DAG) const { | |||
3639 | bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); | |||
3640 | if (!IsData) | |||
3641 | // Just preserve the chain. | |||
3642 | return Op.getOperand(0); | |||
3643 | ||||
3644 | SDLoc DL(Op); | |||
3645 | bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); | |||
3646 | unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ; | |||
3647 | auto *Node = cast<MemIntrinsicSDNode>(Op.getNode()); | |||
3648 | SDValue Ops[] = { | |||
3649 | Op.getOperand(0), | |||
3650 | DAG.getConstant(Code, DL, MVT::i32), | |||
3651 | Op.getOperand(1) | |||
3652 | }; | |||
3653 | return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL, | |||
3654 | Node->getVTList(), Ops, | |||
3655 | Node->getMemoryVT(), Node->getMemOperand()); | |||
3656 | } | |||
3657 | ||||
3658 | // Convert condition code in CCReg to an i32 value. | |||
3659 | static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg) { | |||
3660 | SDLoc DL(CCReg); | |||
3661 | SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg); | |||
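// IPM places the CC in bits 2-3 of the high byte of the low word (bits | |||
// 29:28 of the i32), so shifting right by SystemZ::IPM_CC (28) yields | |||
// the CC as an integer in the range [0, 3]. | |||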
3662 | return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM, | |||
3663 | DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32)); | |||
3664 | } | |||
3665 | ||||
3666 | SDValue | |||
3667 | SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op, | |||
3668 | SelectionDAG &DAG) const { | |||
3669 | unsigned Opcode, CCValid; | |||
3670 | if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) { | |||
3671 | assert(Op->getNumValues() == 2 && "Expected only CC result and chain"); | |||
3672 | SDNode *Node = emitIntrinsicWithCCAndChain(DAG, Op, Opcode); | |||
3673 | SDValue CC = getCCResult(DAG, SDValue(Node, 0)); | |||
3674 | DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC); | |||
3675 | return SDValue(); | |||
3676 | } | |||
3677 | ||||
3678 | return SDValue(); | |||
3679 | } | |||
3680 | ||||
3681 | SDValue | |||
3682 | SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, | |||
3683 | SelectionDAG &DAG) const { | |||
3684 | unsigned Opcode, CCValid; | |||
3685 | if (isIntrinsicWithCC(Op, Opcode, CCValid)) { | |||
3686 | SDNode *Node = emitIntrinsicWithCC(DAG, Op, Opcode); | |||
3687 | if (Op->getNumValues() == 1) | |||
3688 | return getCCResult(DAG, SDValue(Node, 0)); | |||
3689 | assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result"); | |||
3690 | return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(), | |||
3691 | SDValue(Node, 0), getCCResult(DAG, SDValue(Node, 1))); | |||
3692 | } | |||
3693 | ||||
3694 | unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
3695 | switch (Id) { | |||
3696 | case Intrinsic::thread_pointer: | |||
3697 | return lowerThreadPointer(SDLoc(Op), DAG); | |||
3698 | ||||
3699 | case Intrinsic::s390_vpdi: | |||
3700 | return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(), | |||
3701 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); | |||
3702 | ||||
3703 | case Intrinsic::s390_vperm: | |||
3704 | return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(), | |||
3705 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); | |||
3706 | ||||
3707 | case Intrinsic::s390_vuphb: | |||
3708 | case Intrinsic::s390_vuphh: | |||
3709 | case Intrinsic::s390_vuphf: | |||
3710 | return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(), | |||
3711 | Op.getOperand(1)); | |||
3712 | ||||
3713 | case Intrinsic::s390_vuplhb: | |||
3714 | case Intrinsic::s390_vuplhh: | |||
3715 | case Intrinsic::s390_vuplhf: | |||
3716 | return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(), | |||
3717 | Op.getOperand(1)); | |||
3718 | ||||
3719 | case Intrinsic::s390_vuplb: | |||
3720 | case Intrinsic::s390_vuplhw: | |||
3721 | case Intrinsic::s390_vuplf: | |||
3722 | return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(), | |||
3723 | Op.getOperand(1)); | |||
3724 | ||||
3725 | case Intrinsic::s390_vupllb: | |||
3726 | case Intrinsic::s390_vupllh: | |||
3727 | case Intrinsic::s390_vupllf: | |||
3728 | return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(), | |||
3729 | Op.getOperand(1)); | |||
3730 | ||||
3731 | case Intrinsic::s390_vsumb: | |||
3732 | case Intrinsic::s390_vsumh: | |||
3733 | case Intrinsic::s390_vsumgh: | |||
3734 | case Intrinsic::s390_vsumgf: | |||
3735 | case Intrinsic::s390_vsumqf: | |||
3736 | case Intrinsic::s390_vsumqg: | |||
3737 | return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(), | |||
3738 | Op.getOperand(1), Op.getOperand(2)); | |||
3739 | } | |||
3740 | ||||
3741 | return SDValue(); | |||
3742 | } | |||
3743 | ||||
3744 | namespace { | |||
3745 | // Says that SystemZISD operation Opcode can be used to perform the equivalent | |||
3746 | // of a VPERM with permute vector Bytes. If Opcode takes three operands, | |||
3747 | // Operand is the constant third operand, otherwise it is the number of | |||
3748 | // bytes in each element of the result. | |||
3749 | struct Permute { | |||
3750 | unsigned Opcode; | |||
3751 | unsigned Operand; | |||
3752 | unsigned char Bytes[SystemZ::VectorBytes]; | |||
3753 | }; | |||
3754 | } | |||
3755 | ||||
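// Each Bytes entry below indexes the 32-byte concatenation of the two | |||
// source vectors: 0-15 select bytes of operand 0 and 16-31 select bytes | |||
// of operand 1.  VMRHG, for example, concatenates the high doublewords | |||
// of its two operands. | |||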
3756 | static const Permute PermuteForms[] = { | |||
3757 | // VMRHG | |||
3758 | { SystemZISD::MERGE_HIGH, 8, | |||
3759 | { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } }, | |||
3760 | // VMRHF | |||
3761 | { SystemZISD::MERGE_HIGH, 4, | |||
3762 | { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } }, | |||
3763 | // VMRHH | |||
3764 | { SystemZISD::MERGE_HIGH, 2, | |||
3765 | { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } }, | |||
3766 | // VMRHB | |||
3767 | { SystemZISD::MERGE_HIGH, 1, | |||
3768 | { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } }, | |||
3769 | // VMRLG | |||
3770 | { SystemZISD::MERGE_LOW, 8, | |||
3771 | { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } }, | |||
3772 | // VMRLF | |||
3773 | { SystemZISD::MERGE_LOW, 4, | |||
3774 | { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }, | |||
3775 | // VMRLH | |||
3776 | { SystemZISD::MERGE_LOW, 2, | |||
3777 | { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } }, | |||
3778 | // VMRLB | |||
3779 | { SystemZISD::MERGE_LOW, 1, | |||
3780 | { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } }, | |||
3781 | // VPKG | |||
3782 | { SystemZISD::PACK, 4, | |||
3783 | { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } }, | |||
3784 | // VPKF | |||
3785 | { SystemZISD::PACK, 2, | |||
3786 | { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } }, | |||
3787 | // VPKH | |||
3788 | { SystemZISD::PACK, 1, | |||
3789 | { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } }, | |||
3790 | // VPDI V1, V2, 4 (low half of V1, high half of V2) | |||
3791 | { SystemZISD::PERMUTE_DWORDS, 4, | |||
3792 | { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } }, | |||
3793 | // VPDI V1, V2, 1 (high half of V1, low half of V2) | |||
3794 | { SystemZISD::PERMUTE_DWORDS, 1, | |||
3795 | { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } } | |||
3796 | }; | |||
3797 | ||||
3798 | // Called after matching a vector shuffle against a particular pattern. | |||
3799 | // Both the original shuffle and the pattern have two vector operands. | |||
3800 | // OpNos[0] is the operand of the original shuffle that should be used for | |||
3801 | // operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything. | |||
3802 | // OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and | |||
3803 | // set OpNo0 and OpNo1 to the shuffle operands that should actually be used | |||
3804 | // for operands 0 and 1 of the pattern. | |||
3805 | static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) { | |||
3806 | if (OpNos[0] < 0) { | |||
3807 | if (OpNos[1] < 0) | |||
3808 | return false; | |||
3809 | OpNo0 = OpNo1 = OpNos[1]; | |||
3810 | } else if (OpNos[1] < 0) { | |||
3811 | OpNo0 = OpNo1 = OpNos[0]; | |||
3812 | } else { | |||
3813 | OpNo0 = OpNos[0]; | |||
3814 | OpNo1 = OpNos[1]; | |||
3815 | } | |||
3816 | return true; | |||
3817 | } | |||
3818 | ||||
3819 | // Bytes is a VPERM-like permute vector, except that -1 is used for | |||
3820 | // undefined bytes. Return true if the VPERM can be implemented using P. | |||
3821 | // When returning true set OpNo0 to the VPERM operand that should be | |||
3822 | // used for operand 0 of P and likewise OpNo1 for operand 1 of P. | |||
3823 | // | |||
3824 | // For example, if swapping the VPERM operands allows P to match, OpNo0 | |||
3825 | // will be 1 and OpNo1 will be 0. If instead Bytes only refers to one | |||
3826 | // operand, but rewriting it to use two duplicated operands allows it to | |||
3827 | // match P, then OpNo0 and OpNo1 will be the same. | |||
3828 | static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P, | |||
3829 | unsigned &OpNo0, unsigned &OpNo1) { | |||
3830 | int OpNos[] = { -1, -1 }; | |||
3831 | for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) { | |||
3832 | int Elt = Bytes[I]; | |||
3833 | if (Elt >= 0) { | |||
3834 | // Make sure that the two permute vectors use the same suboperand | |||
3835 | // byte number. Only the operand numbers (the high bits) are | |||
3836 | // allowed to differ. | |||
3837 | if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1)) | |||
3838 | return false; | |||
3839 | int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes; | |||
3840 | int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes; | |||
3841 | // Make sure that the operand mappings are consistent with previous | |||
3842 | // elements. | |||
3843 | if (OpNos[ModelOpNo] == 1 - RealOpNo) | |||
3844 | return false; | |||
3845 | OpNos[ModelOpNo] = RealOpNo; | |||
3846 | } | |||
3847 | } | |||
3848 | return chooseShuffleOpNos(OpNos, OpNo0, OpNo1); | |||
3849 | } | |||
3850 | ||||
3851 | // As above, but search for a matching permute. | |||
3852 | static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes, | |||
3853 | unsigned &OpNo0, unsigned &OpNo1) { | |||
3854 | for (auto &P : PermuteForms) | |||
3855 | if (matchPermute(Bytes, P, OpNo0, OpNo1)) | |||
3856 | return &P; | |||
3857 | return nullptr; | |||
3858 | } | |||
3859 | ||||
3860 | // Bytes is a VPERM-like permute vector, except that -1 is used for | |||
3861 | // undefined bytes. This permute is an operand of an outer permute. | |||
3862 | // See whether redistributing the -1 bytes gives a shuffle that can be | |||
3863 | // implemented using P. If so, set Transform to a VPERM-like permute vector | |||
3864 | // that, when applied to the result of P, gives the original permute in Bytes. | |||
3865 | static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes, | |||
3866 | const Permute &P, | |||
3867 | SmallVectorImpl<int> &Transform) { | |||
3868 | unsigned To = 0; | |||
3869 | for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) { | |||
3870 | int Elt = Bytes[From]; | |||
3871 | if (Elt < 0) | |||
3872 | // Byte number From of the result is undefined. | |||
3873 | Transform[From] = -1; | |||
3874 | else { | |||
3875 | while (P.Bytes[To] != Elt) { | |||
3876 | To += 1; | |||
3877 | if (To == SystemZ::VectorBytes) | |||
3878 | return false; | |||
3879 | } | |||
3880 | Transform[From] = To; | |||
3881 | } | |||
3882 | } | |||
3883 | return true; | |||
3884 | } | |||
3885 | ||||
3886 | // As above, but search for a matching permute. | |||
3887 | static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes, | |||
3888 | SmallVectorImpl<int> &Transform) { | |||
3889 | for (auto &P : PermuteForms) | |||
3890 | if (matchDoublePermute(Bytes, P, Transform)) | |||
3891 | return &P; | |||
3892 | return nullptr; | |||
3893 | } | |||
3894 | ||||
3895 | // Convert the mask of the given shuffle op into a byte-level mask, | |||
3896 | // as if it had type vNi8. | |||
3897 | static bool getVPermMask(SDValue ShuffleOp, | |||
3898 | SmallVectorImpl<int> &Bytes) { | |||
3899 | EVT VT = ShuffleOp.getValueType(); | |||
3900 | unsigned NumElements = VT.getVectorNumElements(); | |||
3901 | unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); | |||
3902 | ||||
3903 | if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) { | |||
3904 | Bytes.resize(NumElements * BytesPerElement, -1); | |||
3905 | for (unsigned I = 0; I < NumElements; ++I) { | |||
3906 | int Index = VSN->getMaskElt(I); | |||
3907 | if (Index >= 0) | |||
3908 | for (unsigned J = 0; J < BytesPerElement; ++J) | |||
3909 | Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J; | |||
3910 | } | |||
3911 | return true; | |||
3912 | } | |||
3913 | if (SystemZISD::SPLAT == ShuffleOp.getOpcode() && | |||
3914 | isa<ConstantSDNode>(ShuffleOp.getOperand(1))) { | |||
3915 | unsigned Index = ShuffleOp.getConstantOperandVal(1); | |||
3916 | Bytes.resize(NumElements * BytesPerElement, -1); | |||
3917 | for (unsigned I = 0; I < NumElements; ++I) | |||
3918 | for (unsigned J = 0; J < BytesPerElement; ++J) | |||
3919 | Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J; | |||
3920 | return true; | |||
3921 | } | |||
3922 | return false; | |||
3923 | } | |||
3924 | ||||
3925 | // Bytes is a VPERM-like permute vector, except that -1 is used for | |||
3926 | // undefined bytes. See whether bytes [Start, Start + BytesPerElement) of | |||
3927 | // the result come from a contiguous sequence of bytes from one input. | |||
3928 | // Set Base to the selector for the first byte if so. | |||
3929 | static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start, | |||
3930 | unsigned BytesPerElement, int &Base) { | |||
3931 | Base = -1; | |||
3932 | for (unsigned I = 0; I < BytesPerElement; ++I) { | |||
3933 | if (Bytes[Start + I] >= 0) { | |||
3934 | unsigned Elem = Bytes[Start + I]; | |||
3935 | if (Base < 0) { | |||
3936 | Base = Elem - I; | |||
3937 | // Make sure the bytes would come from one input operand. | |||
3938 | if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size()) | |||
3939 | return false; | |||
3940 | } else if (unsigned(Base) != Elem - I) | |||
3941 | return false; | |||
3942 | } | |||
3943 | } | |||
3944 | return true; | |||
3945 | } | |||
3946 | ||||
3947 | // Bytes is a VPERM-like permute vector, except that -1 is used for | |||
3948 | // undefined bytes. Return true if it can be performed using VSLDI. | |||
3949 | // When returning true, set StartIndex to the shift amount and OpNo0 | |||
3950 | // and OpNo1 to the VPERM operands that should be used as the first | |||
3951 | // and second shift operand respectively. | |||
3952 | static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes, | |||
3953 | unsigned &StartIndex, unsigned &OpNo0, | |||
3954 | unsigned &OpNo1) { | |||
3955 | int OpNos[] = { -1, -1 }; | |||
3956 | int Shift = -1; | |||
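// VSLDI takes the 16 consecutive bytes starting at byte Shift of the | |||
// 32-byte concatenation of its operands, so every defined result byte I | |||
// must select concatenation byte Shift + I, with OpNos resolving which | |||
// VPERM operand supplies each half. | |||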
3957 | for (unsigned I = 0; I < 16; ++I) { | |||
3958 | int Index = Bytes[I]; | |||
3959 | if (Index >= 0) { | |||
3960 | int ExpectedShift = (Index - I) % SystemZ::VectorBytes; | |||
3961 | int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes; | |||
3962 | int RealOpNo = unsigned(Index) / SystemZ::VectorBytes; | |||
3963 | if (Shift < 0) | |||
3964 | Shift = ExpectedShift; | |||
3965 | else if (Shift != ExpectedShift) | |||
3966 | return false; | |||
3967 | // Make sure that the operand mappings are consistent with previous | |||
3968 | // elements. | |||
3969 | if (OpNos[ModelOpNo] == 1 - RealOpNo) | |||
3970 | return false; | |||
3971 | OpNos[ModelOpNo] = RealOpNo; | |||
3972 | } | |||
3973 | } | |||
3974 | StartIndex = Shift; | |||
3975 | return chooseShuffleOpNos(OpNos, OpNo0, OpNo1); | |||
3976 | } | |||
3977 | ||||
3978 | // Create a node that performs P on operands Op0 and Op1, casting the | |||
3979 | // operands to the appropriate type. The type of the result is determined by P. | |||
3980 | static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, | |||
3981 | const Permute &P, SDValue Op0, SDValue Op1) { | |||
3982 | // VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input | |||
3983 | // elements of a PACK are twice as wide as the outputs. | |||
3984 | unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 : | |||
3985 | P.Opcode == SystemZISD::PACK ? P.Operand * 2 : | |||
3986 | P.Operand); | |||
3987 | // Cast both operands to the appropriate type. | |||
3988 | MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8), | |||
3989 | SystemZ::VectorBytes / InBytes); | |||
3990 | Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0); | |||
3991 | Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1); | |||
3992 | SDValue Op; | |||
3993 | if (P.Opcode == SystemZISD::PERMUTE_DWORDS) { | |||
3994 | SDValue Op2 = DAG.getConstant(P.Operand, DL, MVT::i32); | |||
3995 | Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2); | |||
3996 | } else if (P.Opcode == SystemZISD::PACK) { | |||
3997 | MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8), | |||
3998 | SystemZ::VectorBytes / P.Operand); | |||
3999 | Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1); | |||
4000 | } else { | |||
4001 | Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1); | |||
4002 | } | |||
4003 | return Op; | |||
4004 | } | |||
4005 | ||||
4006 | // Bytes is a VPERM-like permute vector, except that -1 is used for | |||
4007 | // undefined bytes. Implement it on operands Ops[0] and Ops[1] using | |||
4008 | // VSLDI or VPERM. | |||
4009 | static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, | |||
4010 | SDValue *Ops, | |||
4011 | const SmallVectorImpl<int> &Bytes) { | |||
4012 | for (unsigned I = 0; I < 2; ++I) | |||
4013 | Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]); | |||
4014 | ||||
4015 | // First see whether VSLDI can be used. | |||
4016 | unsigned StartIndex, OpNo0, OpNo1; | |||
4017 | if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1)) | |||
4018 | return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0], | |||
4019 | Ops[OpNo1], DAG.getConstant(StartIndex, DL, MVT::i32)); | |||
4020 | ||||
4021 | // Fall back on VPERM. Construct an SDNode for the permute vector. | |||
4022 | SDValue IndexNodes[SystemZ::VectorBytes]; | |||
4023 | for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) | |||
4024 | if (Bytes[I] >= 0) | |||
4025 | IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32); | |||
4026 | else | |||
4027 | IndexNodes[I] = DAG.getUNDEF(MVT::i32); | |||
4028 | SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes); | |||
4029 | return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2); | |||
4030 | } | |||
4031 | ||||
4032 | namespace { | |||
4033 | // Describes a general N-operand vector shuffle. | |||
4034 | struct GeneralShuffle { | |||
4035 | GeneralShuffle(EVT vt) : VT(vt) {} | |||
4036 | void addUndef(); | |||
4037 | bool add(SDValue, unsigned); | |||
4038 | SDValue getNode(SelectionDAG &, const SDLoc &); | |||
4039 | ||||
4040 | // The operands of the shuffle. | |||
4041 | SmallVector<SDValue, SystemZ::VectorBytes> Ops; | |||
4042 | ||||
4043 | // Index I is -1 if byte I of the result is undefined. Otherwise the | |||
4044 | // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand | |||
4045 | // Bytes[I] / SystemZ::VectorBytes. | |||
4046 | SmallVector<int, SystemZ::VectorBytes> Bytes; | |||
4047 | ||||
4048 | // The type of the shuffle result. | |||
4049 | EVT VT; | |||
4050 | }; | |||
4051 | } | |||
4052 | ||||
4053 | // Add an extra undefined element to the shuffle. | |||
4054 | void GeneralShuffle::addUndef() { | |||
4055 | unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); | |||
4056 | for (unsigned I = 0; I < BytesPerElement; ++I) | |||
4057 | Bytes.push_back(-1); | |||
4058 | } | |||
4059 | ||||
4060 | // Add an extra element to the shuffle, taking it from element Elem of Op. | |||
4061 | // A null Op indicates a vector input whose value will be calculated later; | |||
4062 | // there is at most one such input per shuffle and it always has the same | |||
4063 | // type as the result. Aborts and returns false if the source vector elements | |||
4064 | // of an EXTRACT_VECTOR_ELT are smaller than the destination elements; in | |||
4065 | // LLVM IR these are implicitly extended, but that case is rare and not optimized. | |||
4066 | bool GeneralShuffle::add(SDValue Op, unsigned Elem) { | |||
4067 | unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); | |||
4068 | ||||
4069 | // The source vector can have wider elements than the result, | |||
4070 | // either through an explicit TRUNCATE or because of type legalization. | |||
4071 | // We want the least significant part. | |||
4072 | EVT FromVT = Op.getNode() ? Op.getValueType() : VT; | |||
4073 | unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize(); | |||
4074 | ||||
4075 | // Return false if the source elements are smaller than their destination | |||
4076 | // elements. | |||
4077 | if (FromBytesPerElement < BytesPerElement) | |||
4078 | return false; | |||
4079 | ||||
4080 | unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes + | |||
4081 | (FromBytesPerElement - BytesPerElement)); | |||
4082 | ||||
4083 | // Look through things like shuffles and bitcasts. | |||
4084 | while (Op.getNode()) { | |||
4085 | if (Op.getOpcode() == ISD::BITCAST) | |||
4086 | Op = Op.getOperand(0); | |||
4087 | else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) { | |||
4088 | // See whether the bytes we need come from a contiguous part of one | |||
4089 | // operand. | |||
4090 | SmallVector<int, SystemZ::VectorBytes> OpBytes; | |||
4091 | if (!getVPermMask(Op, OpBytes)) | |||
4092 | break; | |||
4093 | int NewByte; | |||
4094 | if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte)) | |||
4095 | break; | |||
4096 | if (NewByte < 0) { | |||
4097 | addUndef(); | |||
4098 | return true; | |||
4099 | } | |||
4100 | Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes); | |||
4101 | Byte = unsigned(NewByte) % SystemZ::VectorBytes; | |||
4102 | } else if (Op.isUndef()) { | |||
4103 | addUndef(); | |||
4104 | return true; | |||
4105 | } else | |||
4106 | break; | |||
4107 | } | |||
4108 | ||||
4109 | // Make sure that the source of the extraction is in Ops. | |||
4110 | unsigned OpNo = 0; | |||
4111 | for (; OpNo < Ops.size(); ++OpNo) | |||
4112 | if (Ops[OpNo] == Op) | |||
4113 | break; | |||
4114 | if (OpNo == Ops.size()) | |||
4115 | Ops.push_back(Op); | |||
4116 | ||||
4117 | // Add the element to Bytes. | |||
4118 | unsigned Base = OpNo * SystemZ::VectorBytes + Byte; | |||
4119 | for (unsigned I = 0; I < BytesPerElement; ++I) | |||
4120 | Bytes.push_back(Base + I); | |||
4121 | ||||
4122 | return true; | |||
4123 | } | |||
4124 | ||||
4125 | // Return SDNodes for the completed shuffle. | |||
4126 | SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) { | |||
4127 | assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector"); | |||
4128 | ||||
4129 | if (Ops.size() == 0) | |||
4130 | return DAG.getUNDEF(VT); | |||
4131 | ||||
4132 | // Make sure that there are at least two shuffle operands. | |||
4133 | if (Ops.size() == 1) | |||
4134 | Ops.push_back(DAG.getUNDEF(MVT::v16i8)); | |||
4135 | ||||
4136 | // Create a tree of shuffles, deferring the root node until after the loop. | |||
4137 | // Try to redistribute the undefined elements of non-root nodes so that | |||
4138 | // the non-root shuffles match something like a pack or merge, then adjust | |||
4139 | // the parent node's permute vector to compensate for the new order. | |||
4140 | // Among other things, this copes with vectors like <2 x i16> that were | |||
4141 | // padded with undefined elements during type legalization. | |||
4142 | // | |||
4143 | // In the best case this redistribution will lead to the whole tree | |||
4144 | // using packs and merges. It should rarely be a loss in other cases. | |||
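// Each pass of the loop below halves the number of live operands by | |||
// combining Ops[I] and Ops[I + Stride] into Ops[I]. | |||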
4145 | unsigned Stride = 1; | |||
4146 | for (; Stride * 2 < Ops.size(); Stride *= 2) { | |||
4147 | for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) { | |||
4148 | SDValue SubOps[] = { Ops[I], Ops[I + Stride] }; | |||
4149 | ||||
4150 | // Create a mask for just these two operands. | |||
4151 | SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes); | |||
4152 | for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) { | |||
4153 | unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes; | |||
4154 | unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes; | |||
4155 | if (OpNo == I) | |||
4156 | NewBytes[J] = Byte; | |||
4157 | else if (OpNo == I + Stride) | |||
4158 | NewBytes[J] = SystemZ::VectorBytes + Byte; | |||
4159 | else | |||
4160 | NewBytes[J] = -1; | |||
4161 | } | |||
4162 | // See if it would be better to reorganize NewBytes to avoid using VPERM. | |||
4163 | SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes); | |||
4164 | if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) { | |||
4165 | Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]); | |||
4166 | // Applying NewBytesMap to Ops[I] gets back to NewBytes. | |||
4167 | for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) { | |||
4168 | if (NewBytes[J] >= 0) { | |||
4169 | assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes && | |||
4170 | "Invalid double permute"); | |||
4171 | Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J]; | |||
4172 | } else | |||
4173 | assert(NewBytesMap[J] < 0 && "Invalid double permute"); | |||
4174 | } | |||
4175 | } else { | |||
4176 | // Just use NewBytes on the operands. | |||
4177 | Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes); | |||
4178 | for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) | |||
4179 | if (NewBytes[J] >= 0) | |||
4180 | Bytes[J] = I * SystemZ::VectorBytes + J; | |||
4181 | } | |||
4182 | } | |||
4183 | } | |||
4184 | ||||
4185 | // Now we just have 2 inputs. Put the second operand in Ops[1]. | |||
4186 | if (Stride > 1) { | |||
4187 | Ops[1] = Ops[Stride]; | |||
4188 | for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) | |||
4189 | if (Bytes[I] >= int(SystemZ::VectorBytes)) | |||
4190 | Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes; | |||
4191 | } | |||
4192 | ||||
4193 | // Look for an instruction that can do the permute without resorting | |||
4194 | // to VPERM. | |||
4195 | unsigned OpNo0, OpNo1; | |||
4196 | SDValue Op; | |||
4197 | if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1)) | |||
4198 | Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]); | |||
4199 | else | |||
4200 | Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes); | |||
4201 | return DAG.getNode(ISD::BITCAST, DL, VT, Op); | |||
4202 | } | |||
4203 | ||||
4204 | // Return true if the given BUILD_VECTOR is a scalar-to-vector conversion. | |||
4205 | static bool isScalarToVector(SDValue Op) { | |||
4206 | for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I) | |||
4207 | if (!Op.getOperand(I).isUndef()) | |||
4208 | return false; | |||
4209 | return true; | |||
4210 | } | |||
4211 | ||||
4212 | // Return a vector of type VT that contains Value in the first element. | |||
4213 | // The other elements don't matter. | |||
4214 | static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, | |||
4215 | SDValue Value) { | |||
4216 | // If we have a constant, replicate it to all elements and let the | |||
4217 | // BUILD_VECTOR lowering take care of it. | |||
4218 | if (Value.getOpcode() == ISD::Constant || | |||
4219 | Value.getOpcode() == ISD::ConstantFP) { | |||
4220 | SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value); | |||
4221 | return DAG.getBuildVector(VT, DL, Ops); | |||
4222 | } | |||
4223 | if (Value.isUndef()) | |||
4224 | return DAG.getUNDEF(VT); | |||
4225 | return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value); | |||
4226 | } | |||
4227 | ||||
4228 | // Return a vector of type VT in which Op0 is in element 0 and Op1 is in | |||
4229 | // element 1. Used for cases in which replication is cheap. | |||
4230 | static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, | |||
4231 | SDValue Op0, SDValue Op1) { | |||
4232 | if (Op0.isUndef()) { | |||
4233 | if (Op1.isUndef()) | |||
4234 | return DAG.getUNDEF(VT); | |||
4235 | return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1); | |||
4236 | } | |||
4237 | if (Op1.isUndef()) | |||
4238 | return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0); | |||
4239 | return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT, | |||
4240 | buildScalarToVector(DAG, DL, VT, Op0), | |||
4241 | buildScalarToVector(DAG, DL, VT, Op1)); | |||
4242 | } | |||
4243 | ||||
4244 | // Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64 | |||
4245 | // vector for them. | |||
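// (This becomes VLVGP, which writes its two GPR operands to the first | |||
// and second doublewords of a vector register.) | |||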
4246 | static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, | |||
4247 | SDValue Op1) { | |||
4248 | if (Op0.isUndef() && Op1.isUndef()) | |||
4249 | return DAG.getUNDEF(MVT::v2i64); | |||
4250 | // If one of the two inputs is undefined then replicate the other one, | |||
4251 | // in order to avoid using another register unnecessarily. | |||
4252 | if (Op0.isUndef()) | |||
4253 | Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1); | |||
4254 | else if (Op1.isUndef()) | |||
4255 | Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); | |||
4256 | else { | |||
4257 | Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); | |||
4258 | Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1); | |||
4259 | } | |||
4260 | return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1); | |||
4261 | } | |||
4262 | ||||
4263 | // Try to represent constant BUILD_VECTOR node BVN using a | |||
4264 | // SystemZISD::BYTE_MASK-style mask. Store the mask value in Mask | |||
4265 | // on success. | |||
4266 | static bool tryBuildVectorByteMask(BuildVectorSDNode *BVN, uint64_t &Mask) { | |||
4267 | EVT ElemVT = BVN->getValueType(0).getVectorElementType(); | |||
4268 | unsigned BytesPerElement = ElemVT.getStoreSize(); | |||
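// VGBM expands each bit of its 16-bit immediate, MSB first, into an | |||
// all-ones or all-zeros result byte, so the constant is representable | |||
// only if every byte is 0x00 or 0xff. | |||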
4269 | for (unsigned I = 0, E = BVN->getNumOperands(); I != E; ++I) { | |||
4270 | SDValue Op = BVN->getOperand(I); | |||
4271 | if (!Op.isUndef()) { | |||
4272 | uint64_t Value; | |||
4273 | if (Op.getOpcode() == ISD::Constant) | |||
4274 | Value = cast<ConstantSDNode>(Op)->getZExtValue(); | |||
4275 | else if (Op.getOpcode() == ISD::ConstantFP) | |||
4276 | Value = (cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt() | |||
4277 | .getZExtValue()); | |||
4278 | else | |||
4279 | return false; | |||
4280 | for (unsigned J = 0; J < BytesPerElement; ++J) { | |||
4281 | uint64_t Byte = (Value >> (J * 8)) & 0xff; | |||
4282 | if (Byte == 0xff) | |||
4283 | Mask |= 1ULL << ((E - I - 1) * BytesPerElement + J); | |||
4284 | else if (Byte != 0) | |||
4285 | return false; | |||
4286 | } | |||
4287 | } | |||
4288 | } | |||
4289 | return true; | |||
4290 | } | |||
4291 | ||||
4292 | // Try to load a vector constant in which BitsPerElement-bit value Value | |||
4293 | // is replicated to fill the vector. VT is the type of the resulting | |||
4294 | // constant, which may have elements of a different size from BitsPerElement. | |||
4295 | // Return the SDValue of the constant on success, otherwise return | |||
4296 | // an empty value. | |||
4297 | static SDValue tryBuildVectorReplicate(SelectionDAG &DAG, | |||
4298 | const SystemZInstrInfo *TII, | |||
4299 | const SDLoc &DL, EVT VT, uint64_t Value, | |||
4300 | unsigned BitsPerElement) { | |||
4301 | // Signed 16-bit values can be replicated using VREPI. | |||
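// (A splat of 0xfffe in 16-bit elements, for example, sign-extends to | |||
// -2 and becomes VREPIH -2.) | |||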
4302 | // Mark the constants as opaque or DAGCombiner will convert back to | |||
4303 | // BUILD_VECTOR. | |||
4304 | int64_t SignedValue = SignExtend64(Value, BitsPerElement); | |||
4305 | if (isInt<16>(SignedValue)) { | |||
4306 | MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement), | |||
4307 | SystemZ::VectorBits / BitsPerElement); | |||
4308 | SDValue Op = DAG.getNode( | |||
4309 | SystemZISD::REPLICATE, DL, VecVT, | |||
4310 | DAG.getConstant(SignedValue, DL, MVT::i32, false, true /*isOpaque*/)); | |||
4311 | return DAG.getNode(ISD::BITCAST, DL, VT, Op); | |||
4312 | } | |||
4313 | // See whether rotating the constant left some N places gives a value that | |||
4314 | // is one less than a power of 2 (i.e. all zeros followed by all ones). | |||
4315 | // If so we can use VGM. | |||
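// For example, 0x00fff000 in a 32-bit element is ones in bits 8-19 | |||
// (counting the MSB as bit 0) and is generated by VGMF 8, 19; the | |||
// wraparound value 0xf000000f is VGMF 28, 3. | |||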
4316 | unsigned Start, End; | |||
4317 | if (TII->isRxSBGMask(Value, BitsPerElement, Start, End)) { | |||
4318 | // isRxSBGMask returns the bit numbers for a full 64-bit value, | |||
4319 | // with 0 denoting 1 << 63 and 63 denoting 1. Convert them to | |||
4320 | // bit numbers for an BitsPerElement value, so that 0 denotes | |||
4321 | // 1 << (BitsPerElement-1). | |||
4322 | Start -= 64 - BitsPerElement; | |||
4323 | End -= 64 - BitsPerElement; | |||
4324 | MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement), | |||
4325 | SystemZ::VectorBits / BitsPerElement); | |||
4326 | SDValue Op = DAG.getNode( | |||
4327 | SystemZISD::ROTATE_MASK, DL, VecVT, | |||
4328 | DAG.getConstant(Start, DL, MVT::i32, false, true /*isOpaque*/), | |||
4329 | DAG.getConstant(End, DL, MVT::i32, false, true /*isOpaque*/)); | |||
4330 | return DAG.getNode(ISD::BITCAST, DL, VT, Op); | |||
4331 | } | |||
4332 | return SDValue(); | |||
4333 | } | |||
4334 | ||||
4335 | // If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually | |||
4336 | // better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for | |||
4337 | // the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR | |||
4338 | // would benefit from this representation and return it if so. | |||
4339 | static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, | |||
4340 | BuildVectorSDNode *BVN) { | |||
4341 | EVT VT = BVN->getValueType(0); | |||
4342 | unsigned NumElements = VT.getVectorNumElements(); | |||
4343 | ||||
4344 | // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation | |||
4345 | // on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still | |||
4346 | // need a BUILD_VECTOR, add an additional placeholder operand for that | |||
4347 | // BUILD_VECTOR and store its operands in ResidueOps. | |||
4348 | GeneralShuffle GS(VT); | |||
4349 | SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps; | |||
4350 | bool FoundOne = false; | |||
4351 | for (unsigned I = 0; I < NumElements; ++I) { | |||
4352 | SDValue Op = BVN->getOperand(I); | |||
4353 | if (Op.getOpcode() == ISD::TRUNCATE) | |||
4354 | Op = Op.getOperand(0); | |||
4355 | if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
4356 | Op.getOperand(1).getOpcode() == ISD::Constant) { | |||
4357 | unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); | |||
4358 | if (!GS.add(Op.getOperand(0), Elem)) | |||
4359 | return SDValue(); | |||
4360 | FoundOne = true; | |||
4361 | } else if (Op.isUndef()) { | |||
4362 | GS.addUndef(); | |||
4363 | } else { | |||
4364 | if (!GS.add(SDValue(), ResidueOps.size())) | |||
4365 | return SDValue(); | |||
4366 | ResidueOps.push_back(BVN->getOperand(I)); | |||
4367 | } | |||
4368 | } | |||
4369 | ||||
4370 | // Nothing to do if there are no EXTRACT_VECTOR_ELTs. | |||
4371 | if (!FoundOne) | |||
4372 | return SDValue(); | |||
4373 | ||||
4374 | // Create the BUILD_VECTOR for the remaining elements, if any. | |||
4375 | if (!ResidueOps.empty()) { | |||
4376 | while (ResidueOps.size() < NumElements) | |||
4377 | ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType())); | |||
4378 | for (auto &Op : GS.Ops) { | |||
4379 | if (!Op.getNode()) { | |||
4380 | Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps); | |||
4381 | break; | |||
4382 | } | |||
4383 | } | |||
4384 | } | |||
4385 | return GS.getNode(DAG, SDLoc(BVN)); | |||
4386 | } | |||
4387 | ||||
4388 | // Combine GPR scalar values Elems into a vector of type VT. | |||
4389 | static SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, | |||
4390 | SmallVectorImpl<SDValue> &Elems) { | |||
4391 | // See whether there is a single replicated value. | |||
4392 | SDValue Single; | |||
4393 | unsigned int NumElements = Elems.size(); | |||
4394 | unsigned int Count = 0; | |||
4395 | for (auto Elem : Elems) { | |||
4396 | if (!Elem.isUndef()) { | |||
4397 | if (!Single.getNode()) | |||
4398 | Single = Elem; | |||
4399 | else if (Elem != Single) { | |||
4400 | Single = SDValue(); | |||
4401 | break; | |||
4402 | } | |||
4403 | Count += 1; | |||
4404 | } | |||
4405 | } | |||
4406 | // There are three cases here: | |||
4407 | // | |||
4408 | // - if the only defined element is a loaded one, the best sequence | |||
4409 | // is a replicating load. | |||
4410 | // | |||
4411 | // - otherwise, if the only defined element is an i64 value, we will | |||
4412 | // end up with the same VLVGP sequence regardless of whether we short-cut | |||
4413 | // for replication or fall through to the later code. | |||
4414 | // | |||
4415 | // - otherwise, if the only defined element is an i32 or smaller value, | |||
4416 | // we would need 2 instructions to replicate it: VLVGP followed by VREPx. | |||
4417 | // This is only a win if the single defined element is used more than once. | |||
4418 | // In other cases we're better off using a single VLVGx. | |||
4419 | if (Single.getNode() && (Count > 1 || Single.getOpcode() == ISD::LOAD)) | |||
4420 | return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single); | |||
4421 | ||||
4422 | // If all elements are loads, use VLREP/VLEs (below). | |||
4423 | bool AllLoads = true; | |||
4424 | for (auto Elem : Elems) | |||
4425 | if (Elem.getOpcode() != ISD::LOAD || cast<LoadSDNode>(Elem)->isIndexed()) { | |||
4426 | AllLoads = false; | |||
4427 | break; | |||
4428 | } | |||
4429 | ||||
4430 | // The best way of building a v2i64 from two i64s is to use VLVGP. | |||
4431 | if (VT == MVT::v2i64 && !AllLoads) | |||
4432 | return joinDwords(DAG, DL, Elems[0], Elems[1]); | |||
4433 | ||||
4434 | // Use a 64-bit merge high to combine two doubles. | |||
4435 | if (VT == MVT::v2f64 && !AllLoads) | |||
4436 | return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]); | |||
4437 | ||||
4438 | // Build v4f32 values directly from the FPRs: | |||
4439 | // | |||
4440 | //   <Axxx> <Bxxx> <Cxxx> <Dxxx> | |||
4441 | //      V             V          VMRHF | |||
4442 | //        <ABxx>        <CDxx> | |||
4443 | //             V                 VMRHG | |||
4444 | //           <ABCD> | |||
4445 | if (VT == MVT::v4f32 && !AllLoads) { | |||
4446 | SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]); | |||
4447 | SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]); | |||
4448 | // Avoid unnecessary undefs by reusing the other operand. | |||
4449 | if (Op01.isUndef()) | |||
4450 | Op01 = Op23; | |||
4451 | else if (Op23.isUndef()) | |||
4452 | Op23 = Op01; | |||
4453 | // Merging identical replications is a no-op. | |||
4454 | if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23) | |||
4455 | return Op01; | |||
4456 | Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01); | |||
4457 | Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23); | |||
4458 | SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH, | |||
4459 | DL, MVT::v2i64, Op01, Op23); | |||
4460 | return DAG.getNode(ISD::BITCAST, DL, VT, Op); | |||
4461 | } | |||
4462 | ||||
4463 | // Collect the constant terms. | |||
4464 | SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue()); | |||
4465 | SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false); | |||
4466 | ||||
4467 | unsigned NumConstants = 0; | |||
4468 | for (unsigned I = 0; I < NumElements; ++I) { | |||
4469 | SDValue Elem = Elems[I]; | |||
4470 | if (Elem.getOpcode() == ISD::Constant || | |||
4471 | Elem.getOpcode() == ISD::ConstantFP) { | |||
4472 | NumConstants += 1; | |||
4473 | Constants[I] = Elem; | |||
4474 | Done[I] = true; | |||
4475 | } | |||
4476 | } | |||
4477 | // If there was at least one constant, fill in the other elements of | |||
4478 | // Constants with undefs to get a full vector constant and use that | |||
4479 | // as the starting point. | |||
4480 | SDValue Result; | |||
4481 | SDValue ReplicatedVal; | |||
4482 | if (NumConstants > 0) { | |||
4483 | for (unsigned I = 0; I < NumElements; ++I) | |||
4484 | if (!Constants[I].getNode()) | |||
4485 | Constants[I] = DAG.getUNDEF(Elems[I].getValueType()); | |||
4486 | Result = DAG.getBuildVector(VT, DL, Constants); | |||
4487 | } else { | |||
4488 | // Otherwise try to use VLREP or VLVGP to start the sequence in order to | |||
4489 | // avoid a false dependency on any previous contents of the vector | |||
4490 | // register. | |||
4491 | ||||
4492 | // Use a VLREP if at least one element is a load.  Make sure to replicate | |||
4493 | // the load whose value is used by the greatest number of elements. | |||
4494 | std::map<const SDNode*, unsigned> UseCounts; | |||
4495 | SDNode *LoadMaxUses = nullptr; | |||
4496 | for (unsigned I = 0; I < NumElements; ++I) | |||
4497 | if (Elems[I].getOpcode() == ISD::LOAD && | |||
4498 | cast<LoadSDNode>(Elems[I])->isUnindexed()) { | |||
4499 | SDNode *Ld = Elems[I].getNode(); | |||
4500 | UseCounts[Ld]++; | |||
4501 | if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld]) | |||
4502 | LoadMaxUses = Ld; | |||
4503 | } | |||
4504 | if (LoadMaxUses != nullptr) { | |||
4505 | ReplicatedVal = SDValue(LoadMaxUses, 0); | |||
4506 | Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, ReplicatedVal); | |||
4507 | } else { | |||
4508 | // Try to use VLVGP. | |||
4509 | unsigned I1 = NumElements / 2 - 1; | |||
4510 | unsigned I2 = NumElements - 1; | |||
4511 | bool Def1 = !Elems[I1].isUndef(); | |||
4512 | bool Def2 = !Elems[I2].isUndef(); | |||
4513 | if (Def1 || Def2) { | |||
4514 | SDValue Elem1 = Elems[Def1 ? I1 : I2]; | |||
4515 | SDValue Elem2 = Elems[Def2 ? I2 : I1]; | |||
4516 | Result = DAG.getNode(ISD::BITCAST, DL, VT, | |||
4517 | joinDwords(DAG, DL, Elem1, Elem2)); | |||
4518 | Done[I1] = true; | |||
4519 | Done[I2] = true; | |||
4520 | } else | |||
4521 | Result = DAG.getUNDEF(VT); | |||
4522 | } | |||
4523 | } | |||
4524 | ||||
4525 | // Use VLVGx to insert the other elements. | |||
4526 | for (unsigned I = 0; I < NumElements; ++I) | |||
4527 | if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal) | |||
4528 | Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I], | |||
4529 | DAG.getConstant(I, DL, MVT::i32)); | |||
4530 | return Result; | |||
4531 | } | |||
4532 | ||||
4533 | SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op, | |||
4534 | SelectionDAG &DAG) const { | |||
4535 | const SystemZInstrInfo *TII = | |||
4536 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); | |||
4537 | auto *BVN = cast<BuildVectorSDNode>(Op.getNode()); | |||
4538 | SDLoc DL(Op); | |||
4539 | EVT VT = Op.getValueType(); | |||
4540 | ||||
4541 | if (BVN->isConstant()) { | |||
4542 | // Try using VECTOR GENERATE BYTE MASK. This is the architecturally- | |||
4543 | // preferred way of creating all-zero and all-one vectors so give it | |||
4544 | // priority over other methods below. | |||
4545 | uint64_t Mask = 0; | |||
4546 | if (tryBuildVectorByteMask(BVN, Mask)) { | |||
4547 | SDValue Op = DAG.getNode( | |||
4548 | SystemZISD::BYTE_MASK, DL, MVT::v16i8, | |||
4549 | DAG.getConstant(Mask, DL, MVT::i32, false, true /*isOpaque*/)); | |||
4550 | return DAG.getNode(ISD::BITCAST, DL, VT, Op); | |||
4551 | } | |||
4552 | ||||
4553 | // Try using some form of replication. | |||
4554 | APInt SplatBits, SplatUndef; | |||
4555 | unsigned SplatBitSize; | |||
4556 | bool HasAnyUndefs; | |||
4557 | if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, | |||
4558 | 8, true) && | |||
4559 | SplatBitSize <= 64) { | |||
4560 | // First try assuming that any undefined bits above the highest set bit | |||
4561 | // and below the lowest set bit are 1s. This increases the likelihood of | |||
4562 | // being able to use a sign-extended element value in VECTOR REPLICATE | |||
4563 | // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK. | |||
4564 | uint64_t SplatBitsZ = SplatBits.getZExtValue(); | |||
4565 | uint64_t SplatUndefZ = SplatUndef.getZExtValue(); | |||
4566 | // countTrailingZeros/countLeadingZeros are well-defined for a zero input, | |||
4567 | // unlike shifting by findFirstSet/findLastSet, which return npos there. | |||
4568 | uint64_t Lower = SplatUndefZ & maskTrailingOnes<uint64_t>(countTrailingZeros(SplatBitsZ)); | |||
4569 | uint64_t Upper = SplatUndefZ & maskLeadingOnes<uint64_t>(countLeadingZeros(SplatBitsZ)); | |||
4570 | uint64_t Value = SplatBitsZ | Upper | Lower; | |||
4571 | SDValue Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value, | |||
4572 | SplatBitSize); | |||
4573 | if (Op.getNode()) | |||
4574 | return Op; | |||
4575 | ||||
4576 | // Now try assuming that any undefined bits between the first and | |||
4577 | // last defined set bits are set. This increases the chances of | |||
4578 | // using a non-wraparound mask. | |||
4579 | uint64_t Middle = SplatUndefZ & ~Upper & ~Lower; | |||
4580 | Value = SplatBitsZ | Middle; | |||
4581 | Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value, SplatBitSize); | |||
4582 | if (Op.getNode()) | |||
4583 | return Op; | |||
4584 | } | |||
4585 | ||||
4586 | // Fall back to loading it from memory. | |||
4587 | return SDValue(); | |||
4588 | } | |||
4589 | ||||
4590 | // See if we should use shuffles to construct the vector from other vectors. | |||
4591 | if (SDValue Res = tryBuildVectorShuffle(DAG, BVN)) | |||
4592 | return Res; | |||
4593 | ||||
4594 | // Detect SCALAR_TO_VECTOR conversions. | |||
4595 | if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op)) | |||
4596 | return buildScalarToVector(DAG, DL, VT, Op.getOperand(0)); | |||
4597 | ||||
4598 | // Otherwise use buildVector to build the vector up from GPRs. | |||
4599 | unsigned NumElements = Op.getNumOperands(); | |||
4600 | SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements); | |||
4601 | for (unsigned I = 0; I < NumElements; ++I) | |||
4602 | Ops[I] = Op.getOperand(I); | |||
4603 | return buildVector(DAG, DL, VT, Ops); | |||
4604 | } | |||
4605 | ||||
4606 | SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op, | |||
4607 | SelectionDAG &DAG) const { | |||
4608 | auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode()); | |||
4609 | SDLoc DL(Op); | |||
4610 | EVT VT = Op.getValueType(); | |||
4611 | unsigned NumElements = VT.getVectorNumElements(); | |||
4612 | ||||
4613 | if (VSN->isSplat()) { | |||
4614 | SDValue Op0 = Op.getOperand(0); | |||
4615 | unsigned Index = VSN->getSplatIndex(); | |||
4616 | assert(Index < VT.getVectorNumElements() && | |||
4617 | "Splat index should be defined and in first operand"); | |||
4618 | // See whether the value we're splatting is directly available as a scalar. | |||
4619 | if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) || | |||
4620 | Op0.getOpcode() == ISD::BUILD_VECTOR) | |||
4621 | return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index)); | |||
4622 | // Otherwise keep it as a vector-to-vector operation. | |||
4623 | return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0), | |||
4624 | DAG.getConstant(Index, DL, MVT::i32)); | |||
4625 | } | |||
4626 | ||||
4627 | GeneralShuffle GS(VT); | |||
4628 | for (unsigned I = 0; I < NumElements; ++I) { | |||
4629 | int Elt = VSN->getMaskElt(I); | |||
4630 | if (Elt < 0) | |||
4631 | GS.addUndef(); | |||
4632 | else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements), | |||
4633 | unsigned(Elt) % NumElements)) | |||
4634 | return SDValue(); | |||
4635 | } | |||
4636 | return GS.getNode(DAG, SDLoc(VSN)); | |||
4637 | } | |||
4638 | ||||
4639 | SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op, | |||
4640 | SelectionDAG &DAG) const { | |||
4641 | SDLoc DL(Op); | |||
4642 | // Just insert the scalar into element 0 of an undefined vector. | |||
4643 | return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, | |||
4644 | Op.getValueType(), DAG.getUNDEF(Op.getValueType()), | |||
4645 | Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32)); | |||
4646 | } | |||
4647 | ||||
4648 | SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, | |||
4649 | SelectionDAG &DAG) const { | |||
4650 | // Handle insertions of floating-point values. | |||
4651 | SDLoc DL(Op); | |||
4652 | SDValue Op0 = Op.getOperand(0); | |||
4653 | SDValue Op1 = Op.getOperand(1); | |||
4654 | SDValue Op2 = Op.getOperand(2); | |||
4655 | EVT VT = Op.getValueType(); | |||
4656 | ||||
4657 | // Insertions into constant indices of a v2f64 can be done using VPDI. | |||
4658 | // However, if the inserted value is a bitcast or a constant then it's | |||
4659 | // better to use GPRs, as below. | |||
4660 | if (VT == MVT::v2f64 && | |||
4661 | Op1.getOpcode() != ISD::BITCAST && | |||
4662 | Op1.getOpcode() != ISD::ConstantFP && | |||
4663 | Op2.getOpcode() == ISD::Constant) { | |||
4664 | uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue(); | |||
4665 | unsigned Mask = VT.getVectorNumElements() - 1; | |||
4666 | if (Index <= Mask) | |||
4667 | return Op; | |||
4668 | } | |||
4669 | ||||
4670 | // Otherwise bitcast to the equivalent integer form and insert via a GPR. | |||
4671 | MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits()); | |||
4672 | MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements()); | |||
4673 | SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT, | |||
4674 | DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), | |||
4675 | DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2); | |||
4676 | return DAG.getNode(ISD::BITCAST, DL, VT, Res); | |||
4677 | } | |||
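
// Why the fallback above is sound (editorial sketch): inserting a
// floating-point lane and inserting its raw bit pattern into the bitcast
// integer vector produce the same bytes. The scalar bit-move it relies on,
// assuming IEEE f64 and memcpy-style type punning:
static uint64_t bitsOfDouble(double D) {
  uint64_t Bits;
  __builtin_memcpy(&Bits, &D, sizeof Bits); // BITCAST f64 -> i64
  return Bits;
}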
4678 | ||||
4679 | SDValue | |||
4680 | SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, | |||
4681 | SelectionDAG &DAG) const { | |||
4682 | // Handle extractions of floating-point values. | |||
4683 | SDLoc DL(Op); | |||
4684 | SDValue Op0 = Op.getOperand(0); | |||
4685 | SDValue Op1 = Op.getOperand(1); | |||
4686 | EVT VT = Op.getValueType(); | |||
4687 | EVT VecVT = Op0.getValueType(); | |||
4688 | ||||
4689 | // Extractions of constant indices can be done directly. | |||
4690 | if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) { | |||
4691 | uint64_t Index = CIndexN->getZExtValue(); | |||
4692 | unsigned Mask = VecVT.getVectorNumElements() - 1; | |||
4693 | if (Index <= Mask) | |||
4694 | return Op; | |||
4695 | } | |||
4696 | ||||
4697 | // Otherwise bitcast to the equivalent integer form and extract via a GPR. | |||
4698 | MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits()); | |||
4699 | MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements()); | |||
4700 | SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT, | |||
4701 | DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1); | |||
4702 | return DAG.getNode(ISD::BITCAST, DL, VT, Res); | |||
4703 | } | |||
4704 | ||||
4705 | SDValue | |||
4706 | SystemZTargetLowering::lowerExtendVectorInreg(SDValue Op, SelectionDAG &DAG, | |||
4707 | unsigned UnpackHigh) const { | |||
4708 | SDValue PackedOp = Op.getOperand(0); | |||
4709 | EVT OutVT = Op.getValueType(); | |||
4710 | EVT InVT = PackedOp.getValueType(); | |||
4711 | unsigned ToBits = OutVT.getScalarSizeInBits(); | |||
4712 | unsigned FromBits = InVT.getScalarSizeInBits(); | |||
4713 | do { | |||
4714 | FromBits *= 2; | |||
4715 | EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits), | |||
4716 | SystemZ::VectorBits / FromBits); | |||
4717 | PackedOp = DAG.getNode(UnpackHigh, SDLoc(PackedOp), OutVT, PackedOp); | |||
4718 | } while (FromBits != ToBits); | |||
4719 | return PackedOp; | |||
4720 | } | |||
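
// A small model (editorial sketch) of the loop above: each UNPACK doubles
// the element width, so the number of unpack nodes emitted is just the
// number of doublings from FromBits to ToBits (ToBits is assumed to be
// FromBits times a power of two, as in the do/while above).
static unsigned numUnpackSteps(unsigned FromBits, unsigned ToBits) {
  unsigned Steps = 0;
  do {
    FromBits *= 2;
    ++Steps;
  } while (FromBits != ToBits);
  return Steps;
}
// For example, extending 8-bit elements all the way to 64 bits
// (8 -> 16 -> 32 -> 64) takes numUnpackSteps(8, 64) == 3 unpack nodes.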
4721 | ||||
4722 | SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG, | |||
4723 | unsigned ByScalar) const { | |||
4724 | // Look for cases where a vector shift can use the *_BY_SCALAR form. | |||
4725 | SDValue Op0 = Op.getOperand(0); | |||
4726 | SDValue Op1 = Op.getOperand(1); | |||
4727 | SDLoc DL(Op); | |||
4728 | EVT VT = Op.getValueType(); | |||
4729 | unsigned ElemBitSize = VT.getScalarSizeInBits(); | |||
4730 | ||||
4731 | // See whether the shift vector is a splat represented as BUILD_VECTOR. | |||
4732 | if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) { | |||
4733 | APInt SplatBits, SplatUndef; | |||
4734 | unsigned SplatBitSize; | |||
4735 | bool HasAnyUndefs; | |||
4736 | // Check for constant splats. Use ElemBitSize as the minimum element | |||
4737 | // width and reject splats that need wider elements. | |||
4738 | if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, | |||
4739 | ElemBitSize, true) && | |||
4740 | SplatBitSize == ElemBitSize) { | |||
4741 | SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff, | |||
4742 | DL, MVT::i32); | |||
4743 | return DAG.getNode(ByScalar, DL, VT, Op0, Shift); | |||
4744 | } | |||
4745 | // Check for variable splats. | |||
4746 | BitVector UndefElements; | |||
4747 | SDValue Splat = BVN->getSplatValue(&UndefElements); | |||
4748 | if (Splat) { | |||
4749 | // Since i32 is the smallest legal type, we either need a no-op | |||
4750 | // or a truncation. | |||
4751 | SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat); | |||
4752 | return DAG.getNode(ByScalar, DL, VT, Op0, Shift); | |||
4753 | } | |||
4754 | } | |||
4755 | ||||
4756 | // See whether the shift vector is a splat represented as SHUFFLE_VECTOR, | |||
4757 | // and the shift amount is directly available in a GPR. | |||
4758 | if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) { | |||
4759 | if (VSN->isSplat()) { | |||
4760 | SDValue VSNOp0 = VSN->getOperand(0); | |||
4761 | unsigned Index = VSN->getSplatIndex(); | |||
4762 | assert(Index < VT.getVectorNumElements() && | |||
4763 | "Splat index should be defined and in first operand"); | |||
4764 | if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) || | |||
4765 | VSNOp0.getOpcode() == ISD::BUILD_VECTOR) { | |||
4766 | // Since i32 is the smallest legal type, we either need a no-op | |||
4767 | // or a truncation. | |||
4768 | SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, | |||
4769 | VSNOp0.getOperand(Index)); | |||
4770 | return DAG.getNode(ByScalar, DL, VT, Op0, Shift); | |||
4771 | } | |||
4772 | } | |||
4773 | } | |||
4774 | ||||
4775 | // Otherwise just treat the current form as legal. | |||
4776 | return Op; | |||
4777 | } | |||
4778 | ||||
4779 | SDValue SystemZTargetLowering::LowerOperation(SDValue Op, | |||
4780 | SelectionDAG &DAG) const { | |||
4781 | switch (Op.getOpcode()) { | |||
| ||||
4782 | case ISD::FRAMEADDR: | |||
4783 | return lowerFRAMEADDR(Op, DAG); | |||
4784 | case ISD::RETURNADDR: | |||
4785 | return lowerRETURNADDR(Op, DAG); | |||
4786 | case ISD::BR_CC: | |||
4787 | return lowerBR_CC(Op, DAG); | |||
4788 | case ISD::SELECT_CC: | |||
4789 | return lowerSELECT_CC(Op, DAG); | |||
4790 | case ISD::SETCC: | |||
4791 | return lowerSETCC(Op, DAG); | |||
4792 | case ISD::GlobalAddress: | |||
4793 | return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); | |||
4794 | case ISD::GlobalTLSAddress: | |||
4795 | return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); | |||
4796 | case ISD::BlockAddress: | |||
4797 | return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); | |||
4798 | case ISD::JumpTable: | |||
4799 | return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); | |||
4800 | case ISD::ConstantPool: | |||
4801 | return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); | |||
4802 | case ISD::BITCAST: | |||
4803 | return lowerBITCAST(Op, DAG); | |||
4804 | case ISD::VASTART: | |||
4805 | return lowerVASTART(Op, DAG); | |||
4806 | case ISD::VACOPY: | |||
4807 | return lowerVACOPY(Op, DAG); | |||
4808 | case ISD::DYNAMIC_STACKALLOC: | |||
4809 | return lowerDYNAMIC_STACKALLOC(Op, DAG); | |||
4810 | case ISD::GET_DYNAMIC_AREA_OFFSET: | |||
4811 | return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG); | |||
4812 | case ISD::SMUL_LOHI: | |||
4813 | return lowerSMUL_LOHI(Op, DAG); | |||
4814 | case ISD::UMUL_LOHI: | |||
4815 | return lowerUMUL_LOHI(Op, DAG); | |||
4816 | case ISD::SDIVREM: | |||
4817 | return lowerSDIVREM(Op, DAG); | |||
4818 | case ISD::UDIVREM: | |||
4819 | return lowerUDIVREM(Op, DAG); | |||
4820 | case ISD::SADDO: | |||
4821 | case ISD::SSUBO: | |||
4822 | case ISD::UADDO: | |||
4823 | case ISD::USUBO: | |||
4824 | return lowerXALUO(Op, DAG); | |||
4825 | case ISD::ADDCARRY: | |||
4826 | case ISD::SUBCARRY: | |||
4827 | return lowerADDSUBCARRY(Op, DAG); | |||
4828 | case ISD::OR: | |||
4829 | return lowerOR(Op, DAG); | |||
4830 | case ISD::CTPOP: | |||
4831 | return lowerCTPOP(Op, DAG); | |||
4832 | case ISD::ATOMIC_FENCE: | |||
4833 | return lowerATOMIC_FENCE(Op, DAG); | |||
4834 | case ISD::ATOMIC_SWAP: | |||
4835 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW); | |||
4836 | case ISD::ATOMIC_STORE: | |||
4837 | return lowerATOMIC_STORE(Op, DAG); | |||
4838 | case ISD::ATOMIC_LOAD: | |||
4839 | return lowerATOMIC_LOAD(Op, DAG); | |||
4840 | case ISD::ATOMIC_LOAD_ADD: | |||
4841 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); | |||
4842 | case ISD::ATOMIC_LOAD_SUB: | |||
4843 | return lowerATOMIC_LOAD_SUB(Op, DAG); | |||
4844 | case ISD::ATOMIC_LOAD_AND: | |||
4845 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND); | |||
4846 | case ISD::ATOMIC_LOAD_OR: | |||
4847 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); | |||
4848 | case ISD::ATOMIC_LOAD_XOR: | |||
4849 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); | |||
4850 | case ISD::ATOMIC_LOAD_NAND: | |||
4851 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); | |||
4852 | case ISD::ATOMIC_LOAD_MIN: | |||
4853 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); | |||
4854 | case ISD::ATOMIC_LOAD_MAX: | |||
4855 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); | |||
4856 | case ISD::ATOMIC_LOAD_UMIN: | |||
4857 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); | |||
4858 | case ISD::ATOMIC_LOAD_UMAX: | |||
4859 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); | |||
4860 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: | |||
4861 | return lowerATOMIC_CMP_SWAP(Op, DAG); | |||
4862 | case ISD::STACKSAVE: | |||
4863 | return lowerSTACKSAVE(Op, DAG); | |||
4864 | case ISD::STACKRESTORE: | |||
4865 | return lowerSTACKRESTORE(Op, DAG); | |||
4866 | case ISD::PREFETCH: | |||
4867 | return lowerPREFETCH(Op, DAG); | |||
4868 | case ISD::INTRINSIC_W_CHAIN: | |||
4869 | return lowerINTRINSIC_W_CHAIN(Op, DAG); | |||
4870 | case ISD::INTRINSIC_WO_CHAIN: | |||
4871 | return lowerINTRINSIC_WO_CHAIN(Op, DAG); | |||
4872 | case ISD::BUILD_VECTOR: | |||
4873 | return lowerBUILD_VECTOR(Op, DAG); | |||
4874 | case ISD::VECTOR_SHUFFLE: | |||
4875 | return lowerVECTOR_SHUFFLE(Op, DAG); | |||
4876 | case ISD::SCALAR_TO_VECTOR: | |||
4877 | return lowerSCALAR_TO_VECTOR(Op, DAG); | |||
4878 | case ISD::INSERT_VECTOR_ELT: | |||
4879 | return lowerINSERT_VECTOR_ELT(Op, DAG); | |||
4880 | case ISD::EXTRACT_VECTOR_ELT: | |||
4881 | return lowerEXTRACT_VECTOR_ELT(Op, DAG); | |||
4882 | case ISD::SIGN_EXTEND_VECTOR_INREG: | |||
4883 | return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACK_HIGH); | |||
4884 | case ISD::ZERO_EXTEND_VECTOR_INREG: | |||
4885 | return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACKL_HIGH); | |||
4886 | case ISD::SHL: | |||
4887 | return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR); | |||
4888 | case ISD::SRL: | |||
4889 | return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR); | |||
4890 | case ISD::SRA: | |||
4891 | return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR); | |||
4892 | default: | |||
4893 | llvm_unreachable("Unexpected node to lower"); | |||
4894 | } | |||
4895 | } | |||
4896 | ||||
4897 | // Lower operations with invalid operand or result types (currently used | |||
4898 | // only for 128-bit integer types). | |||
4899 | ||||
4900 | static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) { | |||
4901 | SDLoc DL(In); | |||
4902 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In, | |||
4903 | DAG.getIntPtrConstant(0, DL)); | |||
4904 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In, | |||
4905 | DAG.getIntPtrConstant(1, DL)); | |||
4906 | SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL, | |||
4907 | MVT::Untyped, Hi, Lo); | |||
4908 | return SDValue(Pair, 0); | |||
4909 | } | |||
4910 | ||||
4911 | static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) { | |||
4912 | SDLoc DL(In); | |||
4913 | SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64, | |||
4914 | DL, MVT::i64, In); | |||
4915 | SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64, | |||
4916 | DL, MVT::i64, In); | |||
4917 | return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi); | |||
4918 | } | |||
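
// The two helpers above are just a split/join between an i128 and its
// (high, low) 64-bit halves, routed through the untyped register pair.
// The same round trip over the __int128 compiler extension (editorial
// sketch; assumes a 64-bit Clang/GCC-style compiler):
static void splitI128(unsigned __int128 In, uint64_t &Hi, uint64_t &Lo) {
  Lo = (uint64_t)In;         // EXTRACT_ELEMENT ..., 0
  Hi = (uint64_t)(In >> 64); // EXTRACT_ELEMENT ..., 1
}
static unsigned __int128 joinI128(uint64_t Hi, uint64_t Lo) {
  return ((unsigned __int128)Hi << 64) | Lo; // BUILD_PAIR Lo, Hi
}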
4919 | ||||
4920 | void | |||
4921 | SystemZTargetLowering::LowerOperationWrapper(SDNode *N, | |||
4922 | SmallVectorImpl<SDValue> &Results, | |||
4923 | SelectionDAG &DAG) const { | |||
4924 | switch (N->getOpcode()) { | |||
4925 | case ISD::ATOMIC_LOAD: { | |||
4926 | SDLoc DL(N); | |||
4927 | SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other); | |||
4928 | SDValue Ops[] = { N->getOperand(0), N->getOperand(1) }; | |||
4929 | MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); | |||
4930 | SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_LOAD_128, | |||
4931 | DL, Tys, Ops, MVT::i128, MMO); | |||
4932 | Results.push_back(lowerGR128ToI128(DAG, Res)); | |||
4933 | Results.push_back(Res.getValue(1)); | |||
4934 | break; | |||
4935 | } | |||
4936 | case ISD::ATOMIC_STORE: { | |||
4937 | SDLoc DL(N); | |||
4938 | SDVTList Tys = DAG.getVTList(MVT::Other); | |||
4939 | SDValue Ops[] = { N->getOperand(0), | |||
4940 | lowerI128ToGR128(DAG, N->getOperand(2)), | |||
4941 | N->getOperand(1) }; | |||
4942 | MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); | |||
4943 | SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_STORE_128, | |||
4944 | DL, Tys, Ops, MVT::i128, MMO); | |||
4945 | // We have to enforce sequential consistency by performing a | |||
4946 | // serialization operation after the store. | |||
4947 | if (cast<AtomicSDNode>(N)->getOrdering() == | |||
4948 | AtomicOrdering::SequentiallyConsistent) | |||
4949 | Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, | |||
4950 | MVT::Other, Res), 0); | |||
4951 | Results.push_back(Res); | |||
4952 | break; | |||
4953 | } | |||
4954 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: { | |||
4955 | SDLoc DL(N); | |||
4956 | SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other); | |||
4957 | SDValue Ops[] = { N->getOperand(0), N->getOperand(1), | |||
4958 | lowerI128ToGR128(DAG, N->getOperand(2)), | |||
4959 | lowerI128ToGR128(DAG, N->getOperand(3)) }; | |||
4960 | MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); | |||
4961 | SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP_128, | |||
4962 | DL, Tys, Ops, MVT::i128, MMO); | |||
4963 | SDValue Success = emitSETCC(DAG, DL, Res.getValue(1), | |||
4964 | SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ); | |||
4965 | Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1)); | |||
4966 | Results.push_back(lowerGR128ToI128(DAG, Res)); | |||
4967 | Results.push_back(Success); | |||
4968 | Results.push_back(Res.getValue(2)); | |||
4969 | break; | |||
4970 | } | |||
4971 | default: | |||
4972 | llvm_unreachable("Unexpected node to lower"); | |||
4973 | } | |||
4974 | } | |||
4975 | ||||
4976 | void | |||
4977 | SystemZTargetLowering::ReplaceNodeResults(SDNode *N, | |||
4978 | SmallVectorImpl<SDValue> &Results, | |||
4979 | SelectionDAG &DAG) const { | |||
4980 | return LowerOperationWrapper(N, Results, DAG); | |||
4981 | } | |||
4982 | ||||
4983 | const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { | |||
4984 | #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME | |||
4985 | switch ((SystemZISD::NodeType)Opcode) { | |||
4986 | case SystemZISD::FIRST_NUMBER: break; | |||
4987 | OPCODE(RET_FLAG); | |||
4988 | OPCODE(CALL); | |||
4989 | OPCODE(SIBCALL); | |||
4990 | OPCODE(TLS_GDCALL); | |||
4991 | OPCODE(TLS_LDCALL); | |||
4992 | OPCODE(PCREL_WRAPPER); | |||
4993 | OPCODE(PCREL_OFFSET); | |||
4994 | OPCODE(IABS); | |||
4995 | OPCODE(ICMP); | |||
4996 | OPCODE(FCMP); | |||
4997 | OPCODE(TM); | |||
4998 | OPCODE(BR_CCMASK); | |||
4999 | OPCODE(SELECT_CCMASK); | |||
5000 | OPCODE(ADJDYNALLOC); | |||
5001 | OPCODE(POPCNT); | |||
5002 | OPCODE(SMUL_LOHI); | |||
5003 | OPCODE(UMUL_LOHI); | |||
5004 | OPCODE(SDIVREM); | |||
5005 | OPCODE(UDIVREM); | |||
5006 | OPCODE(SADDO); | |||
5007 | OPCODE(SSUBO); | |||
5008 | OPCODE(UADDO); | |||
5009 | OPCODE(USUBO); | |||
5010 | OPCODE(ADDCARRY); | |||
5011 | OPCODE(SUBCARRY); | |||
5012 | OPCODE(GET_CCMASK); | |||
5013 | OPCODE(MVC); | |||
5014 | OPCODE(MVC_LOOP); | |||
5015 | OPCODE(NC); | |||
5016 | OPCODE(NC_LOOP); | |||
5017 | OPCODE(OC); | |||
5018 | OPCODE(OC_LOOP); | |||
5019 | OPCODE(XC); | |||
5020 | OPCODE(XC_LOOP); | |||
5021 | OPCODE(CLC); | |||
5022 | OPCODE(CLC_LOOP); | |||
5023 | OPCODE(STPCPY); | |||
5024 | OPCODE(STRCMP); | |||
5025 | OPCODE(SEARCH_STRING); | |||
5026 | OPCODE(IPM); | |||
5027 | OPCODE(MEMBARRIER); | |||
5028 | OPCODE(TBEGIN); | |||
5029 | OPCODE(TBEGIN_NOFLOAT); | |||
5030 | OPCODE(TEND); | |||
5031 | OPCODE(BYTE_MASK); | |||
5032 | OPCODE(ROTATE_MASK); | |||
5033 | OPCODE(REPLICATE); | |||
5034 | OPCODE(JOIN_DWORDS); | |||
5035 | OPCODE(SPLAT); | |||
5036 | OPCODE(MERGE_HIGH); | |||
5037 | OPCODE(MERGE_LOW); | |||
5038 | OPCODE(SHL_DOUBLE); | |||
5039 | OPCODE(PERMUTE_DWORDS); | |||
5040 | OPCODE(PERMUTE); | |||
5041 | OPCODE(PACK); | |||
5042 | OPCODE(PACKS_CC); | |||
5043 | OPCODE(PACKLS_CC); | |||
5044 | OPCODE(UNPACK_HIGH); | |||
5045 | OPCODE(UNPACKL_HIGH); | |||
5046 | OPCODE(UNPACK_LOW); | |||
5047 | OPCODE(UNPACKL_LOW); | |||
5048 | OPCODE(VSHL_BY_SCALAR); | |||
5049 | OPCODE(VSRL_BY_SCALAR); | |||
5050 | OPCODE(VSRA_BY_SCALAR); | |||
5051 | OPCODE(VSUM); | |||
5052 | OPCODE(VICMPE); | |||
5053 | OPCODE(VICMPH); | |||
5054 | OPCODE(VICMPHL); | |||
5055 | OPCODE(VICMPES); | |||
5056 | OPCODE(VICMPHS); | |||
5057 | OPCODE(VICMPHLS); | |||
5058 | OPCODE(VFCMPE); | |||
5059 | OPCODE(VFCMPH); | |||
5060 | OPCODE(VFCMPHE); | |||
5061 | OPCODE(VFCMPES); | |||
5062 | OPCODE(VFCMPHS); | |||
5063 | OPCODE(VFCMPHES); | |||
5064 | OPCODE(VFTCI); | |||
5065 | OPCODE(VEXTEND); | |||
5066 | OPCODE(VROUND); | |||
5067 | OPCODE(VTM); | |||
5068 | OPCODE(VFAE_CC); | |||
5069 | OPCODE(VFAEZ_CC); | |||
5070 | OPCODE(VFEE_CC); | |||
5071 | OPCODE(VFEEZ_CC); | |||
5072 | OPCODE(VFENE_CC); | |||
5073 | OPCODE(VFENEZ_CC); | |||
5074 | OPCODE(VISTR_CC); | |||
5075 | OPCODE(VSTRC_CC); | |||
5076 | OPCODE(VSTRCZ_CC); | |||
5077 | OPCODE(TDC); | |||
5078 | OPCODE(ATOMIC_SWAPW); | |||
5079 | OPCODE(ATOMIC_LOADW_ADD); | |||
5080 | OPCODE(ATOMIC_LOADW_SUB); | |||
5081 | OPCODE(ATOMIC_LOADW_AND); | |||
5082 | OPCODE(ATOMIC_LOADW_OR); | |||
5083 | OPCODE(ATOMIC_LOADW_XOR); | |||
5084 | OPCODE(ATOMIC_LOADW_NAND); | |||
5085 | OPCODE(ATOMIC_LOADW_MIN); | |||
5086 | OPCODE(ATOMIC_LOADW_MAX); | |||
5087 | OPCODE(ATOMIC_LOADW_UMIN); | |||
5088 | OPCODE(ATOMIC_LOADW_UMAX); | |||
5089 | OPCODE(ATOMIC_CMP_SWAPW); | |||
5090 | OPCODE(ATOMIC_CMP_SWAP); | |||
5091 | OPCODE(ATOMIC_LOAD_128); | |||
5092 | OPCODE(ATOMIC_STORE_128); | |||
5093 | OPCODE(ATOMIC_CMP_SWAP_128); | |||
5094 | OPCODE(LRV); | |||
5095 | OPCODE(STRV); | |||
5096 | OPCODE(PREFETCH); | |||
5097 | } | |||
5098 | return nullptr; | |||
5099 | #undef OPCODE | |||
5100 | } | |||
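
// The OPCODE macro above leans on the preprocessor stringizing the same
// token that forms the case label, so a label and the name returned for it
// can never drift apart. The pattern in miniature (editorial sketch with
// made-up identifiers):
namespace {
enum Fruit { Apple, Pear };
const char *fruitName(Fruit F) {
#define NAME(X) case X: return #X
  switch (F) {
  NAME(Apple);
  NAME(Pear);
  }
#undef NAME
  return nullptr;
}
} // end anonymous namespace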
5101 | ||||
5102 | // Return true if VT is a vector whose elements are a whole number of bytes | |||
5103 | // in width. Also check for presence of vector support. | |||
5104 | bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const { | |||
5105 | if (!Subtarget.hasVector()) | |||
5106 | return false; | |||
5107 | ||||
5108 | return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple(); | |||
5109 | } | |||
5110 | ||||
5111 | // Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT | |||
5112 | // producing a result of type ResVT. Op is a possibly bitcast version | |||
5113 | // of the input vector and Index is the index (based on type VecVT) that | |||
5114 | // should be extracted. Return the new extraction if a simplification | |||
5115 | // was possible or if Force is true. | |||
5116 | SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT, | |||
5117 | EVT VecVT, SDValue Op, | |||
5118 | unsigned Index, | |||
5119 | DAGCombinerInfo &DCI, | |||
5120 | bool Force) const { | |||
5121 | SelectionDAG &DAG = DCI.DAG; | |||
5122 | ||||
5123 | // The number of bytes being extracted. | |||
5124 | unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize(); | |||
5125 | ||||
5126 | for (;;) { | |||
5127 | unsigned Opcode = Op.getOpcode(); | |||
5128 | if (Opcode == ISD::BITCAST) | |||
5129 | // Look through bitcasts. | |||
5130 | Op = Op.getOperand(0); | |||
5131 | else if ((Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) && | |||
5132 | canTreatAsByteVector(Op.getValueType())) { | |||
5133 | // Get a VPERM-like permute mask and see whether the bytes covered | |||
5134 | // by the extracted element are a contiguous sequence from one | |||
5135 | // source operand. | |||
5136 | SmallVector<int, SystemZ::VectorBytes> Bytes; | |||
5137 | if (!getVPermMask(Op, Bytes)) | |||
5138 | break; | |||
5139 | int First; | |||
5140 | if (!getShuffleInput(Bytes, Index * BytesPerElement, | |||
5141 | BytesPerElement, First)) | |||
5142 | break; | |||
5143 | if (First < 0) | |||
5144 | return DAG.getUNDEF(ResVT); | |||
5145 | // Make sure the contiguous sequence starts at a multiple of the | |||
5146 | // original element size. | |||
5147 | unsigned Byte = unsigned(First) % Bytes.size(); | |||
5148 | if (Byte % BytesPerElement != 0) | |||
5149 | break; | |||
5150 | // We can get the extracted value directly from an input. | |||
5151 | Index = Byte / BytesPerElement; | |||
5152 | Op = Op.getOperand(unsigned(First) / Bytes.size()); | |||
5153 | Force = true; | |||
5154 | } else if (Opcode == ISD::BUILD_VECTOR && | |||
5155 | canTreatAsByteVector(Op.getValueType())) { | |||
5156 | // We can only optimize this case if the BUILD_VECTOR elements are | |||
5157 | // at least as wide as the extracted value. | |||
5158 | EVT OpVT = Op.getValueType(); | |||
5159 | unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize(); | |||
5160 | if (OpBytesPerElement < BytesPerElement) | |||
5161 | break; | |||
5162 | // Make sure that the least-significant bit of the extracted value | |||
5163 | // is the least-significant bit of an input. | |||
5164 | unsigned End = (Index + 1) * BytesPerElement; | |||
5165 | if (End % OpBytesPerElement != 0) | |||
5166 | break; | |||
5167 | // We're extracting the low part of one operand of the BUILD_VECTOR. | |||
5168 | Op = Op.getOperand(End / OpBytesPerElement - 1); | |||
5169 | if (!Op.getValueType().isInteger()) { | |||
5170 | EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits()); | |||
5171 | Op = DAG.getNode(ISD::BITCAST, DL, VT, Op); | |||
5172 | DCI.AddToWorklist(Op.getNode()); | |||
5173 | } | |||
5174 | EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits()); | |||
5175 | Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op); | |||
5176 | if (VT != ResVT) { | |||
5177 | DCI.AddToWorklist(Op.getNode()); | |||
5178 | Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op); | |||
5179 | } | |||
5180 | return Op; | |||
5181 | } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG || | |||
5182 | Opcode == ISD::ZERO_EXTEND_VECTOR_INREG || | |||
5183 | Opcode == ISD::ANY_EXTEND_VECTOR_INREG) && | |||
5184 | canTreatAsByteVector(Op.getValueType()) && | |||
5185 | canTreatAsByteVector(Op.getOperand(0).getValueType())) { | |||
5186 | // Make sure that only the unextended bits are significant. | |||
5187 | EVT ExtVT = Op.getValueType(); | |||
5188 | EVT OpVT = Op.getOperand(0).getValueType(); | |||
5189 | unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize(); | |||
5190 | unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize(); | |||
5191 | unsigned Byte = Index * BytesPerElement; | |||
5192 | unsigned SubByte = Byte % ExtBytesPerElement; | |||
5193 | unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement; | |||
5194 | if (SubByte < MinSubByte || | |||
5195 | SubByte + BytesPerElement > ExtBytesPerElement) | |||
5196 | break; | |||
5197 | // Get the byte offset of the unextended element... | |||
5198 | Byte = Byte / ExtBytesPerElement * OpBytesPerElement; | |||
5199 | // ...then add the byte offset relative to that element. | |||
5200 | Byte += SubByte - MinSubByte; | |||
5201 | if (Byte % BytesPerElement != 0) | |||
5202 | break; | |||
5203 | Op = Op.getOperand(0); | |||
5204 | Index = Byte / BytesPerElement; | |||
5205 | Force = true; | |||
5206 | } else | |||
5207 | break; | |||
5208 | } | |||
5209 | if (Force) { | |||
5210 | if (Op.getValueType() != VecVT) { | |||
5211 | Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op); | |||
5212 | DCI.AddToWorklist(Op.getNode()); | |||
5213 | } | |||
5214 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op, | |||
5215 | DAG.getConstant(Index, DL, MVT::i32)); | |||
5216 | } | |||
5217 | return SDValue(); | |||
5218 | } | |||
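
// The shuffle case above reduces to one question: do the bytes of the
// extracted element form a single contiguous run in a VPERM-style byte
// mask? An editorial sketch of that test, simplified from getShuffleInput
// (undef bytes and the which-source-operand bookkeeping are omitted):
static bool isContiguousRun(const int *Bytes, unsigned Start, unsigned Len,
                            int &First) {
  First = Bytes[Start];
  if (First < 0)
    return false; // undef (-1) handling left out of this sketch
  for (unsigned I = 1; I < Len; ++I)
    if (Bytes[Start + I] != First + (int)I)
      return false; // run broken: the combine gives up on this shuffle
  return true;
}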
5219 | ||||
5220 | // Optimize vector operations in scalar value Op on the basis that Op | |||
5221 | // is truncated to TruncVT. | |||
5222 | SDValue SystemZTargetLowering::combineTruncateExtract( | |||
5223 | const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const { | |||
5224 | // If we have (trunc (extract_vector_elt X, Y)), try to turn it into | |||
5225 | // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements | |||
5226 | // of type TruncVT. | |||
5227 | if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
5228 | TruncVT.getSizeInBits() % 8 == 0) { | |||
5229 | SDValue Vec = Op.getOperand(0); | |||
5230 | EVT VecVT = Vec.getValueType(); | |||
5231 | if (canTreatAsByteVector(VecVT)) { | |||
5232 | if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { | |||
5233 | unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize(); | |||
5234 | unsigned TruncBytes = TruncVT.getStoreSize(); | |||
5235 | if (BytesPerElement % TruncBytes == 0) { | |||
5236 | // Calculate the value of Y' in the above description. We are | |||
5237 | // splitting the original elements into Scale equal-sized pieces | |||
5238 | // and for truncation purposes want the last (least-significant) | |||
5239 | // of these pieces for IndexN. This is easiest to do by calculating | |||
5240 | // the start index of the following element and then subtracting 1. | |||
5241 | unsigned Scale = BytesPerElement / TruncBytes; | |||
5242 | unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1; | |||
5243 | ||||
5244 | // Defer the creation of the bitcast from X to combineExtract, | |||
5245 | // which might be able to optimize the extraction. | |||
5246 | VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8), | |||
5247 | VecVT.getStoreSize() / TruncBytes); | |||
5248 | EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT); | |||
5249 | return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true); | |||
5250 | } | |||
5251 | } | |||
5252 | } | |||
5253 | } | |||
5254 | return SDValue(); | |||
5255 | } | |||
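
// The index arithmetic above in isolation (editorial sketch): each
// original element splits into Scale truncation-sized pieces, and on this
// big-endian target the truncated value lives in the last, i.e.
// least-significant, piece of its element.
static unsigned truncatedPieceIndex(unsigned Index, unsigned BytesPerElement,
                                    unsigned TruncBytes) {
  unsigned Scale = BytesPerElement / TruncBytes; // divides evenly, as checked
  return (Index + 1) * Scale - 1;
}
// Truncating i64 element 1 of a v2i64 to i16, for instance, gives
// Scale == 4 and piece index (1 + 1) * 4 - 1 == 7: the last i16 lane of
// the second doubleword in the bitcast v8i16 view.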
5256 | ||||
5257 | SDValue SystemZTargetLowering::combineZERO_EXTEND( | |||
5258 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
5259 | // Convert (zext (select_ccmask C1, C2)) into (select_ccmask C1', C2') | |||
5260 | SelectionDAG &DAG = DCI.DAG; | |||
5261 | SDValue N0 = N->getOperand(0); | |||
5262 | EVT VT = N->getValueType(0); | |||
5263 | if (N0.getOpcode() == SystemZISD::SELECT_CCMASK) { | |||
5264 | auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0)); | |||
5265 | auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1)); | |||
5266 | if (TrueOp && FalseOp) { | |||
5267 | SDLoc DL(N0); | |||
5268 | SDValue Ops[] = { DAG.getConstant(TrueOp->getZExtValue(), DL, VT), | |||
5269 | DAG.getConstant(FalseOp->getZExtValue(), DL, VT), | |||
5270 | N0.getOperand(2), N0.getOperand(3), N0.getOperand(4) }; | |||
5271 | SDValue NewSelect = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VT, Ops); | |||
5272 | // If N0 has multiple uses, change other uses as well. | |||
5273 | if (!N0.hasOneUse()) { | |||
5274 | SDValue TruncSelect = | |||
5275 | DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), NewSelect); | |||
5276 | DCI.CombineTo(N0.getNode(), TruncSelect); | |||
5277 | } | |||
5278 | return NewSelect; | |||
5279 | } | |||
5280 | } | |||
5281 | return SDValue(); | |||
5282 | } | |||
5283 | ||||
5284 | SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG( | |||
5285 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
5286 | // Convert (sext_in_reg (setcc LHS, RHS, COND), i1) | |||
5287 | // and (sext_in_reg (any_extend (setcc LHS, RHS, COND)), i1) | |||
5288 | // into (select_cc LHS, RHS, -1, 0, COND) | |||
5289 | SelectionDAG &DAG = DCI.DAG; | |||
5290 | SDValue N0 = N->getOperand(0); | |||
5291 | EVT VT = N->getValueType(0); | |||
5292 | EVT FromVT = cast<VTSDNode>(N->getOperand(1))->getVT(); | |||
5293 | if (N0.hasOneUse() && N0.getOpcode() == ISD::ANY_EXTEND) | |||
5294 | N0 = N0.getOperand(0); | |||
5295 | if (FromVT == MVT::i1 && N0.hasOneUse() && N0.getOpcode() == ISD::SETCC) { | |||
5296 | SDLoc DL(N0); | |||
5297 | SDValue Ops[] = { N0.getOperand(0), N0.getOperand(1), | |||
5298 | DAG.getConstant(-1, DL, VT), DAG.getConstant(0, DL, VT), | |||
5299 | N0.getOperand(2) }; | |||
5300 | return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops); | |||
5301 | } | |||
5302 | return SDValue(); | |||
5303 | } | |||
5304 | ||||
5305 | SDValue SystemZTargetLowering::combineSIGN_EXTEND( | |||
5306 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
5307 | // Convert (sext (ashr (shl X, C1), C2)) to | |||
5308 | // (ashr (shl (anyext X), C1'), C2'), since wider shifts are as | |||
5309 | // cheap as narrower ones. | |||
5310 | SelectionDAG &DAG = DCI.DAG; | |||
5311 | SDValue N0 = N->getOperand(0); | |||
5312 | EVT VT = N->getValueType(0); | |||
5313 | if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) { | |||
5314 | auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)); | |||
5315 | SDValue Inner = N0.getOperand(0); | |||
5316 | if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) { | |||
5317 | if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) { | |||
5318 | unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits()); | |||
5319 | unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra; | |||
5320 | unsigned NewSraAmt = SraAmt->getZExtValue() + Extra; | |||
5321 | EVT ShiftVT = N0.getOperand(1).getValueType(); | |||
5322 | SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT, | |||
5323 | Inner.getOperand(0)); | |||
5324 | SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext, | |||
5325 | DAG.getConstant(NewShlAmt, SDLoc(Inner), | |||
5326 | ShiftVT)); | |||
5327 | return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, | |||
5328 | DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT)); | |||
5329 | } | |||
5330 | } | |||
5331 | } | |||
5332 | return SDValue(); | |||
5333 | } | |||
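
// The identity behind the combine above, checked on concrete C++ types
// (editorial sketch; assumes 32-bit int, 64-bit long long, arithmetic >>
// on negative values and shift amounts below 32, i.e. the DAG's
// fixed-width two's-complement semantics):
static long long sextOfNarrowShifts(int X, unsigned C1, unsigned C2) {
  int Shifted = (int)((unsigned)X << C1); // i32 shl, wrapping like the node
  return (long long)(Shifted >> C2);      // i32 sra, then sign-extend
}
static long long wideShifts(int X, unsigned C1, unsigned C2) {
  unsigned Extra = 64 - 32; // result width minus source width
  long long W = (long long)((unsigned long long)(long long)X << (C1 + Extra));
  return W >> (C2 + Extra); // equals sextOfNarrowShifts(X, C1, C2)
}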
5334 | ||||
5335 | SDValue SystemZTargetLowering::combineMERGE( | |||
5336 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
5337 | SelectionDAG &DAG = DCI.DAG; | |||
5338 | unsigned Opcode = N->getOpcode(); | |||
5339 | SDValue Op0 = N->getOperand(0); | |||
5340 | SDValue Op1 = N->getOperand(1); | |||
5341 | if (Op0.getOpcode() == ISD::BITCAST) | |||
5342 | Op0 = Op0.getOperand(0); | |||
5343 | if (Op0.getOpcode() == SystemZISD::BYTE_MASK && | |||
5344 | cast<ConstantSDNode>(Op0.getOperand(0))->getZExtValue() == 0) { | |||
5345 | // (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF | |||
5346 | // for v4f32. | |||
5347 | if (Op1 == N->getOperand(0)) | |||
5348 | return Op1; | |||
5349 | // (z_merge_? 0, X) -> (z_unpackl_? 0, X). | |||
5350 | EVT VT = Op1.getValueType(); | |||
5351 | unsigned ElemBytes = VT.getVectorElementType().getStoreSize(); | |||
5352 | if (ElemBytes <= 4) { | |||
5353 | Opcode = (Opcode == SystemZISD::MERGE_HIGH ? | |||
5354 | SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW); | |||
5355 | EVT InVT = VT.changeVectorElementTypeToInteger(); | |||
5356 | EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16), | |||
5357 | SystemZ::VectorBytes / ElemBytes / 2); | |||
5358 | if (VT != InVT) { | |||
5359 | Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1); | |||
5360 | DCI.AddToWorklist(Op1.getNode()); | |||
5361 | } | |||
5362 | SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1); | |||
5363 | DCI.AddToWorklist(Op.getNode()); | |||
5364 | return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); | |||
5365 | } | |||
5366 | } | |||
5367 | return SDValue(); | |||
5368 | } | |||
5369 | ||||
5370 | SDValue SystemZTargetLowering::combineLOAD( | |||
5371 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
5372 | SelectionDAG &DAG = DCI.DAG; | |||
5373 | EVT LdVT = N->getValueType(0); | |||
5374 | if (LdVT.isVector() || LdVT.isInteger()) | |||
5375 | return SDValue(); | |||
5376 | // If a scalar load is both REPLICATEd and has other use(s), rewrite | |||
5377 | // those other use(s) to use element 0 of the REPLICATE instead of the | |||
5378 | // load; otherwise instruction selection will not produce a VLREP. | |||
5379 | // To avoid extracting into a GPR, only do this for floating-point | |||
5380 | // loads. | |||
5381 | ||||
5382 | SDValue Replicate; | |||
5383 | SmallVector<SDNode*, 8> OtherUses; | |||
5384 | for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); | |||
5385 | UI != UE; ++UI) { | |||
5386 | if (UI->getOpcode() == SystemZISD::REPLICATE) { | |||
5387 | if (Replicate) | |||
5388 | return SDValue(); // Should never happen | |||
5389 | Replicate = SDValue(*UI, 0); | |||
5390 | } | |||
5391 | else if (UI.getUse().getResNo() == 0) | |||
5392 | OtherUses.push_back(*UI); | |||
5393 | } | |||
5394 | if (!Replicate || OtherUses.empty()) | |||
5395 | return SDValue(); | |||
5396 | ||||
5397 | SDLoc DL(N); | |||
5398 | SDValue Extract0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, LdVT, | |||
5399 | Replicate, DAG.getConstant(0, DL, MVT::i32)); | |||
5400 | // Update uses of the loaded Value while preserving old chains. | |||
5401 | for (SDNode *U : OtherUses) { | |||
5402 | SmallVector<SDValue, 8> Ops; | |||
5403 | for (SDValue Op : U->ops()) | |||
5404 | Ops.push_back((Op.getNode() == N && Op.getResNo() == 0) ? Extract0 : Op); | |||
5405 | DAG.UpdateNodeOperands(U, Ops); | |||
5406 | } | |||
5407 | return SDValue(N, 0); | |||
5408 | } | |||
5409 | ||||
5410 | SDValue SystemZTargetLowering::combineSTORE( | |||
5411 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
5412 | SelectionDAG &DAG = DCI.DAG; | |||
5413 | auto *SN = cast<StoreSDNode>(N); | |||
5414 | auto &Op1 = N->getOperand(1); | |||
5415 | EVT MemVT = SN->getMemoryVT(); | |||
5416 | // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better | |||
5417 | // for the extraction to be done on a vMiN value, so that we can use VSTE. | |||
5418 | // If X has wider elements then convert it to: | |||
5419 | // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z). | |||
5420 | if (MemVT.isInteger() && SN->isTruncatingStore()) { | |||
5421 | if (SDValue Value = | |||
5422 | combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) { | |||
5423 | DCI.AddToWorklist(Value.getNode()); | |||
5424 | ||||
5425 | // Rewrite the store with the new form of stored value. | |||
5426 | return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value, | |||
5427 | SN->getBasePtr(), SN->getMemoryVT(), | |||
5428 | SN->getMemOperand()); | |||
5429 | } | |||
5430 | } | |||
5431 | // Combine STORE (BSWAP) into STRVH/STRV/STRVG | |||
5432 | if (!SN->isTruncatingStore() && | |||
5433 | Op1.getOpcode() == ISD::BSWAP && | |||
5434 | Op1.getNode()->hasOneUse() && | |||
5435 | (Op1.getValueType() == MVT::i16 || | |||
5436 | Op1.getValueType() == MVT::i32 || | |||
5437 | Op1.getValueType() == MVT::i64)) { | |||
5438 | ||||
5439 | SDValue BSwapOp = Op1.getOperand(0); | |||
5440 | ||||
5441 | if (BSwapOp.getValueType() == MVT::i16) | |||
5442 | BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp); | |||
5443 | ||||
5444 | SDValue Ops[] = { | |||
5445 | N->getOperand(0), BSwapOp, N->getOperand(2) | |||
5446 | }; | |||
5447 | ||||
5448 | return | |||
5449 | DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N), DAG.getVTList(MVT::Other), | |||
5450 | Ops, MemVT, SN->getMemOperand()); | |||
5451 | } | |||
5452 | return SDValue(); | |||
5453 | } | |||
5454 | ||||
5455 | SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT( | |||
5456 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
5457 | ||||
5458 | if (!Subtarget.hasVector()) | |||
5459 | return SDValue(); | |||
5460 | ||||
5461 | // Try to simplify a vector extraction. | |||
5462 | if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) { | |||
5463 | SDValue Op0 = N->getOperand(0); | |||
5464 | EVT VecVT = Op0.getValueType(); | |||
5465 | return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0, | |||
5466 | IndexN->getZExtValue(), DCI, false); | |||
5467 | } | |||
5468 | return SDValue(); | |||
5469 | } | |||
5470 | ||||
5471 | SDValue SystemZTargetLowering::combineJOIN_DWORDS( | |||
5472 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
5473 | SelectionDAG &DAG = DCI.DAG; | |||
5474 | // (join_dwords X, X) == (replicate X) | |||
5475 | if (N->getOperand(0) == N->getOperand(1)) | |||
5476 | return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0), | |||
5477 | N->getOperand(0)); | |||
5478 | return SDValue(); | |||
5479 | } | |||
5480 | ||||
5481 | SDValue SystemZTargetLowering::combineFP_ROUND( | |||
5482 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
5483 | // (fpround (extract_vector_elt X 0)) | |||
5484 | // (fpround (extract_vector_elt X 1)) -> | |||
5485 | // (extract_vector_elt (VROUND X) 0) | |||
5486 | // (extract_vector_elt (VROUND X) 2) | |||
5487 | // | |||
5488 | // This is a special case since the target doesn't really support v2f32s. | |||
5489 | SelectionDAG &DAG = DCI.DAG; | |||
5490 | SDValue Op0 = N->getOperand(0); | |||
5491 | if (N->getValueType(0) == MVT::f32 && | |||
5492 | Op0.hasOneUse() && | |||
5493 | Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
5494 | Op0.getOperand(0).getValueType() == MVT::v2f64 && | |||
5495 | Op0.getOperand(1).getOpcode() == ISD::Constant && | |||
5496 | cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) { | |||
5497 | SDValue Vec = Op0.getOperand(0); | |||
5498 | for (auto *U : Vec->uses()) { | |||
5499 | if (U != Op0.getNode() && | |||
5500 | U->hasOneUse() && | |||
5501 | U->getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
5502 | U->getOperand(0) == Vec && | |||
5503 | U->getOperand(1).getOpcode() == ISD::Constant && | |||
5504 | cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) { | |||
5505 | SDValue OtherRound = SDValue(*U->use_begin(), 0); | |||
5506 | if (OtherRound.getOpcode() == ISD::FP_ROUND && | |||
5507 | OtherRound.getOperand(0) == SDValue(U, 0) && | |||
5508 | OtherRound.getValueType() == MVT::f32) { | |||
5509 | SDValue VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N), | |||
5510 | MVT::v4f32, Vec); | |||
5511 | DCI.AddToWorklist(VRound.getNode()); | |||
5512 | SDValue Extract1 = | |||
5513 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32, | |||
5514 | VRound, DAG.getConstant(2, SDLoc(U), MVT::i32)); | |||
5515 | DCI.AddToWorklist(Extract1.getNode()); | |||
5516 | DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1); | |||
5517 | SDValue Extract0 = | |||
5518 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32, | |||
5519 | VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32)); | |||
5520 | return Extract0; | |||
5521 | } | |||
5522 | } | |||
5523 | } | |||
5524 | } | |||
5525 | return SDValue(); | |||
5526 | } | |||
5527 | ||||
5528 | SDValue SystemZTargetLowering::combineFP_EXTEND( | |||
5529 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
5530 | // (fpextend (extract_vector_elt X 0)) | |||
5531 | // (fpextend (extract_vector_elt X 2)) -> | |||
5532 | // (extract_vector_elt (VEXTEND X) 0) | |||
5533 | // (extract_vector_elt (VEXTEND X) 1) | |||
5534 | // | |||
5535 | // This is a special case since the target doesn't really support v2f32s. | |||
5536 | SelectionDAG &DAG = DCI.DAG; | |||
5537 | SDValue Op0 = N->getOperand(0); | |||
5538 | if (N->getValueType(0) == MVT::f64 && | |||
5539 | Op0.hasOneUse() && | |||
5540 | Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
5541 | Op0.getOperand(0).getValueType() == MVT::v4f32 && | |||
5542 | Op0.getOperand(1).getOpcode() == ISD::Constant && | |||
5543 | cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) { | |||
5544 | SDValue Vec = Op0.getOperand(0); | |||
5545 | for (auto *U : Vec->uses()) { | |||
5546 | if (U != Op0.getNode() && | |||
5547 | U->hasOneUse() && | |||
5548 | U->getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
5549 | U->getOperand(0) == Vec && | |||
5550 | U->getOperand(1).getOpcode() == ISD::Constant && | |||
5551 | cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 2) { | |||
5552 | SDValue OtherExtend = SDValue(*U->use_begin(), 0); | |||
5553 | if (OtherExtend.getOpcode() == ISD::FP_EXTEND && | |||
5554 | OtherExtend.getOperand(0) == SDValue(U, 0) && | |||
5555 | OtherExtend.getValueType() == MVT::f64) { | |||
5556 | SDValue VExtend = DAG.getNode(SystemZISD::VEXTEND, SDLoc(N), | |||
5557 | MVT::v2f64, Vec); | |||
5558 | DCI.AddToWorklist(VExtend.getNode()); | |||
5559 | SDValue Extract1 = | |||
5560 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f64, | |||
5561 | VExtend, DAG.getConstant(1, SDLoc(U), MVT::i32)); | |||
5562 | DCI.AddToWorklist(Extract1.getNode()); | |||
5563 | DAG.ReplaceAllUsesOfValueWith(OtherExtend, Extract1); | |||
5564 | SDValue Extract0 = | |||
5565 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f64, | |||
5566 | VExtend, DAG.getConstant(0, SDLoc(Op0), MVT::i32)); | |||
5567 | return Extract0; | |||
5568 | } | |||
5569 | } | |||
5570 | } | |||
5571 | } | |||
5572 | return SDValue(); | |||
5573 | } | |||
5574 | ||||
5575 | SDValue SystemZTargetLowering::combineBSWAP( | |||
5576 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
5577 | SelectionDAG &DAG = DCI.DAG; | |||
5578 | // Combine BSWAP (LOAD) into LRVH/LRV/LRVG | |||
5579 | if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && | |||
5580 | N->getOperand(0).hasOneUse() && | |||
5581 | (N->getValueType(0) == MVT::i16 || N->getValueType(0) == MVT::i32 || | |||
5582 | N->getValueType(0) == MVT::i64)) { | |||
5583 | SDValue Load = N->getOperand(0); | |||
5584 | LoadSDNode *LD = cast<LoadSDNode>(Load); | |||
5585 | ||||
5586 | // Create the byte-swapping load. | |||
5587 | SDValue Ops[] = { | |||
5588 | LD->getChain(), // Chain | |||
5589 | LD->getBasePtr() // Ptr | |||
5590 | }; | |||
5591 | EVT LoadVT = N->getValueType(0); | |||
5592 | if (LoadVT == MVT::i16) | |||
5593 | LoadVT = MVT::i32; | |||
5594 | SDValue BSLoad = | |||
5595 | DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N), | |||
5596 | DAG.getVTList(LoadVT, MVT::Other), | |||
5597 | Ops, LD->getMemoryVT(), LD->getMemOperand()); | |||
5598 | ||||
5599 | // If this is an i16 load, insert the truncate. | |||
5600 | SDValue ResVal = BSLoad; | |||
5601 | if (N->getValueType(0) == MVT::i16) | |||
5602 | ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad); | |||
5603 | ||||
5604 | // First, combine the bswap away. This makes the value produced by the | |||
5605 | // load dead. | |||
5606 | DCI.CombineTo(N, ResVal); | |||
5607 | ||||
5608 | // Next, combine the load away; we give it a bogus result value but a real | |||
5609 | // chain result. The result value is dead because the bswap is dead. | |||
5610 | DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); | |||
5611 | ||||
5612 | // Return N so it doesn't get rechecked! | |||
5613 | return SDValue(N, 0); | |||
5614 | } | |||
5615 | return SDValue(); | |||
5616 | } | |||
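
// What the combined node computes, modeled on plain memory (editorial
// sketch): a byte-reversed load on this big-endian target, i.e. the bswap
// folded into the load. The halfword flavor shown here is the i16 case
// that needs the i32 widening plus TRUNCATE above.
static uint16_t loadReversed16(const unsigned char *P) {
  // A normal big-endian halfword load yields (P[0] << 8) | P[1];
  // the byte-reversed form swaps the two bytes.
  return (uint16_t)(((unsigned)P[1] << 8) | P[0]);
}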
5617 | ||||
5618 | static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) { | |||
5619 | // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code | |||
5620 | // set by the CCReg instruction using the CCValid / CCMask masks. | |||
5621 | // If the CCReg instruction is itself an (ICMP (SELECT_CCMASK)) testing | |||
5622 | // the condition code set by some other instruction, see whether we | |||
5623 | // can directly use that condition code. | |||
5624 | bool Invert = false; | |||
5625 | ||||
5626 | // Verify that we have an appropriate mask for an EQ or NE comparison. | |||
5627 | if (CCValid != SystemZ::CCMASK_ICMP) | |||
5628 | return false; | |||
5629 | if (CCMask == SystemZ::CCMASK_CMP_NE) | |||
5630 | Invert = !Invert; | |||
5631 | else if (CCMask != SystemZ::CCMASK_CMP_EQ) | |||
5632 | return false; | |||
5633 | ||||
5634 | // Verify that we have an ICMP that is the user of a SELECT_CCMASK. | |||
5635 | SDNode *ICmp = CCReg.getNode(); | |||
5636 | if (ICmp->getOpcode() != SystemZISD::ICMP) | |||
5637 | return false; | |||
5638 | SDNode *Select = ICmp->getOperand(0).getNode(); | |||
5639 | if (Select->getOpcode() != SystemZISD::SELECT_CCMASK) | |||
5640 | return false; | |||
5641 | ||||
5642 | // Verify that the ICMP compares against one of the select values. | |||
5643 | auto *CompareVal = dyn_cast<ConstantSDNode>(ICmp->getOperand(1)); | |||
5644 | if (!CompareVal) | |||
5645 | return false; | |||
5646 | auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0)); | |||
5647 | if (!TrueVal) | |||
5648 | return false; | |||
5649 | auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1)); | |||
5650 | if (!FalseVal) | |||
5651 | return false; | |||
5652 | if (CompareVal->getZExtValue() == FalseVal->getZExtValue()) | |||
5653 | Invert = !Invert; | |||
5654 | else if (CompareVal->getZExtValue() != TrueVal->getZExtValue()) | |||
5655 | return false; | |||
5656 | ||||
5657 | // Compute the effective CC mask for the new branch or select. | |||
5658 | auto *NewCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2)); | |||
5659 | auto *NewCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3)); | |||
5660 | if (!NewCCValid || !NewCCMask) | |||
5661 | return false; | |||
5662 | CCValid = NewCCValid->getZExtValue(); | |||
5663 | CCMask = NewCCMask->getZExtValue(); | |||
5664 | if (Invert) | |||
5665 | CCMask ^= CCValid; | |||
5666 | ||||
5667 | // Return the updated CCReg link. | |||
5668 | CCReg = Select->getOperand(4); | |||
5669 | return true; | |||
5670 | } | |||
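
// The Invert step above works because a CC mask is a set: each of the four
// condition codes is one bit, CCValid is the set of codes the producing
// instruction can emit, and XOR with CCValid complements a mask within
// that set (editorial sketch; CC n is encoded as bit 8 >> n):
static int invertCCMask(int CCMask, int CCValid) {
  return CCMask ^ CCValid;
}
// For an integer compare, CCValid is 0b1110 (CC 0..2 possible) and
// "equal" is CCMask 0b1000; inverting yields 0b0110, i.e. "less or
// greater" -- exactly not-equal within the valid set.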
5671 | ||||
5672 | SDValue SystemZTargetLowering::combineBR_CCMASK( | |||
5673 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
5674 | SelectionDAG &DAG = DCI.DAG; | |||
5675 | ||||
5676 | // Combine BR_CCMASK (ICMP (SELECT_CCMASK)) into a single BR_CCMASK. | |||
5677 | auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1)); | |||
5678 | auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2)); | |||
5679 | if (!CCValid || !CCMask) | |||
5680 | return SDValue(); | |||
5681 | ||||
5682 | int CCValidVal = CCValid->getZExtValue(); | |||
5683 | int CCMaskVal = CCMask->getZExtValue(); | |||
5684 | SDValue Chain = N->getOperand(0); | |||
5685 | SDValue CCReg = N->getOperand(4); | |||
5686 | ||||
5687 | if (combineCCMask(CCReg, CCValidVal, CCMaskVal)) | |||
5688 | return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0), | |||
5689 | Chain, | |||
5690 | DAG.getConstant(CCValidVal, SDLoc(N), MVT::i32), | |||
5691 | DAG.getConstant(CCMaskVal, SDLoc(N), MVT::i32), | |||
5692 | N->getOperand(3), CCReg); | |||
5693 | return SDValue(); | |||
5694 | } | |||
5695 | ||||
5696 | SDValue SystemZTargetLowering::combineSELECT_CCMASK( | |||
5697 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
5698 | SelectionDAG &DAG = DCI.DAG; | |||
5699 | ||||
5700 | // Combine SELECT_CCMASK (ICMP (SELECT_CCMASK)) into a single SELECT_CCMASK. | |||
5701 | auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2)); | |||
5702 | auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3)); | |||
5703 | if (!CCValid || !CCMask) | |||
5704 | return SDValue(); | |||
5705 | ||||
5706 | int CCValidVal = CCValid->getZExtValue(); | |||
5707 | int CCMaskVal = CCMask->getZExtValue(); | |||
5708 | SDValue CCReg = N->getOperand(4); | |||
5709 | ||||
5710 | if (combineCCMask(CCReg, CCValidVal, CCMaskVal)) | |||
5711 | return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0), | |||
5712 | N->getOperand(0), | |||
5713 | N->getOperand(1), | |||
5714 | DAG.getConstant(CCValidVal, SDLoc(N), MVT::i32), | |||
5715 | DAG.getConstant(CCMaskVal, SDLoc(N), MVT::i32), | |||
5716 | CCReg); | |||
5717 | return SDValue(); | |||
5718 | } | |||
5719 | ||||
5720 | ||||
5721 | SDValue SystemZTargetLowering::combineGET_CCMASK( | |||
5722 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
5723 | ||||
5724 | // Optimize away GET_CCMASK (SELECT_CCMASK) if the CC masks are compatible | |||
5725 | auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1)); | |||
5726 | auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2)); | |||
5727 | if (!CCValid || !CCMask) | |||
5728 | return SDValue(); | |||
5729 | int CCValidVal = CCValid->getZExtValue(); | |||
5730 | int CCMaskVal = CCMask->getZExtValue(); | |||
5731 | ||||
5732 | SDValue Select = N->getOperand(0); | |||
5733 | if (Select->getOpcode() != SystemZISD::SELECT_CCMASK) | |||
5734 | return SDValue(); | |||
5735 | ||||
5736 | auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2)); | |||
5737 | auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3)); | |||
5738 | if (!SelectCCValid || !SelectCCMask) | |||
5739 | return SDValue(); | |||
5740 | int SelectCCValidVal = SelectCCValid->getZExtValue(); | |||
5741 | int SelectCCMaskVal = SelectCCMask->getZExtValue(); | |||
5742 | ||||
5743 | auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0)); | |||
5744 | auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1)); | |||
5745 | if (!TrueVal || !FalseVal) | |||
5746 | return SDValue(); | |||
5747 | if (TrueVal->getZExtValue() != 0 && FalseVal->getZExtValue() == 0) | |||
5748 | ; | |||
5749 | else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() != 0) | |||
5750 | SelectCCMaskVal ^= SelectCCValidVal; | |||
5751 | else | |||
5752 | return SDValue(); | |||
5753 | ||||
5754 | if (SelectCCValidVal & ~CCValidVal) | |||
5755 | return SDValue(); | |||
5756 | if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal)) | |||
5757 | return SDValue(); | |||
5758 | ||||
5759 | return Select->getOperand(4); | |||
5760 | } | |||
5761 | ||||
5762 | SDValue SystemZTargetLowering::combineIntDIVREM( | |||
5763 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
5764 | SelectionDAG &DAG = DCI.DAG; | |||
5765 | EVT VT = N->getValueType(0); | |||
5766 | // In the case where the divisor is a vector of constants, a cheaper | |||
5767 | // sequence of instructions can replace the divide. BuildSDIV is called to | |||
5768 | // do this during DAG combining, but it only succeeds when it can build a | |||
5769 | // multiplication node. The only option for SystemZ is ISD::SMUL_LOHI, and | |||
5770 | // since that is not Legal but Custom it can only happen before | |||
5771 | // legalization. Therefore we must scalarize this early, before the first | |||
5772 | // DAG combine. For widened vectors, this is already the result of type legalization. | |||
5773 | if (VT.isVector() && isTypeLegal(VT) && | |||
5774 | DAG.isConstantIntBuildVectorOrConstantInt(N->getOperand(1))) | |||
5775 | return DAG.UnrollVectorOp(N); | |||
5776 | return SDValue(); | |||
5777 | } | |||
5778 | ||||
5779 | SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N, | |||
5780 | DAGCombinerInfo &DCI) const { | |||
5781 | switch(N->getOpcode()) { | |||
5782 | default: break; | |||
5783 | case ISD::ZERO_EXTEND: return combineZERO_EXTEND(N, DCI); | |||
5784 | case ISD::SIGN_EXTEND: return combineSIGN_EXTEND(N, DCI); | |||
5785 | case ISD::SIGN_EXTEND_INREG: return combineSIGN_EXTEND_INREG(N, DCI); | |||
5786 | case SystemZISD::MERGE_HIGH: | |||
5787 | case SystemZISD::MERGE_LOW: return combineMERGE(N, DCI); | |||
5788 | case ISD::LOAD: return combineLOAD(N, DCI); | |||
5789 | case ISD::STORE: return combineSTORE(N, DCI); | |||
5790 | case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI); | |||
5791 | case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI); | |||
5792 | case ISD::FP_ROUND: return combineFP_ROUND(N, DCI); | |||
5793 | case ISD::FP_EXTEND: return combineFP_EXTEND(N, DCI); | |||
5794 | case ISD::BSWAP: return combineBSWAP(N, DCI); | |||
5795 | case SystemZISD::BR_CCMASK: return combineBR_CCMASK(N, DCI); | |||
5796 | case SystemZISD::SELECT_CCMASK: return combineSELECT_CCMASK(N, DCI); | |||
5797 | case SystemZISD::GET_CCMASK: return combineGET_CCMASK(N, DCI); | |||
5798 | case ISD::SDIV: | |||
5799 | case ISD::UDIV: | |||
5800 | case ISD::SREM: | |||
5801 | case ISD::UREM: return combineIntDIVREM(N, DCI); | |||
5802 | } | |||
5803 | ||||
5804 | return SDValue(); | |||
5805 | } | |||
5806 | ||||
5807 | // Return the demanded elements for source operand OpNo of Op, given that | |||
5808 | // DemandedElts are the elements demanded from Op itself. | |||
5809 | static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, | |||
5810 | unsigned OpNo) { | |||
5811 | EVT VT = Op.getValueType(); | |||
5812 | unsigned NumElts = (VT.isVector() ? VT.getVectorNumElements() : 1); | |||
5813 | APInt SrcDemE; | |||
5814 | unsigned Opcode = Op.getOpcode(); | |||
5815 | if (Opcode == ISD::INTRINSIC_WO_CHAIN) { | |||
5816 | unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
5817 | switch (Id) { | |||
5818 | case Intrinsic::s390_vpksh: // PACKS | |||
5819 | case Intrinsic::s390_vpksf: | |||
5820 | case Intrinsic::s390_vpksg: | |||
5821 | case Intrinsic::s390_vpkshs: // PACKS_CC | |||
5822 | case Intrinsic::s390_vpksfs: | |||
5823 | case Intrinsic::s390_vpksgs: | |||
5824 | case Intrinsic::s390_vpklsh: // PACKLS | |||
5825 | case Intrinsic::s390_vpklsf: | |||
5826 | case Intrinsic::s390_vpklsg: | |||
5827 | case Intrinsic::s390_vpklshs: // PACKLS_CC | |||
5828 | case Intrinsic::s390_vpklsfs: | |||
5829 | case Intrinsic::s390_vpklsgs: | |||
5830 | // VECTOR PACK truncates the elements of two source vectors into one. | |||
5831 | SrcDemE = DemandedElts; | |||
5832 | if (OpNo == 2) | |||
5833 | SrcDemE.lshrInPlace(NumElts / 2); | |||
5834 | SrcDemE = SrcDemE.trunc(NumElts / 2); | |||
5835 | break; | |||
5836 | // VECTOR UNPACK extends half the elements of the source vector. | |||
5837 | case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH | |||
5838 | case Intrinsic::s390_vuphh: | |||
5839 | case Intrinsic::s390_vuphf: | |||
5840 | case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH | |||
5841 | case Intrinsic::s390_vuplhh: | |||
5842 | case Intrinsic::s390_vuplhf: | |||
5843 | SrcDemE = APInt(NumElts * 2, 0); | |||
5844 | SrcDemE.insertBits(DemandedElts, 0); | |||
5845 | break; | |||
5846 | case Intrinsic::s390_vuplb: // VECTOR UNPACK LOW | |||
5847 | case Intrinsic::s390_vuplhw: | |||
5848 | case Intrinsic::s390_vuplf: | |||
5849 | case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW | |||
5850 | case Intrinsic::s390_vupllh: | |||
5851 | case Intrinsic::s390_vupllf: | |||
5852 | SrcDemE = APInt(NumElts * 2, 0); | |||
5853 | SrcDemE.insertBits(DemandedElts, NumElts); | |||
5854 | break; | |||
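| // (Added note: an unpack reads only half of the source's 2*NumElts | |||
| // elements - elements 0..NumElts-1 for UNPACK HIGH and elements | |||
| // NumElts..2*NumElts-1 for UNPACK LOW - which is why the demanded | |||
| // bits are inserted at offset 0 or NumElts respectively.) | |||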
5855 | case Intrinsic::s390_vpdi: { | |||
5856 | // VECTOR PERMUTE DWORD IMMEDIATE selects one element from each source. | |||
5857 | SrcDemE = APInt(NumElts, 0); | |||
5858 | if (!DemandedElts[OpNo - 1]) | |||
5859 | break; | |||
5860 | unsigned Mask = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); | |||
5861 | unsigned MaskBit = ((OpNo - 1) ? 1 : 4); | |||
5862 | // Demand input element 0 or 1, given by the mask bit value. | |||
5863 | SrcDemE.setBit((Mask & MaskBit) ? 1 : 0); | |||
5864 | break; | |||
5865 | } | |||
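| // (Added illustration: VPDI takes one doubleword from each operand; | |||
| // mask bit 4 selects the doubleword taken from operand 1 and mask | |||
| // bit 1 the one taken from operand 2. E.g. a mask of 4 demands | |||
| // element 1 of operand 1 and element 0 of operand 2.) | |||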
5866 | case Intrinsic::s390_vsldb: { | |||
5867 | // VECTOR SHIFT LEFT DOUBLE BY BYTE | |||
5868 | assert(VT == MVT::v16i8 && "Unexpected type."); | |||
5869 | unsigned FirstIdx = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); | |||
5870 | assert(FirstIdx > 0 && FirstIdx < 16 && "Unused operand."); | |||
5871 | unsigned NumSrc0Els = 16 - FirstIdx; | |||
5872 | SrcDemE = APInt(NumElts, 0); | |||
5873 | if (OpNo == 1) { | |||
5874 | APInt DemEls = DemandedElts.trunc(NumSrc0Els); | |||
5875 | SrcDemE.insertBits(DemEls, FirstIdx); | |||
5876 | } else { | |||
5877 | APInt DemEls = DemandedElts.lshr(NumSrc0Els); | |||
5878 | SrcDemE.insertBits(DemEls, 0); | |||
5879 | } | |||
5880 | break; | |||
5881 | } | |||
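| // (Added illustration: VSLDB concatenates the two operands and takes | |||
| // 16 bytes starting at byte FirstIdx, i.e. the last 16 - FirstIdx | |||
| // bytes of operand 1 followed by the first FirstIdx bytes of | |||
| // operand 2. E.g. FirstIdx = 12 demands at most bytes 12-15 of | |||
| // operand 1 and bytes 0-11 of operand 2.) | |||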
5882 | case Intrinsic::s390_vperm: | |||
5883 | SrcDemE = APInt(NumElts, 1); | |||
5884 | break; | |||
5885 | default: | |||
5886 | llvm_unreachable("Unhandled intrinsic."); | |||
5887 | break; | |||
5888 | } | |||
5889 | } else { | |||
5890 | switch (Opcode) { | |||
5891 | case SystemZISD::JOIN_DWORDS: | |||
5892 | // Scalar operand. | |||
5893 | SrcDemE = APInt(1, 1); | |||
5894 | break; | |||
5895 | case SystemZISD::SELECT_CCMASK: | |||
5896 | SrcDemE = DemandedElts; | |||
5897 | break; | |||
5898 | default: | |||
5899 | llvm_unreachable("Unhandled opcode."); | |||
5900 | break; | |||
5901 | } | |||
5902 | } | |||
5903 | return SrcDemE; | |||
5904 | } | |||
5905 | ||||
5906 | static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, | |||
5907 | const APInt &DemandedElts, | |||
5908 | const SelectionDAG &DAG, unsigned Depth, | |||
5909 | unsigned OpNo) { | |||
5910 | APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo); | |||
5911 | APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1); | |||
5912 | KnownBits LHSKnown = | |||
5913 | DAG.computeKnownBits(Op.getOperand(OpNo), Src0DemE, Depth + 1); | |||
5914 | KnownBits RHSKnown = | |||
5915 | DAG.computeKnownBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1); | |||
5916 | Known.Zero = LHSKnown.Zero & RHSKnown.Zero; | |||
5917 | Known.One = LHSKnown.One & RHSKnown.One; | |||
5918 | } | |||
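| // (Added note: every result element of these operations is a copy of | |||
| // an element of one of the two sources, so only bits known in both | |||
| // operands - the intersection of the Zero and One masks - can be | |||
| // claimed for the result.) | |||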
5919 | ||||
5920 | void | |||
5921 | SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, | |||
5922 | KnownBits &Known, | |||
5923 | const APInt &DemandedElts, | |||
5924 | const SelectionDAG &DAG, | |||
5925 | unsigned Depth) const { | |||
5926 | Known.resetAll(); | |||
5927 | ||||
5928 | // Intrinsic CC result is returned in the two low bits. | |||
5929 | unsigned tmp0, tmp1; // not used | |||
5930 | if (Op.getResNo() == 1 && isIntrinsicWithCC(Op, tmp0, tmp1)) { | |||
5931 | Known.Zero.setBitsFrom(2); | |||
5932 | return; | |||
5933 | } | |||
5934 | EVT VT = Op.getValueType(); | |||
5935 | if (Op.getResNo() != 0 || VT == MVT::Untyped) | |||
5936 | return; | |||
5937 | assert(Known.getBitWidth() == VT.getScalarSizeInBits() && | |||
5938 |        "KnownBits does not match VT in bitwidth"); | |||
5939 | assert((!VT.isVector() || | |||
5940 |         (DemandedElts.getBitWidth() == VT.getVectorNumElements())) && | |||
5941 |        "DemandedElts does not match VT number of elements"); | |||
5942 | unsigned BitWidth = Known.getBitWidth(); | |||
5943 | unsigned Opcode = Op.getOpcode(); | |||
5944 | if (Opcode == ISD::INTRINSIC_WO_CHAIN) { | |||
5945 | bool IsLogical = false; | |||
5946 | unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
5947 | switch (Id) { | |||
5948 | case Intrinsic::s390_vpksh: // PACKS | |||
5949 | case Intrinsic::s390_vpksf: | |||
5950 | case Intrinsic::s390_vpksg: | |||
5951 | case Intrinsic::s390_vpkshs: // PACKS_CC | |||
5952 | case Intrinsic::s390_vpksfs: | |||
5953 | case Intrinsic::s390_vpksgs: | |||
5954 | case Intrinsic::s390_vpklsh: // PACKLS | |||
5955 | case Intrinsic::s390_vpklsf: | |||
5956 | case Intrinsic::s390_vpklsg: | |||
5957 | case Intrinsic::s390_vpklshs: // PACKLS_CC | |||
5958 | case Intrinsic::s390_vpklsfs: | |||
5959 | case Intrinsic::s390_vpklsgs: | |||
5960 | case Intrinsic::s390_vpdi: | |||
5961 | case Intrinsic::s390_vsldb: | |||
5962 | case Intrinsic::s390_vperm: | |||
5963 | computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 1); | |||
5964 | break; | |||
5965 | case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH | |||
5966 | case Intrinsic::s390_vuplhh: | |||
5967 | case Intrinsic::s390_vuplhf: | |||
5968 | case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW | |||
5969 | case Intrinsic::s390_vupllh: | |||
5970 | case Intrinsic::s390_vupllf: | |||
5971 | IsLogical = true; | |||
5972 | LLVM_FALLTHROUGH; | |||
5973 | case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH | |||
5974 | case Intrinsic::s390_vuphh: | |||
5975 | case Intrinsic::s390_vuphf: | |||
5976 | case Intrinsic::s390_vuplb: // VECTOR UNPACK LOW | |||
5977 | case Intrinsic::s390_vuplhw: | |||
5978 | case Intrinsic::s390_vuplf: { | |||
5979 | SDValue SrcOp = Op.getOperand(1); | |||
5980 | unsigned SrcBitWidth = SrcOp.getScalarValueSizeInBits(); | |||
5981 | APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 0); | |||
5982 | Known = DAG.computeKnownBits(SrcOp, SrcDemE, Depth + 1); | |||
5983 | if (IsLogical) { | |||
5984 | Known = Known.zext(BitWidth); | |||
5985 | Known.Zero.setBitsFrom(SrcBitWidth); | |||
5986 | } else | |||
5987 | Known = Known.sext(BitWidth); | |||
5988 | break; | |||
5989 | } | |||
5990 | default: | |||
5991 | break; | |||
5992 | } | |||
5993 | } else { | |||
5994 | switch (Opcode) { | |||
5995 | case SystemZISD::JOIN_DWORDS: | |||
5996 | case SystemZISD::SELECT_CCMASK: | |||
5997 | computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 0); | |||
5998 | break; | |||
5999 | case SystemZISD::REPLICATE: { | |||
6000 | SDValue SrcOp = Op.getOperand(0); | |||
6001 | Known = DAG.computeKnownBits(SrcOp, Depth + 1); | |||
6002 | if (Known.getBitWidth() < BitWidth && isa<ConstantSDNode>(SrcOp)) | |||
6003 | Known = Known.sext(BitWidth); // VREPI sign extends the immediate. | |||
6004 | break; | |||
6005 | } | |||
6006 | default: | |||
6007 | break; | |||
6008 | } | |||
6009 | } | |||
6010 | ||||
6011 | // Known has the width of the source operand(s). Adjust if needed to match | |||
6012 | // the passed bitwidth. | |||
6013 | if (Known.getBitWidth() != BitWidth) | |||
6014 | Known = Known.zextOrTrunc(BitWidth); | |||
6015 | } | |||
6016 | ||||
6017 | static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts, | |||
6018 | const SelectionDAG &DAG, unsigned Depth, | |||
6019 | unsigned OpNo) { | |||
6020 | APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo); | |||
6021 | unsigned LHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo), Src0DemE, Depth + 1); | |||
6022 | if (LHS == 1) return 1; // Early out. | |||
6023 | APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1); | |||
6024 | unsigned RHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1); | |||
6025 | if (RHS == 1) return 1; // Early out. | |||
6026 | unsigned Common = std::min(LHS, RHS); | |||
6027 | unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits(); | |||
6028 | EVT VT = Op.getValueType(); | |||
6029 | unsigned VTBits = VT.getScalarSizeInBits(); | |||
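| // (Added note: for a PACK the result elements are truncations of | |||
| // wider source elements, so only sign bits reaching below the | |||
| // discarded top SrcExtraBits survive. E.g. packing 32-bit elements | |||
| // to 16 bits with Common = 20 known sign bits leaves 20 - 16 = 4.) | |||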
6030 | if (SrcBitWidth > VTBits) { // PACK | |||
6031 | unsigned SrcExtraBits = SrcBitWidth - VTBits; | |||
6032 | if (Common > SrcExtraBits) | |||
6033 | return (Common - SrcExtraBits); | |||
6034 | return 1; | |||
6035 | } | |||
6036 | assert(SrcBitWidth == VTBits && "Expected operands of same bitwidth."); | |||
6037 | return Common; | |||
6038 | } | |||
6039 | ||||
6040 | unsigned | |||
6041 | SystemZTargetLowering::ComputeNumSignBitsForTargetNode( | |||
6042 | SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, | |||
6043 | unsigned Depth) const { | |||
6044 | if (Op.getResNo() != 0) | |||
6045 | return 1; | |||
6046 | unsigned Opcode = Op.getOpcode(); | |||
6047 | if (Opcode == ISD::INTRINSIC_WO_CHAIN) { | |||
6048 | unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
6049 | switch (Id) { | |||
6050 | case Intrinsic::s390_vpksh: // PACKS | |||
6051 | case Intrinsic::s390_vpksf: | |||
6052 | case Intrinsic::s390_vpksg: | |||
6053 | case Intrinsic::s390_vpkshs: // PACKS_CC | |||
6054 | case Intrinsic::s390_vpksfs: | |||
6055 | case Intrinsic::s390_vpksgs: | |||
6056 | case Intrinsic::s390_vpklsh: // PACKLS | |||
6057 | case Intrinsic::s390_vpklsf: | |||
6058 | case Intrinsic::s390_vpklsg: | |||
6059 | case Intrinsic::s390_vpklshs: // PACKLS_CC | |||
6060 | case Intrinsic::s390_vpklsfs: | |||
6061 | case Intrinsic::s390_vpklsgs: | |||
6062 | case Intrinsic::s390_vpdi: | |||
6063 | case Intrinsic::s390_vsldb: | |||
6064 | case Intrinsic::s390_vperm: | |||
6065 | return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 1); | |||
6066 | case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH | |||
6067 | case Intrinsic::s390_vuphh: | |||
6068 | case Intrinsic::s390_vuphf: | |||
6069 | case Intrinsic::s390_vuplb: // VECTOR UNPACK LOW | |||
6070 | case Intrinsic::s390_vuplhw: | |||
6071 | case Intrinsic::s390_vuplf: { | |||
6072 | SDValue PackedOp = Op.getOperand(1); | |||
6073 | APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 1); | |||
6074 | unsigned Tmp = DAG.ComputeNumSignBits(PackedOp, SrcDemE, Depth + 1); | |||
6075 | EVT VT = Op.getValueType(); | |||
6076 | unsigned VTBits = VT.getScalarSizeInBits(); | |||
6077 | Tmp += VTBits - PackedOp.getScalarValueSizeInBits(); | |||
6078 | return Tmp; | |||
6079 | } | |||
6080 | default: | |||
6081 | break; | |||
6082 | } | |||
6083 | } else { | |||
6084 | switch (Opcode) { | |||
6085 | case SystemZISD::SELECT_CCMASK: | |||
6086 | return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 0); | |||
6087 | default: | |||
6088 | break; | |||
6089 | } | |||
6090 | } | |||
6091 | ||||
6092 | return 1; | |||
6093 | } | |||
6094 | ||||
6095 | //===----------------------------------------------------------------------===// | |||
6096 | // Custom insertion | |||
6097 | //===----------------------------------------------------------------------===// | |||
6098 | ||||
6099 | // Create a new basic block after MBB. | |||
6100 | static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) { | |||
6101 | MachineFunction &MF = *MBB->getParent(); | |||
6102 | MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock()); | |||
6103 | MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB); | |||
6104 | return NewMBB; | |||
6105 | } | |||
6106 | ||||
6107 | // Split MBB after MI and return the new block (the one that contains | |||
6108 | // instructions after MI). | |||
6109 | static MachineBasicBlock *splitBlockAfter(MachineBasicBlock::iterator MI, | |||
6110 | MachineBasicBlock *MBB) { | |||
6111 | MachineBasicBlock *NewMBB = emitBlockAfter(MBB); | |||
6112 | NewMBB->splice(NewMBB->begin(), MBB, | |||
6113 | std::next(MachineBasicBlock::iterator(MI)), MBB->end()); | |||
6114 | NewMBB->transferSuccessorsAndUpdatePHIs(MBB); | |||
6115 | return NewMBB; | |||
6116 | } | |||
6117 | ||||
6118 | // Split MBB before MI and return the new block (the one that contains MI). | |||
6119 | static MachineBasicBlock *splitBlockBefore(MachineBasicBlock::iterator MI, | |||
6120 | MachineBasicBlock *MBB) { | |||
6121 | MachineBasicBlock *NewMBB = emitBlockAfter(MBB); | |||
6122 | NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end()); | |||
6123 | NewMBB->transferSuccessorsAndUpdatePHIs(MBB); | |||
6124 | return NewMBB; | |||
6125 | } | |||
6126 | ||||
6127 | // Force base value Base into a register before MI. Return the register. | |||
6128 | static unsigned forceReg(MachineInstr &MI, MachineOperand &Base, | |||
6129 | const SystemZInstrInfo *TII) { | |||
6130 | if (Base.isReg()) | |||
6131 | return Base.getReg(); | |||
6132 | ||||
6133 | MachineBasicBlock *MBB = MI.getParent(); | |||
6134 | MachineFunction &MF = *MBB->getParent(); | |||
6135 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
6136 | ||||
6137 | unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); | |||
6138 | BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg) | |||
6139 | .add(Base) | |||
6140 | .addImm(0) | |||
6141 | .addReg(0); | |||
6142 | return Reg; | |||
6143 | } | |||
6144 | ||||
6145 | // The CC operand of MI might be missing a kill marker because there | |||
6146 | // were multiple uses of CC, and ISel didn't know which to mark. | |||
6147 | // Figure out whether MI should have had a kill marker. | |||
6148 | static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB) { | |||
6149 | // Scan forward through BB for a use/def of CC. | |||
6150 | MachineBasicBlock::iterator miI(std::next(MachineBasicBlock::iterator(MI))); | |||
6151 | for (MachineBasicBlock::iterator miE = MBB->end(); miI != miE; ++miI) { | |||
6152 | const MachineInstr& mi = *miI; | |||
6153 | if (mi.readsRegister(SystemZ::CC)) | |||
6154 | return false; | |||
6155 | if (mi.definesRegister(SystemZ::CC)) | |||
6156 | break; // Should have kill-flag - update below. | |||
6157 | } | |||
6158 | ||||
6159 | // If we hit the end of the block, check whether CC is live into a | |||
6160 | // successor. | |||
6161 | if (miI == MBB->end()) { | |||
6162 | for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI) | |||
6163 | if ((*SI)->isLiveIn(SystemZ::CC)) | |||
6164 | return false; | |||
6165 | } | |||
6166 | ||||
6167 | return true; | |||
6168 | } | |||
6169 | ||||
6170 | // Return true if it is OK for this Select pseudo-opcode to be cascaded | |||
6171 | // together with other Select pseudo-opcodes into a single basic-block with | |||
6172 | // a conditional jump around it. | |||
6173 | static bool isSelectPseudo(MachineInstr &MI) { | |||
6174 | switch (MI.getOpcode()) { | |||
6175 | case SystemZ::Select32: | |||
6176 | case SystemZ::Select64: | |||
6177 | case SystemZ::SelectF32: | |||
6178 | case SystemZ::SelectF64: | |||
6179 | case SystemZ::SelectF128: | |||
6180 | case SystemZ::SelectVR32: | |||
6181 | case SystemZ::SelectVR64: | |||
6182 | case SystemZ::SelectVR128: | |||
6183 | return true; | |||
6184 | ||||
6185 | default: | |||
6186 | return false; | |||
6187 | } | |||
6188 | } | |||
6189 | ||||
6190 | // Helper function, which inserts PHI functions into SinkMBB: | |||
6191 | // %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ], | |||
6192 | // where %FalseValue(i) and %TrueValue(i) are taken from the consecutive Selects | |||
6193 | // in the [MIItBegin, MIItEnd) range. | |||
6194 | static void createPHIsForSelects(MachineBasicBlock::iterator MIItBegin, | |||
6195 | MachineBasicBlock::iterator MIItEnd, | |||
6196 | MachineBasicBlock *TrueMBB, | |||
6197 | MachineBasicBlock *FalseMBB, | |||
6198 | MachineBasicBlock *SinkMBB) { | |||
6199 | MachineFunction *MF = TrueMBB->getParent(); | |||
6200 | const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); | |||
6201 | ||||
6202 | unsigned CCValid = MIItBegin->getOperand(3).getImm(); | |||
6203 | unsigned CCMask = MIItBegin->getOperand(4).getImm(); | |||
6204 | DebugLoc DL = MIItBegin->getDebugLoc(); | |||
6205 | ||||
6206 | MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin(); | |||
6207 | ||||
6208 | // As we are creating the PHIs, we have to be careful if there is more than | |||
6209 | // one. Later Selects may reference the results of earlier Selects, but later | |||
6210 | // PHIs have to reference the individual true/false inputs from earlier PHIs. | |||
6211 | // That also means that PHI construction must work forward from earlier to | |||
6212 | // later, and that the code must maintain a mapping from each earlier PHI's | |||
6213 | // destination register to the registers that went into that PHI. | |||
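| // For example (added illustration), given the cascaded Selects | |||
| //   %d1 = Select %t1, %f1 | |||
| //   %d2 = Select %d1, %f2 | |||
| // the second PHI must not use %d1 directly; the table maps %d1 to | |||
| // (%t1, %f1), so the emitted PHIs are | |||
| //   %d1 = phi [ %f1, FalseMBB ], [ %t1, TrueMBB ] | |||
| //   %d2 = phi [ %f2, FalseMBB ], [ %t1, TrueMBB ] | |||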
6214 | DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable; | |||
6215 | ||||
6216 | for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) { | |||
6217 | unsigned DestReg = MIIt->getOperand(0).getReg(); | |||
6218 | unsigned TrueReg = MIIt->getOperand(1).getReg(); | |||
6219 | unsigned FalseReg = MIIt->getOperand(2).getReg(); | |||
6220 | ||||
6221 | // If this Select we are generating is the opposite condition from | |||
6222 | // the jump we generated, then we have to swap the operands for the | |||
6223 | // PHI that is going to be generated. | |||
6224 | if (MIIt->getOperand(4).getImm() == (CCValid ^ CCMask)) | |||
6225 | std::swap(TrueReg, FalseReg); | |||
6226 | ||||
6227 | if (RegRewriteTable.find(TrueReg) != RegRewriteTable.end()) | |||
6228 | TrueReg = RegRewriteTable[TrueReg].first; | |||
6229 | ||||
6230 | if (RegRewriteTable.find(FalseReg) != RegRewriteTable.end()) | |||
6231 | FalseReg = RegRewriteTable[FalseReg].second; | |||
6232 | ||||
6233 | BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg) | |||
6234 | .addReg(TrueReg).addMBB(TrueMBB) | |||
6235 | .addReg(FalseReg).addMBB(FalseMBB); | |||
6236 | ||||
6237 | // Add this PHI to the rewrite table. | |||
6238 | RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg); | |||
6239 | } | |||
6240 | } | |||
6241 | ||||
6242 | // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI. | |||
6243 | MachineBasicBlock * | |||
6244 | SystemZTargetLowering::emitSelect(MachineInstr &MI, | |||
6245 | MachineBasicBlock *MBB) const { | |||
6246 | const SystemZInstrInfo *TII = | |||
6247 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); | |||
6248 | ||||
6249 | unsigned CCValid = MI.getOperand(3).getImm(); | |||
6250 | unsigned CCMask = MI.getOperand(4).getImm(); | |||
6251 | DebugLoc DL = MI.getDebugLoc(); | |||
6252 | ||||
6253 | // If we have a sequence of Select* pseudo instructions using the | |||
6254 | // same condition code value, we want to expand all of them into | |||
6255 | // a single pair of basic blocks using the same condition. | |||
6256 | MachineInstr *LastMI = &MI; | |||
6257 | MachineBasicBlock::iterator NextMIIt = | |||
6258 | std::next(MachineBasicBlock::iterator(MI)); | |||
6259 | ||||
6260 | if (isSelectPseudo(MI)) | |||
6261 | while (NextMIIt != MBB->end() && isSelectPseudo(*NextMIIt) && | |||
6262 | NextMIIt->getOperand(3).getImm() == CCValid && | |||
6263 | (NextMIIt->getOperand(4).getImm() == CCMask || | |||
6264 | NextMIIt->getOperand(4).getImm() == (CCValid ^ CCMask))) { | |||
6265 | LastMI = &*NextMIIt; | |||
6266 | ++NextMIIt; | |||
6267 | } | |||
6268 | ||||
6269 | MachineBasicBlock *StartMBB = MBB; | |||
6270 | MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); | |||
6271 | MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); | |||
6272 | ||||
6273 | // Unless CC was killed in the last Select instruction, mark it as | |||
6274 | // live-in to both FalseMBB and JoinMBB. | |||
6275 | if (!LastMI->killsRegister(SystemZ::CC) && !checkCCKill(*LastMI, JoinMBB)) { | |||
6276 | FalseMBB->addLiveIn(SystemZ::CC); | |||
6277 | JoinMBB->addLiveIn(SystemZ::CC); | |||
6278 | } | |||
6279 | ||||
6280 | // StartMBB: | |||
6281 | // BRC CCMask, JoinMBB | |||
6282 | // # fallthrough to FalseMBB | |||
6283 | MBB = StartMBB; | |||
6284 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) | |||
6285 | .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); | |||
6286 | MBB->addSuccessor(JoinMBB); | |||
6287 | MBB->addSuccessor(FalseMBB); | |||
6288 | ||||
6289 | // FalseMBB: | |||
6290 | // # fallthrough to JoinMBB | |||
6291 | MBB = FalseMBB; | |||
6292 | MBB->addSuccessor(JoinMBB); | |||
6293 | ||||
6294 | // JoinMBB: | |||
6295 | // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] | |||
6296 | // ... | |||
6297 | MBB = JoinMBB; | |||
6298 | MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI); | |||
6299 | MachineBasicBlock::iterator MIItEnd = | |||
6300 | std::next(MachineBasicBlock::iterator(LastMI)); | |||
6301 | createPHIsForSelects(MIItBegin, MIItEnd, StartMBB, FalseMBB, MBB); | |||
6302 | ||||
6303 | StartMBB->erase(MIItBegin, MIItEnd); | |||
6304 | return JoinMBB; | |||
6305 | } | |||
6306 | ||||
6307 | // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. | |||
6308 | // StoreOpcode is the store to use and Invert says whether the store should | |||
6309 | // happen when the condition is false rather than true. If a STORE ON | |||
6310 | // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. | |||
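| // (Added note: when no STORE ON CONDITION is available, the store is | |||
| // expanded into a conditional branch around a plain store, which is | |||
| // why the branch condition computed below is the inverse of the | |||
| // store condition.) | |||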
6311 | MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI, | |||
6312 | MachineBasicBlock *MBB, | |||
6313 | unsigned StoreOpcode, | |||
6314 | unsigned STOCOpcode, | |||
6315 | bool Invert) const { | |||
6316 | const SystemZInstrInfo *TII = | |||
6317 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); | |||
6318 | ||||
6319 | unsigned SrcReg = MI.getOperand(0).getReg(); | |||
6320 | MachineOperand Base = MI.getOperand(1); | |||
6321 | int64_t Disp = MI.getOperand(2).getImm(); | |||
6322 | unsigned IndexReg = MI.getOperand(3).getReg(); | |||
6323 | unsigned CCValid = MI.getOperand(4).getImm(); | |||
6324 | unsigned CCMask = MI.getOperand(5).getImm(); | |||
6325 | DebugLoc DL = MI.getDebugLoc(); | |||
6326 | ||||
6327 | StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); | |||
6328 | ||||
6329 | // Use STOCOpcode if possible. We could use different store patterns in | |||
6330 | // order to avoid matching the index register, but the performance trade-offs | |||
6331 | // might be more complicated in that case. | |||
6332 | if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) { | |||
6333 | if (Invert) | |||
6334 | CCMask ^= CCValid; | |||
6335 | ||||
6336 | // ISel pattern matching also adds a load memory operand of the same | |||
6337 | // address, so take special care to find the storing memory operand. | |||
6338 | MachineMemOperand *MMO = nullptr; | |||
6339 | for (auto *I : MI.memoperands()) | |||
6340 | if (I->isStore()) { | |||
6341 | MMO = I; | |||
6342 | break; | |||
6343 | } | |||
6344 | ||||
6345 | BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) | |||
6346 | .addReg(SrcReg) | |||
6347 | .add(Base) | |||
6348 | .addImm(Disp) | |||
6349 | .addImm(CCValid) | |||
6350 | .addImm(CCMask) | |||
6351 | .addMemOperand(MMO); | |||
6352 | ||||
6353 | MI.eraseFromParent(); | |||
6354 | return MBB; | |||
6355 | } | |||
6356 | ||||
6357 | // Get the condition needed to branch around the store. | |||
6358 | if (!Invert) | |||
6359 | CCMask ^= CCValid; | |||
6360 | ||||
6361 | MachineBasicBlock *StartMBB = MBB; | |||
6362 | MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); | |||
6363 | MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); | |||
6364 | ||||
6365 | // Unless CC was killed in the CondStore instruction, mark it as | |||
6366 | // live-in to both FalseMBB and JoinMBB. | |||
6367 | if (!MI.killsRegister(SystemZ::CC) && !checkCCKill(MI, JoinMBB)) { | |||
6368 | FalseMBB->addLiveIn(SystemZ::CC); | |||
6369 | JoinMBB->addLiveIn(SystemZ::CC); | |||
6370 | } | |||
6371 | ||||
6372 | // StartMBB: | |||
6373 | // BRC CCMask, JoinMBB | |||
6374 | // # fallthrough to FalseMBB | |||
6375 | MBB = StartMBB; | |||
6376 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) | |||
6377 | .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); | |||
6378 | MBB->addSuccessor(JoinMBB); | |||
6379 | MBB->addSuccessor(FalseMBB); | |||
6380 | ||||
6381 | // FalseMBB: | |||
6382 | // store %SrcReg, %Disp(%Index,%Base) | |||
6383 | // # fallthrough to JoinMBB | |||
6384 | MBB = FalseMBB; | |||
6385 | BuildMI(MBB, DL, TII->get(StoreOpcode)) | |||
6386 | .addReg(SrcReg) | |||
6387 | .add(Base) | |||
6388 | .addImm(Disp) | |||
6389 | .addReg(IndexReg); | |||
6390 | MBB->addSuccessor(JoinMBB); | |||
6391 | ||||
6392 | MI.eraseFromParent(); | |||
6393 | return JoinMBB; | |||
6394 | } | |||
6395 | ||||
6396 | // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_* | |||
6397 | // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that | |||
6398 | // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}. | |||
6399 | // BitSize is the width of the field in bits, or 0 if this is a partword | |||
6400 | // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize | |||
6401 | // is one of the operands. Invert says whether the field should be | |||
6402 | // inverted after performing BinOpcode (e.g. for NAND). | |||
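| // (Added sketch, not from the original source: for a full-word NAND | |||
| // the expansion below is essentially | |||
| //   Loop: %OldVal = phi [ %OrigVal, Start ], [ %Dest, Loop ] | |||
| //         %Tmp    = AND %OldVal, %Src2 | |||
| //         %NewVal = XILF %Tmp, 0xffffffff   ; invert the field | |||
| //         %Dest   = CS %OldVal, %NewVal, Disp(%Base) | |||
| //         JNE Loop | |||
| // i.e. a compare-and-swap retry loop around the plain operation.) | |||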
6403 | MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary( | |||
6404 | MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode, | |||
6405 | unsigned BitSize, bool Invert) const { | |||
6406 | MachineFunction &MF = *MBB->getParent(); | |||
6407 | const SystemZInstrInfo *TII = | |||
6408 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); | |||
6409 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
6410 | bool IsSubWord = (BitSize < 32); | |||
6411 | ||||
6412 | // Extract the operands. Base can be a register or a frame index. | |||
6413 | // Src2 can be a register or immediate. | |||
6414 | unsigned Dest = MI.getOperand(0).getReg(); | |||
6415 | MachineOperand Base = earlyUseOperand(MI.getOperand(1)); | |||
6416 | int64_t Disp = MI.getOperand(2).getImm(); | |||
6417 | MachineOperand Src2 = earlyUseOperand(MI.getOperand(3)); | |||
6418 | unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0); | |||
6419 | unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0); | |||
6420 | DebugLoc DL = MI.getDebugLoc(); | |||
6421 | if (IsSubWord) | |||
6422 | BitSize = MI.getOperand(6).getImm(); | |||
6423 | ||||
6424 | // Subword operations use 32-bit registers. | |||
6425 | const TargetRegisterClass *RC = (BitSize <= 32 ? | |||
6426 | &SystemZ::GR32BitRegClass : | |||
6427 | &SystemZ::GR64BitRegClass); | |||
6428 | unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; | |||
6429 | unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; | |||
6430 | ||||
6431 | // Get the right opcodes for the displacement. | |||
6432 | LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); | |||
6433 | CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); | |||
6434 | assert(LOpcode && CSOpcode && "Displacement out of range"); | |||
6435 | ||||
6436 | // Create virtual registers for temporary results. | |||
6437 | unsigned OrigVal = MRI.createVirtualRegister(RC); | |||
6438 | unsigned OldVal = MRI.createVirtualRegister(RC); | |||
6439 | unsigned NewVal = (BinOpcode || IsSubWord ? | |||
6440 | MRI.createVirtualRegister(RC) : Src2.getReg()); | |||
6441 | unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); | |||
6442 | unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); | |||
6443 | ||||
6444 | // Insert a basic block for the main loop. | |||
6445 | MachineBasicBlock *StartMBB = MBB; | |||
6446 | MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); | |||
6447 | MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); | |||
6448 | ||||
6449 | // StartMBB: | |||
6450 | // ... | |||
6451 | // %OrigVal = L Disp(%Base) | |||
6452 | // # fall through to LoopMBB | |||
6453 | MBB = StartMBB; | |||
6454 | BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0); | |||
6455 | MBB->addSuccessor(LoopMBB); | |||
6456 | ||||
6457 | // LoopMBB: | |||
6458 | // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ] | |||
6459 | // %RotatedOldVal = RLL %OldVal, 0(%BitShift) | |||
6460 | // %RotatedNewVal = OP %RotatedOldVal, %Src2 | |||
6461 | // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) | |||
6462 | // %Dest = CS %OldVal, %NewVal, Disp(%Base) | |||
6463 | // JNE LoopMBB | |||
6464 | // # fall through to DoneMBB | |||
6465 | MBB = LoopMBB; | |||
6466 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) | |||
6467 | .addReg(OrigVal).addMBB(StartMBB) | |||
6468 | .addReg(Dest).addMBB(LoopMBB); | |||
6469 | if (IsSubWord) | |||
6470 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) | |||
6471 | .addReg(OldVal).addReg(BitShift).addImm(0); | |||
6472 | if (Invert) { | |||
6473 | // Perform the operation normally and then invert every bit of the field. | |||
6474 | unsigned Tmp = MRI.createVirtualRegister(RC); | |||
6475 | BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2); | |||
6476 | if (BitSize <= 32) | |||
6477 | // XILF with the upper BitSize bits set. | |||
6478 | BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal) | |||
6479 | .addReg(Tmp).addImm(-1U << (32 - BitSize)); | |||
6480 | else { | |||
6481 | // Use LCGR and add -1 to the result, which is more compact than | |||
6482 | // an XILF, XILH pair. | |||
6483 | unsigned Tmp2 = MRI.createVirtualRegister(RC); | |||
6484 | BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp); | |||
6485 | BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal) | |||
6486 | .addReg(Tmp2).addImm(-1); | |||
6487 | } | |||
6488 | } else if (BinOpcode) | |||
6489 | // A simple binary operation. | |||
6490 | BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal) | |||
6491 | .addReg(RotatedOldVal) | |||
6492 | .add(Src2); | |||
6493 | else if (IsSubWord) | |||
6494 | // Use RISBG to rotate Src2 into position and use it to replace the | |||
6495 | // field in RotatedOldVal. | |||
6496 | BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal) | |||
6497 | .addReg(RotatedOldVal).addReg(Src2.getReg()) | |||
6498 | .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize); | |||
6499 | if (IsSubWord) | |||
6500 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) | |||
6501 | .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); | |||
6502 | BuildMI(MBB, DL, TII->get(CSOpcode), Dest) | |||
6503 | .addReg(OldVal) | |||
6504 | .addReg(NewVal) | |||
6505 | .add(Base) | |||
6506 | .addImm(Disp); | |||
6507 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) | |||
6508 | .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); | |||
6509 | MBB->addSuccessor(LoopMBB); | |||
6510 | MBB->addSuccessor(DoneMBB); | |||
6511 | ||||
6512 | MI.eraseFromParent(); | |||
6513 | return DoneMBB; | |||
6514 | } | |||
6515 | ||||
6516 | // Implement EmitInstrWithCustomInserter for pseudo | |||
6517 | // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the | |||
6518 | // instruction that should be used to compare the current field with the | |||
6519 | // minimum or maximum value. KeepOldMask is the BRC condition-code mask | |||
6520 | // for when the current field should be kept. BitSize is the width of | |||
6521 | // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction. | |||
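| // (Added note: for instance ATOMIC_LOAD_MIN uses a signed compare | |||
| // with KeepOldMask meaning "less or equal", so the loaded field is | |||
| // kept when it is already <= Src2 and replaced by Src2 otherwise.) | |||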
6522 | MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax( | |||
6523 | MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode, | |||
6524 | unsigned KeepOldMask, unsigned BitSize) const { | |||
6525 | MachineFunction &MF = *MBB->getParent(); | |||
6526 | const SystemZInstrInfo *TII = | |||
6527 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); | |||
6528 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
6529 | bool IsSubWord = (BitSize < 32); | |||
6530 | ||||
6531 | // Extract the operands. Base can be a register or a frame index. | |||
6532 | unsigned Dest = MI.getOperand(0).getReg(); | |||
6533 | MachineOperand Base = earlyUseOperand(MI.getOperand(1)); | |||
6534 | int64_t Disp = MI.getOperand(2).getImm(); | |||
6535 | unsigned Src2 = MI.getOperand(3).getReg(); | |||
6536 | unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0); | |||
6537 | unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0); | |||
6538 | DebugLoc DL = MI.getDebugLoc(); | |||
6539 | if (IsSubWord) | |||
6540 | BitSize = MI.getOperand(6).getImm(); | |||
6541 | ||||
6542 | // Subword operations use 32-bit registers. | |||
6543 | const TargetRegisterClass *RC = (BitSize <= 32 ? | |||
6544 | &SystemZ::GR32BitRegClass : | |||
6545 | &SystemZ::GR64BitRegClass); | |||
6546 | unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; | |||
6547 | unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; | |||
6548 | ||||
6549 | // Get the right opcodes for the displacement. | |||
6550 | LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); | |||
6551 | CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); | |||
6552 | assert(LOpcode && CSOpcode && "Displacement out of range"); | |||
6553 | ||||
6554 | // Create virtual registers for temporary results. | |||
6555 | unsigned OrigVal = MRI.createVirtualRegister(RC); | |||
6556 | unsigned OldVal = MRI.createVirtualRegister(RC); | |||
6557 | unsigned NewVal = MRI.createVirtualRegister(RC); | |||
6558 | unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); | |||
6559 | unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2); | |||
6560 | unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); | |||
6561 | ||||
6562 | // Insert 3 basic blocks for the loop. | |||
6563 | MachineBasicBlock *StartMBB = MBB; | |||
6564 | MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); | |||
6565 | MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); | |||
6566 | MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB); | |||
6567 | MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB); | |||
6568 | ||||
6569 | // StartMBB: | |||
6570 | // ... | |||
6571 | // %OrigVal = L Disp(%Base) | |||
6572 | // # fall through to LoopMBB | |||
6573 | MBB = StartMBB; | |||
6574 | BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0); | |||
6575 | MBB->addSuccessor(LoopMBB); | |||
6576 | ||||
6577 | // LoopMBB: | |||
6578 | // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ] | |||
6579 | // %RotatedOldVal = RLL %OldVal, 0(%BitShift) | |||
6580 | // CompareOpcode %RotatedOldVal, %Src2 | |||
6581 | // BRC KeepOldMask, UpdateMBB | |||
6582 | MBB = LoopMBB; | |||
6583 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) | |||
6584 | .addReg(OrigVal).addMBB(StartMBB) | |||
6585 | .addReg(Dest).addMBB(UpdateMBB); | |||
6586 | if (IsSubWord) | |||
6587 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) | |||
6588 | .addReg(OldVal).addReg(BitShift).addImm(0); | |||
6589 | BuildMI(MBB, DL, TII->get(CompareOpcode)) | |||
6590 | .addReg(RotatedOldVal).addReg(Src2); | |||
6591 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) | |||
6592 | .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB); | |||
6593 | MBB->addSuccessor(UpdateMBB); | |||
6594 | MBB->addSuccessor(UseAltMBB); | |||
6595 | ||||
6596 | // UseAltMBB: | |||
6597 | // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0 | |||
6598 | // # fall through to UpdateMBB | |||
6599 | MBB = UseAltMBB; | |||
6600 | if (IsSubWord) | |||
6601 | BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal) | |||
6602 | .addReg(RotatedOldVal).addReg(Src2) | |||
6603 | .addImm(32).addImm(31 + BitSize).addImm(0); | |||
6604 | MBB->addSuccessor(UpdateMBB); | |||
6605 | ||||
6606 | // UpdateMBB: | |||
6607 | // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ], | |||
6608 | // [ %RotatedAltVal, UseAltMBB ] | |||
6609 | // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) | |||
6610 | // %Dest = CS %OldVal, %NewVal, Disp(%Base) | |||
6611 | // JNE LoopMBB | |||
6612 | // # fall through to DoneMBB | |||
6613 | MBB = UpdateMBB; | |||
6614 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal) | |||
6615 | .addReg(RotatedOldVal).addMBB(LoopMBB) | |||
6616 | .addReg(RotatedAltVal).addMBB(UseAltMBB); | |||
6617 | if (IsSubWord) | |||
6618 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) | |||
6619 | .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); | |||
6620 | BuildMI(MBB, DL, TII->get(CSOpcode), Dest) | |||
6621 | .addReg(OldVal) | |||
6622 | .addReg(NewVal) | |||
6623 | .add(Base) | |||
6624 | .addImm(Disp); | |||
6625 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) | |||
6626 | .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); | |||
6627 | MBB->addSuccessor(LoopMBB); | |||
6628 | MBB->addSuccessor(DoneMBB); | |||
6629 | ||||
6630 | MI.eraseFromParent(); | |||
6631 | return DoneMBB; | |||
6632 | } | |||
6633 | ||||
6634 | // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW | |||
6635 | // instruction MI. | |||
6636 | MachineBasicBlock * | |||
6637 | SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI, | |||
6638 | MachineBasicBlock *MBB) const { | |||
6639 | ||||
6640 | MachineFunction &MF = *MBB->getParent(); | |||
6641 | const SystemZInstrInfo *TII = | |||
6642 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); | |||
6643 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
6644 | ||||
6645 | // Extract the operands. Base can be a register or a frame index. | |||
6646 | unsigned Dest = MI.getOperand(0).getReg(); | |||
6647 | MachineOperand Base = earlyUseOperand(MI.getOperand(1)); | |||
6648 | int64_t Disp = MI.getOperand(2).getImm(); | |||
6649 | unsigned OrigCmpVal = MI.getOperand(3).getReg(); | |||
6650 | unsigned OrigSwapVal = MI.getOperand(4).getReg(); | |||
6651 | unsigned BitShift = MI.getOperand(5).getReg(); | |||
6652 | unsigned NegBitShift = MI.getOperand(6).getReg(); | |||
6653 | int64_t BitSize = MI.getOperand(7).getImm(); | |||
6654 | DebugLoc DL = MI.getDebugLoc(); | |||
6655 | ||||
6656 | const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass; | |||
6657 | ||||
6658 | // Get the right opcodes for the displacement. | |||
6659 | unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp); | |||
6660 | unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp); | |||
6661 | assert(LOpcode && CSOpcode && "Displacement out of range"); | |||
6662 | ||||
6663 | // Create virtual registers for temporary results. | |||
6664 | unsigned OrigOldVal = MRI.createVirtualRegister(RC); | |||
6665 | unsigned OldVal = MRI.createVirtualRegister(RC); | |||
6666 | unsigned CmpVal = MRI.createVirtualRegister(RC); | |||
6667 | unsigned SwapVal = MRI.createVirtualRegister(RC); | |||
6668 | unsigned StoreVal = MRI.createVirtualRegister(RC); | |||
6669 | unsigned RetryOldVal = MRI.createVirtualRegister(RC); | |||
6670 | unsigned RetryCmpVal = MRI.createVirtualRegister(RC); | |||
6671 | unsigned RetrySwapVal = MRI.createVirtualRegister(RC); | |||
6672 | ||||
6673 | // Insert 2 basic blocks for the loop. | |||
6674 | MachineBasicBlock *StartMBB = MBB; | |||
6675 | MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); | |||
6676 | MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); | |||
6677 | MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB); | |||
6678 | ||||
6679 | // StartMBB: | |||
6680 | // ... | |||
6681 | // %OrigOldVal = L Disp(%Base) | |||
6682 | // # fall through to LoopMBB | |||
6683 | MBB = StartMBB; | |||
6684 | BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal) | |||
6685 | .add(Base) | |||
6686 | .addImm(Disp) | |||
6687 | .addReg(0); | |||
6688 | MBB->addSuccessor(LoopMBB); | |||
6689 | ||||
6690 | // LoopMBB: | |||
6691 | // %OldVal = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ] | |||
6692 | // %CmpVal = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ] | |||
6693 | // %SwapVal = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ] | |||
6694 | // %Dest = RLL %OldVal, BitSize(%BitShift) | |||
6695 | // ^^ The low BitSize bits contain the field | |||
6696 | // of interest. | |||
6697 | // %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0 | |||
6698 | // ^^ Replace the upper 32-BitSize bits of the | |||
6699 | // comparison value with those that we loaded, | |||
6700 | // so that we can use a full word comparison. | |||
6701 | // CR %Dest, %RetryCmpVal | |||
6702 | // JNE DoneMBB | |||
6703 | // # Fall through to SetMBB | |||
6704 | MBB = LoopMBB; | |||
6705 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) | |||
6706 | .addReg(OrigOldVal).addMBB(StartMBB) | |||
6707 | .addReg(RetryOldVal).addMBB(SetMBB); | |||
6708 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal) | |||
6709 | .addReg(OrigCmpVal).addMBB(StartMBB) | |||
6710 | .addReg(RetryCmpVal).addMBB(SetMBB); | |||
6711 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal) | |||
6712 | .addReg(OrigSwapVal).addMBB(StartMBB) | |||
6713 | .addReg(RetrySwapVal).addMBB(SetMBB); | |||
6714 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest) | |||
6715 | .addReg(OldVal).addReg(BitShift).addImm(BitSize); | |||
6716 | BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal) | |||
6717 | .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0); | |||
6718 | BuildMI(MBB, DL, TII->get(SystemZ::CR)) | |||
6719 | .addReg(Dest).addReg(RetryCmpVal); | |||
6720 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) | |||
6721 | .addImm(SystemZ::CCMASK_ICMP) | |||
6722 | .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB); | |||
6723 | MBB->addSuccessor(DoneMBB); | |||
6724 | MBB->addSuccessor(SetMBB); | |||
6725 | ||||
6726 | // SetMBB: | |||
6727 | // %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0 | |||
6728 | // ^^ Replace the upper 32-BitSize bits of the new | |||
6729 | // value with those that we loaded. | |||
6730 | // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift) | |||
6731 | // ^^ Rotate the new field to its proper position. | |||
6732 | // %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base) | |||
6733 | // JNE LoopMBB | |||
6734 | // # fall through to DoneMBB | |||
6735 | MBB = SetMBB; | |||
6736 | BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal) | |||
6737 | .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0); | |||
6738 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal) | |||
6739 | .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize); | |||
6740 | BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal) | |||
6741 | .addReg(OldVal) | |||
6742 | .addReg(StoreVal) | |||
6743 | .add(Base) | |||
6744 | .addImm(Disp); | |||
6745 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) | |||
6746 | .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); | |||
6747 | MBB->addSuccessor(LoopMBB); | |||
6748 | MBB->addSuccessor(DoneMBB); | |||
6749 | ||||
6750 | // If the CC def wasn't dead in the ATOMIC_CMP_SWAPW, mark CC as live-in | |||
6751 | // to the block after the loop. At this point, CC may have been defined | |||
6752 | // either by the CR in LoopMBB or by the CS in SetMBB. | |||
6753 | if (!MI.registerDefIsDead(SystemZ::CC)) | |||
6754 | DoneMBB->addLiveIn(SystemZ::CC); | |||
6755 | ||||
6756 | MI.eraseFromParent(); | |||
6757 | return DoneMBB; | |||
6758 | } | |||
6759 | ||||
6760 | // Emit a move from two GR64s to a GR128. | |||
6761 | MachineBasicBlock * | |||
6762 | SystemZTargetLowering::emitPair128(MachineInstr &MI, | |||
6763 | MachineBasicBlock *MBB) const { | |||
6764 | MachineFunction &MF = *MBB->getParent(); | |||
6765 | const SystemZInstrInfo *TII = | |||
6766 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); | |||
6767 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
6768 | DebugLoc DL = MI.getDebugLoc(); | |||
6769 | ||||
6770 | unsigned Dest = MI.getOperand(0).getReg(); | |||
6771 | unsigned Hi = MI.getOperand(1).getReg(); | |||
6772 | unsigned Lo = MI.getOperand(2).getReg(); | |||
6773 | unsigned Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); | |||
6774 | unsigned Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); | |||
6775 | ||||
6776 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1); | |||
6777 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2) | |||
6778 | .addReg(Tmp1).addReg(Hi).addImm(SystemZ::subreg_h64); | |||
6779 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest) | |||
6780 | .addReg(Tmp2).addReg(Lo).addImm(SystemZ::subreg_l64); | |||
6781 | ||||
6782 | MI.eraseFromParent(); | |||
6783 | return MBB; | |||
6784 | } | |||
6785 | ||||
6786 | // Emit an extension from a GR64 to a GR128. ClearEven is true | |||
6787 | // if the high register of the GR128 value must be cleared or false if | |||
6788 | // it's "don't care". | |||
6789 | MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI, | |||
6790 | MachineBasicBlock *MBB, | |||
6791 | bool ClearEven) const { | |||
6792 | MachineFunction &MF = *MBB->getParent(); | |||
6793 | const SystemZInstrInfo *TII = | |||
6794 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); | |||
6795 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
6796 | DebugLoc DL = MI.getDebugLoc(); | |||
6797 | ||||
6798 | unsigned Dest = MI.getOperand(0).getReg(); | |||
6799 | unsigned Src = MI.getOperand(1).getReg(); | |||
6800 | unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); | |||
6801 | ||||
6802 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128); | |||
6803 | if (ClearEven) { | |||
6804 | unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); | |||
6805 | unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass); | |||
6806 | ||||
6807 | BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64) | |||
6808 | .addImm(0); | |||
6809 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128) | |||
6810 | .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64); | |||
6811 | In128 = NewIn128; | |||
6812 | } | |||
6813 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest) | |||
6814 | .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64); | |||
6815 | ||||
6816 | MI.eraseFromParent(); | |||
6817 | return MBB; | |||
6818 | } | |||
6819 | ||||
6820 | MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper( | |||
6821 | MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const { | |||
6822 | MachineFunction &MF = *MBB->getParent(); | |||
6823 | const SystemZInstrInfo *TII = | |||
6824 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); | |||
6825 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
6826 | DebugLoc DL = MI.getDebugLoc(); | |||
6827 | ||||
6828 | MachineOperand DestBase = earlyUseOperand(MI.getOperand(0)); | |||
6829 | uint64_t DestDisp = MI.getOperand(1).getImm(); | |||
6830 | MachineOperand SrcBase = earlyUseOperand(MI.getOperand(2)); | |||
6831 | uint64_t SrcDisp = MI.getOperand(3).getImm(); | |||
6832 | uint64_t Length = MI.getOperand(4).getImm(); | |||
6833 | ||||
6834 | // When generating more than one CLC, all but the last will need to | |||
6835 | // branch to the end when a difference is found. | |||
6836 | MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ? | |||
6837 | splitBlockAfter(MI, MBB) : nullptr); | |||
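| // (Added note: each MVC/CLC below handles at most 256 bytes. For a | |||
| // 700-byte operation, say, the loop form performs two 256-byte steps | |||
| // and the straight-line code after it handles the remaining | |||
| // Length & 255 = 188 bytes.) | |||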
6838 | ||||
6839 | // Check for the loop form, in which operand 5 is the trip count. | |||
6840 | if (MI.getNumExplicitOperands() > 5) { | |||
6841 | bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase); | |||
6842 | ||||
6843 | uint64_t StartCountReg = MI.getOperand(5).getReg(); | |||
6844 | uint64_t StartSrcReg = forceReg(MI, SrcBase, TII); | |||
6845 | uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg : | |||
6846 | forceReg(MI, DestBase, TII)); | |||
6847 | ||||
6848 | const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass; | |||
6849 | uint64_t ThisSrcReg = MRI.createVirtualRegister(RC); | |||
6850 | uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg : | |||
6851 | MRI.createVirtualRegister(RC)); | |||
6852 | uint64_t NextSrcReg = MRI.createVirtualRegister(RC); | |||
6853 | uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg : | |||
6854 | MRI.createVirtualRegister(RC)); | |||
6855 | ||||
6856 | RC = &SystemZ::GR64BitRegClass; | |||
6857 | uint64_t ThisCountReg = MRI.createVirtualRegister(RC); | |||
6858 | uint64_t NextCountReg = MRI.createVirtualRegister(RC); | |||
6859 | ||||
6860 | MachineBasicBlock *StartMBB = MBB; | |||
6861 | MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); | |||
6862 | MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); | |||
6863 | MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB); | |||
6864 | ||||
6865 | // StartMBB: | |||
6866 | // # fall through to LoopMBB | |||
6867 | MBB->addSuccessor(LoopMBB); | |||
6868 | ||||
6869 | // LoopMBB: | |||
6870 | // %ThisDestReg = phi [ %StartDestReg, StartMBB ], | |||
6871 | // [ %NextDestReg, NextMBB ] | |||
6872 | // %ThisSrcReg = phi [ %StartSrcReg, StartMBB ], | |||
6873 | // [ %NextSrcReg, NextMBB ] | |||
6874 | // %ThisCountReg = phi [ %StartCountReg, StartMBB ], | |||
6875 | // [ %NextCountReg, NextMBB ] | |||
6876 | // ( PFD 2, 768+DestDisp(%ThisDestReg) ) | |||
6877 | // Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg) | |||
6878 | // ( JLH EndMBB ) | |||
6879 | // | |||
6880 | // The prefetch is used only for MVC. The JLH is used only for CLC. | |||
6881 | MBB = LoopMBB; | |||
6882 | ||||
6883 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg) | |||
6884 | .addReg(StartDestReg).addMBB(StartMBB) | |||
6885 | .addReg(NextDestReg).addMBB(NextMBB); | |||
6886 | if (!HaveSingleBase) | |||
6887 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg) | |||
6888 | .addReg(StartSrcReg).addMBB(StartMBB) | |||
6889 | .addReg(NextSrcReg).addMBB(NextMBB); | |||
6890 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg) | |||
6891 | .addReg(StartCountReg).addMBB(StartMBB) | |||
6892 | .addReg(NextCountReg).addMBB(NextMBB); | |||
6893 | if (Opcode == SystemZ::MVC) | |||
6894 | BuildMI(MBB, DL, TII->get(SystemZ::PFD)) | |||
6895 | .addImm(SystemZ::PFD_WRITE) | |||
6896 | .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0); | |||
6897 | BuildMI(MBB, DL, TII->get(Opcode)) | |||
6898 | .addReg(ThisDestReg).addImm(DestDisp).addImm(256) | |||
6899 | .addReg(ThisSrcReg).addImm(SrcDisp); | |||
6900 | if (EndMBB) { | |||
6901 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) | |||
6902 | .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) | |||
6903 | .addMBB(EndMBB); | |||
6904 | MBB->addSuccessor(EndMBB); | |||
6905 | MBB->addSuccessor(NextMBB); | |||
6906 | } | |||
6907 | ||||
6908 | // NextMBB: | |||
6909 | // %NextDestReg = LA 256(%ThisDestReg) | |||
6910 | // %NextSrcReg = LA 256(%ThisSrcReg) | |||
6911 | // %NextCountReg = AGHI %ThisCountReg, -1 | |||
6912 | // CGHI %NextCountReg, 0 | |||
6913 | // JLH LoopMBB | |||
6914 | // # fall through to DoneMBB | |||
6915 | // | |||
6916 | // The AGHI, CGHI and JLH should be converted to BRCTG by later passes. | |||
6917 | MBB = NextMBB; | |||
6918 | ||||
6919 | BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg) | |||
6920 | .addReg(ThisDestReg).addImm(256).addReg(0); | |||
6921 | if (!HaveSingleBase) | |||
6922 | BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg) | |||
6923 | .addReg(ThisSrcReg).addImm(256).addReg(0); | |||
6924 | BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg) | |||
6925 | .addReg(ThisCountReg).addImm(-1); | |||
6926 | BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) | |||
6927 | .addReg(NextCountReg).addImm(0); | |||
6928 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) | |||
6929 | .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) | |||
6930 | .addMBB(LoopMBB); | |||
6931 | MBB->addSuccessor(LoopMBB); | |||
6932 | MBB->addSuccessor(DoneMBB); | |||
6933 | ||||
6934 | DestBase = MachineOperand::CreateReg(NextDestReg, false); | |||
6935 | SrcBase = MachineOperand::CreateReg(NextSrcReg, false); | |||
6936 | Length &= 255; | |||
6937 | if (EndMBB && !Length) | |||
6938 | // If the loop handled the whole CLC range, DoneMBB will be empty with | |||
6939 | // CC live-through into EndMBB, so add it as live-in. | |||
6940 | DoneMBB->addLiveIn(SystemZ::CC); | |||
6941 | MBB = DoneMBB; | |||
6942 | } | |||
6943 | // Handle any remaining bytes with straight-line code. | |||
6944 | while (Length > 0) { | |||
6945 | uint64_t ThisLength = std::min(Length, uint64_t(256)); | |||
6946 | // The previous iteration might have created out-of-range displacements. | |||
6947 | // Apply them using LAY if so. | |||
6948 | if (!isUInt<12>(DestDisp)) { | |||
6949 | unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); | |||
6950 | BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg) | |||
6951 | .add(DestBase) | |||
6952 | .addImm(DestDisp) | |||
6953 | .addReg(0); | |||
6954 | DestBase = MachineOperand::CreateReg(Reg, false); | |||
6955 | DestDisp = 0; | |||
6956 | } | |||
6957 | if (!isUInt<12>(SrcDisp)) { | |||
6958 | unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); | |||
6959 | BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg) | |||
6960 | .add(SrcBase) | |||
6961 | .addImm(SrcDisp) | |||
6962 | .addReg(0); | |||
6963 | SrcBase = MachineOperand::CreateReg(Reg, false); | |||
6964 | SrcDisp = 0; | |||
6965 | } | |||
6966 | BuildMI(*MBB, MI, DL, TII->get(Opcode)) | |||
6967 | .add(DestBase) | |||
6968 | .addImm(DestDisp) | |||
6969 | .addImm(ThisLength) | |||
6970 | .add(SrcBase) | |||
6971 | .addImm(SrcDisp) | |||
6972 | .setMemRefs(MI.memoperands()); | |||
6973 | DestDisp += ThisLength; | |||
6974 | SrcDisp += ThisLength; | |||
6975 | Length -= ThisLength; | |||
6976 | // If there's another CLC to go, branch to the end if a difference | |||
6977 | // was found. | |||
6978 | if (EndMBB && Length > 0) { | |||
6979 | MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB); | |||
6980 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) | |||
6981 | .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) | |||
6982 | .addMBB(EndMBB); | |||
6983 | MBB->addSuccessor(EndMBB); | |||
6984 | MBB->addSuccessor(NextMBB); | |||
6985 | MBB = NextMBB; | |||
6986 | } | |||
6987 | } | |||
6988 | if (EndMBB) { | |||
6989 | MBB->addSuccessor(EndMBB); | |||
6990 | MBB = EndMBB; | |||
6991 | MBB->addLiveIn(SystemZ::CC); | |||
6992 | } | |||
6993 | ||||
6994 | MI.eraseFromParent(); | |||
6995 | return MBB; | |||
6996 | } | |||
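// Editorial note (illustrative, not part of the original source): for a
// straight-line MVCSequence with Length == 600 and both displacements
// starting at 0, the byte-chunking loop above emits
//   MVC 0(256,%rDest),0(%rSrc)
//   MVC 256(256,%rDest),256(%rSrc)
//   MVC 512(88,%rDest),512(%rSrc)
// All displacements stay below 4096, so no LAY-based reload of the base
// registers is required.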
6997 | ||||
6998 | // Decompose string pseudo-instruction MI into a loop that continually performs | |||
6999 | // Opcode until CC != 3. | |||
7000 | MachineBasicBlock *SystemZTargetLowering::emitStringWrapper( | |||
7001 | MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const { | |||
7002 | MachineFunction &MF = *MBB->getParent(); | |||
7003 | const SystemZInstrInfo *TII = | |||
7004 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); | |||
7005 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
7006 | DebugLoc DL = MI.getDebugLoc(); | |||
7007 | ||||
7008 | uint64_t End1Reg = MI.getOperand(0).getReg(); | |||
7009 | uint64_t Start1Reg = MI.getOperand(1).getReg(); | |||
7010 | uint64_t Start2Reg = MI.getOperand(2).getReg(); | |||
7011 | uint64_t CharReg = MI.getOperand(3).getReg(); | |||
7012 | ||||
7013 | const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass; | |||
7014 | uint64_t This1Reg = MRI.createVirtualRegister(RC); | |||
7015 | uint64_t This2Reg = MRI.createVirtualRegister(RC); | |||
7016 | uint64_t End2Reg = MRI.createVirtualRegister(RC); | |||
7017 | ||||
7018 | MachineBasicBlock *StartMBB = MBB; | |||
7019 | MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); | |||
7020 | MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); | |||
7021 | ||||
7022 | // StartMBB: | |||
7023 | // # fall through to LoopMBB | |||
7024 | MBB->addSuccessor(LoopMBB); | |||
7025 | ||||
7026 | // LoopMBB: | |||
7027 | // %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ] | |||
7028 | // %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ] | |||
7029 | // R0L = %CharReg | |||
7030 | // %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L | |||
7031 | // JO LoopMBB | |||
7032 | // # fall through to DoneMBB | |||
7033 | // | |||
7034 | // The load of R0L can be hoisted by post-RA LICM. | |||
7035 | MBB = LoopMBB; | |||
7036 | ||||
7037 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg) | |||
7038 | .addReg(Start1Reg).addMBB(StartMBB) | |||
7039 | .addReg(End1Reg).addMBB(LoopMBB); | |||
7040 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg) | |||
7041 | .addReg(Start2Reg).addMBB(StartMBB) | |||
7042 | .addReg(End2Reg).addMBB(LoopMBB); | |||
7043 | BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg); | |||
7044 | BuildMI(MBB, DL, TII->get(Opcode)) | |||
7045 | .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define) | |||
7046 | .addReg(This1Reg).addReg(This2Reg); | |||
7047 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) | |||
7048 | .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB); | |||
7049 | MBB->addSuccessor(LoopMBB); | |||
7050 | MBB->addSuccessor(DoneMBB); | |||
7051 | ||||
7052 | DoneMBB->addLiveIn(SystemZ::CC); | |||
7053 | ||||
7054 | MI.eraseFromParent(); | |||
7055 | return DoneMBB; | |||
7056 | } | |||
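// Editorial note (illustrative): this wrapper backs the CLSTLoop, MVSTLoop
// and SRSTLoop pseudos handled in EmitInstrWithCustomInserter below. These
// string instructions are interruptible and report CC == 3 when they stop
// early, which is why the loop simply re-issues Opcode until CC != 3.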
7057 | ||||
7058 | // Update TBEGIN instruction with final opcode and register clobbers. | |||
7059 | MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin( | |||
7060 | MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode, | |||
7061 | bool NoFloat) const { | |||
7062 | MachineFunction &MF = *MBB->getParent(); | |||
7063 | const TargetFrameLowering *TFI = Subtarget.getFrameLowering(); | |||
7064 | const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); | |||
7065 | ||||
7066 | // Update opcode. | |||
7067 | MI.setDesc(TII->get(Opcode)); | |||
7068 | ||||
7069 | // We cannot handle a TBEGIN that clobbers the stack or frame pointer. | |||
7070 | // Make sure to add the corresponding GRSM bits if they are missing. | |||
7071 | uint64_t Control = MI.getOperand(2).getImm(); | |||
7072 | static const unsigned GPRControlBit[16] = { | |||
7073 | 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000, | |||
7074 | 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100 | |||
7075 | }; | |||
7076 | Control |= GPRControlBit[15]; | |||
7077 | if (TFI->hasFP(MF)) | |||
7078 | Control |= GPRControlBit[11]; | |||
7079 | MI.getOperand(2).setImm(Control); | |||
7080 | ||||
7081 | // Add GPR clobbers. | |||
7082 | for (int I = 0; I < 16; I++) { | |||
7083 | if ((Control & GPRControlBit[I]) == 0) { | |||
7084 | unsigned Reg = SystemZMC::GR64Regs[I]; | |||
7085 | MI.addOperand(MachineOperand::CreateReg(Reg, true, true)); | |||
7086 | } | |||
7087 | } | |||
7088 | ||||
7089 | // Add FPR/VR clobbers. | |||
7090 | if (!NoFloat && (Control & 4) != 0) { | |||
7091 | if (Subtarget.hasVector()) { | |||
7092 | for (int I = 0; I < 32; I++) { | |||
7093 | unsigned Reg = SystemZMC::VR128Regs[I]; | |||
7094 | MI.addOperand(MachineOperand::CreateReg(Reg, true, true)); | |||
7095 | } | |||
7096 | } else { | |||
7097 | for (int I = 0; I < 16; I++) { | |||
7098 | unsigned Reg = SystemZMC::FP64Regs[I]; | |||
7099 | MI.addOperand(MachineOperand::CreateReg(Reg, true, true)); | |||
7100 | } | |||
7101 | } | |||
7102 | } | |||
7103 | ||||
7104 | return MBB; | |||
7105 | } | |||
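// Editorial note (illustrative): each bit of the TBEGIN general-register
// save mask covers an even/odd register pair, which is why every value in
// GPRControlBit[] appears twice. Control |= GPRControlBit[15] (0x0100) keeps
// the %r14/%r15 pair -- including the stack pointer -- restored on abort,
// and GPRControlBit[11] (0x0400) does the same for the frame-pointer pair
// when hasFP(MF) is true; all unsaved pairs are modeled as clobbers instead.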
7106 | ||||
7107 | MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0( | |||
7108 | MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const { | |||
7109 | MachineFunction &MF = *MBB->getParent(); | |||
7110 | MachineRegisterInfo *MRI = &MF.getRegInfo(); | |||
7111 | const SystemZInstrInfo *TII = | |||
7112 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); | |||
7113 | DebugLoc DL = MI.getDebugLoc(); | |||
7114 | ||||
7115 | unsigned SrcReg = MI.getOperand(0).getReg(); | |||
7116 | ||||
7117 | // Create new virtual register of the same class as source. | |||
7118 | const TargetRegisterClass *RC = MRI->getRegClass(SrcReg); | |||
7119 | unsigned DstReg = MRI->createVirtualRegister(RC); | |||
7120 | ||||
7121 | // Replace pseudo with a normal load-and-test that models the def as | |||
7122 | // well. | |||
7123 | BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg) | |||
7124 | .addReg(SrcReg); | |||
7125 | MI.eraseFromParent(); | |||
7126 | ||||
7127 | return MBB; | |||
7128 | } | |||
7129 | ||||
7130 | MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter( | |||
7131 | MachineInstr &MI, MachineBasicBlock *MBB) const { | |||
7132 | switch (MI.getOpcode()) { | |||
7133 | case SystemZ::Select32: | |||
7134 | case SystemZ::Select64: | |||
7135 | case SystemZ::SelectF32: | |||
7136 | case SystemZ::SelectF64: | |||
7137 | case SystemZ::SelectF128: | |||
7138 | case SystemZ::SelectVR32: | |||
7139 | case SystemZ::SelectVR64: | |||
7140 | case SystemZ::SelectVR128: | |||
7141 | return emitSelect(MI, MBB); | |||
7142 | ||||
7143 | case SystemZ::CondStore8Mux: | |||
7144 | return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false); | |||
7145 | case SystemZ::CondStore8MuxInv: | |||
7146 | return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true); | |||
7147 | case SystemZ::CondStore16Mux: | |||
7148 | return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false); | |||
7149 | case SystemZ::CondStore16MuxInv: | |||
7150 | return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true); | |||
7151 | case SystemZ::CondStore32Mux: | |||
7152 | return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false); | |||
7153 | case SystemZ::CondStore32MuxInv: | |||
7154 | return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true); | |||
7155 | case SystemZ::CondStore8: | |||
7156 | return emitCondStore(MI, MBB, SystemZ::STC, 0, false); | |||
7157 | case SystemZ::CondStore8Inv: | |||
7158 | return emitCondStore(MI, MBB, SystemZ::STC, 0, true); | |||
7159 | case SystemZ::CondStore16: | |||
7160 | return emitCondStore(MI, MBB, SystemZ::STH, 0, false); | |||
7161 | case SystemZ::CondStore16Inv: | |||
7162 | return emitCondStore(MI, MBB, SystemZ::STH, 0, true); | |||
7163 | case SystemZ::CondStore32: | |||
7164 | return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false); | |||
7165 | case SystemZ::CondStore32Inv: | |||
7166 | return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true); | |||
7167 | case SystemZ::CondStore64: | |||
7168 | return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false); | |||
7169 | case SystemZ::CondStore64Inv: | |||
7170 | return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true); | |||
7171 | case SystemZ::CondStoreF32: | |||
7172 | return emitCondStore(MI, MBB, SystemZ::STE, 0, false); | |||
7173 | case SystemZ::CondStoreF32Inv: | |||
7174 | return emitCondStore(MI, MBB, SystemZ::STE, 0, true); | |||
7175 | case SystemZ::CondStoreF64: | |||
7176 | return emitCondStore(MI, MBB, SystemZ::STD, 0, false); | |||
7177 | case SystemZ::CondStoreF64Inv: | |||
7178 | return emitCondStore(MI, MBB, SystemZ::STD, 0, true); | |||
7179 | ||||
7180 | case SystemZ::PAIR128: | |||
7181 | return emitPair128(MI, MBB); | |||
7182 | case SystemZ::AEXT128: | |||
7183 | return emitExt128(MI, MBB, false); | |||
7184 | case SystemZ::ZEXT128: | |||
7185 | return emitExt128(MI, MBB, true); | |||
7186 | ||||
7187 | case SystemZ::ATOMIC_SWAPW: | |||
7188 | return emitAtomicLoadBinary(MI, MBB, 0, 0); | |||
7189 | case SystemZ::ATOMIC_SWAP_32: | |||
7190 | return emitAtomicLoadBinary(MI, MBB, 0, 32); | |||
7191 | case SystemZ::ATOMIC_SWAP_64: | |||
7192 | return emitAtomicLoadBinary(MI, MBB, 0, 64); | |||
7193 | ||||
7194 | case SystemZ::ATOMIC_LOADW_AR: | |||
7195 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0); | |||
7196 | case SystemZ::ATOMIC_LOADW_AFI: | |||
7197 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0); | |||
7198 | case SystemZ::ATOMIC_LOAD_AR: | |||
7199 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32); | |||
7200 | case SystemZ::ATOMIC_LOAD_AHI: | |||
7201 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32); | |||
7202 | case SystemZ::ATOMIC_LOAD_AFI: | |||
7203 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32); | |||
7204 | case SystemZ::ATOMIC_LOAD_AGR: | |||
7205 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64); | |||
7206 | case SystemZ::ATOMIC_LOAD_AGHI: | |||
7207 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64); | |||
7208 | case SystemZ::ATOMIC_LOAD_AGFI: | |||
7209 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64); | |||
7210 | ||||
7211 | case SystemZ::ATOMIC_LOADW_SR: | |||
7212 | return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0); | |||
7213 | case SystemZ::ATOMIC_LOAD_SR: | |||
7214 | return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32); | |||
7215 | case SystemZ::ATOMIC_LOAD_SGR: | |||
7216 | return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64); | |||
7217 | ||||
7218 | case SystemZ::ATOMIC_LOADW_NR: | |||
7219 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0); | |||
7220 | case SystemZ::ATOMIC_LOADW_NILH: | |||
7221 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0); | |||
7222 | case SystemZ::ATOMIC_LOAD_NR: | |||
7223 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32); | |||
7224 | case SystemZ::ATOMIC_LOAD_NILL: | |||
7225 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32); | |||
7226 | case SystemZ::ATOMIC_LOAD_NILH: | |||
7227 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32); | |||
7228 | case SystemZ::ATOMIC_LOAD_NILF: | |||
7229 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32); | |||
7230 | case SystemZ::ATOMIC_LOAD_NGR: | |||
7231 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64); | |||
7232 | case SystemZ::ATOMIC_LOAD_NILL64: | |||
7233 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64); | |||
7234 | case SystemZ::ATOMIC_LOAD_NILH64: | |||
7235 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64); | |||
7236 | case SystemZ::ATOMIC_LOAD_NIHL64: | |||
7237 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64); | |||
7238 | case SystemZ::ATOMIC_LOAD_NIHH64: | |||
7239 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64); | |||
7240 | case SystemZ::ATOMIC_LOAD_NILF64: | |||
7241 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64); | |||
7242 | case SystemZ::ATOMIC_LOAD_NIHF64: | |||
7243 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64); | |||
7244 | ||||
7245 | case SystemZ::ATOMIC_LOADW_OR: | |||
7246 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0); | |||
7247 | case SystemZ::ATOMIC_LOADW_OILH: | |||
7248 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0); | |||
7249 | case SystemZ::ATOMIC_LOAD_OR: | |||
7250 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32); | |||
7251 | case SystemZ::ATOMIC_LOAD_OILL: | |||
7252 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32); | |||
7253 | case SystemZ::ATOMIC_LOAD_OILH: | |||
7254 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32); | |||
7255 | case SystemZ::ATOMIC_LOAD_OILF: | |||
7256 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32); | |||
7257 | case SystemZ::ATOMIC_LOAD_OGR: | |||
7258 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64); | |||
7259 | case SystemZ::ATOMIC_LOAD_OILL64: | |||
7260 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64); | |||
7261 | case SystemZ::ATOMIC_LOAD_OILH64: | |||
7262 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64); | |||
7263 | case SystemZ::ATOMIC_LOAD_OIHL64: | |||
7264 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64); | |||
7265 | case SystemZ::ATOMIC_LOAD_OIHH64: | |||
7266 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64); | |||
7267 | case SystemZ::ATOMIC_LOAD_OILF64: | |||
7268 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64); | |||
7269 | case SystemZ::ATOMIC_LOAD_OIHF64: | |||
7270 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64); | |||
7271 | ||||
7272 | case SystemZ::ATOMIC_LOADW_XR: | |||
7273 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0); | |||
7274 | case SystemZ::ATOMIC_LOADW_XILF: | |||
7275 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0); | |||
7276 | case SystemZ::ATOMIC_LOAD_XR: | |||
7277 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32); | |||
7278 | case SystemZ::ATOMIC_LOAD_XILF: | |||
7279 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32); | |||
7280 | case SystemZ::ATOMIC_LOAD_XGR: | |||
7281 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64); | |||
7282 | case SystemZ::ATOMIC_LOAD_XILF64: | |||
7283 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64); | |||
7284 | case SystemZ::ATOMIC_LOAD_XIHF64: | |||
7285 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64); | |||
7286 | ||||
7287 | case SystemZ::ATOMIC_LOADW_NRi: | |||
7288 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true); | |||
7289 | case SystemZ::ATOMIC_LOADW_NILHi: | |||
7290 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true); | |||
7291 | case SystemZ::ATOMIC_LOAD_NRi: | |||
7292 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true); | |||
7293 | case SystemZ::ATOMIC_LOAD_NILLi: | |||
7294 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true); | |||
7295 | case SystemZ::ATOMIC_LOAD_NILHi: | |||
7296 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true); | |||
7297 | case SystemZ::ATOMIC_LOAD_NILFi: | |||
7298 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true); | |||
7299 | case SystemZ::ATOMIC_LOAD_NGRi: | |||
7300 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true); | |||
7301 | case SystemZ::ATOMIC_LOAD_NILL64i: | |||
7302 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true); | |||
7303 | case SystemZ::ATOMIC_LOAD_NILH64i: | |||
7304 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true); | |||
7305 | case SystemZ::ATOMIC_LOAD_NIHL64i: | |||
7306 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true); | |||
7307 | case SystemZ::ATOMIC_LOAD_NIHH64i: | |||
7308 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true); | |||
7309 | case SystemZ::ATOMIC_LOAD_NILF64i: | |||
7310 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true); | |||
7311 | case SystemZ::ATOMIC_LOAD_NIHF64i: | |||
7312 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true); | |||
7313 | ||||
7314 | case SystemZ::ATOMIC_LOADW_MIN: | |||
7315 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, | |||
7316 | SystemZ::CCMASK_CMP_LE, 0); | |||
7317 | case SystemZ::ATOMIC_LOAD_MIN_32: | |||
7318 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, | |||
7319 | SystemZ::CCMASK_CMP_LE, 32); | |||
7320 | case SystemZ::ATOMIC_LOAD_MIN_64: | |||
7321 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR, | |||
7322 | SystemZ::CCMASK_CMP_LE, 64); | |||
7323 | ||||
7324 | case SystemZ::ATOMIC_LOADW_MAX: | |||
7325 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, | |||
7326 | SystemZ::CCMASK_CMP_GE, 0); | |||
7327 | case SystemZ::ATOMIC_LOAD_MAX_32: | |||
7328 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, | |||
7329 | SystemZ::CCMASK_CMP_GE, 32); | |||
7330 | case SystemZ::ATOMIC_LOAD_MAX_64: | |||
7331 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR, | |||
7332 | SystemZ::CCMASK_CMP_GE, 64); | |||
7333 | ||||
7334 | case SystemZ::ATOMIC_LOADW_UMIN: | |||
7335 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, | |||
7336 | SystemZ::CCMASK_CMP_LE, 0); | |||
7337 | case SystemZ::ATOMIC_LOAD_UMIN_32: | |||
7338 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, | |||
7339 | SystemZ::CCMASK_CMP_LE, 32); | |||
7340 | case SystemZ::ATOMIC_LOAD_UMIN_64: | |||
7341 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR, | |||
7342 | SystemZ::CCMASK_CMP_LE, 64); | |||
7343 | ||||
7344 | case SystemZ::ATOMIC_LOADW_UMAX: | |||
7345 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, | |||
7346 | SystemZ::CCMASK_CMP_GE, 0); | |||
7347 | case SystemZ::ATOMIC_LOAD_UMAX_32: | |||
7348 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, | |||
7349 | SystemZ::CCMASK_CMP_GE, 32); | |||
7350 | case SystemZ::ATOMIC_LOAD_UMAX_64: | |||
7351 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR, | |||
7352 | SystemZ::CCMASK_CMP_GE, 64); | |||
7353 | ||||
7354 | case SystemZ::ATOMIC_CMP_SWAPW: | |||
7355 | return emitAtomicCmpSwapW(MI, MBB); | |||
7356 | case SystemZ::MVCSequence: | |||
7357 | case SystemZ::MVCLoop: | |||
7358 | return emitMemMemWrapper(MI, MBB, SystemZ::MVC); | |||
7359 | case SystemZ::NCSequence: | |||
7360 | case SystemZ::NCLoop: | |||
7361 | return emitMemMemWrapper(MI, MBB, SystemZ::NC); | |||
7362 | case SystemZ::OCSequence: | |||
7363 | case SystemZ::OCLoop: | |||
7364 | return emitMemMemWrapper(MI, MBB, SystemZ::OC); | |||
7365 | case SystemZ::XCSequence: | |||
7366 | case SystemZ::XCLoop: | |||
7367 | return emitMemMemWrapper(MI, MBB, SystemZ::XC); | |||
7368 | case SystemZ::CLCSequence: | |||
7369 | case SystemZ::CLCLoop: | |||
7370 | return emitMemMemWrapper(MI, MBB, SystemZ::CLC); | |||
7371 | case SystemZ::CLSTLoop: | |||
7372 | return emitStringWrapper(MI, MBB, SystemZ::CLST); | |||
7373 | case SystemZ::MVSTLoop: | |||
7374 | return emitStringWrapper(MI, MBB, SystemZ::MVST); | |||
7375 | case SystemZ::SRSTLoop: | |||
7376 | return emitStringWrapper(MI, MBB, SystemZ::SRST); | |||
7377 | case SystemZ::TBEGIN: | |||
7378 | return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false); | |||
7379 | case SystemZ::TBEGIN_nofloat: | |||
7380 | return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true); | |||
7381 | case SystemZ::TBEGINC: | |||
7382 | return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true); | |||
7383 | case SystemZ::LTEBRCompare_VecPseudo: | |||
7384 | return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR); | |||
7385 | case SystemZ::LTDBRCompare_VecPseudo: | |||
7386 | return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR); | |||
7387 | case SystemZ::LTXBRCompare_VecPseudo: | |||
7388 | return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR); | |||
7389 | ||||
7390 | case TargetOpcode::STACKMAP: | |||
7391 | case TargetOpcode::PATCHPOINT: | |||
7392 | return emitPatchPoint(MI, MBB); | |||
7393 | ||||
7394 | default: | |||
7395 | llvm_unreachable("Unexpected instr type to insert"); | |||
7396 | } | |||
7397 | } | |||
7398 | ||||
7399 | // This is only used by the isel schedulers, and is needed only to prevent | |||
7400 | // the compiler from crashing when list-ilp is used. | |||
7401 | const TargetRegisterClass * | |||
7402 | SystemZTargetLowering::getRepRegClassFor(MVT VT) const { | |||
7403 | if (VT == MVT::Untyped) | |||
7404 | return &SystemZ::ADDR128BitRegClass; | |||
7405 | return TargetLowering::getRepRegClassFor(VT); | |||
7406 | } |
File: | include/llvm/Support/MathExtras.h |
1 | //===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | // |
10 | // This file contains some functions that are useful for math stuff. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #ifndef LLVM_SUPPORT_MATHEXTRAS_H |
15 | #define LLVM_SUPPORT_MATHEXTRAS_H |
16 | |
17 | #include "llvm/Support/Compiler.h" |
18 | #include "llvm/Support/SwapByteOrder.h" |
19 | #include <algorithm> |
20 | #include <cassert> |
21 | #include <climits> |
22 | #include <cstring> |
23 | #include <limits> |
24 | #include <type_traits> |
25 | |
26 | #ifdef __ANDROID_NDK__ |
27 | #include <android/api-level.h> |
28 | #endif |
29 | |
30 | #ifdef _MSC_VER |
31 | // Declare these intrinsics manually rather including intrin.h. It's very |
32 | // expensive, and MathExtras.h is popular. |
33 | // #include <intrin.h> |
34 | extern "C" { |
35 | unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask); |
36 | unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask); |
37 | unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask); |
38 | unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask); |
39 | } |
40 | #endif |
41 | |
42 | namespace llvm { |
43 | /// The behavior an operation has on an input of 0. |
44 | enum ZeroBehavior { |
45 | /// The returned value is undefined. |
46 | ZB_Undefined, |
47 | /// The returned value is numeric_limits<T>::max() |
48 | ZB_Max, |
49 | /// The returned value is numeric_limits<T>::digits |
50 | ZB_Width |
51 | }; |
52 | |
53 | namespace detail { |
54 | template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter { |
55 | static std::size_t count(T Val, ZeroBehavior) { |
56 | if (!Val) |
57 | return std::numeric_limits<T>::digits; |
58 | if (Val & 0x1) |
59 | return 0; |
60 | |
61 | // Bisection method. |
62 | std::size_t ZeroBits = 0; |
63 | T Shift = std::numeric_limits<T>::digits >> 1; |
64 | T Mask = std::numeric_limits<T>::max() >> Shift; |
65 | while (Shift) { |
66 | if ((Val & Mask) == 0) { |
67 | Val >>= Shift; |
68 | ZeroBits |= Shift; |
69 | } |
70 | Shift >>= 1; |
71 | Mask >>= Shift; |
72 | } |
73 | return ZeroBits; |
74 | } |
75 | }; |
76 | |
77 | #if __GNUC__ >= 4 || defined(_MSC_VER)
78 | template <typename T> struct TrailingZerosCounter<T, 4> { |
79 | static std::size_t count(T Val, ZeroBehavior ZB) { |
80 | if (ZB != ZB_Undefined && Val == 0) |
81 | return 32; |
82 | |
83 | #if __has_builtin(__builtin_ctz) || LLVM_GNUC_PREREQ(4, 0, 0)
84 | return __builtin_ctz(Val); |
85 | #elif defined(_MSC_VER) |
86 | unsigned long Index; |
87 | _BitScanForward(&Index, Val); |
88 | return Index; |
89 | #endif |
90 | } |
91 | }; |
92 | |
93 | #if !defined(_MSC_VER) || defined(_M_X64) |
94 | template <typename T> struct TrailingZerosCounter<T, 8> { |
95 | static std::size_t count(T Val, ZeroBehavior ZB) { |
96 | if (ZB != ZB_Undefined && Val == 0) |
97 | return 64; |
98 | |
99 | #if __has_builtin(__builtin_ctzll) || LLVM_GNUC_PREREQ(4, 0, 0)
100 | return __builtin_ctzll(Val); |
101 | #elif defined(_MSC_VER) |
102 | unsigned long Index; |
103 | _BitScanForward64(&Index, Val); |
104 | return Index; |
105 | #endif |
106 | } |
107 | }; |
108 | #endif |
109 | #endif |
110 | } // namespace detail |
111 | |
112 | /// Count number of 0's from the least significant bit to the most |
113 | /// stopping at the first 1. |
114 | /// |
115 | /// Only unsigned integral types are allowed. |
116 | /// |
117 | /// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are |
118 | /// valid arguments. |
119 | template <typename T> |
120 | std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) { |
121 | static_assert(std::numeric_limits<T>::is_integer && |
122 | !std::numeric_limits<T>::is_signed, |
123 | "Only unsigned integral types are allowed."); |
124 | return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB); |
125 | } |
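// Illustrative usage (editorial addition): countTrailingZeros(0x10u) == 4,
// and with the default ZB_Width behavior countTrailingZeros(0u) returns the
// full width, e.g. 32 for uint32_t.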
126 | |
127 | namespace detail { |
128 | template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter { |
129 | static std::size_t count(T Val, ZeroBehavior) { |
130 | if (!Val) |
131 | return std::numeric_limits<T>::digits; |
132 | |
133 | // Bisection method. |
134 | std::size_t ZeroBits = 0; |
135 | for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) { |
136 | T Tmp = Val >> Shift; |
137 | if (Tmp) |
138 | Val = Tmp; |
139 | else |
140 | ZeroBits |= Shift; |
141 | } |
142 | return ZeroBits; |
143 | } |
144 | }; |
145 | |
146 | #if __GNUC__ >= 4 || defined(_MSC_VER)
147 | template <typename T> struct LeadingZerosCounter<T, 4> { |
148 | static std::size_t count(T Val, ZeroBehavior ZB) { |
149 | if (ZB != ZB_Undefined && Val == 0) |
150 | return 32; |
151 | |
152 | #if __has_builtin(__builtin_clz) || LLVM_GNUC_PREREQ(4, 0, 0)
153 | return __builtin_clz(Val); |
154 | #elif defined(_MSC_VER) |
155 | unsigned long Index; |
156 | _BitScanReverse(&Index, Val); |
157 | return Index ^ 31; |
158 | #endif |
159 | } |
160 | }; |
161 | |
162 | #if !defined(_MSC_VER) || defined(_M_X64) |
163 | template <typename T> struct LeadingZerosCounter<T, 8> { |
164 | static std::size_t count(T Val, ZeroBehavior ZB) { |
165 | if (ZB != ZB_Undefined && Val == 0) |
166 | return 64; |
167 | |
168 | #if __has_builtin(__builtin_clzll) || LLVM_GNUC_PREREQ(4, 0, 0)
169 | return __builtin_clzll(Val); |
170 | #elif defined(_MSC_VER) |
171 | unsigned long Index; |
172 | _BitScanReverse64(&Index, Val); |
173 | return Index ^ 63; |
174 | #endif |
175 | } |
176 | }; |
177 | #endif |
178 | #endif |
179 | } // namespace detail |
180 | |
181 | /// Count number of 0's from the most significant bit to the least |
182 | /// stopping at the first 1. |
183 | /// |
184 | /// Only unsigned integral types are allowed. |
185 | /// |
186 | /// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are |
187 | /// valid arguments. |
188 | template <typename T> |
189 | std::size_t countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) { |
190 | static_assert(std::numeric_limits<T>::is_integer && |
191 | !std::numeric_limits<T>::is_signed, |
192 | "Only unsigned integral types are allowed."); |
193 | return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB); |
194 | } |
195 | |
196 | /// Get the index of the first set bit starting from the least |
197 | /// significant bit. |
198 | /// |
199 | /// Only unsigned integral types are allowed. |
200 | /// |
201 | /// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are |
202 | /// valid arguments. |
203 | template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) { |
204 | if (ZB == ZB_Max && Val == 0) |
205 | return std::numeric_limits<T>::max(); |
206 | |
207 | return countTrailingZeros(Val, ZB_Undefined); |
208 | } |
209 | |
210 | /// Create a bitmask with the N right-most bits set to 1, and all other |
211 | /// bits set to 0. Only unsigned types are allowed. |
212 | template <typename T> T maskTrailingOnes(unsigned N) { |
213 | static_assert(std::is_unsigned<T>::value, "Invalid type!"); |
214 | const unsigned Bits = CHAR_BIT * sizeof(T);
215 | assert(N <= Bits && "Invalid bit index");
216 | return N == 0 ? 0 : (T(-1) >> (Bits - N)); |
217 | } |
218 | |
219 | /// Create a bitmask with the N left-most bits set to 1, and all other |
220 | /// bits set to 0. Only unsigned types are allowed. |
221 | template <typename T> T maskLeadingOnes(unsigned N) { |
222 | return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
223 | } |
224 | |
225 | /// Create a bitmask with the N right-most bits set to 0, and all other |
226 | /// bits set to 1. Only unsigned types are allowed. |
227 | template <typename T> T maskTrailingZeros(unsigned N) { |
228 | return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
229 | } |
230 | |
231 | /// Create a bitmask with the N left-most bits set to 0, and all other |
232 | /// bits set to 1. Only unsigned types are allowed. |
233 | template <typename T> T maskLeadingZeros(unsigned N) { |
234 | return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
235 | } |
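// Illustrative usage (editorial addition), with T == uint8_t and N == 3:
//   maskTrailingOnes<uint8_t>(3)  == 0x07
//   maskLeadingOnes<uint8_t>(3)   == 0xE0
//   maskTrailingZeros<uint8_t>(3) == 0xF8
//   maskLeadingZeros<uint8_t>(3)  == 0x1F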
236 | |
237 | /// Get the index of the last set bit starting from the least |
238 | /// significant bit. |
239 | /// |
240 | /// Only unsigned integral types are allowed. |
241 | /// |
242 | /// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are |
243 | /// valid arguments. |
244 | template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) { |
245 | if (ZB == ZB_Max && Val == 0) |
246 | return std::numeric_limits<T>::max(); |
247 | |
248 | // Use ^ instead of - because both gcc and llvm can remove the associated ^ |
249 | // in the __builtin_clz intrinsic on x86. |
250 | return countLeadingZeros(Val, ZB_Undefined) ^ |
251 | (std::numeric_limits<T>::digits - 1); |
252 | } |
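// Editorial note (illustrative): for uint32_t Val == 0x10, countLeadingZeros
// returns 27 and 27 ^ 31 == 4, the index of the highest set bit. Since the
// count never exceeds digits - 1 for nonzero input, XOR with 31 equals
// subtraction from 31, but as the comment above notes, compilers can fold
// the XOR into __builtin_clz.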
253 | |
254 | /// Macro compressed bit reversal table for 256 bits. |
255 | /// |
256 | /// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable |
257 | static const unsigned char BitReverseTable256[256] = { |
258 | #define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64 |
259 | #define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16) |
260 | #define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4) |
261 | R6(0), R6(2), R6(1), R6(3) |
262 | #undef R2 |
263 | #undef R4 |
264 | #undef R6 |
265 | }; |
266 | |
267 | /// Reverse the bits in \p Val. |
268 | template <typename T> |
269 | T reverseBits(T Val) { |
270 | unsigned char in[sizeof(Val)]; |
271 | unsigned char out[sizeof(Val)]; |
272 | std::memcpy(in, &Val, sizeof(Val)); |
273 | for (unsigned i = 0; i < sizeof(Val); ++i) |
274 | out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]]; |
275 | std::memcpy(&Val, out, sizeof(Val)); |
276 | return Val; |
277 | } |
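// Illustrative usage (editorial addition): reverseBits<uint8_t>(0x01) == 0x80
// and reverseBits<uint16_t>(0x0001) == 0x8000; because the loop reverses the
// byte order while the table reverses bits within each byte, the result is
// independent of host endianness.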
278 | |
279 | // NOTE: The following support functions use the _32/_64 extensions instead of |
280 | // type overloading so that signed and unsigned integers can be used without |
281 | // ambiguity. |
282 | |
283 | /// Return the high 32 bits of a 64 bit value. |
284 | constexpr inline uint32_t Hi_32(uint64_t Value) { |
285 | return static_cast<uint32_t>(Value >> 32); |
286 | } |
287 | |
288 | /// Return the low 32 bits of a 64 bit value. |
289 | constexpr inline uint32_t Lo_32(uint64_t Value) { |
290 | return static_cast<uint32_t>(Value); |
291 | } |
292 | |
293 | /// Make a 64-bit integer from a high / low pair of 32-bit integers. |
294 | constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) { |
295 | return ((uint64_t)High << 32) | (uint64_t)Low; |
296 | } |
297 | |
298 | /// Checks if an integer fits into the given bit width. |
299 | template <unsigned N> constexpr inline bool isInt(int64_t x) { |
300 | return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
301 | } |
302 | // Template specializations to get better code for common cases. |
303 | template <> constexpr inline bool isInt<8>(int64_t x) { |
304 | return static_cast<int8_t>(x) == x; |
305 | } |
306 | template <> constexpr inline bool isInt<16>(int64_t x) { |
307 | return static_cast<int16_t>(x) == x; |
308 | } |
309 | template <> constexpr inline bool isInt<32>(int64_t x) { |
310 | return static_cast<int32_t>(x) == x; |
311 | } |
312 | |
313 | /// Checks if a signed integer is an N bit number shifted left by S. |
314 | template <unsigned N, unsigned S> |
315 | constexpr inline bool isShiftedInt(int64_t x) { |
316 | static_assert( |
317 | N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number."); |
318 | static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide."); |
319 | return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
320 | } |
321 | |
322 | /// Checks if an unsigned integer fits into the given bit width. |
323 | /// |
324 | /// This is written as two functions rather than as simply |
325 | /// |
326 | /// return N >= 64 || X < (UINT64_C(1) << N); |
327 | /// |
328 | /// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting |
329 | /// left too many places. |
330 | template <unsigned N> |
331 | constexpr inline typename std::enable_if<(N < 64), bool>::type |
332 | isUInt(uint64_t X) { |
333 | static_assert(N > 0, "isUInt<0> doesn't make sense"); |
334 | return X < (UINT64_C(1) << (N));
335 | } |
336 | template <unsigned N> |
337 | constexpr inline typename std::enable_if<N >= 64, bool>::type |
338 | isUInt(uint64_t X) { |
339 | return true; |
340 | } |
341 | |
342 | // Template specializations to get better code for common cases. |
343 | template <> constexpr inline bool isUInt<8>(uint64_t x) { |
344 | return static_cast<uint8_t>(x) == x; |
345 | } |
346 | template <> constexpr inline bool isUInt<16>(uint64_t x) { |
347 | return static_cast<uint16_t>(x) == x; |
348 | } |
349 | template <> constexpr inline bool isUInt<32>(uint64_t x) { |
350 | return static_cast<uint32_t>(x) == x; |
351 | } |
352 | |
353 | /// Checks if an unsigned integer is an N bit number shifted left by S.
354 | template <unsigned N, unsigned S> |
355 | constexpr inline bool isShiftedUInt(uint64_t x) { |
356 | static_assert( |
357 | N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)"); |
358 | static_assert(N + S <= 64, |
359 | "isShiftedUInt<N, S> with N + S > 64 is too wide."); |
360 | // Per the two static_asserts above, S must be strictly less than 64. So |
361 | // 1 << S is not undefined behavior. |
362 | return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
363 | } |
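// Illustrative usage (editorial addition): isInt<8>(127) and isUInt<8>(255)
// are true while isInt<8>(128) is false, and isShiftedUInt<8, 4>(0xFF0) is
// true because 0xFF0 fits in 12 bits and is a multiple of 1 << 4.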
364 | |
365 | /// Gets the maximum value for a N-bit unsigned integer. |
366 | inline uint64_t maxUIntN(uint64_t N) { |
367 | assert(N > 0 && N <= 64 && "integer width out of range");
368 | |
369 | // uint64_t(1) << 64 is undefined behavior, so we can't do |
370 | // (uint64_t(1) << N) - 1 |
371 | // without checking first that N != 64. But this works and doesn't have a |
372 | // branch. |
373 | return UINT64_MAX >> (64 - N);
374 | } |
375 | |
376 | /// Gets the minimum value for a N-bit signed integer. |
377 | inline int64_t minIntN(int64_t N) { |
378 | assert(N > 0 && N <= 64 && "integer width out of range");
379 | |
380 | return -(UINT64_C(1)<<(N-1));
381 | } |
382 | |
383 | /// Gets the maximum value for a N-bit signed integer. |
384 | inline int64_t maxIntN(int64_t N) { |
385 | assert(N > 0 && N <= 64 && "integer width out of range");
386 | |
387 | // This relies on two's complement wraparound when N == 64, so we convert to |
388 | // int64_t only at the very end to avoid UB. |
389 | return (UINT64_C(1) << (N - 1)) - 1;
390 | } |
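// Editorial note (illustrative): these helpers are written to stay defined
// for N == 64: maxUIntN(64) == UINT64_MAX >> 0, whereas the naive
// (uint64_t(1) << 64) - 1 would shift by the full width; minIntN(64)
// negates UINT64_C(1) << 63 in unsigned arithmetic and converts the result
// to INT64_MIN, and maxIntN(64) yields INT64_MAX the same way.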
391 | |
392 | /// Checks if an unsigned integer fits into the given (dynamic) bit width. |
393 | inline bool isUIntN(unsigned N, uint64_t x) { |
394 | return N >= 64 || x <= maxUIntN(N); |
395 | } |
396 | |
397 | /// Checks if a signed integer fits into the given (dynamic) bit width.
398 | inline bool isIntN(unsigned N, int64_t x) { |
399 | return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N)); |
400 | } |
401 | |
402 | /// Return true if the argument is a non-empty sequence of ones starting at the |
403 | /// least significant bit with the remainder zero (32 bit version). |
404 | /// Ex. isMask_32(0x0000FFFFU) == true. |
405 | constexpr inline bool isMask_32(uint32_t Value) { |
406 | return Value && ((Value + 1) & Value) == 0; |
407 | } |
408 | |
409 | /// Return true if the argument is a non-empty sequence of ones starting at the |
410 | /// least significant bit with the remainder zero (64 bit version). |
411 | constexpr inline bool isMask_64(uint64_t Value) { |
412 | return Value && ((Value + 1) & Value) == 0; |
413 | } |
414 | |
415 | /// Return true if the argument contains a non-empty sequence of ones with the |
416 | /// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true. |
417 | constexpr inline bool isShiftedMask_32(uint32_t Value) { |
418 | return Value && isMask_32((Value - 1) | Value); |
419 | } |
420 | |
421 | /// Return true if the argument contains a non-empty sequence of ones with the |
422 | /// remainder zero (64 bit version.) |
423 | constexpr inline bool isShiftedMask_64(uint64_t Value) { |
424 | return Value && isMask_64((Value - 1) | Value); |
425 | } |
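// Illustrative usage (editorial addition): isMask_32(0x0000FFFF) is true and
// isMask_32(0) is false; isShiftedMask_32(0x0000FF00) is true because
// (Value - 1) | Value == 0x0000FFFF, which is itself a trailing-ones mask.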
426 | |
427 | /// Return true if the argument is a power of two > 0. |
428 | /// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.) |
429 | constexpr inline bool isPowerOf2_32(uint32_t Value) { |
430 | return Value && !(Value & (Value - 1)); |
431 | } |
432 | |
433 | /// Return true if the argument is a power of two > 0 (64 bit edition.) |
434 | constexpr inline bool isPowerOf2_64(uint64_t Value) { |
435 | return Value && !(Value & (Value - 1)); |
436 | } |
437 | |
438 | /// Return a byte-swapped representation of the 16-bit argument. |
439 | inline uint16_t ByteSwap_16(uint16_t Value) { |
440 | return sys::SwapByteOrder_16(Value); |
441 | } |
442 | |
443 | /// Return a byte-swapped representation of the 32-bit argument. |
444 | inline uint32_t ByteSwap_32(uint32_t Value) { |
445 | return sys::SwapByteOrder_32(Value); |
446 | } |
447 | |
448 | /// Return a byte-swapped representation of the 64-bit argument. |
449 | inline uint64_t ByteSwap_64(uint64_t Value) { |
450 | return sys::SwapByteOrder_64(Value); |
451 | } |
452 | |
453 | /// Count the number of ones from the most significant bit to the first |
454 | /// zero bit. |
455 | /// |
456 | /// Ex. countLeadingOnes(0xFF0FFF00) == 8. |
457 | /// Only unsigned integral types are allowed. |
458 | /// |
459 | /// \param ZB the behavior on an input of all ones. Only ZB_Width and |
460 | /// ZB_Undefined are valid arguments. |
461 | template <typename T> |
462 | std::size_t countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) { |
463 | static_assert(std::numeric_limits<T>::is_integer && |
464 | !std::numeric_limits<T>::is_signed, |
465 | "Only unsigned integral types are allowed."); |
466 | return countLeadingZeros<T>(~Value, ZB); |
467 | } |
468 | |
469 | /// Count the number of ones from the least significant bit to the first |
470 | /// zero bit. |
471 | /// |
472 | /// Ex. countTrailingOnes(0x00FF00FF) == 8. |
473 | /// Only unsigned integral types are allowed. |
474 | /// |
475 | /// \param ZB the behavior on an input of all ones. Only ZB_Width and |
476 | /// ZB_Undefined are valid arguments. |
477 | template <typename T> |
478 | std::size_t countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) { |
479 | static_assert(std::numeric_limits<T>::is_integer && |
480 | !std::numeric_limits<T>::is_signed, |
481 | "Only unsigned integral types are allowed."); |
482 | return countTrailingZeros<T>(~Value, ZB); |
483 | } |
484 | |
485 | namespace detail { |
486 | template <typename T, std::size_t SizeOfT> struct PopulationCounter { |
487 | static unsigned count(T Value) { |
488 | // Generic version, forward to 32 bits. |
489 | static_assert(SizeOfT <= 4, "Not implemented!"); |
490 | #if __GNUC__ >= 4
491 | return __builtin_popcount(Value); |
492 | #else |
493 | uint32_t v = Value; |
494 | v = v - ((v >> 1) & 0x55555555); |
495 | v = (v & 0x33333333) + ((v >> 2) & 0x33333333); |
496 | return (((v + (v >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;
497 | #endif |
498 | } |
499 | }; |
500 | |
501 | template <typename T> struct PopulationCounter<T, 8> { |
502 | static unsigned count(T Value) { |
503 | #if __GNUC__ >= 4
504 | return __builtin_popcountll(Value); |
505 | #else |
506 | uint64_t v = Value; |
507 | v = v - ((v >> 1) & 0x5555555555555555ULL); |
508 | v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL); |
509 | v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL; |
510 | return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56); |
511 | #endif |
512 | } |
513 | }; |
514 | } // namespace detail |
515 | |
516 | /// Count the number of set bits in a value. |
517 | /// Ex. countPopulation(0xF000F000) = 8 |
518 | /// Returns 0 if the word is zero. |
519 | template <typename T> |
520 | inline unsigned countPopulation(T Value) { |
521 | static_assert(std::numeric_limits<T>::is_integer && |
522 | !std::numeric_limits<T>::is_signed, |
523 | "Only unsigned integral types are allowed."); |
524 | return detail::PopulationCounter<T, sizeof(T)>::count(Value); |
525 | } |
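// Editorial note (illustrative): in the fallback SWAR path above,
// v - ((v >> 1) & 0x5555...) leaves a 2-bit count per bit pair, the
// 0x3333... step widens those to 4-bit counts per nibble, the 0xF0F0F0F
// step merges nibble pairs into per-byte counts, and multiplying by
// 0x0101... sums every byte into the top byte before the final shift;
// e.g. countPopulation(0xF000F000u) == 8, as the doc comment notes.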
526 | |
527 | /// Return the log base 2 of the specified value. |
528 | inline double Log2(double Value) { |
529 | #if defined(__ANDROID_API__) && __ANDROID_API__ < 18 |
530 | return __builtin_log(Value) / __builtin_log(2.0); |
531 | #else |
532 | return log2(Value); |
533 | #endif |
534 | } |
535 | |
536 | /// Return the floor log base 2 of the specified value, -1 if the value is zero. |
537 | /// (32 bit edition.) |
538 | /// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2 |
539 | inline unsigned Log2_32(uint32_t Value) { |
540 | return 31 - countLeadingZeros(Value); |
541 | } |
542 | |
543 | /// Return the floor log base 2 of the specified value, -1 if the value is zero. |
544 | /// (64 bit edition.) |
545 | inline unsigned Log2_64(uint64_t Value) { |
546 | return 63 - countLeadingZeros(Value); |
547 | } |
548 | |
549 | /// Return the ceil log base 2 of the specified value, 32 if the value is zero. |
550 | /// (32 bit edition). |
551 | /// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3 |
552 | inline unsigned Log2_32_Ceil(uint32_t Value) { |
553 | return 32 - countLeadingZeros(Value - 1); |
554 | } |
555 | |
556 | /// Return the ceil log base 2 of the specified value, 64 if the value is zero. |
557 | /// (64 bit edition.) |
558 | inline unsigned Log2_64_Ceil(uint64_t Value) { |
559 | return 64 - countLeadingZeros(Value - 1); |
560 | } |
561 | |
562 | /// Return the greatest common divisor of the values using Euclid's algorithm. |
563 | inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) { |
564 | while (B) { |
565 | uint64_t T = B; |
566 | B = A % B; |
567 | A = T; |
568 | } |
569 | return A; |
570 | } |
571 | |
572 | /// This function takes a 64-bit integer and returns the bit equivalent double. |
573 | inline double BitsToDouble(uint64_t Bits) { |
574 | double D; |
575 | static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes"); |
576 | memcpy(&D, &Bits, sizeof(Bits)); |
577 | return D; |
578 | } |
579 | |
580 | /// This function takes a 32-bit integer and returns the bit equivalent float. |
581 | inline float BitsToFloat(uint32_t Bits) { |
582 | float F; |
583 | static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes"); |
584 | memcpy(&F, &Bits, sizeof(Bits)); |
585 | return F; |
586 | } |
587 | |
588 | /// This function takes a double and returns the bit equivalent 64-bit integer. |
589 | /// Note that copying doubles around changes the bits of NaNs on some hosts, |
590 | /// notably x86, so this routine cannot be used if these bits are needed. |
591 | inline uint64_t DoubleToBits(double Double) { |
592 | uint64_t Bits; |
593 | static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes"); |
594 | memcpy(&Bits, &Double, sizeof(Double)); |
595 | return Bits; |
596 | } |
597 | |
598 | /// This function takes a float and returns the bit equivalent 32-bit integer. |
599 | /// Note that copying floats around changes the bits of NaNs on some hosts, |
600 | /// notably x86, so this routine cannot be used if these bits are needed. |
601 | inline uint32_t FloatToBits(float Float) { |
602 | uint32_t Bits; |
603 | static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes"); |
604 | memcpy(&Bits, &Float, sizeof(Float)); |
605 | return Bits; |
606 | } |
607 | |
608 | /// A and B are either alignments or offsets. Return the minimum alignment that |
609 | /// may be assumed after adding the two together. |
610 | constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) { |
611 | // The largest power of 2 that divides both A and B. |
612 | // |
613 | // Replace "-Value" by "1+~Value" in the following commented code to avoid |
614 | // MSVC warning C4146 |
615 | // return (A | B) & -(A | B); |
616 | return (A | B) & (1 + ~(A | B)); |
617 | } |
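// Illustrative usage (editorial addition): MinAlign(8, 12) == 4, since
// (8 | 12) == 12 and 12 & -12 isolates the lowest set bit, 4; the
// (1 + ~Value) spelling computes the same two's-complement negation without
// triggering MSVC warning C4146.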
618 | |
619 | /// Aligns \c Addr to \c Alignment bytes, rounding up. |
620 | /// |
621 | /// Alignment should be a power of two. This method rounds up, so |
622 | /// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8. |
623 | inline uintptr_t alignAddr(const void *Addr, size_t Alignment) { |
624 | assert(Alignment && isPowerOf2_64((uint64_t)Alignment) &&
625 | "Alignment is not a power of two!");
626 | |
627 | assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr);
628 | |
629 | return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1)); |
630 | } |
631 | |
632 | /// Returns the necessary adjustment for aligning \c Ptr to \c Alignment |
633 | /// bytes, rounding up. |
634 | inline size_t alignmentAdjustment(const void *Ptr, size_t Alignment) { |
635 | return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr; |
636 | } |
637 | |
638 | /// Returns the next power of two (in 64-bits) that is strictly greater than A. |
639 | /// Returns zero on overflow. |
640 | inline uint64_t NextPowerOf2(uint64_t A) { |
641 | A |= (A >> 1); |
642 | A |= (A >> 2); |
643 | A |= (A >> 4); |
644 | A |= (A >> 8); |
645 | A |= (A >> 16); |
646 | A |= (A >> 32); |
647 | return A + 1; |
648 | } |
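// Editorial note (illustrative): the shift cascade smears the highest set
// bit into every lower position, so for A == 5 (0b101) the ORs produce
// 0b111 == 7 and the function returns 8; NextPowerOf2(8) returns 16, since
// the result is strictly greater than A.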
649 | |
650 | /// Returns the power of two which is less than or equal to the given value. |
651 | /// Essentially, it is a floor operation across the domain of powers of two. |
652 | inline uint64_t PowerOf2Floor(uint64_t A) { |
653 | if (!A) return 0; |
654 | return 1ull << (63 - countLeadingZeros(A, ZB_Undefined)); |
655 | } |
656 | |
657 | /// Returns the power of two which is greater than or equal to the given value. |
658 | /// Essentially, it is a ceil operation across the domain of powers of two. |
659 | inline uint64_t PowerOf2Ceil(uint64_t A) { |
660 | if (!A) |
661 | return 0; |
662 | return NextPowerOf2(A - 1); |
663 | } |
664 | |
665 | /// Returns the next integer (mod 2**64) that is greater than or equal to |
666 | /// \p Value and is a multiple of \p Align. \p Align must be non-zero. |
667 | /// |
668 | /// If non-zero \p Skew is specified, the return value will be a minimal |
669 | /// integer that is greater than or equal to \p Value and equal to |
670 | /// \p Align * N + \p Skew for some integer N. If \p Skew is larger than |
671 | /// \p Align, its value is adjusted to '\p Skew mod \p Align'. |
672 | /// |
673 | /// Examples: |
674 | /// \code |
675 | /// alignTo(5, 8) = 8 |
676 | /// alignTo(17, 8) = 24 |
677 | /// alignTo(~0LL, 8) = 0 |
678 | /// alignTo(321, 255) = 510 |
679 | /// |
680 | /// alignTo(5, 8, 7) = 7 |
681 | /// alignTo(17, 8, 1) = 17 |
682 | /// alignTo(~0LL, 8, 3) = 3 |
683 | /// alignTo(321, 255, 42) = 552 |
684 | /// \endcode |
685 | inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) { |
686 | assert(Align != 0u && "Align can't be 0.");
687 | Skew %= Align; |
688 | return (Value + Align - 1 - Skew) / Align * Align + Skew; |
689 | } |
690 | |
691 | /// Returns the next integer (mod 2**64) that is greater than or equal to |
692 | /// \p Value and is a multiple of \c Align. \c Align must be non-zero. |
693 | template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) { |
694 | static_assert(Align != 0u, "Align must be non-zero"); |
695 | return (Value + Align - 1) / Align * Align; |
696 | } |
697 | |
698 | /// Returns the integer ceil(Numerator / Denominator). |
699 | inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) { |
700 | return alignTo(Numerator, Denominator) / Denominator; |
701 | } |
702 | |
703 | /// \c alignTo for contexts where a constant expression is required. |
704 | /// \sa alignTo |
705 | /// |
706 | /// \todo FIXME: remove when \c constexpr becomes really \c constexpr |
707 | template <uint64_t Align> |
708 | struct AlignTo { |
709 | static_assert(Align != 0u, "Align must be non-zero"); |
710 | template <uint64_t Value> |
711 | struct from_value { |
712 | static const uint64_t value = (Value + Align - 1) / Align * Align; |
713 | }; |
714 | }; |
715 | |
716 | /// Returns the largest uint64_t less than or equal to \p Value and is |
717 | /// \p Skew mod \p Align. \p Align must be non-zero |
718 | inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) { |
719 | assert(Align != 0u && "Align can't be 0.");
720 | Skew %= Align; |
721 | return (Value - Skew) / Align * Align + Skew; |
722 | } |
723 | |
724 | /// Returns the offset to the next integer (mod 2**64) that is greater than |
725 | /// or equal to \p Value and is a multiple of \p Align. \p Align must be |
726 | /// non-zero. |
727 | inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) { |
728 | return alignTo(Value, Align) - Value; |
729 | } |
730 | |
731 | /// Sign-extend the number in the bottom B bits of X to a 32-bit integer. |
732 | /// Requires 0 < B <= 32. |
733 | template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) { |
734 | static_assert(B > 0, "Bit width can't be 0."); |
735 | static_assert(B <= 32, "Bit width out of range."); |
736 | return int32_t(X << (32 - B)) >> (32 - B); |
737 | } |
738 | |
739 | /// Sign-extend the number in the bottom B bits of X to a 32-bit integer. |
740 | /// Requires 0 < B <= 32.
741 | inline int32_t SignExtend32(uint32_t X, unsigned B) { |
742 | assert(B > 0 && "Bit width can't be 0.");
743 | assert(B <= 32 && "Bit width out of range.");
744 | return int32_t(X << (32 - B)) >> (32 - B); |
745 | } |
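// Illustrative usage (editorial addition): SignExtend32<4>(0x0F) == -1 and
// SignExtend32<4>(0x07) == 7; shifting the field up to bit 31 and shifting
// back arithmetically replicates bit B-1 into all higher bits.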
746 | |
747 | /// Sign-extend the number in the bottom B bits of X to a 64-bit integer. |
748 | /// Requires 0 < B <= 64.
749 | template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) { |
750 | static_assert(B > 0, "Bit width can't be 0."); |
751 | static_assert(B <= 64, "Bit width out of range."); |
752 | return int64_t(x << (64 - B)) >> (64 - B); |
753 | } |
754 | |
755 | /// Sign-extend the number in the bottom B bits of X to a 64-bit integer. |
756 | /// Requires 0 < B <= 64.
757 | inline int64_t SignExtend64(uint64_t X, unsigned B) { |
758 | assert(B > 0 && "Bit width can't be 0.");
759 | assert(B <= 64 && "Bit width out of range.");
760 | return int64_t(X << (64 - B)) >> (64 - B); |
761 | } |
762 | |
763 | /// Subtract two unsigned integers, X and Y, of type T and return the absolute |
764 | /// value of the result. |
765 | template <typename T> |
766 | typename std::enable_if<std::is_unsigned<T>::value, T>::type |
767 | AbsoluteDifference(T X, T Y) { |
768 | return std::max(X, Y) - std::min(X, Y); |
769 | } |
770 | |
771 | /// Add two unsigned integers, X and Y, of type T. Clamp the result to the |
772 | /// maximum representable value of T on overflow. ResultOverflowed indicates if |
773 | /// the result is larger than the maximum representable value of type T. |
774 | template <typename T> |
775 | typename std::enable_if<std::is_unsigned<T>::value, T>::type |
776 | SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) { |
777 | bool Dummy; |
778 | bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy; |
779 | // Hacker's Delight, p. 29 |
780 | T Z = X + Y; |
781 | Overflowed = (Z < X || Z < Y); |
782 | if (Overflowed) |
783 | return std::numeric_limits<T>::max(); |
784 | else |
785 | return Z; |
786 | } |
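Editor's sketch of SaturatingAdd; the wrapped sum Z is smaller than either operand exactly when overflow occurred, which is the Hacker's Delight test used above (function name ours):

#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

void saturatingAddExample() {
  bool Overflowed = false;
  // 200 + 100 == 300 > UINT8_MAX, so the result is clamped to 255.
  uint8_t Sum = llvm::SaturatingAdd<uint8_t>(200, 100, &Overflowed);
  assert(Sum == 255 && Overflowed);
  Sum = llvm::SaturatingAdd<uint8_t>(100, 100, &Overflowed);
  assert(Sum == 200 && !Overflowed);
}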
787 | |
788 | /// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the |
789 | /// maximum representable value of T on overflow. ResultOverflowed indicates if |
790 | /// the result is larger than the maximum representable value of type T. |
791 | template <typename T> |
792 | typename std::enable_if<std::is_unsigned<T>::value, T>::type |
793 | SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) { |
794 | bool Dummy; |
795 | bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy; |
796 | |
797 | // Hacker's Delight, p. 30 has a different algorithm, but we don't use that |
798 | // because it fails for uint16_t (where multiplication can have undefined |
799 | // behavior due to promotion to int), and requires a division in addition |
800 | // to the multiplication. |
801 | |
802 | Overflowed = false; |
803 | |
804 | // Log2(Z) would be either Log2Z or Log2Z + 1. |
805 | // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z |
806 | // will necessarily be less than Log2Max as desired. |
807 | int Log2Z = Log2_64(X) + Log2_64(Y); |
808 | const T Max = std::numeric_limits<T>::max(); |
809 | int Log2Max = Log2_64(Max); |
810 | if (Log2Z < Log2Max) { |
811 | return X * Y; |
812 | } |
813 | if (Log2Z > Log2Max) { |
814 | Overflowed = true; |
815 | return Max; |
816 | } |
817 | |
818 | // We're going to use the top bit, and maybe overflow one |
819 | // bit past it. Multiply all but the bottom bit then add |
820 | // that on at the end. |
821 | T Z = (X >> 1) * Y; |
822 | if (Z & ~(Max >> 1)) { |
823 | Overflowed = true; |
824 | return Max; |
825 | } |
826 | Z <<= 1; |
827 | if (X & 1) |
828 | return SaturatingAdd(Z, Y, ResultOverflowed); |
829 | |
830 | return Z; |
831 | } |
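Editor's worked example of the Log2 pre-check for T = uint8_t, where Log2Max = Log2_64(255) = 7 (function name ours):

#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

void saturatingMultiplyExample() {
  bool Overflowed = false;
  // Log2Z = 4 + 4 = 8 > 7: overflow is certain, no multiply is performed.
  assert(llvm::SaturatingMultiply<uint8_t>(16, 16, &Overflowed) == 255 && Overflowed);
  // Log2Z = 3 + 3 = 6 < 7: the product (120) is known in advance to fit.
  assert(llvm::SaturatingMultiply<uint8_t>(15, 8, &Overflowed) == 120 && !Overflowed);
  // Log2Z = 4 + 3 = 7 == 7: the borderline path multiplies carefully; 128 fits.
  assert(llvm::SaturatingMultiply<uint8_t>(16, 8, &Overflowed) == 128 && !Overflowed);
}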
832 | |
833 | /// Multiply two unsigned integers, X and Y, and add the unsigned integer A to
834 | /// the product. Clamp the result to the maximum representable value of T on |
835 | /// overflow. ResultOverflowed indicates if the result is larger than the |
836 | /// maximum representable value of type T. |
837 | template <typename T> |
838 | typename std::enable_if<std::is_unsigned<T>::value, T>::type |
839 | SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) { |
840 | bool Dummy; |
841 | bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy; |
842 | |
843 | T Product = SaturatingMultiply(X, Y, &Overflowed); |
844 | if (Overflowed) |
845 | return Product; |
846 | |
847 | return SaturatingAdd(A, Product, &Overflowed); |
848 | } |
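Editor's sketch: whichever step saturates first, the clamped maximum propagates to the final result (function name ours):

#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

void saturatingMultiplyAddExample() {
  bool Overflowed = false;
  // 10 * 20 + 55 == 255 exactly: no saturation.
  assert(llvm::SaturatingMultiplyAdd<uint8_t>(10, 20, 55, &Overflowed) == 255 && !Overflowed);
  // 10 * 20 + 56 would be 256: the final add saturates.
  assert(llvm::SaturatingMultiplyAdd<uint8_t>(10, 20, 56, &Overflowed) == 255 && Overflowed);
}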
849 | |
850 | /// Use this rather than HUGE_VALF; the latter causes warnings on MSVC. |
851 | extern const float huge_valf; |
852 | } // End llvm namespace |
853 | |
854 | #endif |
1 | // The template and inlines for the numeric_limits classes. -*- C++ -*- |
2 | |
3 | // Copyright (C) 1999-2016 Free Software Foundation, Inc. |
4 | // |
5 | // This file is part of the GNU ISO C++ Library. This library is free |
6 | // software; you can redistribute it and/or modify it under the |
7 | // terms of the GNU General Public License as published by the |
8 | // Free Software Foundation; either version 3, or (at your option) |
9 | // any later version. |
10 | |
11 | // This library is distributed in the hope that it will be useful, |
12 | // but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | // GNU General Public License for more details. |
15 | |
16 | // Under Section 7 of GPL version 3, you are granted additional |
17 | // permissions described in the GCC Runtime Library Exception, version |
18 | // 3.1, as published by the Free Software Foundation. |
19 | |
20 | // You should have received a copy of the GNU General Public License and |
21 | // a copy of the GCC Runtime Library Exception along with this program; |
22 | // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see |
23 | // <http://www.gnu.org/licenses/>. |
24 | |
25 | /** @file include/limits |
26 | * This is a Standard C++ Library header. |
27 | */ |
28 | |
29 | // Note: this is not a conforming implementation. |
30 | // Written by Gabriel Dos Reis <gdr@codesourcery.com> |
31 | |
32 | // |
33 | // ISO 14882:1998 |
34 | // 18.2.1 |
35 | // |
36 | |
37 | #ifndef _GLIBCXX_NUMERIC_LIMITS
38 | #define _GLIBCXX_NUMERIC_LIMITS 1
39 | |
40 | #pragma GCC system_header |
41 | |
42 | #include <bits/c++config.h> |
43 | |
44 | // |
45 | // The numeric_limits<> traits document implementation-defined aspects |
46 | // of fundamental arithmetic data types (integers and floating points). |
47 | // From the Standard C++ point of view, there are 14 such types:
48 | // * integers |
49 | // bool (1) |
50 | // char, signed char, unsigned char, wchar_t (4) |
51 | // short, unsigned short (2) |
52 | // int, unsigned (2) |
53 | // long, unsigned long (2) |
54 | // |
55 | // * floating points |
56 | // float (1) |
57 | // double (1) |
58 | // long double (1) |
59 | // |
60 | // GNU C++ understands (where supported by the host C-library) |
61 | // * integer |
62 | // long long, unsigned long long (2) |
63 | // |
64 | // which brings us to 16 fundamental arithmetic data types in GNU C++. |
65 | // |
66 | // |
67 | // Since a numeric_limits<> is a bit tricky to get right, we rely on |
68 | // an interface composed of macros which should be defined in config/os |
69 | // or config/cpu when they differ from the generic (read arbitrary) |
70 | // definitions given here. |
71 | // |
72 | |
73 | // These values can be overridden in the target configuration file. |
74 | // The default values are appropriate for many 32-bit targets. |
75 | |
76 | // GCC only intrinsically supports modulo integral types. The only remaining
77 | // integral exceptional value is division by zero. Only targets that do not
78 | // signal division by zero in some "hard to ignore" way should use false.
79 | #ifndef __glibcxx_integral_traps
80 | # define __glibcxx_integral_traps true
81 | #endif |
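As the comment says, ports override this in their configuration headers; a hypothetical target whose integer division by zero is silently ignored would pre-define the macro before this point (illustration only):

// Hypothetical config/os override, seen before <limits> in such a port:
// #define __glibcxx_integral_traps false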
82 | |
83 | // float |
84 | // |
85 | |
86 | // Default values. Should be overridden in configuration files if necessary. |
87 | |
88 | #ifndef __glibcxx_float_has_denorm_loss |
89 | # define __glibcxx_float_has_denorm_loss false |
90 | #endif |
91 | #ifndef __glibcxx_float_traps |
92 | # define __glibcxx_float_traps false |
93 | #endif |
94 | #ifndef __glibcxx_float_tinyness_before |
95 | # define __glibcxx_float_tinyness_before false |
96 | #endif |
97 | |
98 | // double |
99 | |
100 | // Default values. Should be overridden in configuration files if necessary. |
101 | |
102 | #ifndef __glibcxx_double_has_denorm_loss |
103 | # define __glibcxx_double_has_denorm_loss false |
104 | #endif |
105 | #ifndef __glibcxx_double_traps |
106 | # define __glibcxx_double_traps false |
107 | #endif |
108 | #ifndef __glibcxx_double_tinyness_before |
109 | # define __glibcxx_double_tinyness_before false |
110 | #endif |
111 | |
112 | // long double |
113 | |
114 | // Default values. Should be overridden in configuration files if necessary. |
115 | |
116 | #ifndef __glibcxx_long_double_has_denorm_loss |
117 | # define __glibcxx_long_double_has_denorm_loss false |
118 | #endif |
119 | #ifndef __glibcxx_long_double_traps |
120 | # define __glibcxx_long_double_traps false |
121 | #endif |
122 | #ifndef __glibcxx_long_double_tinyness_before |
123 | # define __glibcxx_long_double_tinyness_before false |
124 | #endif |
125 | |
126 | // You should not need to define any macros below this point. |
127 | |
128 | #define __glibcxx_signed_b(T,B) ((T)(-1) < 0)
129 |
130 | #define __glibcxx_min_b(T,B) \
131 |   (__glibcxx_signed_b (T,B) ? -__glibcxx_max_b (T,B) - 1 : (T)0)
132 |
133 | #define __glibcxx_max_b(T,B) \
134 |   (__glibcxx_signed_b (T,B) ? \
135 |    (((((T)1 << (__glibcxx_digits_b (T,B) - 1)) - 1) << 1) + 1) : ~(T)0)
136 |
137 | #define __glibcxx_digits_b(T,B) \
138 |   (B - __glibcxx_signed_b (T,B))
139 |
140 | // The fraction 643/2136 approximates log10(2) to 7 significant digits.
141 | #define __glibcxx_digits10_b(T,B) \
142 |   (__glibcxx_digits_b (T,B) * 643L / 2136)
143 |
144 | #define __glibcxx_signed(T) \
145 |   __glibcxx_signed_b (T, sizeof(T) * __CHAR_BIT__)
146 | #define __glibcxx_min(T) \
147 |   __glibcxx_min_b (T, sizeof(T) * __CHAR_BIT__)
148 | #define __glibcxx_max(T) \
149 |   __glibcxx_max_b (T, sizeof(T) * __CHAR_BIT__)
150 | #define __glibcxx_digits(T) \
151 |   __glibcxx_digits_b (T, sizeof(T) * __CHAR_BIT__)
152 | #define __glibcxx_digits10(T) \
153 |   __glibcxx_digits10_b (T, sizeof(T) * __CHAR_BIT__)
154 | |
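Editor's walk-through of the macros above (assuming 8-bit chars), expanded by hand for T = signed char, B = 8:

// __glibcxx_signed_b(signed char, 8)   -> ((signed char)(-1) < 0) == true
// __glibcxx_digits_b(signed char, 8)   -> 8 - 1 == 7 value bits
// __glibcxx_max_b(signed char, 8)      -> ((((1 << 6) - 1) << 1) + 1) == 127
// __glibcxx_min_b(signed char, 8)      -> -127 - 1 == -128
// __glibcxx_digits10_b(signed char, 8) -> 7 * 643L / 2136 == 2
// Note that max_b shifts only digits-1 == 6 places, then doubles and adds 1,
// so it never shifts a 1 into (or past) the sign bit.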
155 | #define __glibcxx_max_digits10(T) \ |
156 | (2 + (T) * 643L / 2136) |
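A quick sanity check of the two decimal-digit formulas from user code (editor's example; the values assume a 32-bit int and IEEE-754 float/double):

#include <limits>

// digits10: int has 31 value bits, and 31 * 643 / 2136 == 9.
static_assert(std::numeric_limits<int>::digits10 == 9, "31 * 643L / 2136");
// max_digits10: float has a 24-bit mantissa, and 2 + 24 * 643 / 2136 == 2 + 7;
// double has 53 bits, giving 2 + 15 == 17.
static_assert(std::numeric_limits<float>::max_digits10 == 9, "2 + 24*643/2136");
static_assert(std::numeric_limits<double>::max_digits10 == 17, "2 + 53*643/2136");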
157 | |
158 | namespace std _GLIBCXX_VISIBILITY(default)
159 | { |
160 | _GLIBCXX_BEGIN_NAMESPACE_VERSION |
161 | |
162 | /** |
163 | * @brief Describes the rounding style for floating-point types. |
164 | * |
165 | * This is used in the std::numeric_limits class. |
166 | */ |
167 | enum float_round_style |
168 | { |
169 | round_indeterminate = -1, /// Intermediate. |
170 | round_toward_zero = 0, /// To zero. |
171 | round_to_nearest = 1, /// To the nearest representable value. |
172 | round_toward_infinity = 2, /// To infinity. |
173 | round_toward_neg_infinity = 3 /// To negative infinity. |
174 | }; |
175 | |
176 | /** |
177 | * @brief Describes the denormalization for floating-point types. |
178 | * |
179 | * These values represent the presence or absence of a variable number |
180 | * of exponent bits. This type is used in the std::numeric_limits class. |
181 | */ |
182 | enum float_denorm_style |
183 | { |
184 | /// Indeterminate at compile time whether denormalized values are allowed. |
185 | denorm_indeterminate = -1, |
186 | /// The type does not allow denormalized values. |
187 | denorm_absent = 0, |
188 | /// The type allows denormalized values. |
189 | denorm_present = 1 |
190 | }; |
191 | |
192 | /** |
193 | * @brief Part of std::numeric_limits. |
194 | * |
195 | * The @c static @c const members are usable as integral constant |
196 | * expressions. |
197 | * |
198 | * @note This is a separate class for purposes of efficiency; you |
199 | * should only access these members as part of an instantiation |
200 | * of the std::numeric_limits class. |
201 | */ |
202 | struct __numeric_limits_base |
203 | { |
204 | /** This will be true for all fundamental types (which have |
205 | specializations), and false for everything else. */ |
206 | static _GLIBCXX_USE_CONSTEXPR bool is_specialized = false;
207 |
208 | /** The number of @c radix digits that can be represented without change: for
209 | integer types, the number of non-sign bits in the representation; for
210 | floating types, the number of @c radix digits in the mantissa. */
211 | static _GLIBCXX_USE_CONSTEXPR int digits = 0;
212 |
213 | /** The number of base 10 digits that can be represented without change. */
214 | static _GLIBCXX_USE_CONSTEXPR int digits10 = 0;
215 |
216 | #if __cplusplus >= 201103L
217 | /** The number of base 10 digits required to ensure that values which
218 | differ are always differentiated. */
219 | static constexpr int max_digits10 = 0;
220 | #endif
221 |
222 | /** True if the type is signed. */
223 | static _GLIBCXX_USE_CONSTEXPR bool is_signed = false;
224 |
225 | /** True if the type is integer. */
226 | static _GLIBCXX_USE_CONSTEXPR bool is_integer = false;
227 |
228 | /** True if the type uses an exact representation. All integer types are
229 | exact, but not all exact types are integer. For example, rational and
230 | fixed-exponent representations are exact but not integer. */
231 | static _GLIBCXX_USE_CONSTEXPR bool is_exact = false;
232 |
233 | /** For integer types, specifies the base of the representation. For
234 | floating types, specifies the base of the exponent representation. */
235 | static _GLIBCXX_USE_CONSTEXPR int radix = 0;
236 |
237 | /** The minimum negative integer such that @c radix raised to the power of
238 | (one less than that integer) is a normalized floating point number. */
239 | static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0;
240 |
241 | /** The minimum negative integer such that 10 raised to that power is in
242 | the range of normalized floating point numbers. */
243 | static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0;
244 |
245 | /** The maximum positive integer such that @c radix raised to the power of
246 | (one less than that integer) is a representable finite floating point
247 | number. */
248 | static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0;
249 |
250 | /** The maximum positive integer such that 10 raised to that power is in
251 | the range of representable finite floating point numbers. */
252 | static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0;
253 |
254 | /** True if the type has a representation for positive infinity. */
255 | static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false;
256 |
257 | /** True if the type has a representation for a quiet (non-signaling)
258 | Not a Number. */
259 | static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false;
260 |
261 | /** True if the type has a representation for a signaling
262 | Not a Number. */
263 | static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false;
264 |
265 | /** See std::float_denorm_style for more information. */
266 | static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm = denorm_absent;
267 |
268 | /** True if loss of accuracy is detected as a denormalization loss,
269 | rather than as an inexact result. */
270 | static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false;
271 |
272 | /** True if-and-only-if the type adheres to the IEC 559 standard, also
273 | known as IEEE 754. (Only makes sense for floating point types.) */
274 | static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false;
275 |
276 | /** True if the set of values representable by the type is
277 | finite. All built-in types are bounded; this member would be
278 | false for arbitrary precision types. */
279 | static _GLIBCXX_USE_CONSTEXPR bool is_bounded = false;
280 |
281 | /** True if the type is @e modulo. A type is modulo if, for any
282 | operation involving +, -, or * on values of that type whose
283 | result would fall outside the range [min(),max()], the value
284 | returned differs from the true value by an integer multiple of
285 | max() - min() + 1. On most machines, this is false for floating
286 | types, true for unsigned integers, and true for signed integers.
287 | See PR22200 about signed integers. */
288 | static _GLIBCXX_USE_CONSTEXPR bool is_modulo = false;
289 |
290 | /** True if trapping is implemented for this type. */
291 | static _GLIBCXX_USE_CONSTEXPR bool traps = false;
292 |
293 | /** True if tininess is detected before rounding. (see IEC 559) */
294 | static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false;
295 |
296 | /** See std::float_round_style for more information. This is only
297 | meaningful for floating types; integer types will all be
298 | round_toward_zero. */
299 | static _GLIBCXX_USE_CONSTEXPR float_round_style round_style =
300 | round_toward_zero;
301 | }; |
302 | |
303 | /** |
304 | * @brief Properties of fundamental types. |
305 | * |
306 | * This class allows a program to obtain information about the |
307 | * representation of a fundamental type on a given platform. For |
308 | * non-fundamental types, the functions will return 0 and the data |
309 | * members will all be @c false. |
310 | * |
311 | * _GLIBCXX_RESOLVE_LIB_DEFECTS: DRs 201 and 184 (hi Gaby!) are |
312 | * noted, but not incorporated in this document (yet).
313 | */ |
314 | template<typename _Tp> |
315 | struct numeric_limits : public __numeric_limits_base |
316 | { |
317 | /** The minimum finite value, or for floating types with |
318 | denormalization, the minimum positive normalized value. */ |
319 | static _GLIBCXX_CONSTEXPR _Tp
320 | min() _GLIBCXX_USE_NOEXCEPT { return _Tp(); }
321 |
322 | /** The maximum finite value. */
323 | static _GLIBCXX_CONSTEXPR _Tp
324 | max() _GLIBCXX_USE_NOEXCEPT { return _Tp(); }
325 |
326 | #if __cplusplus >= 201103L
327 | /** A finite value x such that there is no other finite value y
328 | * where y < x. */
329 | static constexpr _Tp
330 | lowest() noexcept { return _Tp(); }
331 | #endif
332 |
333 | /** The @e machine @e epsilon: the difference between 1 and the least
334 | value greater than 1 that is representable. */
335 | static _GLIBCXX_CONSTEXPR _Tp
336 | epsilon() _GLIBCXX_USE_NOEXCEPT { return _Tp(); }
337 |
338 | /** The maximum rounding error measurement (see LIA-1). */
339 | static _GLIBCXX_CONSTEXPR _Tp
340 | round_error() _GLIBCXX_USE_NOEXCEPT { return _Tp(); }
341 |
342 | /** The representation of positive infinity, if @c has_infinity. */
343 | static _GLIBCXX_CONSTEXPR _Tp
344 | infinity() _GLIBCXX_USE_NOEXCEPT { return _Tp(); }
345 |
346 | /** The representation of a quiet Not a Number,
347 | if @c has_quiet_NaN. */
348 | static _GLIBCXX_CONSTEXPR _Tp
349 | quiet_NaN() _GLIBCXX_USE_NOEXCEPT { return _Tp(); }
350 |
351 | /** The representation of a signaling Not a Number, if
352 | @c has_signaling_NaN. */
353 | static _GLIBCXX_CONSTEXPR _Tp
354 | signaling_NaN() _GLIBCXX_USE_NOEXCEPT { return _Tp(); }
355 |
356 | /** The minimum positive denormalized value. For types where
357 | @c has_denorm is false, this is the minimum positive normalized
358 | value. */
359 | static _GLIBCXX_CONSTEXPR _Tp
360 | denorm_min() _GLIBCXX_USE_NOEXCEPT { return _Tp(); }
361 | }; |
362 | |
363 | #if __cplusplus >= 201103L
364 | template<typename _Tp> |
365 | struct numeric_limits<const _Tp> |
366 | : public numeric_limits<_Tp> { }; |
367 | |
368 | template<typename _Tp> |
369 | struct numeric_limits<volatile _Tp> |
370 | : public numeric_limits<_Tp> { }; |
371 | |
372 | template<typename _Tp> |
373 | struct numeric_limits<const volatile _Tp> |
374 | : public numeric_limits<_Tp> { }; |
375 | #endif |
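The primary template's deliberately inert defaults are easiest to see from user code (editor's sketch; MyType is a hypothetical non-fundamental type):

#include <limits>

struct MyType {}; // hypothetical: no numeric_limits specialization exists

// The primary template answers for any type, but flags itself as unspecialized
// and leaves every member at its zero/false default.
static_assert(!std::numeric_limits<MyType>::is_specialized, "no specialization");
static_assert(std::numeric_limits<MyType>::digits == 0, "members default to zero");
static_assert(std::numeric_limits<int>::is_specialized, "fundamental types are");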
376 | |
377 | // Now there follow 16 explicit specializations. Yes, 16. Make sure |
378 | // you get the count right. (18 in c++0x mode) |
379 | |
380 | /// numeric_limits<bool> specialization. |
381 | template<> |
382 | struct numeric_limits<bool> |
383 | { |
384 | static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;
385 |
386 | static _GLIBCXX_CONSTEXPR bool
387 | min() _GLIBCXX_USE_NOEXCEPT { return false; }
388 |
389 | static _GLIBCXX_CONSTEXPR bool
390 | max() _GLIBCXX_USE_NOEXCEPT { return true; }
391 |
392 | #if __cplusplus >= 201103L
393 | static constexpr bool
394 | lowest() noexcept { return min(); }
395 | #endif
396 | static _GLIBCXX_USE_CONSTEXPR int digits = 1;
397 | static _GLIBCXX_USE_CONSTEXPR int digits10 = 0;
398 | #if __cplusplus >= 201103L
399 | static constexpr int max_digits10 = 0;
400 | #endif
401 | static _GLIBCXX_USE_CONSTEXPR bool is_signed = false;
402 | static _GLIBCXX_USE_CONSTEXPR bool is_integer = true;
403 | static _GLIBCXX_USE_CONSTEXPR bool is_exact = true;
404 | static _GLIBCXX_USE_CONSTEXPR int radix = 2;
405 |
406 | static _GLIBCXX_CONSTEXPR bool
407 | epsilon() _GLIBCXX_USE_NOEXCEPT { return false; }
408 |
409 | static _GLIBCXX_CONSTEXPR bool
410 | round_error() _GLIBCXX_USE_NOEXCEPT { return false; }
411 |
412 | static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0;
413 | static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0;
414 | static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0;
415 | static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0;
416 |
417 | static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false;
418 | static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false;
419 | static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false;
420 | static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
421 | = denorm_absent;
422 | static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false;
423 |
424 | static _GLIBCXX_CONSTEXPR bool
425 | infinity() _GLIBCXX_USE_NOEXCEPT { return false; }
426 |
427 | static _GLIBCXX_CONSTEXPR bool
428 | quiet_NaN() _GLIBCXX_USE_NOEXCEPT { return false; }
429 |
430 | static _GLIBCXX_CONSTEXPR bool
431 | signaling_NaN() _GLIBCXX_USE_NOEXCEPT { return false; }
432 |
433 | static _GLIBCXX_CONSTEXPR bool
434 | denorm_min() _GLIBCXX_USE_NOEXCEPT { return false; }
435 |
436 | static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false;
437 | static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
438 | static _GLIBCXX_USE_CONSTEXPR bool is_modulo = false;
439 |
440 | // It is not clear what it means for a boolean type to trap.
441 | // This is a DR on the LWG issue list. Here, I use integer
442 | // promotion semantics.
443 | static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_integral_traps;
444 | static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false;
445 | static _GLIBCXX_USE_CONSTEXPR float_round_style round_style
446 | = round_toward_zero;
447 | }; |
448 | |
449 | /// numeric_limits<char> specialization. |
450 | template<> |
451 | struct numeric_limits<char> |
452 | { |
453 | static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;
454 |
455 | static _GLIBCXX_CONSTEXPR char
456 | min() _GLIBCXX_USE_NOEXCEPT { return __glibcxx_min(char); }
457 |
458 | static _GLIBCXX_CONSTEXPR char
459 | max() _GLIBCXX_USE_NOEXCEPT { return __glibcxx_max(char); }
460 |
461 | #if __cplusplus >= 201103L
462 | static constexpr char
463 | lowest() noexcept { return min(); }
464 | #endif
465 |
466 | static _GLIBCXX_USE_CONSTEXPR int digits = __glibcxx_digits (char);
467 | static _GLIBCXX_USE_CONSTEXPR int digits10 = __glibcxx_digits10 (char);
468 | #if __cplusplus >= 201103L
469 | static constexpr int max_digits10 = 0;
470 | #endif
471 | static _GLIBCXX_USE_CONSTEXPR bool is_signed = __glibcxx_signed (char);
472 | static _GLIBCXX_USE_CONSTEXPR bool is_integer = true;
473 | static _GLIBCXX_USE_CONSTEXPR bool is_exact = true;
474 | static _GLIBCXX_USE_CONSTEXPR int radix = 2;
475 |
476 | static _GLIBCXX_CONSTEXPR char
477 | epsilon() _GLIBCXX_USE_NOEXCEPT { return 0; }
478 |
479 | static _GLIBCXX_CONSTEXPR char
480 | round_error() _GLIBCXX_USE_NOEXCEPT { return 0; }
481 |
482 | static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0;
483 | static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0;
484 | static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0;
485 | static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0;
486 |
487 | static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false;
488 | static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false;
489 | static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false;
490 | static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
491 | = denorm_absent;
492 | static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false;
493 |
494 | static _GLIBCXX_CONSTEXPR
495 | char infinity() _GLIBCXX_USE_NOEXCEPT { return char(); }
496 |
497 | static _GLIBCXX_CONSTEXPR char
498 | quiet_NaN() _GLIBCXX_USE_NOEXCEPT { return char(); }
499 |
500 | static _GLIBCXX_CONSTEXPR char
501 | signaling_NaN() _GLIBCXX_USE_NOEXCEPT { return char(); }
502 |
503 | static _GLIBCXX_CONSTEXPR char
504 | denorm_min() _GLIBCXX_USE_NOEXCEPT { return static_cast<char>(0); }
505 |
506 | static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false;
507 | static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
508 | static _GLIBCXX_USE_CONSTEXPR bool is_modulo = !is_signed;
509 |
510 | static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_integral_traps;
511 | static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false;
512 | static _GLIBCXX_USE_CONSTEXPR float_round_style round_style
513 | = round_toward_zero;
514 | }; |
515 | |
516 | /// numeric_limits<signed char> specialization. |
517 | template<> |
518 | struct numeric_limits<signed char> |
519 | { |
520 | static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;
521 |
522 | static _GLIBCXX_CONSTEXPR signed char
523 | min() _GLIBCXX_USE_NOEXCEPT { return -__SCHAR_MAX__ - 1; }
524 |
525 | static _GLIBCXX_CONSTEXPR signed char
526 | max() _GLIBCXX_USE_NOEXCEPT { return __SCHAR_MAX__; }
527 |
528 | #if __cplusplus >= 201103L
529 | static constexpr signed char
530 | lowest() noexcept { return min(); }
531 | #endif
532 |
533 | static _GLIBCXX_USE_CONSTEXPR int digits = __glibcxx_digits (signed char);
534 | static _GLIBCXX_USE_CONSTEXPR int digits10
535 | = __glibcxx_digits10 (signed char);
536 | #if __cplusplus >= 201103L
537 | static constexpr int max_digits10 = 0;
538 | #endif
539 | static _GLIBCXX_USE_CONSTEXPR bool is_signed = true;
540 | static _GLIBCXX_USE_CONSTEXPR bool is_integer = true;
541 | static _GLIBCXX_USE_CONSTEXPR bool is_exact = true;
542 | static _GLIBCXX_USE_CONSTEXPR int radix = 2;
543 |
544 | static _GLIBCXX_CONSTEXPR signed char
545 | epsilon() _GLIBCXX_USE_NOEXCEPT { return 0; }
546 |
547 | static _GLIBCXX_CONSTEXPR signed char
548 | round_error() _GLIBCXX_USE_NOEXCEPT { return 0; }
549 |
550 | static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0;
551 | static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0;
552 | static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0;
553 | static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0;
554 |
555 | static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false;
556 | static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false;
557 | static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false;
558 | static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
559 | = denorm_absent;
560 | static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false;
561 |
562 | static _GLIBCXX_CONSTEXPR signed char
563 | infinity() _GLIBCXX_USE_NOEXCEPT { return static_cast<signed char>(0); }
564 |
565 | static _GLIBCXX_CONSTEXPR signed char
566 | quiet_NaN() _GLIBCXX_USE_NOEXCEPT { return static_cast<signed char>(0); }
567 |
568 | static _GLIBCXX_CONSTEXPR signed char
569 | signaling_NaN() _GLIBCXX_USE_NOEXCEPT
570 | { return static_cast<signed char>(0); }
571 |
572 | static _GLIBCXX_CONSTEXPR signed char
573 | denorm_min() _GLIBCXX_USE_NOEXCEPT
574 | { return static_cast<signed char>(0); }
575 |
576 | static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false;
577 | static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
578 | static _GLIBCXX_USE_CONSTEXPR bool is_modulo = false;
579 |
580 | static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_integral_traps;
581 | static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false;
582 | static _GLIBCXX_USE_CONSTEXPR float_round_style round_style
583 | = round_toward_zero;
584 | }; |
585 | |
586 | /// numeric_limits<unsigned char> specialization. |
587 | template<> |
588 | struct numeric_limits<unsigned char> |
589 | { |
590 | static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;
591 |
592 | static _GLIBCXX_CONSTEXPR unsigned char
593 | min() _GLIBCXX_USE_NOEXCEPT { return 0; }
594 |
595 | static _GLIBCXX_CONSTEXPR unsigned char
596 | max() _GLIBCXX_USE_NOEXCEPT { return __SCHAR_MAX__ * 2U + 1; }
597 |
598 | #if __cplusplus >= 201103L
599 | static constexpr unsigned char
600 | lowest() noexcept { return min(); }
601 | #endif
602 |
603 | static _GLIBCXX_USE_CONSTEXPR int digits
604 | = __glibcxx_digits (unsigned char);
605 | static _GLIBCXX_USE_CONSTEXPR int digits10
606 | = __glibcxx_digits10 (unsigned char);
607 | #if __cplusplus >= 201103L
608 | static constexpr int max_digits10 = 0;
609 | #endif
610 | static _GLIBCXX_USE_CONSTEXPR bool is_signed = false;
611 | static _GLIBCXX_USE_CONSTEXPR bool is_integer = true;
612 | static _GLIBCXX_USE_CONSTEXPR bool is_exact = true;
613 | static _GLIBCXX_USE_CONSTEXPR int radix = 2;
614 |
615 | static _GLIBCXX_CONSTEXPR unsigned char
616 | epsilon() _GLIBCXX_USE_NOEXCEPT { return 0; }
617 |
618 | static _GLIBCXX_CONSTEXPR unsigned char
619 | round_error() _GLIBCXX_USE_NOEXCEPT { return 0; }
620 |
621 | static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0;
622 | static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0;
623 | static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0;
624 | static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0;
625 |
626 | static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false;
627 | static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false;
628 | static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false;
629 | static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
630 | = denorm_absent;
631 | static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false;
632 |
633 | static _GLIBCXX_CONSTEXPR unsigned char
634 | infinity() _GLIBCXX_USE_NOEXCEPT
635 | { return static_cast<unsigned char>(0); }
636 |
637 | static _GLIBCXX_CONSTEXPR unsigned char
638 | quiet_NaN() _GLIBCXX_USE_NOEXCEPT
639 | { return static_cast<unsigned char>(0); }
640 |
641 | static _GLIBCXX_CONSTEXPR unsigned char
642 | signaling_NaN() _GLIBCXX_USE_NOEXCEPT
643 | { return static_cast<unsigned char>(0); }
644 |
645 | static _GLIBCXX_CONSTEXPR unsigned char
646 | denorm_min() _GLIBCXX_USE_NOEXCEPT
647 | { return static_cast<unsigned char>(0); }
648 |
649 | static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false;
650 | static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
651 | static _GLIBCXX_USE_CONSTEXPR bool is_modulo = true;
652 |
653 | static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_integral_traps;
654 | static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false;
655 | static _GLIBCXX_USE_CONSTEXPR float_round_style round_style
656 | = round_toward_zero;
657 | }; |
658 | |
659 | /// numeric_limits<wchar_t> specialization. |
660 | template<> |
661 | struct numeric_limits<wchar_t> |
662 | { |
663 | static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;
664 |
665 | static _GLIBCXX_CONSTEXPR wchar_t
666 | min() _GLIBCXX_USE_NOEXCEPT { return __glibcxx_min (wchar_t); }
667 |
668 | static _GLIBCXX_CONSTEXPR wchar_t
669 | max() _GLIBCXX_USE_NOEXCEPT { return __glibcxx_max (wchar_t); }
670 |
671 | #if __cplusplus >= 201103L
672 | static constexpr wchar_t
673 | lowest() noexcept { return min(); }
674 | #endif
675 |
676 | static _GLIBCXX_USE_CONSTEXPR int digits = __glibcxx_digits (wchar_t);
677 | static _GLIBCXX_USE_CONSTEXPR int digits10
678 | = __glibcxx_digits10 (wchar_t);
679 | #if __cplusplus >= 201103L
680 | static constexpr int max_digits10 = 0;
681 | #endif
682 | static _GLIBCXX_USE_CONSTEXPR bool is_signed = __glibcxx_signed (wchar_t);
683 | static _GLIBCXX_USE_CONSTEXPR bool is_integer = true;
684 | static _GLIBCXX_USE_CONSTEXPR bool is_exact = true;
685 | static _GLIBCXX_USE_CONSTEXPR int radix = 2;
686 |
687 | static _GLIBCXX_CONSTEXPR wchar_t
688 | epsilon() _GLIBCXX_USE_NOEXCEPT { return 0; }
689 |
690 | static _GLIBCXX_CONSTEXPR wchar_t
691 | round_error() _GLIBCXX_USE_NOEXCEPT { return 0; }
692 |
693 | static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0;
694 | static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0;
695 | static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0;
696 | static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0;
697 |
698 | static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false;
699 | static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false;
700 | static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false;
701 | static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
702 | = denorm_absent;
703 | static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false;
704 |
705 | static _GLIBCXX_CONSTEXPR wchar_t
706 | infinity() _GLIBCXX_USE_NOEXCEPT { return wchar_t(); }
707 |
708 | static _GLIBCXX_CONSTEXPR wchar_t
709 | quiet_NaN() _GLIBCXX_USE_NOEXCEPT { return wchar_t(); }
710 |
711 | static _GLIBCXX_CONSTEXPR wchar_t
712 | signaling_NaN() _GLIBCXX_USE_NOEXCEPT { return wchar_t(); }
713 |
714 | static _GLIBCXX_CONSTEXPR wchar_t
715 | denorm_min() _GLIBCXX_USE_NOEXCEPT { return wchar_t(); }
716 |
717 | static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false;
718 | static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
719 | static _GLIBCXX_USE_CONSTEXPR bool is_modulo = !is_signed;
720 |
721 | static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_integral_traps;
722 | static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false;
723 | static _GLIBCXX_USE_CONSTEXPR float_round_style round_style
724 | = round_toward_zero;
725 | }; |
726 | |
727 | #if __cplusplus >= 201103L
728 | /// numeric_limits<char16_t> specialization. |
729 | template<> |
730 | struct numeric_limits<char16_t> |
731 | { |
732 | static constexpr bool is_specialized = true; |
733 | |
734 | static constexpr char16_t |
735 | min() noexcept { return __glibcxx_min (char16_t); } |
736 | |
737 | static constexpr char16_t |
738 | max() noexcept { return __glibcxx_max (char16_t); } |
739 | |
740 | static constexpr char16_t |
741 | lowest() noexcept { return min(); } |
742 | |
743 | static constexpr int digits = __glibcxx_digits (char16_t); |
744 | static constexpr int digits10 = __glibcxx_digits10 (char16_t); |
745 | static constexpr int max_digits10 = 0; |
746 | static constexpr bool is_signed = __glibcxx_signed (char16_t); |
747 | static constexpr bool is_integer = true; |
748 | static constexpr bool is_exact = true; |
749 | static constexpr int radix = 2; |
750 | |
751 | static constexpr char16_t |
752 | epsilon() noexcept { return 0; } |
753 | |
754 | static constexpr char16_t |
755 | round_error() noexcept { return 0; } |
756 | |
757 | static constexpr int min_exponent = 0; |
758 | static constexpr int min_exponent10 = 0; |
759 | static constexpr int max_exponent = 0; |
760 | static constexpr int max_exponent10 = 0; |
761 | |
762 | static constexpr bool has_infinity = false; |
763 | static constexpr bool has_quiet_NaN = false; |
764 | static constexpr bool has_signaling_NaN = false; |
765 | static constexpr float_denorm_style has_denorm = denorm_absent; |
766 | static constexpr bool has_denorm_loss = false; |
767 | |
768 | static constexpr char16_t |
769 | infinity() noexcept { return char16_t(); } |
770 | |
771 | static constexpr char16_t |
772 | quiet_NaN() noexcept { return char16_t(); } |
773 | |
774 | static constexpr char16_t |
775 | signaling_NaN() noexcept { return char16_t(); } |
776 | |
777 | static constexpr char16_t |
778 | denorm_min() noexcept { return char16_t(); } |
779 | |
780 | static constexpr bool is_iec559 = false; |
781 | static constexpr bool is_bounded = true; |
782 | static constexpr bool is_modulo = !is_signed; |
783 | |
784 | static constexpr bool traps = __glibcxx_integral_traps;
785 | static constexpr bool tinyness_before = false; |
786 | static constexpr float_round_style round_style = round_toward_zero; |
787 | }; |
788 | |
789 | /// numeric_limits<char32_t> specialization. |
790 | template<> |
791 | struct numeric_limits<char32_t> |
792 | { |
793 | static constexpr bool is_specialized = true; |
794 | |
795 | static constexpr char32_t |
796 | min() noexcept { return __glibcxx_min (char32_t); } |
797 | |
798 | static constexpr char32_t |
799 | max() noexcept { return __glibcxx_max (char32_t); } |
800 | |
801 | static constexpr char32_t |
802 | lowest() noexcept { return min(); } |
803 | |
804 | static constexpr int digits = __glibcxx_digits (char32_t); |
805 | static constexpr int digits10 = __glibcxx_digits10 (char32_t); |
806 | static constexpr int max_digits10 = 0; |
807 | static constexpr bool is_signed = __glibcxx_signed (char32_t); |
808 | static constexpr bool is_integer = true; |
809 | static constexpr bool is_exact = true; |
810 | static constexpr int radix = 2; |
811 | |
812 | static constexpr char32_t |
813 | epsilon() noexcept { return 0; } |
814 | |
815 | static constexpr char32_t |
816 | round_error() noexcept { return 0; } |
817 | |
818 | static constexpr int min_exponent = 0; |
819 | static constexpr int min_exponent10 = 0; |
820 | static constexpr int max_exponent = 0; |
821 | static constexpr int max_exponent10 = 0; |
822 | |
823 | static constexpr bool has_infinity = false; |
824 | static constexpr bool has_quiet_NaN = false; |
825 | static constexpr bool has_signaling_NaN = false; |
826 | static constexpr float_denorm_style has_denorm = denorm_absent; |
827 | static constexpr bool has_denorm_loss = false; |
828 | |
829 | static constexpr char32_t |
830 | infinity() noexcept { return char32_t(); } |
831 | |
832 | static constexpr char32_t |
833 | quiet_NaN() noexcept { return char32_t(); } |
834 | |
835 | static constexpr char32_t |
836 | signaling_NaN() noexcept { return char32_t(); } |
837 | |
838 | static constexpr char32_t |
839 | denorm_min() noexcept { return char32_t(); } |
840 | |
841 | static constexpr bool is_iec559 = false; |
842 | static constexpr bool is_bounded = true; |
843 | static constexpr bool is_modulo = !is_signed; |
844 | |
845 | static constexpr bool traps = __glibcxx_integral_traps;
846 | static constexpr bool tinyness_before = false; |
847 | static constexpr float_round_style round_style = round_toward_zero; |
848 | }; |
849 | #endif |
850 | |
851 | /// numeric_limits<short> specialization. |
852 | template<> |
853 | struct numeric_limits<short> |
854 | { |
855 | static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;
856 |
857 | static _GLIBCXX_CONSTEXPR short
858 | min() _GLIBCXX_USE_NOEXCEPT { return -__SHRT_MAX__ - 1; }
859 |
860 | static _GLIBCXX_CONSTEXPR short
861 | max() _GLIBCXX_USE_NOEXCEPT { return __SHRT_MAX__; }
862 |
863 | #if __cplusplus >= 201103L
864 | static constexpr short
865 | lowest() noexcept { return min(); }
866 | #endif
867 |
868 | static _GLIBCXX_USE_CONSTEXPR int digits = __glibcxx_digits (short);
869 | static _GLIBCXX_USE_CONSTEXPR int digits10 = __glibcxx_digits10 (short);
870 | #if __cplusplus >= 201103L
871 | static constexpr int max_digits10 = 0;
872 | #endif
873 | static _GLIBCXX_USE_CONSTEXPR bool is_signed = true;
874 | static _GLIBCXX_USE_CONSTEXPR bool is_integer = true;
875 | static _GLIBCXX_USE_CONSTEXPR bool is_exact = true;
876 | static _GLIBCXX_USE_CONSTEXPR int radix = 2;
877 |
878 | static _GLIBCXX_CONSTEXPR short
879 | epsilon() _GLIBCXX_USE_NOEXCEPT { return 0; }
880 |
881 | static _GLIBCXX_CONSTEXPR short
882 | round_error() _GLIBCXX_USE_NOEXCEPT { return 0; }
883 |
884 | static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0;
885 | static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0;
886 | static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0;
887 | static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0;
888 |
889 | static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false;
890 | static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false;
891 | static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false;
892 | static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
893 | = denorm_absent;
894 | static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false;
895 |
896 | static _GLIBCXX_CONSTEXPR short
897 | infinity() _GLIBCXX_USE_NOEXCEPT { return short(); }
898 |
899 | static _GLIBCXX_CONSTEXPR short
900 | quiet_NaN() _GLIBCXX_USE_NOEXCEPT { return short(); }
901 |
902 | static _GLIBCXX_CONSTEXPR short
903 | signaling_NaN() _GLIBCXX_USE_NOEXCEPT { return short(); }
904 |
905 | static _GLIBCXX_CONSTEXPR short
906 | denorm_min() _GLIBCXX_USE_NOEXCEPT { return short(); }
907 |
908 | static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false;
909 | static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
910 | static _GLIBCXX_USE_CONSTEXPR bool is_modulo = false;
911 |
912 | static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_integral_traps;
913 | static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false;
914 | static _GLIBCXX_USE_CONSTEXPR float_round_style round_style
915 | = round_toward_zero;
916 | }; |
917 | |
918 | /// numeric_limits<unsigned short> specialization. |
919 | template<> |
920 | struct numeric_limits<unsigned short> |
921 | { |
922 | static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;
923 |
924 | static _GLIBCXX_CONSTEXPR unsigned short
925 | min() _GLIBCXX_USE_NOEXCEPT { return 0; }
926 |
927 | static _GLIBCXX_CONSTEXPR unsigned short
928 | max() _GLIBCXX_USE_NOEXCEPT { return __SHRT_MAX__ * 2U + 1; }
929 |
930 | #if __cplusplus >= 201103L
931 | static constexpr unsigned short
932 | lowest() noexcept { return min(); }
933 | #endif
934 |
935 | static _GLIBCXX_USE_CONSTEXPR int digits
936 | = __glibcxx_digits (unsigned short);
937 | static _GLIBCXX_USE_CONSTEXPR int digits10
938 | = __glibcxx_digits10 (unsigned short);
939 | #if __cplusplus >= 201103L
940 | static constexpr int max_digits10 = 0;
941 | #endif
942 | static _GLIBCXX_USE_CONSTEXPR bool is_signed = false;
943 | static _GLIBCXX_USE_CONSTEXPR bool is_integer = true;
944 | static _GLIBCXX_USE_CONSTEXPR bool is_exact = true;
945 | static _GLIBCXX_USE_CONSTEXPR int radix = 2;
946 |
947 | static _GLIBCXX_CONSTEXPR unsigned short
948 | epsilon() _GLIBCXX_USE_NOEXCEPT { return 0; }
949 |
950 | static _GLIBCXX_CONSTEXPR unsigned short
951 | round_error() _GLIBCXX_USE_NOEXCEPT { return 0; }
952 |
953 | static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0;
954 | static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0;
955 | static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0;
956 | static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0;
957 |
958 | static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false;
959 | static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false;
960 | static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false;
961 | static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
962 | = denorm_absent;
963 | static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false;
964 |
965 | static _GLIBCXX_CONSTEXPR unsigned short
966 | infinity() _GLIBCXX_USE_NOEXCEPT
967 | { return static_cast<unsigned short>(0); }
968 |
969 | static _GLIBCXX_CONSTEXPR unsigned short
970 | quiet_NaN() _GLIBCXX_USE_NOEXCEPT
971 | { return static_cast<unsigned short>(0); }
972 |
973 | static _GLIBCXX_CONSTEXPR unsigned short
974 | signaling_NaN() _GLIBCXX_USE_NOEXCEPT
975 | { return static_cast<unsigned short>(0); }
976 |
977 | static _GLIBCXX_CONSTEXPR unsigned short
978 | denorm_min() _GLIBCXX_USE_NOEXCEPT
979 | { return static_cast<unsigned short>(0); }
980 |
981 | static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false;
982 | static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
983 | static _GLIBCXX_USE_CONSTEXPR bool is_modulo = true;
984 |
985 | static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_integral_traps;
986 | static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false;
987 | static _GLIBCXX_USE_CONSTEXPR float_round_style round_style
988 | = round_toward_zero;
989 | }; |
990 | |
991 | /// numeric_limits<int> specialization. |
992 | template<> |
993 | struct numeric_limits<int> |
994 | { |
995 | static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;
996 |
997 | static _GLIBCXX_CONSTEXPR int
998 | min() _GLIBCXX_USE_NOEXCEPT { return -__INT_MAX__ - 1; }
999 |
1000 | static _GLIBCXX_CONSTEXPR int
1001 | max() _GLIBCXX_USE_NOEXCEPT { return __INT_MAX__; }
1002 |
1003 | #if __cplusplus >= 201103L
1004 | static constexpr int
1005 | lowest() noexcept { return min(); }
1006 | #endif
1007 |
1008 | static _GLIBCXX_USE_CONSTEXPR int digits = __glibcxx_digits (int);
1009 | static _GLIBCXX_USE_CONSTEXPR int digits10 = __glibcxx_digits10 (int);
1010 | #if __cplusplus >= 201103L
1011 | static constexpr int max_digits10 = 0;
1012 | #endif
1013 | static _GLIBCXX_USE_CONSTEXPR bool is_signed = true;
1014 | static _GLIBCXX_USE_CONSTEXPR bool is_integer = true;
1015 | static _GLIBCXX_USE_CONSTEXPR bool is_exact = true;
1016 | static _GLIBCXX_USE_CONSTEXPR int radix = 2;
1017 |
1018 | static _GLIBCXX_CONSTEXPR int
1019 | epsilon() _GLIBCXX_USE_NOEXCEPT { return 0; }
1020 |
1021 | static _GLIBCXX_CONSTEXPR int
1022 | round_error() _GLIBCXX_USE_NOEXCEPT { return 0; }
1023 |
1024 | static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0;
1025 | static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0;
1026 | static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0;
1027 | static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0;
1028 |
1029 | static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false;
1030 | static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false;
1031 | static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false;
1032 | static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
1033 | = denorm_absent;
1034 | static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false;
1035 |
1036 | static _GLIBCXX_CONSTEXPR int
1037 | infinity() _GLIBCXX_USE_NOEXCEPT { return static_cast<int>(0); }
1038 |
1039 | static _GLIBCXX_CONSTEXPR int
1040 | quiet_NaN() _GLIBCXX_USE_NOEXCEPT { return static_cast<int>(0); }
1041 |
1042 | static _GLIBCXX_CONSTEXPR int
1043 | signaling_NaN() _GLIBCXX_USE_NOEXCEPT { return static_cast<int>(0); }
1044 |
1045 | static _GLIBCXX_CONSTEXPR int
1046 | denorm_min() _GLIBCXX_USE_NOEXCEPT { return static_cast<int>(0); }
1047 |
1048 | static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false;
1049 | static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
1050 | static _GLIBCXX_USE_CONSTEXPR bool is_modulo = false;
1051 |
1052 | static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_integral_traps;
1053 | static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false;
1054 | static _GLIBCXX_USE_CONSTEXPR float_round_style round_style
1055 | = round_toward_zero;
1056 | }; |
1057 | |
1058 | /// numeric_limits<unsigned int> specialization. |
1059 | template<> |
1060 | struct numeric_limits<unsigned int> |
1061 | { |
1062 | static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;
1063 |
1064 | static _GLIBCXX_CONSTEXPR unsigned int
1065 | min() _GLIBCXX_USE_NOEXCEPT { return 0; }
1066 |
1067 | static _GLIBCXX_CONSTEXPR unsigned int
1068 | max() _GLIBCXX_USE_NOEXCEPT { return __INT_MAX__ * 2U + 1; }
1069 |
1070 | #if __cplusplus >= 201103L
1071 | static constexpr unsigned int
1072 | lowest() noexcept { return min(); }
1073 | #endif
1074 |
1075 | static _GLIBCXX_USE_CONSTEXPR int digits
1076 | = __glibcxx_digits (unsigned int);
1077 | static _GLIBCXX_USE_CONSTEXPR int digits10
1078 | = __glibcxx_digits10 (unsigned int);
1079 | #if __cplusplus >= 201103L
1080 | static constexpr int max_digits10 = 0;
1081 | #endif
1082 | static _GLIBCXX_USE_CONSTEXPR bool is_signed = false;
1083 | static _GLIBCXX_USE_CONSTEXPR bool is_integer = true;
1084 | static _GLIBCXX_USE_CONSTEXPR bool is_exact = true;
1085 | static _GLIBCXX_USE_CONSTEXPR int radix = 2;
1086 |
1087 | static _GLIBCXX_CONSTEXPR unsigned int
1088 | epsilon() _GLIBCXX_USE_NOEXCEPT { return 0; }
1089 |
1090 | static _GLIBCXX_CONSTEXPR unsigned int
1091 | round_error() _GLIBCXX_USE_NOEXCEPT { return 0; }
1092 |
1093 | static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0;
1094 | static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0;
1095 | static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0;
1096 | static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0;
1097 |
1098 | static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false;
1099 | static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false;
1100 | static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false;
1101 | static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
1102 | = denorm_absent;
1103 | static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false;
1104 |
1105 | static _GLIBCXX_CONSTEXPR unsigned int
1106 | infinity() _GLIBCXX_USE_NOEXCEPT { return static_cast<unsigned int>(0); }
1107 |
1108 | static _GLIBCXX_CONSTEXPR unsigned int
1109 | quiet_NaN() _GLIBCXX_USE_NOEXCEPT
1110 | { return static_cast<unsigned int>(0); }
1111 |
1112 | static _GLIBCXX_CONSTEXPR unsigned int
1113 | signaling_NaN() _GLIBCXX_USE_NOEXCEPT
1114 | { return static_cast<unsigned int>(0); }
1115 |
1116 | static _GLIBCXX_CONSTEXPR unsigned int
1117 | denorm_min() _GLIBCXX_USE_NOEXCEPT
1118 | { return static_cast<unsigned int>(0); }
1119 |
1120 | static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false;
1121 | static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
1122 | static _GLIBCXX_USE_CONSTEXPR bool is_modulo = true;
1123 |
1124 | static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_integral_traps;
1125 | static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false;
1126 | static _GLIBCXX_USE_CONSTEXPR float_round_style round_style
1127 | = round_toward_zero;
1128 | }; |
1129 | |
1130 | /// numeric_limits<long> specialization. |
1131 | template<> |
1132 | struct numeric_limits<long> |
1133 | { |
1134 | static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;
1135 |
1136 | static _GLIBCXX_CONSTEXPR long
1137 | min() _GLIBCXX_USE_NOEXCEPT { return -__LONG_MAX__ - 1; }
1138 |
1139 | static _GLIBCXX_CONSTEXPR long
1140 | max() _GLIBCXX_USE_NOEXCEPT { return __LONG_MAX__; }
1141 |
1142 | #if __cplusplus >= 201103L
1143 | static constexpr long
1144 | lowest() noexcept { return min(); }
1145 | #endif
1146 |
1147 | static _GLIBCXX_USE_CONSTEXPR int digits = __glibcxx_digits (long);
1148 | static _GLIBCXX_USE_CONSTEXPR int digits10 = __glibcxx_digits10 (long);
1149 | #if __cplusplus >= 201103L
1150 | static constexpr int max_digits10 = 0;
1151 | #endif
1152 | static _GLIBCXX_USE_CONSTEXPR bool is_signed = true;
1153 | static _GLIBCXX_USE_CONSTEXPR bool is_integer = true;
1154 | static _GLIBCXX_USE_CONSTEXPR bool is_exact = true;
1155 | static _GLIBCXX_USE_CONSTEXPR int radix = 2;
1156 |
1157 | static _GLIBCXX_CONSTEXPR long
1158 | epsilon() _GLIBCXX_USE_NOEXCEPT { return 0; }
1159 |
1160 | static _GLIBCXX_CONSTEXPR long
1161 | round_error() _GLIBCXX_USE_NOEXCEPT { return 0; }
1162 |
1163 | static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0;
1164 | static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0;
1165 | static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0;
1166 | static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0;
1167 |
1168 | static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false;
1169 | static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false;
1170 | static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false;
1171 | static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
1172 | = denorm_absent;
1173 | static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false;
1174 |
1175 | static _GLIBCXX_CONSTEXPR long
1176 | infinity() _GLIBCXX_USE_NOEXCEPT { return static_cast<long>(0); }
1177 |
1178 | static _GLIBCXX_CONSTEXPR long
1179 | quiet_NaN() _GLIBCXX_USE_NOEXCEPT { return static_cast<long>(0); }
1180 |
1181 | static _GLIBCXX_CONSTEXPR long
1182 | signaling_NaN() _GLIBCXX_USE_NOEXCEPT { return static_cast<long>(0); }
1183 |
1184 | static _GLIBCXX_CONSTEXPR long
1185 | denorm_min() _GLIBCXX_USE_NOEXCEPT { return static_cast<long>(0); }
1186 |
1187 | static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false;
1188 | static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
1189 | static _GLIBCXX_USE_CONSTEXPR bool is_modulo = false;
1190 |
1191 | static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_integral_traps;
1192 | static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false;
1193 | static _GLIBCXX_USE_CONSTEXPR float_round_style round_style
1194 | = round_toward_zero;
1195 | }; |
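  // Editor's note, not part of the original header: min() is spelled
  // -__LONG_MAX__ - 1 because the most negative two's-complement value has
  // no matching positive literal; 9223372036854775808L would overflow a
  // 64-bit long before the unary minus is applied (the macro expansions in
  // this build show __LONG_MAX__ == 9223372036854775807L, i.e. LP64).
  // A minimal usage sketch under that assumption:
  //
  //   #include <limits>
  //   #include <cassert>
  //   int main()
  //   {
  //     long lo = std::numeric_limits<long>::min();
  //     long hi = std::numeric_limits<long>::max();
  //     assert(lo == -hi - 1);   // two's complement: |min| == max + 1
  //   }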

  /// numeric_limits<unsigned long> specialization.
  template<>
    struct numeric_limits<unsigned long>
    {
      static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;

      static _GLIBCXX_CONSTEXPR unsigned long
      min() _GLIBCXX_USE_NOEXCEPT { return 0; }

      static _GLIBCXX_CONSTEXPR unsigned long
      max() _GLIBCXX_USE_NOEXCEPT { return __LONG_MAX__ * 2UL + 1; }

#if __cplusplus >= 201103L
      static constexpr unsigned long
      lowest() noexcept { return min(); }
#endif

      static _GLIBCXX_USE_CONSTEXPR int digits
       = __glibcxx_digits (unsigned long);
      static _GLIBCXX_USE_CONSTEXPR int digits10
       = __glibcxx_digits10 (unsigned long);
#if __cplusplus >= 201103L
      static constexpr int max_digits10 = 0;
#endif
      static _GLIBCXX_USE_CONSTEXPR bool is_signed = false;
      static _GLIBCXX_USE_CONSTEXPR bool is_integer = true;
      static _GLIBCXX_USE_CONSTEXPR bool is_exact = true;
      static _GLIBCXX_USE_CONSTEXPR int radix = 2;

      static _GLIBCXX_CONSTEXPR unsigned long
      epsilon() _GLIBCXX_USE_NOEXCEPT { return 0; }

      static _GLIBCXX_CONSTEXPR unsigned long
      round_error() _GLIBCXX_USE_NOEXCEPT { return 0; }

      static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0;
      static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0;
      static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0;
      static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0;

      static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false;
      static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false;
      static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false;
      static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
       = denorm_absent;
      static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false;

      static _GLIBCXX_CONSTEXPR unsigned long
      infinity() _GLIBCXX_USE_NOEXCEPT
      { return static_cast<unsigned long>(0); }

      static _GLIBCXX_CONSTEXPR unsigned long
      quiet_NaN() _GLIBCXX_USE_NOEXCEPT
      { return static_cast<unsigned long>(0); }

      static _GLIBCXX_CONSTEXPR unsigned long
      signaling_NaN() _GLIBCXX_USE_NOEXCEPT
      { return static_cast<unsigned long>(0); }

      static _GLIBCXX_CONSTEXPR unsigned long
      denorm_min() _GLIBCXX_USE_NOEXCEPT
      { return static_cast<unsigned long>(0); }

      static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false;
      static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
      static _GLIBCXX_USE_CONSTEXPR bool is_modulo = true;

      static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_integral_traps;
      static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false;
      static _GLIBCXX_USE_CONSTEXPR float_round_style round_style
       = round_toward_zero;
    };
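  // Editor's note, not part of the original header: __LONG_MAX__ * 2UL + 1
  // is evaluated in unsigned long, where arithmetic is modular, so the
  // doubling cannot overflow; it produces the all-ones bit pattern.
  // is_modulo = true records exactly that wraparound guarantee.
  // A minimal sketch:
  //
  //   #include <limits>
  //   #include <cassert>
  //   int main()
  //   {
  //     unsigned long m = std::numeric_limits<unsigned long>::max();
  //     assert(m == ~0UL);        // all bits set
  //     assert(m + 1UL == 0UL);   // wraps to zero, as is_modulo promises
  //   }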

  /// numeric_limits<long long> specialization.
  template<>
    struct numeric_limits<long long>
    {
      static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;

      static _GLIBCXX_CONSTEXPR long long
      min() _GLIBCXX_USE_NOEXCEPT { return -__LONG_LONG_MAX__ - 1; }

      static _GLIBCXX_CONSTEXPR long long
      max() _GLIBCXX_USE_NOEXCEPT { return __LONG_LONG_MAX__; }

#if __cplusplus >= 201103L
      static constexpr long long
      lowest() noexcept { return min(); }
#endif

      static _GLIBCXX_USE_CONSTEXPR int digits
       = __glibcxx_digits (long long);
      static _GLIBCXX_USE_CONSTEXPR int digits10
       = __glibcxx_digits10 (long long);
#if __cplusplus >= 201103L
      static constexpr int max_digits10 = 0;
#endif
      static _GLIBCXX_USE_CONSTEXPR bool is_signed = true;
      static _GLIBCXX_USE_CONSTEXPR bool is_integer = true;
      static _GLIBCXX_USE_CONSTEXPR bool is_exact = true;
      static _GLIBCXX_USE_CONSTEXPR int radix = 2;

      static _GLIBCXX_CONSTEXPR long long
      epsilon() _GLIBCXX_USE_NOEXCEPT { return 0; }

      static _GLIBCXX_CONSTEXPR long long
      round_error() _GLIBCXX_USE_NOEXCEPT { return 0; }

      static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0;
      static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0;
      static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0;
      static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0;

      static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false;
      static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false;
      static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false;
      static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
       = denorm_absent;
      static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false;

      static _GLIBCXX_CONSTEXPR long long
      infinity() _GLIBCXX_USE_NOEXCEPT { return static_cast<long long>(0); }

      static _GLIBCXX_CONSTEXPR long long
      quiet_NaN() _GLIBCXX_USE_NOEXCEPT { return static_cast<long long>(0); }

      static _GLIBCXX_CONSTEXPR long long
      signaling_NaN() _GLIBCXX_USE_NOEXCEPT
      { return static_cast<long long>(0); }

      static _GLIBCXX_CONSTEXPR long long
      denorm_min() _GLIBCXX_USE_NOEXCEPT { return static_cast<long long>(0); }

      static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false;
      static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
      static _GLIBCXX_USE_CONSTEXPR bool is_modulo = false;

      static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_integral_traps;
      static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false;
      static _GLIBCXX_USE_CONSTEXPR float_round_style round_style
       = round_toward_zero;
    };
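  // Editor's note, not part of the original header: __glibcxx_digits(T)
  // expands to sizeof(T) * __CHAR_BIT__ minus one bit when T is signed, and
  // __glibcxx_digits10(T) to __glibcxx_digits(T) * 643L / 2136, where
  // 643/2136 approximates log10(2). Worked through for a 64-bit long long:
  // digits == 63 and digits10 == 63 * 643 / 2136 == 18, i.e. every 18-digit
  // decimal value is representable, but not every 19-digit one.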

  /// numeric_limits<unsigned long long> specialization.
  template<>
    struct numeric_limits<unsigned long long>
    {
      static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;

      static _GLIBCXX_CONSTEXPR unsigned long long
      min() _GLIBCXX_USE_NOEXCEPT { return 0; }

      static _GLIBCXX_CONSTEXPR unsigned long long
      max() _GLIBCXX_USE_NOEXCEPT { return __LONG_LONG_MAX__ * 2ULL + 1; }

#if __cplusplus >= 201103L
      static constexpr unsigned long long
      lowest() noexcept { return min(); }
#endif

      static _GLIBCXX_USE_CONSTEXPR int digits
       = __glibcxx_digits (unsigned long long);
      static _GLIBCXX_USE_CONSTEXPR int digits10
       = __glibcxx_digits10 (unsigned long long);
#if __cplusplus >= 201103L
      static constexpr int max_digits10 = 0;
#endif
      static _GLIBCXX_USE_CONSTEXPR bool is_signed = false;
      static _GLIBCXX_USE_CONSTEXPR bool is_integer = true;
      static _GLIBCXX_USE_CONSTEXPR bool is_exact = true;
      static _GLIBCXX_USE_CONSTEXPR int radix = 2;

      static _GLIBCXX_CONSTEXPR unsigned long long
      epsilon() _GLIBCXX_USE_NOEXCEPT { return 0; }

      static _GLIBCXX_CONSTEXPR unsigned long long
      round_error() _GLIBCXX_USE_NOEXCEPT { return 0; }

      static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0;
      static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0;
      static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0;
      static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0;

      static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false;
      static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false;
      static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false;
      static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
       = denorm_absent;
      static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false;

      static _GLIBCXX_CONSTEXPR unsigned long long
      infinity() _GLIBCXX_USE_NOEXCEPT
      { return static_cast<unsigned long long>(0); }

      static _GLIBCXX_CONSTEXPR unsigned long long
      quiet_NaN() _GLIBCXX_USE_NOEXCEPT
      { return static_cast<unsigned long long>(0); }

      static _GLIBCXX_CONSTEXPR unsigned long long
      signaling_NaN() _GLIBCXX_USE_NOEXCEPT
      { return static_cast<unsigned long long>(0); }

      static _GLIBCXX_CONSTEXPR unsigned long long
      denorm_min() _GLIBCXX_USE_NOEXCEPT
      { return static_cast<unsigned long long>(0); }

      static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false;
      static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
      static _GLIBCXX_USE_CONSTEXPR bool is_modulo = true;

      static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_integral_traps;
      static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false;
      static _GLIBCXX_USE_CONSTEXPR float_round_style round_style
       = round_toward_zero;
    };

#if !defined(__STRICT_ANSI__)

#define __INT_N(TYPE, BITSIZE, EXT, UEXT) \
  template<> \
    struct numeric_limits<TYPE> \
    { \
      static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true; \
 \
      static _GLIBCXX_CONSTEXPR TYPE \
      min() _GLIBCXX_USE_NOEXCEPT { return __glibcxx_min_b (TYPE, BITSIZE); } \
 \
      static _GLIBCXX_CONSTEXPR TYPE \
      max() _GLIBCXX_USE_NOEXCEPT { return __glibcxx_max_b (TYPE, BITSIZE); } \
 \
      static _GLIBCXX_USE_CONSTEXPR int digits \
       = BITSIZE - 1; \
      static _GLIBCXX_USE_CONSTEXPR int digits10 \
       = (BITSIZE - 1) * 643L / 2136; \
 \
      static _GLIBCXX_USE_CONSTEXPR bool is_signed = true; \
      static _GLIBCXX_USE_CONSTEXPR bool is_integer = true; \
      static _GLIBCXX_USE_CONSTEXPR bool is_exact = true; \
      static _GLIBCXX_USE_CONSTEXPR int radix = 2; \
 \
      static _GLIBCXX_CONSTEXPR TYPE \
      epsilon() _GLIBCXX_USE_NOEXCEPT { return 0; } \
 \
      static _GLIBCXX_CONSTEXPR TYPE \
      round_error() _GLIBCXX_USE_NOEXCEPT { return 0; } \
 \
      EXT \
 \
      static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0; \
      static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0; \
      static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0; \
      static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0; \
 \
      static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false; \
      static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false; \
      static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false; \
      static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm \
       = denorm_absent; \
      static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false; \
 \
      static _GLIBCXX_CONSTEXPR TYPE \
      infinity() _GLIBCXX_USE_NOEXCEPT \
      { return static_cast<TYPE>(0); } \
 \
      static _GLIBCXX_CONSTEXPR TYPE \
      quiet_NaN() _GLIBCXX_USE_NOEXCEPT \
      { return static_cast<TYPE>(0); } \
 \
      static _GLIBCXX_CONSTEXPR TYPE \
      signaling_NaN() _GLIBCXX_USE_NOEXCEPT \
      { return static_cast<TYPE>(0); } \
 \
      static _GLIBCXX_CONSTEXPR TYPE \
      denorm_min() _GLIBCXX_USE_NOEXCEPT \
      { return static_cast<TYPE>(0); } \
 \
      static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false; \
      static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true; \
      static _GLIBCXX_USE_CONSTEXPR bool is_modulo = false; \
 \
      static _GLIBCXX_USE_CONSTEXPR bool traps \
       = __glibcxx_integral_traps; \
      static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false; \
      static _GLIBCXX_USE_CONSTEXPR float_round_style round_style \
       = round_toward_zero; \
    }; \
 \
  template<> \
    struct numeric_limits<unsigned TYPE> \
    { \
      static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true; \
 \
      static _GLIBCXX_CONSTEXPR unsigned TYPE \
      min() _GLIBCXX_USE_NOEXCEPT { return 0; } \
 \
      static _GLIBCXX_CONSTEXPR unsigned TYPE \
      max() _GLIBCXX_USE_NOEXCEPT \
      { return __glibcxx_max_b (unsigned TYPE, BITSIZE); } \
 \
      UEXT \
 \
      static _GLIBCXX_USE_CONSTEXPR int digits \
       = BITSIZE; \
      static _GLIBCXX_USE_CONSTEXPR int digits10 \
       = BITSIZE * 643L / 2136; \
      static _GLIBCXX_USE_CONSTEXPR bool is_signed = false; \
      static _GLIBCXX_USE_CONSTEXPR bool is_integer = true; \
      static _GLIBCXX_USE_CONSTEXPR bool is_exact = true; \
      static _GLIBCXX_USE_CONSTEXPR int radix = 2; \
 \
      static _GLIBCXX_CONSTEXPR unsigned TYPE \
      epsilon() _GLIBCXX_USE_NOEXCEPT { return 0; } \
 \
      static _GLIBCXX_CONSTEXPR unsigned TYPE \
      round_error() _GLIBCXX_USE_NOEXCEPT { return 0; } \
 \
      static _GLIBCXX_USE_CONSTEXPR int min_exponent = 0; \
      static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = 0; \
      static _GLIBCXX_USE_CONSTEXPR int max_exponent = 0; \
      static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = 0; \
 \
      static _GLIBCXX_USE_CONSTEXPR bool has_infinity = false; \
      static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = false; \
      static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = false; \
      static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm \
       = denorm_absent; \
      static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss = false; \
 \
      static _GLIBCXX_CONSTEXPR unsigned TYPE \
      infinity() _GLIBCXX_USE_NOEXCEPT \
      { return static_cast<unsigned TYPE>(0); } \
 \
      static _GLIBCXX_CONSTEXPR unsigned TYPE \
      quiet_NaN() _GLIBCXX_USE_NOEXCEPT \
      { return static_cast<unsigned TYPE>(0); } \
 \
      static _GLIBCXX_CONSTEXPR unsigned TYPE \
      signaling_NaN() _GLIBCXX_USE_NOEXCEPT \
      { return static_cast<unsigned TYPE>(0); } \
 \
      static _GLIBCXX_CONSTEXPR unsigned TYPE \
      denorm_min() _GLIBCXX_USE_NOEXCEPT \
      { return static_cast<unsigned TYPE>(0); } \
 \
      static _GLIBCXX_USE_CONSTEXPR bool is_iec559 = false; \
      static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true; \
      static _GLIBCXX_USE_CONSTEXPR bool is_modulo = true; \
 \
      static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_integral_traps; \
      static _GLIBCXX_USE_CONSTEXPR bool tinyness_before = false; \
      static _GLIBCXX_USE_CONSTEXPR float_round_style round_style \
       = round_toward_zero; \
    };
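// Editor's note, not part of the original header: the expansions recorded in
// this report show that __glibcxx_min_b and __glibcxx_max_b branch on
// signedness. For a signed TYPE, max is computed as
//   ((((TYPE)1 << (BITSIZE - 2)) - 1) << 1) + 1,
// which equals 2^(BITSIZE-1) - 1 while never shifting by more than
// BITSIZE - 2 bits, so no shift reaches or exceeds the sign bit; min is then
// -max - 1. For an unsigned TYPE, max is simply ~(TYPE)0 and min is 0.
// Worked through for an assumed BITSIZE of 8:
// (((1 << 6) - 1) << 1) + 1 == (63 << 1) + 1 == 127.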

#if __cplusplus >= 201103L

#define __INT_N_201103(TYPE) \
      static constexpr TYPE \
      lowest() noexcept { return min(); } \
      static constexpr int max_digits10 = 0;

#define __INT_N_U201103(TYPE) \
      static constexpr unsigned TYPE \
      lowest() noexcept { return min(); } \
      static constexpr int max_digits10 = 0;

#else
#define __INT_N_201103(TYPE)
#define __INT_N_U201103(TYPE)
#endif

#ifdef __GLIBCXX_TYPE_INT_N_0
  __INT_N(__GLIBCXX_TYPE_INT_N_0, __GLIBCXX_BITSIZE_INT_N_0,
	  __INT_N_201103 (__GLIBCXX_TYPE_INT_N_0), __INT_N_U201103 (__GLIBCXX_TYPE_INT_N_0))
#endif
#ifdef __GLIBCXX_TYPE_INT_N_1
  __INT_N (__GLIBCXX_TYPE_INT_N_1, __GLIBCXX_BITSIZE_INT_N_1,
	   __INT_N_201103 (__GLIBCXX_TYPE_INT_N_1), __INT_N_U201103 (__GLIBCXX_TYPE_INT_N_1))
#endif
#ifdef __GLIBCXX_TYPE_INT_N_2
  __INT_N (__GLIBCXX_TYPE_INT_N_2, __GLIBCXX_BITSIZE_INT_N_2,
	   __INT_N_201103 (__GLIBCXX_TYPE_INT_N_2), __INT_N_U201103 (__GLIBCXX_TYPE_INT_N_2))
#endif
#ifdef __GLIBCXX_TYPE_INT_N_3
  __INT_N (__GLIBCXX_TYPE_INT_N_3, __GLIBCXX_BITSIZE_INT_N_3,
	   __INT_N_201103 (__GLIBCXX_TYPE_INT_N_3), __INT_N_U201103 (__GLIBCXX_TYPE_INT_N_3))
#endif

#undef __INT_N
#undef __INT_N_201103
#undef __INT_N_U201103

#endif
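// Editor's note, not part of the original header: on GCC for 64-bit targets
// the INT_N slot 0 is typically __int128 with __GLIBCXX_BITSIZE_INT_N_0 ==
// 128 (an assumption about this build, not something the header spells out).
// Under that assumption, and outside strict -std=c++NN modes, the
// instantiations above would make the following sketch compile:
//
//   #include <limits>
//   // digits == BITSIZE - 1 for the signed type, BITSIZE for the unsigned
//   static_assert(std::numeric_limits<__int128>::digits == 127, "");
//   static_assert(std::numeric_limits<unsigned __int128>::digits == 128, "");
//   int main() { }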

  /// numeric_limits<float> specialization.
  template<>
    struct numeric_limits<float>
    {
      static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;

      static _GLIBCXX_CONSTEXPR float
      min() _GLIBCXX_USE_NOEXCEPT { return __FLT_MIN__; }

      static _GLIBCXX_CONSTEXPR float
      max() _GLIBCXX_USE_NOEXCEPT { return __FLT_MAX__; }

#if __cplusplus >= 201103L
      static constexpr float
      lowest() noexcept { return -__FLT_MAX__; }
#endif

      static _GLIBCXX_USE_CONSTEXPR int digits = __FLT_MANT_DIG__;
      static _GLIBCXX_USE_CONSTEXPR int digits10 = __FLT_DIG__;
#if __cplusplus >= 201103L
      static constexpr int max_digits10
       = __glibcxx_max_digits10 (__FLT_MANT_DIG__);
#endif
      static _GLIBCXX_USE_CONSTEXPR bool is_signed = true;
      static _GLIBCXX_USE_CONSTEXPR bool is_integer = false;
      static _GLIBCXX_USE_CONSTEXPR bool is_exact = false;
      static _GLIBCXX_USE_CONSTEXPR int radix = __FLT_RADIX__;

      static _GLIBCXX_CONSTEXPR float
      epsilon() _GLIBCXX_USE_NOEXCEPT { return __FLT_EPSILON__; }

      static _GLIBCXX_CONSTEXPR float
      round_error() _GLIBCXX_USE_NOEXCEPT { return 0.5F; }

      static _GLIBCXX_USE_CONSTEXPR int min_exponent = __FLT_MIN_EXP__;
      static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = __FLT_MIN_10_EXP__;
      static _GLIBCXX_USE_CONSTEXPR int max_exponent = __FLT_MAX_EXP__;
      static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = __FLT_MAX_10_EXP__;

      static _GLIBCXX_USE_CONSTEXPR bool has_infinity = __FLT_HAS_INFINITY__;
      static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = __FLT_HAS_QUIET_NAN__;
      static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = has_quiet_NaN;
      static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
       = bool(__FLT_HAS_DENORM__) ? denorm_present : denorm_absent;
      static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss
       = __glibcxx_float_has_denorm_loss;

      static _GLIBCXX_CONSTEXPR float
      infinity() _GLIBCXX_USE_NOEXCEPT { return __builtin_huge_valf(); }

      static _GLIBCXX_CONSTEXPR float
      quiet_NaN() _GLIBCXX_USE_NOEXCEPT { return __builtin_nanf(""); }

      static _GLIBCXX_CONSTEXPR float
      signaling_NaN() _GLIBCXX_USE_NOEXCEPT { return __builtin_nansf(""); }

      static _GLIBCXX_CONSTEXPR float
      denorm_min() _GLIBCXX_USE_NOEXCEPT { return __FLT_DENORM_MIN__; }

      static _GLIBCXX_USE_CONSTEXPR bool is_iec559
       = has_infinity && has_quiet_NaN && has_denorm == denorm_present;
      static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
      static _GLIBCXX_USE_CONSTEXPR bool is_modulo = false;

      static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_float_traps;
      static _GLIBCXX_USE_CONSTEXPR bool tinyness_before
       = __glibcxx_float_tinyness_before;
      static _GLIBCXX_USE_CONSTEXPR float_round_style round_style
       = round_to_nearest;
    };
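  // Editor's note, not part of the original header: __glibcxx_max_digits10(n)
  // computes 2 + n * 643L / 2136, where 643/2136 again approximates log10(2),
  // so a 24-bit significand gives 2 + 24 * 643 / 2136 == 2 + 7 == 9 decimal
  // digits: enough to round-trip any float through decimal text and back.
  // A minimal sketch, assuming IEEE single precision:
  //
  //   #include <limits>
  //   static_assert(std::numeric_limits<float>::max_digits10 == 9, "");
  //   int main() { }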

#undef __glibcxx_float_has_denorm_loss
#undef __glibcxx_float_traps
#undef __glibcxx_float_tinyness_before

  /// numeric_limits<double> specialization.
  template<>
    struct numeric_limits<double>
    {
      static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;

      static _GLIBCXX_CONSTEXPR double
      min() _GLIBCXX_USE_NOEXCEPT { return __DBL_MIN__; }

      static _GLIBCXX_CONSTEXPR double
      max() _GLIBCXX_USE_NOEXCEPT { return __DBL_MAX__; }

#if __cplusplus >= 201103L
      static constexpr double
      lowest() noexcept { return -__DBL_MAX__; }
#endif

      static _GLIBCXX_USE_CONSTEXPR int digits = __DBL_MANT_DIG__;
      static _GLIBCXX_USE_CONSTEXPR int digits10 = __DBL_DIG__;
#if __cplusplus >= 201103L
      static constexpr int max_digits10
       = __glibcxx_max_digits10 (__DBL_MANT_DIG__);
#endif
      static _GLIBCXX_USE_CONSTEXPR bool is_signed = true;
      static _GLIBCXX_USE_CONSTEXPR bool is_integer = false;
      static _GLIBCXX_USE_CONSTEXPR bool is_exact = false;
      static _GLIBCXX_USE_CONSTEXPR int radix = __FLT_RADIX__;

      static _GLIBCXX_CONSTEXPR double
      epsilon() _GLIBCXX_USE_NOEXCEPT { return __DBL_EPSILON__; }

      static _GLIBCXX_CONSTEXPR double
      round_error() _GLIBCXX_USE_NOEXCEPT { return 0.5; }

      static _GLIBCXX_USE_CONSTEXPR int min_exponent = __DBL_MIN_EXP__;
      static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = __DBL_MIN_10_EXP__;
      static _GLIBCXX_USE_CONSTEXPR int max_exponent = __DBL_MAX_EXP__;
      static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = __DBL_MAX_10_EXP__;

      static _GLIBCXX_USE_CONSTEXPR bool has_infinity = __DBL_HAS_INFINITY__;
      static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = __DBL_HAS_QUIET_NAN__;
      static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = has_quiet_NaN;
      static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
       = bool(__DBL_HAS_DENORM__) ? denorm_present : denorm_absent;
      static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss
       = __glibcxx_double_has_denorm_loss;

      static _GLIBCXX_CONSTEXPR double
      infinity() _GLIBCXX_USE_NOEXCEPT { return __builtin_huge_val(); }

      static _GLIBCXX_CONSTEXPR double
      quiet_NaN() _GLIBCXX_USE_NOEXCEPT { return __builtin_nan(""); }

      static _GLIBCXX_CONSTEXPR double
      signaling_NaN() _GLIBCXX_USE_NOEXCEPT { return __builtin_nans(""); }

      static _GLIBCXX_CONSTEXPR double
      denorm_min() _GLIBCXX_USE_NOEXCEPT { return __DBL_DENORM_MIN__; }

      static _GLIBCXX_USE_CONSTEXPR bool is_iec559
       = has_infinity && has_quiet_NaN && has_denorm == denorm_present;
      static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
      static _GLIBCXX_USE_CONSTEXPR bool is_modulo = false;

      static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_double_traps;
      static _GLIBCXX_USE_CONSTEXPR bool tinyness_before
       = __glibcxx_double_tinyness_before;
      static _GLIBCXX_USE_CONSTEXPR float_round_style round_style
       = round_to_nearest;
    };
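  // Editor's note, not part of the original header: is_iec559 is derived
  // from the compiler's own description of the format rather than hardcoded;
  // it is true only when infinities, quiet NaNs, and denormals are all
  // available, which matches the IEEE 754 (IEC 559) feature set. A minimal
  // runtime sketch:
  //
  //   #include <limits>
  //   #include <cmath>
  //   #include <cassert>
  //   int main()
  //   {
  //     if (std::numeric_limits<double>::is_iec559) {
  //       double inf = std::numeric_limits<double>::infinity();
  //       assert(std::isinf(inf) && inf > std::numeric_limits<double>::max());
  //     }
  //   }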

#undef __glibcxx_double_has_denorm_loss
#undef __glibcxx_double_traps
#undef __glibcxx_double_tinyness_before

  /// numeric_limits<long double> specialization.
  template<>
    struct numeric_limits<long double>
    {
      static _GLIBCXX_USE_CONSTEXPR bool is_specialized = true;

      static _GLIBCXX_CONSTEXPR long double
      min() _GLIBCXX_USE_NOEXCEPT { return __LDBL_MIN__; }

      static _GLIBCXX_CONSTEXPR long double
      max() _GLIBCXX_USE_NOEXCEPT { return __LDBL_MAX__; }

#if __cplusplus >= 201103L
      static constexpr long double
      lowest() noexcept { return -__LDBL_MAX__; }
#endif

      static _GLIBCXX_USE_CONSTEXPR int digits = __LDBL_MANT_DIG__;
      static _GLIBCXX_USE_CONSTEXPR int digits10 = __LDBL_DIG__;
#if __cplusplus >= 201103L
      static _GLIBCXX_USE_CONSTEXPR int max_digits10
       = __glibcxx_max_digits10 (__LDBL_MANT_DIG__);
#endif
      static _GLIBCXX_USE_CONSTEXPR bool is_signed = true;
      static _GLIBCXX_USE_CONSTEXPR bool is_integer = false;
      static _GLIBCXX_USE_CONSTEXPR bool is_exact = false;
      static _GLIBCXX_USE_CONSTEXPR int radix = __FLT_RADIX__;

      static _GLIBCXX_CONSTEXPR long double
      epsilon() _GLIBCXX_USE_NOEXCEPT { return __LDBL_EPSILON__; }

      static _GLIBCXX_CONSTEXPR long double
      round_error() _GLIBCXX_USE_NOEXCEPT { return 0.5L; }

      static _GLIBCXX_USE_CONSTEXPR int min_exponent = __LDBL_MIN_EXP__;
      static _GLIBCXX_USE_CONSTEXPR int min_exponent10 = __LDBL_MIN_10_EXP__;
      static _GLIBCXX_USE_CONSTEXPR int max_exponent = __LDBL_MAX_EXP__;
      static _GLIBCXX_USE_CONSTEXPR int max_exponent10 = __LDBL_MAX_10_EXP__;

      static _GLIBCXX_USE_CONSTEXPR bool has_infinity = __LDBL_HAS_INFINITY__;
      static _GLIBCXX_USE_CONSTEXPR bool has_quiet_NaN = __LDBL_HAS_QUIET_NAN__;
      static _GLIBCXX_USE_CONSTEXPR bool has_signaling_NaN = has_quiet_NaN;
      static _GLIBCXX_USE_CONSTEXPR float_denorm_style has_denorm
       = bool(__LDBL_HAS_DENORM__) ? denorm_present : denorm_absent;
      static _GLIBCXX_USE_CONSTEXPR bool has_denorm_loss
       = __glibcxx_long_double_has_denorm_loss;

      static _GLIBCXX_CONSTEXPR long double
      infinity() _GLIBCXX_USE_NOEXCEPT { return __builtin_huge_vall(); }

      static _GLIBCXX_CONSTEXPR long double
      quiet_NaN() _GLIBCXX_USE_NOEXCEPT { return __builtin_nanl(""); }

      static _GLIBCXX_CONSTEXPR long double
      signaling_NaN() _GLIBCXX_USE_NOEXCEPT { return __builtin_nansl(""); }

      static _GLIBCXX_CONSTEXPR long double
      denorm_min() _GLIBCXX_USE_NOEXCEPT { return __LDBL_DENORM_MIN__; }

      static _GLIBCXX_USE_CONSTEXPR bool is_iec559
       = has_infinity && has_quiet_NaN && has_denorm == denorm_present;
      static _GLIBCXX_USE_CONSTEXPR bool is_bounded = true;
      static _GLIBCXX_USE_CONSTEXPR bool is_modulo = false;

      static _GLIBCXX_USE_CONSTEXPR bool traps = __glibcxx_long_double_traps;
      static _GLIBCXX_USE_CONSTEXPR bool tinyness_before =
       __glibcxx_long_double_tinyness_before;
      static _GLIBCXX_USE_CONSTEXPR float_round_style round_style =
       round_to_nearest;
    };
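  // Editor's note, not part of the original header: in the build this report
  // was generated from, __LDBL_MANT_DIG__ expanded to 64 and __LDBL_MAX_EXP__
  // to 16384, i.e. the 80-bit x87 extended-precision format rather than IEEE
  // binary128. Because long double varies by target (64-, 80-, or 128-bit
  // formats), the numeric constants above are all taken from __LDBL_*
  // predefined macros instead of being written as literals.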

#undef __glibcxx_long_double_has_denorm_loss
#undef __glibcxx_long_double_traps
#undef __glibcxx_long_double_tinyness_before

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#undef __glibcxx_signed
#undef __glibcxx_min
#undef __glibcxx_max
#undef __glibcxx_digits
#undef __glibcxx_digits10
#undef __glibcxx_max_digits10

#endif // _GLIBCXX_NUMERIC_LIMITS