File: llvm/lib/Target/AMDGPU/SIISelLowering.cpp
Warning: line 4477, column 5: Value stored to 'BR' is never read
//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#if defined(_MSC_VER) || defined(__MINGW32__)
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "si-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> DisableLoopAlignment(
  "amdgpu-disable-loop-alignment",
  cl::desc("Do not align and prefetch loops"),
  cl::init(false));

static bool hasFP32Denormals(const MachineFunction &MF) {
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  return Info->getMode().allFP32Denormals();
}

static bool hasFP64FP16Denormals(const MachineFunction &MF) {
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  return Info->getMode().allFP64FP16Denormals();
}

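// Return the first 32-bit SGPR that the calling-convention state has not yet
// allocated; unreachable if every SGPR is already taken.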
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const GCNSubtarget &STI)
    : AMDGPUTargetLowering(TM, STI),
      Subtarget(&STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
  addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SGPR_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SGPR_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SGPR_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
  addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32RegClass);

    // Unless there are also VOP3P operations, no operations are really legal.
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32RegClass);
    addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
    addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
  }

  if (Subtarget->hasMAIInsts()) {
    addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
    addRegisterClass(MVT::v32f32, &AMDGPU::VReg_1024RegClass);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // The boolean content concept here is too inflexible. Compares only ever
  // really produce a 1-bit result. Any copy/extend from these will turn into a
  // select, and zext/1 or sext/-1 are equally cheap. Arbitrarily choose 0/1, as
  // it's what most targets use.
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);

  // We need to custom lower vector stores from local memory
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v3i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v5i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);
  setOperationAction(ISD::LOAD, MVT::v32i32, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v3i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v5i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::v32i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v3i32, MVT::v3i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v3i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);

#if 0
  setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : { MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                  MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16,
                  MVT::v32i32, MVT::v32f32 }) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }
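
  // Net effect of the loop above: for these wide vector types everything
  // except a handful of memory and vector-rearrangement operations is
  // Expanded, so e.g. an (add v8i32:a, v8i32:b) is unrolled by the legalizer
  // into eight i32 adds.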

  setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }
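
  // The promotions above work by bitcasting through v4i32: e.g.
  // (extract_vector_elt v2i64:v, Idx) is legalized as i32 extracts of
  // elements 2*Idx and 2*Idx+1 from (bitcast v4i32 v), reassembled into
  // an i64.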

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
  setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  // Deal with vec3 vector operations when widened to vec4.
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom);

  // Deal with vec5 vector operations when widened to vec8.
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
  // and output demarshalling
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value,
  // let LLVM add the comparison
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);
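
  // That is, the generic legalizer rewrites
  //   {old, success} = atomic_cmp_swap_with_success(ptr, cmp, new)
  // into
  //   old = atomic_cmp_swap(ptr, cmp, new); success = setcc(old, cmp, eq)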

  if (Subtarget->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // FIXME: This should be narrowed to i32, but that only happens if i64 is
  // illegal.
  // FIXME: Should lower sub-i32 bswaps to bit-ops without v_perm_b32.
  setOperationAction(ISD::BSWAP, MVT::i64, Legal);
  setOperationAction(ISD::BSWAP, MVT::i32, Legal);

  // This is s_memtime on SI and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::FPOW, MVT::f16, Promote);
    setOperationAction(ISD::FLOG, MVT::f16, Custom);
    setOperationAction(ISD::FEXP, MVT::f16, Custom);
    setOperationAction(ISD::FLOG10, MVT::f16, Custom);
  }

  // v_mad_f32 does not support denormals. We report it as unconditionally
  // legal, and the context where it is formed will disallow it when fp32
  // denormals are enabled.
  setOperationAction(ISD::FMAD, MVT::f32, Legal);

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);

  // We only really have 32-bit BFE instructions (and 16-bit on VI).
  //
  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
  // effort to match them now. We want this to be false for i64 cases when the
  // extraction isn't restricted to the upper or lower half. Ideally we would
  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
  // span the midpoint are probably relatively rare, so don't worry about them
  // for now.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);
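
  // With this set, shift-and-mask sequences are kept intact so the AMDGPU
  // combines/patterns can match them to a single BFE, e.g.
  // (and (srl x, 8), 0xff) becoming a bfe_u32 of bits [8, 16)
  // (illustrative example).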

  setOperationAction(ISD::FMINNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMINNUM, MVT::f64, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Custom);


  // These are really only legal for ieee_mode functions. We should be avoiding
  // them for functions that don't have ieee_mode enabled, so just say they are
  // legal.
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);


  if (Subtarget->haveRoundOpsF64()) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTPOP, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Custom);
    setOperationAction(ISD::FSIN, MVT::f16, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);

    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (STI.hasMadF16())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);

    for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // v_perm_b32 can handle either of these.
    setOperationAction(ISD::BSWAP, MVT::i16, Legal);
    setOperationAction(ISD::BSWAP, MVT::v2i16, Legal);
    setOperationAction(ISD::BSWAP, MVT::v4i16, Custom);

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
    setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::STORE, MVT::v4i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::STORE, MVT::v4f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);

    if (!Subtarget->hasVOP3PInsts()) {
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
    }

    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns)
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand);
  }

  if (Subtarget->hasVOP3PInsts()) {
    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal);

    setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);

    setOperationAction(ISD::SHL, MVT::v4i16, Custom);
    setOperationAction(ISD::SRA, MVT::v4i16, Custom);
    setOperationAction(ISD::SRL, MVT::v4i16, Custom);
    setOperationAction(ISD::ADD, MVT::v4i16, Custom);
    setOperationAction(ISD::SUB, MVT::v4i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i16, Custom);

    setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
    setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::UMAX, MVT::v4i16, Custom);

    setOperationAction(ISD::FADD, MVT::v4f16, Custom);
    setOperationAction(ISD::FMUL, MVT::v4f16, Custom);
    setOperationAction(ISD::FMA, MVT::v4f16, Custom);

    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);

    setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f16, Custom);
  }
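
  // The v4i16/v4f16 operations marked Custom above are lowered by splitting
  // the vector into two v2i16/v2f16 halves and doing the packed operation on
  // each half (see splitBinaryVectorOp later in this file).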

  setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
  setOperationAction(ISD::FABS, MVT::v4f16, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
  } else {
    // Legalization hack.
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);

    setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
    setOperationAction(ISD::FABS, MVT::v2f16, Custom);
  }
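
  // Promoting a v2i16/v2f16 select to i32 folds it into a single 32-bit
  // select: (select cc, v2i16:a, v2i16:b) becomes
  // (bitcast v2i16 (select cc, (bitcast i32 a), (bitcast i32 b))).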

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4i16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::FMINNUM_IEEE);
  setTargetDAGCombine(ISD::FMAXNUM_IEEE);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);

  setSchedulingPreference(Sched::RegPressure);
}

const GCNSubtarget *SITargetLowering::getSubtarget() const {
  return Subtarget;
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

// v_mad_mix* support a conversion from f16 to f32.
//
// There is only one special case where this is OK to use while denormals are
// enabled, and we don't currently handle it.
bool SITargetLowering::isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
                                       EVT DestVT, EVT SrcVT) const {
  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
          (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
         DestVT.getScalarType() == MVT::f32 &&
         SrcVT.getScalarType() == MVT::f16 &&
         // TODO: This probably only requires no input flushing?
         !hasFP32Denormals(DAG.getMachineFunction());
}
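
// For example, (fma (fpext f16:a), (fpext f16:b), f32:c) can then be selected
// as a single v_mad_mix_f32/v_fma_mix_f32 using op_sel_hi source modifiers,
// instead of emitting separate v_cvt_f32_f16 conversions.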

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                    CallingConv::ID CC,
                                                    EVT VT) const {
  if (CC == CallingConv::AMDGPU_KERNEL)
    return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);

  if (VT.isVector()) {
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32)
      return ScalarVT.getSimpleVT();

    if (Size > 32)
      return MVT::i32;

    if (Size == 16 && Subtarget->has16BitInsts())
      return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
  } else if (VT.getSizeInBits() > 32)
    return MVT::i32;

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
  if (CC == CallingConv::AMDGPU_KERNEL)
    return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);

  if (VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();

    if (Size == 32)
      return NumElts;

    if (Size > 32)
      return NumElts * ((Size + 31) / 32);

    if (Size == 16 && Subtarget->has16BitInsts())
      return (NumElts + 1) / 2;
  } else if (VT.getSizeInBits() > 32)
    return (VT.getSizeInBits() + 31) / 32;

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}
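
// Worked example: with 16-bit instructions available, a v3f16 argument is
// passed in v2f16 registers, (3 + 1) / 2 == 2 of them; an i64 argument is
// split into (64 + 31) / 32 == 2 i32 registers.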

unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC,
    EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32) {
      RegisterVT = ScalarVT.getSimpleVT();
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size > 32) {
      RegisterVT = MVT::i32;
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts * ((Size + 31) / 32);
      return NumIntermediates;
    }

    // FIXME: We should fix the ABI to be the same on targets without 16-bit
    // support, but unless we can properly handle 3-vectors, it will still be
    // inconsistent.
    if (Size == 16 && Subtarget->has16BitInsts()) {
      RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
      IntermediateVT = RegisterVT;
      NumIntermediates = (NumElts + 1) / 2;
      return NumIntermediates;
    }
  }

  return TargetLowering::getVectorTypeBreakdownForCallingConv(
    Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}

// Peek through TFE struct returns to only use the data size.
static EVT memVTFromImageReturn(Type *Ty) {
  auto *ST = dyn_cast<StructType>(Ty);
  if (!ST)
    return EVT::getEVT(Ty, true);

  // Some intrinsics return an aggregate type - special case to work out the
  // correct memVT.
  //
  // Only limited forms of aggregate type currently expected.
  if (ST->getNumContainedTypes() != 2 ||
      !ST->getContainedType(1)->isIntegerTy(32))
    return EVT();
  return EVT::getEVT(ST->getContainedType(0));
}
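
// For example, an image load with TFE enabled returns { v4f32, i32 } (data
// plus the error flag); only the v4f32 data part determines the memory VT.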

bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          MachineFunction &MF,
                                          unsigned IntrID) const {
  if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
          AMDGPU::lookupRsrcIntrinsic(IntrID)) {
    AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
                                                  (Intrinsic::ID)IntrID);
    if (Attr.hasFnAttribute(Attribute::ReadNone))
      return false;

    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (RsrcIntr->IsImage) {
      Info.ptrVal = MFI->getImagePSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
      Info.align.reset();
    } else {
      Info.ptrVal = MFI->getBufferPSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
    }

    Info.flags = MachineMemOperand::MODereferenceable;
    if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      // TODO: Account for dmask reducing loaded size.
      Info.memVT = memVTFromImageReturn(CI.getType());
      Info.flags |= MachineMemOperand::MOLoad;
    } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
      Info.opc = ISD::INTRINSIC_VOID;
      Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
      Info.flags |= MachineMemOperand::MOStore;
    } else {
      // Atomic
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType());
      Info.flags = MachineMemOperand::MOLoad |
                   MachineMemOperand::MOStore |
                   MachineMemOperand::MODereferenceable;

      // XXX - Should this be volatile without known ordering?
      Info.flags |= MachineMemOperand::MOVolatile;
    }
    return true;
  }

  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align.reset();
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4));
    if (!Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }
  case Intrinsic::amdgcn_buffer_atomic_fadd: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::getVT(CI.getOperand(0)->getType());
    Info.ptrVal = MFI->getBufferPSV(
      *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
      CI.getArgOperand(1));
    Info.align.reset();
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
    if (!Vol || !Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }
  case Intrinsic::amdgcn_global_atomic_fadd: {
    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::getVT(CI.getOperand(0)->getType()
                            ->getPointerElementType());
    Info.ptrVal = CI.getOperand(0);
    Info.align.reset();
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    return true;
  }
  case Intrinsic::amdgcn_ds_append:
  case Intrinsic::amdgcn_ds_consume: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align.reset();
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1));
    if (!Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }
  case Intrinsic::amdgcn_ds_gws_init:
  case Intrinsic::amdgcn_ds_gws_barrier:
  case Intrinsic::amdgcn_ds_gws_sema_v:
  case Intrinsic::amdgcn_ds_gws_sema_br:
  case Intrinsic::amdgcn_ds_gws_sema_p:
  case Intrinsic::amdgcn_ds_gws_sema_release_all: {
    Info.opc = ISD::INTRINSIC_VOID;

    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.ptrVal =
        MFI->getGWSPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());

    // This is an abstract access, but we need to specify a type and size.
    Info.memVT = MVT::i32;
    Info.size = 4;
    Info.align = Align(4);

    Info.flags = MachineMemOperand::MOStore;
    if (IntrID == Intrinsic::amdgcn_ds_gws_barrier)
      Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  return AM.Scale == 0 &&
         (AM.BaseOffs == 0 || Subtarget->getInstrInfo()->isLegalFLATOffset(
                                  AM.BaseOffs, AMDGPUAS::FLAT_ADDRESS,
                                  /*Signed=*/false));
}

bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
  if (Subtarget->hasFlatGlobalInsts())
    return AM.Scale == 0 &&
           (AM.BaseOffs == 0 || Subtarget->getInstrInfo()->isLegalFLATOffset(
                                    AM.BaseOffs, AMDGPUAS::GLOBAL_ADDRESS,
                                    /*Signed=*/true));

  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
    // Assume that we will use FLAT for all global memory accesses
    // on VI.
    // FIXME: This assumption is currently wrong. On VI we still use
    // MUBUF instructions for the r + i addressing mode. As currently
    // implemented, the MUBUF instructions only work on buffers < 4GB.
    // It may be possible to support > 4GB buffers with MUBUF instructions,
    // by setting the stride value in the resource descriptor which would
    // increase the size limit to (stride * 4GB). However, this is risky,
    // because it has never been validated.
    return isLegalFlatAddressingMode(AM);
  }

  return isLegalMUBUFAddressingMode(AM);
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r
    // Or  2 * r + i is allowed as r + r + i.
    return true;
  default: // Don't allow n * r
    return false;
  }
}
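
// For example, an address of the form (2 * r + 16) is accepted because it can
// be materialized as r + r + 16 (addr64 with an immediate offset), while
// (2 * r + base) would need a third register and is rejected.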

bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS, Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  if (AS == AMDGPUAS::GLOBAL_ADDRESS)
    return isLegalGlobalAddressingMode(AM);

  if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
      AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
      AS == AMDGPUAS::BUFFER_FAT_POINTER) {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
      return isLegalGlobalAddressingMode(AM);

    if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8-bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
             AS == AMDGPUAS::REGION_ADDRESS) {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
             AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) {
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);
  } else {
    llvm_unreachable("unhandled address space");
  }
}
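
// Worked example for constant-address (SMRD) offsets: base + 1020 is legal on
// SI (dword offset 255 fits in 8 bits) while base + 1024 (dword offset 256)
// is not; on VI any byte offset below 1 << 20 is accepted.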

bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                        const SelectionDAG &DAG) const {
  if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}
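
// In other words, stores may be merged up to 128 bits (4 dwords) for
// global/flat, up to 64 bits for LDS/GDS (ds_write_b64), and up to 8x the
// configured private element size for scratch.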
1227 | |
1228 | bool SITargetLowering::allowsMisalignedMemoryAccessesImpl( |
1229 | unsigned Size, unsigned AddrSpace, unsigned Align, |
1230 | MachineMemOperand::Flags Flags, bool *IsFast) const { |
1231 | if (IsFast) |
1232 | *IsFast = false; |
1233 | |
1234 | if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS || |
1235 | AddrSpace == AMDGPUAS::REGION_ADDRESS) { |
1236 | // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte |
1237 | // aligned, 8 byte access in a single operation using ds_read2/write2_b32 |
1238 | // with adjacent offsets. |
1239 | bool AlignedBy4 = (Align % 4 == 0); |
1240 | if (IsFast) |
1241 | *IsFast = AlignedBy4; |
1242 | |
1243 | return AlignedBy4; |
1244 | } |
1245 | |
1246 | // FIXME: We have to be conservative here and assume that flat operations |
1247 | // will access scratch. If we had access to the IR function, then we |
1248 | // could determine if any private memory was used in the function. |
1249 | if (!Subtarget->hasUnalignedScratchAccess() && |
1250 | (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS || |
1251 | AddrSpace == AMDGPUAS::FLAT_ADDRESS)) { |
1252 | bool AlignedBy4 = Align >= 4; |
1253 | if (IsFast) |
1254 | *IsFast = AlignedBy4; |
1255 | |
1256 | return AlignedBy4; |
1257 | } |
1258 | |
1259 | if (Subtarget->hasUnalignedBufferAccess()) { |
1260 | // If we have an uniform constant load, it still requires using a slow |
1261 | // buffer instruction if unaligned. |
1262 | if (IsFast) { |
1263 | // Accesses can really be issued as 1-byte aligned or 4-byte aligned, so |
1264 | // 2-byte alignment is worse than 1 unless doing a 2-byte accesss. |
1265 | *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS || |
1266 | AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ? |
1267 | Align >= 4 : Align != 2; |
1268 | } |
1269 | |
1270 | return true; |
1271 | } |
1272 | |
1273 | // Smaller than dword value must be aligned. |
1274 | if (Size < 32) |
1275 | return false; |
1276 | |
1277 | // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the |
1278 | // byte-address are ignored, thus forcing Dword alignment. |
1279 | // This applies to private, global, and constant memory. |
1280 | if (IsFast) |
1281 | *IsFast = true; |
1282 | |
1283 | return Size >= 32 && Align >= 4; |
1284 | } |
1285 | |
1286 | bool SITargetLowering::allowsMisalignedMemoryAccesses( |
1287 | EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags, |
1288 | bool *IsFast) const { |
1289 | if (IsFast) |
1290 | *IsFast = false; |
1291 | |
1292 | // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96, |
1293 | // which isn't a simple VT. |
1294 | // Until MVT is extended to handle this, simply check for the size and |
1295 | // rely on the condition below: allow accesses if the size is a multiple of 4. |
1296 | if (VT == MVT::Other ||
1297 | (VT.getSizeInBits() > 1024 && VT.getStoreSize() > 16)) {
1298 | return false; |
1299 | } |
1300 | |
1301 | return allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AddrSpace, |
1302 | Align, Flags, IsFast); |
1303 | } |
1304 | |
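 | // For example, a 64-byte memcpy with a 4-byte aligned destination is
 | // emitted as four 16-byte v4i32 operations rather than sixteen i32 ones.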
1305 | EVT SITargetLowering::getOptimalMemOpType( |
1306 | const MemOp &Op, const AttributeList &FuncAttributes) const { |
1307 | // FIXME: Should account for address space here. |
1308 | |
1309 | // The default fallback uses the private pointer size as a guess for a type to |
1310 | // use. Make sure we switch these to 64-bit accesses. |
1311 | |
1312 | if (Op.size() >= 16 && |
1313 | Op.isDstAligned(Align(4))) // XXX: Should only do for global |
1314 | return MVT::v4i32; |
1315 | |
1316 | if (Op.size() >= 8 && Op.isDstAligned(Align(4))) |
1317 | return MVT::v2i32; |
1318 | |
1319 | // Use the default. |
1320 | return MVT::Other; |
1321 | } |
1322 | |
1323 | bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS, |
1324 | unsigned DestAS) const { |
1325 | return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS); |
1326 | } |
1327 | |
1328 | bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const { |
1329 | const MemSDNode *MemNode = cast<MemSDNode>(N); |
1330 | const Value *Ptr = MemNode->getMemOperand()->getValue(); |
1331 | const Instruction *I = dyn_cast_or_null<Instruction>(Ptr); |
1332 | return I && I->getMetadata("amdgpu.noclobber"); |
1333 | } |
1334 | |
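 | // e.g. flat -> private just drops the high half of the 64-bit pointer, and
 | // flat <-> global reuse the same 64-bit value, so no code is needed.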
1335 | bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS, |
1336 | unsigned DestAS) const { |
1337 | // Flat -> private/local is a simple truncate. |
1338 | // Flat -> global is a no-op.
1339 | if (SrcAS == AMDGPUAS::FLAT_ADDRESS) |
1340 | return true; |
1341 | |
1342 | return isNoopAddrSpaceCast(SrcAS, DestAS); |
1343 | } |
1344 | |
1345 | bool SITargetLowering::isMemOpUniform(const SDNode *N) const { |
1346 | const MemSDNode *MemNode = cast<MemSDNode>(N); |
1347 | |
1348 | return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand()); |
1349 | } |
1350 | |
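 | // e.g. v4i16 (a power-of-2 type) is split into v2i16 halves that map onto
 | // the packed 16-bit instructions, while v3i16 is widened to v4i16.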
1351 | TargetLoweringBase::LegalizeTypeAction |
1352 | SITargetLowering::getPreferredVectorAction(MVT VT) const { |
1353 | int NumElts = VT.getVectorNumElements(); |
1354 | if (NumElts != 1 && VT.getScalarType().bitsLE(MVT::i16)) |
1355 | return VT.isPow2VectorType() ? TypeSplitVector : TypeWidenVector; |
1356 | return TargetLoweringBase::getPreferredVectorAction(VT); |
1357 | } |
1358 | |
1359 | bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, |
1360 | Type *Ty) const { |
1361 | // FIXME: Could be smarter if called for vector constants. |
1362 | return true; |
1363 | } |
1364 | |
1365 | bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const { |
1366 | if (Subtarget->has16BitInsts() && VT == MVT::i16) { |
1367 | switch (Op) { |
1368 | case ISD::LOAD: |
1369 | case ISD::STORE: |
1370 | |
1371 | // These operations are done with 32-bit instructions anyway. |
1372 | case ISD::AND: |
1373 | case ISD::OR: |
1374 | case ISD::XOR: |
1375 | case ISD::SELECT: |
1376 | // TODO: Extensions? |
1377 | return true; |
1378 | default: |
1379 | return false; |
1380 | } |
1381 | } |
1382 | |
1383 | // SimplifySetCC uses this function to determine whether or not it should |
1384 | // create setcc with i1 operands. We don't have instructions for i1 setcc. |
1385 | if (VT == MVT::i1 && Op == ISD::SETCC) |
1386 | return false; |
1387 | |
1388 | return TargetLowering::isTypeDesirableForOp(Op, VT); |
1389 | } |
1390 | |
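 | // A sketch of the DAG built here: CopyFromReg of the preloaded kernarg
 | // segment pointer (an SGPR pair), followed by a pointer add of the byte
 | // Offset, producing a CONSTANT_ADDRESS pointer to the argument.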
1391 | SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG, |
1392 | const SDLoc &SL, |
1393 | SDValue Chain, |
1394 | uint64_t Offset) const { |
1395 | const DataLayout &DL = DAG.getDataLayout(); |
1396 | MachineFunction &MF = DAG.getMachineFunction(); |
1397 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
1398 | |
1399 | const ArgDescriptor *InputPtrReg; |
1400 | const TargetRegisterClass *RC; |
1401 | |
1402 | std::tie(InputPtrReg, RC) |
1403 | = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); |
1404 | |
1405 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); |
1406 | MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS); |
1407 | SDValue BasePtr = DAG.getCopyFromReg(Chain, SL, |
1408 | MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT); |
1409 | |
1410 | return DAG.getObjectPtrOffset(SL, BasePtr, Offset); |
1411 | } |
1412 | |
1413 | SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG, |
1414 | const SDLoc &SL) const { |
1415 | uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(), |
1416 | FIRST_IMPLICIT); |
1417 | return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset); |
1418 | } |
1419 | |
1420 | SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT, |
1421 | const SDLoc &SL, SDValue Val, |
1422 | bool Signed, |
1423 | const ISD::InputArg *Arg) const { |
1424 | // First, if it is a widened vector, narrow it. |
1425 | if (VT.isVector() && |
1426 | VT.getVectorNumElements() != MemVT.getVectorNumElements()) { |
1427 | EVT NarrowedVT = |
1428 | EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), |
1429 | VT.getVectorNumElements()); |
1430 | Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val, |
1431 | DAG.getConstant(0, SL, MVT::i32)); |
1432 | } |
1433 | |
1434 | // Then convert the vector elements or scalar value. |
1435 | if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) && |
1436 | VT.bitsLT(MemVT)) { |
1437 | unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext; |
1438 | Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT)); |
1439 | } |
1440 | |
1441 | if (MemVT.isFloatingPoint()) |
1442 | Val = getFPExtOrFPTrunc(DAG, Val, SL, VT); |
1443 | else if (Signed) |
1444 | Val = DAG.getSExtOrTrunc(Val, SL, VT); |
1445 | else |
1446 | Val = DAG.getZExtOrTrunc(Val, SL, VT); |
1447 | |
1448 | return Val; |
1449 | } |
1450 | |
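 | // Worked example for the unaligned path below: an i16 argument at byte
 | // Offset 6 loads the i32 at AlignDownOffset 4, shifts right by
 | // OffsetDiff * 8 == 16 bits, and truncates back to 16 bits.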
1451 | SDValue SITargetLowering::lowerKernargMemParameter( |
1452 | SelectionDAG &DAG, EVT VT, EVT MemVT, |
1453 | const SDLoc &SL, SDValue Chain, |
1454 | uint64_t Offset, unsigned Align, bool Signed, |
1455 | const ISD::InputArg *Arg) const { |
1456 | MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); |
1457 | |
1458 | // Try to avoid using an extload by loading earlier than the argument address, |
1459 | // and extracting the relevant bits. The load should hopefully be merged with |
1460 | // the previous argument. |
1461 | if (MemVT.getStoreSize() < 4 && Align < 4) { |
1462 | // TODO: Handle align < 4 and size >= 4 (can happen with packed structs). |
1463 | int64_t AlignDownOffset = alignDown(Offset, 4); |
1464 | int64_t OffsetDiff = Offset - AlignDownOffset; |
1465 | |
1466 | EVT IntVT = MemVT.changeTypeToInteger(); |
1467 | |
1468 | // TODO: If we passed in the base kernel offset we could have a better |
1469 | // alignment than 4, but we don't really need it. |
1470 | SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset); |
1471 | SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4, |
1472 | MachineMemOperand::MODereferenceable | |
1473 | MachineMemOperand::MOInvariant); |
1474 | |
1475 | SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32); |
1476 | SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt); |
1477 | |
1478 | SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract); |
1479 | ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal); |
1480 | ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg); |
1481 | |
1482 | |
1483 | return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL); |
1484 | } |
1485 | |
1486 | SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset); |
1487 | SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align, |
1488 | MachineMemOperand::MODereferenceable | |
1489 | MachineMemOperand::MOInvariant); |
1490 | |
1491 | SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg); |
1492 | return DAG.getMergeValues({ Val, Load.getValue(1) }, SL); |
1493 | } |
1494 | |
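 | // For byval arguments only a frame index into the incoming stack area is
 | // produced; everything else becomes a fixed-object load whose extension
 | // kind is derived from how the value was promoted (see the switch below).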
1495 | SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA, |
1496 | const SDLoc &SL, SDValue Chain, |
1497 | const ISD::InputArg &Arg) const { |
1498 | MachineFunction &MF = DAG.getMachineFunction(); |
1499 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
1500 | |
1501 | if (Arg.Flags.isByVal()) { |
1502 | unsigned Size = Arg.Flags.getByValSize(); |
1503 | int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false); |
1504 | return DAG.getFrameIndex(FrameIdx, MVT::i32); |
1505 | } |
1506 | |
1507 | unsigned ArgOffset = VA.getLocMemOffset(); |
1508 | unsigned ArgSize = VA.getValVT().getStoreSize(); |
1509 | |
1510 | int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true); |
1511 | |
1512 | // Create load nodes to retrieve arguments from the stack. |
1513 | SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); |
1514 | SDValue ArgValue; |
1515 | |
1516 | // For NON_EXTLOAD, generic code in getLoad asserts ValVT == MemVT.
1517 | ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; |
1518 | MVT MemVT = VA.getValVT(); |
1519 | |
1520 | switch (VA.getLocInfo()) { |
1521 | default: |
1522 | break; |
1523 | case CCValAssign::BCvt: |
1524 | MemVT = VA.getLocVT(); |
1525 | break; |
1526 | case CCValAssign::SExt: |
1527 | ExtType = ISD::SEXTLOAD; |
1528 | break; |
1529 | case CCValAssign::ZExt: |
1530 | ExtType = ISD::ZEXTLOAD; |
1531 | break; |
1532 | case CCValAssign::AExt: |
1533 | ExtType = ISD::EXTLOAD; |
1534 | break; |
1535 | } |
1536 | |
1537 | ArgValue = DAG.getExtLoad( |
1538 | ExtType, SL, VA.getLocVT(), Chain, FIN, |
1539 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), |
1540 | MemVT); |
1541 | return ArgValue; |
1542 | } |
1543 | |
1544 | SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG, |
1545 | const SIMachineFunctionInfo &MFI, |
1546 | EVT VT, |
1547 | AMDGPUFunctionArgInfo::PreloadedValue PVID) const { |
1548 | const ArgDescriptor *Reg; |
1549 | const TargetRegisterClass *RC; |
1550 | |
1551 | std::tie(Reg, RC) = MFI.getPreloadedValue(PVID); |
1552 | return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT); |
1553 | } |
1554 | |
1555 | static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits, |
1556 | CallingConv::ID CallConv, |
1557 | ArrayRef<ISD::InputArg> Ins, |
1558 | BitVector &Skipped, |
1559 | FunctionType *FType, |
1560 | SIMachineFunctionInfo *Info) { |
1561 | for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) { |
1562 | const ISD::InputArg *Arg = &Ins[I]; |
1563 | |
1564 | assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
1565 | "vector type argument should have been split");
1566 | |
1567 | // First check if it's a PS input addr. |
1568 | if (CallConv == CallingConv::AMDGPU_PS && |
1569 | !Arg->Flags.isInReg() && PSInputNum <= 15) { |
1570 | bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum); |
1571 | |
1572 | // Inconveniently only the first part of the split is marked as isSplit, |
1573 | // so skip to the end. We only want to increment PSInputNum once for the |
1574 | // entire split argument. |
1575 | if (Arg->Flags.isSplit()) { |
1576 | while (!Arg->Flags.isSplitEnd()) { |
1577 | assert((!Arg->VT.isVector() ||
1578 | Arg->VT.getScalarSizeInBits() == 16) &&
1579 | "unexpected vector split in ps argument type");
1580 | if (!SkipArg) |
1581 | Splits.push_back(*Arg); |
1582 | Arg = &Ins[++I]; |
1583 | } |
1584 | } |
1585 | |
1586 | if (SkipArg) { |
1587 | // We can safely skip PS inputs. |
1588 | Skipped.set(Arg->getOrigArgIndex()); |
1589 | ++PSInputNum; |
1590 | continue; |
1591 | } |
1592 | |
1593 | Info->markPSInputAllocated(PSInputNum); |
1594 | if (Arg->Used) |
1595 | Info->markPSInputEnabled(PSInputNum); |
1596 | |
1597 | ++PSInputNum; |
1598 | } |
1599 | |
1600 | Splits.push_back(*Arg); |
1601 | } |
1602 | } |
1603 | |
1604 | // Allocate special inputs passed in VGPRs. |
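 | // For entry functions the hardware preloads the workitem IDs into fixed
 | // registers: X in VGPR0, Y in VGPR1, Z in VGPR2, as recorded below.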
1605 | void SITargetLowering::allocateSpecialEntryInputVGPRs(CCState &CCInfo, |
1606 | MachineFunction &MF, |
1607 | const SIRegisterInfo &TRI, |
1608 | SIMachineFunctionInfo &Info) const { |
1609 | const LLT S32 = LLT::scalar(32); |
1610 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
1611 | |
1612 | if (Info.hasWorkItemIDX()) { |
1613 | Register Reg = AMDGPU::VGPR0; |
1614 | MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32); |
1615 | |
1616 | CCInfo.AllocateReg(Reg); |
1617 | Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg)); |
1618 | } |
1619 | |
1620 | if (Info.hasWorkItemIDY()) { |
1621 | Register Reg = AMDGPU::VGPR1; |
1622 | MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32); |
1623 | |
1624 | CCInfo.AllocateReg(Reg); |
1625 | Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg)); |
1626 | } |
1627 | |
1628 | if (Info.hasWorkItemIDZ()) { |
1629 | Register Reg = AMDGPU::VGPR2; |
1630 | MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32); |
1631 | |
1632 | CCInfo.AllocateReg(Reg); |
1633 | Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg)); |
1634 | } |
1635 | } |
1636 | |
1637 | // Try to allocate a VGPR at the end of the argument list, or, if no
1638 | // argument VGPRs are left, allocate a stack slot.
1639 | // If \p Mask is given, it indicates the bitfield position in the register.
1640 | // If \p Arg is given, use it with the new \p Mask instead of allocating new.
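 | // With packed workitem IDs the same VGPR is shared: e.g. X uses mask 0x3ff
 | // (bits 9:0), Y uses 0x3ff << 10 and Z uses 0x3ff << 20, as set up in
 | // allocateSpecialInputVGPRs below.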
1641 | static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u, |
1642 | ArgDescriptor Arg = ArgDescriptor()) { |
1643 | if (Arg.isSet()) |
1644 | return ArgDescriptor::createArg(Arg, Mask); |
1645 | |
1646 | ArrayRef<MCPhysReg> ArgVGPRs |
1647 | = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32); |
1648 | unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs); |
1649 | if (RegIdx == ArgVGPRs.size()) { |
1650 | // Spill to stack required. |
1651 | int64_t Offset = CCInfo.AllocateStack(4, 4); |
1652 | |
1653 | return ArgDescriptor::createStack(Offset, Mask); |
1654 | } |
1655 | |
1656 | unsigned Reg = ArgVGPRs[RegIdx]; |
1657 | Reg = CCInfo.AllocateReg(Reg); |
1658 | assert(Reg != AMDGPU::NoRegister);
1659 | |
1660 | MachineFunction &MF = CCInfo.getMachineFunction(); |
1661 | Register LiveInVReg = MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); |
1662 | MF.getRegInfo().setType(LiveInVReg, LLT::scalar(32)); |
1663 | return ArgDescriptor::createRegister(Reg, Mask); |
1664 | } |
1665 | |
1666 | static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo, |
1667 | const TargetRegisterClass *RC, |
1668 | unsigned NumArgRegs) { |
1669 | ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), NumArgRegs);
1670 | unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs); |
1671 | if (RegIdx == ArgSGPRs.size()) |
1672 | report_fatal_error("ran out of SGPRs for arguments"); |
1673 | |
1674 | unsigned Reg = ArgSGPRs[RegIdx]; |
1675 | Reg = CCInfo.AllocateReg(Reg); |
1676 | assert(Reg != AMDGPU::NoRegister);
1677 | |
1678 | MachineFunction &MF = CCInfo.getMachineFunction(); |
1679 | MF.addLiveIn(Reg, RC); |
1680 | return ArgDescriptor::createRegister(Reg); |
1681 | } |
1682 | |
1683 | static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) { |
1684 | return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32); |
1685 | } |
1686 | |
1687 | static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) { |
1688 | return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16); |
1689 | } |
1690 | |
1691 | void SITargetLowering::allocateSpecialInputVGPRs(CCState &CCInfo, |
1692 | MachineFunction &MF, |
1693 | const SIRegisterInfo &TRI, |
1694 | SIMachineFunctionInfo &Info) const { |
1695 | const unsigned Mask = 0x3ff; |
1696 | ArgDescriptor Arg; |
1697 | |
1698 | if (Info.hasWorkItemIDX()) { |
1699 | Arg = allocateVGPR32Input(CCInfo, Mask); |
1700 | Info.setWorkItemIDX(Arg); |
1701 | } |
1702 | |
1703 | if (Info.hasWorkItemIDY()) { |
1704 | Arg = allocateVGPR32Input(CCInfo, Mask << 10, Arg); |
1705 | Info.setWorkItemIDY(Arg); |
1706 | } |
1707 | |
1708 | if (Info.hasWorkItemIDZ()) |
1709 | Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo, Mask << 20, Arg)); |
1710 | } |
1711 | |
1712 | void SITargetLowering::allocateSpecialInputSGPRs( |
1713 | CCState &CCInfo, |
1714 | MachineFunction &MF, |
1715 | const SIRegisterInfo &TRI, |
1716 | SIMachineFunctionInfo &Info) const { |
1717 | auto &ArgInfo = Info.getArgInfo(); |
1718 | |
1719 | // TODO: Unify handling with private memory pointers. |
1720 | |
1721 | if (Info.hasDispatchPtr()) |
1722 | ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo); |
1723 | |
1724 | if (Info.hasQueuePtr()) |
1725 | ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo); |
1726 | |
1727 | if (Info.hasKernargSegmentPtr()) |
1728 | ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo); |
1729 | |
1730 | if (Info.hasDispatchID()) |
1731 | ArgInfo.DispatchID = allocateSGPR64Input(CCInfo); |
1732 | |
1733 | // flat_scratch_init is not applicable for non-kernel functions. |
1734 | |
1735 | if (Info.hasWorkGroupIDX()) |
1736 | ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo); |
1737 | |
1738 | if (Info.hasWorkGroupIDY()) |
1739 | ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo); |
1740 | |
1741 | if (Info.hasWorkGroupIDZ()) |
1742 | ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo); |
1743 | |
1744 | if (Info.hasImplicitArgPtr()) |
1745 | ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo); |
1746 | } |
1747 | |
1748 | // Allocate special inputs passed in user SGPRs. |
1749 | void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo, |
1750 | MachineFunction &MF, |
1751 | const SIRegisterInfo &TRI, |
1752 | SIMachineFunctionInfo &Info) const { |
1753 | if (Info.hasImplicitBufferPtr()) { |
1754 | unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI); |
1755 | MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass); |
1756 | CCInfo.AllocateReg(ImplicitBufferPtrReg); |
1757 | } |
1758 | |
1759 | // FIXME: How should these inputs interact with inreg / custom SGPR inputs? |
1760 | if (Info.hasPrivateSegmentBuffer()) { |
1761 | unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI); |
1762 | MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass); |
1763 | CCInfo.AllocateReg(PrivateSegmentBufferReg); |
1764 | } |
1765 | |
1766 | if (Info.hasDispatchPtr()) { |
1767 | unsigned DispatchPtrReg = Info.addDispatchPtr(TRI); |
1768 | MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass); |
1769 | CCInfo.AllocateReg(DispatchPtrReg); |
1770 | } |
1771 | |
1772 | if (Info.hasQueuePtr()) { |
1773 | unsigned QueuePtrReg = Info.addQueuePtr(TRI); |
1774 | MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass); |
1775 | CCInfo.AllocateReg(QueuePtrReg); |
1776 | } |
1777 | |
1778 | if (Info.hasKernargSegmentPtr()) { |
1779 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
1780 | Register InputPtrReg = Info.addKernargSegmentPtr(TRI); |
1781 | CCInfo.AllocateReg(InputPtrReg); |
1782 | |
1783 | Register VReg = MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass); |
1784 | MRI.setType(VReg, LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64)); |
1785 | } |
1786 | |
1787 | if (Info.hasDispatchID()) { |
1788 | unsigned DispatchIDReg = Info.addDispatchID(TRI); |
1789 | MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass); |
1790 | CCInfo.AllocateReg(DispatchIDReg); |
1791 | } |
1792 | |
1793 | if (Info.hasFlatScratchInit()) { |
1794 | unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI); |
1795 | MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass); |
1796 | CCInfo.AllocateReg(FlatScratchInitReg); |
1797 | } |
1798 | |
1799 | // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read |
1800 | // these from the dispatch pointer. |
1801 | } |
1802 | |
1803 | // Allocate special input registers that are initialized per-wave. |
1804 | void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo, |
1805 | MachineFunction &MF, |
1806 | SIMachineFunctionInfo &Info, |
1807 | CallingConv::ID CallConv, |
1808 | bool IsShader) const { |
1809 | if (Info.hasWorkGroupIDX()) { |
1810 | unsigned Reg = Info.addWorkGroupIDX(); |
1811 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); |
1812 | CCInfo.AllocateReg(Reg); |
1813 | } |
1814 | |
1815 | if (Info.hasWorkGroupIDY()) { |
1816 | unsigned Reg = Info.addWorkGroupIDY(); |
1817 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); |
1818 | CCInfo.AllocateReg(Reg); |
1819 | } |
1820 | |
1821 | if (Info.hasWorkGroupIDZ()) { |
1822 | unsigned Reg = Info.addWorkGroupIDZ(); |
1823 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); |
1824 | CCInfo.AllocateReg(Reg); |
1825 | } |
1826 | |
1827 | if (Info.hasWorkGroupInfo()) { |
1828 | unsigned Reg = Info.addWorkGroupInfo(); |
1829 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); |
1830 | CCInfo.AllocateReg(Reg); |
1831 | } |
1832 | |
1833 | if (Info.hasPrivateSegmentWaveByteOffset()) { |
1834 | // Scratch wave offset passed in system SGPR. |
1835 | unsigned PrivateSegmentWaveByteOffsetReg; |
1836 | |
1837 | if (IsShader) { |
1838 | PrivateSegmentWaveByteOffsetReg = |
1839 | Info.getPrivateSegmentWaveByteOffsetSystemSGPR(); |
1840 | |
1841 | // This is true if the scratch wave byte offset doesn't have a fixed |
1842 | // location. |
1843 | if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) { |
1844 | PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo); |
1845 | Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg); |
1846 | } |
1847 | } else |
1848 | PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset(); |
1849 | |
1850 | MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass); |
1851 | CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg); |
1852 | } |
1853 | } |
1854 | |
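 | // A rough map of the roles assigned below: the scratch rsrc SGPRs hold the
 | // private segment buffer descriptor, while the stack pointer (s32 where
 | // possible) and the frame offset register address objects within it.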
1855 | static void reservePrivateMemoryRegs(const TargetMachine &TM, |
1856 | MachineFunction &MF, |
1857 | const SIRegisterInfo &TRI, |
1858 | SIMachineFunctionInfo &Info) { |
1859 | // Now that we've figured out where the scratch register inputs are, see if |
1860 | // we should reserve the arguments and use them directly.
1861 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
1862 | bool HasStackObjects = MFI.hasStackObjects(); |
1863 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
1864 | |
1865 | // Record that we know we have non-spill stack objects so we don't need to |
1866 | // check all stack objects later. |
1867 | if (HasStackObjects) |
1868 | Info.setHasNonSpillStackObjects(true); |
1869 | |
1870 | // Everything live out of a block is spilled with fast regalloc, so it's |
1871 | // almost certain that spilling will be required. |
1872 | if (TM.getOptLevel() == CodeGenOpt::None) |
1873 | HasStackObjects = true; |
1874 | |
1875 | // For now assume stack access is needed in any callee functions, so we need |
1876 | // the scratch registers to pass in. |
1877 | bool RequiresStackAccess = HasStackObjects || MFI.hasCalls(); |
1878 | |
1879 | if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) { |
1880 | // If we have stack objects, we unquestionably need the private buffer |
1881 | // resource. For the Code Object V2 ABI, this will be the first 4 user |
1882 | // SGPR inputs. We can reserve those and use them directly. |
1883 | |
1884 | Register PrivateSegmentBufferReg = |
1885 | Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER); |
1886 | Info.setScratchRSrcReg(PrivateSegmentBufferReg); |
1887 | } else { |
1888 | unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF); |
1889 | // We tentatively reserve the last registers (skipping the very last ones,
1890 | // which may contain VCC, FLAT_SCR, and XNACK). After register allocation,
1891 | // we'll replace these with the ones immediately after those which were
1892 | // really allocated. In the prologue, copies will be inserted from the
1893 | // argument to these reserved registers.
1894 | |
1895 | // Without HSA, relocations are used for the scratch pointer and the |
1896 | // buffer resource setup is always inserted in the prologue. Scratch wave |
1897 | // offset is still in an input SGPR. |
1898 | Info.setScratchRSrcReg(ReservedBufferReg); |
1899 | } |
1900 | |
1901 | // hasFP should be accurate for kernels even before the frame is finalized. |
1902 | if (ST.getFrameLowering()->hasFP(MF)) { |
1903 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
1904 | |
1905 | // Try to use s32 as the SP, but move it if it would interfere with input |
1906 | // arguments. This won't work with calls though. |
1907 | // |
1908 | // FIXME: Move SP to avoid any possible inputs, or find a way to spill input |
1909 | // registers. |
1910 | if (!MRI.isLiveIn(AMDGPU::SGPR32)) { |
1911 | Info.setStackPtrOffsetReg(AMDGPU::SGPR32); |
1912 | } else { |
1913 | assert(AMDGPU::isShader(MF.getFunction().getCallingConv()));
1914 | |
1915 | if (MFI.hasCalls()) |
1916 | report_fatal_error("call in graphics shader with too many input SGPRs"); |
1917 | |
1918 | for (unsigned Reg : AMDGPU::SGPR_32RegClass) { |
1919 | if (!MRI.isLiveIn(Reg)) { |
1920 | Info.setStackPtrOffsetReg(Reg); |
1921 | break; |
1922 | } |
1923 | } |
1924 | |
1925 | if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG) |
1926 | report_fatal_error("failed to find register for SP"); |
1927 | } |
1928 | |
1929 | if (MFI.hasCalls()) { |
1930 | Info.setScratchWaveOffsetReg(AMDGPU::SGPR33); |
1931 | Info.setFrameOffsetReg(AMDGPU::SGPR33); |
1932 | } else { |
1933 | unsigned ReservedOffsetReg = |
1934 | TRI.reservedPrivateSegmentWaveByteOffsetReg(MF); |
1935 | Info.setScratchWaveOffsetReg(ReservedOffsetReg); |
1936 | Info.setFrameOffsetReg(ReservedOffsetReg); |
1937 | } |
1938 | } else if (RequiresStackAccess) { |
1939 | assert(!MFI.hasCalls());
1940 | // We know there are accesses and they will be done relative to SP, so just |
1941 | // pin it to the input. |
1942 | // |
1943 | // FIXME: Should not do this if inline asm is reading/writing these |
1944 | // registers. |
1945 | Register PreloadedSP = Info.getPreloadedReg( |
1946 | AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); |
1947 | |
1948 | Info.setStackPtrOffsetReg(PreloadedSP); |
1949 | Info.setScratchWaveOffsetReg(PreloadedSP); |
1950 | Info.setFrameOffsetReg(PreloadedSP); |
1951 | } else { |
1952 | assert(!MFI.hasCalls());
1953 | |
1954 | // There may not be stack access at all. There may still be spills, or |
1955 | // access of a constant pointer (in which cases an extra copy will be |
1956 | // emitted in the prolog). |
1957 | unsigned ReservedOffsetReg |
1958 | = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF); |
1959 | Info.setStackPtrOffsetReg(ReservedOffsetReg); |
1960 | Info.setScratchWaveOffsetReg(ReservedOffsetReg); |
1961 | Info.setFrameOffsetReg(ReservedOffsetReg); |
1962 | } |
1963 | } |
1964 | |
1965 | bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const { |
1966 | const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); |
1967 | return !Info->isEntryFunction(); |
1968 | } |
1969 | |
1970 | void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { |
1971 | |
1972 | } |
1973 | |
1974 | void SITargetLowering::insertCopiesSplitCSR( |
1975 | MachineBasicBlock *Entry, |
1976 | const SmallVectorImpl<MachineBasicBlock *> &Exits) const { |
1977 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
1978 | |
1979 | const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); |
1980 | if (!IStart) |
1981 | return; |
1982 | |
1983 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
1984 | MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); |
1985 | MachineBasicBlock::iterator MBBI = Entry->begin(); |
1986 | for (const MCPhysReg *I = IStart; *I; ++I) { |
1987 | const TargetRegisterClass *RC = nullptr; |
1988 | if (AMDGPU::SReg_64RegClass.contains(*I)) |
1989 | RC = &AMDGPU::SGPR_64RegClass; |
1990 | else if (AMDGPU::SReg_32RegClass.contains(*I)) |
1991 | RC = &AMDGPU::SGPR_32RegClass; |
1992 | else |
1993 | llvm_unreachable("Unexpected register class in CSRsViaCopy!")::llvm::llvm_unreachable_internal("Unexpected register class in CSRsViaCopy!" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 1993); |
1994 | |
1995 | Register NewVR = MRI->createVirtualRegister(RC); |
1996 | // Create copy from CSR to a virtual register. |
1997 | Entry->addLiveIn(*I); |
1998 | BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) |
1999 | .addReg(*I); |
2000 | |
2001 | // Insert the copy-back instructions right before the terminator. |
2002 | for (auto *Exit : Exits) |
2003 | BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), |
2004 | TII->get(TargetOpcode::COPY), *I) |
2005 | .addReg(NewVR); |
2006 | } |
2007 | } |
2008 | |
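 | // Overview of the lowering below: shader inputs are split or skipped
 | // first, then the special entry VGPRs and user SGPRs are allocated, each
 | // formal argument is materialized from its register, kernarg segment
 | // offset, or stack slot, and the system SGPRs are added last.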
2009 | SDValue SITargetLowering::LowerFormalArguments( |
2010 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
2011 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, |
2012 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
2013 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
2014 | |
2015 | MachineFunction &MF = DAG.getMachineFunction(); |
2016 | const Function &Fn = MF.getFunction(); |
2017 | FunctionType *FType = MF.getFunction().getFunctionType(); |
2018 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
2019 | |
2020 | if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) { |
2021 | DiagnosticInfoUnsupported NoGraphicsHSA( |
2022 | Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc()); |
2023 | DAG.getContext()->diagnose(NoGraphicsHSA); |
2024 | return DAG.getEntryNode(); |
2025 | } |
2026 | |
2027 | SmallVector<ISD::InputArg, 16> Splits; |
2028 | SmallVector<CCValAssign, 16> ArgLocs; |
2029 | BitVector Skipped(Ins.size()); |
2030 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
2031 | *DAG.getContext()); |
2032 | |
2033 | bool IsShader = AMDGPU::isShader(CallConv); |
2034 | bool IsKernel = AMDGPU::isKernel(CallConv); |
2035 | bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv); |
2036 | |
2037 | if (IsShader) { |
2038 | processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info); |
2039 | |
2040 | // At least one interpolation mode must be enabled or else the GPU will |
2041 | // hang. |
2042 | // |
2043 | // Check PSInputAddr instead of PSInputEnable. The idea is that if the user |
2044 | // set PSInputAddr, the user wants to enable some bits after the compilation |
2045 | // based on run-time states. Since we can't know what the final PSInputEna
2046 | // will look like, we shouldn't do anything here and the user should take
2047 | // responsibility for the correct programming.
2048 | // |
2049 | // Otherwise, the following restrictions apply: |
2050 | // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled. |
2051 | // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be |
2052 | // enabled too. |
2053 | if (CallConv == CallingConv::AMDGPU_PS) { |
2054 | if ((Info->getPSInputAddr() & 0x7F) == 0 || |
2055 | ((Info->getPSInputAddr() & 0xF) == 0 && |
2056 | Info->isPSInputAllocated(11))) { |
2057 | CCInfo.AllocateReg(AMDGPU::VGPR0); |
2058 | CCInfo.AllocateReg(AMDGPU::VGPR1); |
2059 | Info->markPSInputAllocated(0); |
2060 | Info->markPSInputEnabled(0); |
2061 | } |
2062 | if (Subtarget->isAmdPalOS()) { |
2063 | // For isAmdPalOS, the user does not enable some bits after compilation |
2064 | // based on run-time states; the register values being generated here are |
2065 | // the final ones set in hardware. Therefore we need to apply the |
2066 | // workaround to PSInputAddr and PSInputEnable together. (The case where |
2067 | // a bit is set in PSInputAddr but not PSInputEnable is where the |
2068 | // frontend set up an input arg for a particular interpolation mode, but |
2069 | // nothing uses that input arg. Really we should have an earlier pass |
2070 | // that removes such an arg.) |
2071 | unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable(); |
2072 | if ((PsInputBits & 0x7F) == 0 || |
2073 | ((PsInputBits & 0xF) == 0 && |
2074 | (PsInputBits >> 11 & 1))) |
2075 | Info->markPSInputEnabled( |
2076 | countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined)); |
2077 | } |
2078 | } |
2079 | |
2080 | assert(!Info->hasDispatchPtr() &&
2081 | !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
2082 | !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
2083 | !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
2084 | !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
2085 | !Info->hasWorkItemIDZ());
2086 | } else if (IsKernel) { |
2087 | assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
2088 | } else { |
2089 | Splits.append(Ins.begin(), Ins.end()); |
2090 | } |
2091 | |
2092 | if (IsEntryFunc) { |
2093 | allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info); |
2094 | allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info); |
2095 | } |
2096 | |
2097 | if (IsKernel) { |
2098 | analyzeFormalArgumentsCompute(CCInfo, Ins); |
2099 | } else { |
2100 | CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg); |
2101 | CCInfo.AnalyzeFormalArguments(Splits, AssignFn); |
2102 | } |
2103 | |
2104 | SmallVector<SDValue, 16> Chains; |
2105 | |
2106 | // FIXME: This is the minimum kernel argument alignment. We should improve |
2107 | // this to the maximum alignment of the arguments. |
2108 | // |
2109 | // FIXME: Alignment of explicit arguments totally broken with non-0 explicit |
2110 | // kern arg offset. |
2111 | const unsigned KernelArgBaseAlign = 16; |
2112 | |
2113 | for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { |
2114 | const ISD::InputArg &Arg = Ins[i]; |
2115 | if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) { |
2116 | InVals.push_back(DAG.getUNDEF(Arg.VT)); |
2117 | continue; |
2118 | } |
2119 | |
2120 | CCValAssign &VA = ArgLocs[ArgIdx++]; |
2121 | MVT VT = VA.getLocVT(); |
2122 | |
2123 | if (IsEntryFunc && VA.isMemLoc()) { |
2124 | VT = Ins[i].VT; |
2125 | EVT MemVT = VA.getLocVT(); |
2126 | |
2127 | const uint64_t Offset = VA.getLocMemOffset(); |
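 | // e.g. MinAlign(16, 0) == 16, MinAlign(16, 4) == 4 and MinAlign(16, 6) == 2,
 | // so later arguments are only known to be aligned by their offset.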
2128 | unsigned Align = MinAlign(KernelArgBaseAlign, Offset); |
2129 | |
2130 | SDValue Arg = lowerKernargMemParameter( |
2131 | DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]); |
2132 | Chains.push_back(Arg.getValue(1)); |
2133 | |
2134 | auto *ParamTy = |
2135 | dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex())); |
2136 | if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && |
2137 | ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || |
2138 | ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) { |
2139 | // On SI local pointers are just offsets into LDS, so they are always |
2140 | // less than 16 bits. On CI and newer they could potentially be
2141 | // real pointers, so we can't guarantee their size. |
2142 | Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg, |
2143 | DAG.getValueType(MVT::i16)); |
2144 | } |
2145 | |
2146 | InVals.push_back(Arg); |
2147 | continue; |
2148 | } else if (!IsEntryFunc && VA.isMemLoc()) { |
2149 | SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg); |
2150 | InVals.push_back(Val); |
2151 | if (!Arg.Flags.isByVal()) |
2152 | Chains.push_back(Val.getValue(1)); |
2153 | continue; |
2154 | } |
2155 | |
2156 | assert(VA.isRegLoc() && "Parameter must be in a register!");
2157 | |
2158 | Register Reg = VA.getLocReg(); |
2159 | const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); |
2160 | EVT ValVT = VA.getValVT(); |
2161 | |
2162 | Reg = MF.addLiveIn(Reg, RC); |
2163 | SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT); |
2164 | |
2165 | if (Arg.Flags.isSRet()) { |
2166 | // The return object should be reasonably addressable. |
2167 | |
2168 | // FIXME: This helps when the return is a real sret. If it is an
2169 | // automatically inserted sret (i.e. CanLowerReturn returns false), an |
2170 | // extra copy is inserted in SelectionDAGBuilder which obscures this. |
2171 | unsigned NumBits |
2172 | = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex(); |
2173 | Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, |
2174 | DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits))); |
2175 | } |
2176 | |
2177 | // If this is an 8 or 16-bit value, it is really passed promoted |
2178 | // to 32 bits. Insert an assert[sz]ext to capture this, then |
2179 | // truncate to the right size. |
2180 | switch (VA.getLocInfo()) { |
2181 | case CCValAssign::Full: |
2182 | break; |
2183 | case CCValAssign::BCvt: |
2184 | Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val); |
2185 | break; |
2186 | case CCValAssign::SExt: |
2187 | Val = DAG.getNode(ISD::AssertSext, DL, VT, Val, |
2188 | DAG.getValueType(ValVT)); |
2189 | Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); |
2190 | break; |
2191 | case CCValAssign::ZExt: |
2192 | Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, |
2193 | DAG.getValueType(ValVT)); |
2194 | Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); |
2195 | break; |
2196 | case CCValAssign::AExt: |
2197 | Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); |
2198 | break; |
2199 | default: |
2200 | llvm_unreachable("Unknown loc info!")::llvm::llvm_unreachable_internal("Unknown loc info!", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 2200); |
2201 | } |
2202 | |
2203 | InVals.push_back(Val); |
2204 | } |
2205 | |
2206 | if (!IsEntryFunc) { |
2207 | // Special inputs come after user arguments. |
2208 | allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info); |
2209 | } |
2210 | |
2211 | // Start adding system SGPRs. |
2212 | if (IsEntryFunc) { |
2213 | allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader); |
2214 | } else { |
2215 | CCInfo.AllocateReg(Info->getScratchRSrcReg()); |
2216 | CCInfo.AllocateReg(Info->getScratchWaveOffsetReg()); |
2217 | CCInfo.AllocateReg(Info->getFrameOffsetReg()); |
2218 | allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info); |
2219 | } |
2220 | |
2221 | auto &ArgUsageInfo = |
2222 | DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); |
2223 | ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo()); |
2224 | |
2225 | unsigned StackArgSize = CCInfo.getNextStackOffset(); |
2226 | Info->setBytesInStackArgArea(StackArgSize); |
2227 | |
2228 | return Chains.empty() ? Chain : |
2229 | DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); |
2230 | } |
2231 | |
2232 | // TODO: If return values can't fit in registers, we should return as many as
2233 | // possible in registers before passing the rest on the stack.
2234 | bool SITargetLowering::CanLowerReturn( |
2235 | CallingConv::ID CallConv, |
2236 | MachineFunction &MF, bool IsVarArg, |
2237 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
2238 | LLVMContext &Context) const { |
2239 | // Replacing returns with sret/stack usage doesn't make sense for shaders. |
2240 | // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn |
2241 | // for shaders. Vector types should be explicitly handled by CC. |
2242 | if (AMDGPU::isEntryFunctionCC(CallConv)) |
2243 | return true; |
2244 | |
2245 | SmallVector<CCValAssign, 16> RVLocs; |
2246 | CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); |
2247 | return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg)); |
2248 | } |
2249 | |
2250 | SDValue |
2251 | SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, |
2252 | bool isVarArg, |
2253 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
2254 | const SmallVectorImpl<SDValue> &OutVals, |
2255 | const SDLoc &DL, SelectionDAG &DAG) const { |
2256 | MachineFunction &MF = DAG.getMachineFunction(); |
2257 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
2258 | |
2259 | if (AMDGPU::isKernel(CallConv)) { |
2260 | return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs, |
2261 | OutVals, DL, DAG); |
2262 | } |
2263 | |
2264 | bool IsShader = AMDGPU::isShader(CallConv); |
2265 | |
2266 | Info->setIfReturnsVoid(Outs.empty()); |
2267 | bool IsWaveEnd = Info->returnsVoid() && IsShader; |
2268 | |
2269 | // CCValAssign - represent the assignment of the return value to a location. |
2270 | SmallVector<CCValAssign, 48> RVLocs; |
2271 | SmallVector<ISD::OutputArg, 48> Splits; |
2272 | |
2273 | // CCState - Info about the registers and stack slots. |
2274 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
2275 | *DAG.getContext()); |
2276 | |
2277 | // Analyze outgoing return values. |
2278 | CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); |
2279 | |
2280 | SDValue Flag; |
2281 | SmallVector<SDValue, 48> RetOps; |
2282 | RetOps.push_back(Chain); // Operand #0 = Chain (updated below) |
2283 | |
2284 | // Add return address for callable functions. |
2285 | if (!Info->isEntryFunction()) { |
2286 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
2287 | SDValue ReturnAddrReg = CreateLiveInRegister( |
2288 | DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64); |
2289 | |
2290 | SDValue ReturnAddrVirtualReg = DAG.getRegister( |
2291 | MF.getRegInfo().createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass), |
2292 | MVT::i64); |
2293 | Chain = |
2294 | DAG.getCopyToReg(Chain, DL, ReturnAddrVirtualReg, ReturnAddrReg, Flag); |
2295 | Flag = Chain.getValue(1); |
2296 | RetOps.push_back(ReturnAddrVirtualReg); |
2297 | } |
2298 | |
2299 | // Copy the result values into the output registers. |
2300 | for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E; |
2301 | ++I, ++RealRVLocIdx) { |
2302 | CCValAssign &VA = RVLocs[I]; |
2303 | assert(VA.isRegLoc() && "Can only return in registers!");
2304 | // TODO: Partially return in registers if return values don't fit. |
2305 | SDValue Arg = OutVals[RealRVLocIdx]; |
2306 | |
2307 | // Copied from other backends. |
2308 | switch (VA.getLocInfo()) { |
2309 | case CCValAssign::Full: |
2310 | break; |
2311 | case CCValAssign::BCvt: |
2312 | Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); |
2313 | break; |
2314 | case CCValAssign::SExt: |
2315 | Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); |
2316 | break; |
2317 | case CCValAssign::ZExt: |
2318 | Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); |
2319 | break; |
2320 | case CCValAssign::AExt: |
2321 | Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); |
2322 | break; |
2323 | default: |
2324 | llvm_unreachable("Unknown loc info!")::llvm::llvm_unreachable_internal("Unknown loc info!", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 2324); |
2325 | } |
2326 | |
2327 | Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); |
2328 | Flag = Chain.getValue(1); |
2329 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); |
2330 | } |
2331 | |
2332 | // FIXME: Does sret work properly? |
2333 | if (!Info->isEntryFunction()) { |
2334 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
2335 | const MCPhysReg *I = |
2336 | TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); |
2337 | if (I) { |
2338 | for (; *I; ++I) { |
2339 | if (AMDGPU::SReg_64RegClass.contains(*I)) |
2340 | RetOps.push_back(DAG.getRegister(*I, MVT::i64)); |
2341 | else if (AMDGPU::SReg_32RegClass.contains(*I)) |
2342 | RetOps.push_back(DAG.getRegister(*I, MVT::i32)); |
2343 | else |
2344 | llvm_unreachable("Unexpected register class in CSRsViaCopy!")::llvm::llvm_unreachable_internal("Unexpected register class in CSRsViaCopy!" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 2344); |
2345 | } |
2346 | } |
2347 | } |
2348 | |
2349 | // Update chain and glue. |
2350 | RetOps[0] = Chain; |
2351 | if (Flag.getNode()) |
2352 | RetOps.push_back(Flag); |
2353 | |
2354 | unsigned Opc = AMDGPUISD::ENDPGM; |
2355 | if (!IsWaveEnd) |
2356 | Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG; |
2357 | return DAG.getNode(Opc, DL, MVT::Other, RetOps); |
2358 | } |
2359 | |
2360 | SDValue SITargetLowering::LowerCallResult( |
2361 | SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg, |
2362 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, |
2363 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn, |
2364 | SDValue ThisVal) const { |
2365 | CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg); |
2366 | |
2367 | // Assign locations to each value returned by this call. |
2368 | SmallVector<CCValAssign, 16> RVLocs; |
2369 | CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, |
2370 | *DAG.getContext()); |
2371 | CCInfo.AnalyzeCallResult(Ins, RetCC); |
2372 | |
2373 | // Copy all of the result registers out of their specified physreg. |
2374 | for (unsigned i = 0; i != RVLocs.size(); ++i) { |
2375 | CCValAssign VA = RVLocs[i]; |
2376 | SDValue Val; |
2377 | |
2378 | if (VA.isRegLoc()) { |
2379 | Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag); |
2380 | Chain = Val.getValue(1); |
2381 | InFlag = Val.getValue(2); |
2382 | } else if (VA.isMemLoc()) { |
2383 | report_fatal_error("TODO: return values in memory"); |
2384 | } else |
2385 | llvm_unreachable("unknown argument location type")::llvm::llvm_unreachable_internal("unknown argument location type" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 2385); |
2386 | |
2387 | switch (VA.getLocInfo()) { |
2388 | case CCValAssign::Full: |
2389 | break; |
2390 | case CCValAssign::BCvt: |
2391 | Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); |
2392 | break; |
2393 | case CCValAssign::ZExt: |
2394 | Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val, |
2395 | DAG.getValueType(VA.getValVT())); |
2396 | Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); |
2397 | break; |
2398 | case CCValAssign::SExt: |
2399 | Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val, |
2400 | DAG.getValueType(VA.getValVT())); |
2401 | Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); |
2402 | break; |
2403 | case CCValAssign::AExt: |
2404 | Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); |
2405 | break; |
2406 | default: |
2407 | llvm_unreachable("Unknown loc info!")::llvm::llvm_unreachable_internal("Unknown loc info!", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 2407); |
2408 | } |
2409 | |
2410 | InVals.push_back(Val); |
2411 | } |
2412 | |
2413 | return Chain; |
2414 | } |
2415 | |
2416 | // Add code to pass special inputs required depending on used features separate |
2417 | // from the explicit user arguments present in the IR. |
2418 | void SITargetLowering::passSpecialInputs( |
2419 | CallLoweringInfo &CLI, |
2420 | CCState &CCInfo, |
2421 | const SIMachineFunctionInfo &Info, |
2422 | SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, |
2423 | SmallVectorImpl<SDValue> &MemOpChains, |
2424 | SDValue Chain) const { |
2425 | // If we don't have a call site, this was a call inserted by |
2426 | // legalization. These can never use special inputs. |
2427 | if (!CLI.CS) |
2428 | return; |
2429 | |
2430 | const Function *CalleeFunc = CLI.CS.getCalledFunction(); |
2431 | assert(CalleeFunc);
2432 | |
2433 | SelectionDAG &DAG = CLI.DAG; |
2434 | const SDLoc &DL = CLI.DL; |
2435 | |
2436 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
2437 | |
2438 | auto &ArgUsageInfo = |
2439 | DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); |
2440 | const AMDGPUFunctionArgInfo &CalleeArgInfo |
2441 | = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc); |
2442 | |
2443 | const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo(); |
2444 | |
2445 | // TODO: Unify with private memory register handling. This is complicated by |
2446 | // the fact that at least in kernels, the input argument is not necessarily |
2447 | // in the same location as the input. |
2448 | AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = { |
2449 | AMDGPUFunctionArgInfo::DISPATCH_PTR, |
2450 | AMDGPUFunctionArgInfo::QUEUE_PTR, |
2451 | AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR, |
2452 | AMDGPUFunctionArgInfo::DISPATCH_ID, |
2453 | AMDGPUFunctionArgInfo::WORKGROUP_ID_X, |
2454 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Y, |
2455 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Z, |
2456 | AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR |
2457 | }; |
2458 | |
2459 | for (auto InputID : InputRegs) { |
2460 | const ArgDescriptor *OutgoingArg; |
2461 | const TargetRegisterClass *ArgRC; |
2462 | |
2463 | std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID); |
2464 | if (!OutgoingArg) |
2465 | continue; |
2466 | |
2467 | const ArgDescriptor *IncomingArg; |
2468 | const TargetRegisterClass *IncomingArgRC; |
2469 | std::tie(IncomingArg, IncomingArgRC) |
2470 | = CallerArgInfo.getPreloadedValue(InputID); |
2471 |     assert(IncomingArgRC == ArgRC);
2472 | |
2473 | // All special arguments are ints for now. |
2474 | EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32; |
2475 | SDValue InputReg; |
2476 | |
2477 | if (IncomingArg) { |
2478 | InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg); |
2479 | } else { |
2480 | // The implicit arg ptr is special because it doesn't have a corresponding |
2481 | // input for kernels, and is computed from the kernarg segment pointer. |
2482 |       assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2483 | InputReg = getImplicitArgPtr(DAG, DL); |
2484 | } |
2485 | |
2486 | if (OutgoingArg->isRegister()) { |
2487 | RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg); |
2488 | } else { |
2489 | unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4); |
2490 | SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg, |
2491 | SpecialArgOffset); |
2492 | MemOpChains.push_back(ArgStore); |
2493 | } |
2494 | } |
2495 | |
2496 |   // Pack workitem IDs into a single register, or pass them through as is if
2497 |   // they are already packed.
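     |   // Packed layout implied by the 10/20-bit shifts below:
     |   //   bits [9:0] = X, bits [19:10] = Y, bits [29:20] = Z.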
2498 | const ArgDescriptor *OutgoingArg; |
2499 | const TargetRegisterClass *ArgRC; |
2500 | |
2501 | std::tie(OutgoingArg, ArgRC) = |
2502 | CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X); |
2503 | if (!OutgoingArg) |
2504 | std::tie(OutgoingArg, ArgRC) = |
2505 | CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y); |
2506 | if (!OutgoingArg) |
2507 | std::tie(OutgoingArg, ArgRC) = |
2508 | CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z); |
2509 | if (!OutgoingArg) |
2510 | return; |
2511 | |
2512 | const ArgDescriptor *IncomingArgX |
2513 | = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X).first; |
2514 | const ArgDescriptor *IncomingArgY |
2515 | = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y).first; |
2516 | const ArgDescriptor *IncomingArgZ |
2517 | = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z).first; |
2518 | |
2519 | SDValue InputReg; |
2520 | SDLoc SL; |
2521 | |
2522 |   // If the incoming IDs are not packed, we need to pack them.
2523 | if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo.WorkItemIDX) |
2524 | InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX); |
2525 | |
2526 | if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo.WorkItemIDY) { |
2527 | SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY); |
2528 | Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y, |
2529 | DAG.getShiftAmountConstant(10, MVT::i32, SL)); |
2530 | InputReg = InputReg.getNode() ? |
2531 | DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y; |
2532 | } |
2533 | |
2534 | if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo.WorkItemIDZ) { |
2535 | SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ); |
2536 | Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z, |
2537 | DAG.getShiftAmountConstant(20, MVT::i32, SL)); |
2538 | InputReg = InputReg.getNode() ? |
2539 | DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Z) : Z; |
2540 | } |
2541 | |
2542 | if (!InputReg.getNode()) { |
2543 |     // Workitem IDs are already packed; any of the present incoming arguments
2544 |     // will carry all of the required fields.
2545 | ArgDescriptor IncomingArg = ArgDescriptor::createArg( |
2546 | IncomingArgX ? *IncomingArgX : |
2547 | IncomingArgY ? *IncomingArgY : |
2548 | *IncomingArgZ, ~0u); |
2549 | InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg); |
2550 | } |
2551 | |
2552 | if (OutgoingArg->isRegister()) { |
2553 | RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg); |
2554 | } else { |
2555 | unsigned SpecialArgOffset = CCInfo.AllocateStack(4, 4); |
2556 | SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg, |
2557 | SpecialArgOffset); |
2558 | MemOpChains.push_back(ArgStore); |
2559 | } |
2560 | } |
2561 | |
2562 | static bool canGuaranteeTCO(CallingConv::ID CC) { |
2563 | return CC == CallingConv::Fast; |
2564 | } |
2565 | |
2566 | /// Return true if we might ever do TCO for calls with this calling convention. |
2567 | static bool mayTailCallThisCC(CallingConv::ID CC) { |
2568 | switch (CC) { |
2569 | case CallingConv::C: |
2570 | return true; |
2571 | default: |
2572 | return canGuaranteeTCO(CC); |
2573 | } |
2574 | } |
2575 | |
2576 | bool SITargetLowering::isEligibleForTailCallOptimization( |
2577 | SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg, |
2578 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
2579 | const SmallVectorImpl<SDValue> &OutVals, |
2580 | const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { |
2581 | if (!mayTailCallThisCC(CalleeCC)) |
2582 | return false; |
2583 | |
2584 | MachineFunction &MF = DAG.getMachineFunction(); |
2585 | const Function &CallerF = MF.getFunction(); |
2586 | CallingConv::ID CallerCC = CallerF.getCallingConv(); |
2587 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
2588 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); |
2589 | |
2590 |   // Kernels aren't callable, and don't have a live-in return address, so it
2591 |   // doesn't make sense to do a tail call with entry functions.
2592 | if (!CallerPreserved) |
2593 | return false; |
2594 | |
2595 | bool CCMatch = CallerCC == CalleeCC; |
2596 | |
2597 | if (DAG.getTarget().Options.GuaranteedTailCallOpt) { |
2598 | if (canGuaranteeTCO(CalleeCC) && CCMatch) |
2599 | return true; |
2600 | return false; |
2601 | } |
2602 | |
2603 | // TODO: Can we handle var args? |
2604 | if (IsVarArg) |
2605 | return false; |
2606 | |
2607 | for (const Argument &Arg : CallerF.args()) { |
2608 | if (Arg.hasByValAttr()) |
2609 | return false; |
2610 | } |
2611 | |
2612 | LLVMContext &Ctx = *DAG.getContext(); |
2613 | |
2614 | // Check that the call results are passed in the same way. |
2615 | if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins, |
2616 | CCAssignFnForCall(CalleeCC, IsVarArg), |
2617 | CCAssignFnForCall(CallerCC, IsVarArg))) |
2618 | return false; |
2619 | |
2620 | // The callee has to preserve all registers the caller needs to preserve. |
2621 | if (!CCMatch) { |
2622 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); |
2623 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) |
2624 | return false; |
2625 | } |
2626 | |
2627 | // Nothing more to check if the callee is taking no arguments. |
2628 | if (Outs.empty()) |
2629 | return true; |
2630 | |
2631 | SmallVector<CCValAssign, 16> ArgLocs; |
2632 | CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx); |
2633 | |
2634 | CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg)); |
2635 | |
2636 | const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); |
2637 |   // If the stack arguments for this call do not fit into our own save area,
2638 |   // then the call cannot be a tail call.
2639 | // TODO: Is this really necessary? |
2640 | if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) |
2641 | return false; |
2642 | |
2643 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
2644 | return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals); |
2645 | } |
2646 | |
2647 | bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { |
2648 | if (!CI->isTailCall()) |
2649 | return false; |
2650 | |
2651 | const Function *ParentFn = CI->getParent()->getParent(); |
2652 | if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv())) |
2653 | return false; |
2654 | return true; |
2655 | } |
2656 | |
2657 | // The wave scratch offset register is used as the global base pointer. |
2658 | SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI, |
2659 | SmallVectorImpl<SDValue> &InVals) const { |
2660 | SelectionDAG &DAG = CLI.DAG; |
2661 | const SDLoc &DL = CLI.DL; |
2662 | SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; |
2663 | SmallVector<SDValue, 32> &OutVals = CLI.OutVals; |
2664 | SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; |
2665 | SDValue Chain = CLI.Chain; |
2666 | SDValue Callee = CLI.Callee; |
2667 | bool &IsTailCall = CLI.IsTailCall; |
2668 | CallingConv::ID CallConv = CLI.CallConv; |
2669 | bool IsVarArg = CLI.IsVarArg; |
2670 | bool IsSibCall = false; |
2671 | bool IsThisReturn = false; |
2672 | MachineFunction &MF = DAG.getMachineFunction(); |
2673 | |
2674 | if (Callee.isUndef() || isNullConstant(Callee)) { |
2675 | if (!CLI.IsTailCall) { |
2676 | for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I) |
2677 | InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT)); |
2678 | } |
2679 | |
2680 | return Chain; |
2681 | } |
2682 | |
2683 | if (IsVarArg) { |
2684 | return lowerUnhandledCall(CLI, InVals, |
2685 | "unsupported call to variadic function "); |
2686 | } |
2687 | |
2688 | if (!CLI.CS.getInstruction()) |
2689 | report_fatal_error("unsupported libcall legalization"); |
2690 | |
2691 | if (!CLI.CS.getCalledFunction()) { |
2692 | return lowerUnhandledCall(CLI, InVals, |
2693 | "unsupported indirect call to function "); |
2694 | } |
2695 | |
2696 | if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) { |
2697 | return lowerUnhandledCall(CLI, InVals, |
2698 | "unsupported required tail call to function "); |
2699 | } |
2700 | |
2701 | if (AMDGPU::isShader(MF.getFunction().getCallingConv())) { |
2702 | // Note the issue is with the CC of the calling function, not of the call |
2703 | // itself. |
2704 | return lowerUnhandledCall(CLI, InVals, |
2705 | "unsupported call from graphics shader of function "); |
2706 | } |
2707 | |
2708 | if (IsTailCall) { |
2709 | IsTailCall = isEligibleForTailCallOptimization( |
2710 | Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG); |
2711 | if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) { |
2712 | report_fatal_error("failed to perform tail call elimination on a call " |
2713 | "site marked musttail"); |
2714 | } |
2715 | |
2716 | bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; |
2717 | |
2718 |     // A sibling call is one where we're under the usual C ABI and are not
2719 |     // planning to change that, but can still do a tail call:
2720 | if (!TailCallOpt && IsTailCall) |
2721 | IsSibCall = true; |
2722 | |
2723 | if (IsTailCall) |
2724 | ++NumTailCalls; |
2725 | } |
2726 | |
2727 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
2728 | |
2729 | // Analyze operands of the call, assigning locations to each operand. |
2730 | SmallVector<CCValAssign, 16> ArgLocs; |
2731 | CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); |
2732 | CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg); |
2733 | |
2734 | CCInfo.AnalyzeCallOperands(Outs, AssignFn); |
2735 | |
2736 | // Get a count of how many bytes are to be pushed on the stack. |
2737 | unsigned NumBytes = CCInfo.getNextStackOffset(); |
2738 | |
2739 | if (IsSibCall) { |
2740 | // Since we're not changing the ABI to make this a tail call, the memory |
2741 | // operands are already available in the caller's incoming argument space. |
2742 | NumBytes = 0; |
2743 | } |
2744 | |
2745 | // FPDiff is the byte offset of the call's argument area from the callee's. |
2746 | // Stores to callee stack arguments will be placed in FixedStackSlots offset |
2747 | // by this amount for a tail call. In a sibling call it must be 0 because the |
2748 | // caller will deallocate the entire stack and the callee still expects its |
2749 | // arguments to begin at SP+0. Completely unused for non-tail calls. |
2750 | int32_t FPDiff = 0; |
2751 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
2752 | SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; |
2753 | |
2754 | // Adjust the stack pointer for the new arguments... |
2755 | // These operations are automatically eliminated by the prolog/epilog pass |
2756 | if (!IsSibCall) { |
2757 | Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL); |
2758 | |
2759 | SmallVector<SDValue, 4> CopyFromChains; |
2760 | |
2761 | // In the HSA case, this should be an identity copy. |
2762 | SDValue ScratchRSrcReg |
2763 | = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32); |
2764 | RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg); |
2765 | CopyFromChains.push_back(ScratchRSrcReg.getValue(1)); |
2766 | Chain = DAG.getTokenFactor(DL, CopyFromChains); |
2767 | } |
2768 | |
2769 | SmallVector<SDValue, 8> MemOpChains; |
2770 | MVT PtrVT = MVT::i32; |
2771 | |
2772 | // Walk the register/memloc assignments, inserting copies/loads. |
2773 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { |
2774 | CCValAssign &VA = ArgLocs[i]; |
2775 | SDValue Arg = OutVals[i]; |
2776 | |
2777 | // Promote the value if needed. |
2778 | switch (VA.getLocInfo()) { |
2779 | case CCValAssign::Full: |
2780 | break; |
2781 | case CCValAssign::BCvt: |
2782 | Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); |
2783 | break; |
2784 | case CCValAssign::ZExt: |
2785 | Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); |
2786 | break; |
2787 | case CCValAssign::SExt: |
2788 | Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); |
2789 | break; |
2790 | case CCValAssign::AExt: |
2791 | Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); |
2792 | break; |
2793 | case CCValAssign::FPExt: |
2794 | Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg); |
2795 | break; |
2796 | default: |
2797 |       llvm_unreachable("Unknown loc info!");
2798 | } |
2799 | |
2800 | if (VA.isRegLoc()) { |
2801 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); |
2802 | } else { |
2803 |       assert(VA.isMemLoc());
2804 | |
2805 | SDValue DstAddr; |
2806 | MachinePointerInfo DstInfo; |
2807 | |
2808 | unsigned LocMemOffset = VA.getLocMemOffset(); |
2809 | int32_t Offset = LocMemOffset; |
2810 | |
2811 | SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT); |
2812 | MaybeAlign Alignment; |
2813 | |
2814 | if (IsTailCall) { |
2815 | ISD::ArgFlagsTy Flags = Outs[i].Flags; |
2816 | unsigned OpSize = Flags.isByVal() ? |
2817 | Flags.getByValSize() : VA.getValVT().getStoreSize(); |
2818 | |
2819 | // FIXME: We can have better than the minimum byval required alignment. |
2820 | Alignment = |
2821 | Flags.isByVal() |
2822 | ? Flags.getNonZeroByValAlign() |
2823 | : commonAlignment(Subtarget->getStackAlignment(), Offset); |
2824 | |
2825 | Offset = Offset + FPDiff; |
2826 | int FI = MFI.CreateFixedObject(OpSize, Offset, true); |
2827 | |
2828 | DstAddr = DAG.getFrameIndex(FI, PtrVT); |
2829 | DstInfo = MachinePointerInfo::getFixedStack(MF, FI); |
2830 | |
2831 | // Make sure any stack arguments overlapping with where we're storing |
2832 | // are loaded before this eventual operation. Otherwise they'll be |
2833 | // clobbered. |
2834 | |
2835 | // FIXME: Why is this really necessary? This seems to just result in a |
2836 | // lot of code to copy the stack and write them back to the same |
2837 | // locations, which are supposed to be immutable? |
2838 | Chain = addTokenForArgument(Chain, DAG, MFI, FI); |
2839 | } else { |
2840 | DstAddr = PtrOff; |
2841 | DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset); |
2842 | Alignment = |
2843 | commonAlignment(Subtarget->getStackAlignment(), LocMemOffset); |
2844 | } |
2845 | |
2846 | if (Outs[i].Flags.isByVal()) { |
2847 | SDValue SizeNode = |
2848 | DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32); |
2849 | SDValue Cpy = |
2850 | DAG.getMemcpy(Chain, DL, DstAddr, Arg, SizeNode, |
2851 | Outs[i].Flags.getNonZeroByValAlign(), |
2852 | /*isVol = */ false, /*AlwaysInline = */ true, |
2853 | /*isTailCall = */ false, DstInfo, |
2854 | MachinePointerInfo(AMDGPUAS::PRIVATE_ADDRESS)); |
2855 | |
2856 | MemOpChains.push_back(Cpy); |
2857 | } else { |
2858 | SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, |
2859 | Alignment ? Alignment->value() : 0); |
2860 | MemOpChains.push_back(Store); |
2861 | } |
2862 | } |
2863 | } |
2864 | |
2865 | // Copy special input registers after user input arguments. |
2866 | passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain); |
2867 | |
2868 | if (!MemOpChains.empty()) |
2869 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); |
2870 | |
2871 | // Build a sequence of copy-to-reg nodes chained together with token chain |
2872 | // and flag operands which copy the outgoing args into the appropriate regs. |
2873 | SDValue InFlag; |
2874 | for (auto &RegToPass : RegsToPass) { |
2875 | Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first, |
2876 | RegToPass.second, InFlag); |
2877 | InFlag = Chain.getValue(1); |
2878 | } |
2879 | |
2880 | |
2881 | SDValue PhysReturnAddrReg; |
2882 | if (IsTailCall) { |
2883 | // Since the return is being combined with the call, we need to pass on the |
2884 | // return address. |
2885 | |
2886 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
2887 | SDValue ReturnAddrReg = CreateLiveInRegister( |
2888 | DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64); |
2889 | |
2890 | PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF), |
2891 | MVT::i64); |
2892 | Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag); |
2893 | InFlag = Chain.getValue(1); |
2894 | } |
2895 | |
2896 |   // We don't usually want to end the call-sequence here because we would tidy
2897 |   // the frame up *after* the call. However, in the ABI-changing tail-call case
2898 |   // we've carefully laid out the parameters so that when sp is reset they'll
2899 |   // be in the correct location.
2900 | if (IsTailCall && !IsSibCall) { |
2901 | Chain = DAG.getCALLSEQ_END(Chain, |
2902 | DAG.getTargetConstant(NumBytes, DL, MVT::i32), |
2903 | DAG.getTargetConstant(0, DL, MVT::i32), |
2904 | InFlag, DL); |
2905 | InFlag = Chain.getValue(1); |
2906 | } |
2907 | |
2908 | std::vector<SDValue> Ops; |
2909 | Ops.push_back(Chain); |
2910 | Ops.push_back(Callee); |
2911 | // Add a redundant copy of the callee global which will not be legalized, as |
2912 | // we need direct access to the callee later. |
2913 | GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Callee); |
2914 | const GlobalValue *GV = GSD->getGlobal(); |
2915 | Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64)); |
2916 | |
2917 | if (IsTailCall) { |
2918 | // Each tail call may have to adjust the stack by a different amount, so |
2919 | // this information must travel along with the operation for eventual |
2920 | // consumption by emitEpilogue. |
2921 | Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32)); |
2922 | |
2923 | Ops.push_back(PhysReturnAddrReg); |
2924 | } |
2925 | |
2926 | // Add argument registers to the end of the list so that they are known live |
2927 | // into the call. |
2928 | for (auto &RegToPass : RegsToPass) { |
2929 | Ops.push_back(DAG.getRegister(RegToPass.first, |
2930 | RegToPass.second.getValueType())); |
2931 | } |
2932 | |
2933 | // Add a register mask operand representing the call-preserved registers. |
2934 | |
2935 | auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo()); |
2936 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); |
2937 |   assert(Mask && "Missing call preserved mask for calling convention");
2938 | Ops.push_back(DAG.getRegisterMask(Mask)); |
2939 | |
2940 | if (InFlag.getNode()) |
2941 | Ops.push_back(InFlag); |
2942 | |
2943 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); |
2944 | |
2945 |   // If we're doing a tail call, use a TC_RETURN here rather than an
2946 |   // actual call instruction.
2947 | if (IsTailCall) { |
2948 | MFI.setHasTailCall(); |
2949 | return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops); |
2950 | } |
2951 | |
2952 | // Returns a chain and a flag for retval copy to use. |
2953 | SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops); |
2954 | Chain = Call.getValue(0); |
2955 | InFlag = Call.getValue(1); |
2956 | |
2957 | uint64_t CalleePopBytes = NumBytes; |
2958 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32), |
2959 | DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32), |
2960 | InFlag, DL); |
2961 | if (!Ins.empty()) |
2962 | InFlag = Chain.getValue(1); |
2963 | |
2964 | // Handle result values, copying them out of physregs into vregs that we |
2965 | // return. |
2966 | return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG, |
2967 | InVals, IsThisReturn, |
2968 | IsThisReturn ? OutVals[0] : SDValue()); |
2969 | } |
2970 | |
2971 | Register SITargetLowering::getRegisterByName(const char* RegName, LLT VT, |
2972 | const MachineFunction &MF) const { |
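     |   // This hook backs the llvm.read_register / llvm.write_register
     |   // intrinsics. Illustrative IR (not from this file):
     |   //   %m0 = call i32 @llvm.read_register.i32(metadata !0)
     |   //   !0 = !{!"m0"}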
2973 | Register Reg = StringSwitch<Register>(RegName) |
2974 | .Case("m0", AMDGPU::M0) |
2975 | .Case("exec", AMDGPU::EXEC) |
2976 | .Case("exec_lo", AMDGPU::EXEC_LO) |
2977 | .Case("exec_hi", AMDGPU::EXEC_HI) |
2978 | .Case("flat_scratch", AMDGPU::FLAT_SCR) |
2979 | .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO) |
2980 | .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI) |
2981 | .Default(Register()); |
2982 | |
2983 | if (Reg == AMDGPU::NoRegister) { |
2984 | report_fatal_error(Twine("invalid register name \"" |
2985 | + StringRef(RegName) + "\".")); |
2986 | |
2987 | } |
2988 | |
2989 | if (!Subtarget->hasFlatScrRegister() && |
2990 | Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { |
2991 | report_fatal_error(Twine("invalid register \"" |
2992 | + StringRef(RegName) + "\" for subtarget.")); |
2993 | } |
2994 | |
2995 | switch (Reg) { |
2996 | case AMDGPU::M0: |
2997 | case AMDGPU::EXEC_LO: |
2998 | case AMDGPU::EXEC_HI: |
2999 | case AMDGPU::FLAT_SCR_LO: |
3000 | case AMDGPU::FLAT_SCR_HI: |
3001 | if (VT.getSizeInBits() == 32) |
3002 | return Reg; |
3003 | break; |
3004 | case AMDGPU::EXEC: |
3005 | case AMDGPU::FLAT_SCR: |
3006 | if (VT.getSizeInBits() == 64) |
3007 | return Reg; |
3008 | break; |
3009 | default: |
3010 |     llvm_unreachable("missing register type checking");
3011 | } |
3012 | |
3013 | report_fatal_error(Twine("invalid type for register \"" |
3014 | + StringRef(RegName) + "\".")); |
3015 | } |
3016 | |
3017 | // If kill is not the last instruction, split the block so kill is always a |
3018 | // proper terminator. |
3019 | MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI, |
3020 | MachineBasicBlock *BB) const { |
3021 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
3022 | |
3023 | MachineBasicBlock::iterator SplitPoint(&MI); |
3024 | ++SplitPoint; |
3025 | |
3026 | if (SplitPoint == BB->end()) { |
3027 | // Don't bother with a new block. |
3028 | MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); |
3029 | return BB; |
3030 | } |
3031 | |
3032 | MachineFunction *MF = BB->getParent(); |
3033 | MachineBasicBlock *SplitBB |
3034 | = MF->CreateMachineBasicBlock(BB->getBasicBlock()); |
3035 | |
3036 | MF->insert(++MachineFunction::iterator(BB), SplitBB); |
3037 | SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end()); |
3038 | |
3039 | SplitBB->transferSuccessorsAndUpdatePHIs(BB); |
3040 | BB->addSuccessor(SplitBB); |
3041 | |
3042 | MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); |
3043 | return SplitBB; |
3044 | } |
3045 | |
3046 | /// Split block \p MBB at \p MI, so as to insert a loop. If \p InstInLoop is
3047 | /// true, \p MI will be the only instruction in the loop body block. Otherwise,
3048 | /// it will be the first instruction in the remainder block.
3049 | ///
3050 | /// \returns { LoopBody, Remainder }
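     | ///
     | /// Resulting CFG, as a sketch: LoopBB gets a self edge, and Remainder
     | /// inherits \p MBB's old successors.
     | ///
     | ///   MBB -> LoopBB -> RemainderBB -> (old successors of MBB)
     | ///           ^   |
     | ///           +---+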
3051 | static std::pair<MachineBasicBlock *, MachineBasicBlock *> |
3052 | splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) { |
3053 | MachineFunction *MF = MBB.getParent(); |
3054 | MachineBasicBlock::iterator I(&MI); |
3055 | |
3056 | // To insert the loop we need to split the block. Move everything after this |
3057 | // point to a new block, and insert a new empty block between the two. |
3058 | MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock(); |
3059 | MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock(); |
3060 | MachineFunction::iterator MBBI(MBB); |
3061 | ++MBBI; |
3062 | |
3063 | MF->insert(MBBI, LoopBB); |
3064 | MF->insert(MBBI, RemainderBB); |
3065 | |
3066 | LoopBB->addSuccessor(LoopBB); |
3067 | LoopBB->addSuccessor(RemainderBB); |
3068 | |
3069 | // Move the rest of the block into a new block. |
3070 | RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); |
3071 | |
3072 | if (InstInLoop) { |
3073 | auto Next = std::next(I); |
3074 | |
3075 | // Move instruction to loop body. |
3076 | LoopBB->splice(LoopBB->begin(), &MBB, I, Next); |
3077 | |
3078 | // Move the rest of the block. |
3079 | RemainderBB->splice(RemainderBB->begin(), &MBB, Next, MBB.end()); |
3080 | } else { |
3081 | RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); |
3082 | } |
3083 | |
3084 | MBB.addSuccessor(LoopBB); |
3085 | |
3086 | return std::make_pair(LoopBB, RemainderBB); |
3087 | } |
3088 | |
3089 | /// Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it. |
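     | /// Bundling keeps the s_waitcnt glued to \p MI, so later passes cannot
     | /// schedule or insert anything between the two instructions.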
3090 | void SITargetLowering::bundleInstWithWaitcnt(MachineInstr &MI) const { |
3091 | MachineBasicBlock *MBB = MI.getParent(); |
3092 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
3093 | auto I = MI.getIterator(); |
3094 | auto E = std::next(I); |
3095 | |
3096 | BuildMI(*MBB, E, MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT)) |
3097 | .addImm(0); |
3098 | |
3099 | MIBundleBuilder Bundler(*MBB, I, E); |
3100 | finalizeBundle(*MBB, Bundler.begin()); |
3101 | } |
3102 | |
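     | // Emit a loop that retries the GWS operation until no memory violation is
     | // reported: clear TRAP_STS.MEM_VIOL, run the op bundled with an s_waitcnt 0,
     | // re-read the MEM_VIOL bit, and branch back while it is still set.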
3103 | MachineBasicBlock * |
3104 | SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI, |
3105 | MachineBasicBlock *BB) const { |
3106 | const DebugLoc &DL = MI.getDebugLoc(); |
3107 | |
3108 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
3109 | |
3110 | MachineBasicBlock *LoopBB; |
3111 | MachineBasicBlock *RemainderBB; |
3112 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
3113 | |
3114 | // Apparently kill flags are only valid if the def is in the same block? |
3115 | if (MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0)) |
3116 | Src->setIsKill(false); |
3117 | |
3118 | std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true); |
3119 | |
3120 | MachineBasicBlock::iterator I = LoopBB->end(); |
3121 | |
3122 | const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg( |
3123 | AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1); |
3124 | |
3125 | // Clear TRAP_STS.MEM_VIOL |
3126 | BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32)) |
3127 | .addImm(0) |
3128 | .addImm(EncodedReg); |
3129 | |
3130 | bundleInstWithWaitcnt(MI); |
3131 | |
3132 | Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); |
3133 | |
3134 | // Load and check TRAP_STS.MEM_VIOL |
3135 | BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg) |
3136 | .addImm(EncodedReg); |
3137 | |
3138 | // FIXME: Do we need to use an isel pseudo that may clobber scc? |
3139 | BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32)) |
3140 | .addReg(Reg, RegState::Kill) |
3141 | .addImm(0); |
3142 | BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) |
3143 | .addMBB(LoopBB); |
3144 | |
3145 | return RemainderBB; |
3146 | } |
3147 | |
3148 | // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the |
3149 | // wavefront. If the value is uniform and just happens to be in a VGPR, this |
3150 | // will only do one iteration. In the worst case, this will loop 64 times. |
3151 | // |
3152 | // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value. |
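     | //
     | // Rough shape of the emitted waterfall loop (wave64, M0 path, no GPR index
     | // mode; an illustrative sketch with invented register names):
     | //
     | //   loop:
     | //     s_cur    = V_READFIRSTLANE_B32 v_idx
     | //     cond     = V_CMP_EQ_U32 s_cur, v_idx
     | //     new_exec = S_AND_SAVEEXEC_B64 cond
     | //     M0       = s_cur (plus Offset, if nonzero)
     | //     ... the indirect access itself ...
     | //     exec     = S_XOR_B64 exec, new_exec
     | //     S_CBRANCH_EXECNZ loop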
3153 | static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop( |
3154 | const SIInstrInfo *TII, |
3155 | MachineRegisterInfo &MRI, |
3156 | MachineBasicBlock &OrigBB, |
3157 | MachineBasicBlock &LoopBB, |
3158 | const DebugLoc &DL, |
3159 | const MachineOperand &IdxReg, |
3160 | unsigned InitReg, |
3161 | unsigned ResultReg, |
3162 | unsigned PhiReg, |
3163 | unsigned InitSaveExecReg, |
3164 | int Offset, |
3165 | bool UseGPRIdxMode, |
3166 | bool IsIndirectSrc) { |
3167 | MachineFunction *MF = OrigBB.getParent(); |
3168 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
3169 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
3170 | MachineBasicBlock::iterator I = LoopBB.begin(); |
3171 | |
3172 | const TargetRegisterClass *BoolRC = TRI->getBoolRC(); |
3173 | Register PhiExec = MRI.createVirtualRegister(BoolRC); |
3174 | Register NewExec = MRI.createVirtualRegister(BoolRC); |
3175 | Register CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); |
3176 | Register CondReg = MRI.createVirtualRegister(BoolRC); |
3177 | |
3178 | BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) |
3179 | .addReg(InitReg) |
3180 | .addMBB(&OrigBB) |
3181 | .addReg(ResultReg) |
3182 | .addMBB(&LoopBB); |
3183 | |
3184 | BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec) |
3185 | .addReg(InitSaveExecReg) |
3186 | .addMBB(&OrigBB) |
3187 | .addReg(NewExec) |
3188 | .addMBB(&LoopBB); |
3189 | |
3190 | // Read the next variant <- also loop target. |
3191 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg) |
3192 | .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef())); |
3193 | |
3194 | // Compare the just read M0 value to all possible Idx values. |
3195 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg) |
3196 | .addReg(CurrentIdxReg) |
3197 | .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg()); |
3198 | |
3199 | // Update EXEC, save the original EXEC value to VCC. |
3200 | BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 |
3201 | : AMDGPU::S_AND_SAVEEXEC_B64), |
3202 | NewExec) |
3203 | .addReg(CondReg, RegState::Kill); |
3204 | |
3205 | MRI.setSimpleHint(NewExec, CondReg); |
3206 | |
3207 | if (UseGPRIdxMode) { |
3208 | unsigned IdxReg; |
3209 | if (Offset == 0) { |
3210 | IdxReg = CurrentIdxReg; |
3211 | } else { |
3212 | IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); |
3213 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg) |
3214 | .addReg(CurrentIdxReg, RegState::Kill) |
3215 | .addImm(Offset); |
3216 | } |
3217 | unsigned IdxMode = IsIndirectSrc ? |
3218 | AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE; |
3219 | MachineInstr *SetOn = |
3220 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) |
3221 | .addReg(IdxReg, RegState::Kill) |
3222 | .addImm(IdxMode); |
3223 | SetOn->getOperand(3).setIsUndef(); |
3224 | } else { |
3225 | // Move index from VCC into M0 |
3226 | if (Offset == 0) { |
3227 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) |
3228 | .addReg(CurrentIdxReg, RegState::Kill); |
3229 | } else { |
3230 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) |
3231 | .addReg(CurrentIdxReg, RegState::Kill) |
3232 | .addImm(Offset); |
3233 | } |
3234 | } |
3235 | |
3236 | // Update EXEC, switch all done bits to 0 and all todo bits to 1. |
3237 | unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; |
3238 | MachineInstr *InsertPt = |
3239 | BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term |
3240 | : AMDGPU::S_XOR_B64_term), Exec) |
3241 | .addReg(Exec) |
3242 | .addReg(NewExec); |
3243 | |
3244 | // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use |
3245 | // s_cbranch_scc0? |
3246 | |
3247 | // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover. |
3248 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) |
3249 | .addMBB(&LoopBB); |
3250 | |
3251 | return InsertPt->getIterator(); |
3252 | } |
3253 | |
3254 | // This has slightly sub-optimal regalloc when the source vector is killed by
3255 | // the read. The register allocator does not understand that the kill is
3256 | // per-workitem, so the vector is kept alive for the whole loop, and we end up
3257 | // not re-using a subregister from it, using 1 more VGPR than necessary. That
3258 | // VGPR was saved when this was expanded after register allocation.
3259 | static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII, |
3260 | MachineBasicBlock &MBB, |
3261 | MachineInstr &MI, |
3262 | unsigned InitResultReg, |
3263 | unsigned PhiReg, |
3264 | int Offset, |
3265 | bool UseGPRIdxMode, |
3266 | bool IsIndirectSrc) { |
3267 | MachineFunction *MF = MBB.getParent(); |
3268 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
3269 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
3270 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
3271 | const DebugLoc &DL = MI.getDebugLoc(); |
3272 | MachineBasicBlock::iterator I(&MI); |
3273 | |
3274 | const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); |
3275 | Register DstReg = MI.getOperand(0).getReg(); |
3276 | Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); |
3277 | Register TmpExec = MRI.createVirtualRegister(BoolXExecRC); |
3278 | unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; |
3279 | unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; |
3280 | |
3281 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec); |
3282 | |
3283 | // Save the EXEC mask |
3284 | BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec) |
3285 | .addReg(Exec); |
3286 | |
3287 | MachineBasicBlock *LoopBB; |
3288 | MachineBasicBlock *RemainderBB; |
3289 | std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, MBB, false); |
3290 | |
3291 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
3292 | |
3293 | auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx, |
3294 | InitResultReg, DstReg, PhiReg, TmpExec, |
3295 | Offset, UseGPRIdxMode, IsIndirectSrc); |
3296 | |
3297 | MachineBasicBlock::iterator First = RemainderBB->begin(); |
3298 | BuildMI(*RemainderBB, First, DL, TII->get(MovExecOpc), Exec) |
3299 | .addReg(SaveExec); |
3300 | |
3301 | return InsPt; |
3302 | } |
3303 | |
3304 | // Returns subreg index, offset |
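     | //
     | // For example, with a 128-bit register class (4 x 32-bit elements):
     | // Offset 2 yields (sub2, 0), while an out-of-bounds Offset 7 is passed
     | // through unchanged as (sub0, 7).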
3305 | static std::pair<unsigned, int> |
3306 | computeIndirectRegAndOffset(const SIRegisterInfo &TRI, |
3307 | const TargetRegisterClass *SuperRC, |
3308 | unsigned VecReg, |
3309 | int Offset) { |
3310 | int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32; |
3311 | |
3312 | // Skip out of bounds offsets, or else we would end up using an undefined |
3313 | // register. |
3314 | if (Offset >= NumElts || Offset < 0) |
3315 | return std::make_pair(AMDGPU::sub0, Offset); |
3316 | |
3317 | return std::make_pair(SIRegisterInfo::getSubRegFromChannel(Offset), 0); |
3318 | } |
3319 | |
3320 | // Return true if the index is an SGPR and was set. |
3321 | static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII, |
3322 | MachineRegisterInfo &MRI, |
3323 | MachineInstr &MI, |
3324 | int Offset, |
3325 | bool UseGPRIdxMode, |
3326 | bool IsIndirectSrc) { |
3327 | MachineBasicBlock *MBB = MI.getParent(); |
3328 | const DebugLoc &DL = MI.getDebugLoc(); |
3329 | MachineBasicBlock::iterator I(&MI); |
3330 | |
3331 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
3332 | const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); |
3333 | |
3334 |   assert(Idx->getReg() != AMDGPU::NoRegister);
3335 | |
3336 | if (!TII->getRegisterInfo().isSGPRClass(IdxRC)) |
3337 | return false; |
3338 | |
3339 | if (UseGPRIdxMode) { |
3340 | unsigned IdxMode = IsIndirectSrc ? |
3341 | AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE; |
3342 | if (Offset == 0) { |
3343 | MachineInstr *SetOn = |
3344 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) |
3345 | .add(*Idx) |
3346 | .addImm(IdxMode); |
3347 | |
3348 | SetOn->getOperand(3).setIsUndef(); |
3349 | } else { |
3350 | Register Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); |
3351 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) |
3352 | .add(*Idx) |
3353 | .addImm(Offset); |
3354 | MachineInstr *SetOn = |
3355 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) |
3356 | .addReg(Tmp, RegState::Kill) |
3357 | .addImm(IdxMode); |
3358 | |
3359 | SetOn->getOperand(3).setIsUndef(); |
3360 | } |
3361 | |
3362 | return true; |
3363 | } |
3364 | |
3365 | if (Offset == 0) { |
3366 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) |
3367 | .add(*Idx); |
3368 | } else { |
3369 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) |
3370 | .add(*Idx) |
3371 | .addImm(Offset); |
3372 | } |
3373 | |
3374 | return true; |
3375 | } |
3376 | |
3377 | // Control flow needs to be inserted if indexing with a VGPR. |
3378 | static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, |
3379 | MachineBasicBlock &MBB, |
3380 | const GCNSubtarget &ST) { |
3381 | const SIInstrInfo *TII = ST.getInstrInfo(); |
3382 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); |
3383 | MachineFunction *MF = MBB.getParent(); |
3384 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
3385 | |
3386 | Register Dst = MI.getOperand(0).getReg(); |
3387 | Register SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); |
3388 | int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); |
3389 | |
3390 | const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg); |
3391 | |
3392 | unsigned SubReg; |
3393 | std::tie(SubReg, Offset) |
3394 | = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset); |
3395 | |
3396 | const bool UseGPRIdxMode = ST.useVGPRIndexMode(); |
3397 | |
3398 | if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) { |
3399 | MachineBasicBlock::iterator I(&MI); |
3400 | const DebugLoc &DL = MI.getDebugLoc(); |
3401 | |
3402 | if (UseGPRIdxMode) { |
3403 | // TODO: Look at the uses to avoid the copy. This may require rescheduling |
3404 | // to avoid interfering with other uses, so probably requires a new |
3405 | // optimization pass. |
3406 | BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) |
3407 | .addReg(SrcReg, RegState::Undef, SubReg) |
3408 | .addReg(SrcReg, RegState::Implicit) |
3409 | .addReg(AMDGPU::M0, RegState::Implicit); |
3410 | BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); |
3411 | } else { |
3412 | BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) |
3413 | .addReg(SrcReg, RegState::Undef, SubReg) |
3414 | .addReg(SrcReg, RegState::Implicit); |
3415 | } |
3416 | |
3417 | MI.eraseFromParent(); |
3418 | |
3419 | return &MBB; |
3420 | } |
3421 | |
3422 | const DebugLoc &DL = MI.getDebugLoc(); |
3423 | MachineBasicBlock::iterator I(&MI); |
3424 | |
3425 | Register PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
3426 | Register InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
3427 | |
3428 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg); |
3429 | |
3430 | auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, |
3431 | Offset, UseGPRIdxMode, true); |
3432 | MachineBasicBlock *LoopBB = InsPt->getParent(); |
3433 | |
3434 | if (UseGPRIdxMode) { |
3435 | BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) |
3436 | .addReg(SrcReg, RegState::Undef, SubReg) |
3437 | .addReg(SrcReg, RegState::Implicit) |
3438 | .addReg(AMDGPU::M0, RegState::Implicit); |
3439 | BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); |
3440 | } else { |
3441 | BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) |
3442 | .addReg(SrcReg, RegState::Undef, SubReg) |
3443 | .addReg(SrcReg, RegState::Implicit); |
3444 | } |
3445 | |
3446 | MI.eraseFromParent(); |
3447 | |
3448 | return LoopBB; |
3449 | } |
3450 | |
3451 | static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, |
3452 | MachineBasicBlock &MBB, |
3453 | const GCNSubtarget &ST) { |
3454 | const SIInstrInfo *TII = ST.getInstrInfo(); |
3455 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); |
3456 | MachineFunction *MF = MBB.getParent(); |
3457 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
3458 | |
3459 | Register Dst = MI.getOperand(0).getReg(); |
3460 | const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); |
3461 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
3462 | const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); |
3463 | int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); |
3464 | const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg()); |
3465 | |
3466 | // This can be an immediate, but will be folded later. |
3467 |   assert(Val->getReg());
3468 | |
3469 | unsigned SubReg; |
3470 | std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC, |
3471 | SrcVec->getReg(), |
3472 | Offset); |
3473 | const bool UseGPRIdxMode = ST.useVGPRIndexMode(); |
3474 | |
3475 | if (Idx->getReg() == AMDGPU::NoRegister) { |
3476 | MachineBasicBlock::iterator I(&MI); |
3477 | const DebugLoc &DL = MI.getDebugLoc(); |
3478 | |
3479 |     assert(Offset == 0);
3480 | |
3481 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst) |
3482 | .add(*SrcVec) |
3483 | .add(*Val) |
3484 | .addImm(SubReg); |
3485 | |
3486 | MI.eraseFromParent(); |
3487 | return &MBB; |
3488 | } |
3489 | |
3490 | const MCInstrDesc &MovRelDesc |
3491 | = TII->getIndirectRegWritePseudo(TRI.getRegSizeInBits(*VecRC), 32, false); |
3492 | |
3493 | if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) { |
3494 | MachineBasicBlock::iterator I(&MI); |
3495 | const DebugLoc &DL = MI.getDebugLoc(); |
3496 | BuildMI(MBB, I, DL, MovRelDesc, Dst) |
3497 | .addReg(SrcVec->getReg()) |
3498 | .add(*Val) |
3499 | .addImm(SubReg); |
3500 | if (UseGPRIdxMode) |
3501 | BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); |
3502 | |
3503 | MI.eraseFromParent(); |
3504 | return &MBB; |
3505 | } |
3506 | |
3507 | if (Val->isReg()) |
3508 | MRI.clearKillFlags(Val->getReg()); |
3509 | |
3510 | const DebugLoc &DL = MI.getDebugLoc(); |
3511 | |
3512 | Register PhiReg = MRI.createVirtualRegister(VecRC); |
3513 | |
3514 | auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, |
3515 | Offset, UseGPRIdxMode, false); |
3516 | MachineBasicBlock *LoopBB = InsPt->getParent(); |
3517 | |
3518 | BuildMI(*LoopBB, InsPt, DL, MovRelDesc, Dst) |
3519 | .addReg(PhiReg) |
3520 | .add(*Val) |
3521 | .addImm(AMDGPU::sub0); |
3522 | if (UseGPRIdxMode) |
3523 | BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); |
3524 | |
3525 | MI.eraseFromParent(); |
3526 | return LoopBB; |
3527 | } |
3528 | |
3529 | MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( |
3530 | MachineInstr &MI, MachineBasicBlock *BB) const { |
3531 | |
3532 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
3533 | MachineFunction *MF = BB->getParent(); |
3534 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); |
3535 | |
3536 | if (TII->isMIMG(MI)) { |
3537 | if (MI.memoperands_empty() && MI.mayLoadOrStore()) { |
3538 | report_fatal_error("missing mem operand from MIMG instruction"); |
3539 | } |
3540 |     // Add a memoperand for mimg instructions so that they aren't assumed to
3541 |     // be ordered memory instructions.
3542 | |
3543 | return BB; |
3544 | } |
3545 | |
3546 | switch (MI.getOpcode()) { |
3547 | case AMDGPU::S_ADD_U64_PSEUDO: |
3548 | case AMDGPU::S_SUB_U64_PSEUDO: { |
3549 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
3550 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
3551 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
3552 | const TargetRegisterClass *BoolRC = TRI->getBoolRC(); |
3553 | const DebugLoc &DL = MI.getDebugLoc(); |
3554 | |
3555 | MachineOperand &Dest = MI.getOperand(0); |
3556 | MachineOperand &Src0 = MI.getOperand(1); |
3557 | MachineOperand &Src1 = MI.getOperand(2); |
3558 | |
3559 | Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
3560 | Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
3561 | |
3562 | MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, |
3563 | Src0, BoolRC, AMDGPU::sub0, |
3564 | &AMDGPU::SReg_32RegClass); |
3565 | MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, |
3566 | Src0, BoolRC, AMDGPU::sub1, |
3567 | &AMDGPU::SReg_32RegClass); |
3568 | |
3569 | MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, |
3570 | Src1, BoolRC, AMDGPU::sub0, |
3571 | &AMDGPU::SReg_32RegClass); |
3572 | MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, |
3573 | Src1, BoolRC, AMDGPU::sub1, |
3574 | &AMDGPU::SReg_32RegClass); |
3575 | |
3576 | bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); |
3577 | |
3578 | unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; |
3579 | unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; |
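     |     // Sketch of the expansion for the add case (register names invented):
     |     //   dst.sub0 = S_ADD_U32  src0.sub0, src1.sub0  ; SCC = carry-out
     |     //   dst.sub1 = S_ADDC_U32 src0.sub1, src1.sub1  ; SCC consumed as carry-in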
3580 | BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0) |
3581 | .add(Src0Sub0) |
3582 | .add(Src1Sub0); |
3583 | BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1) |
3584 | .add(Src0Sub1) |
3585 | .add(Src1Sub1); |
3586 | BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg()) |
3587 | .addReg(DestSub0) |
3588 | .addImm(AMDGPU::sub0) |
3589 | .addReg(DestSub1) |
3590 | .addImm(AMDGPU::sub1); |
3591 | MI.eraseFromParent(); |
3592 | return BB; |
3593 | } |
3594 | case AMDGPU::SI_INIT_M0: { |
3595 | BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), |
3596 | TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) |
3597 | .add(MI.getOperand(0)); |
3598 | MI.eraseFromParent(); |
3599 | return BB; |
3600 | } |
3601 | case AMDGPU::SI_INIT_EXEC: |
3602 | // This should be before all vector instructions. |
3603 | BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64), |
3604 | AMDGPU::EXEC) |
3605 | .addImm(MI.getOperand(0).getImm()); |
3606 | MI.eraseFromParent(); |
3607 | return BB; |
3608 | |
3609 | case AMDGPU::SI_INIT_EXEC_LO: |
3610 | // This should be before all vector instructions. |
3611 | BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), |
3612 | AMDGPU::EXEC_LO) |
3613 | .addImm(MI.getOperand(0).getImm()); |
3614 | MI.eraseFromParent(); |
3615 | return BB; |
3616 | |
3617 | case AMDGPU::SI_INIT_EXEC_FROM_INPUT: { |
3618 | // Extract the thread count from an SGPR input and set EXEC accordingly. |
3619 | // Since BFM can't shift by 64, handle that case with CMP + CMOV. |
3620 | // |
3621 | // S_BFE_U32 count, input, {shift, 7} |
3622 | // S_BFM_B64 exec, count, 0 |
3623 | // S_CMP_EQ_U32 count, 64 |
3624 | // S_CMOV_B64 exec, -1 |
3625 | MachineInstr *FirstMI = &*BB->begin(); |
3626 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
3627 | Register InputReg = MI.getOperand(0).getReg(); |
3628 | Register CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); |
3629 | bool Found = false; |
3630 | |
3631 | // Move the COPY of the input reg to the beginning, so that we can use it. |
3632 | for (auto I = BB->begin(); I != &MI; I++) { |
3633 | if (I->getOpcode() != TargetOpcode::COPY || |
3634 | I->getOperand(0).getReg() != InputReg) |
3635 | continue; |
3636 | |
3637 | if (I == FirstMI) { |
3638 | FirstMI = &*++BB->begin(); |
3639 | } else { |
3640 | I->removeFromParent(); |
3641 | BB->insert(FirstMI, &*I); |
3642 | } |
3643 | Found = true; |
3644 | break; |
3645 | } |
3646 |     assert(Found);
3647 | (void)Found; |
3648 | |
3649 | // This should be before all vector instructions. |
3650 | unsigned Mask = (getSubtarget()->getWavefrontSize() << 1) - 1; |
3651 | bool isWave32 = getSubtarget()->isWave32(); |
3652 | unsigned Exec = isWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC; |
3653 | BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg) |
3654 | .addReg(InputReg) |
3655 | .addImm((MI.getOperand(1).getImm() & Mask) | 0x70000); |
3656 | BuildMI(*BB, FirstMI, DebugLoc(), |
3657 | TII->get(isWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64), |
3658 | Exec) |
3659 | .addReg(CountReg) |
3660 | .addImm(0); |
3661 | BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32)) |
3662 | .addReg(CountReg, RegState::Kill) |
3663 | .addImm(getSubtarget()->getWavefrontSize()); |
3664 | BuildMI(*BB, FirstMI, DebugLoc(), |
3665 | TII->get(isWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64), |
3666 | Exec) |
3667 | .addImm(-1); |
3668 | MI.eraseFromParent(); |
3669 | return BB; |
3670 | } |
3671 | |
3672 | case AMDGPU::GET_GROUPSTATICSIZE: { |
3673 |     assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA ||
3674 |            getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL);
3675 | DebugLoc DL = MI.getDebugLoc(); |
3676 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) |
3677 | .add(MI.getOperand(0)) |
3678 | .addImm(MFI->getLDSSize()); |
3679 | MI.eraseFromParent(); |
3680 | return BB; |
3681 | } |
3682 | case AMDGPU::SI_INDIRECT_SRC_V1: |
3683 | case AMDGPU::SI_INDIRECT_SRC_V2: |
3684 | case AMDGPU::SI_INDIRECT_SRC_V4: |
3685 | case AMDGPU::SI_INDIRECT_SRC_V8: |
3686 | case AMDGPU::SI_INDIRECT_SRC_V16: |
3687 | return emitIndirectSrc(MI, *BB, *getSubtarget()); |
3688 | case AMDGPU::SI_INDIRECT_DST_V1: |
3689 | case AMDGPU::SI_INDIRECT_DST_V2: |
3690 | case AMDGPU::SI_INDIRECT_DST_V4: |
3691 | case AMDGPU::SI_INDIRECT_DST_V8: |
3692 | case AMDGPU::SI_INDIRECT_DST_V16: |
3693 | return emitIndirectDst(MI, *BB, *getSubtarget()); |
3694 | case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: |
3695 | case AMDGPU::SI_KILL_I1_PSEUDO: |
3696 | return splitKillBlock(MI, BB); |
3697 | case AMDGPU::V_CNDMASK_B64_PSEUDO: { |
3698 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
3699 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
3700 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
3701 | |
3702 | Register Dst = MI.getOperand(0).getReg(); |
3703 | Register Src0 = MI.getOperand(1).getReg(); |
3704 | Register Src1 = MI.getOperand(2).getReg(); |
3705 | const DebugLoc &DL = MI.getDebugLoc(); |
3706 | Register SrcCond = MI.getOperand(3).getReg(); |
3707 | |
3708 | Register DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
3709 | Register DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
3710 | const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); |
3711 | Register SrcCondCopy = MRI.createVirtualRegister(CondRC); |
3712 | |
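     |     // Select each 32-bit half with the same condition mask, then
     |     // reassemble the 64-bit result. V_CNDMASK_B32 yields src1 in lanes
     |     // where the condition bit is set, and src0 elsewhere.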
3713 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy) |
3714 | .addReg(SrcCond); |
3715 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo) |
3716 | .addImm(0) |
3717 | .addReg(Src0, 0, AMDGPU::sub0) |
3718 | .addImm(0) |
3719 | .addReg(Src1, 0, AMDGPU::sub0) |
3720 | .addReg(SrcCondCopy); |
3721 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi) |
3722 | .addImm(0) |
3723 | .addReg(Src0, 0, AMDGPU::sub1) |
3724 | .addImm(0) |
3725 | .addReg(Src1, 0, AMDGPU::sub1) |
3726 | .addReg(SrcCondCopy); |
3727 | |
3728 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst) |
3729 | .addReg(DstLo) |
3730 | .addImm(AMDGPU::sub0) |
3731 | .addReg(DstHi) |
3732 | .addImm(AMDGPU::sub1); |
3733 | MI.eraseFromParent(); |
3734 | return BB; |
3735 | } |
3736 | case AMDGPU::SI_BR_UNDEF: { |
3737 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
3738 | const DebugLoc &DL = MI.getDebugLoc(); |
3739 | MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) |
3740 | .add(MI.getOperand(0)); |
3741 | Br->getOperand(1).setIsUndef(true); // read undef SCC |
3742 | MI.eraseFromParent(); |
3743 | return BB; |
3744 | } |
3745 | case AMDGPU::ADJCALLSTACKUP: |
3746 | case AMDGPU::ADJCALLSTACKDOWN: { |
3747 | const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); |
3748 | MachineInstrBuilder MIB(*MF, &MI); |
3749 | |
3750 |     // Add an implicit use of the frame offset reg to prevent the restore copy
3751 |     // inserted after the call from being reordered after stack operations in
3752 |     // the caller's frame.
3753 | MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine) |
3754 | .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit) |
3755 | .addReg(Info->getFrameOffsetReg(), RegState::Implicit); |
3756 | return BB; |
3757 | } |
3758 | case AMDGPU::SI_CALL_ISEL: { |
3759 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
3760 | const DebugLoc &DL = MI.getDebugLoc(); |
3761 | |
3762 | unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF); |
3763 | |
3764 | MachineInstrBuilder MIB; |
3765 | MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg); |
3766 | |
3767 | for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) |
3768 | MIB.add(MI.getOperand(I)); |
3769 | |
3770 | MIB.cloneMemRefs(MI); |
3771 | MI.eraseFromParent(); |
3772 | return BB; |
3773 | } |
3774 | case AMDGPU::V_ADD_I32_e32: |
3775 | case AMDGPU::V_SUB_I32_e32: |
3776 | case AMDGPU::V_SUBREV_I32_e32: { |
3777 | // TODO: Define distinct V_*_I32_Pseudo instructions instead. |
3778 | const DebugLoc &DL = MI.getDebugLoc(); |
3779 | unsigned Opc = MI.getOpcode(); |
3780 | |
3781 | bool NeedClampOperand = false; |
3782 | if (TII->pseudoToMCOpcode(Opc) == -1) { |
3783 | Opc = AMDGPU::getVOPe64(Opc); |
3784 | NeedClampOperand = true; |
3785 | } |
3786 | |
3787 | auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg()); |
3788 | if (TII->isVOP3(*I)) { |
3789 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
3790 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
3791 | I.addReg(TRI->getVCC(), RegState::Define); |
3792 | } |
3793 | I.add(MI.getOperand(1)) |
3794 | .add(MI.getOperand(2)); |
3795 | if (NeedClampOperand) |
3796 | I.addImm(0); // clamp bit for e64 encoding |
3797 | |
3798 | TII->legalizeOperands(*I); |
3799 | |
3800 | MI.eraseFromParent(); |
3801 | return BB; |
3802 | } |
3803 | case AMDGPU::DS_GWS_INIT: |
3804 | case AMDGPU::DS_GWS_SEMA_V: |
3805 | case AMDGPU::DS_GWS_SEMA_BR: |
3806 | case AMDGPU::DS_GWS_SEMA_P: |
3807 | case AMDGPU::DS_GWS_SEMA_RELEASE_ALL: |
3808 | case AMDGPU::DS_GWS_BARRIER: |
3809 | // An s_waitcnt 0 is required to be the instruction immediately following. |
3810 | if (getSubtarget()->hasGWSAutoReplay()) { |
3811 | bundleInstWithWaitcnt(MI); |
3812 | return BB; |
3813 | } |
3814 | |
3815 | return emitGWSMemViolTestLoop(MI, BB); |
3816 | default: |
3817 | return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); |
3818 | } |
3819 | } |
3820 | |
3821 | bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const { |
3822 | return isTypeLegal(VT.getScalarType()); |
3823 | } |
3824 | |
3825 | bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const { |
3826 | // This currently forces unfolding various combinations of fsub into fma with |
3827 | // free fneg'd operands. As long as we have fast FMA (controlled by |
3828 | // isFMAFasterThanFMulAndFAdd), we should perform these. |
3829 | |
3830 | // When fma is quarter rate, for f64 where add / sub are at best half rate, |
3831 | // most of these combines appear to be cycle neutral but save on instruction |
3832 | // count / code size. |
3833 | return true; |
3834 | } |
3835 | |
3836 | EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, |
3837 | EVT VT) const { |
3838 | if (!VT.isVector()) { |
3839 | return MVT::i1; |
3840 | } |
3841 | return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements()); |
3842 | } |
3843 | |
3844 | MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const { |
3845 | // TODO: Should i16 be used always if legal? For now it would force VALU |
3846 | // shifts. |
3847 | return (VT == MVT::i16) ? MVT::i16 : MVT::i32; |
3848 | } |
3849 | |
3850 | // Answering this is somewhat tricky and depends on the specific device, |
3851 | // since devices have different rates for fma or for all f64 operations. |
3852 | // |
3853 | // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other |
3854 | // regardless of which device (although the number of cycles differs between |
3855 | // devices), so it is always profitable for f64. |
3856 | // |
3857 | // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable |
3858 | // only on full rate devices. Normally, we should prefer selecting v_mad_f32 |
3859 | // which we can always do even without fused FP ops since it returns the same |
3860 | // result as the separate operations and since it is always full |
3861 | // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32 |
3862 | // however does not support denormals, so we do report fma as faster if we have |
3863 | // a fast fma device and require denormals. |
3864 | // |
3865 | bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, |
3866 | EVT VT) const { |
3867 | VT = VT.getScalarType(); |
3868 | |
3869 | switch (VT.getSimpleVT().SimpleTy) { |
3870 | case MVT::f32: { |
3871 | // This is as fast on some subtargets. However, we always have the full rate |
3872 | // f32 mad available, which returns the same result as the separate operations, |
3873 | // so we should prefer it over fma. We can't use mad if we want to support |
3874 | // denormals, so only report fma as faster in that case. |
3875 | if (hasFP32Denormals(MF)) |
3876 | return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts(); |
3877 | |
3878 | // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32. |
3879 | return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts(); |
3880 | } |
3881 | case MVT::f64: |
3882 | return true; |
3883 | case MVT::f16: |
3884 | return Subtarget->has16BitInsts() && hasFP64FP16Denormals(MF); |
3885 | default: |
3886 | break; |
3887 | } |
3888 | |
3889 | return false; |
3890 | } |
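The trade-off described above comes down to rounding: a fused fma rounds once, while a multiply followed by an add (the v_mad_f32-style result the comment prefers for f32) rounds twice. A minimal standalone C++ sketch of that difference, not part of this file and assuming a C++17 compiler for the hex float literals:

#include <cmath>
#include <cstdio>

int main() {
  // Chosen so the exact product 1 + 2^-22 + 2^-46 is not representable in
  // float and must round before the add in the unfused sequence.
  float a = 1.0f + 0x1.0p-23f;
  float b = 1.0f + 0x1.0p-23f;
  float c = -(1.0f + 0x1.0p-22f);

  float unfused = a * b + c;        // two roundings: the 2^-46 term is lost
  float fused = std::fmaf(a, b, c); // one rounding: the 2^-46 term survives

  std::printf("unfused = %a\nfused   = %a\n", unfused, fused);
  // Typically prints unfused = 0x0p+0 and fused = 0x1p-46.
  return 0;
}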
3891 | |
3892 | bool SITargetLowering::isFMADLegalForFAddFSub(const SelectionDAG &DAG, |
3893 | const SDNode *N) const { |
3894 | // TODO: Check future ftz flag |
3895 | // v_mad_f32/v_mac_f32 do not support denormals. |
3896 | EVT VT = N->getValueType(0); |
3897 | if (VT == MVT::f32) |
3898 | return !hasFP32Denormals(DAG.getMachineFunction()); |
3899 | if (VT == MVT::f16) { |
3900 | return Subtarget->hasMadF16() && |
3901 | !hasFP64FP16Denormals(DAG.getMachineFunction()); |
3902 | } |
3903 | |
3904 | return false; |
3905 | } |
3906 | |
3907 | //===----------------------------------------------------------------------===// |
3908 | // Custom DAG Lowering Operations |
3909 | //===----------------------------------------------------------------------===// |
3910 | |
3911 | // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the |
3912 | // wider vector type is legal. |
3913 | SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op, |
3914 | SelectionDAG &DAG) const { |
3915 | unsigned Opc = Op.getOpcode(); |
3916 | EVT VT = Op.getValueType(); |
3917 | assert(VT == MVT::v4f16 || VT == MVT::v4i16); |
3918 | |
3919 | SDValue Lo, Hi; |
3920 | std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0); |
3921 | |
3922 | SDLoc SL(Op); |
3923 | SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo, |
3924 | Op->getFlags()); |
3925 | SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi, |
3926 | Op->getFlags()); |
3927 | |
3928 | return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); |
3929 | } |
3930 | |
3931 | // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the |
3932 | // wider vector type is legal. |
3933 | SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op, |
3934 | SelectionDAG &DAG) const { |
3935 | unsigned Opc = Op.getOpcode(); |
3936 | EVT VT = Op.getValueType(); |
3937 | assert(VT == MVT::v4i16 || VT == MVT::v4f16); |
3938 | |
3939 | SDValue Lo0, Hi0; |
3940 | std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0); |
3941 | SDValue Lo1, Hi1; |
3942 | std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1); |
3943 | |
3944 | SDLoc SL(Op); |
3945 | |
3946 | SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, |
3947 | Op->getFlags()); |
3948 | SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, |
3949 | Op->getFlags()); |
3950 | |
3951 | return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); |
3952 | } |
3953 | |
3954 | SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op, |
3955 | SelectionDAG &DAG) const { |
3956 | unsigned Opc = Op.getOpcode(); |
3957 | EVT VT = Op.getValueType(); |
3958 | assert(VT == MVT::v4i16 || VT == MVT::v4f16); |
3959 | |
3960 | SDValue Lo0, Hi0; |
3961 | std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0); |
3962 | SDValue Lo1, Hi1; |
3963 | std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1); |
3964 | SDValue Lo2, Hi2; |
3965 | std::tie(Lo2, Hi2) = DAG.SplitVectorOperand(Op.getNode(), 2); |
3966 | |
3967 | SDLoc SL(Op); |
3968 | |
3969 | SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, Lo2, |
3970 | Op->getFlags()); |
3971 | SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, Hi2, |
3972 | Op->getFlags()); |
3973 | |
3974 | return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); |
3975 | } |
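As a plain C++ illustration of the split-and-concat shape these three helpers produce (arrays of float standing in for the f16 elements, no DAG API): a v4 operation becomes two v2 operations whose results are concatenated, rather than four scalar operations.

#include <array>
#include <cstdio>

using V2 = std::array<float, 2>; // stands in for v2f16
using V4 = std::array<float, 4>; // stands in for v4f16

static V2 addV2(V2 a, V2 b) { return {a[0] + b[0], a[1] + b[1]}; }

int main() {
  V4 a{1, 2, 3, 4}, b{10, 20, 30, 40};

  // Split each operand into low and high halves (SplitVectorOperand) ...
  V2 aLo{a[0], a[1]}, aHi{a[2], a[3]};
  V2 bLo{b[0], b[1]}, bHi{b[2], b[3]};

  // ... apply the operation at the legal half width ...
  V2 lo = addV2(aLo, bLo);
  V2 hi = addV2(aHi, bHi);

  // ... and concatenate the halves (ISD::CONCAT_VECTORS).
  V4 r{lo[0], lo[1], hi[0], hi[1]};
  std::printf("%g %g %g %g\n", r[0], r[1], r[2], r[3]);
  return 0;
}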
3976 | |
3977 | |
3978 | SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
3979 | switch (Op.getOpcode()) { |
3980 | default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); |
3981 | case ISD::BRCOND: return LowerBRCOND(Op, DAG); |
3982 | case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); |
3983 | case ISD::LOAD: { |
3984 | SDValue Result = LowerLOAD(Op, DAG); |
3985 | assert((!Result.getNode() || |
3986 | Result.getNode()->getNumValues() == 2) && |
3987 | "Load should return a value and a chain"); |
3988 | return Result; |
3989 | } |
3990 | |
3991 | case ISD::FSIN: |
3992 | case ISD::FCOS: |
3993 | return LowerTrig(Op, DAG); |
3994 | case ISD::SELECT: return LowerSELECT(Op, DAG); |
3995 | case ISD::FDIV: return LowerFDIV(Op, DAG); |
3996 | case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG); |
3997 | case ISD::STORE: return LowerSTORE(Op, DAG); |
3998 | case ISD::GlobalAddress: { |
3999 | MachineFunction &MF = DAG.getMachineFunction(); |
4000 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
4001 | return LowerGlobalAddress(MFI, Op, DAG); |
4002 | } |
4003 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); |
4004 | case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); |
4005 | case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); |
4006 | case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); |
4007 | case ISD::INSERT_SUBVECTOR: |
4008 | return lowerINSERT_SUBVECTOR(Op, DAG); |
4009 | case ISD::INSERT_VECTOR_ELT: |
4010 | return lowerINSERT_VECTOR_ELT(Op, DAG); |
4011 | case ISD::EXTRACT_VECTOR_ELT: |
4012 | return lowerEXTRACT_VECTOR_ELT(Op, DAG); |
4013 | case ISD::VECTOR_SHUFFLE: |
4014 | return lowerVECTOR_SHUFFLE(Op, DAG); |
4015 | case ISD::BUILD_VECTOR: |
4016 | return lowerBUILD_VECTOR(Op, DAG); |
4017 | case ISD::FP_ROUND: |
4018 | return lowerFP_ROUND(Op, DAG); |
4019 | case ISD::TRAP: |
4020 | return lowerTRAP(Op, DAG); |
4021 | case ISD::DEBUGTRAP: |
4022 | return lowerDEBUGTRAP(Op, DAG); |
4023 | case ISD::FABS: |
4024 | case ISD::FNEG: |
4025 | case ISD::FCANONICALIZE: |
4026 | case ISD::BSWAP: |
4027 | return splitUnaryVectorOp(Op, DAG); |
4028 | case ISD::FMINNUM: |
4029 | case ISD::FMAXNUM: |
4030 | return lowerFMINNUM_FMAXNUM(Op, DAG); |
4031 | case ISD::FMA: |
4032 | return splitTernaryVectorOp(Op, DAG); |
4033 | case ISD::SHL: |
4034 | case ISD::SRA: |
4035 | case ISD::SRL: |
4036 | case ISD::ADD: |
4037 | case ISD::SUB: |
4038 | case ISD::MUL: |
4039 | case ISD::SMIN: |
4040 | case ISD::SMAX: |
4041 | case ISD::UMIN: |
4042 | case ISD::UMAX: |
4043 | case ISD::FADD: |
4044 | case ISD::FMUL: |
4045 | case ISD::FMINNUM_IEEE: |
4046 | case ISD::FMAXNUM_IEEE: |
4047 | return splitBinaryVectorOp(Op, DAG); |
4048 | } |
4049 | return SDValue(); |
4050 | } |
4051 | |
4052 | static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT, |
4053 | const SDLoc &DL, |
4054 | SelectionDAG &DAG, bool Unpacked) { |
4055 | if (!LoadVT.isVector()) |
4056 | return Result; |
4057 | |
4058 | if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16. |
4059 | // Truncate to v2i16/v4i16. |
4060 | EVT IntLoadVT = LoadVT.changeTypeToInteger(); |
4061 | |
4062 | // Work around the legalizer not scalarizing truncate after vector op |
4063 | // legalization by not creating an intermediate vector trunc. |
4064 | SmallVector<SDValue, 4> Elts; |
4065 | DAG.ExtractVectorElements(Result, Elts); |
4066 | for (SDValue &Elt : Elts) |
4067 | Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt); |
4068 | |
4069 | Result = DAG.getBuildVector(IntLoadVT, DL, Elts); |
4070 | |
4071 | // Bitcast to original type (v2f16/v4f16). |
4072 | return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result); |
4073 | } |
4074 | |
4075 | // Cast back to the original packed type. |
4076 | return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result); |
4077 | } |
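A standalone sketch of the unpacked-D16 fixup this helper performs, using raw integers for the lanes: on unpacked-D16 subtargets each 16-bit element of the load comes back in the low half of its own 32-bit lane, so the code truncates per lane and rebuilds the packed half vector.

#include <cstdint>
#include <cstdio>

int main() {
  // Result of an unpacked d16 load of a v2f16: two i32 lanes with the data
  // in the low 16 bits of each (high bits shown as arbitrary junk).
  uint32_t lanes[2] = {0xDEAD3C00u, 0xBEEF4000u}; // 0x3C00 = 1.0h, 0x4000 = 2.0h

  // Per-element ISD::TRUNCATE to i16 ...
  uint16_t lo = static_cast<uint16_t>(lanes[0]);
  uint16_t hi = static_cast<uint16_t>(lanes[1]);

  // ... then rebuild the packed v2i16 and bitcast to v2f16 (one i32 here).
  uint32_t packed = static_cast<uint32_t>(lo) |
                    (static_cast<uint32_t>(hi) << 16);
  std::printf("packed v2f16 bits = 0x%08x\n", packed); // 0x40003c00
  return 0;
}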
4078 | |
4079 | SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode, |
4080 | MemSDNode *M, |
4081 | SelectionDAG &DAG, |
4082 | ArrayRef<SDValue> Ops, |
4083 | bool IsIntrinsic) const { |
4084 | SDLoc DL(M); |
4085 | |
4086 | bool Unpacked = Subtarget->hasUnpackedD16VMem(); |
4087 | EVT LoadVT = M->getValueType(0); |
4088 | |
4089 | EVT EquivLoadVT = LoadVT; |
4090 | if (Unpacked && LoadVT.isVector()) { |
4091 | EquivLoadVT = LoadVT.isVector() ? |
4092 | EVT::getVectorVT(*DAG.getContext(), MVT::i32, |
4093 | LoadVT.getVectorNumElements()) : LoadVT; |
4094 | } |
4095 | |
4096 | // Change from v4f16/v2f16 to EquivLoadVT. |
4097 | SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other); |
4098 | |
4099 | SDValue Load |
4100 | = DAG.getMemIntrinsicNode( |
4101 | IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL, |
4102 | VTList, Ops, M->getMemoryVT(), |
4103 | M->getMemOperand()); |
4104 | if (!Unpacked) // Just adjusted the opcode. |
4105 | return Load; |
4106 | |
4107 | SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked); |
4108 | |
4109 | return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL); |
4110 | } |
4111 | |
4112 | SDValue SITargetLowering::lowerIntrinsicLoad(MemSDNode *M, bool IsFormat, |
4113 | SelectionDAG &DAG, |
4114 | ArrayRef<SDValue> Ops) const { |
4115 | SDLoc DL(M); |
4116 | EVT LoadVT = M->getValueType(0); |
4117 | EVT EltType = LoadVT.getScalarType(); |
4118 | EVT IntVT = LoadVT.changeTypeToInteger(); |
4119 | |
4120 | bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16); |
4121 | |
4122 | unsigned Opc = |
4123 | IsFormat ? AMDGPUISD::BUFFER_LOAD_FORMAT : AMDGPUISD::BUFFER_LOAD; |
4124 | |
4125 | if (IsD16) { |
4126 | return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, M, DAG, Ops); |
4127 | } |
4128 | |
4129 | // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics |
4130 | if (!IsD16 && !LoadVT.isVector() && EltType.getSizeInBits() < 32) |
4131 | return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M); |
4132 | |
4133 | if (isTypeLegal(LoadVT)) { |
4134 | return getMemIntrinsicNode(Opc, DL, M->getVTList(), Ops, IntVT, |
4135 | M->getMemOperand(), DAG); |
4136 | } |
4137 | |
4138 | EVT CastVT = getEquivalentMemType(*DAG.getContext(), LoadVT); |
4139 | SDVTList VTList = DAG.getVTList(CastVT, MVT::Other); |
4140 | SDValue MemNode = getMemIntrinsicNode(Opc, DL, VTList, Ops, CastVT, |
4141 | M->getMemOperand(), DAG); |
4142 | return DAG.getMergeValues( |
4143 | {DAG.getNode(ISD::BITCAST, DL, LoadVT, MemNode), MemNode.getValue(1)}, |
4144 | DL); |
4145 | } |
4146 | |
4147 | static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI, |
4148 | SDNode *N, SelectionDAG &DAG) { |
4149 | EVT VT = N->getValueType(0); |
4150 | const auto *CD = cast<ConstantSDNode>(N->getOperand(3)); |
4151 | int CondCode = CD->getSExtValue(); |
4152 | if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE || |
4153 | CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE) |
4154 | return DAG.getUNDEF(VT); |
4155 | |
4156 | ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode); |
4157 | |
4158 | SDValue LHS = N->getOperand(1); |
4159 | SDValue RHS = N->getOperand(2); |
4160 | |
4161 | SDLoc DL(N); |
4162 | |
4163 | EVT CmpVT = LHS.getValueType(); |
4164 | if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) { |
4165 | unsigned PromoteOp = ICmpInst::isSigned(IcInput) ? |
4166 | ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
4167 | LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS); |
4168 | RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS); |
4169 | } |
4170 | |
4171 | ISD::CondCode CCOpcode = getICmpCondCode(IcInput); |
4172 | |
4173 | unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize(); |
4174 | EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize); |
4175 | |
4176 | SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS, |
4177 | DAG.getCondCode(CCOpcode)); |
4178 | if (VT.bitsEq(CCVT)) |
4179 | return SetCC; |
4180 | return DAG.getZExtOrTrunc(SetCC, DL, VT); |
4181 | } |
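The extension choice above matters for correctness, not just legality. A standalone C++ sketch of why a signed i16 predicate must be promoted with sign extension while an unsigned one needs zero extension:

#include <cstdint>
#include <cstdio>

int main() {
  int16_t a = -1, b = 5; // as signed i16: a < b

  // Sign-extend, then compare in 32 bits: the signed result is preserved.
  int32_t sa = a, sb = b;
  std::printf("sext slt: %d\n", sa < sb); // 1 (correct)

  // Zero-extend, then compare: -1 becomes 0xFFFF and the answer flips.
  uint32_t za = static_cast<uint16_t>(a), zb = static_cast<uint16_t>(b);
  std::printf("zext slt: %d\n", za < zb); // 0 (wrong for a signed predicate)
  return 0;
}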
4182 | |
4183 | static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI, |
4184 | SDNode *N, SelectionDAG &DAG) { |
4185 | EVT VT = N->getValueType(0); |
4186 | const auto *CD = cast<ConstantSDNode>(N->getOperand(3)); |
4187 | |
4188 | int CondCode = CD->getSExtValue(); |
4189 | if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE || |
4190 | CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) { |
4191 | return DAG.getUNDEF(VT); |
4192 | } |
4193 | |
4194 | SDValue Src0 = N->getOperand(1); |
4195 | SDValue Src1 = N->getOperand(2); |
4196 | EVT CmpVT = Src0.getValueType(); |
4197 | SDLoc SL(N); |
4198 | |
4199 | if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) { |
4200 | Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); |
4201 | Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); |
4202 | } |
4203 | |
4204 | FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode); |
4205 | ISD::CondCode CCOpcode = getFCmpCondCode(IcInput); |
4206 | unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize(); |
4207 | EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize); |
4208 | SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0, |
4209 | Src1, DAG.getCondCode(CCOpcode)); |
4210 | if (VT.bitsEq(CCVT)) |
4211 | return SetCC; |
4212 | return DAG.getZExtOrTrunc(SetCC, SL, VT); |
4213 | } |
4214 | |
4215 | void SITargetLowering::ReplaceNodeResults(SDNode *N, |
4216 | SmallVectorImpl<SDValue> &Results, |
4217 | SelectionDAG &DAG) const { |
4218 | switch (N->getOpcode()) { |
4219 | case ISD::INSERT_VECTOR_ELT: { |
4220 | if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG)) |
4221 | Results.push_back(Res); |
4222 | return; |
4223 | } |
4224 | case ISD::EXTRACT_VECTOR_ELT: { |
4225 | if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG)) |
4226 | Results.push_back(Res); |
4227 | return; |
4228 | } |
4229 | case ISD::INTRINSIC_WO_CHAIN: { |
4230 | unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); |
4231 | switch (IID) { |
4232 | case Intrinsic::amdgcn_cvt_pkrtz: { |
4233 | SDValue Src0 = N->getOperand(1); |
4234 | SDValue Src1 = N->getOperand(2); |
4235 | SDLoc SL(N); |
4236 | SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32, |
4237 | Src0, Src1); |
4238 | Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt)); |
4239 | return; |
4240 | } |
4241 | case Intrinsic::amdgcn_cvt_pknorm_i16: |
4242 | case Intrinsic::amdgcn_cvt_pknorm_u16: |
4243 | case Intrinsic::amdgcn_cvt_pk_i16: |
4244 | case Intrinsic::amdgcn_cvt_pk_u16: { |
4245 | SDValue Src0 = N->getOperand(1); |
4246 | SDValue Src1 = N->getOperand(2); |
4247 | SDLoc SL(N); |
4248 | unsigned Opcode; |
4249 | |
4250 | if (IID == Intrinsic::amdgcn_cvt_pknorm_i16) |
4251 | Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; |
4252 | else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16) |
4253 | Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; |
4254 | else if (IID == Intrinsic::amdgcn_cvt_pk_i16) |
4255 | Opcode = AMDGPUISD::CVT_PK_I16_I32; |
4256 | else |
4257 | Opcode = AMDGPUISD::CVT_PK_U16_U32; |
4258 | |
4259 | EVT VT = N->getValueType(0); |
4260 | if (isTypeLegal(VT)) |
4261 | Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1)); |
4262 | else { |
4263 | SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1); |
4264 | Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt)); |
4265 | } |
4266 | return; |
4267 | } |
4268 | } |
4269 | break; |
4270 | } |
4271 | case ISD::INTRINSIC_W_CHAIN: { |
4272 | if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) { |
4273 | if (Res.getOpcode() == ISD::MERGE_VALUES) { |
4274 | // FIXME: Hacky |
4275 | Results.push_back(Res.getOperand(0)); |
4276 | Results.push_back(Res.getOperand(1)); |
4277 | } else { |
4278 | Results.push_back(Res); |
4279 | Results.push_back(Res.getValue(1)); |
4280 | } |
4281 | return; |
4282 | } |
4283 | |
4284 | break; |
4285 | } |
4286 | case ISD::SELECT: { |
4287 | SDLoc SL(N); |
4288 | EVT VT = N->getValueType(0); |
4289 | EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); |
4290 | SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1)); |
4291 | SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2)); |
4292 | |
4293 | EVT SelectVT = NewVT; |
4294 | if (NewVT.bitsLT(MVT::i32)) { |
4295 | LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS); |
4296 | RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS); |
4297 | SelectVT = MVT::i32; |
4298 | } |
4299 | |
4300 | SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT, |
4301 | N->getOperand(0), LHS, RHS); |
4302 | |
4303 | if (NewVT != SelectVT) |
4304 | NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect); |
4305 | Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect)); |
4306 | return; |
4307 | } |
4308 | case ISD::FNEG: { |
4309 | if (N->getValueType(0) != MVT::v2f16) |
4310 | break; |
4311 | |
4312 | SDLoc SL(N); |
4313 | SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0)); |
4314 | |
4315 | SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32, |
4316 | BC, |
4317 | DAG.getConstant(0x80008000, SL, MVT::i32)); |
4318 | Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op)); |
4319 | return; |
4320 | } |
4321 | case ISD::FABS: { |
4322 | if (N->getValueType(0) != MVT::v2f16) |
4323 | break; |
4324 | |
4325 | SDLoc SL(N); |
4326 | SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0)); |
4327 | |
4328 | SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32, |
4329 | BC, |
4330 | DAG.getConstant(0x7fff7fff, SL, MVT::i32)); |
4331 | Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op)); |
4332 | return; |
4333 | } |
4334 | default: |
4335 | break; |
4336 | } |
4337 | } |
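The FNEG and FABS cases above work because both f16 halves can be negated or made positive at once through their packed sign bits. A standalone sketch of that bit manipulation:

#include <cstdint>
#include <cstdio>

int main() {
  // v2f16 <1.0, -2.0> as raw bits: 0x3C00 = 1.0h, 0xC000 = -2.0h.
  uint32_t v = (0xC000u << 16) | 0x3C00u;

  uint32_t neg = v ^ 0x80008000u; // fneg: flip both sign bits
  uint32_t abs = v & 0x7FFF7FFFu; // fabs: clear both sign bits

  std::printf("v    = 0x%08x\n", v);   // 0xc0003c00
  std::printf("fneg = 0x%08x\n", neg); // 0x4000bc00 -> <-1.0, 2.0>
  std::printf("fabs = 0x%08x\n", abs); // 0x40003c00 -> < 1.0, 2.0>
  return 0;
}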
4338 | |
4339 | /// Helper function for LowerBRCOND |
4340 | static SDNode *findUser(SDValue Value, unsigned Opcode) { |
4341 | |
4342 | SDNode *Parent = Value.getNode(); |
4343 | for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); |
4344 | I != E; ++I) { |
4345 | |
4346 | if (I.getUse().get() != Value) |
4347 | continue; |
4348 | |
4349 | if (I->getOpcode() == Opcode) |
4350 | return *I; |
4351 | } |
4352 | return nullptr; |
4353 | } |
4354 | |
4355 | unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { |
4356 | if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) { |
4357 | switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) { |
4358 | case Intrinsic::amdgcn_if: |
4359 | return AMDGPUISD::IF; |
4360 | case Intrinsic::amdgcn_else: |
4361 | return AMDGPUISD::ELSE; |
4362 | case Intrinsic::amdgcn_loop: |
4363 | return AMDGPUISD::LOOP; |
4364 | case Intrinsic::amdgcn_end_cf: |
4365 | llvm_unreachable("should not occur"); |
4366 | default: |
4367 | return 0; |
4368 | } |
4369 | } |
4370 | |
4371 | // break, if_break, else_break are all only used as inputs to loop, not |
4372 | // directly as branch conditions. |
4373 | return 0; |
4374 | } |
4375 | |
4376 | bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const { |
4377 | const Triple &TT = getTargetMachine().getTargetTriple(); |
4378 | return (GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || |
4379 | GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) && |
4380 | AMDGPU::shouldEmitConstantsToTextSection(TT); |
4381 | } |
4382 | |
4383 | bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const { |
4384 | // FIXME: Either avoid relying on address space here or change the default |
4385 | // address space for functions to avoid the explicit check. |
4386 | return (GV->getValueType()->isFunctionTy() || |
4387 | !isNonGlobalAddrSpace(GV->getAddressSpace())) && |
4388 | !shouldEmitFixup(GV) && |
4389 | !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); |
4390 | } |
4391 | |
4392 | bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const { |
4393 | return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV); |
4394 | } |
4395 | |
4396 | bool SITargetLowering::shouldUseLDSConstAddress(const GlobalValue *GV) const { |
4397 | if (!GV->hasExternalLinkage()) |
4398 | return true; |
4399 | |
4400 | const auto OS = getTargetMachine().getTargetTriple().getOS(); |
4401 | return OS == Triple::AMDHSA || OS == Triple::AMDPAL; |
4402 | } |
4403 | |
4404 | /// This transforms the control flow intrinsics to get the branch destination |
4405 | /// as the last parameter, and also switches the branch target with BR if needed. |
4406 | SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, |
4407 | SelectionDAG &DAG) const { |
4408 | SDLoc DL(BRCOND); |
4409 | |
4410 | SDNode *Intr = BRCOND.getOperand(1).getNode(); |
4411 | SDValue Target = BRCOND.getOperand(2); |
4412 | SDNode *BR = nullptr; |
4413 | SDNode *SetCC = nullptr; |
4414 | |
4415 | if (Intr->getOpcode() == ISD::SETCC) { |
4416 | // As long as we negate the condition everything is fine |
4417 | SetCC = Intr; |
4418 | Intr = SetCC->getOperand(0).getNode(); |
4419 | |
4420 | } else { |
4421 | // Get the target from BR if we don't negate the condition |
4422 | BR = findUser(BRCOND, ISD::BR); |
4423 | Target = BR->getOperand(1); |
4424 | } |
4425 | |
4426 | // FIXME: This changes the types of the intrinsics instead of introducing new |
4427 | // nodes with the correct types. |
4428 | // e.g. llvm.amdgcn.loop |
4429 | |
4430 | // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3 |
4431 | // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088> |
4432 | |
4433 | unsigned CFNode = isCFIntrinsic(Intr); |
4434 | if (CFNode == 0) { |
4435 | // This is a uniform branch so we don't need to legalize. |
4436 | return BRCOND; |
4437 | } |
4438 | |
4439 | bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID || |
4440 | Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN; |
4441 | |
4442 | assert(!SetCC || |
4443 | (SetCC->getConstantOperandVal(1) == 1 && |
4444 | cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() == |
4445 | ISD::SETNE)); |
4446 | |
4447 | // operands of the new intrinsic call |
4448 | SmallVector<SDValue, 4> Ops; |
4449 | if (HaveChain) |
4450 | Ops.push_back(BRCOND.getOperand(0)); |
4451 | |
4452 | Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end()); |
4453 | Ops.push_back(Target); |
4454 | |
4455 | ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end()); |
4456 | |
4457 | // build the new intrinsic call |
4458 | SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode(); |
4459 | |
4460 | if (!HaveChain) { |
4461 | SDValue Ops[] = { |
4462 | SDValue(Result, 0), |
4463 | BRCOND.getOperand(0) |
4464 | }; |
4465 | |
4466 | Result = DAG.getMergeValues(Ops, DL).getNode(); |
4467 | } |
4468 | |
4469 | if (BR) { |
4470 | // Give the branch instruction our target |
4471 | SDValue Ops[] = { |
4472 | BR->getOperand(0), |
4473 | BRCOND.getOperand(2) |
4474 | }; |
4475 | SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); |
4476 | DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); |
4477 | BR = NewBR.getNode(); |
Value stored to 'BR' is never read | |
4478 | } |
4479 | |
4480 | SDValue Chain = SDValue(Result, Result->getNumValues() - 1); |
4481 | |
4482 | // Copy the intrinsic results to registers |
4483 | for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { |
4484 | SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); |
4485 | if (!CopyToReg) |
4486 | continue; |
4487 | |
4488 | Chain = DAG.getCopyToReg( |
4489 | Chain, DL, |
4490 | CopyToReg->getOperand(1), |
4491 | SDValue(Result, i - 1), |
4492 | SDValue()); |
4493 | |
4494 | DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); |
4495 | } |
4496 | |
4497 | // Remove the old intrinsic from the chain |
4498 | DAG.ReplaceAllUsesOfValueWith( |
4499 | SDValue(Intr, Intr->getNumValues() - 1), |
4500 | Intr->getOperand(0)); |
4501 | |
4502 | return Chain; |
4503 | } |
4504 | |
4505 | SDValue SITargetLowering::LowerRETURNADDR(SDValue Op, |
4506 | SelectionDAG &DAG) const { |
4507 | MVT VT = Op.getSimpleValueType(); |
4508 | SDLoc DL(Op); |
4509 | // Checking the depth |
4510 | if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) |
4511 | return DAG.getConstant(0, DL, VT); |
4512 | |
4513 | MachineFunction &MF = DAG.getMachineFunction(); |
4514 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
4515 | // Check for kernel and shader functions |
4516 | if (Info->isEntryFunction()) |
4517 | return DAG.getConstant(0, DL, VT); |
4518 | |
4519 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
4520 | // There is a call to @llvm.returnaddress in this function |
4521 | MFI.setReturnAddressIsTaken(true); |
4522 | |
4523 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
4524 | // Get the return address reg and mark it as an implicit live-in |
4525 | unsigned Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF), getRegClassFor(VT, Op.getNode()->isDivergent())); |
4526 | |
4527 | return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT); |
4528 | } |
4529 | |
4530 | SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG, |
4531 | SDValue Op, |
4532 | const SDLoc &DL, |
4533 | EVT VT) const { |
4534 | return Op.getValueType().bitsLE(VT) ? |
4535 | DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) : |
4536 | DAG.getNode(ISD::FTRUNC, DL, VT, Op); |
4537 | } |
4538 | |
4539 | SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { |
4540 | assert(Op.getValueType() == MVT::f16 && |
4541 | "Do not know how to custom lower FP_ROUND for non-f16 type"); |
4542 | |
4543 | SDValue Src = Op.getOperand(0); |
4544 | EVT SrcVT = Src.getValueType(); |
4545 | if (SrcVT != MVT::f64) |
4546 | return Op; |
4547 | |
4548 | SDLoc DL(Op); |
4549 | |
4550 | SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src); |
4551 | SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16); |
4552 | return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc); |
4553 | } |
4554 | |
4555 | SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op, |
4556 | SelectionDAG &DAG) const { |
4557 | EVT VT = Op.getValueType(); |
4558 | const MachineFunction &MF = DAG.getMachineFunction(); |
4559 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
4560 | bool IsIEEEMode = Info->getMode().IEEE; |
4561 | |
4562 | // FIXME: Assert during selection that this is only selected for |
4563 | // ieee_mode. Currently a combine can produce the ieee version for non-ieee |
4564 | // mode functions, but this happens to be OK since it's only done in cases |
4565 | // where it is known there are no sNaNs. |
4566 | if (IsIEEEMode) |
4567 | return expandFMINNUM_FMAXNUM(Op.getNode(), DAG); |
4568 | |
4569 | if (VT == MVT::v4f16) |
4570 | return splitBinaryVectorOp(Op, DAG); |
4571 | return Op; |
4572 | } |
4573 | |
4574 | SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const { |
4575 | SDLoc SL(Op); |
4576 | SDValue Chain = Op.getOperand(0); |
4577 | |
4578 | if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa || |
4579 | !Subtarget->isTrapHandlerEnabled()) |
4580 | return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain); |
4581 | |
4582 | MachineFunction &MF = DAG.getMachineFunction(); |
4583 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
4584 | unsigned UserSGPR = Info->getQueuePtrUserSGPR(); |
4585 | assert(UserSGPR != AMDGPU::NoRegister); |
4586 | SDValue QueuePtr = CreateLiveInRegister( |
4587 | DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); |
4588 | SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64); |
4589 | SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01, |
4590 | QueuePtr, SDValue()); |
4591 | SDValue Ops[] = { |
4592 | ToReg, |
4593 | DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16), |
4594 | SGPR01, |
4595 | ToReg.getValue(1) |
4596 | }; |
4597 | return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); |
4598 | } |
4599 | |
4600 | SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const { |
4601 | SDLoc SL(Op); |
4602 | SDValue Chain = Op.getOperand(0); |
4603 | MachineFunction &MF = DAG.getMachineFunction(); |
4604 | |
4605 | if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa || |
4606 | !Subtarget->isTrapHandlerEnabled()) { |
4607 | DiagnosticInfoUnsupported NoTrap(MF.getFunction(), |
4608 | "debugtrap handler not supported", |
4609 | Op.getDebugLoc(), |
4610 | DS_Warning); |
4611 | LLVMContext &Ctx = MF.getFunction().getContext(); |
4612 | Ctx.diagnose(NoTrap); |
4613 | return Chain; |
4614 | } |
4615 | |
4616 | SDValue Ops[] = { |
4617 | Chain, |
4618 | DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16) |
4619 | }; |
4620 | return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); |
4621 | } |
4622 | |
4623 | SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL, |
4624 | SelectionDAG &DAG) const { |
4625 | // FIXME: Use inline constants (src_{shared, private}_base) instead. |
4626 | if (Subtarget->hasApertureRegs()) { |
4627 | unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ? |
4628 | AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE : |
4629 | AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE; |
4630 | unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ? |
4631 | AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE : |
4632 | AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE; |
4633 | unsigned Encoding = |
4634 | AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ | |
4635 | Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ | |
4636 | WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_; |
4637 | |
4638 | SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16); |
4639 | SDValue ApertureReg = SDValue( |
4640 | DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0); |
4641 | SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32); |
4642 | return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount); |
4643 | } |
4644 | |
4645 | MachineFunction &MF = DAG.getMachineFunction(); |
4646 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
4647 | unsigned UserSGPR = Info->getQueuePtrUserSGPR(); |
4648 | assert(UserSGPR != AMDGPU::NoRegister); |
4649 | |
4650 | SDValue QueuePtr = CreateLiveInRegister( |
4651 | DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); |
4652 | |
4653 | // Offset into amd_queue_t for group_segment_aperture_base_hi / |
4654 | // private_segment_aperture_base_hi. |
4655 | uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44; |
4656 | |
4657 | SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset); |
4658 | |
4659 | // TODO: Use custom target PseudoSourceValue. |
4660 | // TODO: We should use the value from the IR intrinsic call, but it might not |
4661 | // be available and how do we get it? |
4662 | MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); |
4663 | return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo, |
4664 | MinAlign(64, StructOffset), |
4665 | MachineMemOperand::MODereferenceable | |
4666 | MachineMemOperand::MOInvariant); |
4667 | } |
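A standalone sketch of the s_getreg immediate built in the fast path above. The field layout here (register id in bits [5:0], offset in [10:6], width-1 in [15:11]) and the id value for the memory-base registers are assumptions for illustration; the authoritative constants live in AMDGPU::Hwreg in SIDefines.h.

#include <cstdio>

int main() {
  // Assumed hwreg() bit positions; see SIDefines.h for the real values.
  const unsigned ID_SHIFT = 0, OFFSET_SHIFT = 6, WIDTH_M1_SHIFT = 11;
  const unsigned ID_MEM_BASES = 15; // illustrative id for the mem-base regs

  // Example selection: a 16-bit aperture field starting at bit 16.
  unsigned Offset = 16, WidthM1 = 15;

  unsigned Encoding = (ID_MEM_BASES << ID_SHIFT) |
                      (Offset << OFFSET_SHIFT) |
                      (WidthM1 << WIDTH_M1_SHIFT);
  std::printf("s_getreg_b32 immediate = 0x%04x\n", Encoding);

  // s_getreg returns the field right-justified; the code then shifts left
  // by the field width (WidthM1 + 1) to rebuild the aperture base bits.
  return 0;
}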
4668 | |
4669 | SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, |
4670 | SelectionDAG &DAG) const { |
4671 | SDLoc SL(Op); |
4672 | const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op); |
4673 | |
4674 | SDValue Src = ASC->getOperand(0); |
4675 | SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64); |
4676 | |
4677 | const AMDGPUTargetMachine &TM = |
4678 | static_cast<const AMDGPUTargetMachine &>(getTargetMachine()); |
4679 | |
4680 | // flat -> local/private |
4681 | if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { |
4682 | unsigned DestAS = ASC->getDestAddressSpace(); |
4683 | |
4684 | if (DestAS == AMDGPUAS::LOCAL_ADDRESS || |
4685 | DestAS == AMDGPUAS::PRIVATE_ADDRESS) { |
4686 | unsigned NullVal = TM.getNullPointerValue(DestAS); |
4687 | SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); |
4688 | SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE); |
4689 | SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); |
4690 | |
4691 | return DAG.getNode(ISD::SELECT, SL, MVT::i32, |
4692 | NonNull, Ptr, SegmentNullPtr); |
4693 | } |
4694 | } |
4695 | |
4696 | // local/private -> flat |
4697 | if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { |
4698 | unsigned SrcAS = ASC->getSrcAddressSpace(); |
4699 | |
4700 | if (SrcAS == AMDGPUAS::LOCAL_ADDRESS || |
4701 | SrcAS == AMDGPUAS::PRIVATE_ADDRESS) { |
4702 | unsigned NullVal = TM.getNullPointerValue(SrcAS); |
4703 | SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); |
4704 | |
4705 | SDValue NonNull |
4706 | = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE); |
4707 | |
4708 | SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG); |
4709 | SDValue CvtPtr |
4710 | = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture); |
4711 | |
4712 | return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, |
4713 | DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr), |
4714 | FlatNullPtr); |
4715 | } |
4716 | } |
4717 | |
4718 | // global <-> flat are no-ops and never emitted. |
4719 | |
4720 | const MachineFunction &MF = DAG.getMachineFunction(); |
4721 | DiagnosticInfoUnsupported InvalidAddrSpaceCast( |
4722 | MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc()); |
4723 | DAG.getContext()->diagnose(InvalidAddrSpaceCast); |
4724 | |
4725 | return DAG.getUNDEF(ASC->getValueType(0)); |
4726 | } |
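A standalone sketch of the segment-to-flat direction above, with plain integers: the 32-bit segment offset becomes the low half of the 64-bit flat pointer, the aperture supplies the high half, and the segment null value (assumed here to be -1, as the AMDGPU target uses for local/private) maps to the flat null pointer.

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t SegmentNull = ~0u;      // assumed null value for the segment
  const uint32_t Aperture = 0x00010000u; // hypothetical aperture base bits
  const uint64_t FlatNull = 0;

  uint32_t src = 0x1234u; // segment-relative pointer

  // select(src != segment_null, bitcast({src, aperture}) to i64, flat_null)
  uint64_t flat = (src != SegmentNull)
                      ? ((static_cast<uint64_t>(Aperture) << 32) | src)
                      : FlatNull;
  std::printf("flat = 0x%016llx\n", static_cast<unsigned long long>(flat));
  return 0;
}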
4727 | |
4728 | // This lowers an INSERT_SUBVECTOR by extracting the individual elements from |
4729 | // the small vector and inserting them into the big vector. That is better than |
4730 | // the default expansion of doing it via a stack slot. Even though the use of |
4731 | // the stack slot would be optimized away afterwards, the stack slot itself |
4732 | // remains. |
4733 | SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op, |
4734 | SelectionDAG &DAG) const { |
4735 | SDValue Vec = Op.getOperand(0); |
4736 | SDValue Ins = Op.getOperand(1); |
4737 | SDValue Idx = Op.getOperand(2); |
4738 | EVT VecVT = Vec.getValueType(); |
4739 | EVT InsVT = Ins.getValueType(); |
4740 | EVT EltVT = VecVT.getVectorElementType(); |
4741 | unsigned InsNumElts = InsVT.getVectorNumElements(); |
4742 | unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); |
4743 | SDLoc SL(Op); |
4744 | |
4745 | for (unsigned I = 0; I != InsNumElts; ++I) { |
4746 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins, |
4747 | DAG.getConstant(I, SL, MVT::i32)); |
4748 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt, |
4749 | DAG.getConstant(IdxVal + I, SL, MVT::i32)); |
4750 | } |
4751 | return Vec; |
4752 | } |
4753 | |
4754 | SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, |
4755 | SelectionDAG &DAG) const { |
4756 | SDValue Vec = Op.getOperand(0); |
4757 | SDValue InsVal = Op.getOperand(1); |
4758 | SDValue Idx = Op.getOperand(2); |
4759 | EVT VecVT = Vec.getValueType(); |
4760 | EVT EltVT = VecVT.getVectorElementType(); |
4761 | unsigned VecSize = VecVT.getSizeInBits(); |
4762 | unsigned EltSize = EltVT.getSizeInBits(); |
4763 | |
4764 | |
4765 | assert(VecSize <= 64); |
4766 | |
4767 | unsigned NumElts = VecVT.getVectorNumElements(); |
4768 | SDLoc SL(Op); |
4769 | auto KIdx = dyn_cast<ConstantSDNode>(Idx); |
4770 | |
4771 | if (NumElts == 4 && EltSize == 16 && KIdx) { |
4772 | SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec); |
4773 | |
4774 | SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec, |
4775 | DAG.getConstant(0, SL, MVT::i32)); |
4776 | SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec, |
4777 | DAG.getConstant(1, SL, MVT::i32)); |
4778 | |
4779 | SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf); |
4780 | SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf); |
4781 | |
4782 | unsigned Idx = KIdx->getZExtValue(); |
4783 | bool InsertLo = Idx < 2; |
4784 | SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16, |
4785 | InsertLo ? LoVec : HiVec, |
4786 | DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal), |
4787 | DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32)); |
4788 | |
4789 | InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf); |
4790 | |
4791 | SDValue Concat = InsertLo ? |
4792 | DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) : |
4793 | DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf }); |
4794 | |
4795 | return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat); |
4796 | } |
4797 | |
4798 | if (isa<ConstantSDNode>(Idx)) |
4799 | return SDValue(); |
4800 | |
4801 | MVT IntVT = MVT::getIntegerVT(VecSize); |
4802 | |
4803 | // Avoid stack access for dynamic indexing. |
4804 | // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec |
4805 | |
4806 | // Create a congruent vector with the target value in each element so that |
4807 | // the required element can be masked and ORed into the target vector. |
4808 | SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT, |
4809 | DAG.getSplatBuildVector(VecVT, SL, InsVal)); |
4810 | |
4811 | assert(isPowerOf2_32(EltSize)); |
4812 | SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); |
4813 | |
4814 | // Convert vector index to bit-index. |
4815 | SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); |
4816 | |
4817 | SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); |
4818 | SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT, |
4819 | DAG.getConstant(0xffff, SL, IntVT), |
4820 | ScaledIdx); |
4821 | |
4822 | SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal); |
4823 | SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT, |
4824 | DAG.getNOT(SL, BFM, IntVT), BCVec); |
4825 | |
4826 | SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS); |
4827 | return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI); |
4828 | } |
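A standalone sketch of the dynamic-index path above for a v4i16 held in one 64-bit integer: splat the new value, build a 16-bit mask at the scaled index, and merge with AND/OR, which is the v_bfm/v_bfi pattern the comment names.

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t vec = 0x0004000300020001ull; // v4i16 <1, 2, 3, 4>
  uint16_t val = 0xAAAA;                // value to insert
  unsigned idx = 2;                     // dynamic at runtime in the lowering

  // "Congruent vector": the value splatted into every element.
  uint64_t ext = 0x0001000100010001ull * val;

  // Vector index -> bit index, then the element mask (v_bfm).
  unsigned scaledIdx = idx * 16; // idx << Log2_32(EltSize)
  uint64_t bfm = 0xffffull << scaledIdx;

  // Take the new value in the selected lane, the old vector elsewhere.
  uint64_t res = (bfm & ext) | (~bfm & vec);
  std::printf("0x%016llx\n", static_cast<unsigned long long>(res));
  // Prints 0x0004aaaa00020001: element 2 replaced, the rest untouched.
  return 0;
}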
4829 | |
4830 | SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, |
4831 | SelectionDAG &DAG) const { |
4832 | SDLoc SL(Op); |
4833 | |
4834 | EVT ResultVT = Op.getValueType(); |
4835 | SDValue Vec = Op.getOperand(0); |
4836 | SDValue Idx = Op.getOperand(1); |
4837 | EVT VecVT = Vec.getValueType(); |
4838 | unsigned VecSize = VecVT.getSizeInBits(); |
4839 | EVT EltVT = VecVT.getVectorElementType(); |
4840 | assert(VecSize <= 64); |
4841 | |
4842 | DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr); |
4843 | |
4844 | // Make sure we do any optimizations that will make it easier to fold |
4845 | // source modifiers before obscuring it with bit operations. |
4846 | |
4847 | // XXX - Why doesn't this get called when vector_shuffle is expanded? |
4848 | if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI)) |
4849 | return Combined; |
4850 | |
4851 | unsigned EltSize = EltVT.getSizeInBits(); |
4852 | assert(isPowerOf2_32(EltSize)); |
4853 | |
4854 | MVT IntVT = MVT::getIntegerVT(VecSize); |
4855 | SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); |
4856 | |
4857 | // Convert vector index to bit-index (* EltSize) |
4858 | SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); |
4859 | |
4860 | SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); |
4861 | SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx); |
4862 | |
4863 | if (ResultVT == MVT::f16) { |
4864 | SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt); |
4865 | return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result); |
4866 | } |
4867 | |
4868 | return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT); |
4869 | } |
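A standalone sketch of the extract above for a v4i16 held in one 64-bit integer: the vector index is scaled to a bit index, the whole vector is shifted right, and the element is truncated out.

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t vec = 0x0004000300020001ull; // v4i16 <1, 2, 3, 4>
  unsigned idx = 2;

  unsigned scaledIdx = idx * 16; // idx << Log2_32(EltSize)
  uint16_t elt = static_cast<uint16_t>(vec >> scaledIdx); // SRL + TRUNCATE

  std::printf("elt[%u] = %u\n", idx, elt); // elt[2] = 3
  return 0;
}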
4870 | |
4871 | static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) { |
4872 | assert(Elt % 2 == 0); |
4873 | return Mask[Elt + 1] == Mask[Elt] + 1 && (Mask[Elt] % 2 == 0); |
4874 | } |
4875 | |
4876 | SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op, |
4877 | SelectionDAG &DAG) const { |
4878 | SDLoc SL(Op); |
4879 | EVT ResultVT = Op.getValueType(); |
4880 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); |
4881 | |
4882 | EVT PackVT = ResultVT.isInteger() ? MVT::v2i16 : MVT::v2f16; |
4883 | EVT EltVT = PackVT.getVectorElementType(); |
4884 | int SrcNumElts = Op.getOperand(0).getValueType().getVectorNumElements(); |
4885 | |
4886 | // vector_shuffle <0,1,6,7> lhs, rhs |
4887 | // -> concat_vectors (extract_subvector lhs, 0), (extract_subvector rhs, 2) |
4888 | // |
4889 | // vector_shuffle <6,7,2,3> lhs, rhs |
4890 | // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 2) |
4891 | // |
4892 | // vector_shuffle <6,7,0,1> lhs, rhs |
4893 | // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 0) |
4894 | |
4895 | // Avoid scalarizing when both halves are reading from consecutive elements. |
4896 | SmallVector<SDValue, 4> Pieces; |
4897 | for (int I = 0, N = ResultVT.getVectorNumElements(); I != N; I += 2) { |
4898 | if (elementPairIsContiguous(SVN->getMask(), I)) { |
4899 | const int Idx = SVN->getMaskElt(I); |
4900 | int VecIdx = Idx < SrcNumElts ? 0 : 1; |
4901 | int EltIdx = Idx < SrcNumElts ? Idx : Idx - SrcNumElts; |
4902 | SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, |
4903 | PackVT, SVN->getOperand(VecIdx), |
4904 | DAG.getConstant(EltIdx, SL, MVT::i32)); |
4905 | Pieces.push_back(SubVec); |
4906 | } else { |
4907 | const int Idx0 = SVN->getMaskElt(I); |
4908 | const int Idx1 = SVN->getMaskElt(I + 1); |
4909 | int VecIdx0 = Idx0 < SrcNumElts ? 0 : 1; |
4910 | int VecIdx1 = Idx1 < SrcNumElts ? 0 : 1; |
4911 | int EltIdx0 = Idx0 < SrcNumElts ? Idx0 : Idx0 - SrcNumElts; |
4912 | int EltIdx1 = Idx1 < SrcNumElts ? Idx1 : Idx1 - SrcNumElts; |
4913 | |
4914 | SDValue Vec0 = SVN->getOperand(VecIdx0); |
4915 | SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, |
4916 | Vec0, DAG.getConstant(EltIdx0, SL, MVT::i32)); |
4917 | |
4918 | SDValue Vec1 = SVN->getOperand(VecIdx1); |
4919 | SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, |
4920 | Vec1, DAG.getConstant(EltIdx1, SL, MVT::i32)); |
4921 | Pieces.push_back(DAG.getBuildVector(PackVT, SL, { Elt0, Elt1 })); |
4922 | } |
4923 | } |
4924 | |
4925 | return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces); |
4926 | } |
4927 | |
4928 | SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op, |
4929 | SelectionDAG &DAG) const { |
4930 | SDLoc SL(Op); |
4931 | EVT VT = Op.getValueType(); |
4932 | |
4933 | if (VT == MVT::v4i16 || VT == MVT::v4f16) { |
4934 | EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2); |
4935 | |
4936 | // Turn into pair of packed build_vectors. |
4937 | // TODO: Special case for constants that can be materialized with s_mov_b64. |
4938 | SDValue Lo = DAG.getBuildVector(HalfVT, SL, |
4939 | { Op.getOperand(0), Op.getOperand(1) }); |
4940 | SDValue Hi = DAG.getBuildVector(HalfVT, SL, |
4941 | { Op.getOperand(2), Op.getOperand(3) }); |
4942 | |
4943 | SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo); |
4944 | SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi); |
4945 | |
4946 | SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi }); |
4947 | return DAG.getNode(ISD::BITCAST, SL, VT, Blend); |
4948 | } |
4949 | |
4950 | assert(VT == MVT::v2f16 || VT == MVT::v2i16); |
4951 | assert(!Subtarget->hasVOP3PInsts() && "this should be legal"); |
4952 | |
4953 | SDValue Lo = Op.getOperand(0); |
4954 | SDValue Hi = Op.getOperand(1); |
4955 | |
4956 | // Avoid adding defined bits with the zero_extend. |
4957 | if (Hi.isUndef()) { |
4958 | Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo); |
4959 | SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo); |
4960 | return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo); |
4961 | } |
4962 | |
4963 | Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi); |
4964 | Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi); |
4965 | |
4966 | SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi, |
4967 | DAG.getConstant(16, SL, MVT::i32)); |
4968 | if (Lo.isUndef()) |
4969 | return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi); |
4970 | |
4971 | Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo); |
4972 | Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo); |
4973 | |
4974 | SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi); |
4975 | return DAG.getNode(ISD::BITCAST, SL, VT, Or); |
4976 | } |
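A standalone sketch of the scalar packing path at the end of this function, for subtargets without packed-math instructions: both halves are zero-extended, the high half is shifted into place, and the two are ORed into one 32-bit value.

#include <cstdint>
#include <cstdio>

int main() {
  uint16_t lo = 0x3C00; // 1.0 as f16 bits
  uint16_t hi = 0x4000; // 2.0 as f16 bits

  uint32_t packed = static_cast<uint32_t>(lo) |
                    (static_cast<uint32_t>(hi) << 16); // zext, shl 16, or
  std::printf("v2f16 bits = 0x%08x\n", packed); // 0x40003c00

  // When one half is undef the lowering skips the corresponding zext/or,
  // since there are no defined bits to preserve on that side.
  return 0;
}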
4977 | |
4978 | bool |
4979 | SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { |
4980 | // We can fold offsets for anything that doesn't require a GOT relocation. |
4981 | return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || |
4982 | GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || |
4983 | GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) && |
4984 | !shouldEmitGOTReloc(GA->getGlobal()); |
4985 | } |
4986 | |
4987 | static SDValue |
4988 | buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV, |
4989 | const SDLoc &DL, unsigned Offset, EVT PtrVT, |
4990 | unsigned GAFlags = SIInstrInfo::MO_NONE) { |
4991 | // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is |
4992 | // lowered to the following code sequence: |
4993 | // |
4994 | // For constant address space: |
4995 | // s_getpc_b64 s[0:1] |
4996 | // s_add_u32 s0, s0, $symbol |
4997 | // s_addc_u32 s1, s1, 0 |
4998 | // |
4999 | // s_getpc_b64 returns the address of the s_add_u32 instruction and then |
5000 | // a fixup or relocation is emitted to replace $symbol with a literal |
5001 | // constant, which is a pc-relative offset from the encoding of the $symbol |
5002 | // operand to the global variable. |
5003 | // |
5004 | // For global address space: |
5005 | // s_getpc_b64 s[0:1] |
5006 | // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo |
5007 | // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi |
5008 | // |
5009 | // s_getpc_b64 returns the address of the s_add_u32 instruction and then |
5010 | // fixups or relocations are emitted to replace $symbol@*@lo and |
5011 | // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant, |
5012 | // which is a 64-bit pc-relative offset from the encoding of the $symbol |
5013 | // operand to the global variable. |
5014 | // |
5015 | // What we want here is an offset from the value returned by s_getpc |
5016 | // (which is the address of the s_add_u32 instruction) to the global |
5017 | // variable, but since the encoding of $symbol starts 4 bytes after the start |
5018 | // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too |
5019 | // small. This requires us to add 4 to the global variable offset in order to |
5020 | // compute the correct address. |
5021 | SDValue PtrLo = |
5022 | DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags); |
5023 | SDValue PtrHi; |
5024 | if (GAFlags == SIInstrInfo::MO_NONE) { |
5025 | PtrHi = DAG.getTargetConstant(0, DL, MVT::i32); |
5026 | } else { |
5027 | PtrHi = |
5028 | DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags + 1); |
5029 | } |
5030 | return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi); |
5031 | } |
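     |
     | // Worked example of the +4 bias above (illustrative): let PC be the value
     | // returned by s_getpc_b64, i.e. the address of the s_add_u32. The $symbol
     | // literal is encoded at PC + 4, so the rel32 fixup evaluates to
     | // GV + (Offset + 4) - (PC + 4). Adding that to PC gives exactly
     | // GV + Offset, which is why both PtrLo and PtrHi pass Offset + 4.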
5032 | |
5033 | SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, |
5034 | SDValue Op, |
5035 | SelectionDAG &DAG) const { |
5036 | GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); |
5037 | const GlobalValue *GV = GSD->getGlobal(); |
5038 | if ((GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && |
5039 | shouldUseLDSConstAddress(GV)) || |
5040 | GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS || |
5041 | GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) |
5042 | return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); |
5043 | |
5044 | SDLoc DL(GSD); |
5045 | EVT PtrVT = Op.getValueType(); |
5046 | |
5047 | if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { |
5048 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, GSD->getOffset(), |
5049 | SIInstrInfo::MO_ABS32_LO); |
5050 | return DAG.getNode(AMDGPUISD::LDS, DL, MVT::i32, GA); |
5051 | } |
5052 | |
5053 | if (shouldEmitFixup(GV)) |
5054 | return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT); |
5055 | else if (shouldEmitPCReloc(GV)) |
5056 | return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT, |
5057 | SIInstrInfo::MO_REL32); |
5058 | |
5059 | SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT, |
5060 | SIInstrInfo::MO_GOTPCREL32); |
5061 | |
5062 | Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext()); |
5063 | PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); |
5064 | const DataLayout &DataLayout = DAG.getDataLayout(); |
5065 | unsigned Align = DataLayout.getABITypeAlignment(PtrTy); |
5066 | MachinePointerInfo PtrInfo |
5067 | = MachinePointerInfo::getGOT(DAG.getMachineFunction()); |
5068 | |
5069 | return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align, |
5070 | MachineMemOperand::MODereferenceable | |
5071 | MachineMemOperand::MOInvariant); |
5072 | } |
5073 | |
5074 | SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, |
5075 | const SDLoc &DL, SDValue V) const { |
5076 | // We can't use S_MOV_B32 directly, because there is no way to specify m0 as |
5077 | // the destination register. |
5078 | // |
5079 | // We can't use CopyToReg, because MachineCSE won't combine COPY instructions, |
5080 | // so we will end up with redundant moves to m0. |
5081 | // |
5082 | // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result. |
5083 | |
5084 | // A Null SDValue creates a glue result. |
5085 | SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, |
5086 | V, Chain); |
5087 | return SDValue(M0, 0); |
5088 | } |
5089 | |
5090 | SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, |
5091 | SDValue Op, |
5092 | MVT VT, |
5093 | unsigned Offset) const { |
5094 | SDLoc SL(Op); |
5095 | SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL, |
5096 | DAG.getEntryNode(), Offset, 4, false); |
5097 | // The local size values will have the high 16 bits as zero.
5098 | return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param, |
5099 | DAG.getValueType(VT)); |
5100 | } |
5101 | |
5102 | static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, |
5103 | EVT VT) { |
5104 | DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), |
5105 | "non-hsa intrinsic with hsa target", |
5106 | DL.getDebugLoc()); |
5107 | DAG.getContext()->diagnose(BadIntrin); |
5108 | return DAG.getUNDEF(VT); |
5109 | } |
5110 | |
5111 | static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, |
5112 | EVT VT) { |
5113 | DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), |
5114 | "intrinsic not supported on subtarget", |
5115 | DL.getDebugLoc()); |
5116 | DAG.getContext()->diagnose(BadIntrin); |
5117 | return DAG.getUNDEF(VT); |
5118 | } |
5119 | |
5120 | static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL, |
5121 | ArrayRef<SDValue> Elts) { |
5122 | assert(!Elts.empty());
5123 | MVT Type; |
5124 | unsigned NumElts; |
5125 | |
5126 | if (Elts.size() == 1) { |
5127 | Type = MVT::f32; |
5128 | NumElts = 1; |
5129 | } else if (Elts.size() == 2) { |
5130 | Type = MVT::v2f32; |
5131 | NumElts = 2; |
5132 | } else if (Elts.size() == 3) { |
5133 | Type = MVT::v3f32; |
5134 | NumElts = 3; |
5135 | } else if (Elts.size() <= 4) { |
5136 | Type = MVT::v4f32; |
5137 | NumElts = 4; |
5138 | } else if (Elts.size() <= 8) { |
5139 | Type = MVT::v8f32; |
5140 | NumElts = 8; |
5141 | } else { |
5142 | assert(Elts.size() <= 16);
5143 | Type = MVT::v16f32; |
5144 | NumElts = 16; |
5145 | } |
5146 | |
5147 | SmallVector<SDValue, 16> VecElts(NumElts); |
5148 | for (unsigned i = 0; i < Elts.size(); ++i) { |
5149 | SDValue Elt = Elts[i]; |
5150 | if (Elt.getValueType() != MVT::f32) |
5151 | Elt = DAG.getBitcast(MVT::f32, Elt); |
5152 | VecElts[i] = Elt; |
5153 | } |
5154 | for (unsigned i = Elts.size(); i < NumElts; ++i) |
5155 | VecElts[i] = DAG.getUNDEF(MVT::f32); |
5156 | |
5157 | if (NumElts == 1) |
5158 | return VecElts[0]; |
5159 | return DAG.getBuildVector(Type, DL, VecElts); |
5160 | } |
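     |
     | // E.g. (illustrative) five elements are rounded up to a v8f32
     | // build_vector whose last three lanes are undef; a single element is
     | // returned directly after the f32 bitcast, with no vector built.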
5161 | |
5162 | static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG, |
5163 | SDValue *GLC, SDValue *SLC, SDValue *DLC) { |
5164 | auto CachePolicyConst = cast<ConstantSDNode>(CachePolicy.getNode()); |
5165 | |
5166 | uint64_t Value = CachePolicyConst->getZExtValue(); |
5167 | SDLoc DL(CachePolicy); |
5168 | if (GLC) { |
5169 | *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32); |
5170 | Value &= ~(uint64_t)0x1; |
5171 | } |
5172 | if (SLC) { |
5173 | *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32); |
5174 | Value &= ~(uint64_t)0x2; |
5175 | } |
5176 | if (DLC) { |
5177 | *DLC = DAG.getTargetConstant((Value & 0x4) ? 1 : 0, DL, MVT::i32); |
5178 | Value &= ~(uint64_t)0x4; |
5179 | } |
5180 | |
5181 | return Value == 0; |
5182 | } |
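     |
     | // Illustrative decoding: a cachepolicy of 5 (0b101) with all three
     | // outputs requested yields GLC = 1, SLC = 0, DLC = 1 and returns true.
     | // If a set bit has no requested output (e.g. bit 2 set but DLC not
     | // passed), the residue is non-zero and the function returns false.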
5183 | |
5184 | static SDValue padEltsToUndef(SelectionDAG &DAG, const SDLoc &DL, EVT CastVT, |
5185 | SDValue Src, int ExtraElts) { |
5186 | EVT SrcVT = Src.getValueType(); |
5187 | |
5188 | SmallVector<SDValue, 8> Elts; |
5189 | |
5190 | if (SrcVT.isVector()) |
5191 | DAG.ExtractVectorElements(Src, Elts); |
5192 | else |
5193 | Elts.push_back(Src); |
5194 | |
5195 | SDValue Undef = DAG.getUNDEF(SrcVT.getScalarType()); |
5196 | while (ExtraElts--) |
5197 | Elts.push_back(Undef); |
5198 | |
5199 | return DAG.getBuildVector(CastVT, DL, Elts); |
5200 | } |
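     |
     | // E.g. (illustrative) padEltsToUndef(DAG, DL, MVT::v4f32, <x, y>,
     | // /*ExtraElts=*/2) produces build_vector (x, y, undef, undef) : v4f32.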
5201 | |
5202 | // Re-construct the required return value for an image load intrinsic.
5203 | // This is more complicated due to the optional use of TexFailCtrl, which
5204 | // means the required return type is an aggregate.
5205 | static SDValue constructRetValue(SelectionDAG &DAG, |
5206 | MachineSDNode *Result, |
5207 | ArrayRef<EVT> ResultTypes, |
5208 | bool IsTexFail, bool Unpacked, bool IsD16, |
5209 | int DMaskPop, int NumVDataDwords, |
5210 | const SDLoc &DL, LLVMContext &Context) { |
5211 | // Determine the required return type. This is the same regardless of the IsTexFail flag.
5212 | EVT ReqRetVT = ResultTypes[0]; |
5213 | int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1; |
5214 | int NumDataDwords = (!IsD16 || (IsD16 && Unpacked)) ? |
5215 | ReqRetNumElts : (ReqRetNumElts + 1) / 2; |
5216 | |
5217 | int MaskPopDwords = (!IsD16 || (IsD16 && Unpacked)) ? |
5218 | DMaskPop : (DMaskPop + 1) / 2; |
5219 | |
5220 | MVT DataDwordVT = NumDataDwords == 1 ? |
5221 | MVT::i32 : MVT::getVectorVT(MVT::i32, NumDataDwords); |
5222 | |
5223 | MVT MaskPopVT = MaskPopDwords == 1 ? |
5224 | MVT::i32 : MVT::getVectorVT(MVT::i32, MaskPopDwords); |
5225 | |
5226 | SDValue Data(Result, 0); |
5227 | SDValue TexFail; |
5228 | |
5229 | if (IsTexFail) { |
5230 | SDValue ZeroIdx = DAG.getConstant(0, DL, MVT::i32); |
5231 | if (MaskPopVT.isVector()) { |
5232 | Data = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MaskPopVT, |
5233 | SDValue(Result, 0), ZeroIdx); |
5234 | } else { |
5235 | Data = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MaskPopVT, |
5236 | SDValue(Result, 0), ZeroIdx); |
5237 | } |
5238 | |
5239 | TexFail = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, |
5240 | SDValue(Result, 0), |
5241 | DAG.getConstant(MaskPopDwords, DL, MVT::i32)); |
5242 | } |
5243 | |
5244 | if (DataDwordVT.isVector()) |
5245 | Data = padEltsToUndef(DAG, DL, DataDwordVT, Data, |
5246 | NumDataDwords - MaskPopDwords); |
5247 | |
5248 | if (IsD16) |
5249 | Data = adjustLoadValueTypeImpl(Data, ReqRetVT, DL, DAG, Unpacked); |
5250 | |
5251 | if (!ReqRetVT.isVector()) |
5252 | Data = DAG.getNode(ISD::TRUNCATE, DL, ReqRetVT.changeTypeToInteger(), Data); |
5253 | |
5254 | Data = DAG.getNode(ISD::BITCAST, DL, ReqRetVT, Data); |
5255 | |
5256 | if (TexFail) |
5257 | return DAG.getMergeValues({Data, TexFail, SDValue(Result, 1)}, DL); |
5258 | |
5259 | if (Result->getNumValues() == 1) |
5260 | return Data; |
5261 | |
5262 | return DAG.getMergeValues({Data, SDValue(Result, 1)}, DL); |
5263 | } |
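     |
     | // Illustrative dword math for the routine above: a packed D16 load
     | // (IsD16, !Unpacked) with a 3-element required return type needs
     | // NumDataDwords = (3 + 1) / 2 = 2, since two half-sized elements share
     | // each 32-bit lane; the unpacked case keeps one dword per element.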
5264 | |
5265 | static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE, |
5266 | SDValue *LWE, bool &IsTexFail) { |
5267 | auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode()); |
5268 | |
5269 | uint64_t Value = TexFailCtrlConst->getZExtValue(); |
5270 | if (Value) { |
5271 | IsTexFail = true; |
5272 | } |
5273 | |
5274 | SDLoc DL(TexFailCtrlConst); |
5275 | *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32); |
5276 | Value &= ~(uint64_t)0x1; |
5277 | *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32); |
5278 | Value &= ~(uint64_t)0x2; |
5279 | |
5280 | return Value == 0; |
5281 | } |
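     |
     | // Illustrative: texfailctrl = 3 sets TFE = 1 (bit 0) and LWE = 1
     | // (bit 1), marks IsTexFail, and returns true; any bit above the low two
     | // leaves a non-zero residue and the function returns false.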
5282 | |
5283 | SDValue SITargetLowering::lowerImage(SDValue Op, |
5284 | const AMDGPU::ImageDimIntrinsicInfo *Intr, |
5285 | SelectionDAG &DAG) const { |
5286 | SDLoc DL(Op); |
5287 | MachineFunction &MF = DAG.getMachineFunction(); |
5288 | const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>(); |
5289 | const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = |
5290 | AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); |
5291 | const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim); |
5292 | const AMDGPU::MIMGLZMappingInfo *LZMappingInfo = |
5293 | AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode); |
5294 | const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo = |
5295 | AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode); |
5296 | unsigned IntrOpcode = Intr->BaseOpcode; |
5297 | bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10; |
5298 | |
5299 | SmallVector<EVT, 3> ResultTypes(Op->value_begin(), Op->value_end()); |
5300 | SmallVector<EVT, 3> OrigResultTypes(Op->value_begin(), Op->value_end()); |
5301 | bool IsD16 = false; |
5302 | bool IsA16 = false; |
5303 | SDValue VData; |
5304 | int NumVDataDwords; |
5305 | bool AdjustRetType = false; |
5306 | |
5307 | unsigned AddrIdx; // Index of first address argument |
5308 | unsigned DMask; |
5309 | unsigned DMaskLanes = 0; |
5310 | |
5311 | if (BaseOpcode->Atomic) { |
5312 | VData = Op.getOperand(2); |
5313 | |
5314 | bool Is64Bit = VData.getValueType() == MVT::i64; |
5315 | if (BaseOpcode->AtomicX2) { |
5316 | SDValue VData2 = Op.getOperand(3); |
5317 | VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL, |
5318 | {VData, VData2}); |
5319 | if (Is64Bit) |
5320 | VData = DAG.getBitcast(MVT::v4i32, VData); |
5321 | |
5322 | ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32; |
5323 | DMask = Is64Bit ? 0xf : 0x3; |
5324 | NumVDataDwords = Is64Bit ? 4 : 2; |
5325 | AddrIdx = 4; |
5326 | } else { |
5327 | DMask = Is64Bit ? 0x3 : 0x1; |
5328 | NumVDataDwords = Is64Bit ? 2 : 1; |
5329 | AddrIdx = 3; |
5330 | } |
5331 | } else { |
5332 | unsigned DMaskIdx = BaseOpcode->Store ? 3 : isa<MemSDNode>(Op) ? 2 : 1; |
5333 | auto DMaskConst = cast<ConstantSDNode>(Op.getOperand(DMaskIdx)); |
5334 | DMask = DMaskConst->getZExtValue(); |
5335 | DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask); |
5336 | |
5337 | if (BaseOpcode->Store) { |
5338 | VData = Op.getOperand(2); |
5339 | |
5340 | MVT StoreVT = VData.getSimpleValueType(); |
5341 | if (StoreVT.getScalarType() == MVT::f16) { |
5342 | if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16) |
5343 | return Op; // D16 is unsupported for this instruction |
5344 | |
5345 | IsD16 = true; |
5346 | VData = handleD16VData(VData, DAG); |
5347 | } |
5348 | |
5349 | NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32; |
5350 | } else { |
5351 | // Work out the number of dwords based on the dmask popcount, the underlying
5352 | // type, and whether packing is supported.
5353 | MVT LoadVT = ResultTypes[0].getSimpleVT(); |
5354 | if (LoadVT.getScalarType() == MVT::f16) { |
5355 | if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16) |
5356 | return Op; // D16 is unsupported for this instruction |
5357 | |
5358 | IsD16 = true; |
5359 | } |
5360 | |
5361 | // Confirm that the return type is large enough for the dmask specified |
5362 | if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) || |
5363 | (!LoadVT.isVector() && DMaskLanes > 1)) |
5364 | return Op; |
5365 | |
5366 | if (IsD16 && !Subtarget->hasUnpackedD16VMem()) |
5367 | NumVDataDwords = (DMaskLanes + 1) / 2; |
5368 | else |
5369 | NumVDataDwords = DMaskLanes; |
5370 | |
5371 | AdjustRetType = true; |
5372 | } |
5373 | |
5374 | AddrIdx = DMaskIdx + 1; |
5375 | } |
5376 | |
5377 | unsigned NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0; |
5378 | unsigned NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0; |
5379 | unsigned NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0; |
5380 | unsigned NumVAddrs = BaseOpcode->NumExtraArgs + NumGradients + |
5381 | NumCoords + NumLCM; |
5382 | unsigned NumMIVAddrs = NumVAddrs; |
5383 | |
5384 | SmallVector<SDValue, 4> VAddrs; |
5385 | |
5386 | // Optimize _L to _LZ when _L is zero |
5387 | if (LZMappingInfo) { |
5388 | if (auto ConstantLod = |
5389 | dyn_cast<ConstantFPSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) { |
5390 | if (ConstantLod->isZero() || ConstantLod->isNegative()) { |
5391 | IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l |
5392 | NumMIVAddrs--; // remove 'lod' |
5393 | } |
5394 | } |
5395 | } |
5396 | |
5397 | // Optimize _mip away when 'lod' is zero.
5398 | if (MIPMappingInfo) { |
5399 | if (auto ConstantLod = |
5400 | dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) { |
5401 | if (ConstantLod->isNullValue()) { |
5402 | IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip |
5403 | NumMIVAddrs--; // remove 'lod' |
5404 | } |
5405 | } |
5406 | } |
5407 | |
5408 | // Check for 16-bit addresses and pack them if found.
5409 | unsigned DimIdx = AddrIdx + BaseOpcode->NumExtraArgs; |
5410 | MVT VAddrVT = Op.getOperand(DimIdx).getSimpleValueType(); |
5411 | const MVT VAddrScalarVT = VAddrVT.getScalarType(); |
5412 | if (((VAddrScalarVT == MVT::f16) || (VAddrScalarVT == MVT::i16))) { |
5413 | // Illegal to use a16 images |
5414 | if (!ST->hasFeature(AMDGPU::FeatureR128A16) && !ST->hasFeature(AMDGPU::FeatureGFX10A16)) |
5415 | return Op; |
5416 | |
5417 | IsA16 = true; |
5418 | const MVT VectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16; |
5419 | for (unsigned i = AddrIdx; i < (AddrIdx + NumMIVAddrs); ++i) { |
5420 | SDValue AddrLo; |
5421 | // Push back extra arguments. |
5422 | if (i < DimIdx) { |
5423 | AddrLo = Op.getOperand(i); |
5424 | } else { |
5425 | // Dz/dh, dz/dv and the last odd coord are packed with undef. Also, |
5426 | // in 1D, derivatives dx/dh and dx/dv are packed with undef. |
5427 | if (((i + 1) >= (AddrIdx + NumMIVAddrs)) || |
5428 | ((NumGradients / 2) % 2 == 1 && |
5429 | (i == DimIdx + (NumGradients / 2) - 1 || |
5430 | i == DimIdx + NumGradients - 1))) { |
5431 | AddrLo = Op.getOperand(i); |
5432 | if (AddrLo.getValueType() != MVT::i16) |
5433 | AddrLo = DAG.getBitcast(MVT::i16, Op.getOperand(i)); |
5434 | AddrLo = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, AddrLo); |
5435 | } else { |
5436 | AddrLo = DAG.getBuildVector(VectorVT, DL, |
5437 | {Op.getOperand(i), Op.getOperand(i + 1)}); |
5438 | i++; |
5439 | } |
5440 | AddrLo = DAG.getBitcast(MVT::f32, AddrLo); |
5441 | } |
5442 | VAddrs.push_back(AddrLo); |
5443 | } |
5444 | } else { |
5445 | for (unsigned i = 0; i < NumMIVAddrs; ++i) |
5446 | VAddrs.push_back(Op.getOperand(AddrIdx + i)); |
5447 | } |
5448 | |
5449 | // If the register allocator cannot place the address registers contiguously |
5450 | // without introducing moves, then using the non-sequential address encoding |
5451 | // is always preferable, since it saves VALU instructions and is usually a |
5452 | // wash in terms of code size or even better. |
5453 | // |
5454 | // However, we currently have no way of hinting to the register allocator that |
5455 | // MIMG addresses should be placed contiguously when it is possible to do so, |
5456 | // so force non-NSA for the common 2-address case as a heuristic. |
5457 | // |
5458 | // SIShrinkInstructions will convert NSA encodings to non-NSA after register |
5459 | // allocation when possible. |
5460 | bool UseNSA = |
5461 | ST->hasFeature(AMDGPU::FeatureNSAEncoding) && VAddrs.size() >= 3; |
5462 | SDValue VAddr; |
5463 | if (!UseNSA) |
5464 | VAddr = getBuildDwordsVector(DAG, DL, VAddrs); |
5465 | |
5466 | SDValue True = DAG.getTargetConstant(1, DL, MVT::i1); |
5467 | SDValue False = DAG.getTargetConstant(0, DL, MVT::i1); |
5468 | unsigned CtrlIdx; // Index of texfailctrl argument |
5469 | SDValue Unorm; |
5470 | if (!BaseOpcode->Sampler) { |
5471 | Unorm = True; |
5472 | CtrlIdx = AddrIdx + NumVAddrs + 1; |
5473 | } else { |
5474 | auto UnormConst = |
5475 | cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2)); |
5476 | |
5477 | Unorm = UnormConst->getZExtValue() ? True : False; |
5478 | CtrlIdx = AddrIdx + NumVAddrs + 3; |
5479 | } |
5480 | |
5481 | SDValue TFE; |
5482 | SDValue LWE; |
5483 | SDValue TexFail = Op.getOperand(CtrlIdx); |
5484 | bool IsTexFail = false; |
5485 | if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail)) |
5486 | return Op; |
5487 | |
5488 | if (IsTexFail) { |
5489 | if (!DMaskLanes) { |
5490 | // Expecting to get an error flag since TFC is on and dmask is 0.
5491 | // Force dmask to be at least 1, otherwise the instruction will fail.
5492 | DMask = 0x1; |
5493 | DMaskLanes = 1; |
5494 | NumVDataDwords = 1; |
5495 | } |
5496 | NumVDataDwords += 1; |
5497 | AdjustRetType = true; |
5498 | } |
5499 | |
5500 | // Something earlier tagged that the return type needs adjusting. This
5501 | // happens if the instruction is a load or has TexFailCtrl flags set.
5502 | if (AdjustRetType) { |
5503 | // NumVDataDwords reflects the true number of dwords required in the return type |
5504 | if (DMaskLanes == 0 && !BaseOpcode->Store) { |
5505 | // This is a no-op load. This can be eliminated |
5506 | SDValue Undef = DAG.getUNDEF(Op.getValueType()); |
5507 | if (isa<MemSDNode>(Op)) |
5508 | return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL); |
5509 | return Undef; |
5510 | } |
5511 | |
5512 | EVT NewVT = NumVDataDwords > 1 ? |
5513 | EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumVDataDwords) |
5514 | : MVT::i32; |
5515 | |
5516 | ResultTypes[0] = NewVT; |
5517 | if (ResultTypes.size() == 3) { |
5518 | // Original result was aggregate type used for TexFailCtrl results |
5519 | // The actual instruction returns as a vector type which has now been |
5520 | // created. Remove the aggregate result. |
5521 | ResultTypes.erase(&ResultTypes[1]); |
5522 | } |
5523 | } |
5524 | |
5525 | SDValue GLC; |
5526 | SDValue SLC; |
5527 | SDValue DLC; |
5528 | if (BaseOpcode->Atomic) { |
5529 | GLC = True; // TODO no-return optimization |
5530 | if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC, |
5531 | IsGFX10 ? &DLC : nullptr)) |
5532 | return Op; |
5533 | } else { |
5534 | if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC, |
5535 | IsGFX10 ? &DLC : nullptr)) |
5536 | return Op; |
5537 | } |
5538 | |
5539 | SmallVector<SDValue, 26> Ops; |
5540 | if (BaseOpcode->Store || BaseOpcode->Atomic) |
5541 | Ops.push_back(VData); // vdata |
5542 | if (UseNSA) { |
5543 | for (const SDValue &Addr : VAddrs) |
5544 | Ops.push_back(Addr); |
5545 | } else { |
5546 | Ops.push_back(VAddr); |
5547 | } |
5548 | Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc |
5549 | if (BaseOpcode->Sampler) |
5550 | Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler |
5551 | Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32)); |
5552 | if (IsGFX10) |
5553 | Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32)); |
5554 | Ops.push_back(Unorm); |
5555 | if (IsGFX10) |
5556 | Ops.push_back(DLC); |
5557 | Ops.push_back(GLC); |
5558 | Ops.push_back(SLC); |
5559 | Ops.push_back(IsA16 && // r128, a16 for gfx9 |
5560 | ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False); |
5561 | if (IsGFX10) |
5562 | Ops.push_back(IsA16 ? True : False); |
5563 | Ops.push_back(TFE); |
5564 | Ops.push_back(LWE); |
5565 | if (!IsGFX10) |
5566 | Ops.push_back(DimInfo->DA ? True : False); |
5567 | if (BaseOpcode->HasD16) |
5568 | Ops.push_back(IsD16 ? True : False); |
5569 | if (isa<MemSDNode>(Op)) |
5570 | Ops.push_back(Op.getOperand(0)); // chain |
5571 | |
5572 | int NumVAddrDwords = |
5573 | UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32; |
5574 | int Opcode = -1; |
5575 | |
5576 | if (IsGFX10) { |
5577 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, |
5578 | UseNSA ? AMDGPU::MIMGEncGfx10NSA |
5579 | : AMDGPU::MIMGEncGfx10Default, |
5580 | NumVDataDwords, NumVAddrDwords); |
5581 | } else { |
5582 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) |
5583 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8, |
5584 | NumVDataDwords, NumVAddrDwords); |
5585 | if (Opcode == -1) |
5586 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6, |
5587 | NumVDataDwords, NumVAddrDwords); |
5588 | } |
5589 | assert(Opcode != -1);
5590 | |
5591 | MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops); |
5592 | if (auto MemOp = dyn_cast<MemSDNode>(Op)) { |
5593 | MachineMemOperand *MemRef = MemOp->getMemOperand(); |
5594 | DAG.setNodeMemRefs(NewNode, {MemRef}); |
5595 | } |
5596 | |
5597 | if (BaseOpcode->AtomicX2) { |
5598 | SmallVector<SDValue, 1> Elt; |
5599 | DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1); |
5600 | return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL); |
5601 | } else if (!BaseOpcode->Store) { |
5602 | return constructRetValue(DAG, NewNode, |
5603 | OrigResultTypes, IsTexFail, |
5604 | Subtarget->hasUnpackedD16VMem(), IsD16, |
5605 | DMaskLanes, NumVDataDwords, DL, |
5606 | *DAG.getContext()); |
5607 | } |
5608 | |
5609 | return SDValue(NewNode, 0); |
5610 | } |
5611 | |
5612 | SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc, |
5613 | SDValue Offset, SDValue CachePolicy, |
5614 | SelectionDAG &DAG) const { |
5615 | MachineFunction &MF = DAG.getMachineFunction(); |
5616 | |
5617 | const DataLayout &DataLayout = DAG.getDataLayout(); |
5618 | unsigned Align = |
5619 | DataLayout.getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext())); |
5620 | |
5621 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
5622 | MachinePointerInfo(), |
5623 | MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | |
5624 | MachineMemOperand::MOInvariant, |
5625 | VT.getStoreSize(), Align); |
5626 | |
5627 | if (!Offset->isDivergent()) { |
5628 | SDValue Ops[] = { |
5629 | Rsrc, |
5630 | Offset, // Offset |
5631 | CachePolicy |
5632 | }; |
5633 | |
5634 | // Widen vec3 load to vec4. |
5635 | if (VT.isVector() && VT.getVectorNumElements() == 3) { |
5636 | EVT WidenedVT = |
5637 | EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4); |
5638 | auto WidenedOp = DAG.getMemIntrinsicNode( |
5639 | AMDGPUISD::SBUFFER_LOAD, DL, DAG.getVTList(WidenedVT), Ops, WidenedVT, |
5640 | MF.getMachineMemOperand(MMO, 0, WidenedVT.getStoreSize())); |
5641 | auto Subvector = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, WidenedOp, |
5642 | DAG.getVectorIdxConstant(0, DL)); |
5643 | return Subvector; |
5644 | } |
5645 | |
5646 | return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL, |
5647 | DAG.getVTList(VT), Ops, VT, MMO); |
5648 | } |
5649 | |
5650 | // We have a divergent offset. Emit a MUBUF buffer load instead. We can |
5651 | // assume that the buffer is unswizzled. |
5652 | SmallVector<SDValue, 4> Loads; |
5653 | unsigned NumLoads = 1; |
5654 | MVT LoadVT = VT.getSimpleVT(); |
5655 | unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1; |
5656 | assert((LoadVT.getScalarType() == MVT::i32 ||
5657 | LoadVT.getScalarType() == MVT::f32));
5658 | |
5659 | if (NumElts == 8 || NumElts == 16) { |
5660 | NumLoads = NumElts / 4; |
5661 | LoadVT = MVT::getVectorVT(LoadVT.getScalarType(), 4); |
5662 | } |
5663 | |
5664 | SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue}); |
5665 | SDValue Ops[] = { |
5666 | DAG.getEntryNode(), // Chain |
5667 | Rsrc, // rsrc |
5668 | DAG.getConstant(0, DL, MVT::i32), // vindex |
5669 | {}, // voffset |
5670 | {}, // soffset |
5671 | {}, // offset |
5672 | CachePolicy, // cachepolicy |
5673 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen |
5674 | }; |
5675 | |
5676 | // Use the alignment to ensure that the required offsets will fit into the |
5677 | // immediate offsets. |
5678 | setBufferOffsets(Offset, DAG, &Ops[3], NumLoads > 1 ? 16 * NumLoads : 4); |
5679 | |
5680 | uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue(); |
5681 | for (unsigned i = 0; i < NumLoads; ++i) { |
5682 | Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32); |
5683 | Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops, |
5684 | LoadVT, MMO, DAG)); |
5685 | } |
5686 | |
5687 | if (NumElts == 8 || NumElts == 16) |
5688 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads); |
5689 | |
5690 | return Loads[0]; |
5691 | } |
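     |
     | // Illustrative split for the divergent path above: a v8f32 result is
     | // fetched as NumLoads = 2 MUBUF loads of v4f32 at InstOffset and
     | // InstOffset + 16, then glued back together with CONCAT_VECTORS.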
5692 | |
5693 | SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, |
5694 | SelectionDAG &DAG) const { |
5695 | MachineFunction &MF = DAG.getMachineFunction(); |
5696 | auto MFI = MF.getInfo<SIMachineFunctionInfo>(); |
5697 | |
5698 | EVT VT = Op.getValueType(); |
5699 | SDLoc DL(Op); |
5700 | unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
5701 | |
5702 | // TODO: Should this propagate fast-math-flags? |
5703 | |
5704 | switch (IntrinsicID) { |
5705 | case Intrinsic::amdgcn_implicit_buffer_ptr: { |
5706 | if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction())) |
5707 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
5708 | return getPreloadedValue(DAG, *MFI, VT, |
5709 | AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR); |
5710 | } |
5711 | case Intrinsic::amdgcn_dispatch_ptr: |
5712 | case Intrinsic::amdgcn_queue_ptr: { |
5713 | if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) { |
5714 | DiagnosticInfoUnsupported BadIntrin( |
5715 | MF.getFunction(), "unsupported hsa intrinsic without hsa target", |
5716 | DL.getDebugLoc()); |
5717 | DAG.getContext()->diagnose(BadIntrin); |
5718 | return DAG.getUNDEF(VT); |
5719 | } |
5720 | |
5721 | auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ? |
5722 | AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR; |
5723 | return getPreloadedValue(DAG, *MFI, VT, RegID); |
5724 | } |
5725 | case Intrinsic::amdgcn_implicitarg_ptr: { |
5726 | if (MFI->isEntryFunction()) |
5727 | return getImplicitArgPtr(DAG, DL); |
5728 | return getPreloadedValue(DAG, *MFI, VT, |
5729 | AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); |
5730 | } |
5731 | case Intrinsic::amdgcn_kernarg_segment_ptr: { |
5732 | return getPreloadedValue(DAG, *MFI, VT, |
5733 | AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); |
5734 | } |
5735 | case Intrinsic::amdgcn_dispatch_id: { |
5736 | return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID); |
5737 | } |
5738 | case Intrinsic::amdgcn_rcp: |
5739 | return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1)); |
5740 | case Intrinsic::amdgcn_rsq: |
5741 | return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); |
5742 | case Intrinsic::amdgcn_rsq_legacy: |
5743 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) |
5744 | return emitRemovedIntrinsicError(DAG, DL, VT); |
5745 | |
5746 | return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1)); |
5747 | case Intrinsic::amdgcn_rcp_legacy: |
5748 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) |
5749 | return emitRemovedIntrinsicError(DAG, DL, VT); |
5750 | return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1)); |
5751 | case Intrinsic::amdgcn_rsq_clamp: { |
5752 | if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) |
5753 | return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); |
5754 | |
5755 | Type *Type = VT.getTypeForEVT(*DAG.getContext()); |
5756 | APFloat Max = APFloat::getLargest(Type->getFltSemantics()); |
5757 | APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true); |
5758 | |
5759 | SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); |
5760 | SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq, |
5761 | DAG.getConstantFP(Max, DL, VT)); |
5762 | return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp, |
5763 | DAG.getConstantFP(Min, DL, VT)); |
5764 | } |
5765 | case Intrinsic::r600_read_ngroups_x: |
5766 | if (Subtarget->isAmdHsaOS()) |
5767 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
5768 | |
5769 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
5770 | SI::KernelInputOffsets::NGROUPS_X, 4, false); |
5771 | case Intrinsic::r600_read_ngroups_y: |
5772 | if (Subtarget->isAmdHsaOS()) |
5773 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
5774 | |
5775 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
5776 | SI::KernelInputOffsets::NGROUPS_Y, 4, false); |
5777 | case Intrinsic::r600_read_ngroups_z: |
5778 | if (Subtarget->isAmdHsaOS()) |
5779 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
5780 | |
5781 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
5782 | SI::KernelInputOffsets::NGROUPS_Z, 4, false); |
5783 | case Intrinsic::r600_read_global_size_x: |
5784 | if (Subtarget->isAmdHsaOS()) |
5785 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
5786 | |
5787 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
5788 | SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false); |
5789 | case Intrinsic::r600_read_global_size_y: |
5790 | if (Subtarget->isAmdHsaOS()) |
5791 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
5792 | |
5793 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
5794 | SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false); |
5795 | case Intrinsic::r600_read_global_size_z: |
5796 | if (Subtarget->isAmdHsaOS()) |
5797 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
5798 | |
5799 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
5800 | SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false); |
5801 | case Intrinsic::r600_read_local_size_x: |
5802 | if (Subtarget->isAmdHsaOS()) |
5803 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
5804 | |
5805 | return lowerImplicitZextParam(DAG, Op, MVT::i16, |
5806 | SI::KernelInputOffsets::LOCAL_SIZE_X); |
5807 | case Intrinsic::r600_read_local_size_y: |
5808 | if (Subtarget->isAmdHsaOS()) |
5809 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
5810 | |
5811 | return lowerImplicitZextParam(DAG, Op, MVT::i16, |
5812 | SI::KernelInputOffsets::LOCAL_SIZE_Y); |
5813 | case Intrinsic::r600_read_local_size_z: |
5814 | if (Subtarget->isAmdHsaOS()) |
5815 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
5816 | |
5817 | return lowerImplicitZextParam(DAG, Op, MVT::i16, |
5818 | SI::KernelInputOffsets::LOCAL_SIZE_Z); |
5819 | case Intrinsic::amdgcn_workgroup_id_x: |
5820 | return getPreloadedValue(DAG, *MFI, VT, |
5821 | AMDGPUFunctionArgInfo::WORKGROUP_ID_X); |
5822 | case Intrinsic::amdgcn_workgroup_id_y: |
5823 | return getPreloadedValue(DAG, *MFI, VT, |
5824 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Y); |
5825 | case Intrinsic::amdgcn_workgroup_id_z: |
5826 | return getPreloadedValue(DAG, *MFI, VT, |
5827 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Z); |
5828 | case Intrinsic::amdgcn_workitem_id_x: |
5829 | return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, |
5830 | SDLoc(DAG.getEntryNode()), |
5831 | MFI->getArgInfo().WorkItemIDX); |
5832 | case Intrinsic::amdgcn_workitem_id_y: |
5833 | return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, |
5834 | SDLoc(DAG.getEntryNode()), |
5835 | MFI->getArgInfo().WorkItemIDY); |
5836 | case Intrinsic::amdgcn_workitem_id_z: |
5837 | return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, |
5838 | SDLoc(DAG.getEntryNode()), |
5839 | MFI->getArgInfo().WorkItemIDZ); |
5840 | case Intrinsic::amdgcn_wavefrontsize: |
5841 | return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(), |
5842 | SDLoc(Op), MVT::i32); |
5843 | case Intrinsic::amdgcn_s_buffer_load: { |
5844 | bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10; |
5845 | SDValue GLC; |
5846 | SDValue DLC = DAG.getTargetConstant(0, DL, MVT::i1); |
5847 | if (!parseCachePolicy(Op.getOperand(3), DAG, &GLC, nullptr, |
5848 | IsGFX10 ? &DLC : nullptr)) |
5849 | return Op; |
5850 | return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), |
5851 | DAG); |
5852 | } |
5853 | case Intrinsic::amdgcn_fdiv_fast: |
5854 | return lowerFDIV_FAST(Op, DAG); |
5855 | case Intrinsic::amdgcn_sin: |
5856 | return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1)); |
5857 | |
5858 | case Intrinsic::amdgcn_cos: |
5859 | return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1)); |
5860 | |
5861 | case Intrinsic::amdgcn_mul_u24: |
5862 | return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1), Op.getOperand(2)); |
5863 | case Intrinsic::amdgcn_mul_i24: |
5864 | return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, Op.getOperand(1), Op.getOperand(2)); |
5865 | |
5866 | case Intrinsic::amdgcn_log_clamp: { |
5867 | if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) |
5868 | return SDValue(); |
5869 | |
5870 | DiagnosticInfoUnsupported BadIntrin( |
5871 | MF.getFunction(), "intrinsic not supported on subtarget", |
5872 | DL.getDebugLoc()); |
5873 | DAG.getContext()->diagnose(BadIntrin); |
5874 | return DAG.getUNDEF(VT); |
5875 | } |
5876 | case Intrinsic::amdgcn_ldexp: |
5877 | return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, |
5878 | Op.getOperand(1), Op.getOperand(2)); |
5879 | |
5880 | case Intrinsic::amdgcn_fract: |
5881 | return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1)); |
5882 | |
5883 | case Intrinsic::amdgcn_class: |
5884 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT, |
5885 | Op.getOperand(1), Op.getOperand(2)); |
5886 | case Intrinsic::amdgcn_div_fmas: |
5887 | return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT, |
5888 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), |
5889 | Op.getOperand(4)); |
5890 | |
5891 | case Intrinsic::amdgcn_div_fixup: |
5892 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT, |
5893 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
5894 | |
5895 | case Intrinsic::amdgcn_trig_preop: |
5896 | return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT, |
5897 | Op.getOperand(1), Op.getOperand(2)); |
5898 | case Intrinsic::amdgcn_div_scale: { |
5899 | const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3)); |
5900 | |
5901 | // Translate to the operands expected by the machine instruction. The first
5902 | // source operand must match either the numerator or the denominator.
5903 | SDValue Numerator = Op.getOperand(1); |
5904 | SDValue Denominator = Op.getOperand(2); |
5905 | |
5906 | // Note this order is opposite of the machine instruction's operand order,
5907 | // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The |
5908 | // intrinsic has the numerator as the first operand to match a normal |
5909 | // division operation. |
5910 | |
5911 | SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator; |
5912 | |
5913 | return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0, |
5914 | Denominator, Numerator); |
5915 | } |
5916 | case Intrinsic::amdgcn_icmp: { |
5917 | // There is a Pat that handles this variant, so return it as-is. |
5918 | if (Op.getOperand(1).getValueType() == MVT::i1 && |
5919 | Op.getConstantOperandVal(2) == 0 && |
5920 | Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE) |
5921 | return Op; |
5922 | return lowerICMPIntrinsic(*this, Op.getNode(), DAG); |
5923 | } |
5924 | case Intrinsic::amdgcn_fcmp: { |
5925 | return lowerFCMPIntrinsic(*this, Op.getNode(), DAG); |
5926 | } |
5927 | case Intrinsic::amdgcn_fmed3: |
5928 | return DAG.getNode(AMDGPUISD::FMED3, DL, VT, |
5929 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
5930 | case Intrinsic::amdgcn_fdot2: |
5931 | return DAG.getNode(AMDGPUISD::FDOT2, DL, VT, |
5932 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), |
5933 | Op.getOperand(4)); |
5934 | case Intrinsic::amdgcn_fmul_legacy: |
5935 | return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT, |
5936 | Op.getOperand(1), Op.getOperand(2)); |
5937 | case Intrinsic::amdgcn_sffbh: |
5938 | return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1)); |
5939 | case Intrinsic::amdgcn_sbfe: |
5940 | return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT, |
5941 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
5942 | case Intrinsic::amdgcn_ubfe: |
5943 | return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT, |
5944 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
5945 | case Intrinsic::amdgcn_cvt_pkrtz: |
5946 | case Intrinsic::amdgcn_cvt_pknorm_i16: |
5947 | case Intrinsic::amdgcn_cvt_pknorm_u16: |
5948 | case Intrinsic::amdgcn_cvt_pk_i16: |
5949 | case Intrinsic::amdgcn_cvt_pk_u16: { |
5950 | // FIXME: Stop adding cast if v2f16/v2i16 are legal. |
5951 | EVT VT = Op.getValueType(); |
5952 | unsigned Opcode; |
5953 | |
5954 | if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz) |
5955 | Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32; |
5956 | else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16) |
5957 | Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; |
5958 | else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16) |
5959 | Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; |
5960 | else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16) |
5961 | Opcode = AMDGPUISD::CVT_PK_I16_I32; |
5962 | else |
5963 | Opcode = AMDGPUISD::CVT_PK_U16_U32; |
5964 | |
5965 | if (isTypeLegal(VT)) |
5966 | return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2)); |
5967 | |
5968 | SDValue Node = DAG.getNode(Opcode, DL, MVT::i32, |
5969 | Op.getOperand(1), Op.getOperand(2)); |
5970 | return DAG.getNode(ISD::BITCAST, DL, VT, Node); |
5971 | } |
5972 | case Intrinsic::amdgcn_fmad_ftz: |
5973 | return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1), |
5974 | Op.getOperand(2), Op.getOperand(3)); |
5975 | |
5976 | case Intrinsic::amdgcn_if_break: |
5977 | return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT, |
5978 | Op->getOperand(1), Op->getOperand(2)), 0); |
5979 | |
5980 | case Intrinsic::amdgcn_groupstaticsize: { |
5981 | Triple::OSType OS = getTargetMachine().getTargetTriple().getOS(); |
5982 | if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) |
5983 | return Op; |
5984 | |
5985 | const Module *M = MF.getFunction().getParent(); |
5986 | const GlobalValue *GV = |
5987 | M->getNamedValue(Intrinsic::getName(Intrinsic::amdgcn_groupstaticsize)); |
5988 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0, |
5989 | SIInstrInfo::MO_ABS32_LO); |
5990 | return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0}; |
5991 | } |
5992 | case Intrinsic::amdgcn_is_shared: |
5993 | case Intrinsic::amdgcn_is_private: { |
5994 | SDLoc SL(Op); |
5995 | unsigned AS = (IntrinsicID == Intrinsic::amdgcn_is_shared) ? |
5996 | AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS; |
5997 | SDValue Aperture = getSegmentAperture(AS, SL, DAG); |
5998 | SDValue SrcVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, |
5999 | Op.getOperand(1)); |
6000 | |
6001 | SDValue SrcHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, SrcVec, |
6002 | DAG.getConstant(1, SL, MVT::i32)); |
6003 | return DAG.getSetCC(SL, MVT::i1, SrcHi, Aperture, ISD::SETEQ); |
6004 | } |
6005 | default: |
6006 | if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = |
6007 | AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) |
6008 | return lowerImage(Op, ImageDimIntr, DAG); |
6009 | |
6010 | return Op; |
6011 | } |
6012 | } |
6013 | |
6014 | // This function computes an appropriate offset to pass to |
6015 | // MachineMemOperand::setOffset() based on the offset inputs to |
6016 | // an intrinsic. If any of the offsets are non-constant or
6017 | // if VIndex is non-zero then this function returns 0. Otherwise, |
6018 | // it returns the sum of VOffset, SOffset, and Offset. |
6019 | static unsigned getBufferOffsetForMMO(SDValue VOffset, |
6020 | SDValue SOffset, |
6021 | SDValue Offset, |
6022 | SDValue VIndex = SDValue()) { |
6023 | |
6024 | if (!isa<ConstantSDNode>(VOffset) || !isa<ConstantSDNode>(SOffset) || |
6025 | !isa<ConstantSDNode>(Offset)) |
6026 | return 0; |
6027 | |
6028 | if (VIndex) { |
6029 | if (!isa<ConstantSDNode>(VIndex) || !cast<ConstantSDNode>(VIndex)->isNullValue()) |
6030 | return 0; |
6031 | } |
6032 | |
6033 | return cast<ConstantSDNode>(VOffset)->getSExtValue() + |
6034 | cast<ConstantSDNode>(SOffset)->getSExtValue() + |
6035 | cast<ConstantSDNode>(Offset)->getSExtValue(); |
6036 | } |
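     |
     | // E.g. (illustrative) voffset = 16, soffset = 4, offset = 8 with a zero
     | // vindex returns 28; if any of the three is non-constant, or vindex is
     | // present but not a constant zero, the function conservatively returns 0.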
6037 | |
6038 | static unsigned getDSShaderTypeValue(const MachineFunction &MF) { |
6039 | switch (MF.getFunction().getCallingConv()) { |
6040 | case CallingConv::AMDGPU_PS: |
6041 | return 1; |
6042 | case CallingConv::AMDGPU_VS: |
6043 | return 2; |
6044 | case CallingConv::AMDGPU_GS: |
6045 | return 3; |
6046 | case CallingConv::AMDGPU_HS: |
6047 | case CallingConv::AMDGPU_LS: |
6048 | case CallingConv::AMDGPU_ES: |
6049 | report_fatal_error("ds_ordered_count unsupported for this calling conv"); |
6050 | case CallingConv::AMDGPU_CS: |
6051 | case CallingConv::AMDGPU_KERNEL: |
6052 | case CallingConv::C: |
6053 | case CallingConv::Fast: |
6054 | default: |
6055 | // Assume other calling conventions are various compute callable functions |
6056 | return 0; |
6057 | } |
6058 | } |
6059 | |
6060 | SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, |
6061 | SelectionDAG &DAG) const { |
6062 | unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); |
6063 | SDLoc DL(Op); |
6064 | |
6065 | switch (IntrID) { |
6066 | case Intrinsic::amdgcn_ds_ordered_add: |
6067 | case Intrinsic::amdgcn_ds_ordered_swap: { |
6068 | MemSDNode *M = cast<MemSDNode>(Op); |
6069 | SDValue Chain = M->getOperand(0); |
6070 | SDValue M0 = M->getOperand(2); |
6071 | SDValue Value = M->getOperand(3); |
6072 | unsigned IndexOperand = M->getConstantOperandVal(7); |
6073 | unsigned WaveRelease = M->getConstantOperandVal(8); |
6074 | unsigned WaveDone = M->getConstantOperandVal(9); |
6075 | |
6076 | unsigned OrderedCountIndex = IndexOperand & 0x3f; |
6077 | IndexOperand &= ~0x3f; |
6078 | unsigned CountDw = 0; |
6079 | |
6080 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) { |
6081 | CountDw = (IndexOperand >> 24) & 0xf; |
6082 | IndexOperand &= ~(0xf << 24); |
6083 | |
6084 | if (CountDw < 1 || CountDw > 4) { |
6085 | report_fatal_error( |
6086 | "ds_ordered_count: dword count must be between 1 and 4"); |
6087 | } |
6088 | } |
6089 | |
6090 | if (IndexOperand) |
6091 | report_fatal_error("ds_ordered_count: bad index operand"); |
6092 | |
6093 | if (WaveDone && !WaveRelease) |
6094 | report_fatal_error("ds_ordered_count: wave_done requires wave_release"); |
6095 | |
6096 | unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1; |
6097 | unsigned ShaderType = getDSShaderTypeValue(DAG.getMachineFunction()); |
6098 | unsigned Offset0 = OrderedCountIndex << 2; |
6099 | unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) | |
6100 | (Instruction << 4); |
6101 | |
6102 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) |
6103 | Offset1 |= (CountDw - 1) << 6; |
6104 | |
6105 | unsigned Offset = Offset0 | (Offset1 << 8); |
6106 | |
6107 | SDValue Ops[] = { |
6108 | Chain, |
6109 | Value, |
6110 | DAG.getTargetConstant(Offset, DL, MVT::i16), |
6111 | copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue |
6112 | }; |
6113 | return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL, |
6114 | M->getVTList(), Ops, M->getMemoryVT(), |
6115 | M->getMemOperand()); |
6116 | } |
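     | // Illustrative encoding of the merged offset built above: for
     | // ds_ordered_add (Instruction = 0) in a compute shader (ShaderType = 0)
     | // with OrderedCountIndex = 1, WaveRelease = 1 and WaveDone = 0,
     | // Offset0 = 1 << 2 = 4 and Offset1 = 1, so Offset = 4 | (1 << 8) = 0x104.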
6117 | case Intrinsic::amdgcn_ds_fadd: { |
6118 | MemSDNode *M = cast<MemSDNode>(Op); |
6119 | unsigned Opc; |
6120 | switch (IntrID) { |
6121 | case Intrinsic::amdgcn_ds_fadd: |
6122 | Opc = ISD::ATOMIC_LOAD_FADD; |
6123 | break; |
6124 | } |
6125 | |
6126 | return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(), |
6127 | M->getOperand(0), M->getOperand(2), M->getOperand(3), |
6128 | M->getMemOperand()); |
6129 | } |
6130 | case Intrinsic::amdgcn_atomic_inc: |
6131 | case Intrinsic::amdgcn_atomic_dec: |
6132 | case Intrinsic::amdgcn_ds_fmin: |
6133 | case Intrinsic::amdgcn_ds_fmax: { |
6134 | MemSDNode *M = cast<MemSDNode>(Op); |
6135 | unsigned Opc; |
6136 | switch (IntrID) { |
6137 | case Intrinsic::amdgcn_atomic_inc: |
6138 | Opc = AMDGPUISD::ATOMIC_INC; |
6139 | break; |
6140 | case Intrinsic::amdgcn_atomic_dec: |
6141 | Opc = AMDGPUISD::ATOMIC_DEC; |
6142 | break; |
6143 | case Intrinsic::amdgcn_ds_fmin: |
6144 | Opc = AMDGPUISD::ATOMIC_LOAD_FMIN; |
6145 | break; |
6146 | case Intrinsic::amdgcn_ds_fmax: |
6147 | Opc = AMDGPUISD::ATOMIC_LOAD_FMAX; |
6148 | break; |
6149 | default: |
6150 | llvm_unreachable("Unknown intrinsic!")::llvm::llvm_unreachable_internal("Unknown intrinsic!", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 6150); |
6151 | } |
6152 | SDValue Ops[] = { |
6153 | M->getOperand(0), // Chain |
6154 | M->getOperand(2), // Ptr |
6155 | M->getOperand(3) // Value |
6156 | }; |
6157 | |
6158 | return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops, |
6159 | M->getMemoryVT(), M->getMemOperand()); |
6160 | } |
6161 | case Intrinsic::amdgcn_buffer_load: |
6162 | case Intrinsic::amdgcn_buffer_load_format: { |
6163 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); |
6164 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); |
6165 | unsigned IdxEn = 1; |
6166 | if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3))) |
6167 | IdxEn = Idx->getZExtValue() != 0; |
6168 | SDValue Ops[] = { |
6169 | Op.getOperand(0), // Chain |
6170 | Op.getOperand(2), // rsrc |
6171 | Op.getOperand(3), // vindex |
6172 | SDValue(), // voffset -- will be set by setBufferOffsets |
6173 | SDValue(), // soffset -- will be set by setBufferOffsets |
6174 | SDValue(), // offset -- will be set by setBufferOffsets |
6175 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy |
6176 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen |
6177 | }; |
6178 | |
6179 | unsigned Offset = setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]); |
6180 | // We don't know the offset if vindex is non-zero, so clear it. |
6181 | if (IdxEn) |
6182 | Offset = 0; |
6183 | |
6184 | unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ? |
6185 | AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; |
6186 | |
6187 | EVT VT = Op.getValueType(); |
6188 | EVT IntVT = VT.changeTypeToInteger(); |
6189 | auto *M = cast<MemSDNode>(Op); |
6190 | M->getMemOperand()->setOffset(Offset); |
6191 | EVT LoadVT = Op.getValueType(); |
6192 | |
6193 | if (LoadVT.getScalarType() == MVT::f16) |
6194 | return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, |
6195 | M, DAG, Ops); |
6196 | |
6197 | // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics |
6198 | if (LoadVT.getScalarType() == MVT::i8 || |
6199 | LoadVT.getScalarType() == MVT::i16) |
6200 | return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M); |
6201 | |
6202 | return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, |
6203 | M->getMemOperand(), DAG); |
6204 | } |
6205 | case Intrinsic::amdgcn_raw_buffer_load: |
6206 | case Intrinsic::amdgcn_raw_buffer_load_format: { |
6207 | const bool IsFormat = IntrID == Intrinsic::amdgcn_raw_buffer_load_format; |
6208 | |
6209 | auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); |
6210 | SDValue Ops[] = { |
6211 | Op.getOperand(0), // Chain |
6212 | Op.getOperand(2), // rsrc |
6213 | DAG.getConstant(0, DL, MVT::i32), // vindex |
6214 | Offsets.first, // voffset |
6215 | Op.getOperand(4), // soffset |
6216 | Offsets.second, // offset |
6217 | Op.getOperand(5), // cachepolicy, swizzled buffer |
6218 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen |
6219 | }; |
6220 | |
6221 | auto *M = cast<MemSDNode>(Op); |
6222 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[3], Ops[4], Ops[5])); |
6223 | return lowerIntrinsicLoad(M, IsFormat, DAG, Ops); |
6224 | } |
6225 | case Intrinsic::amdgcn_struct_buffer_load: |
6226 | case Intrinsic::amdgcn_struct_buffer_load_format: { |
6227 | const bool IsFormat = IntrID == Intrinsic::amdgcn_struct_buffer_load_format; |
6228 | |
6229 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); |
6230 | SDValue Ops[] = { |
6231 | Op.getOperand(0), // Chain |
6232 | Op.getOperand(2), // rsrc |
6233 | Op.getOperand(3), // vindex |
6234 | Offsets.first, // voffset |
6235 | Op.getOperand(5), // soffset |
6236 | Offsets.second, // offset |
6237 | Op.getOperand(6), // cachepolicy, swizzled buffer |
6238 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen |
6239 | }; |
6240 | |
6241 | auto *M = cast<MemSDNode>(Op); |
6242 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[3], Ops[4], Ops[5], |
6243 | Ops[2])); |
6244 | return lowerIntrinsicLoad(cast<MemSDNode>(Op), IsFormat, DAG, Ops); |
6245 | } |
6246 | case Intrinsic::amdgcn_tbuffer_load: { |
6247 | MemSDNode *M = cast<MemSDNode>(Op); |
6248 | EVT LoadVT = Op.getValueType(); |
6249 | |
6250 | unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); |
6251 | unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue(); |
6252 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue(); |
6253 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue(); |
6254 | unsigned IdxEn = 1; |
6255 | if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3))) |
6256 | IdxEn = Idx->getZExtValue() != 0; |
6257 | SDValue Ops[] = { |
6258 | Op.getOperand(0), // Chain |
6259 | Op.getOperand(2), // rsrc |
6260 | Op.getOperand(3), // vindex |
6261 | Op.getOperand(4), // voffset |
6262 | Op.getOperand(5), // soffset |
6263 | Op.getOperand(6), // offset |
6264 | DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format |
6265 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy |
6266 | DAG.getTargetConstant(IdxEn, DL, MVT::i1) // idxen |
6267 | }; |
6268 | |
6269 | if (LoadVT.getScalarType() == MVT::f16) |
6270 | return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, |
6271 | M, DAG, Ops); |
6272 | return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, |
6273 | Op->getVTList(), Ops, LoadVT, M->getMemOperand(), |
6274 | DAG); |
6275 | } |
6276 | case Intrinsic::amdgcn_raw_tbuffer_load: { |
6277 | MemSDNode *M = cast<MemSDNode>(Op); |
6278 | EVT LoadVT = Op.getValueType(); |
6279 | auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); |
6280 | |
6281 | SDValue Ops[] = { |
6282 | Op.getOperand(0), // Chain |
6283 | Op.getOperand(2), // rsrc |
6284 | DAG.getConstant(0, DL, MVT::i32), // vindex |
6285 | Offsets.first, // voffset |
6286 | Op.getOperand(4), // soffset |
6287 | Offsets.second, // offset |
6288 | Op.getOperand(5), // format |
6289 | Op.getOperand(6), // cachepolicy, swizzled buffer |
6290 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen |
6291 | }; |
6292 | |
6293 | if (LoadVT.getScalarType() == MVT::f16) |
6294 | return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, |
6295 | M, DAG, Ops); |
6296 | return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, |
6297 | Op->getVTList(), Ops, LoadVT, M->getMemOperand(), |
6298 | DAG); |
6299 | } |
6300 | case Intrinsic::amdgcn_struct_tbuffer_load: { |
6301 | MemSDNode *M = cast<MemSDNode>(Op); |
6302 | EVT LoadVT = Op.getValueType(); |
6303 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); |
6304 | |
6305 | SDValue Ops[] = { |
6306 | Op.getOperand(0), // Chain |
6307 | Op.getOperand(2), // rsrc |
6308 | Op.getOperand(3), // vindex |
6309 | Offsets.first, // voffset |
6310 | Op.getOperand(5), // soffset |
6311 | Offsets.second, // offset |
6312 | Op.getOperand(6), // format |
6313 | Op.getOperand(7), // cachepolicy, swizzled buffer |
6314 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen |
6315 | }; |
6316 | |
6317 | if (LoadVT.getScalarType() == MVT::f16) |
6318 | return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, |
6319 | M, DAG, Ops); |
6320 | return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, |
6321 | Op->getVTList(), Ops, LoadVT, M->getMemOperand(), |
6322 | DAG); |
6323 | } |
6324 | case Intrinsic::amdgcn_buffer_atomic_swap: |
6325 | case Intrinsic::amdgcn_buffer_atomic_add: |
6326 | case Intrinsic::amdgcn_buffer_atomic_sub: |
6327 | case Intrinsic::amdgcn_buffer_atomic_smin: |
6328 | case Intrinsic::amdgcn_buffer_atomic_umin: |
6329 | case Intrinsic::amdgcn_buffer_atomic_smax: |
6330 | case Intrinsic::amdgcn_buffer_atomic_umax: |
6331 | case Intrinsic::amdgcn_buffer_atomic_and: |
6332 | case Intrinsic::amdgcn_buffer_atomic_or: |
6333 | case Intrinsic::amdgcn_buffer_atomic_xor: { |
6334 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); |
6335 | unsigned IdxEn = 1; |
6336 | if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4))) |
6337 | IdxEn = Idx->getZExtValue() != 0; |
6338 | SDValue Ops[] = { |
6339 | Op.getOperand(0), // Chain |
6340 | Op.getOperand(2), // vdata |
6341 | Op.getOperand(3), // rsrc |
6342 | Op.getOperand(4), // vindex |
6343 | SDValue(), // voffset -- will be set by setBufferOffsets |
6344 | SDValue(), // soffset -- will be set by setBufferOffsets |
6345 | SDValue(), // offset -- will be set by setBufferOffsets |
6346 | DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy |
6347 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen |
6348 | }; |
6349 | unsigned Offset = setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); |
6350 | // We don't know the offset if vindex is non-zero, so clear it. |
6351 | if (IdxEn) |
6352 | Offset = 0; |
6353 | EVT VT = Op.getValueType(); |
6354 | |
6355 | auto *M = cast<MemSDNode>(Op); |
6356 | M->getMemOperand()->setOffset(Offset); |
6357 | unsigned Opcode = 0; |
6358 | |
6359 | switch (IntrID) { |
6360 | case Intrinsic::amdgcn_buffer_atomic_swap: |
6361 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; |
6362 | break; |
6363 | case Intrinsic::amdgcn_buffer_atomic_add: |
6364 | Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; |
6365 | break; |
6366 | case Intrinsic::amdgcn_buffer_atomic_sub: |
6367 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; |
6368 | break; |
6369 | case Intrinsic::amdgcn_buffer_atomic_smin: |
6370 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN; |
6371 | break; |
6372 | case Intrinsic::amdgcn_buffer_atomic_umin: |
6373 | Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; |
6374 | break; |
6375 | case Intrinsic::amdgcn_buffer_atomic_smax: |
6376 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; |
6377 | break; |
6378 | case Intrinsic::amdgcn_buffer_atomic_umax: |
6379 | Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; |
6380 | break; |
6381 | case Intrinsic::amdgcn_buffer_atomic_and: |
6382 | Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; |
6383 | break; |
6384 | case Intrinsic::amdgcn_buffer_atomic_or: |
6385 | Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; |
6386 | break; |
6387 | case Intrinsic::amdgcn_buffer_atomic_xor: |
6388 | Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; |
6389 | break; |
6390 | default: |
6391 | llvm_unreachable("unhandled atomic opcode")::llvm::llvm_unreachable_internal("unhandled atomic opcode", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 6391); |
6392 | } |
6393 | |
6394 | return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, |
6395 | M->getMemOperand()); |
6396 | } |
6397 | case Intrinsic::amdgcn_raw_buffer_atomic_swap: |
6398 | case Intrinsic::amdgcn_raw_buffer_atomic_add: |
6399 | case Intrinsic::amdgcn_raw_buffer_atomic_sub: |
6400 | case Intrinsic::amdgcn_raw_buffer_atomic_smin: |
6401 | case Intrinsic::amdgcn_raw_buffer_atomic_umin: |
6402 | case Intrinsic::amdgcn_raw_buffer_atomic_smax: |
6403 | case Intrinsic::amdgcn_raw_buffer_atomic_umax: |
6404 | case Intrinsic::amdgcn_raw_buffer_atomic_and: |
6405 | case Intrinsic::amdgcn_raw_buffer_atomic_or: |
6406 | case Intrinsic::amdgcn_raw_buffer_atomic_xor: |
6407 | case Intrinsic::amdgcn_raw_buffer_atomic_inc: |
6408 | case Intrinsic::amdgcn_raw_buffer_atomic_dec: { |
6409 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); |
6410 | SDValue Ops[] = { |
6411 | Op.getOperand(0), // Chain |
6412 | Op.getOperand(2), // vdata |
6413 | Op.getOperand(3), // rsrc |
6414 | DAG.getConstant(0, DL, MVT::i32), // vindex |
6415 | Offsets.first, // voffset |
6416 | Op.getOperand(5), // soffset |
6417 | Offsets.second, // offset |
6418 | Op.getOperand(6), // cachepolicy |
6419 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen |
6420 | }; |
6421 | EVT VT = Op.getValueType(); |
6422 | |
6423 | auto *M = cast<MemSDNode>(Op); |
6424 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6])); |
6425 | unsigned Opcode = 0; |
6426 | |
6427 | switch (IntrID) { |
6428 | case Intrinsic::amdgcn_raw_buffer_atomic_swap: |
6429 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; |
6430 | break; |
6431 | case Intrinsic::amdgcn_raw_buffer_atomic_add: |
6432 | Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; |
6433 | break; |
6434 | case Intrinsic::amdgcn_raw_buffer_atomic_sub: |
6435 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; |
6436 | break; |
6437 | case Intrinsic::amdgcn_raw_buffer_atomic_smin: |
6438 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN; |
6439 | break; |
6440 | case Intrinsic::amdgcn_raw_buffer_atomic_umin: |
6441 | Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; |
6442 | break; |
6443 | case Intrinsic::amdgcn_raw_buffer_atomic_smax: |
6444 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; |
6445 | break; |
6446 | case Intrinsic::amdgcn_raw_buffer_atomic_umax: |
6447 | Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; |
6448 | break; |
6449 | case Intrinsic::amdgcn_raw_buffer_atomic_and: |
6450 | Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; |
6451 | break; |
6452 | case Intrinsic::amdgcn_raw_buffer_atomic_or: |
6453 | Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; |
6454 | break; |
6455 | case Intrinsic::amdgcn_raw_buffer_atomic_xor: |
6456 | Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; |
6457 | break; |
6458 | case Intrinsic::amdgcn_raw_buffer_atomic_inc: |
6459 | Opcode = AMDGPUISD::BUFFER_ATOMIC_INC; |
6460 | break; |
6461 | case Intrinsic::amdgcn_raw_buffer_atomic_dec: |
6462 | Opcode = AMDGPUISD::BUFFER_ATOMIC_DEC; |
6463 | break; |
6464 | default: |
6465 | llvm_unreachable("unhandled atomic opcode")::llvm::llvm_unreachable_internal("unhandled atomic opcode", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 6465); |
6466 | } |
6467 | |
6468 | return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, |
6469 | M->getMemOperand()); |
6470 | } |
6471 | case Intrinsic::amdgcn_struct_buffer_atomic_swap: |
6472 | case Intrinsic::amdgcn_struct_buffer_atomic_add: |
6473 | case Intrinsic::amdgcn_struct_buffer_atomic_sub: |
6474 | case Intrinsic::amdgcn_struct_buffer_atomic_smin: |
6475 | case Intrinsic::amdgcn_struct_buffer_atomic_umin: |
6476 | case Intrinsic::amdgcn_struct_buffer_atomic_smax: |
6477 | case Intrinsic::amdgcn_struct_buffer_atomic_umax: |
6478 | case Intrinsic::amdgcn_struct_buffer_atomic_and: |
6479 | case Intrinsic::amdgcn_struct_buffer_atomic_or: |
6480 | case Intrinsic::amdgcn_struct_buffer_atomic_xor: |
6481 | case Intrinsic::amdgcn_struct_buffer_atomic_inc: |
6482 | case Intrinsic::amdgcn_struct_buffer_atomic_dec: { |
6483 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); |
6484 | SDValue Ops[] = { |
6485 | Op.getOperand(0), // Chain |
6486 | Op.getOperand(2), // vdata |
6487 | Op.getOperand(3), // rsrc |
6488 | Op.getOperand(4), // vindex |
6489 | Offsets.first, // voffset |
6490 | Op.getOperand(6), // soffset |
6491 | Offsets.second, // offset |
6492 | Op.getOperand(7), // cachepolicy |
6493 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen |
6494 | }; |
6495 | EVT VT = Op.getValueType(); |
6496 | |
6497 | auto *M = cast<MemSDNode>(Op); |
6498 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6], |
6499 | Ops[3])); |
6500 | unsigned Opcode = 0; |
6501 | |
6502 | switch (IntrID) { |
6503 | case Intrinsic::amdgcn_struct_buffer_atomic_swap: |
6504 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; |
6505 | break; |
6506 | case Intrinsic::amdgcn_struct_buffer_atomic_add: |
6507 | Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; |
6508 | break; |
6509 | case Intrinsic::amdgcn_struct_buffer_atomic_sub: |
6510 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; |
6511 | break; |
6512 | case Intrinsic::amdgcn_struct_buffer_atomic_smin: |
6513 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN; |
6514 | break; |
6515 | case Intrinsic::amdgcn_struct_buffer_atomic_umin: |
6516 | Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; |
6517 | break; |
6518 | case Intrinsic::amdgcn_struct_buffer_atomic_smax: |
6519 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; |
6520 | break; |
6521 | case Intrinsic::amdgcn_struct_buffer_atomic_umax: |
6522 | Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; |
6523 | break; |
6524 | case Intrinsic::amdgcn_struct_buffer_atomic_and: |
6525 | Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; |
6526 | break; |
6527 | case Intrinsic::amdgcn_struct_buffer_atomic_or: |
6528 | Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; |
6529 | break; |
6530 | case Intrinsic::amdgcn_struct_buffer_atomic_xor: |
6531 | Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; |
6532 | break; |
6533 | case Intrinsic::amdgcn_struct_buffer_atomic_inc: |
6534 | Opcode = AMDGPUISD::BUFFER_ATOMIC_INC; |
6535 | break; |
6536 | case Intrinsic::amdgcn_struct_buffer_atomic_dec: |
6537 | Opcode = AMDGPUISD::BUFFER_ATOMIC_DEC; |
6538 | break; |
6539 | default: |
6540 | llvm_unreachable("unhandled atomic opcode")::llvm::llvm_unreachable_internal("unhandled atomic opcode", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 6540); |
6541 | } |
6542 | |
6543 | return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, |
6544 | M->getMemOperand()); |
6545 | } |
6546 | case Intrinsic::amdgcn_buffer_atomic_cmpswap: { |
6547 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); |
6548 | unsigned IdxEn = 1; |
6549 | if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(5))) |
6550 | IdxEn = Idx->getZExtValue() != 0; |
6551 | SDValue Ops[] = { |
6552 | Op.getOperand(0), // Chain |
6553 | Op.getOperand(2), // src |
6554 | Op.getOperand(3), // cmp |
6555 | Op.getOperand(4), // rsrc |
6556 | Op.getOperand(5), // vindex |
6557 | SDValue(), // voffset -- will be set by setBufferOffsets |
6558 | SDValue(), // soffset -- will be set by setBufferOffsets |
6559 | SDValue(), // offset -- will be set by setBufferOffsets |
6560 | DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy |
6561 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen |
6562 | }; |
6563 | unsigned Offset = setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]); |
6564 | // We don't know the offset if vindex is non-zero, so clear it. |
6565 | if (IdxEn) |
6566 | Offset = 0; |
6567 | EVT VT = Op.getValueType(); |
6568 | auto *M = cast<MemSDNode>(Op); |
6569 | M->getMemOperand()->setOffset(Offset); |
6570 | |
6571 | return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, |
6572 | Op->getVTList(), Ops, VT, M->getMemOperand()); |
6573 | } |
6574 | case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: { |
6575 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); |
6576 | SDValue Ops[] = { |
6577 | Op.getOperand(0), // Chain |
6578 | Op.getOperand(2), // src |
6579 | Op.getOperand(3), // cmp |
6580 | Op.getOperand(4), // rsrc |
6581 | DAG.getConstant(0, DL, MVT::i32), // vindex |
6582 | Offsets.first, // voffset |
6583 | Op.getOperand(6), // soffset |
6584 | Offsets.second, // offset |
6585 | Op.getOperand(7), // cachepolicy |
6586 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen |
6587 | }; |
6588 | EVT VT = Op.getValueType(); |
6589 | auto *M = cast<MemSDNode>(Op); |
6590 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[5], Ops[6], Ops[7])); |
6591 | |
6592 | return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, |
6593 | Op->getVTList(), Ops, VT, M->getMemOperand()); |
6594 | } |
6595 | case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: { |
6596 | auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG); |
6597 | SDValue Ops[] = { |
6598 | Op.getOperand(0), // Chain |
6599 | Op.getOperand(2), // src |
6600 | Op.getOperand(3), // cmp |
6601 | Op.getOperand(4), // rsrc |
6602 | Op.getOperand(5), // vindex |
6603 | Offsets.first, // voffset |
6604 | Op.getOperand(7), // soffset |
6605 | Offsets.second, // offset |
6606 | Op.getOperand(8), // cachepolicy |
6607 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen |
6608 | }; |
6609 | EVT VT = Op.getValueType(); |
6610 | auto *M = cast<MemSDNode>(Op); |
6611 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[5], Ops[6], Ops[7], |
6612 | Ops[4])); |
6613 | |
6614 | return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, |
6615 | Op->getVTList(), Ops, VT, M->getMemOperand()); |
6616 | } |
6617 | |
6618 | default: |
6619 | if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = |
6620 | AMDGPU::getImageDimIntrinsicInfo(IntrID)) |
6621 | return lowerImage(Op, ImageDimIntr, DAG); |
6622 | |
6623 | return SDValue(); |
6624 | } |
6625 | } |
6626 | |
6627 | // Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
6628 | // dwordx4 if the subtarget does not support dwordx3 load/stores (e.g. SI).
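// For example, a v3i32 load becomes a v4i32 load with a 16-byte memory
// operand, and the original three elements are extracted back out afterwards.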
6629 | SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL, |
6630 | SDVTList VTList, |
6631 | ArrayRef<SDValue> Ops, EVT MemVT, |
6632 | MachineMemOperand *MMO, |
6633 | SelectionDAG &DAG) const { |
6634 | EVT VT = VTList.VTs[0]; |
6635 | EVT WidenedVT = VT; |
6636 | EVT WidenedMemVT = MemVT; |
6637 | if (!Subtarget->hasDwordx3LoadStores() && |
6638 | (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) { |
6639 | WidenedVT = EVT::getVectorVT(*DAG.getContext(), |
6640 | WidenedVT.getVectorElementType(), 4); |
6641 | WidenedMemVT = EVT::getVectorVT(*DAG.getContext(), |
6642 | WidenedMemVT.getVectorElementType(), 4); |
6643 | MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16); |
6644 | } |
6645 | |
6646 | assert(VTList.NumVTs == 2);
6647 | SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]); |
6648 | |
6649 | auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops, |
6650 | WidenedMemVT, MMO); |
6651 | if (WidenedVT != VT) { |
6652 | auto Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp, |
6653 | DAG.getVectorIdxConstant(0, DL)); |
6654 | NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL); |
6655 | } |
6656 | return NewOp; |
6657 | } |
6658 | |
6659 | SDValue SITargetLowering::handleD16VData(SDValue VData, |
6660 | SelectionDAG &DAG) const { |
6661 | EVT StoreVT = VData.getValueType(); |
6662 | |
6663 | // No change for f16 and legal vector D16 types. |
6664 | if (!StoreVT.isVector()) |
6665 | return VData; |
6666 | |
6667 | SDLoc DL(VData); |
6668 | assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16");
6669 | |
6670 | if (Subtarget->hasUnpackedD16VMem()) { |
6671 | // We need to unpack the packed data to store. |
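// For example, v2f16 data is bitcast to v2i16 and zero-extended to v2i32 so
// each component occupies a full dword; the extend is then unrolled per lane.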
6672 | EVT IntStoreVT = StoreVT.changeTypeToInteger(); |
6673 | SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); |
6674 | |
6675 | EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, |
6676 | StoreVT.getVectorNumElements()); |
6677 | SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData); |
6678 | return DAG.UnrollVectorOp(ZExt.getNode()); |
6679 | } |
6680 | |
6681 | assert(isTypeLegal(StoreVT));
6682 | return VData; |
6683 | } |
6684 | |
6685 | SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, |
6686 | SelectionDAG &DAG) const { |
6687 | SDLoc DL(Op); |
6688 | SDValue Chain = Op.getOperand(0); |
6689 | unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); |
6690 | MachineFunction &MF = DAG.getMachineFunction(); |
6691 | |
6692 | switch (IntrinsicID) { |
6693 | case Intrinsic::amdgcn_exp_compr: { |
6694 | SDValue Src0 = Op.getOperand(4); |
6695 | SDValue Src1 = Op.getOperand(5); |
6696 | // Hack around illegal type on SI by directly selecting it. |
6697 | if (isTypeLegal(Src0.getValueType())) |
6698 | return SDValue(); |
6699 | |
6700 | const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6)); |
6701 | SDValue Undef = DAG.getUNDEF(MVT::f32); |
6702 | const SDValue Ops[] = { |
6703 | Op.getOperand(2), // tgt |
6704 | DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), // src0 |
6705 | DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), // src1 |
6706 | Undef, // src2 |
6707 | Undef, // src3 |
6708 | Op.getOperand(7), // vm |
6709 | DAG.getTargetConstant(1, DL, MVT::i1), // compr |
6710 | Op.getOperand(3), // en |
6711 | Op.getOperand(0) // Chain |
6712 | }; |
6713 | |
6714 | unsigned Opc = Done->isNullValue() ? AMDGPU::EXP : AMDGPU::EXP_DONE; |
6715 | return SDValue(DAG.getMachineNode(Opc, DL, Op->getVTList(), Ops), 0); |
6716 | } |
6717 | case Intrinsic::amdgcn_s_barrier: { |
6718 | if (getTargetMachine().getOptLevel() > CodeGenOpt::None) { |
6719 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
6720 | unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second; |
6721 | if (WGSize <= ST.getWavefrontSize()) |
6722 | return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other, |
6723 | Op.getOperand(0)), 0); |
6724 | } |
6725 | return SDValue(); |
6726 | }
6727 | case Intrinsic::amdgcn_tbuffer_store: { |
6728 | SDValue VData = Op.getOperand(2); |
6729 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); |
6730 | if (IsD16) |
6731 | VData = handleD16VData(VData, DAG); |
6732 | unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue(); |
6733 | unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue(); |
6734 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue(); |
6735 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue(); |
6736 | unsigned IdxEn = 1; |
6737 | if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4))) |
6738 | IdxEn = Idx->getZExtValue() != 0; |
6739 | SDValue Ops[] = { |
6740 | Chain, |
6741 | VData, // vdata |
6742 | Op.getOperand(3), // rsrc |
6743 | Op.getOperand(4), // vindex |
6744 | Op.getOperand(5), // voffset |
6745 | Op.getOperand(6), // soffset |
6746 | Op.getOperand(7), // offset |
6747 | DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format |
6748 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy |
6749 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6750 | }; |
6751 | unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : |
6752 | AMDGPUISD::TBUFFER_STORE_FORMAT; |
6753 | MemSDNode *M = cast<MemSDNode>(Op); |
6754 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, |
6755 | M->getMemoryVT(), M->getMemOperand()); |
6756 | } |
6757 | |
6758 | case Intrinsic::amdgcn_struct_tbuffer_store: { |
6759 | SDValue VData = Op.getOperand(2); |
6760 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); |
6761 | if (IsD16) |
6762 | VData = handleD16VData(VData, DAG); |
6763 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); |
6764 | SDValue Ops[] = { |
6765 | Chain, |
6766 | VData, // vdata |
6767 | Op.getOperand(3), // rsrc |
6768 | Op.getOperand(4), // vindex |
6769 | Offsets.first, // voffset |
6770 | Op.getOperand(6), // soffset |
6771 | Offsets.second, // offset |
6772 | Op.getOperand(7), // format |
6773 | Op.getOperand(8), // cachepolicy, swizzled buffer |
6774 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6775 | }; |
6776 | unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : |
6777 | AMDGPUISD::TBUFFER_STORE_FORMAT; |
6778 | MemSDNode *M = cast<MemSDNode>(Op); |
6779 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, |
6780 | M->getMemoryVT(), M->getMemOperand()); |
6781 | } |
6782 | |
6783 | case Intrinsic::amdgcn_raw_tbuffer_store: { |
6784 | SDValue VData = Op.getOperand(2); |
6785 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); |
6786 | if (IsD16) |
6787 | VData = handleD16VData(VData, DAG); |
6788 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); |
6789 | SDValue Ops[] = { |
6790 | Chain, |
6791 | VData, // vdata |
6792 | Op.getOperand(3), // rsrc |
6793 | DAG.getConstant(0, DL, MVT::i32), // vindex |
6794 | Offsets.first, // voffset |
6795 | Op.getOperand(5), // soffset |
6796 | Offsets.second, // offset |
6797 | Op.getOperand(6), // format |
6798 | Op.getOperand(7), // cachepolicy, swizzled buffer |
6799 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6800 | }; |
6801 | unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : |
6802 | AMDGPUISD::TBUFFER_STORE_FORMAT; |
6803 | MemSDNode *M = cast<MemSDNode>(Op); |
6804 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, |
6805 | M->getMemoryVT(), M->getMemOperand()); |
6806 | } |
6807 | |
6808 | case Intrinsic::amdgcn_buffer_store: |
6809 | case Intrinsic::amdgcn_buffer_store_format: { |
6810 | SDValue VData = Op.getOperand(2); |
6811 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); |
6812 | if (IsD16) |
6813 | VData = handleD16VData(VData, DAG); |
6814 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); |
6815 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); |
6816 | unsigned IdxEn = 1; |
6817 | if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4))) |
6818 | IdxEn = Idx->getZExtValue() != 0; |
6819 | SDValue Ops[] = { |
6820 | Chain, |
6821 | VData, |
6822 | Op.getOperand(3), // rsrc |
6823 | Op.getOperand(4), // vindex |
6824 | SDValue(), // voffset -- will be set by setBufferOffsets |
6825 | SDValue(), // soffset -- will be set by setBufferOffsets |
6826 | SDValue(), // offset -- will be set by setBufferOffsets |
6827 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy |
6828 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen |
6829 | }; |
6830 | unsigned Offset = setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); |
6831 | // We don't know the offset if vindex is non-zero, so clear it. |
6832 | if (IdxEn) |
6833 | Offset = 0; |
6834 | unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ? |
6835 | AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; |
6836 | Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; |
6837 | MemSDNode *M = cast<MemSDNode>(Op); |
6838 | M->getMemOperand()->setOffset(Offset); |
6839 | |
6840 | // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics |
6841 | EVT VDataType = VData.getValueType().getScalarType(); |
6842 | if (VDataType == MVT::i8 || VDataType == MVT::i16) |
6843 | return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); |
6844 | |
6845 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, |
6846 | M->getMemoryVT(), M->getMemOperand()); |
6847 | } |
6848 | |
6849 | case Intrinsic::amdgcn_raw_buffer_store: |
6850 | case Intrinsic::amdgcn_raw_buffer_store_format: { |
6851 | const bool IsFormat = |
6852 | IntrinsicID == Intrinsic::amdgcn_raw_buffer_store_format; |
6853 | |
6854 | SDValue VData = Op.getOperand(2); |
6855 | EVT VDataVT = VData.getValueType(); |
6856 | EVT EltType = VDataVT.getScalarType(); |
6857 | bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16); |
6858 | if (IsD16) |
6859 | VData = handleD16VData(VData, DAG); |
6860 | |
6861 | if (!isTypeLegal(VDataVT)) { |
6862 | VData = |
6863 | DAG.getNode(ISD::BITCAST, DL, |
6864 | getEquivalentMemType(*DAG.getContext(), VDataVT), VData); |
6865 | } |
6866 | |
6867 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); |
6868 | SDValue Ops[] = { |
6869 | Chain, |
6870 | VData, |
6871 | Op.getOperand(3), // rsrc |
6872 | DAG.getConstant(0, DL, MVT::i32), // vindex |
6873 | Offsets.first, // voffset |
6874 | Op.getOperand(5), // soffset |
6875 | Offsets.second, // offset |
6876 | Op.getOperand(6), // cachepolicy, swizzled buffer |
6877 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen |
6878 | }; |
6879 | unsigned Opc = |
6880 | IsFormat ? AMDGPUISD::BUFFER_STORE_FORMAT : AMDGPUISD::BUFFER_STORE; |
6881 | Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; |
6882 | MemSDNode *M = cast<MemSDNode>(Op); |
6883 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6])); |
6884 | |
6885 | // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics |
6886 | if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32) |
6887 | return handleByteShortBufferStores(DAG, VDataVT, DL, Ops, M); |
6888 | |
6889 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, |
6890 | M->getMemoryVT(), M->getMemOperand()); |
6891 | } |
6892 | |
6893 | case Intrinsic::amdgcn_struct_buffer_store: |
6894 | case Intrinsic::amdgcn_struct_buffer_store_format: { |
6895 | const bool IsFormat = |
6896 | IntrinsicID == Intrinsic::amdgcn_struct_buffer_store_format; |
6897 | |
6898 | SDValue VData = Op.getOperand(2); |
6899 | EVT VDataVT = VData.getValueType(); |
6900 | EVT EltType = VDataVT.getScalarType(); |
6901 | bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16); |
6902 | |
6903 | if (IsD16) |
6904 | VData = handleD16VData(VData, DAG); |
6905 | |
6906 | if (!isTypeLegal(VDataVT)) { |
6907 | VData = |
6908 | DAG.getNode(ISD::BITCAST, DL, |
6909 | getEquivalentMemType(*DAG.getContext(), VDataVT), VData); |
6910 | } |
6911 | |
6912 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); |
6913 | SDValue Ops[] = { |
6914 | Chain, |
6915 | VData, |
6916 | Op.getOperand(3), // rsrc |
6917 | Op.getOperand(4), // vindex |
6918 | Offsets.first, // voffset |
6919 | Op.getOperand(6), // soffset |
6920 | Offsets.second, // offset |
6921 | Op.getOperand(7), // cachepolicy, swizzled buffer |
6922 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen |
6923 | }; |
6924 | unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ? |
6925 | AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; |
6926 | Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; |
6927 | MemSDNode *M = cast<MemSDNode>(Op); |
6928 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6], |
6929 | Ops[3])); |
6930 | |
6931 | // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics |
6932 | EVT VDataType = VData.getValueType().getScalarType(); |
6933 | if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32) |
6934 | return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); |
6935 | |
6936 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, |
6937 | M->getMemoryVT(), M->getMemOperand()); |
6938 | } |
6939 | |
6940 | case Intrinsic::amdgcn_buffer_atomic_fadd: { |
6941 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); |
6942 | unsigned IdxEn = 1; |
6943 | if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4))) |
6944 | IdxEn = Idx->getZExtValue() != 0; |
6945 | SDValue Ops[] = { |
6946 | Chain, |
6947 | Op.getOperand(2), // vdata |
6948 | Op.getOperand(3), // rsrc |
6949 | Op.getOperand(4), // vindex |
6950 | SDValue(), // voffset -- will be set by setBufferOffsets |
6951 | SDValue(), // soffset -- will be set by setBufferOffsets |
6952 | SDValue(), // offset -- will be set by setBufferOffsets |
6953 | DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy |
6954 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen |
6955 | }; |
6956 | unsigned Offset = setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); |
6957 | // We don't know the offset if vindex is non-zero, so clear it. |
6958 | if (IdxEn) |
6959 | Offset = 0; |
6960 | EVT VT = Op.getOperand(2).getValueType(); |
6961 | |
6962 | auto *M = cast<MemSDNode>(Op); |
6963 | M->getMemOperand()->setOffset(Offset); |
6964 | unsigned Opcode = VT.isVector() ? AMDGPUISD::BUFFER_ATOMIC_PK_FADD |
6965 | : AMDGPUISD::BUFFER_ATOMIC_FADD; |
6966 | |
6967 | return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, |
6968 | M->getMemOperand()); |
6969 | } |
6970 | |
6971 | case Intrinsic::amdgcn_global_atomic_fadd: { |
6972 | SDValue Ops[] = { |
6973 | Chain, |
6974 | Op.getOperand(2), // ptr |
6975 | Op.getOperand(3) // vdata |
6976 | }; |
6977 | EVT VT = Op.getOperand(3).getValueType(); |
6978 | |
6979 | auto *M = cast<MemSDNode>(Op); |
6980 | if (VT.isVector()) { |
6981 | return DAG.getMemIntrinsicNode( |
6982 | AMDGPUISD::ATOMIC_PK_FADD, DL, Op->getVTList(), Ops, VT, |
6983 | M->getMemOperand()); |
6984 | } |
6985 | |
6986 | return DAG.getAtomic(ISD::ATOMIC_LOAD_FADD, DL, VT, |
6987 | DAG.getVTList(VT, MVT::Other), Ops, |
6988 | M->getMemOperand()).getValue(1); |
6989 | } |
6990 | case Intrinsic::amdgcn_end_cf: |
6991 | return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other, |
6992 | Op->getOperand(2), Chain), 0); |
6993 | |
6994 | default: { |
6995 | if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = |
6996 | AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) |
6997 | return lowerImage(Op, ImageDimIntr, DAG); |
6998 | |
6999 | return Op; |
7000 | } |
7001 | } |
7002 | } |
7003 | |
7004 | // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args: |
7005 | // offset (the offset that is included in bounds checking and swizzling, to be |
7006 | // split between the instruction's voffset and immoffset fields) and soffset |
7007 | // (the offset that is excluded from bounds checking and swizzling, to go in |
7008 | // the instruction's soffset field). This function takes the first kind of |
7009 | // offset and figures out how to split it between voffset and immoffset. |
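// For example, a combined offset of 6000 yields voffset = 4096 and
// immoffset = 1904, since the immoffset field holds at most MaxImm = 4095.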
7010 | std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets( |
7011 | SDValue Offset, SelectionDAG &DAG) const { |
7012 | SDLoc DL(Offset); |
7013 | const unsigned MaxImm = 4095; |
7014 | SDValue N0 = Offset; |
7015 | ConstantSDNode *C1 = nullptr; |
7016 | |
7017 | if ((C1 = dyn_cast<ConstantSDNode>(N0))) |
7018 | N0 = SDValue(); |
7019 | else if (DAG.isBaseWithConstantOffset(N0)) { |
7020 | C1 = cast<ConstantSDNode>(N0.getOperand(1)); |
7021 | N0 = N0.getOperand(0); |
7022 | } |
7023 | |
7024 | if (C1) { |
7025 | unsigned ImmOffset = C1->getZExtValue(); |
7026 | // If the immediate value is too big for the immoffset field, keep only its
7027 | // low 12 bits there and move the rest (a multiple of 4096) into the value
7028 | // that is copied/added for the voffset field, which then stands more chance
7029 | // of being CSEd with the copy/add for another similar load/store.
7030 | // However, do not round down to a multiple of 4096 if that multiple is a
7031 | // negative number, as it appears to be illegal to have a negative offset
7032 | // in the vgpr, even if adding the immediate offset makes it positive.
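// (E.g. for ImmOffset = 0x80000fff the rounded-down part 0x80000000 is
// negative as an i32, so the entire value is moved into the voffset instead.)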
7033 | unsigned Overflow = ImmOffset & ~MaxImm; |
7034 | ImmOffset -= Overflow; |
7035 | if ((int32_t)Overflow < 0) { |
7036 | Overflow += ImmOffset; |
7037 | ImmOffset = 0; |
7038 | } |
7039 | C1 = cast<ConstantSDNode>(DAG.getTargetConstant(ImmOffset, DL, MVT::i32)); |
7040 | if (Overflow) { |
7041 | auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32); |
7042 | if (!N0) |
7043 | N0 = OverflowVal; |
7044 | else { |
7045 | SDValue Ops[] = { N0, OverflowVal }; |
7046 | N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops); |
7047 | } |
7048 | } |
7049 | } |
7050 | if (!N0) |
7051 | N0 = DAG.getConstant(0, DL, MVT::i32); |
7052 | if (!C1) |
7053 | C1 = cast<ConstantSDNode>(DAG.getTargetConstant(0, DL, MVT::i32)); |
7054 | return {N0, SDValue(C1, 0)}; |
7055 | } |
7056 | |
7057 | // Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the |
7058 | // three offsets (voffset, soffset and instoffset) into the SDValue[3] array |
7059 | // pointed to by Offsets. |
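// Returns the total constant byte offset (SOffset + ImmOffset) when the
// combined offset is a known constant that can be split, and 0 otherwise.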
7060 | unsigned SITargetLowering::setBufferOffsets(SDValue CombinedOffset, |
7061 | SelectionDAG &DAG, SDValue *Offsets, |
7062 | unsigned Align) const { |
7063 | SDLoc DL(CombinedOffset); |
7064 | if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) { |
7065 | uint32_t Imm = C->getZExtValue(); |
7066 | uint32_t SOffset, ImmOffset; |
7067 | if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, Align)) { |
7068 | Offsets[0] = DAG.getConstant(0, DL, MVT::i32); |
7069 | Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32); |
7070 | Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32); |
7071 | return SOffset + ImmOffset; |
7072 | } |
7073 | } |
7074 | if (DAG.isBaseWithConstantOffset(CombinedOffset)) { |
7075 | SDValue N0 = CombinedOffset.getOperand(0); |
7076 | SDValue N1 = CombinedOffset.getOperand(1); |
7077 | uint32_t SOffset, ImmOffset; |
7078 | int Offset = cast<ConstantSDNode>(N1)->getSExtValue(); |
7079 | if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset, |
7080 | Subtarget, Align)) { |
7081 | Offsets[0] = N0; |
7082 | Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32); |
7083 | Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32); |
7084 | return 0; |
7085 | } |
7086 | } |
7087 | Offsets[0] = CombinedOffset; |
7088 | Offsets[1] = DAG.getConstant(0, DL, MVT::i32); |
7089 | Offsets[2] = DAG.getTargetConstant(0, DL, MVT::i32); |
7090 | return 0; |
7091 | } |
7092 | |
7093 | // Handle 8 bit and 16 bit buffer loads |
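// The value is loaded as an i32 via BUFFER_LOAD_UBYTE/USHORT, truncated to
// the integer equivalent of LoadVT, and bitcast back (e.g. i32 -> i16 -> f16).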
7094 | SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG, |
7095 | EVT LoadVT, SDLoc DL, |
7096 | ArrayRef<SDValue> Ops, |
7097 | MemSDNode *M) const { |
7098 | EVT IntVT = LoadVT.changeTypeToInteger(); |
7099 | unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ? |
7100 | AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT; |
7101 | |
7102 | SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other); |
7103 | SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList, |
7104 | Ops, IntVT, |
7105 | M->getMemOperand()); |
7106 | SDValue LoadVal = DAG.getNode(ISD::TRUNCATE, DL, IntVT, BufferLoad); |
7107 | LoadVal = DAG.getNode(ISD::BITCAST, DL, LoadVT, LoadVal); |
7108 | |
7109 | return DAG.getMergeValues({LoadVal, BufferLoad.getValue(1)}, DL); |
7110 | } |
7111 | |
7112 | // Handle 8 bit and 16 bit buffer stores |
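// f16 data is first bitcast to i16; the value is then any-extended to i32
// and stored via BUFFER_STORE_BYTE/SHORT using the first nine operands.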
7113 | SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG, |
7114 | EVT VDataType, SDLoc DL, |
7115 | SDValue Ops[], |
7116 | MemSDNode *M) const { |
7117 | if (VDataType == MVT::f16) |
7118 | Ops[1] = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Ops[1]); |
7119 | |
7120 | SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]); |
7121 | Ops[1] = BufferStoreExt; |
7122 | unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE : |
7123 | AMDGPUISD::BUFFER_STORE_SHORT; |
7124 | ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9); |
7125 | return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType, |
7126 | M->getMemOperand()); |
7127 | } |
7128 | |
7129 | static SDValue getLoadExtOrTrunc(SelectionDAG &DAG, |
7130 | ISD::LoadExtType ExtType, SDValue Op, |
7131 | const SDLoc &SL, EVT VT) { |
7132 | if (VT.bitsLT(Op.getValueType())) |
7133 | return DAG.getNode(ISD::TRUNCATE, SL, VT, Op); |
7134 | |
7135 | switch (ExtType) { |
7136 | case ISD::SEXTLOAD: |
7137 | return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op); |
7138 | case ISD::ZEXTLOAD: |
7139 | return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op); |
7140 | case ISD::EXTLOAD: |
7141 | return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op); |
7142 | case ISD::NON_EXTLOAD: |
7143 | return Op; |
7144 | } |
7145 | |
7146 | llvm_unreachable("invalid ext type")::llvm::llvm_unreachable_internal("invalid ext type", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 7146); |
7147 | } |
7148 | |
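// Widen a sub-dword uniform load from constant (or invariant global) memory
// to a full 32-bit load and narrow the result back, so it remains eligible
// for selection to a scalar memory instruction.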
7149 | SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const { |
7150 | SelectionDAG &DAG = DCI.DAG; |
7151 | if (Ld->getAlignment() < 4 || Ld->isDivergent()) |
7152 | return SDValue(); |
7153 | |
7154 | // FIXME: Constant loads should all be marked invariant. |
7155 | unsigned AS = Ld->getAddressSpace(); |
7156 | if (AS != AMDGPUAS::CONSTANT_ADDRESS && |
7157 | AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT && |
7158 | (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant())) |
7159 | return SDValue(); |
7160 | |
7161 | // Don't do this early, since it may interfere with adjacent load merging for |
7162 | // illegal types. We can avoid losing alignment information for exotic types |
7163 | // pre-legalize. |
7164 | EVT MemVT = Ld->getMemoryVT(); |
7165 | if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) || |
7166 | MemVT.getSizeInBits() >= 32) |
7167 | return SDValue(); |
7168 | |
7169 | SDLoc SL(Ld); |
7170 | |
7171 | assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
7172 | "unexpected vector extload");
7173 | |
7174 | // TODO: Drop only high part of range. |
7175 | SDValue Ptr = Ld->getBasePtr(); |
7176 | SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, |
7177 | MVT::i32, SL, Ld->getChain(), Ptr, |
7178 | Ld->getOffset(), |
7179 | Ld->getPointerInfo(), MVT::i32, |
7180 | Ld->getAlignment(), |
7181 | Ld->getMemOperand()->getFlags(), |
7182 | Ld->getAAInfo(), |
7183 | nullptr); // Drop ranges |
7184 | |
7185 | EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()); |
7186 | if (MemVT.isFloatingPoint()) { |
7187 | assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
7188 | "unexpected fp extload");
7189 | TruncVT = MemVT.changeTypeToInteger(); |
7190 | } |
7191 | |
7192 | SDValue Cvt = NewLoad; |
7193 | if (Ld->getExtensionType() == ISD::SEXTLOAD) { |
7194 | Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad, |
7195 | DAG.getValueType(TruncVT)); |
7196 | } else if (Ld->getExtensionType() == ISD::ZEXTLOAD || |
7197 | Ld->getExtensionType() == ISD::NON_EXTLOAD) { |
7198 | Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT); |
7199 | } else { |
7200 | assert(Ld->getExtensionType() == ISD::EXTLOAD);
7201 | } |
7202 | |
7203 | EVT VT = Ld->getValueType(0); |
7204 | EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); |
7205 | |
7206 | DCI.AddToWorklist(Cvt.getNode()); |
7207 | |
7208 | // We may need to handle exotic cases, such as i16->i64 extloads, so insert |
7209 | // the appropriate extension from the 32-bit load. |
7210 | Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT); |
7211 | DCI.AddToWorklist(Cvt.getNode()); |
7212 | |
7213 | // Handle conversion back to floating point if necessary. |
7214 | Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt); |
7215 | |
7216 | return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL); |
7217 | } |
7218 | |
7219 | SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { |
7220 | SDLoc DL(Op); |
7221 | LoadSDNode *Load = cast<LoadSDNode>(Op); |
7222 | ISD::LoadExtType ExtType = Load->getExtensionType(); |
7223 | EVT MemVT = Load->getMemoryVT(); |
7224 | |
7225 | if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) { |
7226 | if (MemVT == MVT::i16 && isTypeLegal(MVT::i16)) |
7227 | return SDValue(); |
7228 | |
7229 | // FIXME: Copied from PPC |
7230 | // First, load into 32 bits, then truncate back to the original type.
7231 | |
7232 | SDValue Chain = Load->getChain(); |
7233 | SDValue BasePtr = Load->getBasePtr(); |
7234 | MachineMemOperand *MMO = Load->getMemOperand(); |
7235 | |
7236 | EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16; |
7237 | |
7238 | SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, |
7239 | BasePtr, RealMemVT, MMO); |
7240 | |
7241 | if (!MemVT.isVector()) { |
7242 | SDValue Ops[] = { |
7243 | DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD), |
7244 | NewLD.getValue(1) |
7245 | }; |
7246 | |
7247 | return DAG.getMergeValues(Ops, DL); |
7248 | } |
7249 | |
7250 | SmallVector<SDValue, 3> Elts; |
7251 | for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) { |
7252 | SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD, |
7253 | DAG.getConstant(I, DL, MVT::i32)); |
7254 | |
7255 | Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt)); |
7256 | } |
7257 | |
7258 | SDValue Ops[] = { |
7259 | DAG.getBuildVector(MemVT, DL, Elts), |
7260 | NewLD.getValue(1) |
7261 | }; |
7262 | |
7263 | return DAG.getMergeValues(Ops, DL); |
7264 | } |
7265 | |
7266 | if (!MemVT.isVector()) |
7267 | return SDValue(); |
7268 | |
7269 | assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
7270 | "Custom lowering for non-i32 vectors hasn't been implemented.");
7271 | |
7272 | if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), |
7273 | MemVT, *Load->getMemOperand())) { |
7274 | SDValue Ops[2]; |
7275 | std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG); |
7276 | return DAG.getMergeValues(Ops, DL); |
7277 | } |
7278 | |
7279 | unsigned Alignment = Load->getAlignment(); |
7280 | unsigned AS = Load->getAddressSpace(); |
7281 | if (Subtarget->hasLDSMisalignedBug() && |
7282 | AS == AMDGPUAS::FLAT_ADDRESS && |
7283 | Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) { |
7284 | return SplitVectorLoad(Op, DAG); |
7285 | } |
7286 | |
7287 | MachineFunction &MF = DAG.getMachineFunction(); |
7288 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
7289 | // If there is a possibility that flat instructions access scratch memory,
7290 | // then we need to use the same legalization rules we use for private.
7291 | if (AS == AMDGPUAS::FLAT_ADDRESS && |
7292 | !Subtarget->hasMultiDwordFlatScratchAddressing()) |
7293 | AS = MFI->hasFlatScratchInit() ? |
7294 | AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; |
7295 | |
7296 | unsigned NumElements = MemVT.getVectorNumElements(); |
7297 | |
7298 | if (AS == AMDGPUAS::CONSTANT_ADDRESS || |
7299 | AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) { |
7300 | if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) { |
7301 | if (MemVT.isPow2VectorType()) |
7302 | return SDValue(); |
7303 | if (NumElements == 3) |
7304 | return WidenVectorLoad(Op, DAG); |
7305 | return SplitVectorLoad(Op, DAG); |
7306 | } |
7307 | // Non-uniform loads will be selected to MUBUF instructions, so they |
7308 | // have the same legalization requirements as global and private |
7309 | // loads. |
7310 | // |
7311 | } |
7312 | |
7313 | if (AS == AMDGPUAS::CONSTANT_ADDRESS || |
7314 | AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || |
7315 | AS == AMDGPUAS::GLOBAL_ADDRESS) { |
7316 | if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() && |
7317 | !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) && |
7318 | Alignment >= 4 && NumElements < 32) { |
7319 | if (MemVT.isPow2VectorType()) |
7320 | return SDValue(); |
7321 | if (NumElements == 3) |
7322 | return WidenVectorLoad(Op, DAG); |
7323 | return SplitVectorLoad(Op, DAG); |
7324 | } |
7325 | // Non-uniform loads will be selected to MUBUF instructions, so they |
7326 | // have the same legalization requirements as global and private |
7327 | // loads. |
7328 | // |
7329 | } |
7330 | if (AS == AMDGPUAS::CONSTANT_ADDRESS || |
7331 | AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || |
7332 | AS == AMDGPUAS::GLOBAL_ADDRESS || |
7333 | AS == AMDGPUAS::FLAT_ADDRESS) { |
7334 | if (NumElements > 4) |
7335 | return SplitVectorLoad(Op, DAG); |
7336 | // v3 loads not supported on SI. |
7337 | if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) |
7338 | return WidenVectorLoad(Op, DAG); |
7339 | // v3 and v4 loads are supported for private and global memory. |
7340 | return SDValue(); |
7341 | } |
7342 | if (AS == AMDGPUAS::PRIVATE_ADDRESS) { |
7343 | // Depending on the setting of the private_element_size field in the |
7344 | // resource descriptor, we can only make private accesses up to a certain |
7345 | // size. |
7346 | switch (Subtarget->getMaxPrivateElementSize()) { |
7347 | case 4: { |
7348 | SDValue Ops[2]; |
7349 | std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG); |
7350 | return DAG.getMergeValues(Ops, DL); |
7351 | } |
7352 | case 8: |
7353 | if (NumElements > 2) |
7354 | return SplitVectorLoad(Op, DAG); |
7355 | return SDValue(); |
7356 | case 16: |
7357 | // Same as global/flat |
7358 | if (NumElements > 4) |
7359 | return SplitVectorLoad(Op, DAG); |
7360 | // v3 loads not supported on SI. |
7361 | if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) |
7362 | return WidenVectorLoad(Op, DAG); |
7363 | return SDValue(); |
7364 | default: |
7365 | llvm_unreachable("unsupported private_element_size")::llvm::llvm_unreachable_internal("unsupported private_element_size" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 7365); |
7366 | } |
7367 | } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) { |
7368 | // Use ds_read_b128 if possible. |
7369 | if (Subtarget->useDS128() && Load->getAlignment() >= 16 && |
7370 | MemVT.getStoreSize() == 16) |
7371 | return SDValue(); |
7372 | |
7373 | if (NumElements > 2) |
7374 | return SplitVectorLoad(Op, DAG); |
7375 | |
7376 | // SI has a hardware bug in the LDS / GDS bounds checking: if the base
7377 | // address is negative, then the instruction is incorrectly treated as |
7378 | // out-of-bounds even if base + offsets is in bounds. Split vectorized |
7379 | // loads here to avoid emitting ds_read2_b32. We may re-combine the |
7380 | // load later in the SILoadStoreOptimizer. |
7381 | if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && |
7382 | NumElements == 2 && MemVT.getStoreSize() == 8 && |
7383 | Load->getAlignment() < 8) { |
7384 | return SplitVectorLoad(Op, DAG); |
7385 | } |
7386 | } |
7387 | return SDValue(); |
7388 | } |
7389 | |
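// Lower a 64-bit select by bitcasting the operands to v2i32 and selecting
// each 32-bit half separately; there is no 64-bit v_cndmask.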
7390 | SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { |
7391 | EVT VT = Op.getValueType(); |
7392 | assert(VT.getSizeInBits() == 64);
7393 | |
7394 | SDLoc DL(Op); |
7395 | SDValue Cond = Op.getOperand(0); |
7396 | |
7397 | SDValue Zero = DAG.getConstant(0, DL, MVT::i32); |
7398 | SDValue One = DAG.getConstant(1, DL, MVT::i32); |
7399 | |
7400 | SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); |
7401 | SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); |
7402 | |
7403 | SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); |
7404 | SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); |
7405 | |
7406 | SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); |
7407 | |
7408 | SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); |
7409 | SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); |
7410 | |
7411 | SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); |
7412 | |
7413 | SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); |
7414 | return DAG.getNode(ISD::BITCAST, DL, VT, Res); |
7415 | } |
7416 | |
7417 | // Catch division cases where we can use shortcuts with rcp and rsq |
7418 | // instructions. |
7419 | SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op, |
7420 | SelectionDAG &DAG) const { |
7421 | SDLoc SL(Op); |
7422 | SDValue LHS = Op.getOperand(0); |
7423 | SDValue RHS = Op.getOperand(1); |
7424 | EVT VT = Op.getValueType(); |
7425 | const SDNodeFlags Flags = Op->getFlags(); |
7426 | |
7427 | bool AllowInaccurateRcp = DAG.getTarget().Options.UnsafeFPMath || |
7428 | Flags.hasApproximateFuncs(); |
7429 | |
7430 | // Without !fpmath accuracy information, we can't do more because we don't |
7431 | // know exactly whether rcp is accurate enough to meet !fpmath requirement. |
7432 | if (!AllowInaccurateRcp) |
7433 | return SDValue(); |
7434 | |
7435 | if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { |
7436 | if (CLHS->isExactlyValue(1.0)) { |
7437 | // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to |
7438 | // the CI documentation has a worst case error of 1 ulp. |
7439 | // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to |
7440 | // use it as long as we aren't trying to use denormals. |
7441 | // |
7442 | // v_rcp_f16 and v_rsq_f16 DO support denormals. |
7443 | |
7444 | // 1.0 / sqrt(x) -> rsq(x) |
7445 | |
7446 | // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP |
7447 | // error seems really high at 2^29 ULP. |
7448 | if (RHS.getOpcode() == ISD::FSQRT) |
7449 | return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); |
7450 | |
7451 | // 1.0 / x -> rcp(x) |
7452 | return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); |
7453 | } |
7454 | |
7455 | // Same as for 1.0, but expand the sign out of the constant. |
7456 | if (CLHS->isExactlyValue(-1.0)) { |
7457 | // -1.0 / x -> rcp (fneg x) |
7458 | SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); |
7459 | return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS); |
7460 | } |
7461 | } |
7462 | |
7463 | // Turn into multiply by the reciprocal. |
7464 | // x / y -> x * (1.0 / y) |
7465 | SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); |
7466 | return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags); |
7467 | } |
7468 | |
7469 | static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, |
7470 | EVT VT, SDValue A, SDValue B, SDValue GlueChain) { |
7471 | if (GlueChain->getNumValues() <= 1) { |
7472 | return DAG.getNode(Opcode, SL, VT, A, B); |
7473 | } |
7474 | |
7475 | assert(GlueChain->getNumValues() == 3);
7476 | |
7477 | SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); |
7478 | switch (Opcode) { |
7479 | default: llvm_unreachable("no chain equivalent for opcode")::llvm::llvm_unreachable_internal("no chain equivalent for opcode" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 7479); |
7480 | case ISD::FMUL: |
7481 | Opcode = AMDGPUISD::FMUL_W_CHAIN; |
7482 | break; |
7483 | } |
7484 | |
7485 | return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, |
7486 | GlueChain.getValue(2)); |
7487 | } |
7488 | |
7489 | static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, |
7490 | EVT VT, SDValue A, SDValue B, SDValue C, |
7491 | SDValue GlueChain) { |
7492 | if (GlueChain->getNumValues() <= 1) { |
7493 | return DAG.getNode(Opcode, SL, VT, A, B, C); |
7494 | } |
7495 | |
7496 | assert(GlueChain->getNumValues() == 3);
7497 | |
7498 | SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); |
7499 | switch (Opcode) { |
7500 | default: llvm_unreachable("no chain equivalent for opcode")::llvm::llvm_unreachable_internal("no chain equivalent for opcode" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 7500); |
7501 | case ISD::FMA: |
7502 | Opcode = AMDGPUISD::FMA_W_CHAIN; |
7503 | break; |
7504 | } |
7505 | |
7506 | return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C, |
7507 | GlueChain.getValue(2)); |
7508 | } |
7509 | |
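// f16 division is performed in f32: extend both operands, multiply the
// numerator by rcp of the denominator, round the quotient back to f16, and
// correct the result with div_fixup.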
7510 | SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const { |
7511 | if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) |
7512 | return FastLowered; |
7513 | |
7514 | SDLoc SL(Op); |
7515 | SDValue Src0 = Op.getOperand(0); |
7516 | SDValue Src1 = Op.getOperand(1); |
7517 | |
7518 | SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); |
7519 | SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); |
7520 | |
7521 | SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1); |
7522 | SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1); |
7523 | |
7524 | SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32); |
7525 | SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag); |
7526 | |
7527 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0); |
7528 | } |
7529 | |
7530 | // Faster 2.5 ULP division that does not support denormals. |
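// If |RHS| > 2^96 (0x6f800000), the denominator is pre-scaled by 2^-32
// (0x2f800000) so rcp does not flush to zero, and the quotient is rescaled
// by the same factor afterwards.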
7531 | SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { |
7532 | SDLoc SL(Op); |
7533 | SDValue LHS = Op.getOperand(1); |
7534 | SDValue RHS = Op.getOperand(2); |
7535 | |
7536 | SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); |
7537 | |
7538 | const APFloat K0Val(BitsToFloat(0x6f800000)); |
7539 | const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); |
7540 | |
7541 | const APFloat K1Val(BitsToFloat(0x2f800000)); |
7542 | const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); |
7543 | |
7544 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); |
7545 | |
7546 | EVT SetCCVT = |
7547 | getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); |
7548 | |
7549 | SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); |
7550 | |
7551 | SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); |
7552 | |
7553 | // TODO: Should this propagate fast-math-flags? |
7554 | r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); |
7555 | |
7556 | // rcp does not support denormals. |
7557 | SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); |
7558 | |
7559 | SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); |
7560 | |
7561 | return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); |
7562 | } |
7563 | |
7564 | // Returns immediate value for setting the F32 denorm mode when using the |
7565 | // S_DENORM_MODE instruction. |
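// The returned immediate packs the requested FP32 mode into bits [1:0] and
// preserves the function's FP64/FP16 denormal mode in bits [3:2].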
7566 | static const SDValue getSPDenormModeValue(int SPDenormMode, SelectionDAG &DAG, |
7567 | const SDLoc &SL, const GCNSubtarget *ST) { |
7568 | assert(ST->hasDenormModeInst() && "Requires S_DENORM_MODE");
7569 | int DPDenormModeDefault = hasFP64FP16Denormals(DAG.getMachineFunction()) |
7570 | ? FP_DENORM_FLUSH_NONE
7571 | : FP_DENORM_FLUSH_IN_FLUSH_OUT;
7572 | |
7573 | int Mode = SPDenormMode | (DPDenormModeDefault << 2); |
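     | // The immediate packs the f32 mode into bits [1:0] and the f64/f16 mode
     | // into bits [3:2]; e.g. both set to FP_DENORM_FLUSH_NONE (3) yields
     | // 0b1111.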
7574 | return DAG.getTargetConstant(Mode, SL, MVT::i32); |
7575 | } |
7576 | |
7577 | SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { |
7578 | if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) |
7579 | return FastLowered; |
7580 | |
7581 | SDLoc SL(Op); |
7582 | SDValue LHS = Op.getOperand(0); |
7583 | SDValue RHS = Op.getOperand(1); |
7584 | |
7585 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); |
7586 | |
7587 | SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1); |
7588 | |
7589 | SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, |
7590 | RHS, RHS, LHS); |
7591 | SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, |
7592 | LHS, RHS, LHS); |
7593 | |
7594 | // Denominator is scaled to not be denormal, so using rcp is ok. |
7595 | SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, |
7596 | DenominatorScaled); |
7597 | SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, |
7598 | DenominatorScaled); |
7599 | |
7600 | const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE | |
7601 | (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) | |
7602 | (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_); |
7603 | const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16); |
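     | // This is the s_setreg field descriptor hwreg(HW_REG_MODE, 4, 2): the
     | // register id in the low bits, bit offset 4, and the width stored as
     | // width-1 (hence WIDTH_M1). MODE bits [5:4] are the f32 denorm controls.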
7604 | |
7605 | const bool HasFP32Denormals = hasFP32Denormals(DAG.getMachineFunction()); |
7606 | |
7607 | if (!HasFP32Denormals) { |
7608 | SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); |
7609 | |
7610 | SDValue EnableDenorm; |
7611 | if (Subtarget->hasDenormModeInst()) { |
7612 | const SDValue EnableDenormValue = |
7613 | getSPDenormModeValue(FP_DENORM_FLUSH_NONE, DAG, SL, Subtarget);
7614 | |
7615 | EnableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, BindParamVTs, |
7616 | DAG.getEntryNode(), EnableDenormValue); |
7617 | } else { |
7618 | const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
7619 | SL, MVT::i32); |
7620 | EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs, |
7621 | DAG.getEntryNode(), EnableDenormValue, |
7622 | BitField); |
7623 | } |
7624 | |
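     | // Merge the chain and glue from the mode switch into the first operand
     | // of the FMA sequence so it cannot be scheduled before denormals are
     | // enabled.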
7625 | SDValue Ops[3] = { |
7626 | NegDivScale0, |
7627 | EnableDenorm.getValue(0), |
7628 | EnableDenorm.getValue(1) |
7629 | }; |
7630 | |
7631 | NegDivScale0 = DAG.getMergeValues(Ops, SL); |
7632 | } |
7633 | |
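     | // A sketch of the refinement sequence below, writing a' and b' for the
     | // scaled numerator and denominator and r0 = rcp(b'):
     | //   e0 = 1 - b'*r0   (Fma0)     r1 = r0 + r0*e0   (Fma1)
     | //   q0 = a'*r1       (Mul)      e1 = a' - b'*q0   (Fma2)
     | //   q1 = q0 + r1*e1  (Fma3)     e2 = a' - b'*q1   (Fma4)
     | // DIV_FMAS folds the final error term back into the quotient and
     | // DIV_FIXUP undoes the scaling and patches the special cases.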
7634 | SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, |
7635 | ApproxRcp, One, NegDivScale0); |
7636 | |
7637 | SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, |
7638 | ApproxRcp, Fma0); |
7639 | |
7640 | SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled, |
7641 | Fma1, Fma1); |
7642 | |
7643 | SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, |
7644 | NumeratorScaled, Mul); |
7645 | |
7646 | SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul, Fma2); |
7647 | |
7648 | SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, |
7649 | NumeratorScaled, Fma3); |
7650 | |
7651 | if (!HasFP32Denormals) { |
7652 | SDValue DisableDenorm; |
7653 | if (Subtarget->hasDenormModeInst()) { |
7654 | const SDValue DisableDenormValue = |
7655 | getSPDenormModeValue(FP_DENORM_FLUSH_IN_FLUSH_OUT, DAG, SL, Subtarget);
7656 | |
7657 | DisableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, MVT::Other, |
7658 | Fma4.getValue(1), DisableDenormValue, |
7659 | Fma4.getValue(2)); |
7660 | } else { |
7661 | const SDValue DisableDenormValue = |
7662 | DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
7663 | |
7664 | DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other, |
7665 | Fma4.getValue(1), DisableDenormValue, |
7666 | BitField, Fma4.getValue(2)); |
7667 | } |
7668 | |
7669 | SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, |
7670 | DisableDenorm, DAG.getRoot()); |
7671 | DAG.setRoot(OutputChain); |
7672 | } |
7673 | |
7674 | SDValue Scale = NumeratorScaled.getValue(1); |
7675 | SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, |
7676 | Fma4, Fma1, Fma3, Scale); |
7677 | |
7678 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS); |
7679 | } |
7680 | |
7681 | SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { |
7682 | if (DAG.getTarget().Options.UnsafeFPMath) |
7683 | return lowerFastUnsafeFDIV(Op, DAG); |
7684 | |
7685 | SDLoc SL(Op); |
7686 | SDValue X = Op.getOperand(0); |
7687 | SDValue Y = Op.getOperand(1); |
7688 | |
7689 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); |
7690 | |
7691 | SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1); |
7692 | |
7693 | SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X); |
7694 | |
7695 | SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0); |
7696 | |
7697 | SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0); |
7698 | |
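     | // Same shape as the f32 path but with an extra reciprocal refinement
     | // step for double precision; with b' the scaled denominator and
     | // r0 = rcp(b'):
     | //   e0 = 1 - b'*r0   (Fma0)     r1 = r0 + r0*e0   (Fma1)
     | //   e1 = 1 - b'*r1   (Fma2)     r2 = r1 + r1*e1   (Fma3)
     | //   q0 = a'*r2       (Mul)      e2 = a' - b'*q0   (Fma4)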
7699 | SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One); |
7700 | |
7701 | SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp); |
7702 | |
7703 | SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One); |
7704 | |
7705 | SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X); |
7706 | |
7707 | SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1); |
7708 | SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3); |
7709 | |
7710 | SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64, |
7711 | NegDivScale0, Mul, DivScale1); |
7712 | |
7713 | SDValue Scale; |
7714 | |
7715 | if (!Subtarget->hasUsableDivScaleConditionOutput()) { |
7716 | // Work around a hardware bug on SI where the condition output from div_scale
7717 | // is not usable.
7718 | |
7719 | const SDValue Hi = DAG.getConstant(1, SL, MVT::i32); |
7720 | |
7721 | // Figure out the scale to use for div_fmas.
7722 | SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); |
7723 | SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y); |
7724 | SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0); |
7725 | SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1); |
7726 | |
7727 | SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi); |
7728 | SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi); |
7729 | |
7730 | SDValue Scale0Hi |
7731 | = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi); |
7732 | SDValue Scale1Hi |
7733 | = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi); |
7734 | |
7735 | SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ); |
7736 | SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ); |
7737 | Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen); |
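     | // Each setcc above is true when the corresponding div_scale result kept
     | // the same high word (exponent) as its input, i.e. no scaling happened;
     | // the xor therefore reconstructs the "scaling was applied to exactly one
     | // side" flag that VCC would normally carry. (An editorial reading of the
     | // workaround, not ISA-documented wording.)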
7738 | } else { |
7739 | Scale = DivScale1.getValue(1); |
7740 | } |
7741 | |
7742 | SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64, |
7743 | Fma4, Fma3, Mul, Scale); |
7744 | |
7745 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X); |
7746 | } |
7747 | |
7748 | SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { |
7749 | EVT VT = Op.getValueType(); |
7750 | |
7751 | if (VT == MVT::f32) |
7752 | return LowerFDIV32(Op, DAG); |
7753 | |
7754 | if (VT == MVT::f64) |
7755 | return LowerFDIV64(Op, DAG); |
7756 | |
7757 | if (VT == MVT::f16) |
7758 | return LowerFDIV16(Op, DAG); |
7759 | |
7760 | llvm_unreachable("Unexpected type for fdiv")::llvm::llvm_unreachable_internal("Unexpected type for fdiv", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 7760); |
7761 | } |
7762 | |
7763 | SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { |
7764 | SDLoc DL(Op); |
7765 | StoreSDNode *Store = cast<StoreSDNode>(Op); |
7766 | EVT VT = Store->getMemoryVT(); |
7767 | |
7768 | if (VT == MVT::i1) { |
7769 | return DAG.getTruncStore(Store->getChain(), DL, |
7770 | DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), |
7771 | Store->getBasePtr(), MVT::i1, Store->getMemOperand()); |
7772 | } |
7773 | |
7774 | assert(VT.isVector() &&
7775 | Store->getValue().getValueType().getScalarType() == MVT::i32);
7776 | |
7777 | if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), |
7778 | VT, *Store->getMemOperand())) { |
7779 | return expandUnalignedStore(Store, DAG); |
7780 | } |
7781 | |
7782 | unsigned AS = Store->getAddressSpace(); |
7783 | if (Subtarget->hasLDSMisalignedBug() && |
7784 | AS == AMDGPUAS::FLAT_ADDRESS && |
7785 | Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) { |
7786 | return SplitVectorStore(Op, DAG); |
7787 | } |
7788 | |
7789 | MachineFunction &MF = DAG.getMachineFunction(); |
7790 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
7791 | // If there is a possibility that flat instructions access scratch memory
7792 | // then we need to use the same legalization rules we use for private. |
7793 | if (AS == AMDGPUAS::FLAT_ADDRESS && |
7794 | !Subtarget->hasMultiDwordFlatScratchAddressing()) |
7795 | AS = MFI->hasFlatScratchInit() ? |
7796 | AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; |
7797 | |
7798 | unsigned NumElements = VT.getVectorNumElements(); |
7799 | if (AS == AMDGPUAS::GLOBAL_ADDRESS || |
7800 | AS == AMDGPUAS::FLAT_ADDRESS) { |
7801 | if (NumElements > 4) |
7802 | return SplitVectorStore(Op, DAG); |
7803 | // v3 stores not supported on SI. |
7804 | if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) |
7805 | return SplitVectorStore(Op, DAG); |
7806 | return SDValue(); |
7807 | } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { |
7808 | switch (Subtarget->getMaxPrivateElementSize()) { |
7809 | case 4: |
7810 | return scalarizeVectorStore(Store, DAG); |
7811 | case 8: |
7812 | if (NumElements > 2) |
7813 | return SplitVectorStore(Op, DAG); |
7814 | return SDValue(); |
7815 | case 16: |
7816 | if (NumElements > 4 || NumElements == 3) |
7817 | return SplitVectorStore(Op, DAG); |
7818 | return SDValue(); |
7819 | default: |
7820 | llvm_unreachable("unsupported private_element_size")::llvm::llvm_unreachable_internal("unsupported private_element_size" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 7820); |
7821 | } |
7822 | } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) { |
7823 | // Use ds_write_b128 if possible. |
7824 | if (Subtarget->useDS128() && Store->getAlignment() >= 16 && |
7825 | VT.getStoreSize() == 16 && NumElements != 3) |
7826 | return SDValue(); |
7827 | |
7828 | if (NumElements > 2) |
7829 | return SplitVectorStore(Op, DAG); |
7830 | |
7831 | // SI has a hardware bug in the LDS / GDS bounds checking: if the base
7832 | // address is negative, then the instruction is incorrectly treated as |
7833 | // out-of-bounds even if base + offsets is in bounds. Split vectorized |
7834 | // stores here to avoid emitting ds_write2_b32. We may re-combine the |
7835 | // store later in the SILoadStoreOptimizer. |
7836 | if (!Subtarget->hasUsableDSOffset() && |
7837 | NumElements == 2 && VT.getStoreSize() == 8 && |
7838 | Store->getAlignment() < 8) { |
7839 | return SplitVectorStore(Op, DAG); |
7840 | } |
7841 | |
7842 | return SDValue(); |
7843 | } else { |
7844 | llvm_unreachable("unhandled address space")::llvm::llvm_unreachable_internal("unhandled address space", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 7844); |
7845 | } |
7846 | } |
7847 | |
7848 | SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { |
7849 | SDLoc DL(Op); |
7850 | EVT VT = Op.getValueType(); |
7851 | SDValue Arg = Op.getOperand(0); |
7852 | SDValue TrigVal; |
7853 | |
7854 | // TODO: Should this propagate fast-math-flags? |
7855 | |
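     | // The hardware sin/cos units consume revolutions rather than radians,
     | // hence the multiply by 1/(2*pi) below; on subtargets with a reduced
     | // input range, FRACT additionally wraps the product into [0, 1). E.g.
     | // sin(x) lowers to SIN_HW(fract(x * 0.15915494)) on those targets.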
7856 | SDValue OneOver2Pi = DAG.getConstantFP(0.5 / M_PI, DL, VT);
7857 | |
7858 | if (Subtarget->hasTrigReducedRange()) { |
7859 | SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi); |
7860 | TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal); |
7861 | } else { |
7862 | TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi); |
7863 | } |
7864 | |
7865 | switch (Op.getOpcode()) { |
7866 | case ISD::FCOS: |
7867 | return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal); |
7868 | case ISD::FSIN: |
7869 | return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal); |
7870 | default: |
7871 | llvm_unreachable("Wrong trig opcode")::llvm::llvm_unreachable_internal("Wrong trig opcode", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 7871); |
7872 | } |
7873 | } |
7874 | |
7875 | SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { |
7876 | AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op); |
7877 | assert(AtomicNode->isCompareAndSwap());
7878 | unsigned AS = AtomicNode->getAddressSpace(); |
7879 | |
7880 | // No custom lowering required for local address space |
7881 | if (!isFlatGlobalAddrSpace(AS)) |
7882 | return Op; |
7883 | |
7884 | // Non-local address space requires custom lowering for atomic compare |
7885 | // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2 |
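     | // Note: the build_vector below packs the new value first and the compare
     | // value second; this matches the operand order the cmpswap machine
     | // instructions expect (an inference from this lowering, not restated
     | // from the ISA docs).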
7886 | SDLoc DL(Op); |
7887 | SDValue ChainIn = Op.getOperand(0); |
7888 | SDValue Addr = Op.getOperand(1); |
7889 | SDValue Old = Op.getOperand(2); |
7890 | SDValue New = Op.getOperand(3); |
7891 | EVT VT = Op.getValueType(); |
7892 | MVT SimpleVT = VT.getSimpleVT(); |
7893 | MVT VecType = MVT::getVectorVT(SimpleVT, 2); |
7894 | |
7895 | SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old}); |
7896 | SDValue Ops[] = { ChainIn, Addr, NewOld }; |
7897 | |
7898 | return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(), |
7899 | Ops, VT, AtomicNode->getMemOperand()); |
7900 | } |
7901 | |
7902 | //===----------------------------------------------------------------------===// |
7903 | // Custom DAG optimizations |
7904 | //===----------------------------------------------------------------------===// |
7905 | |
7906 | SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, |
7907 | DAGCombinerInfo &DCI) const { |
7908 | EVT VT = N->getValueType(0); |
7909 | EVT ScalarVT = VT.getScalarType(); |
7910 | if (ScalarVT != MVT::f32) |
7911 | return SDValue(); |
7912 | |
7913 | SelectionDAG &DAG = DCI.DAG; |
7914 | SDLoc DL(N); |
7915 | |
7916 | SDValue Src = N->getOperand(0); |
7917 | EVT SrcVT = Src.getValueType(); |
7918 | |
7919 | // TODO: We could try to match extracting the higher bytes, which would be |
7920 | // easier if i8 vectors weren't promoted to i32 vectors, particularly after |
7921 | // types are legalized. v4i8 -> v4f32 is probably the only case to worry |
7922 | // about in practice. |
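     | // E.g. (f32 (uint_to_fp (and i32:x, 0xff))) becomes (CVT_F32_UBYTE0 x)
     | // since the mask proves the top 24 bits are zero.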
7923 | if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) { |
7924 | if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { |
7925 | SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src); |
7926 | DCI.AddToWorklist(Cvt.getNode()); |
7927 | return Cvt; |
7928 | } |
7929 | } |
7930 | |
7931 | return SDValue(); |
7932 | } |
7933 | |
7934 | // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2) |
7935 | |
7936 | // This is a variant of |
7937 | // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2), |
7938 | // |
7939 | // The normal DAG combiner will do this, but only if the add has one use since |
7940 | // that would increase the number of instructions. |
7941 | // |
7942 | // This prevents us from seeing a constant offset that can be folded into a |
7943 | // memory instruction's addressing mode. If we know the resulting add offset of |
7944 | // a pointer can be folded into an addressing offset, we can replace the pointer |
7945 | // operand with the add of new constant offset. This eliminates one of the uses, |
7946 | // and may allow the remaining use to also be simplified. |
7947 | // |
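     | // For example, if p feeds two loads:
     | //   (shl (add p, 16), 2) --> (add (shl p, 2), 64)
     | // and the 64 can then fold into each load's immediate offset.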
7948 | SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, |
7949 | unsigned AddrSpace, |
7950 | EVT MemVT, |
7951 | DAGCombinerInfo &DCI) const { |
7952 | SDValue N0 = N->getOperand(0); |
7953 | SDValue N1 = N->getOperand(1); |
7954 | |
7955 | // We only do this to handle cases where it's profitable when there are |
7956 | // multiple uses of the add, so defer to the standard combine. |
7957 | if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) || |
7958 | N0->hasOneUse()) |
7959 | return SDValue(); |
7960 | |
7961 | const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); |
7962 | if (!CN1) |
7963 | return SDValue(); |
7964 | |
7965 | const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); |
7966 | if (!CAdd) |
7967 | return SDValue(); |
7968 | |
7969 | // If the resulting offset is too large, we can't fold it into the addressing |
7970 | // mode offset. |
7971 | APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); |
7972 | Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext()); |
7973 | |
7974 | AddrMode AM; |
7975 | AM.HasBaseReg = true; |
7976 | AM.BaseOffs = Offset.getSExtValue(); |
7977 | if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace)) |
7978 | return SDValue(); |
7979 | |
7980 | SelectionDAG &DAG = DCI.DAG; |
7981 | SDLoc SL(N); |
7982 | EVT VT = N->getValueType(0); |
7983 | |
7984 | SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); |
7985 | SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32); |
7986 | |
7987 | SDNodeFlags Flags; |
7988 | Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() && |
7989 | (N0.getOpcode() == ISD::OR || |
7990 | N0->getFlags().hasNoUnsignedWrap())); |
7991 | |
7992 | return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags); |
7993 | } |
7994 | |
7995 | SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N, |
7996 | DAGCombinerInfo &DCI) const { |
7997 | SDValue Ptr = N->getBasePtr(); |
7998 | SelectionDAG &DAG = DCI.DAG; |
7999 | SDLoc SL(N); |
8000 | |
8001 | // TODO: We could also do this for multiplies. |
8002 | if (Ptr.getOpcode() == ISD::SHL) { |
8003 | SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(), |
8004 | N->getMemoryVT(), DCI); |
8005 | if (NewPtr) { |
8006 | SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end()); |
8007 | |
8008 | NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr; |
8009 | return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0); |
8010 | } |
8011 | } |
8012 | |
8013 | return SDValue(); |
8014 | } |
8015 | |
8016 | static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) { |
8017 | return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) || |
8018 | (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) || |
8019 | (Opc == ISD::XOR && Val == 0); |
8020 | } |
8021 | |
8022 | // Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This |
8023 | // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit |
8024 | // integer combine opportunities since most 64-bit operations are decomposed |
8025 | // this way. TODO: We won't want this for SALU especially if it is an inline |
8026 | // immediate. |
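     | // For example, (and i64:x, 0x00000000ffffffff) splits into an AND of the
     | // low half with -1 (which folds away) and an AND of the high half with 0,
     | // so no 64-bit immediate ever needs to be materialized.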
8027 | SDValue SITargetLowering::splitBinaryBitConstantOp( |
8028 | DAGCombinerInfo &DCI, |
8029 | const SDLoc &SL, |
8030 | unsigned Opc, SDValue LHS, |
8031 | const ConstantSDNode *CRHS) const { |
8032 | uint64_t Val = CRHS->getZExtValue(); |
8033 | uint32_t ValLo = Lo_32(Val); |
8034 | uint32_t ValHi = Hi_32(Val); |
8035 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
8036 | |
8037 | if ((bitOpWithConstantIsReducible(Opc, ValLo) || |
8038 | bitOpWithConstantIsReducible(Opc, ValHi)) || |
8039 | (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) { |
8040 | // If we need to materialize a 64-bit immediate, it will be split up later |
8041 | // anyway. Avoid creating the harder to understand 64-bit immediate |
8042 | // materialization. |
8043 | return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi); |
8044 | } |
8045 | |
8046 | return SDValue(); |
8047 | } |
8048 | |
8049 | // Returns true if the argument is a boolean value which is not serialized into
8050 | // memory or an argument and does not require v_cndmask_b32 to be deserialized.
8051 | static bool isBoolSGPR(SDValue V) { |
8052 | if (V.getValueType() != MVT::i1) |
8053 | return false; |
8054 | switch (V.getOpcode()) { |
8055 | default: break; |
8056 | case ISD::SETCC: |
8057 | case ISD::AND: |
8058 | case ISD::OR: |
8059 | case ISD::XOR: |
8060 | case AMDGPUISD::FP_CLASS: |
8061 | return true; |
8062 | } |
8063 | return false; |
8064 | } |
8065 | |
8066 | // If a constant has all zeroes or all ones within each byte return it. |
8067 | // Otherwise return 0. |
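     | // E.g. 0x00ff00ff is returned as-is (every byte is all-zeros or
     | // all-ones), while 0x00f000ff returns 0 because byte 2 selects only part
     | // of a byte.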
8068 | static uint32_t getConstantPermuteMask(uint32_t C) { |
8069 | // 0xff for any zero byte in the mask |
8070 | uint32_t ZeroByteMask = 0; |
8071 | if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff; |
8072 | if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00; |
8073 | if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000; |
8074 | if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000; |
8075 | uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte |
8076 | if ((NonZeroByteMask & C) != NonZeroByteMask) |
8077 | return 0; // Partial bytes selected. |
8078 | return C; |
8079 | } |
8080 | |
8081 | // Check if a node selects whole bytes from its operand 0 starting at a byte |
8082 | // boundary while masking the rest. Returns select mask as in the v_perm_b32 |
8083 | // or -1 if it did not succeed.
8084 | // Note byte select encoding: |
8085 | // value 0-3 selects corresponding source byte; |
8086 | // value 0xc selects zero; |
8087 | // value 0xff selects 0xff. |
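     | // Worked examples for the shift cases below:
     | //   (shl x, 16) -> mask 0x01000c0c: bytes 1,0 of x land in bytes 3,2 and
     | //   the low bytes select zero;
     | //   (srl x, 8)  -> mask 0x0c030201: bytes 3..1 of x shift down one byte
     | //   and the top byte selects zero.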
8088 | static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) { |
8089 | assert(V.getValueSizeInBits() == 32);
8090 | |
8091 | if (V.getNumOperands() != 2) |
8092 | return ~0; |
8093 | |
8094 | ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1)); |
8095 | if (!N1) |
8096 | return ~0; |
8097 | |
8098 | uint32_t C = N1->getZExtValue(); |
8099 | |
8100 | switch (V.getOpcode()) { |
8101 | default: |
8102 | break; |
8103 | case ISD::AND: |
8104 | if (uint32_t ConstMask = getConstantPermuteMask(C)) { |
8105 | return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask); |
8106 | } |
8107 | break; |
8108 | |
8109 | case ISD::OR: |
8110 | if (uint32_t ConstMask = getConstantPermuteMask(C)) { |
8111 | return (0x03020100 & ~ConstMask) | ConstMask; |
8112 | } |
8113 | break; |
8114 | |
8115 | case ISD::SHL: |
8116 | if (C % 8) |
8117 | return ~0; |
8118 | |
8119 | return uint32_t((0x030201000c0c0c0cull << C) >> 32); |
8120 | |
8121 | case ISD::SRL: |
8122 | if (C % 8) |
8123 | return ~0; |
8124 | |
8125 | return uint32_t(0x0c0c0c0c03020100ull >> C); |
8126 | } |
8127 | |
8128 | return ~0; |
8129 | } |
8130 | |
8131 | SDValue SITargetLowering::performAndCombine(SDNode *N, |
8132 | DAGCombinerInfo &DCI) const { |
8133 | if (DCI.isBeforeLegalize()) |
8134 | return SDValue(); |
8135 | |
8136 | SelectionDAG &DAG = DCI.DAG; |
8137 | EVT VT = N->getValueType(0); |
8138 | SDValue LHS = N->getOperand(0); |
8139 | SDValue RHS = N->getOperand(1); |
8140 | |
8141 | |
8142 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); |
8143 | if (VT == MVT::i64 && CRHS) { |
8144 | if (SDValue Split |
8145 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS)) |
8146 | return Split; |
8147 | } |
8148 | |
8149 | if (CRHS && VT == MVT::i32) { |
8150 | // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb |
8151 | // nb = number of trailing zeroes in mask |
8152 | // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass, |
8153 | // given that we are selecting 8 or 16 bit fields starting at byte boundary. |
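     | // E.g. mask 0xff00 applied to (srl x, 8): Bits = 8, NB = 8, Shift = 8,
     | // so Offset = 16 and the combine emits (shl (bfe x, 16, 8), 8).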
8154 | uint64_t Mask = CRHS->getZExtValue(); |
8155 | unsigned Bits = countPopulation(Mask); |
8156 | if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL && |
8157 | (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) { |
8158 | if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) { |
8159 | unsigned Shift = CShift->getZExtValue(); |
8160 | unsigned NB = CRHS->getAPIntValue().countTrailingZeros(); |
8161 | unsigned Offset = NB + Shift; |
8162 | if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary. |
8163 | SDLoc SL(N); |
8164 | SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, |
8165 | LHS->getOperand(0), |
8166 | DAG.getConstant(Offset, SL, MVT::i32), |
8167 | DAG.getConstant(Bits, SL, MVT::i32)); |
8168 | EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits); |
8169 | SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE, |
8170 | DAG.getValueType(NarrowVT)); |
8171 | SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext, |
8172 | DAG.getConstant(NB, SDLoc(CRHS), MVT::i32)); |
8173 | return Shl; |
8174 | } |
8175 | } |
8176 | } |
8177 | |
8178 | // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2) |
8179 | if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM && |
8180 | isa<ConstantSDNode>(LHS.getOperand(2))) { |
8181 | uint32_t Sel = getConstantPermuteMask(Mask); |
8182 | if (!Sel) |
8183 | return SDValue(); |
8184 | |
8185 | // Select 0xc for all zero bytes |
8186 | Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c); |
8187 | SDLoc DL(N); |
8188 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), |
8189 | LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); |
8190 | } |
8191 | } |
8192 | |
8193 | // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> |
8194 | // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) |
8195 | if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) { |
8196 | ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); |
8197 | ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); |
8198 | |
8199 | SDValue X = LHS.getOperand(0); |
8200 | SDValue Y = RHS.getOperand(0); |
8201 | if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) |
8202 | return SDValue(); |
8203 | |
8204 | if (LCC == ISD::SETO) { |
8205 | if (X != LHS.getOperand(1)) |
8206 | return SDValue(); |
8207 | |
8208 | if (RCC == ISD::SETUNE) { |
8209 | const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); |
8210 | if (!C1 || !C1->isInfinity() || C1->isNegative()) |
8211 | return SDValue(); |
8212 | |
8213 | const uint32_t Mask = SIInstrFlags::N_NORMAL | |
8214 | SIInstrFlags::N_SUBNORMAL | |
8215 | SIInstrFlags::N_ZERO | |
8216 | SIInstrFlags::P_ZERO | |
8217 | SIInstrFlags::P_SUBNORMAL | |
8218 | SIInstrFlags::P_NORMAL; |
8219 | |
8220 | static_assert(((~(SIInstrFlags::S_NAN | |
8221 | SIInstrFlags::Q_NAN | |
8222 | SIInstrFlags::N_INFINITY | |
8223 | SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, |
8224 | "mask not equal"); |
8225 | |
8226 | SDLoc DL(N); |
8227 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, |
8228 | X, DAG.getConstant(Mask, DL, MVT::i32)); |
8229 | } |
8230 | } |
8231 | } |
8232 | |
8233 | if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS) |
8234 | std::swap(LHS, RHS); |
8235 | |
8236 | if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS && |
8237 | RHS.hasOneUse()) { |
8238 | ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); |
8239 | // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan) |
8240 | // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan) |
8241 | const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); |
8242 | if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask && |
8243 | (RHS.getOperand(0) == LHS.getOperand(0) && |
8244 | LHS.getOperand(0) == LHS.getOperand(1))) { |
8245 | const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN; |
8246 | unsigned NewMask = LCC == ISD::SETO ? |
8247 | Mask->getZExtValue() & ~OrdMask : |
8248 | Mask->getZExtValue() & OrdMask; |
8249 | |
8250 | SDLoc DL(N); |
8251 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0), |
8252 | DAG.getConstant(NewMask, DL, MVT::i32)); |
8253 | } |
8254 | } |
8255 | |
8256 | if (VT == MVT::i32 && |
8257 | (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) { |
8258 | // and x, (sext cc from i1) => select cc, x, 0 |
8259 | if (RHS.getOpcode() != ISD::SIGN_EXTEND) |
8260 | std::swap(LHS, RHS); |
8261 | if (isBoolSGPR(RHS.getOperand(0))) |
8262 | return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0), |
8263 | LHS, DAG.getConstant(0, SDLoc(N), MVT::i32)); |
8264 | } |
8265 | |
8266 | // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2) |
8267 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
8268 | if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() && |
8269 | N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) { |
8270 | uint32_t LHSMask = getPermuteMask(DAG, LHS); |
8271 | uint32_t RHSMask = getPermuteMask(DAG, RHS); |
8272 | if (LHSMask != ~0u && RHSMask != ~0u) { |
8273 | // Canonicalize the expression in an attempt to have fewer unique masks |
8274 | // and therefore fewer registers used to hold the masks. |
8275 | if (LHSMask > RHSMask) { |
8276 | std::swap(LHSMask, RHSMask); |
8277 | std::swap(LHS, RHS); |
8278 | } |
8279 | |
8280 | // Select 0xc for each lane used from the source operand. Zero bytes have the
8281 | // 0xc mask set, 0xff bytes have 0xff in the mask, and actual lanes are 0-3.
8282 | uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; |
8283 | uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; |
8284 | |
8285 | // Check if we need to combine values from two sources within a byte.
8286 | if (!(LHSUsedLanes & RHSUsedLanes) && |
8287 | // If we select high and lower word keep it for SDWA. |
8288 | // TODO: teach SDWA to work with v_perm_b32 and remove the check. |
8289 | !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) { |
8290 | // Each byte in each mask is either a selector value 0-3, or has higher
8291 | // bits set in one of the masks: 0xff for 0xff, or 0x0c for zero. If 0x0c
8292 | // appears in either mask the result byte must stay 0x0c; otherwise the
8293 | // mask which is not 0xff wins. ANDing both masks gives a correct result,
8294 | // except that 0x0c has to be corrected back to exactly 0x0c.
8295 | uint32_t Mask = LHSMask & RHSMask; |
8296 | for (unsigned I = 0; I < 32; I += 8) { |
8297 | uint32_t ByteSel = 0xff << I; |
8298 | if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c) |
8299 | Mask &= (0x0c << I) & 0xffffffff; |
8300 | } |
8301 | |
8302 | // Add 4 to each active LHS lane. It will not affect any existing 0xff |
8303 | // or 0x0c. |
8304 | uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404); |
8305 | SDLoc DL(N); |
8306 | |
8307 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, |
8308 | LHS.getOperand(0), RHS.getOperand(0), |
8309 | DAG.getConstant(Sel, DL, MVT::i32)); |
8310 | } |
8311 | } |
8312 | } |
8313 | |
8314 | return SDValue(); |
8315 | } |
8316 | |
8317 | SDValue SITargetLowering::performOrCombine(SDNode *N, |
8318 | DAGCombinerInfo &DCI) const { |
8319 | SelectionDAG &DAG = DCI.DAG; |
8320 | SDValue LHS = N->getOperand(0); |
8321 | SDValue RHS = N->getOperand(1); |
8322 | |
8323 | EVT VT = N->getValueType(0); |
8324 | if (VT == MVT::i1) { |
8325 | // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) |
8326 | if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && |
8327 | RHS.getOpcode() == AMDGPUISD::FP_CLASS) { |
8328 | SDValue Src = LHS.getOperand(0); |
8329 | if (Src != RHS.getOperand(0)) |
8330 | return SDValue(); |
8331 | |
8332 | const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); |
8333 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); |
8334 | if (!CLHS || !CRHS) |
8335 | return SDValue(); |
8336 | |
8337 | // Only 10 bits are used. |
8338 | static const uint32_t MaxMask = 0x3ff; |
8339 | |
8340 | uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; |
8341 | SDLoc DL(N); |
8342 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, |
8343 | Src, DAG.getConstant(NewMask, DL, MVT::i32)); |
8344 | } |
8345 | |
8346 | return SDValue(); |
8347 | } |
8348 | |
8349 | // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2) |
8350 | if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() && |
8351 | LHS.getOpcode() == AMDGPUISD::PERM && |
8352 | isa<ConstantSDNode>(LHS.getOperand(2))) { |
8353 | uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1)); |
8354 | if (!Sel) |
8355 | return SDValue(); |
8356 | |
8357 | Sel |= LHS.getConstantOperandVal(2); |
8358 | SDLoc DL(N); |
8359 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), |
8360 | LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); |
8361 | } |
8362 | |
8363 | // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2) |
8364 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
8365 | if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() && |
8366 | N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) { |
8367 | uint32_t LHSMask = getPermuteMask(DAG, LHS); |
8368 | uint32_t RHSMask = getPermuteMask(DAG, RHS); |
8369 | if (LHSMask != ~0u && RHSMask != ~0u) { |
8370 | // Canonicalize the expression in an attempt to have fewer unique masks |
8371 | // and therefore fewer registers used to hold the masks. |
8372 | if (LHSMask > RHSMask) { |
8373 | std::swap(LHSMask, RHSMask); |
8374 | std::swap(LHS, RHS); |
8375 | } |
8376 | |
8377 | // Select 0xc for each lane used from the source operand. Zero bytes have the
8378 | // 0xc mask set, 0xff bytes have 0xff in the mask, and actual lanes are 0-3.
8379 | uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; |
8380 | uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; |
8381 | |
8382 | // Check if we need to combine values from two sources within a byte.
8383 | if (!(LHSUsedLanes & RHSUsedLanes) && |
8384 | // If we select high and lower word keep it for SDWA. |
8385 | // TODO: teach SDWA to work with v_perm_b32 and remove the check. |
8386 | !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) { |
8387 | // Kill zero bytes selected by other mask. Zero value is 0xc. |
8388 | LHSMask &= ~RHSUsedLanes; |
8389 | RHSMask &= ~LHSUsedLanes; |
8390 | // Add 4 to each active LHS lane |
8391 | LHSMask |= LHSUsedLanes & 0x04040404; |
8392 | // Combine masks |
8393 | uint32_t Sel = LHSMask | RHSMask; |
8394 | SDLoc DL(N); |
8395 | |
8396 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, |
8397 | LHS.getOperand(0), RHS.getOperand(0), |
8398 | DAG.getConstant(Sel, DL, MVT::i32)); |
8399 | } |
8400 | } |
8401 | } |
8402 | |
8403 | if (VT != MVT::i64) |
8404 | return SDValue(); |
8405 | |
8406 | // TODO: This could be a generic combine with a predicate for extracting the |
8407 | // high half of an integer being free. |
8408 | |
8409 | // (or i64:x, (zero_extend i32:y)) -> |
8410 | // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x))) |
8411 | if (LHS.getOpcode() == ISD::ZERO_EXTEND && |
8412 | RHS.getOpcode() != ISD::ZERO_EXTEND) |
8413 | std::swap(LHS, RHS); |
8414 | |
8415 | if (RHS.getOpcode() == ISD::ZERO_EXTEND) { |
8416 | SDValue ExtSrc = RHS.getOperand(0); |
8417 | EVT SrcVT = ExtSrc.getValueType(); |
8418 | if (SrcVT == MVT::i32) { |
8419 | SDLoc SL(N); |
8420 | SDValue LowLHS, HiBits; |
8421 | std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); |
8422 | SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); |
8423 | |
8424 | DCI.AddToWorklist(LowOr.getNode()); |
8425 | DCI.AddToWorklist(HiBits.getNode()); |
8426 | |
8427 | SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, |
8428 | LowOr, HiBits); |
8429 | return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); |
8430 | } |
8431 | } |
8432 | |
8433 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
8434 | if (CRHS) { |
8435 | if (SDValue Split |
8436 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS)) |
8437 | return Split; |
8438 | } |
8439 | |
8440 | return SDValue(); |
8441 | } |
8442 | |
8443 | SDValue SITargetLowering::performXorCombine(SDNode *N, |
8444 | DAGCombinerInfo &DCI) const { |
8445 | EVT VT = N->getValueType(0); |
8446 | if (VT != MVT::i64) |
8447 | return SDValue(); |
8448 | |
8449 | SDValue LHS = N->getOperand(0); |
8450 | SDValue RHS = N->getOperand(1); |
8451 | |
8452 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); |
8453 | if (CRHS) { |
8454 | if (SDValue Split |
8455 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS)) |
8456 | return Split; |
8457 | } |
8458 | |
8459 | return SDValue(); |
8460 | } |
8461 | |
8462 | // Instructions that will be lowered with a final instruction that zeros the |
8463 | // high result bits. |
8464 | // XXX - probably only need to list legal operations. |
8465 | static bool fp16SrcZerosHighBits(unsigned Opc) { |
8466 | switch (Opc) { |
8467 | case ISD::FADD: |
8468 | case ISD::FSUB: |
8469 | case ISD::FMUL: |
8470 | case ISD::FDIV: |
8471 | case ISD::FREM: |
8472 | case ISD::FMA: |
8473 | case ISD::FMAD: |
8474 | case ISD::FCANONICALIZE: |
8475 | case ISD::FP_ROUND: |
8476 | case ISD::UINT_TO_FP: |
8477 | case ISD::SINT_TO_FP: |
8478 | case ISD::FABS: |
8479 | // Fabs is lowered to a bit operation, but it's an and which will clear the |
8480 | // high bits anyway. |
8481 | case ISD::FSQRT: |
8482 | case ISD::FSIN: |
8483 | case ISD::FCOS: |
8484 | case ISD::FPOWI: |
8485 | case ISD::FPOW: |
8486 | case ISD::FLOG: |
8487 | case ISD::FLOG2: |
8488 | case ISD::FLOG10: |
8489 | case ISD::FEXP: |
8490 | case ISD::FEXP2: |
8491 | case ISD::FCEIL: |
8492 | case ISD::FTRUNC: |
8493 | case ISD::FRINT: |
8494 | case ISD::FNEARBYINT: |
8495 | case ISD::FROUND: |
8496 | case ISD::FFLOOR: |
8497 | case ISD::FMINNUM: |
8498 | case ISD::FMAXNUM: |
8499 | case AMDGPUISD::FRACT: |
8500 | case AMDGPUISD::CLAMP: |
8501 | case AMDGPUISD::COS_HW: |
8502 | case AMDGPUISD::SIN_HW: |
8503 | case AMDGPUISD::FMIN3: |
8504 | case AMDGPUISD::FMAX3: |
8505 | case AMDGPUISD::FMED3: |
8506 | case AMDGPUISD::FMAD_FTZ: |
8507 | case AMDGPUISD::RCP: |
8508 | case AMDGPUISD::RSQ: |
8509 | case AMDGPUISD::RCP_IFLAG: |
8510 | case AMDGPUISD::LDEXP: |
8511 | return true; |
8512 | default: |
8513 | // fcopysign, select and others may be lowered to 32-bit bit operations |
8514 | // which don't zero the high bits. |
8515 | return false; |
8516 | } |
8517 | } |
8518 | |
8519 | SDValue SITargetLowering::performZeroExtendCombine(SDNode *N, |
8520 | DAGCombinerInfo &DCI) const { |
8521 | if (!Subtarget->has16BitInsts() || |
8522 | DCI.getDAGCombineLevel() < AfterLegalizeDAG) |
8523 | return SDValue(); |
8524 | |
8525 | EVT VT = N->getValueType(0); |
8526 | if (VT != MVT::i32) |
8527 | return SDValue(); |
8528 | |
8529 | SDValue Src = N->getOperand(0); |
8530 | if (Src.getValueType() != MVT::i16) |
8531 | return SDValue(); |
8532 | |
8533 | // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src |
8534 | // FIXME: It is not universally true that the high bits are zeroed on gfx9. |
8535 | if (Src.getOpcode() == ISD::BITCAST) { |
8536 | SDValue BCSrc = Src.getOperand(0); |
8537 | if (BCSrc.getValueType() == MVT::f16 && |
8538 | fp16SrcZerosHighBits(BCSrc.getOpcode())) |
8539 | return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc); |
8540 | } |
8541 | |
8542 | return SDValue(); |
8543 | } |
8544 | |
8545 | SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N, |
8546 | DAGCombinerInfo &DCI) |
8547 | const { |
8548 | SDValue Src = N->getOperand(0); |
8549 | auto *VTSign = cast<VTSDNode>(N->getOperand(1)); |
8550 | |
8551 | if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE && |
8552 | VTSign->getVT() == MVT::i8) || |
8553 | (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT && |
8554 | VTSign->getVT() == MVT::i16)) && |
8555 | Src.hasOneUse()) { |
8556 | auto *M = cast<MemSDNode>(Src); |
8557 | SDValue Ops[] = { |
8558 | Src.getOperand(0), // Chain |
8559 | Src.getOperand(1), // rsrc |
8560 | Src.getOperand(2), // vindex |
8561 | Src.getOperand(3), // voffset |
8562 | Src.getOperand(4), // soffset |
8563 | Src.getOperand(5), // offset |
8564 | Src.getOperand(6), |
8565 | Src.getOperand(7) |
8566 | }; |
8567 | // replace with BUFFER_LOAD_BYTE/SHORT |
8568 | SDVTList ResList = DCI.DAG.getVTList(MVT::i32, |
8569 | Src.getOperand(0).getValueType()); |
8570 | unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ? |
8571 | AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT; |
8572 | SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N), |
8573 | ResList, |
8574 | Ops, M->getMemoryVT(), |
8575 | M->getMemOperand()); |
8576 | return DCI.DAG.getMergeValues({BufferLoadSignExt, |
8577 | BufferLoadSignExt.getValue(1)}, SDLoc(N)); |
8578 | } |
8579 | return SDValue(); |
8580 | } |
8581 | |
8582 | SDValue SITargetLowering::performClassCombine(SDNode *N, |
8583 | DAGCombinerInfo &DCI) const { |
8584 | SelectionDAG &DAG = DCI.DAG; |
8585 | SDValue Mask = N->getOperand(1); |
8586 | |
8587 | // fp_class x, 0 -> false |
8588 | if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { |
8589 | if (CMask->isNullValue()) |
8590 | return DAG.getConstant(0, SDLoc(N), MVT::i1); |
8591 | } |
8592 | |
8593 | if (N->getOperand(0).isUndef()) |
8594 | return DAG.getUNDEF(MVT::i1); |
8595 | |
8596 | return SDValue(); |
8597 | } |
8598 | |
8599 | SDValue SITargetLowering::performRcpCombine(SDNode *N, |
8600 | DAGCombinerInfo &DCI) const { |
8601 | EVT VT = N->getValueType(0); |
8602 | SDValue N0 = N->getOperand(0); |
8603 | |
8604 | if (N0.isUndef()) |
8605 | return N0; |
8606 | |
8607 | if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP || |
8608 | N0.getOpcode() == ISD::SINT_TO_FP)) { |
8609 | return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0, |
8610 | N->getFlags()); |
8611 | } |
8612 | |
8613 | if ((VT == MVT::f32 || VT == MVT::f16) && N0.getOpcode() == ISD::FSQRT) { |
8614 | return DCI.DAG.getNode(AMDGPUISD::RSQ, SDLoc(N), VT, |
8615 | N0.getOperand(0), N->getFlags()); |
8616 | } |
8617 | |
8618 | return AMDGPUTargetLowering::performRcpCombine(N, DCI); |
8619 | } |
8620 | |
8621 | bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op, |
8622 | unsigned MaxDepth) const { |
8623 | unsigned Opcode = Op.getOpcode(); |
8624 | if (Opcode == ISD::FCANONICALIZE) |
8625 | return true; |
8626 | |
8627 | if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) { |
8628 | auto F = CFP->getValueAPF(); |
8629 | if (F.isNaN() && F.isSignaling()) |
8630 | return false; |
8631 | return !F.isDenormal() || denormalsEnabledForType(DAG, Op.getValueType()); |
8632 | } |
8633 | |
8634 | // If source is a result of another standard FP operation it is already in |
8635 | // canonical form. |
8636 | if (MaxDepth == 0) |
8637 | return false; |
8638 | |
8639 | switch (Opcode) { |
8640 | // These will flush denorms if required. |
8641 | case ISD::FADD: |
8642 | case ISD::FSUB: |
8643 | case ISD::FMUL: |
8644 | case ISD::FCEIL: |
8645 | case ISD::FFLOOR: |
8646 | case ISD::FMA: |
8647 | case ISD::FMAD: |
8648 | case ISD::FSQRT: |
8649 | case ISD::FDIV: |
8650 | case ISD::FREM: |
8651 | case ISD::FP_ROUND: |
8652 | case ISD::FP_EXTEND: |
8653 | case AMDGPUISD::FMUL_LEGACY: |
8654 | case AMDGPUISD::FMAD_FTZ: |
8655 | case AMDGPUISD::RCP: |
8656 | case AMDGPUISD::RSQ: |
8657 | case AMDGPUISD::RSQ_CLAMP: |
8658 | case AMDGPUISD::RCP_LEGACY: |
8659 | case AMDGPUISD::RSQ_LEGACY: |
8660 | case AMDGPUISD::RCP_IFLAG: |
8661 | case AMDGPUISD::TRIG_PREOP: |
8662 | case AMDGPUISD::DIV_SCALE: |
8663 | case AMDGPUISD::DIV_FMAS: |
8664 | case AMDGPUISD::DIV_FIXUP: |
8665 | case AMDGPUISD::FRACT: |
8666 | case AMDGPUISD::LDEXP: |
8667 | case AMDGPUISD::CVT_PKRTZ_F16_F32: |
8668 | case AMDGPUISD::CVT_F32_UBYTE0: |
8669 | case AMDGPUISD::CVT_F32_UBYTE1: |
8670 | case AMDGPUISD::CVT_F32_UBYTE2: |
8671 | case AMDGPUISD::CVT_F32_UBYTE3: |
8672 | return true; |
8673 | |
8674 | // It can/will be lowered or combined as a bit operation. |
8675 | // Need to check their input recursively to handle. |
8676 | case ISD::FNEG: |
8677 | case ISD::FABS: |
8678 | case ISD::FCOPYSIGN: |
8679 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); |
8680 | |
8681 | case ISD::FSIN: |
8682 | case ISD::FCOS: |
8683 | case ISD::FSINCOS: |
8684 | return Op.getValueType().getScalarType() != MVT::f16; |
8685 | |
8686 | case ISD::FMINNUM: |
8687 | case ISD::FMAXNUM: |
8688 | case ISD::FMINNUM_IEEE: |
8689 | case ISD::FMAXNUM_IEEE: |
8690 | case AMDGPUISD::CLAMP: |
8691 | case AMDGPUISD::FMED3: |
8692 | case AMDGPUISD::FMAX3: |
8693 | case AMDGPUISD::FMIN3: { |
8694 | // FIXME: Shouldn't treat the generic operations differently based on these.
8695 | // However, we aren't really required to flush the result from
8696 | // minnum/maxnum.
8697 | |
8698 | // snans will be quieted, so we only need to worry about denormals. |
8699 | if (Subtarget->supportsMinMaxDenormModes() || |
8700 | denormalsEnabledForType(DAG, Op.getValueType())) |
8701 | return true; |
8702 | |
8703 | // Flushing may be required. |
8704 | // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms. For such |
8705 | // targets need to check their input recursively. |
8706 | |
8707 | // FIXME: Does this apply with clamp? It's implemented with max. |
8708 | for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) { |
8709 | if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1)) |
8710 | return false; |
8711 | } |
8712 | |
8713 | return true; |
8714 | } |
8715 | case ISD::SELECT: { |
8716 | return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) && |
8717 | isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1); |
8718 | } |
8719 | case ISD::BUILD_VECTOR: { |
8720 | for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { |
8721 | SDValue SrcOp = Op.getOperand(i); |
8722 | if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1)) |
8723 | return false; |
8724 | } |
8725 | |
8726 | return true; |
8727 | } |
8728 | case ISD::EXTRACT_VECTOR_ELT: |
8729 | case ISD::EXTRACT_SUBVECTOR: { |
8730 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); |
8731 | } |
8732 | case ISD::INSERT_VECTOR_ELT: { |
8733 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) && |
8734 | isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1); |
8735 | } |
8736 | case ISD::UNDEF: |
8737 | // Could be anything. |
8738 | return false; |
8739 | |
8740 | case ISD::BITCAST: { |
8741 | // Hack around the mess we make when legalizing extract_vector_elt.
8742 | SDValue Src = Op.getOperand(0); |
8743 | if (Src.getValueType() == MVT::i16 && |
8744 | Src.getOpcode() == ISD::TRUNCATE) { |
8745 | SDValue TruncSrc = Src.getOperand(0); |
8746 | if (TruncSrc.getValueType() == MVT::i32 && |
8747 | TruncSrc.getOpcode() == ISD::BITCAST && |
8748 | TruncSrc.getOperand(0).getValueType() == MVT::v2f16) { |
8749 | return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1); |
8750 | } |
8751 | } |
8752 | |
8753 | return false; |
8754 | } |
8755 | case ISD::INTRINSIC_WO_CHAIN: { |
8756 | unsigned IntrinsicID |
8757 | = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
8758 | // TODO: Handle more intrinsics |
8759 | switch (IntrinsicID) { |
8760 | case Intrinsic::amdgcn_cvt_pkrtz: |
8761 | case Intrinsic::amdgcn_cubeid: |
8762 | case Intrinsic::amdgcn_frexp_mant: |
8763 | case Intrinsic::amdgcn_fdot2: |
8764 | return true; |
8765 | default: |
8766 | break; |
8767 | } |
8768 | |
8769 | LLVM_FALLTHROUGH;
8770 | } |
8771 | default: |
8772 | return denormalsEnabledForType(DAG, Op.getValueType()) && |
8773 | DAG.isKnownNeverSNaN(Op); |
8774 | } |
8775 | |
8776 | llvm_unreachable("invalid operation")::llvm::llvm_unreachable_internal("invalid operation", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 8776); |
8777 | } |
8778 | |
8779 | // Constant fold canonicalize. |
8780 | SDValue SITargetLowering::getCanonicalConstantFP( |
8781 | SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const { |
8782 | // Flush denormals to 0 if not enabled. |
8783 | if (C.isDenormal() && !denormalsEnabledForType(DAG, VT)) |
8784 | return DAG.getConstantFP(0.0, SL, VT); |
8785 | |
8786 | if (C.isNaN()) { |
8787 | APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics()); |
8788 | if (C.isSignaling()) { |
8789 | // Quiet a signaling NaN. |
8790 | // FIXME: Is this supposed to preserve payload bits? |
8791 | return DAG.getConstantFP(CanonicalQNaN, SL, VT); |
8792 | } |
8793 | |
8794 | // Make sure it is the canonical NaN bitpattern. |
8795 | // |
8796 | // TODO: Can we use -1 as the canonical NaN value since it's an inline |
8797 | // immediate? |
8798 | if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt()) |
8799 | return DAG.getConstantFP(CanonicalQNaN, SL, VT); |
8800 | } |
8801 | |
8802 | // Already canonical. |
8803 | return DAG.getConstantFP(C, SL, VT); |
8804 | } |
8805 | |
8806 | static bool vectorEltWillFoldAway(SDValue Op) { |
8807 | return Op.isUndef() || isa<ConstantFPSDNode>(Op); |
8808 | } |
8809 | |
8810 | SDValue SITargetLowering::performFCanonicalizeCombine( |
8811 | SDNode *N, |
8812 | DAGCombinerInfo &DCI) const { |
8813 | SelectionDAG &DAG = DCI.DAG; |
8814 | SDValue N0 = N->getOperand(0); |
8815 | EVT VT = N->getValueType(0); |
8816 | |
8817 | // fcanonicalize undef -> qnan |
8818 | if (N0.isUndef()) { |
8819 | APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT)); |
8820 | return DAG.getConstantFP(QNaN, SDLoc(N), VT); |
8821 | } |
8822 | |
8823 | if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) { |
8824 | EVT VT = N->getValueType(0); |
8825 | return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF()); |
8826 | } |
8827 | |
8828 | // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x), |
8829 | // (fcanonicalize k) |
8830 | // |
8831 | // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0 |
8832 | |
8833 | // TODO: This could be better with wider vectors that will be split to v2f16, |
8834 | // and to consider uses since there aren't that many packed operations. |
8835 | if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 && |
8836 | isTypeLegal(MVT::v2f16)) { |
8837 | SDLoc SL(N); |
8838 | SDValue NewElts[2]; |
8839 | SDValue Lo = N0.getOperand(0); |
8840 | SDValue Hi = N0.getOperand(1); |
8841 | EVT EltVT = Lo.getValueType(); |
8842 | |
8843 | if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) { |
8844 | for (unsigned I = 0; I != 2; ++I) { |
8845 | SDValue Op = N0.getOperand(I); |
8846 | if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) { |
8847 | NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT, |
8848 | CFP->getValueAPF()); |
8849 | } else if (Op.isUndef()) { |
8850 | // Handled below based on what the other operand is. |
8851 | NewElts[I] = Op; |
8852 | } else { |
8853 | NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op); |
8854 | } |
8855 | } |
8856 | |
8857 | // If one half is undef, and one is constant, prefer a splat vector rather
8858 | // than the normal qNaN. If it's a register, prefer 0.0 since that's
8859 | // cheaper to use and may be free with a packed operation.
8860 | if (NewElts[0].isUndef()) {
8862 | NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ?
8863 | NewElts[1] : DAG.getConstantFP(0.0f, SL, EltVT);
8864 | }
8865 | |
8866 | if (NewElts[1].isUndef()) { |
8867 | NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ? |
8868 | NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT); |
8869 | } |
8870 | |
8871 | return DAG.getBuildVector(VT, SL, NewElts); |
8872 | } |
8873 | } |
8874 | |
8875 | unsigned SrcOpc = N0.getOpcode(); |
8876 | |
8877 | // If it's free to do so, push canonicalizes further up the source, which may |
8878 | // find a canonical source. |
8879 | // |
8880 | // TODO: More opcodes. Note this is unsafe for the _ieee minnum/maxnum for
8881 | // sNaNs. |
8882 | if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) { |
8883 | auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1)); |
8884 | if (CRHS && N0.hasOneUse()) { |
8885 | SDLoc SL(N); |
8886 | SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT, |
8887 | N0.getOperand(0)); |
8888 | SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF()); |
8889 | DCI.AddToWorklist(Canon0.getNode()); |
8890 | |
8891 | return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1); |
8892 | } |
8893 | } |
8894 | |
8895 | return isCanonicalized(DAG, N0) ? N0 : SDValue(); |
8896 | } |
8897 | |
8898 | static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { |
8899 | switch (Opc) { |
8900 | case ISD::FMAXNUM: |
8901 | case ISD::FMAXNUM_IEEE: |
8902 | return AMDGPUISD::FMAX3; |
8903 | case ISD::SMAX: |
8904 | return AMDGPUISD::SMAX3; |
8905 | case ISD::UMAX: |
8906 | return AMDGPUISD::UMAX3; |
8907 | case ISD::FMINNUM: |
8908 | case ISD::FMINNUM_IEEE: |
8909 | return AMDGPUISD::FMIN3; |
8910 | case ISD::SMIN: |
8911 | return AMDGPUISD::SMIN3; |
8912 | case ISD::UMIN: |
8913 | return AMDGPUISD::UMIN3; |
8914 | default: |
8915 | llvm_unreachable("Not a min/max opcode")::llvm::llvm_unreachable_internal("Not a min/max opcode", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 8915); |
8916 | } |
8917 | } |
8918 | |
8919 | SDValue SITargetLowering::performIntMed3ImmCombine( |
8920 | SelectionDAG &DAG, const SDLoc &SL, |
8921 | SDValue Op0, SDValue Op1, bool Signed) const { |
8922 | ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1); |
8923 | if (!K1) |
8924 | return SDValue(); |
8925 | |
8926 | ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); |
8927 | if (!K0) |
8928 | return SDValue(); |
8929 | |
8930 | if (Signed) { |
8931 | if (K0->getAPIntValue().sge(K1->getAPIntValue())) |
8932 | return SDValue(); |
8933 | } else { |
8934 | if (K0->getAPIntValue().uge(K1->getAPIntValue())) |
8935 | return SDValue(); |
8936 | } |
8937 | |
8938 | EVT VT = K0->getValueType(0); |
8939 | unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3; |
8940 | if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) { |
8941 | return DAG.getNode(Med3Opc, SL, VT, |
8942 | Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0)); |
8943 | } |
8944 | |
8945 | // If there isn't a 16-bit med3 operation, convert to 32-bit. |
8946 | MVT NVT = MVT::i32; |
8947 | unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
8948 | |
8949 | SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0)); |
8950 | SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1)); |
8951 | SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1); |
8952 | |
8953 | SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3); |
8954 | return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3); |
8955 | } |
8956 | |
8957 | static ConstantFPSDNode *getSplatConstantFP(SDValue Op) { |
8958 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) |
8959 | return C; |
8960 | |
8961 | if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) { |
8962 | if (ConstantFPSDNode *C = BV->getConstantFPSplatNode()) |
8963 | return C; |
8964 | } |
8965 | |
8966 | return nullptr; |
8967 | } |
8968 | |
8969 | SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG, |
8970 | const SDLoc &SL, |
8971 | SDValue Op0, |
8972 | SDValue Op1) const { |
8973 | ConstantFPSDNode *K1 = getSplatConstantFP(Op1); |
8974 | if (!K1) |
8975 | return SDValue(); |
8976 | |
8977 | ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1)); |
8978 | if (!K0) |
8979 | return SDValue(); |
8980 | |
8981 | // Ordered >= (although NaN inputs should have folded away by now). |
8982 | if (K0->getValueAPF() > K1->getValueAPF()) |
8983 | return SDValue(); |
8984 | |
8985 | const MachineFunction &MF = DAG.getMachineFunction(); |
8986 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
8987 | |
8988 | // TODO: Check IEEE bit enabled? |
8989 | EVT VT = Op0.getValueType(); |
8990 | if (Info->getMode().DX10Clamp) { |
8991 | // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the |
8992 | // hardware fmed3 behavior converting to a min. |
8993 | // FIXME: Should this be allowing -0.0? |
8994 | if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0)) |
8995 | return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0)); |
8996 | } |
8997 | |
8998 | // med3 for f16 is only available on gfx9+, and not available for v2f16. |
8999 | if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) { |
9000 | // This isn't safe with signaling NaNs because in IEEE mode, min/max on a |
9001 | // signaling NaN gives a quiet NaN. The quiet NaN input to the min would |
9002 | // then give the other result, which is different from med3 with a NaN |
9003 | // input. |
9004 | SDValue Var = Op0.getOperand(0); |
9005 | if (!DAG.isKnownNeverSNaN(Var)) |
9006 | return SDValue(); |
9007 | |
9008 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
9009 | |
9010 | if ((!K0->hasOneUse() || |
9011 | TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) && |
9012 | (!K1->hasOneUse() || |
9013 | TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) { |
9014 | return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0), |
9015 | Var, SDValue(K0, 0), SDValue(K1, 0)); |
9016 | } |
9017 | } |
9018 | |
9019 | return SDValue(); |
9020 | } |
9021 | |
9022 | SDValue SITargetLowering::performMinMaxCombine(SDNode *N, |
9023 | DAGCombinerInfo &DCI) const { |
9024 | SelectionDAG &DAG = DCI.DAG; |
9025 | |
9026 | EVT VT = N->getValueType(0); |
9027 | unsigned Opc = N->getOpcode(); |
9028 | SDValue Op0 = N->getOperand(0); |
9029 | SDValue Op1 = N->getOperand(1); |
9030 | |
9031 | // Only do this if the inner op has one use since this will just increase
9032 | // register pressure for no benefit. |
9033 | |
9034 | if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY && |
9035 | !VT.isVector() && |
9036 | (VT == MVT::i32 || VT == MVT::f32 || |
9037 | ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) { |
9038 | // max(max(a, b), c) -> max3(a, b, c) |
9039 | // min(min(a, b), c) -> min3(a, b, c) |
9040 | if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { |
9041 | SDLoc DL(N); |
9042 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), |
9043 | DL, |
9044 | N->getValueType(0), |
9045 | Op0.getOperand(0), |
9046 | Op0.getOperand(1), |
9047 | Op1); |
9048 | } |
9049 | |
9050 | // Try commuted. |
9051 | // max(a, max(b, c)) -> max3(a, b, c) |
9052 | // min(a, min(b, c)) -> min3(a, b, c) |
9053 | if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { |
9054 | SDLoc DL(N); |
9055 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), |
9056 | DL, |
9057 | N->getValueType(0), |
9058 | Op0, |
9059 | Op1.getOperand(0), |
9060 | Op1.getOperand(1)); |
9061 | } |
9062 | } |
9063 | |
9064 | // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1) |
9065 | if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) { |
9066 | if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true)) |
9067 | return Med3; |
9068 | } |
9069 | |
9070 | if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { |
9071 | if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false)) |
9072 | return Med3; |
9073 | } |
9074 | |
9075 | // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1) |
9076 | if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) || |
9077 | (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) || |
9078 | (Opc == AMDGPUISD::FMIN_LEGACY && |
9079 | Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) && |
9080 | (VT == MVT::f32 || VT == MVT::f64 || |
9081 | (VT == MVT::f16 && Subtarget->has16BitInsts()) || |
9082 | (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) && |
9083 | Op0.hasOneUse()) { |
9084 | if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1)) |
9085 | return Res; |
9086 | } |
9087 | |
9088 | return SDValue(); |
9089 | } |
9090 | |
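// Return true if (A, B) is the constant pair (0.0, 1.0) in either order,
// i.e. the bounds of a [0, 1] clamp.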
9091 | static bool isClampZeroToOne(SDValue A, SDValue B) { |
9092 | if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) { |
9093 | if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) { |
9094 | // FIXME: Should this be allowing -0.0? |
9095 | return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) || |
9096 | (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0)); |
9097 | } |
9098 | } |
9099 | |
9100 | return false; |
9101 | } |
9102 | |
9103 | // FIXME: Should only worry about snans for version with chain. |
9104 | SDValue SITargetLowering::performFMed3Combine(SDNode *N, |
9105 | DAGCombinerInfo &DCI) const { |
9106 | EVT VT = N->getValueType(0); |
9107 | // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and |
9108 | // NaNs. With a NaN input, the order of the operands may change the result. |
9109 | |
9110 | SelectionDAG &DAG = DCI.DAG; |
9111 | SDLoc SL(N); |
9112 | |
9113 | SDValue Src0 = N->getOperand(0); |
9114 | SDValue Src1 = N->getOperand(1); |
9115 | SDValue Src2 = N->getOperand(2); |
9116 | |
9117 | if (isClampZeroToOne(Src0, Src1)) { |
9118 | // const_a, const_b, x -> clamp is safe in all cases including signaling |
9119 | // NaNs.
9120 | // FIXME: Should this be allowing -0.0? |
9121 | return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2); |
9122 | } |
9123 | |
9124 | const MachineFunction &MF = DAG.getMachineFunction(); |
9125 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
9126 | |
9127 | // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother |
9128 | // handling the no-dx10-clamp case?
9129 | if (Info->getMode().DX10Clamp) { |
9130 | // If NaNs are clamped to 0, we are free to reorder the inputs.
9131 | |
9132 | if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) |
9133 | std::swap(Src0, Src1); |
9134 | |
9135 | if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2)) |
9136 | std::swap(Src1, Src2); |
9137 | |
9138 | if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) |
9139 | std::swap(Src0, Src1); |
9140 | |
9141 | if (isClampZeroToOne(Src1, Src2)) |
9142 | return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0); |
9143 | } |
9144 | |
9145 | return SDValue(); |
9146 | } |
9147 | |
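// cvt_pkrtz with both source operands undef folds to undef.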
9148 | SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N, |
9149 | DAGCombinerInfo &DCI) const { |
9150 | SDValue Src0 = N->getOperand(0); |
9151 | SDValue Src1 = N->getOperand(1); |
9152 | if (Src0.isUndef() && Src1.isUndef()) |
9153 | return DCI.DAG.getUNDEF(N->getValueType(0)); |
9154 | return SDValue(); |
9155 | } |
9156 | |
9157 | SDValue SITargetLowering::performExtractVectorEltCombine( |
9158 | SDNode *N, DAGCombinerInfo &DCI) const { |
9159 | SDValue Vec = N->getOperand(0); |
9160 | SelectionDAG &DAG = DCI.DAG; |
9161 | |
9162 | EVT VecVT = Vec.getValueType(); |
9163 | EVT EltVT = VecVT.getVectorElementType(); |
9164 | |
9165 | if ((Vec.getOpcode() == ISD::FNEG || |
9166 | Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) { |
9167 | SDLoc SL(N); |
9168 | EVT EltVT = N->getValueType(0); |
9169 | SDValue Idx = N->getOperand(1); |
9170 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, |
9171 | Vec.getOperand(0), Idx); |
9172 | return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt); |
9173 | } |
9174 | |
9175 | // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx) |
9176 | // => |
9177 | // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx) |
9178 | // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx) |
9179 | // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt |
9180 | if (Vec.hasOneUse() && DCI.isBeforeLegalize()) { |
9181 | SDLoc SL(N); |
9182 | EVT EltVT = N->getValueType(0); |
9183 | SDValue Idx = N->getOperand(1); |
9184 | unsigned Opc = Vec.getOpcode(); |
9185 | |
9186 | switch (Opc) {
9187 | default: |
9188 | break; |
9189 | // TODO: Support other binary operations. |
9190 | case ISD::FADD: |
9191 | case ISD::FSUB: |
9192 | case ISD::FMUL: |
9193 | case ISD::ADD: |
9194 | case ISD::UMIN: |
9195 | case ISD::UMAX: |
9196 | case ISD::SMIN: |
9197 | case ISD::SMAX: |
9198 | case ISD::FMAXNUM: |
9199 | case ISD::FMINNUM: |
9200 | case ISD::FMAXNUM_IEEE: |
9201 | case ISD::FMINNUM_IEEE: { |
9202 | SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, |
9203 | Vec.getOperand(0), Idx); |
9204 | SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, |
9205 | Vec.getOperand(1), Idx); |
9206 | |
9207 | DCI.AddToWorklist(Elt0.getNode()); |
9208 | DCI.AddToWorklist(Elt1.getNode()); |
9209 | return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags()); |
9210 | } |
9211 | } |
9212 | } |
9213 | |
9214 | unsigned VecSize = VecVT.getSizeInBits(); |
9215 | unsigned EltSize = EltVT.getSizeInBits(); |
9216 | |
9217 | // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx) |
9218 | // This eliminates the non-constant index and the subsequent movrel or scratch access.
9219 | // Sub-dword vectors of 2 dwords or less have a better implementation.
9220 | // Vectors bigger than 8 dwords would yield too many v_cndmask_b32
9221 | // instructions. |
9222 | if (VecSize <= 256 && (VecSize > 64 || EltSize >= 32) && |
9223 | !isa<ConstantSDNode>(N->getOperand(1))) { |
9224 | SDLoc SL(N); |
9225 | SDValue Idx = N->getOperand(1); |
9226 | SDValue V; |
9227 | for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { |
9228 | SDValue IC = DAG.getVectorIdxConstant(I, SL); |
9229 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); |
9230 | if (I == 0) |
9231 | V = Elt; |
9232 | else |
9233 | V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ); |
9234 | } |
9235 | return V; |
9236 | } |
9237 | |
9238 | if (!DCI.isBeforeLegalize()) |
9239 | return SDValue(); |
9240 | |
9241 | // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit |
9242 | // elements. This exposes more load reduction opportunities by replacing |
9243 | // multiple small extract_vector_elements with a single 32-bit extract. |
9244 | auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
9245 | if (isa<MemSDNode>(Vec) && |
9246 | EltSize <= 16 && |
9247 | EltVT.isByteSized() && |
9248 | VecSize > 32 && |
9249 | VecSize % 32 == 0 && |
9250 | Idx) { |
9251 | EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT); |
9252 | |
9253 | unsigned BitIndex = Idx->getZExtValue() * EltSize; |
9254 | unsigned EltIdx = BitIndex / 32; |
9255 | unsigned LeftoverBitIdx = BitIndex % 32; |
9256 | SDLoc SL(N); |
9257 | |
9258 | SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec); |
9259 | DCI.AddToWorklist(Cast.getNode()); |
9260 | |
9261 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast, |
9262 | DAG.getConstant(EltIdx, SL, MVT::i32)); |
9263 | DCI.AddToWorklist(Elt.getNode()); |
9264 | SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt, |
9265 | DAG.getConstant(LeftoverBitIdx, SL, MVT::i32)); |
9266 | DCI.AddToWorklist(Srl.getNode()); |
9267 | |
9268 | SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl); |
9269 | DCI.AddToWorklist(Trunc.getNode()); |
9270 | return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc); |
9271 | } |
9272 | |
9273 | return SDValue(); |
9274 | } |
9275 | |
9276 | SDValue |
9277 | SITargetLowering::performInsertVectorEltCombine(SDNode *N, |
9278 | DAGCombinerInfo &DCI) const { |
9279 | SDValue Vec = N->getOperand(0); |
9280 | SDValue Idx = N->getOperand(2); |
9281 | EVT VecVT = Vec.getValueType(); |
9282 | EVT EltVT = VecVT.getVectorElementType(); |
9283 | unsigned VecSize = VecVT.getSizeInBits(); |
9284 | unsigned EltSize = EltVT.getSizeInBits(); |
9285 | |
9286 | // INSERT_VECTOR_ELT (<n x e>, var-idx) |
9287 | // => BUILD_VECTOR n x select (e, const-idx) |
9288 | // This eliminates the non-constant index and the subsequent movrel or scratch access.
9289 | // Sub-dword vectors of 2 dwords or less have a better implementation.
9290 | // Vectors bigger than 8 dwords would yield too many v_cndmask_b32
9291 | // instructions. |
9292 | if (isa<ConstantSDNode>(Idx) || |
9293 | VecSize > 256 || (VecSize <= 64 && EltSize < 32)) |
9294 | return SDValue(); |
9295 | |
9296 | SelectionDAG &DAG = DCI.DAG; |
9297 | SDLoc SL(N); |
9298 | SDValue Ins = N->getOperand(1); |
9299 | EVT IdxVT = Idx.getValueType(); |
9300 | |
9301 | SmallVector<SDValue, 16> Ops; |
9302 | for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { |
9303 | SDValue IC = DAG.getConstant(I, SL, IdxVT); |
9304 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); |
9305 | SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ); |
9306 | Ops.push_back(V); |
9307 | } |
9308 | |
9309 | return DAG.getBuildVector(VecVT, SL, Ops); |
9310 | } |
9311 | |
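// Pick the opcode to use when fusing a multiply-add of N0 and N1: FMAD when
// denormals are not needed, FMA when contraction is allowed and profitable,
// or 0 if no fusion should be done.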
9312 | unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG, |
9313 | const SDNode *N0, |
9314 | const SDNode *N1) const { |
9315 | EVT VT = N0->getValueType(0); |
9316 | |
9317 | // Only do this if we are not trying to support denormals. v_mad_f32 does not |
9318 | // support denormals ever. |
9319 | if (((VT == MVT::f32 && !hasFP32Denormals(DAG.getMachineFunction())) || |
9320 | (VT == MVT::f16 && !hasFP64FP16Denormals(DAG.getMachineFunction()) && |
9321 | getSubtarget()->hasMadF16())) && |
9322 | isOperationLegal(ISD::FMAD, VT)) |
9323 | return ISD::FMAD; |
9324 | |
9325 | const TargetOptions &Options = DAG.getTarget().Options; |
9326 | if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || |
9327 | (N0->getFlags().hasAllowContract() && |
9328 | N1->getFlags().hasAllowContract())) && |
9329 | isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) { |
9330 | return ISD::FMA; |
9331 | } |
9332 | |
9333 | return 0; |
9334 | } |
9335 | |
9336 | // For a reassociatable opcode perform: |
9337 | // op x, (op y, z) -> op (op x, z), y, if x and z are uniform |
9338 | SDValue SITargetLowering::reassociateScalarOps(SDNode *N, |
9339 | SelectionDAG &DAG) const { |
9340 | EVT VT = N->getValueType(0); |
9341 | if (VT != MVT::i32 && VT != MVT::i64) |
9342 | return SDValue(); |
9343 | |
9344 | unsigned Opc = N->getOpcode(); |
9345 | SDValue Op0 = N->getOperand(0); |
9346 | SDValue Op1 = N->getOperand(1); |
9347 | |
9348 | if (!(Op0->isDivergent() ^ Op1->isDivergent())) |
9349 | return SDValue(); |
9350 | |
9351 | if (Op0->isDivergent()) |
9352 | std::swap(Op0, Op1); |
9353 | |
9354 | if (Op1.getOpcode() != Opc || !Op1.hasOneUse()) |
9355 | return SDValue(); |
9356 | |
9357 | SDValue Op2 = Op1.getOperand(1); |
9358 | Op1 = Op1.getOperand(0); |
9359 | if (!(Op1->isDivergent() ^ Op2->isDivergent())) |
9360 | return SDValue(); |
9361 | |
9362 | if (Op1->isDivergent()) |
9363 | std::swap(Op1, Op2); |
9364 | |
9365 | // If either operand is constant this will conflict with |
9366 | // DAGCombiner::ReassociateOps(). |
9367 | if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) || |
9368 | DAG.isConstantIntBuildVectorOrConstantInt(Op1)) |
9369 | return SDValue(); |
9370 | |
9371 | SDLoc SL(N); |
9372 | SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1); |
9373 | return DAG.getNode(Opc, SL, VT, Add1, Op2); |
9374 | } |
9375 | |
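// Build a 64-bit multiply-add of 32-bit operands, N0 * N1 + N2, using
// mad_i64_i32/mad_u64_u32, then truncate the result to VT.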
9376 | static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL, |
9377 | EVT VT, |
9378 | SDValue N0, SDValue N1, SDValue N2, |
9379 | bool Signed) { |
9380 | unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32; |
9381 | SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1); |
9382 | SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2); |
9383 | return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad); |
9384 | } |
9385 | |
9386 | SDValue SITargetLowering::performAddCombine(SDNode *N, |
9387 | DAGCombinerInfo &DCI) const { |
9388 | SelectionDAG &DAG = DCI.DAG; |
9389 | EVT VT = N->getValueType(0); |
9390 | SDLoc SL(N); |
9391 | SDValue LHS = N->getOperand(0); |
9392 | SDValue RHS = N->getOperand(1); |
9393 | |
9394 | if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL) |
9395 | && Subtarget->hasMad64_32() && |
9396 | !VT.isVector() && VT.getScalarSizeInBits() > 32 && |
9397 | VT.getScalarSizeInBits() <= 64) { |
9398 | if (LHS.getOpcode() != ISD::MUL) |
9399 | std::swap(LHS, RHS); |
9400 | |
9401 | SDValue MulLHS = LHS.getOperand(0); |
9402 | SDValue MulRHS = LHS.getOperand(1); |
9403 | SDValue AddRHS = RHS; |
9404 | |
9405 | // TODO: Maybe restrict if SGPR inputs. |
9406 | if (numBitsUnsigned(MulLHS, DAG) <= 32 && |
9407 | numBitsUnsigned(MulRHS, DAG) <= 32) { |
9408 | MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32); |
9409 | MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32); |
9410 | AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64); |
9411 | return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false); |
9412 | } |
9413 | |
9414 | if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) { |
9415 | MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32); |
9416 | MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32); |
9417 | AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64); |
9418 | return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true); |
9419 | } |
9420 | |
9421 | return SDValue(); |
9422 | } |
9423 | |
9424 | if (SDValue V = reassociateScalarOps(N, DAG)) { |
9425 | return V; |
9426 | } |
9427 | |
9428 | if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG()) |
9429 | return SDValue(); |
9430 | |
9431 | // add x, zext (setcc) => addcarry x, 0, setcc |
9432 | // add x, sext (setcc) => subcarry x, 0, setcc |
9433 | unsigned Opc = LHS.getOpcode(); |
9434 | if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND || |
9435 | Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY) |
9436 | std::swap(RHS, LHS); |
9437 | |
9438 | Opc = RHS.getOpcode(); |
9439 | switch (Opc) { |
9440 | default: break; |
9441 | case ISD::ZERO_EXTEND: |
9442 | case ISD::SIGN_EXTEND: |
9443 | case ISD::ANY_EXTEND: { |
9444 | auto Cond = RHS.getOperand(0); |
9445 | // If this won't be a real VOPC output, we would still need to insert an |
9446 | // extra instruction anyway. |
9447 | if (!isBoolSGPR(Cond)) |
9448 | break; |
9449 | SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); |
9450 | SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; |
9451 | Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY; |
9452 | return DAG.getNode(Opc, SL, VTList, Args); |
9453 | } |
9454 | case ISD::ADDCARRY: { |
9455 | // add x, (addcarry y, 0, cc) => addcarry x, y, cc |
9456 | auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); |
9457 | if (!C || C->getZExtValue() != 0) break; |
9458 | SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) }; |
9459 | return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args); |
9460 | } |
9461 | } |
9462 | return SDValue(); |
9463 | } |
9464 | |
9465 | SDValue SITargetLowering::performSubCombine(SDNode *N, |
9466 | DAGCombinerInfo &DCI) const { |
9467 | SelectionDAG &DAG = DCI.DAG; |
9468 | EVT VT = N->getValueType(0); |
9469 | |
9470 | if (VT != MVT::i32) |
9471 | return SDValue(); |
9472 | |
9473 | SDLoc SL(N); |
9474 | SDValue LHS = N->getOperand(0); |
9475 | SDValue RHS = N->getOperand(1); |
9476 | |
9477 | // sub x, zext (setcc) => subcarry x, 0, setcc |
9478 | // sub x, sext (setcc) => addcarry x, 0, setcc |
9479 | unsigned Opc = RHS.getOpcode(); |
9480 | switch (Opc) { |
9481 | default: break; |
9482 | case ISD::ZERO_EXTEND: |
9483 | case ISD::SIGN_EXTEND: |
9484 | case ISD::ANY_EXTEND: { |
9485 | auto Cond = RHS.getOperand(0); |
9486 | // If this won't be a real VOPC output, we would still need to insert an |
9487 | // extra instruction anyway. |
9488 | if (!isBoolSGPR(Cond)) |
9489 | break; |
9490 | SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); |
9491 | SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; |
9492 | Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::ADDCARRY : ISD::SUBCARRY; |
9493 | return DAG.getNode(Opc, SL, VTList, Args); |
9494 | } |
9495 | } |
9496 | |
9497 | if (LHS.getOpcode() == ISD::SUBCARRY) { |
9498 | // sub (subcarry x, 0, cc), y => subcarry x, y, cc |
9499 | auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); |
9500 | if (!C || !C->isNullValue()) |
9501 | return SDValue(); |
9502 | SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) }; |
9503 | return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args); |
9504 | } |
9505 | return SDValue(); |
9506 | } |
9507 | |
9508 | SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N, |
9509 | DAGCombinerInfo &DCI) const { |
9510 | |
9511 | if (N->getValueType(0) != MVT::i32) |
9512 | return SDValue(); |
9513 | |
9514 | auto C = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
9515 | if (!C || C->getZExtValue() != 0) |
9516 | return SDValue(); |
9517 | |
9518 | SelectionDAG &DAG = DCI.DAG; |
9519 | SDValue LHS = N->getOperand(0); |
9520 | |
9521 | // addcarry (add x, y), 0, cc => addcarry x, y, cc |
9522 | // subcarry (sub x, y), 0, cc => subcarry x, y, cc |
9523 | unsigned LHSOpc = LHS.getOpcode(); |
9524 | unsigned Opc = N->getOpcode(); |
9525 | if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) || |
9526 | (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) { |
9527 | SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) }; |
9528 | return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args); |
9529 | } |
9530 | return SDValue(); |
9531 | } |
9532 | |
9533 | SDValue SITargetLowering::performFAddCombine(SDNode *N, |
9534 | DAGCombinerInfo &DCI) const { |
9535 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) |
9536 | return SDValue(); |
9537 | |
9538 | SelectionDAG &DAG = DCI.DAG; |
9539 | EVT VT = N->getValueType(0); |
9540 | |
9541 | SDLoc SL(N); |
9542 | SDValue LHS = N->getOperand(0); |
9543 | SDValue RHS = N->getOperand(1); |
9544 | |
9545 | // These should really be instruction patterns, but writing patterns with |
9546 | // source modifiers is a pain.
9547 | |
9548 | // fadd (fadd (a, a), b) -> mad 2.0, a, b |
9549 | if (LHS.getOpcode() == ISD::FADD) { |
9550 | SDValue A = LHS.getOperand(0); |
9551 | if (A == LHS.getOperand(1)) { |
9552 | unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); |
9553 | if (FusedOp != 0) { |
9554 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); |
9555 | return DAG.getNode(FusedOp, SL, VT, A, Two, RHS); |
9556 | } |
9557 | } |
9558 | } |
9559 | |
9560 | // fadd (b, fadd (a, a)) -> mad 2.0, a, b |
9561 | if (RHS.getOpcode() == ISD::FADD) { |
9562 | SDValue A = RHS.getOperand(0); |
9563 | if (A == RHS.getOperand(1)) { |
9564 | unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); |
9565 | if (FusedOp != 0) { |
9566 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); |
9567 | return DAG.getNode(FusedOp, SL, VT, A, Two, LHS); |
9568 | } |
9569 | } |
9570 | } |
9571 | |
9572 | return SDValue(); |
9573 | } |
9574 | |
9575 | SDValue SITargetLowering::performFSubCombine(SDNode *N, |
9576 | DAGCombinerInfo &DCI) const { |
9577 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) |
9578 | return SDValue(); |
9579 | |
9580 | SelectionDAG &DAG = DCI.DAG; |
9581 | SDLoc SL(N); |
9582 | EVT VT = N->getValueType(0); |
9583 | assert(!VT.isVector());
9584 | |
9585 | // Try to get the fneg to fold into the source modifier. This undoes generic |
9586 | // DAG combines and folds them into the mad. |
9587 | // |
9588 | // Only do this if we are not trying to support denormals. v_mad_f32 does |
9589 | // not support denormals ever. |
9590 | SDValue LHS = N->getOperand(0); |
9591 | SDValue RHS = N->getOperand(1); |
9592 | if (LHS.getOpcode() == ISD::FADD) { |
9593 | // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) |
9594 | SDValue A = LHS.getOperand(0); |
9595 | if (A == LHS.getOperand(1)) { |
9596 | unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); |
9597 | if (FusedOp != 0) {
9598 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); |
9599 | SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); |
9600 | |
9601 | return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS); |
9602 | } |
9603 | } |
9604 | } |
9605 | |
9606 | if (RHS.getOpcode() == ISD::FADD) { |
9607 | // (fsub c, (fadd a, a)) -> mad -2.0, a, c |
9608 | |
9609 | SDValue A = RHS.getOperand(0); |
9610 | if (A == RHS.getOperand(1)) { |
9611 | unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); |
9612 | if (FusedOp != 0) {
9613 | const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT); |
9614 | return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS); |
9615 | } |
9616 | } |
9617 | } |
9618 | |
9619 | return SDValue(); |
9620 | } |
9621 | |
9622 | SDValue SITargetLowering::performFMACombine(SDNode *N, |
9623 | DAGCombinerInfo &DCI) const { |
9624 | SelectionDAG &DAG = DCI.DAG; |
9625 | EVT VT = N->getValueType(0); |
9626 | SDLoc SL(N); |
9627 | |
9628 | if (!Subtarget->hasDot2Insts() || VT != MVT::f32) |
9629 | return SDValue(); |
9630 | |
9631 | // FMA((F32)S0.x, (F32)S1. x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) -> |
9632 | // FDOT2((V2F16)S0, (V2F16)S1, (F32)z)) |
9633 | SDValue Op1 = N->getOperand(0); |
9634 | SDValue Op2 = N->getOperand(1); |
9635 | SDValue FMA = N->getOperand(2); |
9636 | |
9637 | if (FMA.getOpcode() != ISD::FMA || |
9638 | Op1.getOpcode() != ISD::FP_EXTEND || |
9639 | Op2.getOpcode() != ISD::FP_EXTEND) |
9640 | return SDValue(); |
9641 | |
9642 | // fdot2_f32_f16 always flushes fp32 denormal operand and output to zero, |
9643 | // regardless of the denorm mode setting. Therefore, unsafe-fp-math/fp-contract |
9644 | // is sufficient to allow generating fdot2.
9645 | const TargetOptions &Options = DAG.getTarget().Options; |
9646 | if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || |
9647 | (N->getFlags().hasAllowContract() && |
9648 | FMA->getFlags().hasAllowContract())) { |
9649 | Op1 = Op1.getOperand(0); |
9650 | Op2 = Op2.getOperand(0); |
9651 | if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
9652 | Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
9653 | return SDValue(); |
9654 | |
9655 | SDValue Vec1 = Op1.getOperand(0); |
9656 | SDValue Idx1 = Op1.getOperand(1); |
9657 | SDValue Vec2 = Op2.getOperand(0); |
9658 | |
9659 | SDValue FMAOp1 = FMA.getOperand(0); |
9660 | SDValue FMAOp2 = FMA.getOperand(1); |
9661 | SDValue FMAAcc = FMA.getOperand(2); |
9662 | |
9663 | if (FMAOp1.getOpcode() != ISD::FP_EXTEND || |
9664 | FMAOp2.getOpcode() != ISD::FP_EXTEND) |
9665 | return SDValue(); |
9666 | |
9667 | FMAOp1 = FMAOp1.getOperand(0); |
9668 | FMAOp2 = FMAOp2.getOperand(0); |
9669 | if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
9670 | FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
9671 | return SDValue(); |
9672 | |
9673 | SDValue Vec3 = FMAOp1.getOperand(0); |
9674 | SDValue Vec4 = FMAOp2.getOperand(0); |
9675 | SDValue Idx2 = FMAOp1.getOperand(1); |
9676 | |
9677 | if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) || |
9678 | // Idx1 and Idx2 cannot be the same. |
9679 | Idx1 == Idx2) |
9680 | return SDValue(); |
9681 | |
9682 | if (Vec1 == Vec2 || Vec3 == Vec4) |
9683 | return SDValue(); |
9684 | |
9685 | if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16) |
9686 | return SDValue(); |
9687 | |
9688 | if ((Vec1 == Vec3 && Vec2 == Vec4) || |
9689 | (Vec1 == Vec4 && Vec2 == Vec3)) { |
9690 | return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc, |
9691 | DAG.getTargetConstant(0, SL, MVT::i1)); |
9692 | } |
9693 | } |
9694 | return SDValue(); |
9695 | } |
9696 | |
9697 | SDValue SITargetLowering::performSetCCCombine(SDNode *N, |
9698 | DAGCombinerInfo &DCI) const { |
9699 | SelectionDAG &DAG = DCI.DAG; |
9700 | SDLoc SL(N); |
9701 | |
9702 | SDValue LHS = N->getOperand(0); |
9703 | SDValue RHS = N->getOperand(1); |
9704 | EVT VT = LHS.getValueType(); |
9705 | ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); |
9706 | |
9707 | auto CRHS = dyn_cast<ConstantSDNode>(RHS); |
9708 | if (!CRHS) { |
9709 | CRHS = dyn_cast<ConstantSDNode>(LHS); |
9710 | if (CRHS) { |
9711 | std::swap(LHS, RHS); |
9712 | CC = getSetCCSwappedOperands(CC); |
9713 | } |
9714 | } |
9715 | |
9716 | if (CRHS) { |
9717 | if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND && |
9718 | isBoolSGPR(LHS.getOperand(0))) { |
9719 | // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1 |
9720 | // setcc (sext from i1 cc), -1, eq|sle|uge) => cc |
9721 | // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1 |
9722 | // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc |
9723 | if ((CRHS->isAllOnesValue() && |
9724 | (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) || |
9725 | (CRHS->isNullValue() && |
9726 | (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE))) |
9727 | return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), |
9728 | DAG.getConstant(-1, SL, MVT::i1)); |
9729 | if ((CRHS->isAllOnesValue() && |
9730 | (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) || |
9731 | (CRHS->isNullValue() && |
9732 | (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT))) |
9733 | return LHS.getOperand(0); |
9734 | } |
9735 | |
9736 | uint64_t CRHSVal = CRHS->getZExtValue(); |
9737 | if ((CC == ISD::SETEQ || CC == ISD::SETNE) && |
9738 | LHS.getOpcode() == ISD::SELECT && |
9739 | isa<ConstantSDNode>(LHS.getOperand(1)) && |
9740 | isa<ConstantSDNode>(LHS.getOperand(2)) && |
9741 | LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) && |
9742 | isBoolSGPR(LHS.getOperand(0))) { |
9743 | // Given CT != CF:
9744 | // setcc (select cc, CT, CF), CF, eq => xor cc, -1 |
9745 | // setcc (select cc, CT, CF), CF, ne => cc |
9746 | // setcc (select cc, CT, CF), CT, ne => xor cc, -1 |
9747 | // setcc (select cc, CT, CF), CT, eq => cc |
9748 | uint64_t CT = LHS.getConstantOperandVal(1); |
9749 | uint64_t CF = LHS.getConstantOperandVal(2); |
9750 | |
9751 | if ((CF == CRHSVal && CC == ISD::SETEQ) || |
9752 | (CT == CRHSVal && CC == ISD::SETNE)) |
9753 | return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), |
9754 | DAG.getConstant(-1, SL, MVT::i1)); |
9755 | if ((CF == CRHSVal && CC == ISD::SETNE) || |
9756 | (CT == CRHSVal && CC == ISD::SETEQ)) |
9757 | return LHS.getOperand(0); |
9758 | } |
9759 | } |
9760 | |
9761 | if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() && |
9762 | VT != MVT::f16)) |
9763 | return SDValue(); |
9764 | |
9765 | // Match isinf/isfinite pattern |
9766 | // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) |
9767 | // (fcmp one (fabs x), inf) -> (fp_class x, |
9768 | // (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero))
9769 | if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) { |
9770 | const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); |
9771 | if (!CRHS) |
9772 | return SDValue(); |
9773 | |
9774 | const APFloat &APF = CRHS->getValueAPF(); |
9775 | if (APF.isInfinity() && !APF.isNegative()) { |
9776 | const unsigned IsInfMask = SIInstrFlags::P_INFINITY | |
9777 | SIInstrFlags::N_INFINITY; |
9778 | const unsigned IsFiniteMask = SIInstrFlags::N_ZERO | |
9779 | SIInstrFlags::P_ZERO | |
9780 | SIInstrFlags::N_NORMAL | |
9781 | SIInstrFlags::P_NORMAL | |
9782 | SIInstrFlags::N_SUBNORMAL | |
9783 | SIInstrFlags::P_SUBNORMAL; |
9784 | unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask; |
9785 | return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), |
9786 | DAG.getConstant(Mask, SL, MVT::i32)); |
9787 | } |
9788 | } |
9789 | |
9790 | return SDValue(); |
9791 | } |
9792 | |
9793 | SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N, |
9794 | DAGCombinerInfo &DCI) const { |
9795 | SelectionDAG &DAG = DCI.DAG; |
9796 | SDLoc SL(N); |
9797 | unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; |
9798 | |
9799 | SDValue Src = N->getOperand(0); |
9800 | SDValue Shift = N->getOperand(0); |
9801 | if (Shift.getOpcode() == ISD::ZERO_EXTEND) |
9802 | Shift = Shift.getOperand(0); |
9803 | |
9804 | if (Shift.getOpcode() == ISD::SRL || Shift.getOpcode() == ISD::SHL) { |
9805 | // cvt_f32_ubyte1 (shl x, 8) -> cvt_f32_ubyte0 x |
9806 | // cvt_f32_ubyte3 (shl x, 16) -> cvt_f32_ubyte1 x |
9807 | // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x |
9808 | // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x |
9809 | // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x |
9810 | if (auto *C = dyn_cast<ConstantSDNode>(Shift.getOperand(1))) { |
9811 | Shift = DAG.getZExtOrTrunc(Shift.getOperand(0), |
9812 | SDLoc(Shift.getOperand(0)), MVT::i32); |
9813 | |
9814 | unsigned ShiftOffset = 8 * Offset; |
9815 | if (Shift.getOpcode() == ISD::SHL) |
9816 | ShiftOffset -= C->getZExtValue(); |
9817 | else |
9818 | ShiftOffset += C->getZExtValue(); |
9819 | |
9820 | if (ShiftOffset < 32 && (ShiftOffset % 8) == 0) { |
9821 | return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + ShiftOffset / 8, SL, |
9822 | MVT::f32, Shift); |
9823 | } |
9824 | } |
9825 | } |
9826 | |
9827 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
9828 | APInt DemandedBits = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8); |
9829 | if (TLI.SimplifyDemandedBits(Src, DemandedBits, DCI)) { |
9830 | // We simplified Src. If this node is not dead, visit it again so it is |
9831 | // folded properly. |
9832 | if (N->getOpcode() != ISD::DELETED_NODE) |
9833 | DCI.AddToWorklist(N); |
9834 | return SDValue(N, 0); |
9835 | } |
9836 | |
9837 | // Handle (or x, (srl y, 8)) pattern when known bits are zero. |
9838 | if (SDValue DemandedSrc = |
9839 | TLI.SimplifyMultipleUseDemandedBits(Src, DemandedBits, DAG)) |
9840 | return DAG.getNode(N->getOpcode(), SL, MVT::f32, DemandedSrc); |
9841 | |
9842 | return SDValue(); |
9843 | } |
9844 | |
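// Fold a clamp of a constant: negative sources (and NaN under DX10 clamp)
// fold to 0.0, and sources greater than 1.0 fold to 1.0.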
9845 | SDValue SITargetLowering::performClampCombine(SDNode *N, |
9846 | DAGCombinerInfo &DCI) const { |
9847 | ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); |
9848 | if (!CSrc) |
9849 | return SDValue(); |
9850 | |
9851 | const MachineFunction &MF = DCI.DAG.getMachineFunction(); |
9852 | const APFloat &F = CSrc->getValueAPF(); |
9853 | APFloat Zero = APFloat::getZero(F.getSemantics()); |
9854 | if (F < Zero || |
9855 | (F.isNaN() && MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) { |
9856 | return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0)); |
9857 | } |
9858 | |
9859 | APFloat One(F.getSemantics(), "1.0"); |
9860 | if (F > One) |
9861 | return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0)); |
9862 | |
9863 | return SDValue(CSrc, 0); |
9864 | } |
9865 | |
9866 | |
9867 | SDValue SITargetLowering::PerformDAGCombine(SDNode *N, |
9868 | DAGCombinerInfo &DCI) const { |
9869 | if (getTargetMachine().getOptLevel() == CodeGenOpt::None) |
9870 | return SDValue(); |
9871 | switch (N->getOpcode()) { |
9872 | default: |
9873 | return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); |
9874 | case ISD::ADD: |
9875 | return performAddCombine(N, DCI); |
9876 | case ISD::SUB: |
9877 | return performSubCombine(N, DCI); |
9878 | case ISD::ADDCARRY: |
9879 | case ISD::SUBCARRY: |
9880 | return performAddCarrySubCarryCombine(N, DCI); |
9881 | case ISD::FADD: |
9882 | return performFAddCombine(N, DCI); |
9883 | case ISD::FSUB: |
9884 | return performFSubCombine(N, DCI); |
9885 | case ISD::SETCC: |
9886 | return performSetCCCombine(N, DCI); |
9887 | case ISD::FMAXNUM: |
9888 | case ISD::FMINNUM: |
9889 | case ISD::FMAXNUM_IEEE: |
9890 | case ISD::FMINNUM_IEEE: |
9891 | case ISD::SMAX: |
9892 | case ISD::SMIN: |
9893 | case ISD::UMAX: |
9894 | case ISD::UMIN: |
9895 | case AMDGPUISD::FMIN_LEGACY: |
9896 | case AMDGPUISD::FMAX_LEGACY: |
9897 | return performMinMaxCombine(N, DCI); |
9898 | case ISD::FMA: |
9899 | return performFMACombine(N, DCI); |
9900 | case ISD::LOAD: { |
9901 | if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
9902 | return Widened;
9903 | LLVM_FALLTHROUGH;
9904 | } |
9905 | case ISD::STORE: |
9906 | case ISD::ATOMIC_LOAD: |
9907 | case ISD::ATOMIC_STORE: |
9908 | case ISD::ATOMIC_CMP_SWAP: |
9909 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: |
9910 | case ISD::ATOMIC_SWAP: |
9911 | case ISD::ATOMIC_LOAD_ADD: |
9912 | case ISD::ATOMIC_LOAD_SUB: |
9913 | case ISD::ATOMIC_LOAD_AND: |
9914 | case ISD::ATOMIC_LOAD_OR: |
9915 | case ISD::ATOMIC_LOAD_XOR: |
9916 | case ISD::ATOMIC_LOAD_NAND: |
9917 | case ISD::ATOMIC_LOAD_MIN: |
9918 | case ISD::ATOMIC_LOAD_MAX: |
9919 | case ISD::ATOMIC_LOAD_UMIN: |
9920 | case ISD::ATOMIC_LOAD_UMAX: |
9921 | case ISD::ATOMIC_LOAD_FADD: |
9922 | case AMDGPUISD::ATOMIC_INC: |
9923 | case AMDGPUISD::ATOMIC_DEC: |
9924 | case AMDGPUISD::ATOMIC_LOAD_FMIN: |
9925 | case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics. |
9926 | if (DCI.isBeforeLegalize()) |
9927 | break; |
9928 | return performMemSDNodeCombine(cast<MemSDNode>(N), DCI); |
9929 | case ISD::AND: |
9930 | return performAndCombine(N, DCI); |
9931 | case ISD::OR: |
9932 | return performOrCombine(N, DCI); |
9933 | case ISD::XOR: |
9934 | return performXorCombine(N, DCI); |
9935 | case ISD::ZERO_EXTEND: |
9936 | return performZeroExtendCombine(N, DCI); |
9937 | case ISD::SIGN_EXTEND_INREG: |
9938 | return performSignExtendInRegCombine(N, DCI);
9939 | case AMDGPUISD::FP_CLASS: |
9940 | return performClassCombine(N, DCI); |
9941 | case ISD::FCANONICALIZE: |
9942 | return performFCanonicalizeCombine(N, DCI); |
9943 | case AMDGPUISD::RCP: |
9944 | return performRcpCombine(N, DCI); |
9945 | case AMDGPUISD::FRACT: |
9946 | case AMDGPUISD::RSQ: |
9947 | case AMDGPUISD::RCP_LEGACY: |
9948 | case AMDGPUISD::RSQ_LEGACY: |
9949 | case AMDGPUISD::RCP_IFLAG: |
9950 | case AMDGPUISD::RSQ_CLAMP: |
9951 | case AMDGPUISD::LDEXP: { |
9952 | SDValue Src = N->getOperand(0); |
9953 | if (Src.isUndef()) |
9954 | return Src; |
9955 | break; |
9956 | } |
9957 | case ISD::SINT_TO_FP: |
9958 | case ISD::UINT_TO_FP: |
9959 | return performUCharToFloatCombine(N, DCI); |
9960 | case AMDGPUISD::CVT_F32_UBYTE0: |
9961 | case AMDGPUISD::CVT_F32_UBYTE1: |
9962 | case AMDGPUISD::CVT_F32_UBYTE2: |
9963 | case AMDGPUISD::CVT_F32_UBYTE3: |
9964 | return performCvtF32UByteNCombine(N, DCI); |
9965 | case AMDGPUISD::FMED3: |
9966 | return performFMed3Combine(N, DCI); |
9967 | case AMDGPUISD::CVT_PKRTZ_F16_F32: |
9968 | return performCvtPkRTZCombine(N, DCI); |
9969 | case AMDGPUISD::CLAMP: |
9970 | return performClampCombine(N, DCI); |
9971 | case ISD::SCALAR_TO_VECTOR: { |
9972 | SelectionDAG &DAG = DCI.DAG; |
9973 | EVT VT = N->getValueType(0); |
9974 | |
9975 | // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x)) |
9976 | if (VT == MVT::v2i16 || VT == MVT::v2f16) { |
9977 | SDLoc SL(N); |
9978 | SDValue Src = N->getOperand(0); |
9979 | EVT EltVT = Src.getValueType(); |
9980 | if (EltVT == MVT::f16) |
9981 | Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src); |
9982 | |
9983 | SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src); |
9984 | return DAG.getNode(ISD::BITCAST, SL, VT, Ext); |
9985 | } |
9986 | |
9987 | break; |
9988 | } |
9989 | case ISD::EXTRACT_VECTOR_ELT: |
9990 | return performExtractVectorEltCombine(N, DCI); |
9991 | case ISD::INSERT_VECTOR_ELT: |
9992 | return performInsertVectorEltCombine(N, DCI); |
9993 | } |
9994 | return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); |
9995 | } |
9996 | |
9997 | /// Helper function for adjustWritemask |
9998 | static unsigned SubIdx2Lane(unsigned Idx) { |
9999 | switch (Idx) { |
10000 | default: return 0; |
10001 | case AMDGPU::sub0: return 0; |
10002 | case AMDGPU::sub1: return 1; |
10003 | case AMDGPU::sub2: return 2; |
10004 | case AMDGPU::sub3: return 3; |
10005 | case AMDGPU::sub4: return 4; // Possible with TFE/LWE |
10006 | } |
10007 | } |
10008 | |
10009 | /// Adjust the writemask of MIMG instructions |
10010 | SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node, |
10011 | SelectionDAG &DAG) const { |
10012 | unsigned Opcode = Node->getMachineOpcode(); |
10013 | |
10014 | // Subtract 1 because the vdata output is not a MachineSDNode operand. |
10015 | int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1; |
10016 | if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx)) |
10017 | return Node; // not implemented for D16 |
10018 | |
10019 | SDNode *Users[5] = { nullptr }; |
10020 | unsigned Lane = 0; |
10021 | unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1; |
10022 | unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx); |
10023 | unsigned NewDmask = 0; |
10024 | unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1; |
10025 | unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1; |
10026 | bool UsesTFC = (Node->getConstantOperandVal(TFEIdx) || |
10027 | Node->getConstantOperandVal(LWEIdx)) ? 1 : 0; |
10028 | unsigned TFCLane = 0; |
10029 | bool HasChain = Node->getNumValues() > 1; |
10030 | |
10031 | if (OldDmask == 0) { |
10032 | // These are folded out, but on the off chance it happens, don't assert.
10033 | return Node; |
10034 | } |
10035 | |
10036 | unsigned OldBitsSet = countPopulation(OldDmask); |
10037 | // Work out which is the TFE/LWE lane if that is enabled. |
10038 | if (UsesTFC) { |
10039 | TFCLane = OldBitsSet; |
10040 | } |
10041 | |
10042 | // Try to figure out the used register components |
10043 | for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); |
10044 | I != E; ++I) { |
10045 | |
10046 | // Don't look at users of the chain. |
10047 | if (I.getUse().getResNo() != 0) |
10048 | continue; |
10049 | |
10050 | // Abort if we can't understand the usage |
10051 | if (!I->isMachineOpcode() || |
10052 | I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) |
10053 | return Node; |
10054 | |
10055 | // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used. |
10056 | // Note that subregs are packed, i.e. Lane==0 is the first bit set |
10057 | // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit |
10058 | // set, etc. |
10059 | Lane = SubIdx2Lane(I->getConstantOperandVal(1)); |
10060 | |
10061 | // Check if the use is for the TFE/LWE generated result at VGPRn+1. |
10062 | if (UsesTFC && Lane == TFCLane) { |
10063 | Users[Lane] = *I; |
10064 | } else { |
10065 | // Set which texture component corresponds to the lane. |
10066 | unsigned Comp; |
10067 | for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) { |
10068 | Comp = countTrailingZeros(Dmask); |
10069 | Dmask &= ~(1 << Comp); |
10070 | } |
10071 | |
10072 | // Abort if we have more than one user per component. |
10073 | if (Users[Lane]) |
10074 | return Node; |
10075 | |
10076 | Users[Lane] = *I; |
10077 | NewDmask |= 1 << Comp; |
10078 | } |
10079 | } |
10080 | |
10081 | // Don't allow 0 dmask, as hardware assumes one channel enabled. |
10082 | bool NoChannels = !NewDmask; |
10083 | if (NoChannels) { |
10084 | if (!UsesTFC) { |
10085 | // No uses of the result and not using TFC. Then do nothing. |
10086 | return Node; |
10087 | } |
10088 | // If the original dmask has one channel, there is nothing to do.
10089 | if (OldBitsSet == 1) |
10090 | return Node; |
10091 | // Use an arbitrary dmask - required for the instruction to work |
10092 | NewDmask = 1; |
10093 | } |
10094 | // Abort if there's no change |
10095 | if (NewDmask == OldDmask) |
10096 | return Node; |
10097 | |
10098 | unsigned BitsSet = countPopulation(NewDmask); |
10099 | |
10100 | // Check for TFE or LWE - increase the number of channels by one to account |
10101 | // for the extra return value |
10102 | // This will need adjustment for D16 if this is also included in |
10103 | // adjustWriteMask (this function), but at present D16 is excluded.
10104 | unsigned NewChannels = BitsSet + UsesTFC; |
10105 | |
10106 | int NewOpcode = |
10107 | AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels); |
10108 | assert(NewOpcode != -1 &&
10109 |        NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
10110 |        "failed to find equivalent MIMG op");
10111 | |
10112 | // Adjust the writemask in the node |
10113 | SmallVector<SDValue, 12> Ops; |
10114 | Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx); |
10115 | Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32)); |
10116 | Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end()); |
10117 | |
10118 | MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT(); |
10119 | |
10120 | MVT ResultVT = NewChannels == 1 ? |
10121 | SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 : |
10122 | NewChannels == 5 ? 8 : NewChannels); |
10123 | SDVTList NewVTList = HasChain ? |
10124 | DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT); |
10125 | |
10126 | |
10127 | MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node), |
10128 | NewVTList, Ops); |
10129 | |
10130 | if (HasChain) { |
10131 | // Update chain. |
10132 | DAG.setNodeMemRefs(NewNode, Node->memoperands()); |
10133 | DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1)); |
10134 | } |
10135 | |
10136 | if (NewChannels == 1) { |
10137 | assert(Node->hasNUsesOfValue(1, 0));
10138 | SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY, |
10139 | SDLoc(Node), Users[Lane]->getValueType(0), |
10140 | SDValue(NewNode, 0)); |
10141 | DAG.ReplaceAllUsesWith(Users[Lane], Copy); |
10142 | return nullptr; |
10143 | } |
10144 | |
10145 | // Update the users of the node with the new indices |
10146 | for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) { |
10147 | SDNode *User = Users[i]; |
10148 | if (!User) { |
10149 | // Handle the special case of NoChannels. We set NewDmask to 1 above, but |
10150 | // Users[0] is still nullptr because channel 0 doesn't really have a use. |
10151 | if (i || !NoChannels) |
10152 | continue; |
10153 | } else { |
10154 | SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32); |
10155 | DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op); |
10156 | } |
10157 | |
10158 | switch (Idx) { |
10159 | default: break; |
10160 | case AMDGPU::sub0: Idx = AMDGPU::sub1; break; |
10161 | case AMDGPU::sub1: Idx = AMDGPU::sub2; break; |
10162 | case AMDGPU::sub2: Idx = AMDGPU::sub3; break; |
10163 | case AMDGPU::sub3: Idx = AMDGPU::sub4; break; |
10164 | } |
10165 | } |
10166 | |
10167 | DAG.RemoveDeadNode(Node); |
10168 | return nullptr; |
10169 | } |
10170 | |
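// Return true if Op is a frame index, looking through AssertZext.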
10171 | static bool isFrameIndexOp(SDValue Op) { |
10172 | if (Op.getOpcode() == ISD::AssertZext) |
10173 | Op = Op.getOperand(0); |
10174 | |
10175 | return isa<FrameIndexSDNode>(Op); |
10176 | } |
10177 | |
10178 | /// Legalize target independent instructions (e.g. INSERT_SUBREG) |
10179 | /// with frame index operands. |
10180 | /// LLVM assumes that inputs to these instructions are registers.
10181 | SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node, |
10182 | SelectionDAG &DAG) const { |
10183 | if (Node->getOpcode() == ISD::CopyToReg) { |
10184 | RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1)); |
10185 | SDValue SrcVal = Node->getOperand(2); |
10186 | |
10187 | // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have |
10188 | // to try understanding copies to physical registers. |
10189 | if (SrcVal.getValueType() == MVT::i1 && |
10190 | Register::isPhysicalRegister(DestReg->getReg())) { |
10191 | SDLoc SL(Node); |
10192 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); |
10193 | SDValue VReg = DAG.getRegister( |
10194 | MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1); |
10195 | |
10196 | SDNode *Glued = Node->getGluedNode(); |
10197 | SDValue ToVReg |
10198 | = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal, |
10199 | SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0)); |
10200 | SDValue ToResultReg |
10201 | = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0), |
10202 | VReg, ToVReg.getValue(1)); |
10203 | DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode()); |
10204 | DAG.RemoveDeadNode(Node); |
10205 | return ToResultReg.getNode(); |
10206 | } |
10207 | } |
10208 | |
10209 | SmallVector<SDValue, 8> Ops; |
10210 | for (unsigned i = 0; i < Node->getNumOperands(); ++i) { |
10211 | if (!isFrameIndexOp(Node->getOperand(i))) { |
10212 | Ops.push_back(Node->getOperand(i)); |
10213 | continue; |
10214 | } |
10215 | |
10216 | SDLoc DL(Node); |
10217 | Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, |
10218 | Node->getOperand(i).getValueType(), |
10219 | Node->getOperand(i)), 0)); |
10220 | } |
10221 | |
10222 | return DAG.UpdateNodeOperands(Node, Ops); |
10223 | } |
10224 | |
10225 | /// Fold the instructions after selecting them. |
10226 | /// Returns null if users were already updated. |
10227 | SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, |
10228 | SelectionDAG &DAG) const { |
10229 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
10230 | unsigned Opcode = Node->getMachineOpcode(); |
10231 | |
10232 | if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() && |
10233 | !TII->isGather4(Opcode)) { |
10234 | return adjustWritemask(Node, DAG); |
10235 | } |
10236 | |
10237 | if (Opcode == AMDGPU::INSERT_SUBREG || |
10238 | Opcode == AMDGPU::REG_SEQUENCE) { |
10239 | legalizeTargetIndependentNode(Node, DAG); |
10240 | return Node; |
10241 | } |
10242 | |
10243 | switch (Opcode) { |
10244 | case AMDGPU::V_DIV_SCALE_F32: |
10245 | case AMDGPU::V_DIV_SCALE_F64: { |
10246 | // Satisfy the operand register constraint when one of the inputs is |
10247 | // undefined. Ordinarily each undef value will have its own implicit_def of |
10248 | // a vreg, so force these to use a single register. |
10249 | SDValue Src0 = Node->getOperand(0); |
10250 | SDValue Src1 = Node->getOperand(1); |
10251 | SDValue Src2 = Node->getOperand(2); |
10252 | |
10253 | if ((Src0.isMachineOpcode() && |
10254 | Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) && |
10255 | (Src0 == Src1 || Src0 == Src2)) |
10256 | break; |
10257 | |
10258 | MVT VT = Src0.getValueType().getSimpleVT(); |
10259 | const TargetRegisterClass *RC = |
10260 | getRegClassFor(VT, Src0.getNode()->isDivergent()); |
10261 | |
10262 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); |
10263 | SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT); |
10264 | |
10265 | SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node), |
10266 | UndefReg, Src0, SDValue()); |
10267 | |
10268 | // src0 must be the same register as src1 or src2, even if the value is |
10269 | // undefined, so make sure we don't violate this constraint. |
10270 | if (Src0.isMachineOpcode() && |
10271 | Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) { |
10272 | if (Src1.isMachineOpcode() && |
10273 | Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) |
10274 | Src0 = Src1; |
10275 | else if (Src2.isMachineOpcode() && |
10276 | Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) |
10277 | Src0 = Src2; |
10278 | else { |
10279 | assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
10280 | Src0 = UndefReg; |
10281 | Src1 = UndefReg; |
10282 | } |
10283 | } else |
10284 | break; |
10285 | |
10286 | SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 }; |
10287 | for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I) |
10288 | Ops.push_back(Node->getOperand(I)); |
10289 | |
10290 | Ops.push_back(ImpDef.getValue(1)); |
10291 | return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops); |
10292 | } |
10293 | default: |
10294 | break; |
10295 | } |
10296 | |
10297 | return Node; |
10298 | } |
10299 | |
10300 | /// Assign the register class depending on the number of |
10301 | /// bits set in the writemask |
10302 | void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, |
10303 | SDNode *Node) const { |
10304 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
10305 | |
10306 | MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); |
10307 | |
10308 | if (TII->isVOP3(MI.getOpcode())) { |
10309 | // Make sure constant bus requirements are respected. |
10310 | TII->legalizeOperandsVOP3(MRI, MI); |
10311 | |
10312 | // Prefer VGPRs over AGPRs in mAI instructions where possible. |
10313 | // This saves a chain-copy of registers and better balances register
10314 | // use between vgprs and agprs, as agpr tuples tend to be big.
10315 | if (const MCOperandInfo *OpInfo = MI.getDesc().OpInfo) { |
10316 | unsigned Opc = MI.getOpcode(); |
10317 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
10318 | for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), |
10319 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) { |
10320 | if (I == -1) |
10321 | break; |
10322 | MachineOperand &Op = MI.getOperand(I); |
10323 | if ((OpInfo[I].RegClass != llvm::AMDGPU::AV_64RegClassID && |
10324 | OpInfo[I].RegClass != llvm::AMDGPU::AV_32RegClassID) || |
10325 | !Register::isVirtualRegister(Op.getReg()) || |
10326 | !TRI->isAGPR(MRI, Op.getReg())) |
10327 | continue; |
10328 | auto *Src = MRI.getUniqueVRegDef(Op.getReg()); |
10329 | if (!Src || !Src->isCopy() || |
10330 | !TRI->isSGPRReg(MRI, Src->getOperand(1).getReg())) |
10331 | continue; |
10332 | auto *RC = TRI->getRegClassForReg(MRI, Op.getReg()); |
10333 | auto *NewRC = TRI->getEquivalentVGPRClass(RC); |
10334 | // All uses of agpr64 and agpr32 can also accept vgpr except for |
10335 | // v_accvgpr_read, but we do not produce agpr reads during selection, |
10336 | // so no use checks are needed. |
10337 | MRI.setRegClass(Op.getReg(), NewRC); |
10338 | } |
10339 | } |
10340 | |
10341 | return; |
10342 | } |
10343 | |
10344 | // Replace unused atomics with the no return version. |
10345 | int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode()); |
10346 | if (NoRetAtomicOp != -1) { |
10347 | if (!Node->hasAnyUseOfValue(0)) { |
10348 | MI.setDesc(TII->get(NoRetAtomicOp)); |
10349 | MI.RemoveOperand(0); |
10350 | return; |
10351 | } |
10352 | |
10353 | // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg |
10354 | // instruction, because the return type of these instructions is a vec2 of |
10355 | // the memory type, so it can be tied to the input operand. |
10356 | // This means these instructions always have a use, so we need to add a |
10357 | // special case to check if the atomic has only one extract_subreg use, |
10358 | // which itself has no uses. |
10359 | if ((Node->hasNUsesOfValue(1, 0) && |
10360 | Node->use_begin()->isMachineOpcode() && |
10361 | Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG && |
10362 | !Node->use_begin()->hasAnyUseOfValue(0))) { |
10363 | Register Def = MI.getOperand(0).getReg(); |
10364 | |
10365 | // Change this into a noret atomic. |
10366 | MI.setDesc(TII->get(NoRetAtomicOp)); |
10367 | MI.RemoveOperand(0); |
10368 | |
10369 | // If we only remove the def operand from the atomic instruction, the |
10370 | // extract_subreg will be left with a use of a vreg without a def. |
10371 | // So we need to insert an implicit_def to avoid machine verifier |
10372 | // errors. |
10373 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), |
10374 | TII->get(AMDGPU::IMPLICIT_DEF), Def); |
10375 | } |
10376 | return; |
10377 | } |
10378 | } |
10379 | |
10380 | static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL, |
10381 | uint64_t Val) { |
10382 | SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32); |
10383 | return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); |
10384 | } |
10385 | |
10386 | MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, |
10387 | const SDLoc &DL, |
10388 | SDValue Ptr) const { |
10389 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
10390 | |
10391 | // Build the half of the subregister with the constants before building the |
10392 | // full 128-bit register. If we are building multiple resource descriptors, |
10393 | // this will allow CSEing of the 2-component register. |
10394 | const SDValue Ops0[] = { |
10395 | DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32), |
10396 | buildSMovImm32(DAG, DL, 0), |
10397 | DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), |
10398 | buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), |
10399 | DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32) |
10400 | }; |
10401 | |
10402 | SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, |
10403 | MVT::v2i32, Ops0), 0); |
10404 | |
10405 | // Combine the constants and the pointer. |
10406 | const SDValue Ops1[] = { |
10407 | DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32), |
10408 | Ptr, |
10409 | DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32), |
10410 | SubRegHi, |
10411 | DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32) |
10412 | }; |
10413 | |
10414 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); |
10415 | } |
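// Editor's note: schematically, the node built above is
//   rsrc(v4i32) = { sub0_sub1 = Ptr (64-bit base address),
//                   sub2_sub3 = { 0, getDefaultRsrcDataFormat() >> 32 } }
// Only the constant v2i32 half depends on the target, so building it first
// lets multiple descriptors CSE that subregister, as the comment above says.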
10416 | |
10417 | /// Return a resource descriptor with the 'Add TID' bit enabled.
10418 | /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] |
10419 | /// of the resource descriptor) to create an offset, which is added to |
10420 | /// the resource pointer. |
10421 | MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL, |
10422 | SDValue Ptr, uint32_t RsrcDword1, |
10423 | uint64_t RsrcDword2And3) const { |
10424 | SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr); |
10425 | SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr); |
10426 | if (RsrcDword1) { |
10427 | PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi, |
10428 | DAG.getConstant(RsrcDword1, DL, MVT::i32)), |
10429 | 0); |
10430 | } |
10431 | |
10432 | SDValue DataLo = buildSMovImm32(DAG, DL, |
10433 | RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
10434 | SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32); |
10435 | |
10436 | const SDValue Ops[] = { |
10437 | DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32), |
10438 | PtrLo, |
10439 | DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), |
10440 | PtrHi, |
10441 | DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32), |
10442 | DataLo, |
10443 | DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32), |
10444 | DataHi, |
10445 | DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32) |
10446 | }; |
10447 | |
10448 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); |
10449 | } |
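// Editor's note: dword by dword, the descriptor built above is
//   sub0 = PtrLo, sub1 = PtrHi | RsrcDword1,
//   sub2 = Lo32(RsrcDword2And3), sub3 = Hi32(RsrcDword2And3),
// so the stride field (bits [61:48] of the descriptor) is supplied via the
// RsrcDword1 value OR'd into the pointer's high half.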
10450 | |
10451 | //===----------------------------------------------------------------------===// |
10452 | // SI Inline Assembly Support |
10453 | //===----------------------------------------------------------------------===// |
10454 | |
10455 | std::pair<unsigned, const TargetRegisterClass *> |
10456 | SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, |
10457 | StringRef Constraint, |
10458 | MVT VT) const { |
10459 | const TargetRegisterClass *RC = nullptr; |
10460 | if (Constraint.size() == 1) { |
10461 | switch (Constraint[0]) { |
10462 | default: |
10463 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
10464 | case 's': |
10465 | case 'r': |
10466 | switch (VT.getSizeInBits()) { |
10467 | default: |
10468 | return std::make_pair(0U, nullptr); |
10469 | case 32: |
10470 | case 16: |
10471 | RC = &AMDGPU::SReg_32RegClass; |
10472 | break; |
10473 | case 64: |
10474 | RC = &AMDGPU::SGPR_64RegClass; |
10475 | break; |
10476 | case 96: |
10477 | RC = &AMDGPU::SReg_96RegClass; |
10478 | break; |
10479 | case 128: |
10480 | RC = &AMDGPU::SGPR_128RegClass; |
10481 | break; |
10482 | case 160: |
10483 | RC = &AMDGPU::SReg_160RegClass; |
10484 | break; |
10485 | case 256: |
10486 | RC = &AMDGPU::SReg_256RegClass; |
10487 | break; |
10488 | case 512: |
10489 | RC = &AMDGPU::SReg_512RegClass; |
10490 | break; |
10491 | } |
10492 | break; |
10493 | case 'v': |
10494 | switch (VT.getSizeInBits()) { |
10495 | default: |
10496 | return std::make_pair(0U, nullptr); |
10497 | case 32: |
10498 | case 16: |
10499 | RC = &AMDGPU::VGPR_32RegClass; |
10500 | break; |
10501 | case 64: |
10502 | RC = &AMDGPU::VReg_64RegClass; |
10503 | break; |
10504 | case 96: |
10505 | RC = &AMDGPU::VReg_96RegClass; |
10506 | break; |
10507 | case 128: |
10508 | RC = &AMDGPU::VReg_128RegClass; |
10509 | break; |
10510 | case 160: |
10511 | RC = &AMDGPU::VReg_160RegClass; |
10512 | break; |
10513 | case 256: |
10514 | RC = &AMDGPU::VReg_256RegClass; |
10515 | break; |
10516 | case 512: |
10517 | RC = &AMDGPU::VReg_512RegClass; |
10518 | break; |
10519 | } |
10520 | break; |
10521 | case 'a': |
10522 | if (!Subtarget->hasMAIInsts()) |
10523 | break; |
10524 | switch (VT.getSizeInBits()) { |
10525 | default: |
10526 | return std::make_pair(0U, nullptr); |
10527 | case 32: |
10528 | case 16: |
10529 | RC = &AMDGPU::AGPR_32RegClass; |
10530 | break; |
10531 | case 64: |
10532 | RC = &AMDGPU::AReg_64RegClass; |
10533 | break; |
10534 | case 128: |
10535 | RC = &AMDGPU::AReg_128RegClass; |
10536 | break; |
10537 | case 512: |
10538 | RC = &AMDGPU::AReg_512RegClass; |
10539 | break; |
10540 | case 1024: |
10541 | RC = &AMDGPU::AReg_1024RegClass; |
10542 | // v32 types are not legal but we support them here. |
10543 | return std::make_pair(0U, RC); |
10544 | } |
10545 | break; |
10546 | } |
10547 | // We actually support i128, i16, and f16 as inline parameters
10548 | // even if they are not reported as legal.
10549 | if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 || |
10550 | VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16)) |
10551 | return std::make_pair(0U, RC); |
10552 | } |
10553 | |
10554 | if (Constraint.size() > 1) { |
10555 | if (Constraint[1] == 'v') { |
10556 | RC = &AMDGPU::VGPR_32RegClass; |
10557 | } else if (Constraint[1] == 's') { |
10558 | RC = &AMDGPU::SGPR_32RegClass; |
10559 | } else if (Constraint[1] == 'a') { |
10560 | RC = &AMDGPU::AGPR_32RegClass; |
10561 | } |
10562 | |
10563 | if (RC) { |
10564 | uint32_t Idx; |
10565 | bool Failed = Constraint.substr(2).getAsInteger(10, Idx); |
10566 | if (!Failed && Idx < RC->getNumRegs()) |
10567 | return std::make_pair(RC->getRegister(Idx), RC); |
10568 | } |
10569 | } |
10570 | |
10571 | // FIXME: Returns VS_32 for physical SGPR constraints |
10572 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
10573 | } |
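// Editor's sketch of how these constraints might appear in source (the
// kernel code is hypothetical):
//   float out;
//   asm("v_add_f32 %0, %1, %2" : "=v"(out) : "v"(a), "s"(b));
// 'v' selects a VGPR class and 's' an SGPR class sized by the operand type;
// 'a' does the same for AGPRs on subtargets with MAI instructions.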
10574 | |
10575 | SITargetLowering::ConstraintType |
10576 | SITargetLowering::getConstraintType(StringRef Constraint) const { |
10577 | if (Constraint.size() == 1) { |
10578 | switch (Constraint[0]) { |
10579 | default: break; |
10580 | case 's': |
10581 | case 'v': |
10582 | case 'a': |
10583 | return C_RegisterClass; |
10584 | } |
10585 | } |
10586 | return TargetLowering::getConstraintType(Constraint); |
10587 | } |
10588 | |
10589 | // Figure out which registers should be reserved for stack access. Only after |
10590 | // the function is legalized do we know all of the non-spill stack objects or if |
10591 | // calls are present. |
10592 | void SITargetLowering::finalizeLowering(MachineFunction &MF) const { |
10593 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
10594 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
10595 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
10596 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
10597 | |
10598 | if (Info->isEntryFunction()) { |
10599 | // Callable functions have fixed registers used for stack access; only
10600 | // entry functions need their stack-access registers reserved here.
10600 | reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info); |
10601 | } |
10602 | |
10603 | assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
10604 |        Info->getStackPtrOffsetReg()));
10605 | if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG) |
10606 | MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg()); |
10607 | |
10608 | // We need to worry about replacing the default register with itself in case |
10609 | // of MIR testcases missing the MFI. |
10610 | if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG) |
10611 | MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg()); |
10612 | |
10613 | if (Info->getFrameOffsetReg() != AMDGPU::FP_REG) |
10614 | MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg()); |
10615 | |
10616 | if (Info->getScratchWaveOffsetReg() != AMDGPU::SCRATCH_WAVE_OFFSET_REG) { |
10617 | MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG, |
10618 | Info->getScratchWaveOffsetReg()); |
10619 | } |
10620 | |
10621 | Info->limitOccupancy(MF); |
10622 | |
10623 | if (ST.isWave32() && !MF.empty()) { |
10624 | // Add a VCC_HI def because many instructions are marked as implicitly
10625 | // using VCC, but we may only define VCC_LO. If nothing defines VCC_HI
10626 | // we may end up with a use of undef.
10627 | |
10628 | const SIInstrInfo *TII = ST.getInstrInfo(); |
10629 | DebugLoc DL; |
10630 | |
10631 | MachineBasicBlock &MBB = MF.front(); |
10632 | MachineBasicBlock::iterator I = MBB.getFirstNonDebugInstr(); |
10633 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), AMDGPU::VCC_HI); |
10634 | |
10635 | for (auto &MBB : MF) { |
10636 | for (auto &MI : MBB) { |
10637 | TII->fixImplicitOperands(MI); |
10638 | } |
10639 | } |
10640 | } |
10641 | |
10642 | TargetLoweringBase::finalizeLowering(MF); |
10643 | } |
10644 | |
10645 | void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op, |
10646 | KnownBits &Known, |
10647 | const APInt &DemandedElts, |
10648 | const SelectionDAG &DAG, |
10649 | unsigned Depth) const { |
10650 | TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts, |
10651 | DAG, Depth); |
10652 | |
10653 | // Set the high bits to zero based on the maximum allowed scratch size per |
10654 | // wave. We can't use vaddr in MUBUF instructions if we don't know the address |
10655 | // calculation won't overflow, so assume the sign bit is never set. |
10656 | Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex()); |
10657 | } |
10658 | |
10659 | Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { |
10660 | const Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML); |
10661 | const Align CacheLineAlign = Align(64); |
10662 | |
10663 | // Pre-GFX10 targets did not benefit from loop alignment.
10664 | if (!ML || DisableLoopAlignment || |
10665 | (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) || |
10666 | getSubtarget()->hasInstFwdPrefetchBug()) |
10667 | return PrefAlign; |
10668 | |
10669 | // On GFX10 the I$ consists of 4 x 64-byte cache lines.
10670 | // By default the prefetcher keeps one cache line behind and reads two
10671 | // ahead. We can modify it with S_INST_PREFETCH so that larger loops
10672 | // keep two lines behind and one ahead.
10673 | // Therefore we can benefit from aligning loop headers if the loop fits
10674 | // in 192 bytes. If a loop fits in 64 bytes it always spans no more
10675 | // than two cache lines and does not need alignment.
10676 | // Else, if the loop is at most 128 bytes, we do not need to modify the
10677 | // prefetch; else, if it is at most 192 bytes, we need two lines behind.
10678 | |
10679 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
10680 | const MachineBasicBlock *Header = ML->getHeader(); |
10681 | if (Header->getAlignment() != PrefAlign) |
10682 | return Header->getAlignment(); // Already processed. |
10683 | |
10684 | unsigned LoopSize = 0; |
10685 | for (const MachineBasicBlock *MBB : ML->blocks()) { |
10686 | // If an inner loop block is aligned, assume on average half of the
10687 | // alignment size is added as nops.
10688 | if (MBB != Header) |
10689 | LoopSize += MBB->getAlignment().value() / 2; |
10690 | |
10691 | for (const MachineInstr &MI : *MBB) { |
10692 | LoopSize += TII->getInstSizeInBytes(MI); |
10693 | if (LoopSize > 192) |
10694 | return PrefAlign; |
10695 | } |
10696 | } |
10697 | |
10698 | if (LoopSize <= 64) |
10699 | return PrefAlign; |
10700 | |
10701 | if (LoopSize <= 128) |
10702 | return CacheLineAlign; |
10703 | |
10704 | // If any parent loop is surrounded by prefetch instructions, do not
10705 | // insert new ones for the inner loop; that would reset the parent's settings.
10706 | for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) { |
10707 | if (MachineBasicBlock *Exit = P->getExitBlock()) { |
10708 | auto I = Exit->getFirstNonDebugInstr(); |
10709 | if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH) |
10710 | return CacheLineAlign; |
10711 | } |
10712 | } |
10713 | |
10714 | MachineBasicBlock *Pre = ML->getLoopPreheader(); |
10715 | MachineBasicBlock *Exit = ML->getExitBlock(); |
10716 | |
10717 | if (Pre && Exit) { |
10718 | BuildMI(*Pre, Pre->getFirstTerminator(), DebugLoc(), |
10719 | TII->get(AMDGPU::S_INST_PREFETCH)) |
10720 | .addImm(1); // prefetch 2 lines behind PC |
10721 | |
10722 | BuildMI(*Exit, Exit->getFirstNonDebugInstr(), DebugLoc(), |
10723 | TII->get(AMDGPU::S_INST_PREFETCH)) |
10724 | .addImm(2); // prefetch 1 line behind PC |
10725 | } |
10726 | |
10727 | return CacheLineAlign; |
10728 | } |
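// Editor's worked example of the heuristic above (loop sizes hypothetical):
//   a 60-byte loop  -> PrefAlign: it fits in two cache lines regardless.
//   a 100-byte loop -> 64-byte alignment, default prefetch settings kept.
//   a 160-byte loop -> 64-byte alignment plus S_INST_PREFETCH toggles in the
//                      preheader and exit block.
//   a 250-byte loop -> PrefAlign: too large for the 192-byte window.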
10729 | |
10730 | LLVM_ATTRIBUTE_UNUSED
10731 | static bool isCopyFromRegOfInlineAsm(const SDNode *N) { |
10732 | assert(N->getOpcode() == ISD::CopyFromReg);
10733 | do { |
10734 | // Follow the chain until we find an INLINEASM node. |
10735 | N = N->getOperand(0).getNode(); |
10736 | if (N->getOpcode() == ISD::INLINEASM || |
10737 | N->getOpcode() == ISD::INLINEASM_BR) |
10738 | return true; |
10739 | } while (N->getOpcode() == ISD::CopyFromReg); |
10740 | return false; |
10741 | } |
10742 | |
10743 | bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode * N, |
10744 | FunctionLoweringInfo * FLI, LegacyDivergenceAnalysis * KDA) const |
10745 | { |
10746 | switch (N->getOpcode()) { |
10747 | case ISD::CopyFromReg: |
10748 | { |
10749 | const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1)); |
10750 | const MachineFunction * MF = FLI->MF; |
10751 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
10752 | const MachineRegisterInfo &MRI = MF->getRegInfo(); |
10753 | const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo(); |
10754 | unsigned Reg = R->getReg(); |
10755 | if (Register::isPhysicalRegister(Reg)) |
10756 | return !TRI.isSGPRReg(MRI, Reg); |
10757 | |
10758 | if (MRI.isLiveIn(Reg)) { |
10759 | // workitem.id.x workitem.id.y workitem.id.z |
10760 | // Any VGPR formal argument is also considered divergent |
10761 | if (!TRI.isSGPRReg(MRI, Reg)) |
10762 | return true; |
10763 | // Formal arguments of non-entry functions |
10764 | // are conservatively considered divergent |
10765 | else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv())) |
10766 | return true; |
10767 | return false; |
10768 | } |
10769 | const Value *V = FLI->getValueFromVirtualReg(Reg); |
10770 | if (V) |
10771 | return KDA->isDivergent(V); |
10772 | assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
10773 | return !TRI.isSGPRReg(MRI, Reg); |
10774 | } |
10776 | case ISD::LOAD: { |
10777 | const LoadSDNode *L = cast<LoadSDNode>(N); |
10778 | unsigned AS = L->getAddressSpace(); |
10779 | // A flat load may access private memory. |
10780 | return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS; |
10781 | }
10782 | case ISD::CALLSEQ_END:
10783 | return true;
10784 | case ISD::INTRINSIC_WO_CHAIN:
10785 | return AMDGPU::isIntrinsicSourceOfDivergence(
10786 | cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
10791 | case ISD::INTRINSIC_W_CHAIN: |
10792 | return AMDGPU::isIntrinsicSourceOfDivergence( |
10793 | cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()); |
10794 | } |
10795 | return false; |
10796 | } |
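// Editor's note on the cases above: a CopyFromReg of a VGPR live-in (e.g. a
// workitem id) is divergent, a flat or private load is divergent because
// flat addressing may reach per-lane private memory, and call results
// (CALLSEQ_END) are conservatively divergent; SGPR copies and unlisted
// opcodes report uniform.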
10797 | |
10798 | bool SITargetLowering::denormalsEnabledForType(const SelectionDAG &DAG, |
10799 | EVT VT) const { |
10800 | switch (VT.getScalarType().getSimpleVT().SimpleTy) { |
10801 | case MVT::f32: |
10802 | return hasFP32Denormals(DAG.getMachineFunction()); |
10803 | case MVT::f64: |
10804 | case MVT::f16: |
10805 | return hasFP64FP16Denormals(DAG.getMachineFunction()); |
10806 | default: |
10807 | return false; |
10808 | } |
10809 | } |
10810 | |
10811 | bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, |
10812 | const SelectionDAG &DAG, |
10813 | bool SNaN, |
10814 | unsigned Depth) const { |
10815 | if (Op.getOpcode() == AMDGPUISD::CLAMP) { |
10816 | const MachineFunction &MF = DAG.getMachineFunction(); |
10817 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
10818 | |
10819 | if (Info->getMode().DX10Clamp) |
10820 | return true; // Clamped to 0. |
10821 | return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); |
10822 | } |
10823 | |
10824 | return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG, |
10825 | SNaN, Depth); |
10826 | } |
10827 | |
10828 | TargetLowering::AtomicExpansionKind |
10829 | SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const { |
10830 | switch (RMW->getOperation()) { |
10831 | case AtomicRMWInst::FAdd: { |
10832 | Type *Ty = RMW->getType(); |
10833 | |
10834 | // We don't have a way to support 16-bit atomics now, so just leave them |
10835 | // as-is. |
10836 | if (Ty->isHalfTy()) |
10837 | return AtomicExpansionKind::None; |
10838 | |
10839 | if (!Ty->isFloatTy()) |
10840 | return AtomicExpansionKind::CmpXChg; |
10841 | |
10842 | // TODO: Do have these for flat. Older targets also had them for buffers. |
10843 | unsigned AS = RMW->getPointerAddressSpace(); |
10844 | |
10845 | if (AS == AMDGPUAS::GLOBAL_ADDRESS && Subtarget->hasAtomicFaddInsts()) { |
10846 | return RMW->use_empty() ? AtomicExpansionKind::None : |
10847 | AtomicExpansionKind::CmpXChg; |
10848 | } |
10849 | |
10850 | return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ? |
10851 | AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg; |
10852 | } |
10853 | default: |
10854 | break; |
10855 | } |
10856 | |
10857 | return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW); |
10858 | } |
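// Editor's sketch in IR terms, assuming a subtarget with LDS FP atomics:
//   %old = atomicrmw fadd float addrspace(3)* %p, float %v monotonic
// stays intact for selection (AtomicExpansionKind::None), while the same
// fadd through a flat pointer is lowered by AtomicExpandPass into a
// cmpxchg loop (AtomicExpansionKind::CmpXChg).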
10859 | |
10860 | const TargetRegisterClass * |
10861 | SITargetLowering::getRegClassFor(MVT VT, bool isDivergent) const { |
10862 | const TargetRegisterClass *RC = TargetLoweringBase::getRegClassFor(VT, false); |
10863 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
10864 | if (RC == &AMDGPU::VReg_1RegClass && !isDivergent) |
10865 | return Subtarget->getWavefrontSize() == 64 ? &AMDGPU::SReg_64RegClass |
10866 | : &AMDGPU::SReg_32RegClass; |
10867 | if (!TRI->isSGPRClass(RC) && !isDivergent) |
10868 | return TRI->getEquivalentSGPRClass(RC); |
10869 | else if (TRI->isSGPRClass(RC) && isDivergent) |
10870 | return TRI->getEquivalentVGPRClass(RC); |
10871 | |
10872 | return RC; |
10873 | } |
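// Editor's note: the interesting case above is VReg_1, the selection-time
// stand-in for i1. A uniform i1 becomes a wave-wide mask register, SReg_64
// in wave64 or SReg_32 in wave32; otherwise the class is flipped to its
// SGPR or VGPR equivalent to match the value's divergence.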
10874 | |
10875 | static bool hasCFUser(const Value *V, SmallPtrSet<const Value *, 16> &Visited, |
10876 | unsigned WaveSize) { |
10877 | // FIXME: We assume we never cast the mask results of a control flow
10878 | // intrinsic.
10879 | // Early exit if the type won't be consistent as a compile time hack. |
10880 | IntegerType *IT = dyn_cast<IntegerType>(V->getType()); |
10881 | if (!IT || IT->getBitWidth() != WaveSize) |
10882 | return false; |
10883 | |
10884 | if (!isa<Instruction>(V)) |
10885 | return false; |
10886 | if (!Visited.insert(V).second) |
10887 | return false; |
10888 | bool Result = false; |
10889 | for (auto U : V->users()) { |
10890 | if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(U)) { |
10891 | if (V == U->getOperand(1)) { |
10892 | switch (Intrinsic->getIntrinsicID()) { |
10893 | default: |
10894 | Result = false; |
10895 | break; |
10896 | case Intrinsic::amdgcn_if_break: |
10897 | case Intrinsic::amdgcn_if: |
10898 | case Intrinsic::amdgcn_else: |
10899 | Result = true; |
10900 | break; |
10901 | } |
10902 | } |
10903 | if (V == U->getOperand(0)) { |
10904 | switch (Intrinsic->getIntrinsicID()) { |
10905 | default: |
10906 | Result = false; |
10907 | break; |
10908 | case Intrinsic::amdgcn_end_cf: |
10909 | case Intrinsic::amdgcn_loop: |
10910 | Result = true; |
10911 | break; |
10912 | } |
10913 | } |
10914 | } else { |
10915 | Result = hasCFUser(U, Visited, WaveSize); |
10916 | } |
10917 | if (Result) |
10918 | break; |
10919 | } |
10920 | return Result; |
10921 | } |
10922 | |
10923 | bool SITargetLowering::requiresUniformRegister(MachineFunction &MF, |
10924 | const Value *V) const { |
10925 | if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) { |
10926 | switch (Intrinsic->getIntrinsicID()) { |
10927 | default: |
10928 | return false; |
10929 | case Intrinsic::amdgcn_if_break: |
10930 | return true; |
10931 | } |
10932 | } |
10933 | if (const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V)) { |
10934 | if (const IntrinsicInst *Intrinsic = |
10935 | dyn_cast<IntrinsicInst>(ExtValue->getOperand(0))) { |
10936 | switch (Intrinsic->getIntrinsicID()) { |
10937 | default: |
10938 | return false; |
10939 | case Intrinsic::amdgcn_if: |
10940 | case Intrinsic::amdgcn_else: { |
10941 | ArrayRef<unsigned> Indices = ExtValue->getIndices(); |
10942 | if (Indices.size() == 1 && Indices[0] == 1) { |
10943 | return true; |
10944 | } |
10945 | } |
10946 | } |
10947 | } |
10948 | } |
10949 | if (const CallInst *CI = dyn_cast<CallInst>(V)) { |
10950 | if (isa<InlineAsm>(CI->getCalledValue())) { |
10951 | const SIRegisterInfo *SIRI = Subtarget->getRegisterInfo(); |
10952 | ImmutableCallSite CS(CI); |
10953 | TargetLowering::AsmOperandInfoVector TargetConstraints = ParseConstraints( |
10954 | MF.getDataLayout(), Subtarget->getRegisterInfo(), CS); |
10955 | for (auto &TC : TargetConstraints) { |
10956 | if (TC.Type == InlineAsm::isOutput) { |
10957 | ComputeConstraintToUse(TC, SDValue()); |
10958 | unsigned AssignedReg; |
10959 | const TargetRegisterClass *RC; |
10960 | std::tie(AssignedReg, RC) = getRegForInlineAsmConstraint( |
10961 | SIRI, TC.ConstraintCode, TC.ConstraintVT); |
10962 | if (RC) { |
10963 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
10964 | if (AssignedReg != 0 && SIRI->isSGPRReg(MRI, AssignedReg)) |
10965 | return true; |
10966 | else if (SIRI->isSGPRClass(RC)) |
10967 | return true; |
10968 | } |
10969 | } |
10970 | } |
10971 | } |
10972 | } |
10973 | SmallPtrSet<const Value *, 16> Visited; |
10974 | return hasCFUser(V, Visited, Subtarget->getWavefrontSize()); |
10975 | } |