File: | tools/polly/lib/External/ppcg/gpu.c |
Warning: | line 5382, column 2 Value stored to 'space2' is never read |
1 | /* |
2 | * Copyright 2010-2011 INRIA Saclay |
3 | * Copyright 2012-2013 Ecole Normale Superieure |
4 | * Copyright 2015-2016 Sven Verdoolaege |
5 | * |
6 | * Use of this software is governed by the MIT license |
7 | * |
8 | * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France, |
9 | * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod, |
10 | * 91893 Orsay, France |
11 | * and Ecole Normale Superieure, 45 rue d’Ulm, 75230 Paris, France |
12 | */ |
13 | |
14 | #include <assert.h> |
15 | #include <stdlib.h> |
16 | #include <string.h> |
17 | |
18 | #include <isl/polynomial.h> |
19 | #include <isl/union_set.h> |
20 | #include <isl/aff.h> |
21 | #include <isl/ilp.h> |
22 | #include <isl/flow.h> |
23 | #include <isl/schedule.h> |
24 | #include <isl/schedule_node.h> |
25 | #include <isl/options.h> |
26 | #include <isl/ast_build.h> |
27 | |
28 | #include "cpu.h" |
29 | #include "gpu.h" |
30 | #include "gpu_array_tile.h" |
31 | #include "gpu_group.h" |
32 | #include "gpu_hybrid.h" |
33 | #include "gpu_tree.h" |
34 | #include "hybrid.h" |
35 | #include "schedule.h" |
36 | #include "ppcg_options.h" |
37 | #include "print.h" |
38 | #include "util.h" |
39 | |
40 | struct gpu_array_info; |
41 | |
42 | /* Return the name of the outer array (of structs) accessed by "access". |
43 | */ |
44 | static const char *get_outer_array_name(__isl_keep isl_map *access) |
45 | { |
46 | isl_space *space; |
47 | const char *name; |
48 | |
49 | space = isl_space_range(isl_map_get_space(access)); |
50 | while (space && isl_space_is_wrapping(space)) |
51 | space = isl_space_domain(isl_space_unwrap(space)); |
52 | name = isl_space_get_tuple_name(space, isl_dim_set); |
53 | isl_space_free(space); |
54 | |
55 | return name; |
56 | } |
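
For illustration only (not part of gpu.c): a hypothetical driver, with invented names S, A, m and demo_outer_array_name, showing what get_outer_array_name computes for a member access, assuming an isl_ctx is available.

    #include <stdio.h>
    #include <isl/map.h>

    static void demo_outer_array_name(isl_ctx *ctx)
    {
        /* Access to member m of the array of structs A; the range
         * [A[i] -> m[]] is a wrapped map. */
        isl_map *access = isl_map_read_from_str(ctx,
            "{ S[i] -> [A[i] -> m[]] }");
        /* The loop above unwraps the range and keeps its domain, peeling
         * off the member and leaving the outer array space A. */
        printf("%s\n", get_outer_array_name(access));    /* prints "A" */
        isl_map_free(access);
    }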
57 | |
58 | /* Collect all references to the given array and store pointers to them |
59 | * in array->refs. |
60 | */ |
61 | void collect_references(struct gpu_prog *prog, |
62 | struct gpu_array_info *array) |
63 | { |
64 | int i; |
65 | int n; |
66 | |
67 | n = 0; |
68 | for (i = 0; i < prog->n_stmts; ++i) { |
69 | struct gpu_stmt *stmt = &prog->stmts[i]; |
70 | struct gpu_stmt_access *access; |
71 | |
72 | for (access = stmt->accesses; access; access = access->next) { |
73 | const char *name; |
74 | name = get_outer_array_name(access->access); |
75 | if (name && !strcmp(array->name, name)) |
76 | n++; |
77 | } |
78 | } |
79 | |
80 | array->n_ref = n; |
81 | array->refs = isl_alloc_array(prog->ctx, struct gpu_stmt_access *, n); |
82 | assert(array->refs); |
83 | |
84 | n = 0; |
85 | for (i = 0; i < prog->n_stmts; ++i) { |
86 | struct gpu_stmt *stmt = &prog->stmts[i]; |
87 | struct gpu_stmt_access *access; |
88 | |
89 | for (access = stmt->accesses; access; access = access->next) { |
90 | const char *name; |
91 | name = get_outer_array_name(access->access); |
92 | if (!name || strcmp(array->name, name)) |
93 | continue; |
94 | |
95 | array->refs[n++] = access; |
96 | } |
97 | } |
98 | } |
99 | |
100 | /* Compute and return the extent of "array", taking into account the set of |
101 | * accessed elements. |
102 | * |
103 | * In particular, the extent in the outer dimension is taken |
104 | * from "accessed", while the extents in the remaining dimensions |
105 | * are taken from array->extent. |
106 | * |
107 | * The extent in the outer dimension cannot be taken from array->extent |
108 | * because that may be unbounded. Furthermore, even if it is bounded, |
109 | * it may be larger than the piece of the array that is being accessed. |
110 | */ |
111 | static __isl_give isl_set *compute_extent(struct pet_array *array, |
112 | __isl_keep isl_set *accessed) |
113 | { |
114 | int n_index; |
115 | isl_id *id; |
116 | isl_set *outer; |
117 | isl_set *extent; |
118 | |
119 | extent = isl_set_copy(array->extent); |
120 | |
121 | n_index = isl_set_dim(accessed, isl_dim_set); |
122 | if (n_index == 0) |
123 | return extent; |
124 | |
125 | extent = isl_set_project_out(extent, isl_dim_set, 0, 1); |
126 | outer = isl_set_copy(accessed); |
127 | outer = isl_set_project_out(outer, isl_dim_set, 1, n_index - 1); |
128 | extent = isl_set_flat_product(outer, extent); |
129 | id = isl_set_get_tuple_id(accessed); |
130 | extent = isl_set_set_tuple_id(extent, id); |
131 | |
132 | return extent; |
133 | } |
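
A sketch of the same computation on concrete sets (all values invented): the outer bound is taken from the accessed elements, the inner bounds from the declared extent.

    #include <isl/set.h>

    static void demo_compute_extent(isl_ctx *ctx)
    {
        isl_set *extent = isl_set_read_from_str(ctx,
            "{ A[i, j] : 0 <= j < 64 }");        /* i unbounded */
        isl_set *accessed = isl_set_read_from_str(ctx,
            "{ A[i, j] : 0 <= i < 16 and 0 <= j < 32 }");
        isl_set *outer = isl_set_project_out(accessed,
            isl_dim_set, 1, 1);                  /* { A[i] : 0 <= i < 16 } */
        extent = isl_set_project_out(extent, isl_dim_set, 0, 1);
        extent = isl_set_flat_product(outer, extent);
        /* { [i, j] : 0 <= i < 16 and 0 <= j < 64 }: the inner bound (64)
         * comes from the declaration, not from the accesses (32), and
         * the tuple id still has to be reattached, as above. */
        isl_set_dump(extent);
        isl_set_free(extent);
    }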
134 | |
135 | /* Is the array "array" being extracted a read-only scalar? |
136 | * |
137 | * That is, is "array" a scalar that is never possibly written to. |
138 | * An array containing structures is never considered to be a scalar. |
139 | */ |
140 | static int is_read_only_scalar(struct gpu_array_info *array, |
141 | struct gpu_prog *prog) |
142 | { |
143 | isl_set *space; |
144 | isl_union_map *write; |
145 | int empty; |
146 | |
147 | if (array->has_compound_element) |
148 | return 0; |
149 | if (array->n_index != 0) |
150 | return 0; |
151 | |
152 | write = isl_union_map_copy(prog->may_write); |
153 | space = isl_set_universe(isl_space_copy(array->space)); |
154 | write = isl_union_map_intersect_range(write, |
155 | isl_union_set_from_set(space)); |
156 | empty = isl_union_map_is_empty(write); |
157 | isl_union_map_free(write); |
158 | |
159 | return empty; |
160 | } |
161 | |
162 | /* Is "array" only accessed as individual, fixed elements? |
163 | * That is, does each access to "array" access a single, fixed element? |
164 | */ |
165 | isl_bool only_fixed_element_accessed(struct gpu_array_info *array) |
166 | { |
167 | int i; |
168 | |
169 | for (i = 0; i < array->n_ref; ++i) |
170 | if (!array->refs[i]->fixed_element) |
171 | return isl_bool_false; |
172 | |
173 | return isl_bool_true; |
174 | } |
175 | |
176 | /* Compute bounds on the host array "pa" based on the corresponding |
177 | * accessed elements in "arrays" |
178 | * and collect all references to the array. |
179 | * Store the results in "info". |
180 | * |
181 | * If the array is zero-dimensional and does not contain structures, |
182 | * i.e., if the array is a scalar, we check whether it is read-only. |
183 | * We also check whether the array is accessed at all. |
184 | */ |
185 | static int extract_array_info(struct gpu_prog *prog, |
186 | struct gpu_array_info *info, struct pet_array *pa, |
187 | __isl_keep isl_union_set *arrays) |
188 | { |
189 | int empty; |
190 | const char *name; |
191 | int n_index; |
192 | isl_multi_pw_aff *bounds; |
193 | isl_set *accessed, *extent; |
194 | |
195 | n_index = isl_set_dim(pa->extent, isl_dim_set); |
196 | name = isl_set_get_tuple_name(pa->extent); |
197 | |
198 | info->space = isl_set_get_space(pa->extent); |
199 | info->name = strdup(name); |
200 | info->n_index = n_index; |
201 | info->linearize = prog->scop->options->linearize_device_arrays; |
202 | |
203 | info->type = strdup(pa->element_type); |
204 | info->size = pa->element_size; |
205 | info->local = pa->declared && !pa->exposed; |
206 | info->has_compound_element = pa->element_is_record; |
207 | info->read_only_scalar = is_read_only_scalar(info, prog); |
208 | |
209 | info->declared_extent = isl_set_copy(pa->extent); |
210 | accessed = isl_union_set_extract_set(arrays, |
211 | isl_space_copy(info->space)); |
212 | empty = isl_set_is_empty(accessed); |
213 | extent = compute_extent(pa, accessed); |
214 | isl_set_free(accessed); |
215 | info->extent = extent; |
216 | if (empty < 0) |
217 | return -1; |
218 | info->accessed = !empty; |
219 | bounds = ppcg_size_from_extent(isl_set_copy(extent)); |
220 | bounds = isl_multi_pw_aff_gist(bounds, isl_set_copy(prog->context)); |
221 | if (!bounds) |
222 | return -1; |
223 | if (!isl_multi_pw_aff_is_cst(bounds)) |
224 | info->linearize = 1; |
225 | info->bound = bounds; |
226 | |
227 | collect_references(prog, info); |
228 | info->only_fixed_element = only_fixed_element_accessed(info); |
229 | |
230 | return 0; |
231 | } |
232 | |
233 | /* Remove independence from the order constraints "order" on array "array". |
234 | * Since the pairs of iterations in the filter relation of an independence |
235 | * are guaranteed to be completely independent by the user, there is |
236 | * no need to ensure that live ranges are ordered along those pairs. |
237 | * We make an exception for local variables, though, as the independence |
238 | * guarantee does not apply to those. |
239 | * |
240 | * The order constraints are used in two places. |
241 | * Those on scalars are used in check_scalar_live_ranges to check if |
242 | * we need to force the scalar to be private. Any non-local scalar |
244 | should not be forced to be private if it only appears in independent loops. |
244 | * Those on non-scalars are added to the coincidence constraints |
245 | * in compute_schedule because we do not support any array expansion. |
246 | * Accesses to non-local arrays should not prevent a loop from being |
247 | * considered coincident so we should indeed remove those constraints |
248 | * from the order constraints. |
249 | */ |
250 | static __isl_give isl_union_map *remove_independences(struct gpu_prog *prog, |
251 | struct gpu_array_info *array, __isl_take isl_union_map *order) |
252 | { |
253 | // We do not have independence information in Polly. Hence, make this |
254 | // function a no-op; the original ppcg code below is intentionally unreachable. |
255 | return order; |
256 | int i; |
257 | |
258 | for (i = 0; i < prog->scop->pet->n_independence; ++i) { |
259 | struct pet_independence *pi = prog->scop->pet->independences[i]; |
260 | if (isl_union_set_contains(pi->local, array->space)) |
261 | continue; |
262 | |
263 | order = isl_union_map_subtract(order, |
264 | isl_union_map_copy(pi->filter)); |
265 | } |
266 | |
267 | return order; |
268 | } |
269 | |
270 | /* For each array in "prog", store the (untagged) order dependences |
271 | * derived from the array in array->dep_order. |
272 | * In particular, consider all references that access the given array |
273 | * and take the order dependences that have one of these references |
274 | * as source. (Since an order dependence relates two references to |
275 | * the same array, the target of these order dependences will also |
276 | * be one of these references.) |
277 | * Additionally, store the union of these array->dep_order relations |
278 | * for all arrays that cannot be mapped to private memory in prog->array_order. |
279 | */ |
280 | void collect_order_dependences(struct gpu_prog *prog) |
281 | { |
282 | int i; |
283 | isl_space *space; |
284 | isl_union_map *accesses; |
285 | |
286 | space = isl_union_map_get_space(prog->read); |
287 | prog->array_order = isl_union_map_empty(space); |
288 | |
289 | accesses = isl_union_map_copy(prog->scop->tagged_reads); |
290 | accesses = isl_union_map_union(accesses, |
291 | isl_union_map_copy(prog->scop->tagged_may_writes)); |
292 | accesses = isl_union_map_universe(accesses); |
293 | accesses = isl_union_map_apply_range(accesses, |
294 | isl_union_map_copy(prog->to_outer)); |
295 | |
296 | for (i = 0; i < prog->n_array; ++i) { |
297 | struct gpu_array_info *array = &prog->array[i]; |
298 | isl_set *set; |
299 | isl_union_set *uset; |
300 | isl_union_map *order; |
301 | |
302 | set = isl_set_universe(isl_space_copy(array->space)); |
303 | uset = isl_union_set_from_set(set); |
304 | uset = isl_union_map_domain( |
305 | isl_union_map_intersect_range(isl_union_map_copy(accesses), |
306 | uset)); |
307 | order = isl_union_map_copy(prog->scop->tagged_dep_order); |
308 | order = isl_union_map_intersect_domain(order, uset); |
309 | order = isl_union_map_zip(order); |
310 | order = isl_union_set_unwrap(isl_union_map_domain(order)); |
311 | order = remove_independences(prog, array, order); |
312 | array->dep_order = order; |
313 | |
314 | if (gpu_array_can_be_private(array)) |
315 | continue; |
316 | |
317 | prog->array_order = isl_union_map_union(prog->array_order, |
318 | isl_union_map_copy(array->dep_order)); |
319 | } |
320 | |
321 | isl_union_map_free(accesses); |
322 | } |
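
The zip/unwrap pair above is what strips the reference tags. A hypothetical example with invented statement and reference names:

    #include <isl/union_map.h>

    static void demo_untag_order(isl_ctx *ctx)
    {
        /* A tagged order dependence between two references to one array. */
        isl_union_map *order = isl_union_map_read_from_str(ctx,
            "{ [S0[i] -> ref0[]] -> [S1[i] -> ref1[]] }");
        /* zip: { [S0[i] -> S1[i]] -> [ref0[] -> ref1[]] } */
        order = isl_union_map_zip(order);
        /* domain + unwrap drop the tags: { S0[i] -> S1[i] } */
        order = isl_union_set_unwrap(isl_union_map_domain(order));
        isl_union_map_dump(order);
        isl_union_map_free(order);
    }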
323 | |
324 | /* Construct a gpu_array_info for each array referenced by prog->scop and |
325 | * collect them in prog->array. |
326 | * |
327 | * The sizes are based on the extents and the set of possibly accessed |
328 | * elements by "prog". |
329 | * If there are any member accesses involved, then they are first mapped |
330 | * to the outer arrays of structs. |
331 | * Only extract gpu_array_info entries for these outer arrays. |
332 | * |
333 | * If we are allowing live range reordering, then also set |
334 | * the dep_order field. Otherwise leave it NULL. |
335 | */ |
336 | static int collect_array_info(struct gpu_prog *prog) |
337 | { |
338 | int i; |
339 | int r = 0; |
340 | isl_union_set *arrays; |
341 | |
342 | arrays = isl_union_map_range(isl_union_map_copy(prog->read)); |
343 | arrays = isl_union_set_union(arrays, |
344 | isl_union_map_range(isl_union_map_copy(prog->may_write))); |
345 | |
346 | arrays = isl_union_set_apply(arrays, |
347 | isl_union_map_copy(prog->to_outer)); |
348 | |
349 | arrays = isl_union_set_coalesce(arrays); |
350 | |
351 | prog->n_array = prog->scop->pet->n_array; |
352 | prog->array = isl_calloc_array(prog->ctx, |
353 | struct gpu_array_info, prog->n_array); |
354 | assert(prog->array); |
355 | prog->n_array = 0; |
356 | for (i = 0; i < prog->scop->pet->n_array; ++i) { |
357 | isl_bool field; |
358 | |
359 | field = isl_set_is_wrapping(prog->scop->pet->arrays[i]->extent); |
360 | if (field < 0) |
361 | break; |
362 | if (field) |
363 | continue; |
364 | if (extract_array_info(prog, &prog->array[prog->n_array++], |
365 | prog->scop->pet->arrays[i], arrays) < 0) |
366 | r = -1; |
367 | } |
368 | if (i < prog->scop->pet->n_array) |
369 | r = -1; |
370 | |
371 | isl_union_set_free(arrays); |
372 | |
373 | if (prog->scop->options->live_range_reordering) |
374 | collect_order_dependences(prog); |
375 | |
376 | return r; |
377 | } |
378 | |
379 | static void free_array_info(struct gpu_prog *prog) |
380 | { |
381 | int i; |
382 | |
383 | for (i = 0; i < prog->n_array; ++i) { |
384 | free(prog->array[i].type); |
385 | free(prog->array[i].name); |
386 | isl_multi_pw_aff_free(prog->array[i].bound); |
387 | isl_ast_expr_free(prog->array[i].bound_expr); |
388 | isl_space_free(prog->array[i].space); |
389 | isl_set_free(prog->array[i].declared_extent); |
390 | isl_set_free(prog->array[i].extent); |
391 | isl_ast_expr_free(prog->array[i].declared_size); |
392 | free(prog->array[i].refs); |
393 | isl_union_map_free(prog->array[i].dep_order); |
394 | } |
395 | free(prog->array); |
396 | } |
397 | |
398 | /* Check if a gpu array is a scalar. A scalar is a value that is not stored |
399 | * as an array or through a pointer reference, but as a single data element. |
400 | * At the moment, scalars are represented as zero-dimensional arrays. |
401 | * Note that the single data element may be an entire structure. |
402 | */ |
403 | int gpu_array_is_scalar(struct gpu_array_info *array) |
404 | { |
405 | return array->n_index == 0; |
406 | } |
407 | |
408 | /* Can "array" be mapped to private memory? |
409 | * That is, is it only accessed as individual elements with |
410 | * constant index expressions? |
411 | */ |
412 | isl_bool gpu_array_can_be_private(struct gpu_array_info *array) |
413 | { |
414 | if (!array) |
415 | return isl_bool_error; |
416 | return array->only_fixed_element; |
417 | } |
418 | |
419 | /* Is "array" a read-only scalar? |
420 | */ |
421 | int gpu_array_is_read_only_scalar(struct gpu_array_info *array) |
422 | { |
423 | return array->read_only_scalar; |
424 | } |
425 | |
426 | /* Does "array" need to be allocated on the device? |
427 | * If it is a read-only scalar, then it will be passed as an argument |
428 | * to the kernel and therefore does not require any allocation. |
429 | * If this device memory is not accessed at all, then it does not |
430 | * need to be allocated either. |
431 | */ |
432 | int gpu_array_requires_device_allocation(struct gpu_array_info *array) |
433 | { |
434 | if (gpu_array_is_read_only_scalar(array)) |
435 | return 0; |
436 | if (!array->global) |
437 | return 0; |
438 | return 1; |
439 | } |
440 | |
441 | /* Return the set of parameter values for which the array has a positive |
442 | * size in all dimensions. |
443 | * If the sizes are only valid for some parameter values, then those |
444 | * constraints are also taken into account. |
445 | */ |
446 | __isl_give isl_set *gpu_array_positive_size_guard(struct gpu_array_info *array) |
447 | { |
448 | int i; |
449 | isl_space *space; |
450 | isl_set *guard; |
451 | |
452 | if (!array) |
453 | return NULL; |
454 | |
455 | space = isl_space_params(isl_space_copy(array->space)); |
456 | guard = isl_set_universe(space); |
457 | |
458 | for (i = 0; i < array->n_index; ++i) { |
459 | isl_pw_aff *bound; |
460 | isl_set *guard_i, *zero; |
461 | |
462 | bound = isl_multi_pw_aff_get_pw_aff(array->bound, i); |
463 | guard_i = isl_pw_aff_nonneg_set(isl_pw_aff_copy(bound)); |
464 | zero = isl_pw_aff_zero_set(bound); |
465 | guard_i = isl_set_subtract(guard_i, zero); |
466 | guard = isl_set_intersect(guard, guard_i); |
467 | } |
468 | |
469 | return guard; |
470 | } |
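
A per-dimension sketch with an invented parametric bound N: subtracting the zero set from the non-negative set yields strict positivity.

    #include <isl/aff.h>

    static void demo_positive_guard(isl_ctx *ctx)
    {
        /* A size bound that is simply the parameter N. */
        isl_pw_aff *bound = isl_pw_aff_read_from_str(ctx,
            "[N] -> { [(N)] }");
        isl_set *guard = isl_set_subtract(
            isl_pw_aff_nonneg_set(isl_pw_aff_copy(bound)),
            isl_pw_aff_zero_set(bound));
        isl_set_dump(guard);    /* [N] -> { : N > 0 } */
        isl_set_free(guard);
    }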
471 | |
472 | /* Internal data structure for extract_size_of_type. |
473 | * "type" specifies the name of the space that we want to extract. |
474 | * "res" is used to store the subset of that space. |
475 | */ |
476 | struct ppcg_extract_size_data { |
477 | const char *type; |
478 | isl_set *res; |
479 | }; |
480 | |
481 | /* This function is called for each set in a union_set. |
482 | * If the name of the set matches data->type, we store the |
483 | * set in data->res. |
484 | */ |
485 | static isl_stat extract_size_of_type(__isl_take isl_set *size, void *user) |
486 | { |
487 | struct ppcg_extract_size_data *data = user; |
488 | const char *name; |
489 | |
490 | name = isl_set_get_tuple_name(size); |
491 | if (name && !strcmp(name, data->type)) { |
492 | data->res = size; |
493 | return isl_stat_error; |
494 | } |
495 | |
496 | isl_set_free(size); |
497 | return isl_stat_ok; |
498 | } |
499 | |
500 | /* Given a union map { kernel[i] -> *[...] }, |
501 | * return the range in the space called "type" for the kernel with |
502 | * sequence number "id". |
503 | */ |
504 | static __isl_give isl_set *extract_sizes(__isl_keep isl_union_map *sizes, |
505 | const char *type, int id) |
506 | { |
507 | isl_space *space; |
508 | isl_set *dom; |
509 | isl_union_set *local_sizes; |
510 | struct ppcg_extract_size_data data = { type, NULL }; |
511 | |
512 | if (!sizes) |
513 | return NULL; |
514 | |
515 | space = isl_union_map_get_space(sizes); |
516 | space = isl_space_set_from_params(space); |
517 | space = isl_space_add_dims(space, isl_dim_set, 1); |
518 | space = isl_space_set_tuple_name(space, isl_dim_set, "kernel"); |
519 | dom = isl_set_universe(space); |
520 | dom = isl_set_fix_si(dom, isl_dim_set, 0, id); |
521 | |
522 | local_sizes = isl_union_set_apply(isl_union_set_from_set(dom), |
523 | isl_union_map_copy(sizes)); |
524 | isl_union_set_foreach_set(local_sizes, &extract_size_of_type, &data); |
525 | isl_union_set_free(local_sizes); |
526 | return data.res; |
527 | } |
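
For instance, with an invented sizes map of the shape typically passed via ppcg's --sizes option:

    static void demo_extract_sizes(isl_ctx *ctx)
    {
        isl_union_map *sizes = isl_union_map_read_from_str(ctx,
            "{ kernel[0] -> tile[32, 32]; kernel[0] -> block[16, 16] }");
        /* Select the "tile" space for kernel 0: { tile[32, 32] }.
         * For any other kernel id the result would be NULL. */
        isl_set *tile = extract_sizes(sizes, "tile", 0);
        isl_set_dump(tile);
        isl_set_free(tile);
        isl_union_map_free(sizes);
    }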
528 | |
529 | /* Given a singleton set, extract the first (at most *len) elements |
530 | * of the single integer tuple into *sizes and update *len if needed. |
531 | */ |
532 | static void read_sizes_from_set(__isl_take isl_set *set, int *sizes, int *len) |
533 | { |
534 | int i; |
535 | int dim; |
536 | |
537 | if (!set) |
538 | return; |
539 | |
540 | dim = isl_set_dim(set, isl_dim_set); |
541 | if (dim < *len) |
542 | *len = dim; |
543 | |
544 | for (i = 0; i < *len; ++i) { |
545 | isl_val *v; |
546 | |
547 | v = isl_set_plain_get_val_if_fixed(set, isl_dim_set, i); |
548 | assert(v); |
549 | |
550 | sizes[i] = isl_val_get_num_si(v); |
551 | isl_val_free(v); |
552 | } |
553 | |
554 | isl_set_free(set); |
555 | } |
556 | |
557 | /* Add the map { kernel[id] -> type[sizes] } to gen->used_sizes, |
558 | * if the option debug->dump_sizes is set. |
559 | */ |
560 | static void set_used_sizes(struct gpu_gen *gen, const char *type, int id, |
561 | int *sizes, int len) |
562 | { |
563 | int i; |
564 | isl_space *space; |
565 | isl_map *map; |
566 | |
567 | if (!gen->options->debug->dump_sizes) |
568 | return; |
569 | |
570 | space = isl_union_map_get_space(gen->used_sizes); |
571 | space = isl_space_set_from_params(space); |
572 | space = isl_space_add_dims(space, isl_dim_set, 1); |
573 | space = isl_space_set_tuple_name(space, isl_dim_set, "kernel"); |
574 | space = isl_space_from_domain(space); |
575 | space = isl_space_add_dims(space, isl_dim_out, len); |
576 | space = isl_space_set_tuple_name(space, isl_dim_out, type); |
577 | |
578 | map = isl_map_universe(space); |
579 | map = isl_map_fix_si(map, isl_dim_in, 0, id); |
580 | for (i = 0; i < len; ++i) |
581 | map = isl_map_fix_si(map, isl_dim_out, i, sizes[i]); |
582 | |
583 | gen->used_sizes = isl_union_map_add_map(gen->used_sizes, map); |
584 | } |
585 | |
586 | /* Extract user specified "tile" sizes from the "sizes" command line option, |
587 | * defaulting to option->tile_size in each dimension. |
588 | * *tile_len contains the maximum number of tile sizes needed. |
589 | * Update *tile_len to the number of specified tile sizes, if any, and |
590 | * return a pointer to the tile sizes (or NULL on error). |
591 | * Add the effectively used sizes to gen->used_sizes. |
592 | */ |
593 | static int *read_tile_sizes(struct gpu_gen *gen, int *tile_len) |
594 | { |
595 | int n; |
596 | int *tile_size; |
597 | isl_set *size; |
598 | |
599 | tile_size = isl_alloc_array(gen->ctx, int, *tile_len); |
600 | if (!tile_size) |
601 | return NULL; |
602 | for (n = 0; n < *tile_len; ++n) |
603 | tile_size[n] = gen->options->tile_size; |
604 | |
605 | size = extract_sizes(gen->sizes, "tile", gen->kernel_id); |
606 | read_sizes_from_set(size, tile_size, tile_len); |
607 | set_used_sizes(gen, "tile", gen->kernel_id, tile_size, *tile_len); |
608 | |
609 | return tile_size; |
610 | } |
611 | |
612 | /* Extract user specified "block" sizes from the "sizes" command line option, |
613 | * after filling in some potentially useful defaults. |
614 | */ |
615 | static void read_block_sizes(struct ppcg_kernel *kernel, |
616 | __isl_keep isl_union_map *sizes) |
617 | { |
618 | isl_set *size; |
619 | |
620 | if (kernel->n_block > 3) |
621 | kernel->n_block = 3; |
622 | switch (kernel->n_block) { |
623 | case 1: |
624 | kernel->block_dim[0] = 512; |
625 | break; |
626 | case 2: |
627 | kernel->block_dim[0] = 32; |
628 | kernel->block_dim[1] = 16; |
629 | break; |
630 | default: |
631 | kernel->block_dim[0] = 32; |
632 | kernel->block_dim[1] = 4; |
633 | kernel->block_dim[2] = 4; |
634 | break; |
635 | } |
636 | |
637 | size = extract_sizes(sizes, "block", kernel->id); |
638 | read_sizes_from_set(size, kernel->block_dim, &kernel->n_block); |
639 | } |
640 | |
641 | /* Extract user specified "grid" sizes from the "sizes" command line option, |
642 | * after filling in some potentially useful defaults. |
643 | */ |
644 | static void read_grid_sizes(struct ppcg_kernel *kernel, |
645 | __isl_keep isl_union_map *sizes) |
646 | { |
647 | isl_set *size; |
648 | |
649 | if (kernel->n_grid > 2) |
650 | kernel->n_grid = 2; |
651 | switch (kernel->n_grid) { |
652 | case 1: |
653 | kernel->grid_dim[0] = 32768; |
654 | break; |
655 | default: |
656 | kernel->grid_dim[0] = 256; |
657 | kernel->grid_dim[1] = 256; |
658 | break; |
659 | } |
660 | |
661 | size = extract_sizes(sizes, "grid", kernel->id); |
662 | read_sizes_from_set(size, kernel->grid_dim, &kernel->n_grid); |
663 | } |
664 | |
665 | /* Extract user specified grid and block sizes from the gen->sizes |
666 | * command line option after filling in some potentially useful defaults. |
667 | * Store the extracted sizes in "kernel". |
668 | * Add the effectively used sizes to gen->used_sizes. |
669 | */ |
670 | static void read_grid_and_block_sizes(struct ppcg_kernel *kernel, |
671 | struct gpu_gen *gen) |
672 | { |
673 | read_block_sizes(kernel, gen->sizes); |
674 | read_grid_sizes(kernel, gen->sizes); |
675 | set_used_sizes(gen, "block", kernel->id, |
676 | kernel->block_dim, kernel->n_block); |
677 | set_used_sizes(gen, "grid", kernel->id, |
678 | kernel->grid_dim, kernel->n_grid); |
679 | } |
680 | |
681 | static void *free_stmts(struct gpu_stmt *stmts, int n) |
682 | { |
683 | int i; |
684 | |
685 | if (!stmts) |
686 | return NULL; |
687 | |
688 | for (i = 0; i < n; ++i) { |
689 | struct gpu_stmt_access *access, *next; |
690 | |
691 | for (access = stmts[i].accesses; access; access = next) { |
692 | next = access->next; |
693 | isl_id_free(access->ref_id); |
694 | isl_map_free(access->access); |
695 | isl_map_free(access->tagged_access); |
696 | free(access); |
697 | } |
698 | |
699 | isl_id_free(stmts[i].id); |
700 | } |
701 | free(stmts); |
702 | |
703 | return NULL; |
704 | } |
705 | |
706 | /* Add parameters p[i] with identifiers "ids" to "set", |
707 | * with bounds to 0 <= p[i] < size[i]. |
708 | */ |
709 | __isl_give isl_set *add_bounded_parameters(__isl_take isl_set *set, |
710 | int *size, __isl_keep isl_id_list *ids) |
711 | { |
712 | int i, len; |
713 | unsigned nparam; |
714 | |
715 | len = isl_id_list_n_id(ids); |
716 | nparam = isl_set_dim(set, isl_dim_param); |
717 | set = isl_set_add_dims(set, isl_dim_param, len); |
718 | |
719 | for (i = 0; i < len; ++i) { |
720 | isl_id *id; |
721 | |
722 | id = isl_id_list_get_id(ids, i); |
723 | set = isl_set_set_dim_id(set, isl_dim_param, nparam + i, id); |
724 | set = isl_set_lower_bound_si(set, isl_dim_param, nparam + i, 0); |
725 | set = isl_set_upper_bound_si(set, isl_dim_param, |
726 | nparam + i, size[i] - 1); |
727 | } |
728 | |
729 | return set; |
730 | } |
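
A small usage sketch with invented thread identifiers t0, t1 and block sizes 32x4:

    #include <isl/id.h>

    static void demo_add_bounded_parameters(isl_ctx *ctx)
    {
        int size[2] = { 32, 4 };
        isl_id_list *ids = isl_id_list_alloc(ctx, 2);
        ids = isl_id_list_add(ids, isl_id_alloc(ctx, "t0", NULL));
        ids = isl_id_list_add(ids, isl_id_alloc(ctx, "t1", NULL));
        isl_set *set = isl_set_read_from_str(ctx,
            "{ S[i] : 0 <= i < 100 }");
        set = add_bounded_parameters(set, size, ids);
        /* [t0, t1] -> { S[i] : 0 <= i < 100 and 0 <= t0 <= 31
         *               and 0 <= t1 <= 3 } */
        isl_set_dump(set);
        isl_set_free(set);
        isl_id_list_free(ids);
    }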
731 | |
732 | /* Add "len" parameters p[i] with identifiers "ids" and intersect "set" |
733 | * with |
734 | * |
735 | * { : 0 <= p[i] < size[i] } |
736 | * |
737 | * or an overapproximation. |
738 | */ |
739 | static __isl_give isl_set *add_bounded_parameters_dynamic( |
740 | __isl_take isl_set *set, __isl_keep isl_multi_pw_aff *size, |
741 | __isl_keep isl_id_list *ids) |
742 | { |
743 | int i, len; |
744 | unsigned nparam; |
745 | isl_space *space; |
746 | isl_local_space *ls; |
747 | |
748 | len = isl_multi_pw_aff_dim(size, isl_dim_out); |
749 | nparam = isl_set_dim(set, isl_dim_param); |
750 | set = isl_set_add_dims(set, isl_dim_param, len); |
751 | |
752 | for (i = 0; i < len; ++i) { |
753 | isl_id *id; |
754 | |
755 | id = isl_id_list_get_id(ids, i); |
756 | set = isl_set_set_dim_id(set, isl_dim_param, nparam + i, id); |
757 | } |
758 | |
759 | space = isl_space_params(isl_set_get_space(set)); |
760 | ls = isl_local_space_from_space(space); |
761 | for (i = 0; i < len; ++i) { |
762 | isl_pw_aff *param, *size_i, *zero; |
763 | isl_set *bound; |
764 | |
765 | param = isl_pw_aff_var_on_domain(isl_local_space_copy(ls), |
766 | isl_dim_param, nparam + i); |
767 | |
768 | size_i = isl_multi_pw_aff_get_pw_aff(size, i); |
769 | bound = isl_pw_aff_lt_set(isl_pw_aff_copy(param), size_i); |
770 | bound = isl_set_from_basic_set(isl_set_simple_hull(bound)); |
771 | set = isl_set_intersect_params(set, bound); |
772 | |
773 | zero = isl_pw_aff_zero_on_domain(isl_local_space_copy(ls)); |
774 | bound = isl_pw_aff_ge_set(param, zero); |
775 | set = isl_set_intersect_params(set, bound); |
776 | } |
777 | isl_local_space_free(ls); |
778 | |
779 | return set; |
780 | } |
781 | |
782 | /* Return the union of all tagged access relations in the group. |
783 | */ |
784 | static __isl_give isl_union_map *group_tagged_access_relation( |
785 | struct gpu_array_ref_group *group) |
786 | { |
787 | int i; |
788 | isl_union_map *access; |
789 | |
790 | access = isl_union_map_empty(isl_map_get_space(group->access)); |
791 | for (i = 0; i < group->n_ref; ++i) { |
792 | isl_map *map_i; |
793 | |
794 | map_i = isl_map_copy(group->refs[i]->tagged_access); |
795 | access = isl_union_map_union(access, |
796 | isl_union_map_from_map(map_i)); |
797 | } |
798 | |
799 | return access; |
800 | } |
801 | |
802 | /* Return the extent of "array", recomputed from the bounds. |
803 | * The recomputed extent may be simpler than the original extent. |
804 | */ |
805 | static __isl_give isl_set *array_extent(struct gpu_array_info *array) |
806 | { |
807 | int i; |
808 | isl_id *id; |
809 | isl_space *space; |
810 | isl_local_space *ls; |
811 | isl_set *extent; |
812 | |
813 | id = isl_set_get_tuple_id(array->extent); |
814 | space = isl_set_get_space(array->extent); |
815 | extent = isl_set_universe(isl_space_copy(space)); |
816 | ls = isl_local_space_from_space(space); |
817 | for (i = 0; i < array->n_index; ++i) { |
818 | isl_pw_aff *bound; |
819 | isl_aff *aff; |
820 | isl_pw_aff *index; |
821 | isl_set *lt; |
822 | |
823 | extent = isl_set_lower_bound_si(extent, isl_dim_set, i, 0); |
824 | |
825 | aff = isl_aff_var_on_domain(isl_local_space_copy(ls), |
826 | isl_dim_set, i); |
827 | index = isl_pw_aff_from_aff(aff); |
828 | bound = isl_multi_pw_aff_get_pw_aff(array->bound, i); |
829 | bound = isl_pw_aff_from_range(bound); |
830 | bound = isl_pw_aff_add_dims(bound, isl_dim_in, array->n_index); |
831 | bound = isl_pw_aff_set_tuple_id(bound, isl_dim_in, |
832 | isl_id_copy(id)); |
833 | lt = isl_pw_aff_lt_set(index, bound); |
834 | extent = isl_set_intersect(extent, lt); |
835 | } |
836 | isl_local_space_free(ls); |
837 | isl_id_free(id); |
838 | |
839 | return extent; |
840 | } |
841 | |
842 | /* Return a map from the first group->shared_tile->depth dimensions |
843 | * of the computed schedule to the array tile in |
844 | * global memory that corresponds to the shared memory copy. |
845 | * |
846 | * In particular, return a map |
847 | * |
848 | * { D[i] -> A[a] } |
849 | * |
850 | * with constraints |
851 | * |
852 | * tile_offset(i) <= a <= tile_offset(i) + tile_size - 1 (1) |
853 | * |
854 | * and |
855 | * |
856 | * 0 <= a <= array_size - 1 (2) |
857 | * |
858 | * Note that if some stride has been detected (i.e., when |
859 | * group->shared_tile->bound[i].shift is set), then a in (1) refers |
860 | * to the shifted and scaled down version. |
861 | * |
862 | * Constraints (1) are obtained by mapping the size constraints on the |
863 | * shared/private memory tile back to the access relation. |
864 | * Constraints (2) are obtained from the (recomputed) extent. |
865 | */ |
866 | static __isl_give isl_map *group_tile(struct gpu_array_ref_group *group) |
867 | { |
868 | int i; |
869 | int n_index = group->array->n_index; |
870 | isl_map *tile; |
871 | isl_space *space; |
872 | isl_set *local; |
873 | isl_set *extent; |
874 | |
875 | space = isl_multi_aff_get_space(group->shared_tile->tiling); |
876 | space = isl_space_range(space); |
877 | local = isl_set_universe(space); |
878 | for (i = 0; i < n_index; ++i) { |
879 | isl_val *bound; |
880 | |
881 | local = isl_set_lower_bound_si(local, isl_dim_set, i, 0); |
882 | bound = isl_val_copy(group->shared_tile->bound[i].size); |
883 | bound = isl_val_sub_ui(bound, 1); |
884 | local = isl_set_upper_bound_val(local, isl_dim_set, i, bound); |
885 | } |
886 | local = isl_set_preimage_multi_aff(local, |
887 | isl_multi_aff_copy(group->shared_tile->tiling)); |
888 | tile = isl_set_unwrap(local); |
889 | extent = array_extent(group->array); |
890 | tile = isl_map_intersect_range(tile, extent); |
891 | |
892 | return tile; |
893 | } |
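
As a concrete instance (numbers invented): for a one-dimensional array A of size N tiled with tile size 32 and tile offset 32i, constraints (1) and (2) combine to { D[i] -> A[a] : 32i <= a <= 32i + 31 and 0 <= a <= N - 1 }.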
894 | |
895 | /* Given a mapping "iterator_map" from the AST schedule to a domain, |
896 | * return the corresponding mapping from the AST schedule to |
897 | * the outer kernel->copy_schedule_dim dimensions of |
898 | * the schedule computed by PPCG for this kernel. |
899 | * |
900 | * Note that kernel->copy_schedule_dim is at least as large as |
901 | * the largest depth of any array reference group associated to the kernel. |
902 | * This is needed as the returned schedule is used to extract a mapping |
903 | * to the outer tile->depth dimensions in transform_index. |
904 | */ |
905 | static __isl_give isl_pw_multi_aff *compute_sched_to_copy( |
906 | struct ppcg_kernel *kernel, __isl_take isl_pw_multi_aff *iterator_map) |
907 | { |
908 | isl_union_pw_multi_aff *upma; |
909 | isl_pw_multi_aff *pma; |
910 | isl_space *space; |
911 | |
912 | space = isl_space_range(isl_pw_multi_aff_get_space(iterator_map)); |
913 | space = isl_space_from_domain(space); |
914 | space = isl_space_add_dims(space, isl_dim_out, |
915 | kernel->copy_schedule_dim); |
916 | |
917 | upma = isl_union_pw_multi_aff_copy(kernel->copy_schedule); |
918 | pma = isl_union_pw_multi_aff_extract_pw_multi_aff(upma, space); |
919 | isl_union_pw_multi_aff_free(upma); |
920 | |
921 | return isl_pw_multi_aff_pullback_pw_multi_aff(pma, iterator_map); |
922 | } |
923 | |
924 | /* If max_shared_memory is not set to infinity (-1), then make |
925 | * sure that the total amount of shared memory required by the |
926 | * array reference groups mapped to shared memory by "kernel" |
927 | * is no larger than this maximum. |
928 | * |
929 | * We apply a greedy approach and discard (keep in global memory) |
930 | * those groups that would result in a total memory size that |
931 | * is larger than the maximum. |
932 | * |
933 | * This function should be called after any function that may |
934 | * affect the decision on whether to place a reference group |
935 | * in private, shared or global memory. |
936 | */ |
937 | static void check_shared_memory_bound(struct ppcg_kernel *kernel) |
938 | { |
939 | int i, j; |
940 | isl_val *left, *size; |
941 | |
942 | if (kernel->options->max_shared_memory < 0) |
943 | return; |
944 | |
945 | left = isl_val_int_from_si(kernel->ctx, |
946 | kernel->options->max_shared_memory); |
947 | |
948 | for (i = 0; i < kernel->n_array; ++i) { |
949 | struct gpu_local_array_info *local = &kernel->array[i]; |
950 | |
951 | for (j = 0; j < local->n_group; ++j) { |
952 | struct gpu_array_ref_group *group; |
953 | enum ppcg_group_access_type type; |
954 | |
955 | group = local->groups[j]; |
956 | type = gpu_array_ref_group_type(group); |
957 | if (type != ppcg_access_shared) |
958 | continue; |
959 | |
960 | size = gpu_array_tile_size(group->shared_tile); |
961 | size = isl_val_mul_ui(size, local->array->size); |
962 | |
963 | if (isl_val_le(size, left)) { |
964 | left = isl_val_sub(left, size); |
965 | continue; |
966 | } |
967 | isl_val_free(size); |
968 | |
969 | group->shared_tile = |
970 | gpu_array_tile_free(group->shared_tile); |
971 | } |
972 | } |
973 | |
974 | isl_val_free(left); |
975 | } |
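
To see the greedy policy at work (numbers invented): with max_shared_memory = 49152 bytes and shared tiles of 30720, 25600 and 20480 bytes considered in that order, the first tile is kept (18432 bytes remain) and the other two are discarded, even though keeping the second and third tiles (46080 bytes) would have used the budget better. The outcome depends on the order in which the groups are visited; no backtracking is attempted.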
976 | |
977 | /* Mark all arrays of "kernel" that have an array reference group |
978 | * that is not mapped to private or shared memory as |
979 | * accessing the corresponding global device memory. |
980 | */ |
981 | static void mark_global_arrays(struct ppcg_kernel *kernel) |
982 | { |
983 | int i, j; |
984 | |
985 | for (i = 0; i < kernel->n_array; ++i) { |
986 | struct gpu_local_array_info *local = &kernel->array[i]; |
987 | |
988 | if (local->global) |
989 | continue; |
990 | for (j = 0; j < local->n_group; ++j) { |
991 | if (gpu_array_ref_group_tile(local->groups[j])) |
992 | continue; |
993 | |
994 | local->global = 1; |
995 | local->array->global = 1; |
996 | break; |
997 | } |
998 | } |
999 | } |
1000 | |
1001 | /* Compute a tiling for all the array reference groups in "kernel". |
1002 | */ |
1003 | static void compute_group_tilings(struct ppcg_kernel *kernel) |
1004 | { |
1005 | int i, j; |
1006 | |
1007 | for (i = 0; i < kernel->n_array; ++i) { |
1008 | struct gpu_local_array_info *array = &kernel->array[i]; |
1009 | |
1010 | for (j = 0; j < array->n_group; ++j) |
1011 | gpu_array_ref_group_compute_tiling(array->groups[j]); |
1012 | } |
1013 | } |
1014 | |
1015 | /* Compute the effective grid size as a list of the sizes in each dimension. |
1016 | * |
1017 | * The grid size specified by the user or set by default |
1018 | * in read_grid_sizes() and applied by the block filter |
1019 | * may be too large for the given code in the sense that |
1020 | * it may contain blocks that don't need to execute anything. |
1021 | * We therefore don't return this grid size, but instead the |
1022 | * smallest grid size that ensures that all blocks that actually |
1023 | * execute code are included in the grid. |
1024 | * |
1025 | * We first extract a description of the grid, i.e., the possible values |
1026 | * of the block ids, from the domain elements in "domain" and |
1027 | * kernel->block_filter. |
1028 | * The block ids are parameters in kernel->block_filter. |
1029 | * We simply need to change them into set dimensions. |
1030 | * |
1031 | * Then, for each block dimension, we compute the maximal value of the block id |
1032 | * and add one. |
1033 | */ |
1034 | static __isl_give isl_multi_pw_aff *extract_grid_size( |
1035 | struct ppcg_kernel *kernel, __isl_take isl_union_set *domain) |
1036 | { |
1037 | int i; |
1038 | isl_set *grid; |
1039 | isl_set *context; |
1040 | isl_multi_pw_aff *size; |
1041 | |
1042 | domain = isl_union_set_intersect(domain, |
1043 | isl_union_set_copy(kernel->block_filter)); |
1044 | grid = isl_union_set_params(domain); |
1045 | grid = isl_set_from_params(grid); |
1046 | grid = isl_set_add_dims(grid, isl_dim_set, kernel->n_grid); |
1047 | for (i = 0; i < kernel->n_grid; ++i) { |
1048 | int pos; |
1049 | isl_id *id; |
1050 | |
1051 | id = isl_id_list_get_id(kernel->block_ids, i); |
1052 | pos = isl_set_find_dim_by_id(grid, isl_dim_param, id); |
1053 | isl_id_free(id); |
1054 | assert(pos >= 0); |
1055 | grid = isl_set_equate(grid, isl_dim_param, pos, isl_dim_set, i); |
1056 | grid = isl_set_project_out(grid, isl_dim_param, pos, 1); |
1057 | } |
1058 | |
1059 | grid = isl_set_coalesce(grid); |
1060 | size = ppcg_size_from_extent(grid); |
1061 | context = isl_set_params(isl_set_copy(kernel->context)); |
1062 | return isl_multi_pw_aff_gist(size, context); |
1063 | } |
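
A sketch of the parameter-to-set-dimension conversion with one invented block id b0 bounded by 8:

    static void demo_grid_from_params(isl_ctx *ctx)
    {
        isl_set *grid = isl_set_read_from_str(ctx,
            "[b0] -> { [] : 0 <= b0 < 8 }");
        grid = isl_set_add_dims(grid, isl_dim_set, 1);
        grid = isl_set_equate(grid, isl_dim_param, 0, isl_dim_set, 0);
        grid = isl_set_project_out(grid, isl_dim_param, 0, 1);
        /* grid is now { [i0] : 0 <= i0 <= 7 }; ppcg_size_from_extent
         * then yields the effective size max + 1 = 8. */
        isl_set_dump(grid);
        isl_set_free(grid);
    }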
1064 | |
1065 | /* Compute the size of a fixed bounding box around the origin and "set", |
1066 | * where "set" is assumed to contain only non-negative elements, |
1067 | * and store the results in "size". |
1068 | * In particular, compute the maximal value of "set" in each direction |
1069 | * and add one. |
1070 | */ |
1071 | static void extract_fixed_size(__isl_take isl_set *set, int *size) |
1072 | { |
1073 | int i, n; |
1074 | isl_local_space *ls; |
1075 | isl_aff *obj; |
1076 | |
1077 | n = isl_set_dim(set, isl_dim_set); |
1078 | ls = isl_local_space_from_space(isl_set_get_space(set)); |
1079 | obj = isl_aff_zero_on_domain(ls); |
1080 | for (i = 0; i < n; ++i) { |
1081 | isl_val *max; |
1082 | |
1083 | obj = isl_aff_set_coefficient_si(obj, isl_dim_in, i, 1); |
1084 | max = isl_set_max_val(set, obj); |
1085 | size[i] = isl_val_get_num_si(max) + 1; |
1086 | isl_val_free(max); |
1087 | obj = isl_aff_set_coefficient_si(obj, isl_dim_in, i, 0); |
1088 | } |
1089 | isl_aff_free(obj); |
1090 | isl_set_free(set); |
1091 | } |
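
For example (values invented):

    static void demo_fixed_size(isl_ctx *ctx)
    {
        int size[2];
        isl_set *set = isl_set_read_from_str(ctx,
            "{ [i, j] : 0 <= i < 7 and 0 <= j < 3 }");
        extract_fixed_size(set, size);    /* consumes "set" */
        /* size[0] == 7, size[1] == 3: maximal value plus one per dim. */
    }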
1092 | |
1093 | /* Compute the effective block size as a list of the sizes in each dimension |
1094 | * and store the sizes in kernel->block_dim. |
1095 | * |
1096 | * The block size specified by the user or set by default |
1097 | * in read_block_sizes() and applied by the thread filter |
1098 | * may be too large for the given code in the sense that |
1099 | * it may contain threads that don't need to execute anything. |
1100 | * We therefore update this block size in kernel->block_dim |
1101 | * to the smallest block size that ensures that all threads |
1102 | * that actually execute code are included in the block. |
1103 | * |
1104 | * The set of possible values of the thread ids is obtained from |
1105 | * the domain elements "domain" and kernel->thread_filter. |
1106 | * The current implementation eliminates all parameters, ensuring |
1107 | * that the size is a fixed constant in each dimension. |
1108 | * In principle we could also compute parametric sizes. |
1109 | * We would have to make sure to project out all b%d and t%d parameters, |
1110 | * however. |
1111 | */ |
1112 | static isl_stat extract_block_size(struct ppcg_kernel *kernel, |
1113 | __isl_take isl_union_set *domain) |
1114 | { |
1115 | int i; |
1116 | int nparam; |
1117 | isl_set *block; |
1118 | |
1119 | domain = isl_union_set_intersect(domain, |
1120 | isl_union_set_copy(kernel->thread_filter)); |
1121 | block = isl_union_set_params(domain); |
1122 | block = isl_set_from_params(block); |
1123 | block = isl_set_add_dims(block, isl_dim_set, kernel->n_block); |
1124 | for (i = 0; i < kernel->n_block; ++i) { |
1125 | int pos; |
1126 | isl_id *id; |
1127 | |
1128 | if (!block) |
1129 | return isl_stat_error; |
1130 | |
1131 | id = isl_id_list_get_id(kernel->thread_ids, i); |
1132 | pos = isl_set_find_dim_by_id(block, isl_dim_param, id); |
1133 | isl_id_free(id); |
1134 | if (pos < 0) |
1135 | isl_die(isl_set_get_ctx(block), isl_error_internal, |
1136 | "missing constraints on thread identifier", |
1137 | block = isl_set_free(block)); |
1138 | block = isl_set_equate(block, isl_dim_param, pos, |
1139 | isl_dim_set, i); |
1140 | } |
1141 | nparam = isl_set_dim(block, isl_dim_param); |
1142 | block = isl_set_project_out(block, isl_dim_param, 0, nparam); |
1143 | |
1144 | if (!block) |
1145 | return isl_stat_error; |
1146 | |
1147 | extract_fixed_size(block, kernel->block_dim); |
1148 | |
1149 | return isl_stat_ok; |
1150 | } |
1151 | |
1152 | struct ppcg_kernel *ppcg_kernel_free(struct ppcg_kernel *kernel) |
1153 | { |
1154 | int i, j; |
1155 | |
1156 | if (!kernel) |
1157 | return NULL; |
1158 | |
1159 | isl_id_list_free(kernel->block_ids); |
1160 | isl_id_list_free(kernel->thread_ids); |
1161 | isl_multi_pw_aff_free(kernel->grid_size); |
1162 | isl_ast_expr_free(kernel->grid_size_expr); |
1163 | isl_set_free(kernel->context); |
1164 | isl_union_set_free(kernel->core); |
1165 | isl_union_set_free(kernel->arrays); |
1166 | isl_union_pw_multi_aff_free(kernel->contraction); |
1167 | isl_union_set_free(kernel->expanded_domain); |
1168 | isl_space_free(kernel->space); |
1169 | isl_ast_node_free(kernel->tree); |
1170 | isl_union_set_free(kernel->block_filter); |
1171 | isl_union_set_free(kernel->thread_filter); |
1172 | isl_union_pw_multi_aff_free(kernel->copy_schedule); |
1173 | isl_union_set_free(kernel->sync_writes); |
1174 | |
1175 | for (i = 0; i < kernel->n_array; ++i) { |
1176 | struct gpu_local_array_info *array = &kernel->array[i]; |
1177 | |
1178 | for (j = 0; j < array->n_group; ++j) |
1179 | gpu_array_ref_group_free(array->groups[j]); |
1180 | free(array->groups); |
1181 | |
1182 | isl_multi_pw_aff_free(array->bound); |
1183 | isl_ast_expr_free(array->bound_expr); |
1184 | } |
1185 | free(kernel->array); |
1186 | |
1187 | for (i = 0; i < kernel->n_var; ++i) { |
1188 | free(kernel->var[i].name); |
1189 | isl_vec_free(kernel->var[i].size); |
1190 | } |
1191 | free(kernel->var); |
1192 | |
1193 | free(kernel); |
1194 | |
1195 | return NULL; |
1196 | } |
1197 | |
1198 | /* Wrapper around ppcg_kernel_free for use as an isl_id_set_free_user callback. |
1199 | */ |
1200 | static void ppcg_kernel_free_wrap(void *user) |
1201 | { |
1202 | struct ppcg_kernel *kernel = user; |
1203 | |
1204 | ppcg_kernel_free(kernel); |
1205 | } |
1206 | |
1207 | static void create_kernel_var(isl_ctx *ctx, struct gpu_array_ref_group *group, |
1208 | struct ppcg_kernel_var *var) |
1209 | { |
1210 | int j; |
1211 | struct gpu_array_tile *tile; |
1212 | isl_printer *p; |
1213 | |
1214 | var->array = group->array; |
1215 | |
1216 | var->type = gpu_array_ref_group_type(group); |
1217 | tile = gpu_array_ref_group_tile(group); |
1218 | |
1219 | p = isl_printer_to_str(ctx); |
1220 | p = gpu_array_ref_group_print_name(group, p); |
1221 | var->name = isl_printer_get_str(p); |
1222 | isl_printer_free(p); |
1223 | |
1224 | var->size = isl_vec_alloc(ctx, group->array->n_index); |
1225 | |
1226 | for (j = 0; j < group->array->n_index; ++j) |
1227 | var->size = isl_vec_set_element_val(var->size, j, |
1228 | isl_val_copy(tile->bound[j].size)); |
1229 | } |
1230 | |
1231 | static int create_kernel_vars(struct ppcg_kernel *kernel) |
1232 | { |
1233 | int i, j, n; |
1234 | |
1235 | n = 0; |
1236 | for (i = 0; i < kernel->n_array; ++i) { |
1237 | struct gpu_local_array_info *array = &kernel->array[i]; |
1238 | |
1239 | for (j = 0; j < array->n_group; ++j) { |
1240 | struct gpu_array_ref_group *group = array->groups[j]; |
1241 | enum ppcg_group_access_type type; |
1242 | |
1243 | type = gpu_array_ref_group_type(group); |
1244 | if (type != ppcg_access_global) |
1245 | ++n; |
1246 | } |
1247 | } |
1248 | |
1249 | kernel->n_var = n; |
1250 | kernel->var = isl_calloc_array(kernel->ctx, struct ppcg_kernel_var, n); |
1251 | if (!kernel->var) |
1252 | return -1; |
1253 | |
1254 | n = 0; |
1255 | for (i = 0; i < kernel->n_array; ++i) { |
1256 | struct gpu_local_array_info *array = &kernel->array[i]; |
1257 | |
1258 | for (j = 0; j < array->n_group; ++j) { |
1259 | struct gpu_array_ref_group *group = array->groups[j]; |
1260 | enum ppcg_group_access_type type; |
1261 | |
1262 | type = gpu_array_ref_group_type(group); |
1263 | if (type == ppcg_access_global) |
1264 | continue; |
1265 | create_kernel_var(kernel->ctx, group, &kernel->var[n]); |
1266 | ++n; |
1267 | } |
1268 | } |
1269 | |
1270 | return 0; |
1271 | } |
1272 | |
1273 | /* Replace "pa" by the zero function defined over the universe domain |
1274 | * in the space of "pa". |
1275 | */ |
1276 | static __isl_give isl_pw_aff *set_universally_zero(__isl_take isl_pw_aff *pa) |
1277 | { |
1278 | isl_space *space; |
1279 | isl_aff *zero; |
1280 | |
1281 | space = isl_space_domain(isl_pw_aff_get_space(pa)); |
1282 | isl_pw_aff_free(pa); |
1283 | zero = isl_aff_zero_on_domain(isl_local_space_from_space(space)); |
1284 | |
1285 | return isl_pw_aff_from_aff(zero); |
1286 | } |
1287 | |
1288 | /* The sizes of the arrays on the host that have been computed by |
1289 | * extract_array_info may depend on the parameters. Use the extra |
1290 | * constraints on the parameters that are valid at "host_domain" |
1291 | * to simplify these expressions and store the results in kernel->array. |
1292 | * |
1293 | * We only need these localized bounds for arrays that are accessed |
1294 | * by the current kernel. If we have found at least one reference group |
1295 | * then the array is accessed by the kernel. |
1296 | * |
1297 | * The resulting sizes may be functions that are nowhere defined |
1298 | * in case the access function cannot possibly access anything inside |
1299 | * the kernel for some reason. If so, they are replaced by the zero |
1300 | * function. Since the access function cannot actually access anything, |
1301 | * there is no harm in printing the array sizes as zero. |
1302 | */ |
1303 | static void localize_bounds(struct ppcg_kernel *kernel, |
1304 | __isl_keep isl_set *host_domain) |
1305 | { |
1306 | int i, j; |
1307 | isl_set *context; |
1308 | |
1309 | context = isl_set_copy(host_domain); |
1310 | context = isl_set_params(context); |
1311 | |
1312 | for (i = 0; i < kernel->n_array; ++i) { |
1313 | struct gpu_local_array_info *local = &kernel->array[i]; |
1314 | isl_multi_pw_aff *bound; |
1315 | int n_index; |
1316 | |
1317 | if (local->n_group == 0) |
1318 | continue; |
1319 | |
1320 | n_index = local->array->n_index; |
1321 | bound = isl_multi_pw_aff_copy(local->array->bound); |
1322 | |
1323 | for (j = 0; j < n_index; ++j) { |
1324 | isl_pw_aff *pwaff; |
1325 | int empty; |
1326 | |
1327 | pwaff = isl_multi_pw_aff_get_pw_aff(bound, j); |
1328 | pwaff = isl_pw_aff_gist(pwaff, isl_set_copy(context)); |
1329 | empty = isl_pw_aff_is_empty(pwaff); |
1330 | if (empty < 0) |
1331 | pwaff = isl_pw_aff_free(pwaff); |
1332 | else if (empty) |
1333 | pwaff = set_universally_zero(pwaff); |
1334 | bound = isl_multi_pw_aff_set_pw_aff(bound, j, pwaff); |
1335 | } |
1336 | |
1337 | local->n_index = n_index; |
1338 | local->bound = bound; |
1339 | } |
1340 | isl_set_free(context); |
1341 | } |
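/* Minimal standalone sketch (assumptions: the function name and the
 * concrete sets are invented for illustration; only documented isl
 * entry points are used).  The per-dimension simplification performed
 * by localize_bounds is isl_pw_aff_gist under the parameter context of
 * the host domain: here a two-piece bound collapses to a single piece
 * once the context guarantees n >= 16.
 */
#include <isl/ctx.h>
#include <isl/set.h>
#include <isl/aff.h>

static void localize_bound_example(void)
{
	isl_ctx *ctx = isl_ctx_alloc();
	isl_pw_aff *bound = isl_pw_aff_read_from_str(ctx,
		"[n] -> { [i] -> [(n)] : n >= 0; [i] -> [(0)] : n < 0 }");
	isl_set *context = isl_set_read_from_str(ctx, "[n] -> { : n >= 16 }");

	/* The gist drops the n < 0 piece and the redundant n >= 0
	 * condition, leaving [n] -> { [i] -> [(n)] }. */
	bound = isl_pw_aff_gist(bound, context);
	isl_pw_aff_free(bound);
	isl_ctx_free(ctx);
}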
1342 | |
1343 | /* Create the array of gpu_local_array_info structures "array" |
1344 | * inside "kernel". The number of elements in this array is |
1345 | * the same as the number of arrays in "prog". |
1346 | * Initialize the "array" field of each local array to point |
1347 | * to the corresponding array in "prog". |
1348 | */ |
1349 | static struct ppcg_kernel *ppcg_kernel_create_local_arrays( |
1350 | struct ppcg_kernel *kernel, struct gpu_prog *prog) |
1351 | { |
1352 | int i; |
1353 | isl_ctx *ctx; |
1354 | |
1355 | ctx = isl_set_get_ctx(prog->context); |
1356 | kernel->array = isl_calloc_array(ctx,
1357 | struct gpu_local_array_info, prog->n_array);
1358 | if (!kernel->array) |
1359 | return ppcg_kernel_free(kernel); |
1360 | kernel->n_array = prog->n_array; |
1361 | |
1362 | for (i = 0; i < prog->n_array; ++i) |
1363 | kernel->array[i].array = &prog->array[i]; |
1364 | |
1365 | return kernel; |
1366 | } |
1367 | |
1368 | /* Does "kernel" need to be passed an argument corresponding to array "i"? |
1369 | * |
1370 | * The argument is only needed if the kernel accesses this device memory. |
1371 | */ |
1372 | int ppcg_kernel_requires_array_argument(struct ppcg_kernel *kernel, int i) |
1373 | { |
1374 | return kernel->array[i].global; |
1375 | } |
1376 | |
1377 | /* Find the element in prog->stmts that has the given "id".
1378 | * Return NULL if no such gpu_stmt can be found. |
1379 | */ |
1380 | static struct gpu_stmt *find_stmt(struct gpu_prog *prog, __isl_keep isl_id *id) |
1381 | { |
1382 | int i; |
1383 | |
1384 | for (i = 0; i < prog->n_stmts; ++i) { |
1385 | if (id == prog->stmts[i].id) |
1386 | break; |
1387 | } |
1388 | |
1389 | return i < prog->n_stmts ? &prog->stmts[i] : NULL;
1390 | } |
1391 | |
1392 | void ppcg_kernel_stmt_free(void *user) |
1393 | { |
1394 | struct ppcg_kernel_stmt *stmt = user; |
1395 | |
1396 | if (!stmt) |
1397 | return; |
1398 | |
1399 | switch (stmt->type) { |
1400 | case ppcg_kernel_copy: |
1401 | isl_ast_expr_free(stmt->u.c.index); |
1402 | isl_ast_expr_free(stmt->u.c.local_index); |
1403 | break; |
1404 | case ppcg_kernel_domain: |
1405 | isl_id_to_ast_expr_free(stmt->u.d.ref2expr); |
1406 | break; |
1407 | case ppcg_kernel_sync: |
1408 | break; |
1409 | } |
1410 | |
1411 | free(stmt); |
1412 | } |
1413 | |
1414 | /* Return the gpu_stmt_access in the list "accesses" that corresponds |
1415 | * to "ref_id". |
1416 | */ |
1417 | static struct gpu_stmt_access *find_access(struct gpu_stmt_access *accesses, |
1418 | __isl_keep isl_id *ref_id) |
1419 | { |
1420 | struct gpu_stmt_access *access; |
1421 | |
1422 | for (access = accesses; access; access = access->next) |
1423 | if (access->ref_id == ref_id) |
1424 | return access; |
1425 | |
1426 | return NULL;
1427 | } |
1428 | |
1429 | /* Return the index of the array called "name" in the list of arrays. |
1430 | */ |
1431 | static int find_array_index(struct ppcg_kernel *kernel, const char *name) |
1432 | { |
1433 | int i; |
1434 | |
1435 | for (i = 0; i < kernel->n_array; ++i) |
1436 | if (!strcmp(name, kernel->array[i].array->name))
1437 | return i; |
1438 | |
1439 | return -1; |
1440 | } |
1441 | |
1442 | /* Internal data structure for the index and AST expression transformation |
1443 | * callbacks for pet_stmt_build_ast_exprs. |
1444 | * |
1445 | * "kernel" is the kernel for which are computing AST expressions and |
1446 | * may be NULL if we are not inside a kernel. |
1447 | * "accesses" is the list of gpu_stmt_access in the statement. |
1448 | * "iterator_map" expresses the statement iterators in terms of |
1449 | * the AST loop iterators. |
1450 | * "sched2copy" expresses the outer copy_schedule_dim dimensions of |
1451 | * the kernel schedule in terms of the AST loop iterators and |
1452 | * may be NULL if we are not inside a kernel. |
1453 | * |
1454 | * The following fields are set in transform_index and used in transform_expr. |
1455 | * "array" is the array that is being accessed. |
1456 | * "global" is set if the global array is accessed (rather than |
1457 | * shared/private memory). |
1458 | * "local_array" refers to information on the array specialized |
1459 | * to the current kernel. |
1460 | */ |
1461 | struct ppcg_transform_data { |
1462 | struct ppcg_options *options; |
1463 | struct ppcg_kernel *kernel; |
1464 | struct gpu_stmt_access *accesses; |
1465 | isl_pw_multi_aff *iterator_map; |
1466 | isl_pw_multi_aff *sched2copy; |
1467 | |
1468 | struct gpu_array_info *array; |
1469 | int global; |
1470 | struct gpu_local_array_info *local_array; |
1471 | }; |
1472 | |
1473 | /* Return a pointer to the gpu_array_ref_group in "local" |
1474 | * that contains the reference "access". |
1475 | * Return NULL if no such group can be found. |
1476 | */ |
1477 | static struct gpu_array_ref_group *find_ref_group( |
1478 | struct gpu_local_array_info *local, struct gpu_stmt_access *access) |
1479 | { |
1480 | int i, j; |
1481 | |
1482 | for (i = 0; i < local->n_group; ++i) { |
1483 | struct gpu_array_ref_group *group = local->groups[i]; |
1484 | |
1485 | for (j = 0; j < group->n_ref; ++j) |
1486 | if (group->refs[j] == access) |
1487 | return group; |
1488 | } |
1489 | |
1490 | return NULL;
1491 | } |
1492 | |
1493 | /* Given an index expression "index" of the form |
1494 | * |
1495 | * L -> F(A), |
1496 | * |
1497 | * with F(A) either A or some subfield of A and L the AST loop iterators, |
1498 | * and a tiling "tiling" of the form |
1499 | * |
1500 | * [L -> A] -> T |
1501 | * |
1502 | * apply the tiling to the outer array in the index expression to obtain |
1503 | * |
1504 | * L -> T(A) |
1505 | * |
1506 | * If F(A) is some subfield of A, then separate the member access |
1507 | * into the base index expression and the field index expression, |
1508 | * apply the tiling to the base index expression and combine the result |
1509 | * with the field index expression. |
1510 | * |
1511 | * If F(A) is A, then modify index to keep track of the iterators |
1512 | * |
1513 | * L -> [L -> A] |
1514 | * |
1515 | * and combine the result with the tiling to obtain a tiled index expression |
1516 | * in terms of the AST loop iterators |
1517 | * |
1518 | * L -> T |
1519 | */ |
1520 | static __isl_give isl_multi_pw_aff *tile_outer( |
1521 | __isl_take isl_multi_pw_aff *index, __isl_take isl_multi_pw_aff *tiling) |
1522 | { |
1523 | isl_bool is_wrapping; |
1524 | isl_space *space; |
1525 | isl_multi_pw_aff *mpa; |
1526 | |
1527 | is_wrapping = isl_multi_pw_aff_range_is_wrapping(index); |
1528 | if (is_wrapping < 0) |
1529 | goto error; |
1530 | if (is_wrapping) { |
1531 | isl_multi_pw_aff *field; |
1532 | |
1533 | field = isl_multi_pw_aff_copy(index); |
1534 | field = isl_multi_pw_aff_range_factor_range(field); |
1535 | index = isl_multi_pw_aff_range_factor_domain(index); |
1536 | index = tile_outer(index, tiling); |
1537 | return isl_multi_pw_aff_range_product(index, field); |
1538 | } |
1539 | |
1540 | space = isl_space_domain(isl_multi_pw_aff_get_space(index)); |
1541 | space = isl_space_map_from_set(space); |
1542 | mpa = isl_multi_pw_aff_identity(space); |
1543 | index = isl_multi_pw_aff_range_product(mpa, index); |
1544 | index = isl_multi_pw_aff_pullback_multi_pw_aff(tiling, index); |
1545 | |
1546 | return index; |
1547 | error: |
1548 | isl_multi_pw_aff_free(index); |
1549 | isl_multi_pw_aff_free(tiling); |
1550 | return NULL;
1551 | } |
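/* Minimal standalone sketch (assumptions: the function name and the
 * concrete expressions are invented; tile_outer itself is static, so
 * its non-wrapping core is replayed inline).  The index L -> A is
 * turned into L -> [L -> A] and pulled back through the tiling
 * [L -> A] -> T, yielding L -> T.
 */
#include <isl/ctx.h>
#include <isl/space.h>
#include <isl/aff.h>

static void tile_outer_example(void)
{
	isl_ctx *ctx = isl_ctx_alloc();
	isl_multi_pw_aff *index = isl_multi_pw_aff_read_from_str(ctx,
		"{ [l] -> A[(2l)] }");
	isl_multi_pw_aff *tiling = isl_multi_pw_aff_read_from_str(ctx,
		"{ [[l] -> A[a]] -> T[(a - l)] }");
	isl_space *space;
	isl_multi_pw_aff *mpa;

	space = isl_space_domain(isl_multi_pw_aff_get_space(index));
	space = isl_space_map_from_set(space);
	mpa = isl_multi_pw_aff_identity(space);
	index = isl_multi_pw_aff_range_product(mpa, index);
	/* Composition gives { [l] -> T[(2l - l)] } = { [l] -> T[(l)] } */
	index = isl_multi_pw_aff_pullback_multi_pw_aff(tiling, index);
	isl_multi_pw_aff_free(index);
	isl_ctx_free(ctx);
}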
1552 | |
1553 | /* Index transformation callback for pet_stmt_build_ast_exprs. |
1554 | * |
1555 | * "index" expresses the array indices in terms of statement iterators |
1556 | * |
1557 | * We first reformulate "index" in terms of the AST loop iterators. |
1558 | * Then we check if we are accessing the global array or |
1559 | * a shared/private copy. In particular, if we are not inside a kernel |
1560 | * then we must be accessing a global array. |
1561 | * In the former case, we simply return |
1562 | * the updated index. If "index" is an affine expression rather |
1563 | * than an array access, then we also return the updated index here. |
1564 | * |
1565 | * If no reference groups have been computed for the array, |
1566 | * then we can only be accessing the global array. |
1567 | * |
1568 | * Otherwise, we apply the tiling to the index. |
1569 | * This tiling is of the form |
1570 | * |
1571 | * [D -> A] -> T |
1572 | * |
1573 | * where D corresponds to the outer tile->depth dimensions of |
1574 | * the kernel schedule. |
1575 | * The index is of the form |
1576 | * |
1577 | * L -> A |
1578 | * |
1579 | * We update the tiling to refer to the AST loop iterators |
1580 | * |
1581 | * [L -> A] -> T |
1582 | * |
1583 | * and combine it with the index to obtain a tiled index expression in terms |
1584 | * of the AST loop iterators |
1585 | * |
1586 | * L -> T |
1587 | * |
1588 | * Note that while the tiling applies directly to an outer array,
1589 | * the index may refer to some subfield of this outer array. |
1590 | * In such cases, the result will refer to the same subfield of the tile. |
1591 | * That is, an index expression of the form L -> F(A) will be transformed |
1592 | * into an index expression of the form L -> F(T). |
1593 | */ |
1594 | static __isl_give isl_multi_pw_aff *transform_index( |
1595 | __isl_take isl_multi_pw_aff *index, __isl_keep isl_id *ref_id, |
1596 | void *user) |
1597 | { |
1598 | struct ppcg_transform_data *data = user; |
1599 | struct gpu_stmt_access *access; |
1600 | struct gpu_array_ref_group *group; |
1601 | struct gpu_array_tile *tile; |
1602 | isl_pw_multi_aff *iterator_map; |
1603 | int i; |
1604 | int dim; |
1605 | const char *name; |
1606 | isl_space *space; |
1607 | isl_multi_pw_aff *tiling; |
1608 | isl_pw_multi_aff *pma; |
1609 | isl_pw_multi_aff *sched2depth; |
1610 | |
1611 | data->array = NULL;
1612 | |
1613 | iterator_map = isl_pw_multi_aff_copy(data->iterator_map); |
1614 | index = isl_multi_pw_aff_pullback_pw_multi_aff(index, iterator_map); |
1615 | |
1616 | if (!data->kernel) |
1617 | return index; |
1618 | |
1619 | access = find_access(data->accesses, ref_id); |
1620 | if (!access) |
1621 | return index; |
1622 | if (!isl_map_has_tuple_name(access->access, isl_dim_out)) |
1623 | return index; |
1624 | |
1625 | name = get_outer_array_name(access->access); |
1626 | i = find_array_index(data->kernel, name); |
1627 | if (i < 0) |
1628 | isl_die(isl_multi_pw_aff_get_ctx(index), isl_error_internal,
1629 | "cannot find array",
1630 | return isl_multi_pw_aff_free(index));
1631 | data->local_array = &data->kernel->array[i]; |
1632 | data->array = data->local_array->array; |
1633 | |
1634 | group = find_ref_group(data->local_array, access); |
1635 | if (!group) { |
1636 | data->global = 1; |
1637 | return index; |
1638 | } |
1639 | |
1640 | tile = gpu_array_ref_group_tile(group); |
1641 | data->global = !tile; |
1642 | if (!tile) |
1643 | return index; |
1644 | |
1645 | space = isl_space_domain(isl_multi_aff_get_space(tile->tiling)); |
1646 | space = isl_space_range(isl_space_unwrap(space)); |
1647 | space = isl_space_map_from_set(space); |
1648 | pma = isl_pw_multi_aff_identity(space); |
1649 | sched2depth = isl_pw_multi_aff_copy(data->sched2copy); |
1650 | dim = isl_pw_multi_aff_dim(sched2depth, isl_dim_out); |
1651 | sched2depth = isl_pw_multi_aff_drop_dims(sched2depth, isl_dim_out, |
1652 | tile->depth, dim - tile->depth); |
1653 | pma = isl_pw_multi_aff_product(sched2depth, pma); |
1654 | tiling = isl_multi_pw_aff_from_multi_aff( |
1655 | isl_multi_aff_copy(tile->tiling)); |
1656 | tiling = isl_multi_pw_aff_pullback_pw_multi_aff(tiling, pma); |
1657 | |
1658 | index = tile_outer(index, tiling); |
1659 | |
1660 | return index; |
1661 | } |
1662 | |
1663 | /* Dereference "expr" by adding an index [0]. |
1664 | * The original "expr" is assumed not to have any indices. |
1665 | * |
1666 | * If "expr" is a member access, then the dereferencing needs |
1667 | * to be applied to the structure argument of this member access. |
1668 | */ |
1669 | static __isl_give isl_ast_expr *dereference(__isl_take isl_ast_expr *expr) |
1670 | { |
1671 | isl_ctx *ctx; |
1672 | isl_ast_expr *arg0, *res; |
1673 | isl_ast_expr_list *list; |
1674 | |
1675 | arg0 = isl_ast_expr_get_op_arg(expr, 0); |
1676 | if (!arg0) |
1677 | return isl_ast_expr_free(expr); |
1678 | if (isl_ast_expr_get_type(arg0) == isl_ast_expr_op && |
1679 | isl_ast_expr_get_op_type(arg0) == isl_ast_op_member) { |
1680 | isl_ast_expr *arg; |
1681 | |
1682 | arg = isl_ast_expr_get_op_arg(arg0, 0); |
1683 | arg = dereference(arg); |
1684 | arg0 = isl_ast_expr_set_op_arg(arg0, 0, arg); |
1685 | expr = isl_ast_expr_set_op_arg(expr, 0, arg0); |
1686 | |
1687 | return expr; |
1688 | } |
1689 | isl_ast_expr_free(arg0); |
1690 | |
1691 | ctx = isl_ast_expr_get_ctx(expr); |
1692 | res = isl_ast_expr_from_val(isl_val_zero(ctx)); |
1693 | list = isl_ast_expr_list_from_ast_expr(res); |
1694 | res = isl_ast_expr_get_op_arg(expr, 0); |
1695 | res = isl_ast_expr_access(res, list); |
1696 | isl_ast_expr_free(expr); |
1697 | |
1698 | return res; |
1699 | } |
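/* Illustrative note (editorial sketch, not part of the original
 * source): an index-free access expression "x" becomes "x[0]", and
 * for a member access the recursion applies to the structure
 * argument, so "s.f" becomes "s[0].f".
 */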
1700 | |
1701 | /* Linearize the index expression "expr" based on the array bounds |
1702 | * of "array". |
1703 | * |
1704 | * That is, transform expression |
1705 | * |
1706 | * A[i_0][i_1]...[i_n] |
1707 | * |
1708 | * to |
1709 | * |
1710 | * A[(..((i_0 * b_1 + i_1) ... ) * b_n + i_n] |
1711 | * |
1712 | * where b_0, b_1, ..., b_n are the bounds on the array. |
1713 | * |
1714 | * If the base of "expr" is a member access, then the linearization needs |
1715 | * to be applied to the structure argument of this member access. |
1716 | * |
1717 | * In the base case, if "expr" has no arguments (other than the name of |
1718 | * the array), then we are passing an entire array to a function. |
1719 | * In this case, there is nothing to linearize. |
1720 | * Note that at this point an expression with no arguments can |
1721 | * only be an entire array because the scalar case and |
1722 | * the case of a single struct are handled by the caller.
1723 | * |
1724 | * If the number of specified index expressions in "expr" |
1725 | * is smaller than the dimension of the accessed array, |
1726 | * then the missing i_j also do not appear in the linearized expression. |
1727 | * Furthermore, since such an expression does not refer to a single |
1728 | * element while the default linearized expression would refer to |
1729 | * a single element, we return the expression |
1730 | * |
1731 | * A + (..((i_0 * b_1 + i_1) ... ) * b_l + i_l) |
1732 | * |
1733 | * instead. Note that because of the special case handling above, |
1734 | * we can assume here that there is at least one index expression. |
1735 | */ |
1736 | __isl_give isl_ast_expr *gpu_local_array_info_linearize_index( |
1737 | struct gpu_local_array_info *array, __isl_take isl_ast_expr *expr) |
1738 | { |
1739 | int i, n; |
1740 | isl_ast_expr *arg0; |
1741 | isl_ast_expr *res; |
1742 | isl_ast_expr_list *list; |
1743 | |
1744 | arg0 = isl_ast_expr_get_op_arg(expr, 0); |
1745 | if (isl_ast_expr_get_type(arg0) == isl_ast_expr_op && |
1746 | isl_ast_expr_get_op_type(arg0) == isl_ast_op_member) { |
1747 | isl_ast_expr *arg; |
1748 | |
1749 | arg = isl_ast_expr_get_op_arg(arg0, 0); |
1750 | arg = gpu_local_array_info_linearize_index(array, arg); |
1751 | arg0 = isl_ast_expr_set_op_arg(arg0, 0, arg); |
1752 | expr = isl_ast_expr_set_op_arg(expr, 0, arg0); |
1753 | |
1754 | return expr; |
1755 | } |
1756 | isl_ast_expr_free(arg0); |
1757 | |
1758 | if (isl_ast_expr_get_op_n_arg(expr) == 1) |
1759 | return expr; |
1760 | |
1761 | n = isl_ast_expr_get_op_n_arg(expr); |
1762 | res = isl_ast_expr_get_op_arg(expr, 1); |
1763 | for (i = 1; i < array->n_index; ++i) { |
1764 | isl_ast_expr *expr_i; |
1765 | |
1766 | expr_i = isl_ast_expr_get_op_arg(array->bound_expr, 1 + i); |
1767 | res = isl_ast_expr_mul(res, expr_i); |
1768 | |
1769 | if (i + 1 >= n) |
1770 | continue; |
1771 | expr_i = isl_ast_expr_get_op_arg(expr, i + 1); |
1772 | res = isl_ast_expr_add(res, expr_i); |
1773 | } |
1774 | |
1775 | if (1 + array->n_index > n) { |
1776 | res = isl_ast_expr_add(isl_ast_expr_get_op_arg(expr, 0), res); |
1777 | } else { |
1778 | list = isl_ast_expr_list_from_ast_expr(res); |
1779 | res = isl_ast_expr_get_op_arg(expr, 0); |
1780 | res = isl_ast_expr_access(res, list); |
1781 | } |
1782 | |
1783 | isl_ast_expr_free(expr); |
1784 | |
1785 | return res; |
1786 | } |
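/* Minimal standalone sketch (assumption: the helper name is invented
 * for illustration).  This is the Horner-style offset that the
 * linearized AST expression encodes, written out for concrete integer
 * indices i[] and bounds b[] of an n-dimensional array:
 *
 *	(..((i_0 * b_1 + i_1) * b_2 + i_2) ... ) * b_{n-1} + i_{n-1}
 */
static long linearized_offset(const long *i, const long *b, int n)
{
	long offset = i[0];
	int j;

	for (j = 1; j < n; ++j)
		offset = offset * b[j] + i[j];
	return offset;
}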
1787 | |
1788 | /* AST expression transformation callback for pet_stmt_build_ast_exprs. |
1789 | * |
1790 | * If the AST expression refers to an array that is not accessed |
1791 | * at all, then this means the value of the expression is not used, |
1792 | * so we might as well print zero (NULL pointer) instead. |
1793 | * |
1794 | * If the AST expression refers to a global scalar that is not |
1795 | * a read-only scalar, then its address was passed to the kernel and |
1796 | * we need to dereference it. |
1797 | * |
1798 | * If the AST expression refers to an access to a global array, |
1799 | * then we linearize the access exploiting the bounds in data->local_array. |
1800 | */ |
1801 | static __isl_give isl_ast_expr *transform_expr(__isl_take isl_ast_expr *expr, |
1802 | __isl_keep isl_id *id, void *user) |
1803 | { |
1804 | struct ppcg_transform_data *data = user; |
1805 | |
1806 | if (!data->array) |
1807 | return expr; |
1808 | if (!data->array->accessed) { |
1809 | isl_ctx *ctx; |
1810 | |
1811 | ctx = isl_ast_expr_get_ctx(expr); |
1812 | isl_ast_expr_free(expr); |
1813 | return isl_ast_expr_from_val(isl_val_zero(ctx)); |
1814 | } |
1815 | if (gpu_array_is_read_only_scalar(data->array)) |
1816 | return expr; |
1817 | if (!data->global) |
1818 | return expr; |
1819 | if (data->array->n_index == 0) |
1820 | return dereference(expr); |
1821 | if (!data->array->linearize) |
1822 | return expr; |
1823 | |
1824 | return gpu_local_array_info_linearize_index(data->local_array, expr); |
1825 | } |
1826 | |
1827 | /* This function is called for each instance of a user statement |
1828 | * in the kernel "kernel", identified by "gpu_stmt". |
1829 | * "kernel" may be NULL if we are not inside a kernel. |
1830 | * |
1831 | * We attach a struct ppcg_kernel_stmt to the "node", containing |
1832 | * a computed AST expression for each access, through an annotation |
1833 | * with name "user". |
1834 | * These AST expressions are computed from iterator_map, |
1835 | * which expresses the domain |
1836 | * elements in terms of the generated loops, and sched2copy, |
1837 | * which expresses the outer copy_schedule_dim dimensions of |
1838 | * the kernel schedule computed by PPCG in terms of the generated loops. |
1839 | */ |
1840 | static __isl_give isl_ast_node *create_domain_leaf( |
1841 | struct ppcg_kernel *kernel, __isl_take isl_ast_node *node, |
1842 | __isl_keep isl_ast_build *build, struct gpu_stmt *gpu_stmt, |
1843 | struct gpu_gen *gen) |
1844 | { |
1845 | struct ppcg_transform_data data; |
1846 | struct ppcg_kernel_stmt *stmt; |
1847 | isl_ctx *ctx; |
1848 | isl_id *id; |
1849 | isl_pw_multi_aff *sched2copy; |
1850 | isl_map *map; |
1851 | isl_pw_multi_aff *iterator_map; |
1852 | isl_union_map *schedule; |
1853 | |
1854 | if (!node) |
1855 | return NULL;
1856 | ctx = isl_ast_node_get_ctx(node); |
1857 | |
1858 | stmt = isl_calloc_type(ctx, struct ppcg_kernel_stmt);
1859 | if (!stmt) |
1860 | return isl_ast_node_free(node); |
1861 | |
1862 | schedule = isl_ast_build_get_schedule(build); |
1863 | map = isl_map_reverse(isl_map_from_union_map(schedule)); |
1864 | iterator_map = isl_pw_multi_aff_from_map(map); |
1865 | if (kernel) |
1866 | sched2copy = compute_sched_to_copy(kernel, |
1867 | isl_pw_multi_aff_copy(iterator_map)); |
1868 | else |
1869 | sched2copy = NULL;
1870 | |
1871 | stmt->type = ppcg_kernel_domain; |
1872 | stmt->u.d.stmt = gpu_stmt; |
1873 | |
1874 | data.kernel = kernel; |
1875 | data.accesses = stmt->u.d.stmt->accesses; |
1876 | data.iterator_map = iterator_map; |
1877 | data.sched2copy = sched2copy; |
1878 | stmt->u.d.ref2expr = gen->build_ast_expr(stmt->u.d.stmt->stmt, |
1879 | build, &transform_index, &data, |
1880 | &transform_expr, &data); |
1881 | |
1882 | isl_pw_multi_aff_free(iterator_map); |
1883 | isl_pw_multi_aff_free(sched2copy); |
1884 | |
1885 | id = isl_id_alloc(ctx, "user", stmt); |
1886 | id = isl_id_set_free_user(id, &ppcg_kernel_stmt_free); |
1887 | return isl_ast_node_set_annotation(node, id); |
1888 | } |
1889 | |
1890 | /* This function is called for each statement node in the AST |
1891 | * for copying to or from shared/private memory. |
1892 | * Attach a pointer to a ppcg_kernel_stmt representing the copy |
1893 | * statement to the node. |
1894 | * The statement name is "read" or "write", depending on whether we are |
1895 | * reading from global memory or writing to global memory. |
1896 | * |
1897 | * The schedule is of the form |
1898 | * |
1899 | * type[D -> A] -> L |
1900 | * |
1901 | * where D corresponds to the outer tile->depth dimensions of |
1902 | * the kernel schedule, A to the global array and L to the outer |
1903 | * generated AST schedule. |
1904 | * We compute the inverse and strip off the type, resulting in |
1905 | * |
1906 | * L -> [D -> A] |
1907 | * |
1908 | * We combine this mapping with, on the one hand, the projection
1909 | * |
1910 | * [D -> A] -> A |
1911 | * |
1912 | * and, on the other hand, the group tiling
1913 | * |
1914 | * [D -> A] -> T |
1915 | * |
1916 | * resulting in |
1917 | * |
1918 | * L -> A and L -> T |
1919 | * |
1920 | * and store the corresponding expressions in stmt->index and stmt->local_index, |
1921 | * where stmt points to the ppcg_kernel_stmt that is attached to the node. |
1922 | * stmt->index is linearized if the global memory array is linearized. |
1923 | */ |
1924 | static __isl_give isl_ast_node *create_access_leaf(struct ppcg_kernel *kernel, |
1925 | struct gpu_array_ref_group *group, __isl_take isl_ast_node *node, |
1926 | __isl_keep isl_ast_build *build) |
1927 | { |
1928 | struct ppcg_kernel_stmt *stmt; |
1929 | struct gpu_array_tile *tile; |
1930 | isl_id *id; |
1931 | isl_ast_expr *expr; |
1932 | isl_space *space; |
1933 | isl_map *access; |
1934 | isl_pw_multi_aff *pma, *pma2; |
1935 | const char *type; |
1936 | |
1937 | stmt = isl_calloc_type(kernel->ctx, struct ppcg_kernel_stmt);
1938 | if (!stmt) |
1939 | return isl_ast_node_free(node); |
1940 | |
1941 | access = isl_map_from_union_map(isl_ast_build_get_schedule(build)); |
1942 | type = isl_map_get_tuple_name(access, isl_dim_in); |
1943 | stmt->u.c.read = !strcmp(type, "read")__extension__ ({ size_t __s1_len, __s2_len; (__builtin_constant_p (type) && __builtin_constant_p ("read") && ( __s1_len = __builtin_strlen (type), __s2_len = __builtin_strlen ("read"), (!((size_t)(const void *)((type) + 1) - (size_t)(const void *)(type) == 1) || __s1_len >= 4) && (!((size_t )(const void *)(("read") + 1) - (size_t)(const void *)("read" ) == 1) || __s2_len >= 4)) ? __builtin_strcmp (type, "read" ) : (__builtin_constant_p (type) && ((size_t)(const void *)((type) + 1) - (size_t)(const void *)(type) == 1) && (__s1_len = __builtin_strlen (type), __s1_len < 4) ? (__builtin_constant_p ("read") && ((size_t)(const void *)(("read") + 1) - ( size_t)(const void *)("read") == 1) ? __builtin_strcmp (type, "read") : (__extension__ ({ const unsigned char *__s2 = (const unsigned char *) (const char *) ("read"); int __result = ((( const unsigned char *) (const char *) (type))[0] - __s2[0]); if (__s1_len > 0 && __result == 0) { __result = (((const unsigned char *) (const char *) (type))[1] - __s2[1]); if (__s1_len > 1 && __result == 0) { __result = (((const unsigned char *) (const char *) (type))[2] - __s2[2]); if (__s1_len > 2 && __result == 0) __result = (((const unsigned char *) (const char *) (type))[3] - __s2[3]); } } __result; }))) : (__builtin_constant_p ("read") && ((size_t)(const void *)(("read") + 1) - (size_t)(const void *)("read") == 1) && (__s2_len = __builtin_strlen ("read"), __s2_len < 4) ? (__builtin_constant_p (type) && ((size_t)(const void *)((type) + 1) - (size_t )(const void *)(type) == 1) ? __builtin_strcmp (type, "read") : -(__extension__ ({ const unsigned char *__s2 = (const unsigned char *) (const char *) (type); int __result = (((const unsigned char *) (const char *) ("read"))[0] - __s2[0]); if (__s2_len > 0 && __result == 0) { __result = (((const unsigned char *) (const char *) ("read"))[1] - __s2[1]); if (__s2_len > 1 && __result == 0) { __result = (((const unsigned char *) (const char *) ("read"))[2] - __s2[2]); if (__s2_len > 2 && __result == 0) __result = (((const unsigned char *) (const char *) ("read"))[3] - __s2[3]); } } __result ; }))) : __builtin_strcmp (type, "read")))); }); |
1944 | access = isl_map_reverse(access); |
1945 | pma = isl_pw_multi_aff_from_map(access); |
1946 | pma = isl_pw_multi_aff_reset_tuple_id(pma, isl_dim_out); |
1947 | |
1948 | space = isl_space_range(isl_pw_multi_aff_get_space(pma)); |
1949 | space = isl_space_unwrap(space); |
1950 | pma2 = isl_pw_multi_aff_range_map(space); |
1951 | pma2 = isl_pw_multi_aff_pullback_pw_multi_aff(pma2, |
1952 | isl_pw_multi_aff_copy(pma)); |
1953 | expr = isl_ast_build_access_from_pw_multi_aff(build, pma2); |
1954 | if (group->array->linearize) |
1955 | expr = gpu_local_array_info_linearize_index(group->local_array, |
1956 | expr); |
1957 | stmt->u.c.index = expr; |
1958 | |
1959 | tile = gpu_array_ref_group_tile(group); |
1960 | pma2 = isl_pw_multi_aff_from_multi_aff( |
1961 | isl_multi_aff_copy(tile->tiling)); |
1962 | pma2 = isl_pw_multi_aff_pullback_pw_multi_aff(pma2, pma); |
1963 | expr = isl_ast_build_access_from_pw_multi_aff(build, pma2); |
1964 | stmt->u.c.local_index = expr; |
1965 | |
1966 | stmt->u.c.array = group->array; |
1967 | stmt->u.c.local_array = group->local_array; |
1968 | stmt->type = ppcg_kernel_copy; |
1969 | |
1970 | id = isl_id_alloc(kernel->ctx, "copy", stmt); |
1971 | id = isl_id_set_free_user(id, &ppcg_kernel_stmt_free); |
1972 | return isl_ast_node_set_annotation(node, id); |
1973 | } |
1974 | |
1975 | /* Create a synchronization ppcg_kernel_stmt and |
1976 | * attach it to the node "node" representing the synchronization. |
1977 | */ |
1978 | static __isl_give isl_ast_node *create_sync_leaf( |
1979 | struct ppcg_kernel *kernel, __isl_take isl_ast_node *node, |
1980 | __isl_keep isl_ast_build *build) |
1981 | { |
1982 | struct ppcg_kernel_stmt *stmt; |
1983 | isl_id *id; |
1984 | |
1985 | stmt = isl_calloc_type(kernel->ctx, struct ppcg_kernel_stmt);
1986 | if (!stmt) |
1987 | return isl_ast_node_free(node); |
1988 | |
1989 | stmt->type = ppcg_kernel_sync; |
1990 | id = isl_id_alloc(kernel->ctx, "sync", stmt); |
1991 | id = isl_id_set_free_user(id, &ppcg_kernel_stmt_free); |
1992 | return isl_ast_node_set_annotation(node, id); |
1993 | } |
1994 | |
1995 | /* Build AST expressions for the device array sizes of all arrays in "prog" |
1996 | * that require allocation on the device using "build", as well as |
1997 | * for the original array sizes of all arrays that need to be declared |
1998 | * on the host. |
1999 | * "node" is freed in case of error. |
2000 | */ |
2001 | static __isl_give isl_ast_node *build_array_bounds( |
2002 | __isl_take isl_ast_node *node, struct gpu_prog *prog, |
2003 | __isl_keep isl_ast_build *build) |
2004 | { |
2005 | int i; |
2006 | |
2007 | for (i = 0; i < prog->n_array; ++i) { |
2008 | struct gpu_array_info *array = &prog->array[i]; |
2009 | isl_multi_pw_aff *size; |
2010 | isl_ast_expr *expr; |
2011 | |
2012 | if (!gpu_array_requires_device_allocation(array)) |
2013 | continue; |
2014 | |
2015 | size = isl_multi_pw_aff_copy(array->bound); |
2016 | expr = ppcg_build_size_expr(size, build); |
2017 | array->bound_expr = expr; |
2018 | if (!expr) |
2019 | return isl_ast_node_free(node); |
2020 | } |
2021 | |
2022 | for (i = 0; i < prog->n_array; ++i) { |
2023 | struct gpu_array_info *array = &prog->array[i]; |
2024 | isl_set *extent; |
2025 | isl_multi_pw_aff *size; |
2026 | isl_ast_expr *expr; |
2027 | |
2028 | if (!array->declare_local) |
2029 | continue; |
2030 | extent = isl_set_copy(array->declared_extent); |
2031 | size = ppcg_size_from_extent(extent); |
2032 | expr = ppcg_build_size_expr(size, build); |
2033 | array->declared_size = expr; |
2034 | if (!expr) |
2035 | return isl_ast_node_free(node); |
2036 | } |
2037 | |
2038 | return node; |
2039 | } |
2040 | |
2041 | /* Internal data structure for at_domain. |
2042 | * |
2043 | * "prog" represents the entire scop. |
2044 | * "kernel" points to the kernel to which the current schedule node |
2045 | * belongs. It is set by before_mark and reset by after_mark. |
2046 | * It may be NULL if we are outside any kernel. |
2047 | */ |
2048 | struct ppcg_at_domain_data { |
2049 | struct gpu_prog *prog; |
2050 | struct gpu_gen *gen; |
2051 | struct ppcg_kernel *kernel; |
2052 | }; |
2053 | |
2054 | /* This function is called for each instance of a user statement |
2055 | * in the kernel. This may be one of the original user statements |
2056 | * or a statement introduced by PPCG. |
2057 | * |
2058 | * We first check if the statement id corresponds to a gpu statement, |
2059 | * which indicates the statement is an original user statement. Any statement |
2060 | * that is not an original user statement has been introduced by PPCG and |
2061 | * requires special handling. |
2062 | * |
2063 | * If the user statement is one of the original user statements, then we call |
2064 | * create_domain_leaf. If it is "init_device", then we call |
2065 | * build_array_bounds. Otherwise, we check if it is a copy or synchronization |
2066 | * statement and call the appropriate functions. Statements that copy an array |
2067 | * to/from the device do not need any further treatment. |
2068 | * Neither does "clear_device". |
2069 | */ |
2070 | static __isl_give isl_ast_node *at_domain(__isl_take isl_ast_node *node, |
2071 | __isl_keep isl_ast_build *build, void *user) |
2072 | { |
2073 | struct ppcg_at_domain_data *data = user; |
2074 | struct gpu_stmt *gpu_stmt; |
2075 | isl_ast_expr *expr, *arg; |
2076 | isl_id *id; |
2077 | int is_sync; |
2078 | const char *name; |
2079 | void *p; |
2080 | |
2081 | expr = isl_ast_node_user_get_expr(node); |
2082 | arg = isl_ast_expr_get_op_arg(expr, 0); |
2083 | id = isl_ast_expr_get_id(arg); |
2084 | name = isl_id_get_name(id); |
2085 | p = isl_id_get_user(id); |
2086 | isl_ast_expr_free(expr); |
2087 | isl_ast_expr_free(arg); |
2088 | |
2089 | gpu_stmt = find_stmt(data->prog, id); |
2090 | is_sync = gpu_tree_id_is_sync(id, data->kernel); |
2091 | isl_id_free(id); |
2092 | |
2093 | if (gpu_stmt) |
2094 | return create_domain_leaf(data->kernel, node, build, gpu_stmt, |
2095 | data->gen); |
2096 | |
2097 | if (!prefixcmp(name, "to_device_") || !prefixcmp(name, "from_device_")) |
2098 | return node; |
2099 | if (!strcmp(name, "init_device")__extension__ ({ size_t __s1_len, __s2_len; (__builtin_constant_p (name) && __builtin_constant_p ("init_device") && (__s1_len = __builtin_strlen (name), __s2_len = __builtin_strlen ("init_device"), (!((size_t)(const void *)((name) + 1) - (size_t )(const void *)(name) == 1) || __s1_len >= 4) && ( !((size_t)(const void *)(("init_device") + 1) - (size_t)(const void *)("init_device") == 1) || __s2_len >= 4)) ? __builtin_strcmp (name, "init_device") : (__builtin_constant_p (name) && ((size_t)(const void *)((name) + 1) - (size_t)(const void *) (name) == 1) && (__s1_len = __builtin_strlen (name), __s1_len < 4) ? (__builtin_constant_p ("init_device") && ( (size_t)(const void *)(("init_device") + 1) - (size_t)(const void *)("init_device") == 1) ? __builtin_strcmp (name, "init_device" ) : (__extension__ ({ const unsigned char *__s2 = (const unsigned char *) (const char *) ("init_device"); int __result = (((const unsigned char *) (const char *) (name))[0] - __s2[0]); if (__s1_len > 0 && __result == 0) { __result = (((const unsigned char *) (const char *) (name))[1] - __s2[1]); if (__s1_len > 1 && __result == 0) { __result = (((const unsigned char *) (const char *) (name))[2] - __s2[2]); if (__s1_len > 2 && __result == 0) __result = (((const unsigned char * ) (const char *) (name))[3] - __s2[3]); } } __result; }))) : ( __builtin_constant_p ("init_device") && ((size_t)(const void *)(("init_device") + 1) - (size_t)(const void *)("init_device" ) == 1) && (__s2_len = __builtin_strlen ("init_device" ), __s2_len < 4) ? (__builtin_constant_p (name) && ((size_t)(const void *)((name) + 1) - (size_t)(const void *) (name) == 1) ? __builtin_strcmp (name, "init_device") : -(__extension__ ({ const unsigned char *__s2 = (const unsigned char *) (const char *) (name); int __result = (((const unsigned char *) (const char *) ("init_device"))[0] - __s2[0]); if (__s2_len > 0 && __result == 0) { __result = (((const unsigned char *) (const char *) ("init_device"))[1] - __s2[1]); if (__s2_len > 1 && __result == 0) { __result = (((const unsigned char *) (const char *) ("init_device"))[2] - __s2[2]); if (__s2_len > 2 && __result == 0) __result = (((const unsigned char *) (const char *) ("init_device"))[3] - __s2[3]); } } __result; }))) : __builtin_strcmp (name, "init_device")))); })) |
2100 | return build_array_bounds(node, data->prog, build); |
2101 | if (!strcmp(name, "clear_device")__extension__ ({ size_t __s1_len, __s2_len; (__builtin_constant_p (name) && __builtin_constant_p ("clear_device") && (__s1_len = __builtin_strlen (name), __s2_len = __builtin_strlen ("clear_device"), (!((size_t)(const void *)((name) + 1) - (size_t )(const void *)(name) == 1) || __s1_len >= 4) && ( !((size_t)(const void *)(("clear_device") + 1) - (size_t)(const void *)("clear_device") == 1) || __s2_len >= 4)) ? __builtin_strcmp (name, "clear_device") : (__builtin_constant_p (name) && ((size_t)(const void *)((name) + 1) - (size_t)(const void *) (name) == 1) && (__s1_len = __builtin_strlen (name), __s1_len < 4) ? (__builtin_constant_p ("clear_device") && ( (size_t)(const void *)(("clear_device") + 1) - (size_t)(const void *)("clear_device") == 1) ? __builtin_strcmp (name, "clear_device" ) : (__extension__ ({ const unsigned char *__s2 = (const unsigned char *) (const char *) ("clear_device"); int __result = (((const unsigned char *) (const char *) (name))[0] - __s2[0]); if (__s1_len > 0 && __result == 0) { __result = (((const unsigned char *) (const char *) (name))[1] - __s2[1]); if (__s1_len > 1 && __result == 0) { __result = (((const unsigned char *) (const char *) (name))[2] - __s2[2]); if (__s1_len > 2 && __result == 0) __result = (((const unsigned char * ) (const char *) (name))[3] - __s2[3]); } } __result; }))) : ( __builtin_constant_p ("clear_device") && ((size_t)(const void *)(("clear_device") + 1) - (size_t)(const void *)("clear_device" ) == 1) && (__s2_len = __builtin_strlen ("clear_device" ), __s2_len < 4) ? (__builtin_constant_p (name) && ((size_t)(const void *)((name) + 1) - (size_t)(const void *) (name) == 1) ? __builtin_strcmp (name, "clear_device") : -(__extension__ ({ const unsigned char *__s2 = (const unsigned char *) (const char *) (name); int __result = (((const unsigned char *) (const char *) ("clear_device"))[0] - __s2[0]); if (__s2_len > 0 && __result == 0) { __result = (((const unsigned char *) (const char *) ("clear_device"))[1] - __s2[1]); if (__s2_len > 1 && __result == 0) { __result = (((const unsigned char *) (const char *) ("clear_device"))[2] - __s2[2]); if ( __s2_len > 2 && __result == 0) __result = (((const unsigned char *) (const char *) ("clear_device"))[3] - __s2[ 3]); } } __result; }))) : __builtin_strcmp (name, "clear_device" )))); })) |
2102 | return node; |
2103 | if (is_sync < 0) |
2104 | return isl_ast_node_free(node); |
2105 | if (!strcmp(name, "read")__extension__ ({ size_t __s1_len, __s2_len; (__builtin_constant_p (name) && __builtin_constant_p ("read") && ( __s1_len = __builtin_strlen (name), __s2_len = __builtin_strlen ("read"), (!((size_t)(const void *)((name) + 1) - (size_t)(const void *)(name) == 1) || __s1_len >= 4) && (!((size_t )(const void *)(("read") + 1) - (size_t)(const void *)("read" ) == 1) || __s2_len >= 4)) ? __builtin_strcmp (name, "read" ) : (__builtin_constant_p (name) && ((size_t)(const void *)((name) + 1) - (size_t)(const void *)(name) == 1) && (__s1_len = __builtin_strlen (name), __s1_len < 4) ? (__builtin_constant_p ("read") && ((size_t)(const void *)(("read") + 1) - ( size_t)(const void *)("read") == 1) ? __builtin_strcmp (name, "read") : (__extension__ ({ const unsigned char *__s2 = (const unsigned char *) (const char *) ("read"); int __result = ((( const unsigned char *) (const char *) (name))[0] - __s2[0]); if (__s1_len > 0 && __result == 0) { __result = (((const unsigned char *) (const char *) (name))[1] - __s2[1]); if (__s1_len > 1 && __result == 0) { __result = (((const unsigned char *) (const char *) (name))[2] - __s2[2]); if (__s1_len > 2 && __result == 0) __result = (((const unsigned char *) (const char *) (name))[3] - __s2[3]); } } __result; }))) : (__builtin_constant_p ("read") && ((size_t)(const void *)(("read") + 1) - (size_t)(const void *)("read") == 1) && (__s2_len = __builtin_strlen ("read"), __s2_len < 4) ? (__builtin_constant_p (name) && ((size_t)(const void *)((name) + 1) - (size_t )(const void *)(name) == 1) ? __builtin_strcmp (name, "read") : -(__extension__ ({ const unsigned char *__s2 = (const unsigned char *) (const char *) (name); int __result = (((const unsigned char *) (const char *) ("read"))[0] - __s2[0]); if (__s2_len > 0 && __result == 0) { __result = (((const unsigned char *) (const char *) ("read"))[1] - __s2[1]); if (__s2_len > 1 && __result == 0) { __result = (((const unsigned char *) (const char *) ("read"))[2] - __s2[2]); if (__s2_len > 2 && __result == 0) __result = (((const unsigned char *) (const char *) ("read"))[3] - __s2[3]); } } __result ; }))) : __builtin_strcmp (name, "read")))); }) || !strcmp(name, "write")__extension__ ({ size_t __s1_len, __s2_len; (__builtin_constant_p (name) && __builtin_constant_p ("write") && ( __s1_len = __builtin_strlen (name), __s2_len = __builtin_strlen ("write"), (!((size_t)(const void *)((name) + 1) - (size_t)( const void *)(name) == 1) || __s1_len >= 4) && (!( (size_t)(const void *)(("write") + 1) - (size_t)(const void * )("write") == 1) || __s2_len >= 4)) ? __builtin_strcmp (name , "write") : (__builtin_constant_p (name) && ((size_t )(const void *)((name) + 1) - (size_t)(const void *)(name) == 1) && (__s1_len = __builtin_strlen (name), __s1_len < 4) ? (__builtin_constant_p ("write") && ((size_t)(const void *)(("write") + 1) - (size_t)(const void *)("write") == 1 ) ? 
__builtin_strcmp (name, "write") : (__extension__ ({ const unsigned char *__s2 = (const unsigned char *) (const char *) ("write"); int __result = (((const unsigned char *) (const char *) (name))[0] - __s2[0]); if (__s1_len > 0 && __result == 0) { __result = (((const unsigned char *) (const char *) ( name))[1] - __s2[1]); if (__s1_len > 1 && __result == 0) { __result = (((const unsigned char *) (const char *) ( name))[2] - __s2[2]); if (__s1_len > 2 && __result == 0) __result = (((const unsigned char *) (const char *) (name ))[3] - __s2[3]); } } __result; }))) : (__builtin_constant_p ( "write") && ((size_t)(const void *)(("write") + 1) - ( size_t)(const void *)("write") == 1) && (__s2_len = __builtin_strlen ("write"), __s2_len < 4) ? (__builtin_constant_p (name) && ((size_t)(const void *)((name) + 1) - (size_t)(const void *) (name) == 1) ? __builtin_strcmp (name, "write") : -(__extension__ ({ const unsigned char *__s2 = (const unsigned char *) (const char *) (name); int __result = (((const unsigned char *) (const char *) ("write"))[0] - __s2[0]); if (__s2_len > 0 && __result == 0) { __result = (((const unsigned char *) (const char *) ("write"))[1] - __s2[1]); if (__s2_len > 1 && __result == 0) { __result = (((const unsigned char *) (const char *) ("write"))[2] - __s2[2]); if (__s2_len > 2 && __result == 0) __result = (((const unsigned char *) (const char *) ("write"))[3] - __s2[3]); } } __result; }))) : __builtin_strcmp (name, "write")))); })) { |
2106 | struct gpu_array_ref_group *group = p; |
2107 | return create_access_leaf(data->kernel, group, node, build); |
2108 | } |
2109 | if (!is_sync) |
2110 | isl_die(data->prog->ctx, isl_error_internal,
2111 | "unknown statement type",
2112 | return isl_ast_node_free(node));
2113 | return create_sync_leaf(data->kernel, node, build); |
2114 | } |
2115 | |
2116 | /* Given a set of wrapped references "ref", return the corresponding |
2117 | * access relations based on the tagged access relations "tagged". |
2118 | * |
2119 | * The elements of "ref" are of the form |
2120 | * |
2121 | * [D -> R] |
2122 | * |
2123 | * with D an iteration domain and R a reference.
2124 | * The elements of "tagged" are of the form |
2125 | * |
2126 | * [D -> R] -> A |
2127 | * |
2128 | * with A an array. |
2129 | * |
2130 | * Extend "tagged" to include the iteration domain in the range, i.e., |
2131 | * |
2132 | * [D -> R] -> [D -> A] |
2133 | * |
2134 | * apply the result to "ref" and then unwrap the resulting set |
2135 | * to obtain relations of the form |
2136 | * |
2137 | * D -> A |
2138 | */ |
2139 | static __isl_give isl_union_map *wrapped_reference_to_access( |
2140 | __isl_take isl_union_set *ref, __isl_take isl_union_map *tagged) |
2141 | { |
2142 | isl_union_map *tag2access; |
2143 | |
2144 | tag2access = isl_union_map_copy(tagged); |
2145 | tag2access = isl_union_map_universe(tag2access); |
2146 | tag2access = isl_union_set_unwrap(isl_union_map_domain(tag2access)); |
2147 | tag2access = isl_union_map_domain_map(tag2access); |
2148 | tag2access = isl_union_map_range_product(tag2access, tagged); |
2149 | |
2150 | ref = isl_union_set_coalesce(ref); |
2151 | ref = isl_union_set_apply(ref, tag2access); |
2152 | |
2153 | return isl_union_set_unwrap(ref); |
2154 | } |
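/* Illustrative note (editorial sketch with invented names): with the
 * tagged access relation
 *
 *	tagged = { [S[i] -> ref0[]] -> A[i] }
 *
 * the constructed relation is
 *
 *	tag2access = { [S[i] -> ref0[]] -> [S[i] -> A[i]] }
 *
 * so the wrapped reference set { [S[i] -> ref0[]] : 0 <= i < 4 }
 * is mapped to { [S[i] -> A[i]] : 0 <= i < 4 } and unwraps to the
 * access relation { S[i] -> A[i] : 0 <= i < 4 }.
 */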
2155 | |
2156 | /* Given an access relation "access" from one or more array reference groups, |
2157 | * remove those reads if ("read" is 1) or writes (if "read" is 0) |
2158 | * that are only needed to communicate data within |
2159 | * the same iteration of "sched". |
2160 | * The domain of "sched" corresponds to the original statement instances, |
2161 | * i.e., those that appear in the domains of the access relations. |
2162 | * "tagged" contains all tagged access relations to all |
2163 | * the array reference groups accessed by "access" from statement |
2164 | * instances scheduled by "sched". |
2165 | * |
2166 | * If the access is a read then it is either an element of |
2167 | * |
2168 | * live_in union (range flow) |
2169 | * |
2170 | * where live_in and flow may be overapproximations, or |
2171 | * it reads an uninitialized value (that is not live-in because |
2172 | * there is an intermediate kill) or it reads a value that was |
2173 | * written within the same (compound) statement instance. |
2174 | * If the access is a write then it is either an element of |
2175 | * |
2176 | * live_out union (domain flow) |
2177 | * |
2178 | * or it writes a value that is never read (and is not live-out |
2179 | * because of an intermediate kill) or only |
2180 | * within the same (compound) statement instance. |
2181 | * In both cases, the access relation is also a subset of |
2182 | * the group access relation. |
2183 | * |
2184 | * The cases where an uninitialized value is read or a value is written |
2185 | * that is never read or where the dataflow occurs within a statement |
2186 | * instance are also considered local and may also be removed. |
2187 | * |
2188 | * Essentially, we compute the intersection of "access" with either |
2189 | * |
2190 | * live_in union (range non-local-flow) |
2191 | * |
2192 | * or |
2193 | * |
2194 | * live_out union (domain non-local-flow) |
2195 | * |
2196 | * We first construct a relation "local" |
2197 | * |
2198 | * [[D -> R] -> [D' -> R']] |
2199 | * |
2200 | * of pairs of domain iterations accessing the reference group |
2201 | * and references in the group that are coscheduled by "sched". |
2202 | * |
2203 | * If this relation does not intersect the dataflow dependences, |
2204 | * then there is nothing we can possibly remove, unless the dataflow |
2205 | * dependences themselves only relate a subset of the accesses. |
2206 | * In particular, the accesses may not be involved in any dataflow |
2207 | * dependences, either because they are uninitialized reads/dead writes |
2208 | * or because the dataflow occurs inside a statement instance. |
2209 | * |
2210 | * Since the computation below may break up the access relation |
2211 | * into smaller pieces, we only perform the intersection with |
2212 | * the non-local dependent accesses if the local pairs |
2213 | * intersect the dataflow dependences. Otherwise, we intersect |
2214 | * with the universe of the non-local dependent accesses. |
2215 | * This should at least remove accesses from statements that |
2216 | * do not participate in any dependences. |
2217 | * |
2218 | * In particular, we remove the "local" dataflow dependences from |
2219 | * the set of all dataflow dependences, or at least those |
2220 | * that may contribute to a domain/range that intersects |
2221 | * the domain of "access". |
2222 | * Note that if the potential dataflow dependences are an overapproximation |
2223 | * of the actual dataflow dependences, then the result remains an |
2224 | * overapproximation of the non-local dataflow dependences. |
2225 | * Copying to/from global memory is only needed for the references |
2226 | * in the domain/range of the result or for accesses that are live out/in |
2227 | * for the entire scop. |
2228 | * |
2229 | * We therefore map the domain/range of the "external" relation |
2230 | * to the corresponding access relation and take the union with |
2231 | * the live out/in relation. |
2232 | */ |
2233 | static __isl_give isl_union_map *remove_local_accesses( |
2234 | struct gpu_prog *prog, __isl_take isl_union_map *tagged, |
2235 | __isl_take isl_union_map *access, __isl_take isl_union_map *sched, |
2236 | int read) |
2237 | { |
2238 | int empty; |
2239 | isl_union_pw_multi_aff *tagger; |
2240 | isl_union_set *domain, *access_domain; |
2241 | isl_union_map *local, *external, *universe; |
2242 | isl_union_set *tag_set; |
2243 | |
2244 | if (isl_union_map_is_empty(access)) { |
2245 | isl_union_map_free(sched); |
2246 | isl_union_map_free(tagged); |
2247 | return access; |
2248 | } |
2249 | |
2250 | tagger = isl_union_pw_multi_aff_copy(prog->scop->tagger); |
2251 | domain = isl_union_map_domain(isl_union_map_copy(tagged)); |
2252 | tagger = isl_union_pw_multi_aff_intersect_domain(tagger, |
2253 | isl_union_set_copy(domain)); |
2254 | sched = isl_union_map_preimage_domain_union_pw_multi_aff(sched, tagger); |
2255 | |
2256 | local = isl_union_map_apply_range(sched, |
2257 | isl_union_map_reverse(isl_union_map_copy(sched))); |
2258 | local = isl_union_map_intersect(local, |
2259 | isl_union_map_copy(prog->scop->tagged_dep_flow)); |
2260 | |
2261 | empty = isl_union_map_is_empty(local); |
2262 | |
2263 | external = isl_union_map_copy(prog->scop->tagged_dep_flow); |
2264 | universe = isl_union_map_universe(isl_union_map_copy(access)); |
2265 | access_domain = isl_union_map_domain(universe); |
2266 | domain = isl_union_set_universe(domain); |
2267 | universe = isl_union_set_unwrap(domain); |
2268 | universe = isl_union_map_intersect_domain(universe, access_domain); |
2269 | domain = isl_union_map_wrap(universe); |
2270 | if (read) |
2271 | external = isl_union_map_intersect_range(external, domain); |
2272 | else |
2273 | external = isl_union_map_intersect_domain(external, domain); |
2274 | external = isl_union_map_intersect_params(external, |
2275 | isl_set_copy(prog->scop->context)); |
2276 | external = isl_union_map_subtract(external, local); |
2277 | |
2278 | if (read) { |
2279 | tag_set = isl_union_map_range(external); |
2280 | external = wrapped_reference_to_access(tag_set, tagged); |
2281 | external = isl_union_map_union(external, |
2282 | isl_union_map_copy(prog->scop->live_in)); |
2283 | } else { |
2284 | tag_set = isl_union_map_domain(external); |
2285 | external = wrapped_reference_to_access(tag_set, tagged); |
2286 | external = isl_union_map_union(external, |
2287 | isl_union_map_copy(prog->scop->live_out)); |
2288 | } |
2289 | |
2290 | if (empty < 0) |
2291 | external = isl_union_map_free(external); |
2292 | else if (empty) |
2293 | external = isl_union_map_universe(external); |
2294 | |
2295 | access = isl_union_map_intersect(access, external); |
2296 | |
2297 | return access; |
2298 | } |
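/* Illustrative note (editorial sketch with invented names): if "sched"
 * schedules all accesses of S[i] at time [i] and the only dataflow is
 *
 *	{ [S[i] -> write0[]] -> [S[i] -> read0[]] }
 *
 * then producer and consumer are coscheduled, the dependence is
 * entirely local, and the corresponding accesses only need to be
 * copied to/from global memory to the extent that they are also
 * live-in or live-out for the scop.
 */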
2299 | |
2300 | /* Given an access relation "access" from "group", remove those reads |
2301 | * if ("read" is 1) or writes (if "read" is 0) that are only needed to |
2302 | * communicate data within the same iteration of the schedule "prefix" |
2303 | * at the position where the copying of the group is inserted. |
2304 | * That is, the output dimension of "prefix" |
2305 | * is equal to tile->depth. |
2306 | * The domain of "prefix" corresponds to the original statement instances, |
2307 | * i.e., those that appear in the domains of the access relations. |
2308 | * |
2309 | * Extract the tagged access relation of "group" and |
2310 | * then call remove_local_accesses. |
2311 | */ |
2312 | static __isl_give isl_union_map *remove_local_accesses_group( |
2313 | struct ppcg_kernel *kernel, struct gpu_array_ref_group *group, |
2314 | __isl_take isl_union_map *access, __isl_keep isl_union_map *prefix, |
2315 | int read) |
2316 | { |
2317 | isl_union_map *sched, *tagged; |
2318 | |
2319 | if (isl_union_map_is_empty(access)) |
2320 | return access; |
2321 | |
2322 | tagged = group_tagged_access_relation(group); |
2323 | sched = isl_union_map_copy(prefix); |
2324 | |
2325 | return remove_local_accesses(kernel->prog, tagged, access, sched, read); |
2326 | } |
2327 | |
2328 | /* Build an access AST expression for the effective grid size using "build". |
2329 | * Store the result in kernel->grid_size_expr. |
2330 | */ |
2331 | static isl_stat build_grid_size(struct ppcg_kernel *kernel, |
2332 | __isl_keep isl_ast_build *build) |
2333 | { |
2334 | isl_multi_pw_aff *size; |
2335 | |
2336 | size = isl_multi_pw_aff_copy(kernel->grid_size); |
2337 | size = isl_multi_pw_aff_set_tuple_name(size, isl_dim_out, "grid"); |
2338 | kernel->grid_size_expr = ppcg_build_size_expr(size, build); |
2339 | |
2340 | if (!kernel->grid_size_expr) |
2341 | return isl_stat_error; |
2342 | return isl_stat_ok; |
2343 | } |
2344 | |
2345 | /* Build access AST expressions for the localized array sizes using "build". |
2346 | * Store the result in local->bound_expr. |
2347 | * Only do this for arrays for which localized bounds have been computed. |
2348 | */ |
2349 | static isl_stat build_local_array_sizes(struct ppcg_kernel *kernel, |
2350 | __isl_keep isl_ast_build *build) |
2351 | { |
2352 | int i; |
2353 | |
2354 | for (i = 0; i < kernel->n_array; ++i) { |
2355 | struct gpu_local_array_info *local = &kernel->array[i]; |
2356 | isl_multi_pw_aff *size; |
2357 | |
2358 | if (local->n_group == 0) |
2359 | continue; |
2360 | size = isl_multi_pw_aff_copy(local->bound); |
2361 | local->bound_expr = ppcg_build_size_expr(size, build); |
2362 | if (!local->bound_expr) |
2363 | return isl_stat_error; |
2364 | } |
2365 | |
2366 | return isl_stat_ok; |
2367 | } |
2368 | |
2369 | /* Build access AST expressions for the effective grid size and |
2370 | * the localized array sizes using "build". |
2371 | */ |
2372 | static isl_stat build_grid_and_local_array_sizes(struct ppcg_kernel *kernel, |
2373 | __isl_keep isl_ast_build *build) |
2374 | { |
2375 | if (build_grid_size(kernel, build) < 0) |
2376 | return isl_stat_error; |
2377 | if (build_local_array_sizes(kernel, build) < 0) |
2378 | return isl_stat_error; |
2379 | return isl_stat_ok; |
2380 | } |
2381 | |
2382 | /* This function is called before the AST generator starts traversing |
2383 | * the schedule subtree of a node with mark "mark". |
2384 | * |
2385 | * If the mark is called "kernel", store the kernel pointer in data->kernel |
2386 | * for use in at_domain and build AST expressions for the grid size and |
2387 | * the localized array sizes. |
2388 | */ |
2389 | static isl_stat before_mark(__isl_keep isl_id *mark, |
2390 | __isl_keep isl_ast_build *build, void *user) |
2391 | { |
2392 | struct ppcg_at_domain_data *data = user; |
2393 | |
2394 | if (!mark) |
2395 | return isl_stat_error; |
2396 | if (!strcmp(isl_id_get_name(mark), "kernel")) {
2397 | data->kernel = isl_id_get_user(mark); |
2398 | if (build_grid_and_local_array_sizes(data->kernel, build) < 0) |
2399 | return isl_stat_error; |
2400 | } |
2401 | return isl_stat_ok; |
2402 | } |
2403 | |
2404 | /* This function is called after the AST generator has finished traversing |
2405 | * the schedule subtree of a mark node. "node" points to the corresponding |
2406 | * mark AST node. |
2407 | * |
2408 | * If the mark is called "kernel", then replace "node" by a user node |
2409 | * that "calls" the kernel, representing the launch of the kernel. |
2410 | * The original "node" is stored inside the kernel object so that |
2411 | * it can be used to print the device code. |
2412 | * Note that this assumes that a kernel is only launched once. |
2413 | * Also clear data->kernel. |
2414 | */ |
2415 | static __isl_give isl_ast_node *after_mark(__isl_take isl_ast_node *node, |
2416 | __isl_keep isl_ast_build *build, void *user) |
2417 | { |
2418 | isl_ctx *ctx; |
2419 | isl_id *id; |
2420 | isl_ast_expr *expr; |
2421 | isl_ast_expr_list *list; |
2422 | struct ppcg_kernel *kernel; |
2423 | struct ppcg_at_domain_data *data = user; |
2424 | |
2425 | ctx = isl_ast_node_get_ctx(node); |
2426 | id = isl_ast_node_mark_get_id(node); |
2427 | if (!id) |
2428 | return isl_ast_node_free(node); |
2429 | if (strcmp(isl_id_get_name(id), "kernel") || !data->kernel) {
2430 | isl_id_free(id); |
2431 | return node; |
2432 | } |
2433 | kernel = data->kernel; |
2434 | data->kernel = NULL;
2435 | kernel->space = isl_ast_build_get_schedule_space(build); |
2436 | kernel->tree = isl_ast_node_mark_get_node(node); |
2437 | isl_ast_node_free(node); |
2438 | |
2439 | expr = isl_ast_expr_from_id(isl_id_copy(id)); |
2440 | list = isl_ast_expr_list_alloc(ctx, 0); |
2441 | expr = isl_ast_expr_call(expr, list); |
2442 | node = isl_ast_node_alloc_user(expr); |
2443 | node = isl_ast_node_set_annotation(node, id); |
2444 | |
2445 | return node; |
2446 | } |
2447 | |
2448 | static isl_bool update_depth(__isl_keep isl_schedule_node *node, void *user) |
2449 | { |
2450 | int *depth = user; |
2451 | int node_depth; |
2452 | |
2453 | if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf) |
2454 | return isl_bool_true; |
2455 | node_depth = isl_schedule_node_get_schedule_depth(node); |
2456 | if (node_depth > *depth) |
2457 | *depth = node_depth; |
2458 | |
2459 | return isl_bool_false; |
2460 | } |
2461 | |
2462 | /* Use isl to generate code for both the host and the device |
2463 | * from "schedule". |
2464 | * The device code is marked by "kernel" mark nodes in the schedule tree, |
2465 | * containing a pointer to a ppcg_kernel object. |
2466 | * The returned AST only contains the AST for the host code. |
2467 | * The ASTs for the device code are embedded in ppcg_kernel objects |
2468 | * attached to the leaf nodes that call "kernel". |
2469 | */ |
2470 | __isl_give isl_ast_node *generate_code(struct gpu_gen *gen, |
2471 | __isl_take isl_schedule *schedule) |
2472 | { |
2473 | struct ppcg_at_domain_data data; |
2474 | isl_ast_build *build; |
2475 | isl_ast_node *tree; |
2476 | isl_id_list *iterators; |
2477 | int depth; |
2478 | |
2479 | data.prog = gen->prog; |
2480 | data.gen = gen; |
2481 | data.kernel = NULL;
2482 | |
2483 | depth = 0; |
2484 | if (isl_schedule_foreach_schedule_node_top_down(schedule, &update_depth, |
2485 | &depth) < 0) |
2486 | return NULL;
2487 | build = isl_ast_build_alloc(gen->prog->ctx); |
2488 | iterators = ppcg_scop_generate_names(gen->prog->scop, depth, "c"); |
2489 | build = isl_ast_build_set_iterators(build, iterators); |
2490 | build = isl_ast_build_set_at_each_domain(build, &at_domain, &data); |
2491 | build = isl_ast_build_set_before_each_mark(build, &before_mark, &data); |
2492 | build = isl_ast_build_set_after_each_mark(build, &after_mark, &data); |
2493 | if (gen->prog->scop->options->debug->dump_final_schedule) |
2494 | isl_schedule_dump(schedule); |
2495 | tree = isl_ast_build_node_from_schedule(build, schedule); |
2496 | isl_ast_build_free(build); |
2497 | |
2498 | return tree; |
2499 | } |
2500 | |
2501 | __isl_give isl_union_map *extract_sizes_from_str(isl_ctx *ctx, const char *str) |
2502 | { |
2503 | if (!str) |
2504 | return NULL;
2505 | return isl_union_map_read_from_str(ctx, str); |
2506 | } |
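
/* Illustrative sketch, not part of the original source: the string is
 * parsed as an isl union map in isl's textual notation.  For ppcg's
 * per-kernel sizes it could, for instance, look like
 *
 *	isl_union_map *sizes;
 *	sizes = extract_sizes_from_str(ctx,
 *		"{ kernel[i] -> tile[32,32]; kernel[i] -> block[16,16] }");
 *	...
 *	isl_union_map_free(sizes);
 *
 * where the "tile" and "block" spaces follow ppcg's --sizes convention.
 */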
2507 | |
2508 | /* Can "node" be tiled and then mapped to block and thread identifiers? |
2509 | * That is, is it permutable with at least one coincident dimension? |
2510 | */ |
2511 | static int is_permutable(__isl_keep isl_schedule_node *node) |
2512 | { |
2513 | if (!node) |
2514 | return -1; |
2515 | |
2516 | if (isl_schedule_node_get_type(node) != isl_schedule_node_band) |
2517 | return 0; |
2518 | if (!isl_schedule_node_band_get_permutable(node)) |
2519 | return 0; |
2520 | if (isl_schedule_node_band_n_member(node) < 1) |
2521 | return 0; |
2522 | if (!isl_schedule_node_band_member_get_coincident(node, 0)) |
2523 | return 0; |
2524 | |
2525 | return 1; |
2526 | } |
2527 | |
2528 | /* An isl_schedule_foreach_schedule_node_top_down callback
2529 | * for setting *any_permutable and aborting the search |
2530 | * if "node" is a permutable band with coincident dimensions. |
2531 | * Otherwise, continue searching. |
2532 | */ |
2533 | static isl_bool set_permutable(__isl_keep isl_schedule_node *node, void *user) |
2534 | { |
2535 | int *any_permutable = user; |
2536 | int permutable; |
2537 | |
2538 | permutable = is_permutable(node); |
2539 | if (permutable < 0) |
2540 | return isl_bool_error; |
2541 | if (!permutable) |
2542 | return isl_bool_true; |
2543 | |
2544 | *any_permutable = 1; |
2545 | |
2546 | return isl_bool_error; |
2547 | } |
2548 | |
2549 | /* Does the subtree rooted at "node" have any suitably permutable band nodes? |
2550 | * That is, does it have any nodes that are permutable and that |
2551 | * have at least one coincident dimension?
2552 | */ |
2553 | static int subtree_has_permutable_bands(__isl_keep isl_schedule_node *node) |
2554 | { |
2555 | int any_parallelism = 0; |
2556 | |
2557 | if (isl_schedule_node_foreach_descendant_top_down(node, &set_permutable, |
2558 | &any_parallelism) < 0 && |
2559 | !any_parallelism) |
2560 | return -1; |
2561 | |
2562 | return any_parallelism; |
2563 | } |
2564 | |
2565 | /* Does "schedule" contain any permutable band with at least one coincident |
2566 | * member? |
2567 | */ |
2568 | int has_any_permutable_node(__isl_keep isl_schedule *schedule) |
2569 | { |
2570 | isl_schedule_node *root; |
2571 | int any_permutable; |
2572 | |
2573 | root = isl_schedule_get_root(schedule); |
2574 | any_permutable = subtree_has_permutable_bands(root); |
2575 | isl_schedule_node_free(root); |
2576 | |
2577 | return any_permutable; |
2578 | } |
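
/* Usage sketch, not part of the original source: the return value uses
 * the common tri-state convention, so a caller has to separate the error
 * case from the negative answer, e.g.
 *
 *	int any = has_any_permutable_node(schedule);
 *	if (any < 0)
 *		schedule = isl_schedule_free(schedule);
 *	else if (!any)
 *		... fall back to plain CPU code generation ...
 */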
2579 | |
2580 | /* Is "node" a candidate for mapping to block and thread identifiers? |
2581 | * In particular, is it permutable with at least one coincident dimension? |
2582 | * Alternatively, does the subtree rooted at "node" not contain |
2583 | * any such permutable node? Filter nodes are skipped in this case, |
2584 | * because a band node will be inserted in front of the returned |
2585 | * node and this is not possible for filter nodes that are children |
2586 | * of set or sequence nodes. |
2587 | */ |
2588 | static int is_candidate(__isl_keep isl_schedule_node *node) |
2589 | { |
2590 | int permutable; |
2591 | |
2592 | if (isl_schedule_node_get_type(node) == isl_schedule_node_leaf) |
2593 | return 1; |
2594 | permutable = is_permutable(node); |
2595 | if (permutable < 0 || permutable) |
2596 | return permutable; |
2597 | if (isl_schedule_node_get_type(node) == isl_schedule_node_filter) |
2598 | return 0; |
2599 | permutable = subtree_has_permutable_bands(node); |
2600 | if (permutable < 0) |
2601 | return -1; |
2602 | return !permutable; |
2603 | } |
2604 | |
2605 | /* Is "node" the outermost node in its branch that can be tiled |
2606 | * and then mapped to block and thread identifiers? |
2607 | * If there are no such nodes in the subtree at "node" and |
2608 | * if "node" is not a filter node, then it is accepted too. |
2609 | */ |
2610 | static int is_outer_tilable(__isl_keep isl_schedule_node *node) |
2611 | { |
2612 | int tilable; |
2613 | isl_schedule_node *ancestor; |
2614 | |
2615 | tilable = is_candidate(node); |
2616 | if (tilable < 0) |
2617 | return -1; |
2618 | if (!tilable) |
2619 | return 0; |
2620 | |
2621 | tilable = 0; |
2622 | ancestor = isl_schedule_node_copy(node); |
2623 | while (isl_schedule_node_has_parent(ancestor)) { |
2624 | ancestor = isl_schedule_node_parent(ancestor); |
2625 | |
2626 | tilable = is_candidate(ancestor); |
2627 | if (tilable < 0 || tilable) |
2628 | break; |
2629 | } |
2630 | |
2631 | isl_schedule_node_free(ancestor); |
2632 | return tilable < 0 ? -1 : !tilable; |
2633 | } |
2634 | |
2635 | /* Collect the references to all writes in "group". |
2636 | * Each reference is represented by a universe set in a space |
2637 | * |
2638 | * [S[i,j] -> R[]] |
2639 | * |
2640 | * with S[i,j] the statement instance space and R[] the array reference. |
2641 | */ |
2642 | static __isl_give isl_union_set *group_tagged_writes( |
2643 | struct gpu_array_ref_group *group) |
2644 | { |
2645 | int i; |
2646 | isl_space *space; |
2647 | isl_union_set *writes; |
2648 | |
2649 | space = isl_map_get_space(group->access); |
2650 | writes = isl_union_set_empty(space); |
2651 | for (i = 0; i < group->n_ref; ++i) { |
2652 | isl_space *space; |
2653 | isl_set *writes_i; |
2654 | |
2655 | if (!group->refs[i]->write) |
2656 | continue; |
2657 | |
2658 | space = isl_map_get_space(group->refs[i]->tagged_access); |
2659 | space = isl_space_domain(space); |
2660 | writes_i = isl_set_universe(space); |
2661 | writes = isl_union_set_add_set(writes, writes_i); |
2662 | } |
2663 | |
2664 | return writes; |
2665 | } |
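
/* Example, not part of the original source: with pet's reference naming,
 * a write by statement S_0 would typically be represented by a universe
 * element such as
 *
 *	[S_0[i, j] -> __pet_ref_1[]]
 *
 * where "__pet_ref_1" identifies the write access within the statement.
 */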
2666 | |
2667 | /* Is there any write access in "group" that requires synchronization |
2668 | * on a write to global memory? |
2669 | * We currently take into account all writes that would require |
2670 | * synchronization at the thread level depth, but if the copying |
2671 | * for this group is performed at an outer level, then we do not |
2672 | * actually need to take into account dependences at intermediate levels. |
2673 | */ |
2674 | static int any_sync_writes_in_group(struct ppcg_kernel *kernel, |
2675 | struct gpu_array_ref_group *group) |
2676 | { |
2677 | isl_union_set *writes; |
2678 | int empty, disjoint; |
2679 | |
2680 | empty = isl_union_set_is_empty(kernel->sync_writes); |
2681 | if (empty < 0) |
2682 | return -1; |
2683 | if (empty) |
2684 | return 0; |
2685 | |
2686 | writes = group_tagged_writes(group); |
2687 | disjoint = isl_union_set_is_disjoint(kernel->sync_writes, writes); |
2688 | isl_union_set_free(writes); |
2689 | |
2690 | return disjoint < 0 ? -1 : !disjoint; |
2691 | } |
2692 | |
2693 | /* Collect the references to all writes in "kernel" that write directly |
2694 | * to global or shared memory, i.e., that are not mapped to private memory. |
2695 | * Each reference is represented by a universe set in a space |
2696 | * |
2697 | * [S[i,j] -> R[]] |
2698 | * |
2699 | * with S[i,j] the statement instance space and R[] the array reference. |
2700 | */ |
2701 | static __isl_give isl_union_set *collect_non_private_tagged_writes( |
2702 | struct ppcg_kernel *kernel) |
2703 | { |
2704 | isl_union_set *writes; |
2705 | int i, j; |
2706 | |
2707 | writes = isl_union_set_empty(isl_union_set_get_space(kernel->arrays)); |
2708 | |
2709 | for (i = 0; i < kernel->n_array; ++i) { |
2710 | struct gpu_local_array_info *array = &kernel->array[i]; |
2711 | |
2712 | for (j = 0; j < array->n_group; ++j) { |
2713 | struct gpu_array_ref_group *group = array->groups[j]; |
2714 | enum ppcg_group_access_type type; |
2715 | isl_union_set *writes_ij; |
2716 | |
2717 | if (!group->write) |
2718 | continue; |
2719 | type = gpu_array_ref_group_type(group); |
2720 | if (type == ppcg_access_private) |
2721 | continue; |
2722 | writes_ij = group_tagged_writes(group); |
2723 | writes = isl_union_set_union(writes, writes_ij); |
2724 | } |
2725 | } |
2726 | |
2727 | return writes; |
2728 | } |
2729 | |
2730 | /* Are there any direct writes to global or shared memory that require
2731 | * synchronization? |
2732 | */ |
2733 | static int any_global_or_shared_sync_writes(struct ppcg_kernel *kernel) |
2734 | { |
2735 | isl_union_set *writes; |
2736 | int empty, disjoint; |
2737 | |
2738 | empty = isl_union_set_is_empty(kernel->sync_writes); |
2739 | if (empty < 0) |
2740 | return -1; |
2741 | if (empty) |
2742 | return 0; |
2743 | |
2744 | writes = collect_non_private_tagged_writes(kernel); |
2745 | disjoint = isl_union_set_is_disjoint(kernel->sync_writes, writes); |
2746 | isl_union_set_free(writes); |
2747 | |
2748 | return disjoint < 0 ? -1 : !disjoint; |
2749 | } |
2750 | |
2751 | /* Construct an isl_multi_val for use as tile sizes for tiling "node" |
2752 | * from the elements in "tile_size". |
2753 | */ |
2754 | static __isl_give isl_multi_val *construct_band_tiles_sizes( |
2755 | __isl_keep isl_schedule_node *node, int *tile_size) |
2756 | { |
2757 | isl_space *space; |
2758 | |
2759 | if (!node) |
2760 | return NULL;
2761 | |
2762 | space = isl_schedule_node_band_get_space(node); |
2763 | return ppcg_multi_val_from_int_list(space, tile_size); |
2764 | } |
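
/* Example, not part of the original source: for a band with two members
 * and tile_size = {32, 8}, the result is the isl_multi_val
 *
 *	{ [32, 8] }
 *
 * in the space of the band, suitable as an argument to, e.g.,
 * isl_schedule_node_band_tile() or isl_multi_union_pw_aff_mod_multi_val().
 */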
2765 | |
2766 | /* Replace the partial schedule S of the band node "node" by |
2767 | * |
2768 | * floor(S/f) |
2769 | * |
2770 | * or |
2771 | * |
2772 | * f * floor(S/f) |
2773 | * |
2774 | * if scale_tile_loops is set, with f the integers in "factor". |
2775 | * The list that "factor" points to is assumed to contain at least |
2776 | * as many elements as the number of members in the band. |
2777 | */ |
2778 | static __isl_give isl_schedule_node *snap_band_to_sizes( |
2779 | __isl_take isl_schedule_node *node, int *factor, |
2780 | struct ppcg_options *options) |
2781 | { |
2782 | isl_multi_val *mv; |
2783 | |
2784 | mv = construct_band_tiles_sizes(node, factor); |
2785 | node = isl_schedule_node_band_scale_down(node, isl_multi_val_copy(mv)); |
2786 | if (options->scale_tile_loops) |
2787 | node = isl_schedule_node_band_scale(node, |
2788 | isl_multi_val_copy(mv)); |
2789 | isl_multi_val_free(mv); |
2790 | |
2791 | return node; |
2792 | } |
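
/* Worked example, not part of the original source: with f = 32 and a
 * band member with value i, the member becomes floor(i/32), or
 * 32 * floor(i/32) if scale_tile_loops is set, so all iterations in a
 * group of 32 consecutive values share a single schedule value.
 */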
2793 | |
2794 | /* Tile "band" with tile size specified by "sizes". |
2795 | * |
2796 | * Since the tile loops will be mapped to block ids, we forcibly |
2797 | * turn off tile loop scaling. We may want to enable tile loop scaling |
2798 | * at some later point, but then we would have to support the detection |
2799 | * of strides during the mapping to block ids. |
2800 | * Similarly, since the point loops will be mapped to thread ids, |
2801 | * we forcibly shift the point loops so that they start at zero. |
2802 | */ |
2803 | static __isl_give isl_schedule_node *tile_band( |
2804 | __isl_take isl_schedule_node *node, __isl_take isl_multi_val *sizes) |
2805 | { |
2806 | isl_ctx *ctx = isl_schedule_node_get_ctx(node); |
2807 | int scale_tile; |
2808 | int shift_point; |
2809 | |
2810 | scale_tile = isl_options_get_tile_scale_tile_loops(ctx); |
2811 | isl_options_set_tile_scale_tile_loops(ctx, 0); |
2812 | shift_point = isl_options_get_tile_shift_point_loops(ctx); |
2813 | isl_options_set_tile_shift_point_loops(ctx, 1); |
2814 | |
2815 | node = isl_schedule_node_band_tile(node, sizes); |
2816 | |
2817 | isl_options_set_tile_scale_tile_loops(ctx, scale_tile); |
2818 | isl_options_set_tile_shift_point_loops(ctx, shift_point); |
2819 | |
2820 | return node; |
2821 | } |
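
/* Worked example, not part of the original source: tiling a one-member
 * band [i] by size 32 under these option settings yields an unscaled
 * tile band [floor(i/32)], to be mapped to block identifiers, and a
 * shifted point band [i mod 32] starting at zero, to be mapped to
 * thread identifiers.
 */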
2822 | |
2823 | /* Extract the set of parameter values and outer schedule dimensions |
2824 | * for which any statement instance |
2825 | * in the kernel inserted at "node" needs to be executed. |
2826 | * Intersect the set of parameter values derived from the host schedule |
2827 | * relation with the context of "prog". |
2828 | */ |
2829 | static __isl_give isl_set *extract_context(__isl_keep isl_schedule_node *node, |
2830 | struct gpu_prog *prog) |
2831 | { |
2832 | isl_union_map *schedule; |
2833 | isl_union_set *schedule_domain; |
2834 | isl_set *context; |
2835 | int empty; |
2836 | |
2837 | schedule = isl_schedule_node_get_prefix_schedule_relation(node); |
2838 | schedule_domain = isl_union_map_range(schedule); |
2839 | empty = isl_union_set_is_empty(schedule_domain); |
2840 | if (empty < 0) { |
2841 | isl_union_set_free(schedule_domain); |
2842 | return NULL;
2843 | } |
2844 | if (empty) { |
2845 | int depth; |
2846 | isl_space *space; |
2847 | |
2848 | space = isl_union_set_get_space(schedule_domain); |
2849 | isl_union_set_free(schedule_domain); |
2850 | space = isl_space_set_from_params(space); |
2851 | depth = isl_schedule_node_get_schedule_depth(node); |
2852 | space = isl_space_add_dims(space, isl_dim_set, depth); |
2853 | context = isl_set_empty(space); |
2854 | } else { |
2855 | context = isl_set_from_union_set(schedule_domain); |
2856 | } |
2857 | context = isl_set_intersect_params(context, |
2858 | isl_set_copy(prog->context)); |
2859 | |
2860 | return context; |
2861 | } |
2862 | |
2863 | /* Return the set of outer array elements accessed by |
2864 | * the statement instances in "domain" in "prog".
2865 | * The instances in "domain" are those that appear |
2866 | * in the domains of the access relations in "prog". |
2867 | */ |
2868 | static __isl_give isl_union_set *accessed_by_domain( |
2869 | __isl_take isl_union_set *domain, struct gpu_prog *prog) |
2870 | { |
2871 | isl_union_map *access; |
2872 | isl_union_set *arrays; |
2873 | |
2874 | access = isl_union_map_union(isl_union_map_copy(prog->read), |
2875 | isl_union_map_copy(prog->may_write)); |
2876 | access = isl_union_map_intersect_domain(access, domain); |
2877 | arrays = isl_union_map_range(access); |
2878 | arrays = isl_union_set_apply(arrays, |
2879 | isl_union_map_copy(prog->to_outer)); |
2880 | |
2881 | return arrays; |
2882 | } |
2883 | |
2884 | /* Return the number of outer band members of the band node "node" |
2885 | * that are marked coincident. |
2886 | */ |
2887 | static int n_outer_coincidence(__isl_keep isl_schedule_node *node) |
2888 | { |
2889 | int i, n; |
2890 | |
2891 | n = isl_schedule_node_band_n_member(node); |
2892 | |
2893 | for (i = 0; i < n; ++i) |
2894 | if (!isl_schedule_node_band_member_get_coincident(node, i)) |
2895 | break; |
2896 | |
2897 | return i; |
2898 | } |
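
/* Example, not part of the original source: for a four-member band with
 * coincidence flags (1, 1, 0, 1), the result is 2, since only the two
 * outermost members form a coincident prefix.
 */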
2899 | |
2900 | /* If the band node "node" has more than "n" members, then split off |
2901 | * the first "n" of them. |
2902 | */ |
2903 | static __isl_give isl_schedule_node *split_band( |
2904 | __isl_take isl_schedule_node *node, int n) |
2905 | { |
2906 | int dim; |
2907 | |
2908 | dim = isl_schedule_node_band_n_member(node); |
2909 | if (n < dim) |
2910 | node = isl_schedule_node_band_split(node, n); |
2911 | |
2912 | return node; |
2913 | } |
2914 | |
2915 | /* Scale a band node that may have been split by split_band. |
2916 | * "sizes" are the scaling factors for the original node. |
2917 | * "node" either points to the original band node, or the outer |
2918 | * of the two pieces after splitting. |
2919 | * |
2920 | * If the number of elements in "node" is smaller than the number of |
2921 | * elements in "sizes", then some splitting has occurred and we split |
2922 | * "sizes" in the same way. |
2923 | */ |
2924 | static __isl_give isl_schedule_node *scale_band( |
2925 | __isl_take isl_schedule_node *node, __isl_take isl_multi_val *sizes) |
2926 | { |
2927 | int n, dim; |
2928 | |
2929 | n = isl_multi_val_dim(sizes, isl_dim_set); |
2930 | dim = isl_schedule_node_band_n_member(node); |
2931 | if (n > dim) { |
2932 | isl_multi_val *sizes2; |
2933 | |
2934 | sizes2 = isl_multi_val_copy(sizes); |
2935 | sizes = isl_multi_val_drop_dims(sizes, |
2936 | isl_dim_set, dim, n - dim); |
2937 | sizes2 = isl_multi_val_drop_dims(sizes2, isl_dim_set, 0, dim); |
2938 | node = isl_schedule_node_child(node, 0); |
2939 | node = isl_schedule_node_band_scale(node, sizes2); |
2940 | node = isl_schedule_node_parent(node); |
2941 | } |
2942 | |
2943 | return isl_schedule_node_band_scale(node, sizes); |
2944 | } |
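
/* Worked example, not part of the original source: if the original band
 * had three members with sizes = {32, 16, 8} and split_band split off
 * the first two, then n = 3 exceeds dim = 2, so the outer band is scaled
 * by {32, 16} and its child by {8}, reproducing the original scaling.
 */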
2945 | |
2946 | /* Return an isl_multi_aff whose elements are the parameters in "space"
2947 | * that have the names specified by the elements in "names". |
2948 | * If (some of) these parameters do not already appear in "space", |
2949 | * then they are added first. |
2950 | */ |
2951 | static __isl_give isl_multi_aff *parameter_vector(__isl_take isl_space *space, |
2952 | __isl_keep isl_id_list *names) |
2953 | { |
2954 | int i, n; |
2955 | isl_local_space *ls; |
2956 | isl_multi_aff *ma; |
2957 | |
2958 | if (!names) |
2959 | space = isl_space_free(space); |
2960 | |
2961 | n = isl_id_list_n_id(names); |
2962 | for (i = 0; i < n; ++i) { |
2963 | int pos; |
2964 | isl_id *id; |
2965 | |
2966 | id = isl_id_list_get_id(names, i); |
2967 | pos = isl_space_find_dim_by_id(space, isl_dim_param, id); |
2968 | if (pos >= 0) { |
2969 | isl_id_free(id); |
2970 | continue; |
2971 | } |
2972 | pos = isl_space_dim(space, isl_dim_param); |
2973 | space = isl_space_add_dims(space, isl_dim_param, 1); |
2974 | space = isl_space_set_dim_id(space, isl_dim_param, pos, id); |
2975 | } |
2976 | ma = isl_multi_aff_zero(isl_space_copy(space)); |
2977 | ls = isl_local_space_from_space(isl_space_domain(space)); |
2978 | for (i = 0; i < n; ++i) { |
2979 | int pos; |
2980 | isl_id *id; |
2981 | isl_aff *aff; |
2982 | |
2983 | id = isl_id_list_get_id(names, i); |
2984 | pos = isl_space_find_dim_by_id(space, isl_dim_param, id); |
2985 | isl_id_free(id); |
2986 | aff = isl_aff_var_on_domain(isl_local_space_copy(ls), |
2987 | isl_dim_param, pos); |
2988 | ma = isl_multi_aff_set_aff(ma, i, aff); |
2989 | } |
2990 | isl_local_space_free(ls); |
2991 | |
2992 | return ma; |
2993 | } |
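
/* Example, not part of the original source: for names = (b0, b1) and a
 * space with a single parameter n, the parameters b0 and b1 are added
 * first and the result is essentially the isl_multi_aff
 *
 *	[n, b0, b1] -> [b0, b1]
 */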
2994 | |
2995 | /* Return constraints on the domain elements that equate a sequence of |
2996 | * parameters called "names", to the partial schedule |
2997 | * of "node" modulo the integers in "size". |
2998 | * The number of elements in the array "size" should be equal |
2999 | * to the number of elements in "names". |
3000 | * The number of members of the band node "node" should be smaller |
3001 | * than or equal to this number. If it is smaller, then the first |
3002 | * elements of "names" are equated to zero. |
3003 | */ |
3004 | static __isl_give isl_union_set *set_schedule_modulo( |
3005 | __isl_keep isl_schedule_node *node, __isl_keep isl_id_list *names, |
3006 | int *size) |
3007 | { |
3008 | int n, n_zero; |
3009 | isl_space *space; |
3010 | isl_multi_aff *ma; |
3011 | isl_multi_union_pw_aff *mupa, *mupa2; |
3012 | isl_multi_val *mv; |
3013 | isl_union_set *domain; |
3014 | |
3015 | if (!node) |
3016 | return NULL;
3017 | n = isl_id_list_n_id(names); |
3018 | if (n == 0) |
3019 | return isl_schedule_node_get_universe_domain(node); |
3020 | n_zero = n - isl_schedule_node_band_n_member(node); |
3021 | |
3022 | mupa = isl_schedule_node_band_get_partial_schedule(node); |
3023 | mv = construct_band_tiles_sizes(node, size + n_zero); |
3024 | mupa = isl_multi_union_pw_aff_mod_multi_val(mupa, mv); |
3025 | |
3026 | space = isl_multi_union_pw_aff_get_space(mupa); |
3027 | space = isl_space_params(space); |
3028 | space = isl_space_set_from_params(space); |
3029 | space = isl_space_add_dims(space, isl_dim_set, n_zero); |
3030 | ma = isl_multi_aff_zero(space); |
3031 | |
3032 | domain = isl_schedule_node_get_universe_domain(node); |
3033 | mupa2 = isl_multi_union_pw_aff_multi_aff_on_domain( |
3034 | isl_union_set_copy(domain), ma); |
3035 | mupa = isl_multi_union_pw_aff_range_product(mupa2, mupa); |
3036 | |
3037 | space = isl_multi_union_pw_aff_get_space(mupa); |
3038 | ma = parameter_vector(space, names); |
3039 | |
3040 | mupa2 = isl_multi_union_pw_aff_multi_aff_on_domain(domain, ma); |
3041 | mupa = isl_multi_union_pw_aff_sub(mupa, mupa2); |
3042 | |
3043 | return isl_multi_union_pw_aff_zero_union_set(mupa); |
3044 | } |
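
/* Worked example, not part of the original source: for a band with
 * partial schedule [i, j], names = (t0, t1) and size = {16, 32}, the
 * result is the set of domain elements satisfying
 *
 *	t0 = i mod 16 and t1 = j mod 32
 *
 * If the band had the single member [j], then t0 would instead be
 * equated to zero and t1 to j mod 32.
 */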
3045 | |
3046 | /* Insert a context node at "node" introducing the block and thread |
3047 | * identifiers along with their bounds, which are stored in kernel->grid_size |
3048 | * and kernel->block_dim. |
3049 | * Note that the bounds on the block identifiers may implicitly impose |
3050 | * constraints on the parameters. A guard needs to be inserted |
3051 | * in the schedule tree to ensure that those bounds hold at "node". |
3052 | * This guard is inserted in insert_guard. |
3053 | */ |
3054 | static __isl_give isl_schedule_node *insert_context(struct ppcg_kernel *kernel, |
3055 | __isl_take isl_schedule_node *node) |
3056 | { |
3057 | isl_set *context; |
3058 | |
3059 | context = isl_set_universe(isl_set_get_space(kernel->context)); |
3060 | |
3061 | context = add_bounded_parameters_dynamic(context, |
3062 | kernel->grid_size, kernel->block_ids); |
3063 | context = add_bounded_parameters(context, |
3064 | kernel->block_dim, kernel->thread_ids); |
3065 | |
3066 | node = isl_schedule_node_insert_context(node, context); |
3067 | |
3068 | return node; |
3069 | } |
3070 | |
3071 | /* Insert a guard that eliminates kernel launches where the kernel |
3072 | * obviously does not have any work to do. |
3073 | * |
3074 | * In particular, eliminate kernel launches where there are obviously |
3075 | * zero blocks. |
3076 | * Use the same block size constraints that are used to create the context |
3077 | * to ensure that all constraints implicit in the constructed context |
3078 | * are imposed by the guard. |
3079 | * |
3080 | * Additionally, add other constraints that are valid |
3081 | * for each executed instance ("context"), as long as this does not result |
3082 | * in a disjunction. |
3083 | */ |
3084 | static __isl_give isl_schedule_node *insert_guard( |
3085 | __isl_take isl_schedule_node *node, __isl_keep isl_set *context, |
3086 | __isl_keep isl_multi_pw_aff *size, struct ppcg_scop *scop) |
3087 | { |
3088 | unsigned nparam, n; |
3089 | isl_set *guard; |
3090 | isl_id_list *ids; |
3091 | |
3092 | guard = isl_set_copy(context); |
3093 | guard = isl_set_compute_divs(guard); |
3094 | guard = isl_set_from_basic_set(isl_set_simple_hull(guard)); |
3095 | |
3096 | nparam = isl_set_dim(guard, isl_dim_param); |
3097 | n = isl_multi_pw_aff_dim(size, isl_dim_out); |
3098 | ids = ppcg_scop_generate_names(scop, n, "__ppcg_tmp"); |
3099 | guard = add_bounded_parameters_dynamic(guard, size, ids); |
3100 | isl_id_list_free(ids); |
3101 | guard = isl_set_project_out(guard, isl_dim_param, nparam, n); |
3102 | |
3103 | node = isl_schedule_node_insert_guard(node, guard); |
3104 | |
3105 | return node; |
3106 | } |
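
/* Worked example, not part of the original source: if the effective grid
 * size is [ceil(n/32)], then requiring 0 <= __ppcg_tmp0 < ceil(n/32) and
 * projecting __ppcg_tmp0 back out leaves the guard n >= 1, so the launch
 * is skipped when there are obviously zero blocks.
 */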
3107 | |
3108 | /* Does any array reference group mapping require the band that is mapped |
3109 | * to threads to be unrolled? |
3110 | */ |
3111 | static int kernel_requires_unroll(struct ppcg_kernel *kernel) |
3112 | { |
3113 | int i, j; |
3114 | |
3115 | for (i = 0; i < kernel->n_array; ++i) { |
3116 | struct gpu_local_array_info *array = &kernel->array[i]; |
3117 | |
3118 | for (j = 0; j < array->n_group; ++j) { |
3119 | struct gpu_array_ref_group *group = array->groups[j]; |
3120 | if (gpu_array_ref_group_requires_unroll(group)) |
3121 | return 1; |
3122 | } |
3123 | } |
3124 | |
3125 | return 0; |
3126 | } |
3127 | |
3128 | /* Mark the given band node "node" for unrolling by the AST generator and |
3129 | * then sink it to the leaves of the schedule tree. |
3130 | * All dimensions of "node" are assumed to be coincident, such that this |
3131 | * sinking is a valid operation. |
3132 | */ |
3133 | static __isl_give isl_schedule_node *unroll(__isl_take isl_schedule_node *node) |
3134 | { |
3135 | node = ppcg_set_schedule_node_type(node, isl_ast_loop_unroll); |
3136 | |
3137 | node = isl_schedule_node_band_sink(node); |
3138 | |
3139 | return node; |
3140 | } |
3141 | |
3142 | /* Insert a synchronization node in the schedule tree of "node" |
3143 | * after the core computation of "kernel" at the level of the band |
3144 | * that is mapped to threads, except if that level is equal to |
3145 | * that of the band that is mapped to blocks or if there are no writes |
3146 | * to global or shared memory in the core computation that require |
3147 | * synchronization. |
3148 | * If there are any writes to shared memory and the shared memory |
3149 | * copying is performed at the same level, then synchronization |
3150 | * is needed between the core and the copying anyway, so we might |
3151 | * as well add it here. If the copying is performed at a higher |
3152 | * level, then different iterations of intermediate schedule dimensions |
3153 | * may have a different mapping between shared memory elements and
3154 | * threads, such that synchronization is required after the core. |
3155 | * "node" is assumed to point to the kernel node. |
3156 | * |
3157 | * If the shared and the thread mark point to the same node, then make |
3158 | * sure the synchronization is inserted outside of the shared mark. |
3159 | */ |
3160 | static __isl_give isl_schedule_node *add_sync(struct ppcg_kernel *kernel, |
3161 | __isl_take isl_schedule_node *node) |
3162 | { |
3163 | int depth; |
3164 | int need_sync; |
3165 | |
3166 | need_sync = any_global_or_shared_sync_writes(kernel); |
3167 | if (need_sync < 0) |
3168 | return isl_schedule_node_free(node); |
3169 | if (!need_sync) |
3170 | return node; |
3171 | |
3172 | node = gpu_tree_move_down_to_thread(node, kernel->core); |
3173 | depth = isl_schedule_node_get_schedule_depth(node); |
3174 | node = gpu_tree_move_up_to_kernel(node); |
3175 | if (depth == isl_schedule_node_get_schedule_depth(node)) |
3176 | return node; |
3177 | |
3178 | node = gpu_tree_move_down_to_depth(node, depth, kernel->core); |
3179 | node = gpu_tree_ensure_following_sync(node, kernel); |
3180 | |
3181 | node = gpu_tree_move_up_to_kernel(node); |
3182 | |
3183 | return node; |
3184 | } |
3185 | |
3186 | /* Return a read ("read" is 1) or write access relation for "group" |
3187 | * with those accesses removed that are only needed to communicate data |
3188 | * within the subtree of the schedule rooted at "node". |
3189 | * Furthermore, include the prefix schedule at "node". |
3190 | * That is, return a relation of the form |
3191 | * |
3192 | * S -> [D -> A] |
3193 | * |
3194 | * with D the outer schedule dimensions at "node". |
3195 | */ |
3196 | static __isl_give isl_union_map *anchored_non_local_accesses( |
3197 | struct ppcg_kernel *kernel, struct gpu_array_ref_group *group, |
3198 | __isl_take isl_schedule_node *node, int read) |
3199 | { |
3200 | isl_union_map *access; |
3201 | isl_union_map *prefix; |
3202 | |
3203 | prefix = isl_schedule_node_get_prefix_schedule_relation(node); |
3204 | prefix = isl_union_map_preimage_domain_union_pw_multi_aff(prefix, |
3205 | isl_union_pw_multi_aff_copy(kernel->contraction)); |
3206 | access = gpu_array_ref_group_access_relation(group, read, !read); |
3207 | access = remove_local_accesses_group(kernel, group, access, prefix, |
3208 | read); |
3209 | access = isl_union_map_range_product(prefix, access); |
3210 | |
3211 | return access; |
3212 | } |
3213 | |
3214 | /* Given an array reference group "group", create a mapping |
3215 | * |
3216 | * read[D -> A] -> [D -> A] |
3217 | * |
3218 | * if "read" is set or |
3219 | * |
3220 | * write[D -> A] -> [D -> A] |
3221 | * |
3222 | * if "read" is not set. |
3223 | * D corresponds to the outer tile->depth dimensions of |
3224 | * the kernel schedule. |
3225 | */ |
3226 | static __isl_give isl_multi_aff *create_from_access(isl_ctx *ctx, |
3227 | struct gpu_array_ref_group *group, int read) |
3228 | { |
3229 | struct gpu_array_tile *tile; |
3230 | isl_space *space; |
3231 | isl_id *id; |
3232 | |
3233 | tile = gpu_array_ref_group_tile(group); |
3234 | space = isl_space_copy(group->array->space); |
3235 | space = isl_space_from_range(space); |
3236 | space = isl_space_add_dims(space, isl_dim_in, tile->depth); |
3237 | space = isl_space_wrap(space); |
3238 | space = isl_space_map_from_set(space); |
3239 | |
3240 | id = isl_id_alloc(ctx, read ? "read" : "write", group); |
3241 | space = isl_space_set_tuple_id(space, isl_dim_in, id); |
3242 | |
3243 | return isl_multi_aff_identity(space); |
3244 | } |
3245 | |
3246 | /* If any writes in "group" require synchronization, then make sure |
3247 | * that there is a synchronization node for "kernel" after the node |
3248 | * following "node" in a sequence. |
3249 | * |
3250 | * If "shared" is set and no synchronization is needed for |
3251 | * the writes to global memory, then add synchronization before |
3252 | * the kernel to protect shared memory from being overwritten |
3253 | * by the next iteration of the core computation. |
3254 | * No additional synchronization is needed to protect against |
3255 | * the next copy into shared memory because each element of |
3256 | * the shared memory tile is always copied by the same thread. |
3257 | */ |
3258 | static __isl_give isl_schedule_node *add_group_write_sync( |
3259 | __isl_take isl_schedule_node *node, struct ppcg_kernel *kernel, |
3260 | struct gpu_array_ref_group *group, int shared) |
3261 | { |
3262 | int need_sync; |
3263 | |
3264 | need_sync = any_sync_writes_in_group(kernel, group); |
3265 | if (need_sync < 0) |
3266 | return isl_schedule_node_free(node); |
3267 | if (need_sync) { |
3268 | node = isl_schedule_node_parent(node); |
3269 | node = isl_schedule_node_next_sibling(node); |
3270 | node = isl_schedule_node_child(node, 0); |
3271 | node = gpu_tree_ensure_following_sync(node, kernel); |
3272 | } else if (shared) { |
3273 | struct gpu_array_tile *tile; |
3274 | |
3275 | tile = gpu_array_ref_group_tile(group); |
3276 | node = isl_schedule_node_parent(node); |
3277 | node = isl_schedule_node_parent(node); |
3278 | node = gpu_tree_move_down_to_depth(node, tile->depth, |
3279 | kernel->core); |
3280 | node = gpu_tree_move_left_to_sync(node, kernel); |
3281 | } |
3282 | |
3283 | return node; |
3284 | } |
3285 | |
3286 | /* Add copy statements to the schedule tree of "node" |
3287 | * for reading from global memory to private memory (if "read" is set) or |
3288 | * for writing back from private memory to global memory |
3289 | * (if "read" is not set) for the array reference group "group" that |
3290 | * is mapped to private memory. |
3291 | * On input, "node" points to the kernel node, and it is moved |
3292 | * back there on output. |
3293 | * |
3294 | * The copies are performed in the order of the array elements. |
3295 | * The copy statement instances include a reference to the outer |
3296 | * tile->depth dimensions of the kernel schedule for ease of |
3297 | * combining them with the group tiling. |
3298 | * |
3299 | * That is, the extra schedule is of the form |
3300 | * |
3301 | * type[D -> A] -> A |
3302 | * |
3303 | * where D corresponds to the outer tile->depth dimensions of |
3304 | * the kernel schedule and A to the global array. |
3305 | * This schedule is unrolled because registers are not addressable. |
3306 | * |
3307 | * The copying is inserted in the schedule tree through an extension |
3308 | * of the form |
3309 | * |
3310 | * D -> type[D -> A] |
3311 | * |
3312 | * where the extra domain elements type[D -> A] are those accessed |
3313 | * by the group. |
3314 | * A filter is inserted on type[D -> A] to ensure that the element |
3315 | * is read/written by the same thread that needs the element. |
3316 | * This filter is obtained by applying |
3317 | * |
3318 | * S -> type[D -> A] |
3319 | * |
3320 | * to the thread filter for the core statements. |
3321 | * |
3322 | * The extension is inserted before the core computation in case of a read |
3323 | * and after the core computation in case of a write. |
3324 | * In the latter case, we also make sure that there is a synchronization |
3325 | * node after the write to global memory, unless this write is performed |
3326 | * at the outer level of the kernel. |
3327 | * In principle, this synchronization could be inserted higher |
3328 | * in the schedule tree depending on where the corresponding reads |
3329 | * from global memory are performed. |
3330 | */ |
3331 | static __isl_give isl_schedule_node *add_copies_group_private( |
3332 | struct ppcg_kernel *kernel, struct gpu_array_ref_group *group, |
3333 | __isl_take isl_schedule_node *node, int read) |
3334 | { |
3335 | struct gpu_array_tile *tile; |
3336 | isl_union_map *access; |
3337 | isl_union_set *domain; |
3338 | isl_space *space; |
3339 | isl_multi_aff *from_access; |
3340 | isl_multi_pw_aff *mpa; |
3341 | isl_multi_union_pw_aff *mupa; |
3342 | isl_union_pw_multi_aff *contraction; |
3343 | isl_schedule_node *graft; |
3344 | isl_union_set *filter; |
3345 | int kernel_depth; |
3346 | int empty; |
3347 | |
3348 | kernel_depth = isl_schedule_node_get_schedule_depth(node); |
3349 | tile = gpu_array_ref_group_tile(group); |
3350 | node = gpu_tree_move_down_to_depth(node, tile->depth, kernel->core); |
3351 | |
3352 | access = anchored_non_local_accesses(kernel, group, node, read); |
3353 | empty = isl_union_map_is_empty(access); |
3354 | if (empty < 0 || empty) { |
3355 | isl_union_map_free(access); |
3356 | if (empty < 0) |
3357 | return isl_schedule_node_free(node); |
3358 | return gpu_tree_move_up_to_kernel(node); |
3359 | } |
3360 | |
3361 | group->array->global = 1; |
3362 | group->local_array->global = 1; |
3363 | |
3364 | from_access = create_from_access(kernel->ctx, group, read); |
3365 | space = isl_space_domain(isl_multi_aff_get_space(from_access)); |
3366 | access = isl_union_map_preimage_range_multi_aff(access, from_access); |
3367 | |
3368 | filter = isl_union_set_copy(kernel->thread_filter); |
3369 | contraction = isl_union_pw_multi_aff_copy(kernel->contraction); |
3370 | filter = isl_union_set_preimage_union_pw_multi_aff(filter, contraction); |
3371 | filter = isl_union_set_apply(filter, isl_union_map_copy(access)); |
3372 | filter = isl_union_set_detect_equalities(filter); |
3373 | filter = isl_union_set_coalesce(filter); |
3374 | |
3375 | domain = isl_union_map_range(access); |
3376 | access = isl_union_set_wrapped_domain_map(domain); |
3377 | access = isl_union_map_reverse(access); |
3378 | access = isl_union_map_coalesce(access); |
3379 | graft = isl_schedule_node_from_extension(access); |
3380 | |
3381 | space = isl_space_map_from_set(space); |
3382 | mpa = isl_multi_pw_aff_identity(space); |
3383 | mpa = isl_multi_pw_aff_range_factor_range(mpa); |
3384 | mupa = isl_multi_union_pw_aff_from_multi_pw_aff(mpa); |
3385 | |
3386 | graft = isl_schedule_node_child(graft, 0); |
3387 | graft = isl_schedule_node_insert_partial_schedule(graft, mupa); |
3388 | graft = unroll(graft); |
3389 | |
3390 | graft = isl_schedule_node_insert_filter(graft, filter); |
3391 | |
3392 | graft = isl_schedule_node_parent(graft); |
3393 | |
3394 | if (read) |
3395 | node = isl_schedule_node_graft_before(node, graft); |
3396 | else { |
3397 | node = isl_schedule_node_graft_after(node, graft); |
3398 | if (kernel_depth < tile->depth) |
3399 | node = add_group_write_sync(node, kernel, group, 0); |
3400 | } |
3401 | |
3402 | node = gpu_tree_move_up_to_kernel(node); |
3403 | |
3404 | return node; |
3405 | } |
3406 | |
3407 | /* Add copy statements to the schedule tree of "node" |
3408 | * for reading from global memory to shared memory (if "read" is set) or |
3409 | * for writing back from shared memory to global memory |
3410 | * (if "read" is not set) for the array reference group "group" that |
3411 | * is mapped to shared memory. |
3412 | * On input, "node" points to the kernel node, and it is moved |
3413 | * back there on output. |
3414 | * |
3415 | * The copies are performed in the order of the corresponding shared |
3416 | * memory tile. |
3417 | * The copy statement instances include a reference to the outer |
3418 | * tile->depth dimensions of the kernel schedule for ease of |
3419 | * combining them with the group tiling. |
3420 | * |
3421 | * If we are performing a read from global memory to shared memory and |
3422 | * if the array involved is not a scalar, then we copy |
3423 | * the entire tile to shared memory. This may result in some extra |
3424 | * elements getting copied, but it should lead to simpler code |
3425 | * (which means that fewer registers may be needed) and less divergence. |
3426 | * |
3427 | * Otherwise, we only copy the elements that will be read or have been written |
3428 | * in the kernel. |
3429 | * |
3430 | * That is, the extra schedule is of the form |
3431 | * |
3432 | * type[D -> A] -> T |
3433 | * |
3434 | * where D corresponds to the outer tile->depth dimensions of |
3435 | * the kernel schedule, A to the global array and T is the corresponding |
3436 | * shared memory tile. |
3437 | * |
3438 | * The copying is inserted in the schedule tree through an extension |
3439 | * of the form |
3440 | * |
3441 | * D -> type[D -> A] |
3442 | * |
3443 | * where the extra domain elements type[D -> A] are those accessed |
3444 | * by the group. In the case of read from a non-scalar, this set |
3445 | * is replaced by the entire shared memory tile. |
3446 | * |
3447 | * If the "unroll_copy_shared" option is set, then the AST generator |
3448 | * is instructed to unroll the copying code. |
3449 | * |
3450 | * A filter is inserted on type[D -> A] to map the copy instances |
3451 | * to the threads. In particular, the thread identifiers are |
3452 | * equated to the position inside the shared memory tile (T) |
3453 | * modulo the block size. |
3454 | * We try to align the innermost tile dimension with the innermost |
3455 | * thread identifier (x) as a heuristic to improve coalescing. |
3456 | * In particular, if the dimension of the tile is greater than |
3457 | * the dimension of the block, then the schedule mapping to the tile |
3458 | * is broken up into two pieces and the filter is applied to the inner part. |
3459 | * If, on the other hand, the dimension of the tile is smaller than |
3460 | * the dimension of the block, then the initial thread identifiers |
3461 | * are equated to zero and the remaining thread identifiers are |
3462 | * matched to the memory tile. |
3463 | * |
3464 | * The extension is inserted before the core computation in case of a read |
3465 | * and after the core computation in case of a write. |
3466 | * In the case of a read, we first need to make sure there is some |
3467 | * synchronization before the core computation such that we can put the read |
3468 | * from global memory to shared memory before that synchronization. |
3469 | * This ensures that all threads have finished copying into shared memory |
3470 | * before the shared memory is used. |
3471 | * We also need to make sure that there is a synchronization node after |
3472 | * the core computation to ensure that the next load into shared memory |
3473 | * only happens after all data has been used. There is no need for |
3474 | * this synchronization if we are at the outer level since then there |
3475 | * won't be a next load. |
3476 | * In the case of a write, we need to make sure there is some synchronization |
3477 | * after the core computation such that we can put the write from shared
3478 | * memory to global memory after that synchronization. |
3479 | * Unless we are at the outer level, we also need a synchronization node |
3480 | * after the write to ensure the data is saved to global memory |
3481 | * before the next iteration writes to the same shared memory.
3482 | * It also makes sure the data has arrived in global memory before |
3483 | * it is read in a subsequent iteration. |
3484 | */ |
3485 | static __isl_give isl_schedule_node *add_copies_group_shared( |
3486 | struct ppcg_kernel *kernel, struct gpu_array_ref_group *group, |
3487 | __isl_take isl_schedule_node *node, int read) |
3488 | { |
3489 | struct gpu_array_tile *tile; |
3490 | isl_union_map *access; |
3491 | isl_union_set *domain; |
3492 | isl_multi_aff *ma; |
3493 | isl_multi_aff *from_access; |
3494 | isl_multi_pw_aff *mpa; |
3495 | isl_multi_union_pw_aff *mupa; |
3496 | isl_schedule_node *graft; |
3497 | isl_union_set *filter; |
3498 | int skip; |
3499 | int kernel_depth; |
3500 | int empty; |
3501 | |
3502 | tile = gpu_array_ref_group_tile(group); |
3503 | kernel_depth = isl_schedule_node_get_schedule_depth(node); |
3504 | node = gpu_tree_move_down_to_depth(node, tile->depth, kernel->core); |
3505 | |
3506 | access = anchored_non_local_accesses(kernel, group, node, read); |
3507 | empty = isl_union_map_is_empty(access); |
3508 | if (empty < 0 || empty) { |
3509 | isl_union_map_free(access); |
3510 | if (empty < 0) |
3511 | return isl_schedule_node_free(node); |
3512 | return gpu_tree_move_up_to_kernel(node); |
3513 | } |
3514 | |
3515 | group->array->global = 1; |
3516 | group->local_array->global = 1; |
3517 | |
3518 | from_access = create_from_access(kernel->ctx, group, read); |
3519 | |
3520 | ma = isl_multi_aff_copy(tile->tiling); |
3521 | ma = isl_multi_aff_pullback_multi_aff(ma, |
3522 | isl_multi_aff_copy(from_access)); |
3523 | mpa = isl_multi_pw_aff_from_multi_aff(ma); |
3524 | mupa = isl_multi_union_pw_aff_from_multi_pw_aff(mpa); |
3525 | |
3526 | domain = isl_union_map_range(access); |
3527 | |
3528 | if (read && !gpu_array_is_scalar(group->array)) { |
3529 | isl_map *map; |
3530 | isl_union_set_free(domain); |
3531 | map = group_tile(group); |
3532 | domain = isl_union_set_from_set(isl_map_wrap(map)); |
3533 | } |
3534 | |
3535 | domain = isl_union_set_preimage_multi_aff(domain, from_access); |
3536 | access = isl_union_set_wrapped_domain_map(domain); |
3537 | access = isl_union_map_reverse(access); |
3538 | access = isl_union_map_coalesce(access); |
3539 | graft = isl_schedule_node_from_extension(access); |
3540 | |
3541 | graft = isl_schedule_node_child(graft, 0); |
3542 | |
3543 | graft = isl_schedule_node_insert_partial_schedule(graft, mupa); |
3544 | if (kernel->options->unroll_copy_shared) |
3545 | graft = ppcg_set_schedule_node_type(graft, isl_ast_loop_unroll); |
3546 | |
3547 | if (tile->n > kernel->n_block && kernel->n_block > 0) { |
3548 | graft = isl_schedule_node_band_split(graft, |
3549 | tile->n - kernel->n_block); |
3550 | graft = isl_schedule_node_child(graft, 0); |
3551 | } |
3552 | if (tile->n < kernel->n_block) |
3553 | skip = kernel->n_block - tile->n; |
3554 | else |
3555 | skip = 0; |
3556 | filter = set_schedule_modulo(graft, kernel->thread_ids, |
3557 | kernel->block_dim); |
3558 | if (!kernel->options->wrap) |
3559 | graft = snap_band_to_sizes(graft, kernel->block_dim + skip, |
3560 | kernel->options); |
3561 | if (tile->n > kernel->n_block && kernel->n_block > 0) |
3562 | graft = isl_schedule_node_parent(graft); |
3563 | graft = isl_schedule_node_insert_filter(graft, filter); |
3564 | |
3565 | while (graft && isl_schedule_node_has_parent(graft)) |
3566 | graft = isl_schedule_node_parent(graft); |
3567 | |
3568 | if (read) { |
3569 | if (kernel_depth < tile->depth) |
3570 | node = gpu_tree_ensure_sync_after_core(node, kernel); |
3571 | node = gpu_tree_move_left_to_sync(node, kernel); |
3572 | node = isl_schedule_node_graft_before(node, graft); |
3573 | } else { |
3574 | node = gpu_tree_move_right_to_sync(node, kernel); |
3575 | node = isl_schedule_node_graft_after(node, graft); |
3576 | if (kernel_depth < tile->depth) |
3577 | node = add_group_write_sync(node, kernel, group, 1); |
3578 | } |
3579 | |
3580 | node = gpu_tree_move_up_to_kernel(node); |
3581 | |
3582 | return node; |
3583 | } |
3584 | |
3585 | /* Check whether the array reference group "group" is mapped to |
3586 | * private or shared memory and, if so, |
3587 | * add copy statements to the schedule tree of "node" |
3588 | * for reading from global memory to private or shared memory |
3589 | * (if "read" is set) or for writing back from private or shared memory |
3590 | * to global memory (if "read" is not set) for this group. |
3591 | * On input, "node" points to the kernel node, and it is moved |
3592 | * back there on output. |
3593 | */ |
3594 | static __isl_give isl_schedule_node *add_copies_group( |
3595 | struct ppcg_kernel *kernel, struct gpu_array_ref_group *group, |
3596 | __isl_take isl_schedule_node *node, int read) |
3597 | { |
3598 | enum ppcg_group_access_type type; |
3599 | |
3600 | type = gpu_array_ref_group_type(group); |
3601 | if (type == ppcg_access_private) |
3602 | return add_copies_group_private(kernel, group, node, read); |
3603 | if (type == ppcg_access_shared) |
3604 | return add_copies_group_shared(kernel, group, node, read); |
3605 | return node; |
3606 | } |
3607 | |
3608 | /* For each array reference group that is mapped to private or shared memory, |
3609 | * add copy statements to the schedule tree of "node" |
3610 | * for reading from global memory to private or shared memory |
3611 | * and for writing back. |
3612 | * On input, "node" points to the kernel node, and it is moved |
3613 | * back there on output. |
3614 | */ |
3615 | static __isl_give isl_schedule_node *add_copies(struct ppcg_kernel *kernel, |
3616 | __isl_take isl_schedule_node *node) |
3617 | { |
3618 | int i, j; |
3619 | |
3620 | for (i = 0; i < kernel->n_array; ++i) { |
3621 | struct gpu_local_array_info *array = &kernel->array[i]; |
3622 | |
3623 | for (j = 0; j < array->n_group; ++j) { |
3624 | struct gpu_array_ref_group *group = array->groups[j]; |
3625 | |
3626 | node = add_copies_group(kernel, group, node, 1); |
3627 | if (!node) |
3628 | return NULL;
3629 | node = add_copies_group(kernel, group, node, 0); |
3630 | if (!node) |
3631 | return NULL;
3632 | } |
3633 | } |
3634 | |
3635 | return node; |
3636 | } |
3637 | |
3638 | /* Mark all dimensions in the current band node atomic. |
3639 | */ |
3640 | static __isl_give isl_schedule_node *atomic(__isl_take isl_schedule_node *node) |
3641 | { |
3642 | return ppcg_set_schedule_node_type(node, isl_ast_loop_atomic); |
3643 | } |
3644 | |
3645 | /* Mark "node" atomic, if it is a band node. |
3646 | * Do the same for all ancestors. |
3647 | * Return a pointer to "node" (in the updated schedule tree). |
3648 | */ |
3649 | static __isl_give isl_schedule_node *atomic_ancestors( |
3650 | __isl_take isl_schedule_node *node) |
3651 | { |
3652 | int pos; |
3653 | |
3654 | if (!node) |
3655 | return NULL;
3656 | if (!isl_schedule_node_has_parent(node)) |
3657 | return node; |
3658 | |
3659 | pos = isl_schedule_node_get_child_position(node); |
3660 | node = isl_schedule_node_parent(node); |
3661 | if (isl_schedule_node_get_type(node) == isl_schedule_node_band) |
3662 | node = atomic(node); |
3663 | node = atomic_ancestors(node); |
3664 | node = isl_schedule_node_child(node, pos); |
3665 | |
3666 | return node; |
3667 | } |
3668 | |
3669 | /* Collect all write references that require synchronization. |
3670 | * "node" is assumed to point to the kernel node. |
3671 | * Each reference is represented by a universe set in a space |
3672 | * |
3673 | * [S[i,j] -> R[]] |
3674 | * |
3675 | * with S[i,j] the statement instance space and R[] the array reference. |
3676 | * |
3677 | * This function should be called before block and thread filters are added. |
3678 | * |
3679 | * Synchronization is needed after a write if there is a subsequent read |
3680 | * within the same block that may not be performed by the same thread. |
3681 | * There should not be any dependences between different blocks, |
3682 | * so we start with the flow dependences within the same kernel invocation |
3683 | * and we subtract from these those dependences that are mapped |
3684 | * to the same iteration of the bands where synchronization is inserted. |
3685 | * We do not remove pairs of instances that are known to map to |
3686 | * the same thread across different iterations of the intermediate |
3687 | * bands because the read may be performed by a different thread |
3688 | * than the one that needs the value if shared memory is involved. |
3689 | * |
3690 | * We also consider all pairs of possible writes that access the same |
3691 | * memory location and that may be mapped to the same block but not |
3692 | * to the same iteration of the intermediate bands. |
3693 | * In theory, it would be possible for one thread to still be in |
3694 | * a previous iteration of a loop in these bands. |
3695 | * A write to global memory in this delayed thread could then overwrite |
3696 | * a write from another thread that has already moved on to |
3697 | * the next iteration. |
3698 | * |
3699 | * After computing the above writes paired off with reads or writes |
3700 | * that depend on them, we project onto the domain writes. |
3701 | * Synchronization is needed after writes to global memory
3702 | * through these references. |
3703 | */ |
3704 | static __isl_give isl_union_set *compute_sync_writes( |
3705 | struct ppcg_kernel *kernel, __isl_keep isl_schedule_node *node) |
3706 | { |
3707 | isl_union_map *local; |
3708 | isl_union_map *may_writes, *shared_access; |
3709 | isl_union_map *kernel_prefix, *thread_prefix; |
3710 | isl_union_map *equal; |
3711 | isl_union_set *wrap; |
3712 | isl_union_set *domain; |
3713 | isl_union_pw_multi_aff *contraction; |
3714 | |
3715 | kernel_prefix = isl_schedule_node_get_prefix_schedule_union_map(node); |
3716 | node = isl_schedule_node_copy(node); |
3717 | node = gpu_tree_move_down_to_thread(node, kernel->core); |
3718 | thread_prefix = isl_schedule_node_get_prefix_schedule_union_map(node); |
3719 | isl_schedule_node_free(node); |
3720 | |
3721 | contraction = kernel->contraction; |
3722 | kernel_prefix = isl_union_map_preimage_domain_union_pw_multi_aff( |
3723 | kernel_prefix, isl_union_pw_multi_aff_copy(contraction)); |
3724 | thread_prefix = isl_union_map_preimage_domain_union_pw_multi_aff( |
3725 | thread_prefix, isl_union_pw_multi_aff_copy(contraction)); |
3726 | domain = isl_union_set_copy(kernel->expanded_domain); |
3727 | domain = isl_union_set_universe(domain); |
3728 | |
3729 | may_writes = isl_union_map_copy(kernel->prog->scop->tagged_may_writes); |
3730 | may_writes = isl_union_map_curry(may_writes); |
3731 | may_writes = isl_union_map_intersect_domain(may_writes, domain); |
3732 | may_writes = isl_union_map_uncurry(may_writes); |
3733 | shared_access = isl_union_map_copy(may_writes); |
3734 | shared_access = isl_union_map_apply_range(shared_access, |
3735 | isl_union_map_reverse(may_writes)); |
3736 | |
3737 | local = isl_union_map_copy(kernel->prog->scop->tagged_dep_flow); |
3738 | local = isl_union_map_union(local, shared_access); |
3739 | local = isl_union_map_zip(local); |
3740 | |
3741 | equal = isl_union_map_apply_range(kernel_prefix, |
3742 | isl_union_map_reverse(isl_union_map_copy(kernel_prefix))); |
3743 | wrap = isl_union_map_wrap(equal); |
3744 | local = isl_union_map_intersect_domain(local, wrap); |
3745 | equal = isl_union_map_apply_range(thread_prefix, |
3746 | isl_union_map_reverse(isl_union_map_copy(thread_prefix))); |
3747 | wrap = isl_union_map_wrap(equal); |
3748 | local = isl_union_map_subtract_domain(local, wrap); |
3749 | |
3750 | local = isl_union_map_zip(local); |
3751 | local = isl_union_map_universe(local); |
3752 | |
3753 | return isl_union_map_domain(local); |
3754 | } |
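/* A minimal standalone sketch (hypothetical spaces S1, S2, R1, R2;
 * not part of ppcg) of the zip/wrap manipulation used above:
 * zipping a tagged dependence turns
 *
 *	[S1[i] -> R1[]] -> [S2[i'] -> R2[]]
 * into
 *	[S1[i] -> S2[i']] -> [R1[] -> R2[]]
 *
 * so that the pair of statement instances can be intersected with or
 * subtracted from a wrapped "scheduled together" relation.
 */
static void zip_sketch(isl_ctx *ctx)
{
	isl_union_map *dep;

	dep = isl_union_map_read_from_str(ctx,
		"{ [S1[i] -> R1[]] -> [S2[i] -> R2[]] }");
	dep = isl_union_map_zip(dep);
	/* now { [S1[i] -> S2[i]] -> [R1[] -> R2[]] } */
	isl_union_map_dump(dep);
	isl_union_map_free(dep);
}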
3755 | |
3756 | /* Group the domain elements into a single space, named kernelX, |
3757 | * with X the kernel sequence number "kernel_id". |
3758 | */ |
3759 | static __isl_give isl_schedule_node *group_statements( |
3760 | __isl_take isl_schedule_node *node, int kernel_id) |
3761 | { |
3762 | char buffer[20]; |
3763 | isl_id *id; |
3764 | |
3765 | if (!node) |
3766 | return NULL;
3767 | |
3768 | snprintf(buffer, sizeof(buffer), "kernel%d", kernel_id); |
3769 | id = isl_id_alloc(isl_schedule_node_get_ctx(node), buffer, NULL);
3770 | return isl_schedule_node_group(node, id); |
3771 | } |
3772 | |
3773 | /* Create a ppcg_kernel representing the domain instances that reach "node" |
3774 | * and insert a mark node pointing to the ppcg_kernel before "node". |
3775 | * The band that "node" points to is the band that needs to be mapped |
3776 | * to block identifiers. The band that needs to be mapped to thread |
3777 | * identifiers should be marked by a "thread" mark by the caller. |
3778 | * The linear branch between the current node and the "thread" mark |
3779 | * may also have a "shared" mark. If present, the mapping to shared |
3780 | * memory is computed at that point. |
3781 | * Both marks are removed by this function. |
3782 | * If "scale" is set, then the band that "node" points to is scaled |
3783 | * by "sizes". |
3784 | * |
3785 | * Mark all outer band nodes as atomic to ensure each kernel is only |
3786 | * scheduled once. |
3787 | * If the domain elements that reach "node" live in more than one space, |
3788 | * then group the domain elements into a single space, named kernelX, |
3789 | * with X the kernel sequence number. |
3790 | * |
3791 | * Insert a guard node governing the kernel node to ensure that |
3792 | * no kernels with zero blocks are launched. |
3793 | * |
3794 | * Insert a context node describing the block and thread |
3795 | * identifiers inside the kernel mark. |
3796 | * The context node needs to be inserted after the effective block size |
3797 | * has been determined such that the bounds on the thread identifiers |
3798 | * would reflect the effective block size. |
3799 | * Insert a filter node inside the context node mapping the statement |
3800 | * instances to block identifiers. In particular, the block identifiers |
3801 | * are equated to the partial schedule of the band that was marked for mapping
3802 | * to blocks modulo the grid size. |
3803 | * Insert a filter node inside the "thread" mark mapping the statement |
3804 | * instances to thread identifiers. In particular, the thread identifiers |
3805 | * are equated to the partial schedule of the band that was marked for mapping
3806 | * to threads modulo the block size. |
3807 | * |
3808 | * Compute array reference groups for all arrays, set the local |
3809 | * array bounds based on the set of domain instances that reach |
3810 | * the kernel node, check the total amount of shared memory used |
3811 | * and compute all group tilings. |
3812 | * The array reference groups are computed after the block filter |
3813 | * has been inserted because it affects the mapping to shared or |
3814 | * private memory. This computation also requires the thread filter |
3815 | * (in the ppcg_kernel object), but this thread filter should not |
3816 | * have been added to the schedule tree yet since the computation |
3817 | * requires the schedule of the band that needs to be mapped to |
3818 | * threads before the privatization is applied. |
3819 | * |
3820 | * If any array reference group requires the band mapped to threads |
3821 | * to be unrolled, then we perform the required unrolling. |
3822 | * |
3823 | * We save a copy of the schedule that may influence the mappings |
3824 | * to shared or private memory in kernel->copy_schedule. |
3825 | * |
3826 | * Finally, we add synchronization and copy statements to the schedule tree, |
3827 | * remove the "thread" mark and create representations for the local |
3828 | * variables in the kernel. |
3829 | * |
3830 | * We keep a copy of the isl_id that points to the kernel to ensure |
3831 | * that the kernel does not get destroyed if the schedule node |
3832 | * is freed due to some error condition. |
3833 | */ |
3834 | __isl_give isl_schedule_node *gpu_create_kernel(struct gpu_gen *gen, |
3835 | __isl_take isl_schedule_node *node, int scale, |
3836 | __isl_keep isl_multi_val *sizes) |
3837 | { |
3838 | struct ppcg_kernel *kernel; |
3839 | isl_id *id; |
3840 | isl_schedule_node *node_thread; |
3841 | isl_union_map *host_schedule; |
3842 | isl_union_pw_multi_aff *contraction; |
3843 | isl_set *host_domain; |
3844 | isl_union_set *domain, *expanded; |
3845 | int single_statement; |
3846 | |
3847 | node = gpu_tree_insert_shared_before_thread(node); |
3848 | if (!node) |
3849 | return NULL;
3850 | |
3851 | kernel = isl_calloc_type(gen->ctx, struct ppcg_kernel);
3852 | kernel = ppcg_kernel_create_local_arrays(kernel, gen->prog); |
3853 | if (!kernel) |
3854 | return isl_schedule_node_free(node); |
3855 | |
3856 | domain = isl_schedule_node_get_domain(node); |
3857 | single_statement = isl_union_set_n_set(domain) == 1; |
3858 | |
3859 | kernel->ctx = gen->ctx; |
3860 | kernel->prog = gen->prog; |
3861 | kernel->options = gen->options; |
3862 | kernel->context = extract_context(node, gen->prog); |
3863 | kernel->core = isl_union_set_universe(isl_union_set_copy(domain)); |
3864 | contraction = isl_schedule_node_get_subtree_contraction(node); |
3865 | kernel->contraction = isl_union_pw_multi_aff_copy(contraction); |
3866 | expanded = isl_union_set_copy(domain); |
3867 | expanded = isl_union_set_preimage_union_pw_multi_aff(expanded, |
3868 | contraction); |
3869 | kernel->expanded_domain = isl_union_set_copy(expanded); |
3870 | kernel->arrays = accessed_by_domain(expanded, gen->prog); |
3871 | kernel->n_grid = n_outer_coincidence(node); |
3872 | node_thread = isl_schedule_node_copy(node); |
3873 | node_thread = gpu_tree_move_down_to_thread(node_thread, kernel->core); |
3874 | node_thread = isl_schedule_node_child(node_thread, 0); |
3875 | kernel->n_block = n_outer_coincidence(node_thread); |
3876 | isl_schedule_node_free(node_thread); |
3877 | kernel->id = gen->kernel_id++; |
3878 | read_grid_and_block_sizes(kernel, gen); |
3879 | |
3880 | kernel->sync_writes = compute_sync_writes(kernel, node); |
3881 | |
3882 | host_schedule = isl_schedule_node_get_prefix_schedule_union_map(node); |
3883 | host_domain = isl_set_from_union_set(isl_union_map_range( |
3884 | host_schedule)); |
3885 | |
3886 | node = atomic_ancestors(node); |
3887 | |
3888 | id = isl_id_alloc(gen->ctx, "kernel", kernel); |
3889 | id = isl_id_set_free_user(id, &ppcg_kernel_free_wrap); |
3890 | node = isl_schedule_node_insert_mark(node, isl_id_copy(id)); |
3891 | |
3892 | if (!single_statement) |
3893 | node = group_statements(node, kernel->id); |
3894 | |
3895 | node = isl_schedule_node_child(node, 0); |
3896 | node = split_band(node, kernel->n_grid); |
3897 | kernel->block_ids = ppcg_scop_generate_names(gen->prog->scop, |
3898 | kernel->n_grid, "b"); |
3899 | kernel->block_filter = set_schedule_modulo(node, kernel->block_ids, |
3900 | kernel->grid_dim); |
3901 | kernel->grid_size = extract_grid_size(kernel, |
3902 | isl_union_set_copy(domain)); |
3903 | if (!kernel->options->wrap) |
3904 | node = snap_band_to_sizes(node, kernel->grid_dim, |
3905 | kernel->options); |
3906 | if (scale) |
3907 | node = scale_band(node, isl_multi_val_copy(sizes)); |
3908 | node = isl_schedule_node_parent(node); |
3909 | if (!single_statement) |
3910 | node = isl_schedule_node_parent(node); |
3911 | node = insert_guard(node, kernel->context, kernel->grid_size, |
3912 | gen->prog->scop); |
3913 | node = gpu_tree_move_down_to_thread(node, kernel->core); |
3914 | node = isl_schedule_node_child(node, 0); |
3915 | node = split_band(node, kernel->n_block); |
3916 | kernel->thread_ids = ppcg_scop_generate_names(gen->prog->scop, |
3917 | kernel->n_block, "t"); |
3918 | kernel->thread_filter = set_schedule_modulo(node, kernel->thread_ids, |
3919 | kernel->block_dim); |
3920 | if (extract_block_size(kernel, domain) < 0) |
3921 | node = isl_schedule_node_free(node); |
3922 | |
3923 | node = gpu_tree_move_up_to_kernel(node); |
3924 | node = isl_schedule_node_child(node, 0); |
3925 | node = insert_context(kernel, node); |
3926 | node = isl_schedule_node_child(node, 0); |
3927 | node = isl_schedule_node_insert_filter(node, |
3928 | isl_union_set_copy(kernel->block_filter)); |
3929 | |
3930 | node = gpu_tree_move_up_to_kernel(node); |
3931 | |
3932 | if (gpu_group_references(kernel, node) < 0) |
3933 | node = isl_schedule_node_free(node); |
3934 | localize_bounds(kernel, host_domain); |
3935 | isl_set_free(host_domain); |
3936 | |
3937 | check_shared_memory_bound(kernel); |
3938 | mark_global_arrays(kernel); |
3939 | compute_group_tilings(kernel); |
3940 | |
3941 | node = gpu_tree_move_down_to_thread(node, kernel->core); |
3942 | node = isl_schedule_node_child(node, 0); |
3943 | if (!kernel->options->wrap) |
3944 | node = snap_band_to_sizes(node, kernel->block_dim, |
3945 | kernel->options); |
3946 | node = isl_schedule_node_insert_filter(node, |
3947 | isl_union_set_copy(kernel->thread_filter)); |
3948 | if (kernel_requires_unroll(kernel)) { |
3949 | node = isl_schedule_node_child(node, 0); |
3950 | node = unroll(node); |
3951 | } |
3952 | |
3953 | node = gpu_tree_move_up_to_thread(node); |
3954 | kernel->copy_schedule_dim = isl_schedule_node_get_schedule_depth(node); |
3955 | kernel->copy_schedule = |
3956 | isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(node); |
3957 | contraction = isl_union_pw_multi_aff_copy(kernel->contraction); |
3958 | kernel->copy_schedule = |
3959 | isl_union_pw_multi_aff_pullback_union_pw_multi_aff( |
3960 | kernel->copy_schedule, contraction); |
3961 | |
3962 | node = gpu_tree_move_up_to_kernel(node); |
3963 | |
3964 | node = add_sync(kernel, node); |
3965 | node = add_copies(kernel, node); |
3966 | |
3967 | node = gpu_tree_move_down_to_shared(node, kernel->core); |
3968 | node = isl_schedule_node_delete(node); |
3969 | |
3970 | node = gpu_tree_move_down_to_thread(node, kernel->core); |
3971 | node = isl_schedule_node_delete(node); |
3972 | |
3973 | node = gpu_tree_move_up_to_kernel(node); |
3974 | |
3975 | if (create_kernel_vars(kernel) < 0) |
3976 | node = isl_schedule_node_free(node); |
3977 | |
3978 | if (!single_statement) |
3979 | node = isl_schedule_node_parent(node); |
3980 | node = isl_schedule_node_parent(node); |
3981 | |
3982 | isl_id_free(id); |
3983 | return node; |
3984 | } |
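/* A rough sketch (hedged; many nodes omitted) of the subtree produced
 * by gpu_create_kernel, following the description above:
 *
 *	guard (at least one block is launched)
 *	  mark: "kernel" (isl_id with ppcg_kernel user pointer)
 *	    context (bounds on the block and thread identifiers)
 *	      filter (equates block identifiers to the grid band)
 *	        band (loops mapped to blocks)
 *	          ...
 *	          filter (equates thread identifiers to the point band)
 *	            band (point loops mapped to threads)
 *	              ... (sync and copy statements inserted around here)
 */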
3985 | |
3986 | /* Insert a zero-dimensional permutable band at "node". |
3987 | */ |
3988 | static __isl_give isl_schedule_node *insert_empty_permutable_band( |
3989 | __isl_take isl_schedule_node *node) |
3990 | { |
3991 | isl_space *space; |
3992 | isl_schedule *schedule; |
3993 | isl_union_set *domain; |
3994 | isl_multi_union_pw_aff *mupa; |
3995 | |
3996 | schedule = isl_schedule_node_get_schedule(node); |
3997 | domain = isl_schedule_get_domain(schedule); |
3998 | space = isl_union_set_get_space(domain); |
3999 | isl_union_set_free(domain); |
4000 | isl_schedule_free(schedule); |
4001 | |
4002 | space = isl_space_set_from_params(space); |
4003 | mupa = isl_multi_union_pw_aff_zero(space); |
4004 | node = isl_schedule_node_insert_partial_schedule(node, mupa); |
4005 | node = isl_schedule_node_band_set_permutable(node, 1); |
4006 | |
4007 | return node; |
4008 | } |
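/* Note that the inserted band has an empty partial schedule, so it does
 * not affect the generated code; it merely provides a band node to which
 * mark_outer_permutable (below) can attach the kernel mark.
 */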
4009 | |
4010 | /* See if hybrid tiling can be performed on "node" and its parent. |
4011 | * If so, apply hybrid tiling and return the updated schedule tree. |
4012 | * If not, return the original schedule tree. |
4013 | * Return NULL on error. |
4014 | * |
4015 | * First check if "node", together with its parent, meets |
4016 | * the basic requirements for hybrid tiling. |
4017 | * If so, compute the relative dependence distances of "node" |
4018 | * with respect to its parent and check if they are sufficiently bounded. |
4019 | * If so, apply hybrid tiling using user specified tile sizes. |
4020 | * |
4021 | * The tile sizes are read before the dependence distance bounds are |
4022 | * computed, because the user may have specified fewer dimensions |
4023 | * than are available. In this case, the remaining schedule dimensions |
4024 | * are split off and the dependence distances should be computed |
4025 | * after these dimensions have been split off. |
4026 | */ |
4027 | static __isl_give isl_schedule_node *try_hybrid_tile(struct gpu_gen *gen, |
4028 | __isl_take isl_schedule_node *node) |
4029 | { |
4030 | int tile_len; |
4031 | int *tile_size; |
4032 | isl_bool ok; |
4033 | isl_schedule_node *orig = node; |
4034 | ppcg_ht_bounds *bounds; |
4035 | |
4036 | ok = ppcg_ht_parent_has_input_pattern(node); |
4037 | if (ok < 0) |
4038 | return isl_schedule_node_free(node); |
4039 | if (!ok) |
4040 | return orig; |
4041 | |
4042 | tile_len = 1 + isl_schedule_node_band_n_member(node); |
4043 | tile_size = read_tile_sizes(gen, &tile_len); |
4044 | if (!tile_size) |
4045 | return isl_schedule_node_free(node); |
4046 | |
4047 | node = isl_schedule_node_copy(node); |
4048 | node = split_band(node, tile_len - 1); |
4049 | node = isl_schedule_node_parent(node); |
4050 | bounds = ppcg_ht_compute_bounds(gen->prog->scop, node); |
4051 | node = isl_schedule_node_child(node, 0); |
4052 | |
4053 | ok = ppcg_ht_bounds_is_valid(bounds); |
4054 | if (ok >= 0 && ok) |
4055 | node = gpu_hybrid_tile(gen, node, bounds, tile_size); |
4056 | else |
4057 | ppcg_ht_bounds_free(bounds); |
4058 | free(tile_size); |
4059 | |
4060 | if (ok >= 0 && !ok) { |
4061 | isl_schedule_node_free(node); |
4062 | return orig; |
4063 | } |
4064 | isl_schedule_node_free(orig); |
4065 | if (ok < 0) |
4066 | return isl_schedule_node_free(node); |
4067 | return node; |
4068 | } |
4069 | |
4070 | /* If "node" is the outermost permutable band that can be mapped to block and |
4071 | * thread identifiers in its branch (or the root of a subtree with |
4072 | * no such outer bands), |
4073 | * then mark the band as such, attaching a ppcg_kernel to the mark. |
4074 | * |
4075 | * If hybrid tiling is allowed, then first try and apply it |
4076 | * to "node" and its parent. |
4077 | * |
4078 | * If "node" is the root of a subtree without permutable bands, |
4079 | * then insert a zero-dimensional permutable band such that |
4080 | * we can assume that "node" always points to a band node. |
4081 | * This includes the case where "node" already points to a band node, |
4082 | * but one without any coincident dimension. In this case, |
4083 | * the extra node ensures that this original node does not get tiled. |
4084 | * |
4085 | * Tile "node" using user specified tile sizes, after splitting the band |
4086 | * if the number of specified tile sizes is smaller than the dimension |
4087 | * of the band. Mark the point band of this tiling as the band that |
4088 | * needs to be mapped to threads and instruct the AST generator to unroll |
4089 | * the band if the "unroll_gpu_tile" option is set. |
4090 | * Create a kernel representing the domain instances that reach "node" and |
4091 | * insert a mark node pointing to the ppcg_kernel before the band node. |
4092 | */ |
4093 | static __isl_give isl_schedule_node *mark_outer_permutable( |
4094 | __isl_take isl_schedule_node *node, void *user) |
4095 | { |
4096 | struct gpu_gen *gen = user; |
4097 | int outer; |
4098 | int scale; |
4099 | int tile_len; |
4100 | int *tile_size; |
4101 | isl_id *id; |
4102 | isl_multi_val *sizes; |
4103 | |
4104 | outer = is_outer_tilable(node); |
4105 | if (outer < 0) |
4106 | return isl_schedule_node_free(node); |
4107 | if (!outer) |
4108 | return node; |
4109 | |
4110 | if (gen->options->hybrid) { |
4111 | isl_schedule_node *saved = isl_schedule_node_copy(node); |
4112 | node = try_hybrid_tile(gen, node); |
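		/* isl objects are reference counted: isl_schedule_node_copy
		 * above returned the original pointer with an incremented
		 * count, and try_hybrid_tile returns that same pointer when
		 * no hybrid tiling was applied, so the pointer comparison
		 * below detects whether the tree was changed.
		 */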
4113 | isl_schedule_node_free(saved); |
4114 | if (node != saved) |
4115 | return node; |
4116 | } |
4117 | |
4118 | if (isl_schedule_node_get_type(node) != isl_schedule_node_band || |
4119 | !isl_schedule_node_band_member_get_coincident(node, 0)) |
4120 | node = insert_empty_permutable_band(node); |
4121 | |
4122 | tile_len = isl_schedule_node_band_n_member(node); |
4123 | tile_size = read_tile_sizes(gen, &tile_len); |
4124 | if (!tile_size) |
4125 | return isl_schedule_node_free(node); |
4126 | if (tile_len < isl_schedule_node_band_n_member(node)) |
4127 | node = isl_schedule_node_band_split(node, tile_len); |
4128 | sizes = construct_band_tiles_sizes(node, tile_size); |
4129 | node = tile_band(node, isl_multi_val_copy(sizes)); |
4130 | node = isl_schedule_node_child(node, 0); |
4131 | if (gen->options->unroll_gpu_tile) |
4132 | node = ppcg_set_schedule_node_type(node, isl_ast_loop_unroll); |
4133 | id = isl_id_alloc(gen->ctx, "thread", NULL((void*)0)); |
4134 | node = isl_schedule_node_insert_mark(node, id); |
4135 | node = isl_schedule_node_parent(node); |
4136 | |
4137 | scale = gen->options->scale_tile_loops; |
4138 | node = gpu_create_kernel(gen, node, scale, sizes); |
4139 | isl_multi_val_free(sizes); |
4140 | free(tile_size); |
4141 | |
4142 | return node; |
4143 | } |
4144 | |
4145 | /* Given a set or sequence node, return the union of the filters of either all
4146 | * (if "only_initial" is not set) or the initial (if "only_initial" is set) |
4147 | * direct subtrees that do not contain any suitably permutable bands |
4148 | * (according to subtree_has_permutable_bands). |
4149 | */ |
4150 | static __isl_give isl_union_set *get_non_parallel_subtree_filters( |
4151 | __isl_keep isl_schedule_node *node, int only_initial) |
4152 | { |
4153 | isl_space *space; |
4154 | isl_union_set *filter; |
4155 | int i, n; |
4156 | |
4157 | n = isl_schedule_node_n_children(node); |
4158 | if (n < 0) |
4159 | return NULL;
4160 | |
4161 | node = isl_schedule_node_copy(node); |
4162 | node = isl_schedule_node_child(node, 0); |
4163 | filter = isl_schedule_node_filter_get_filter(node); |
4164 | node = isl_schedule_node_parent(node); |
4165 | space = isl_union_set_get_space(filter); |
4166 | isl_union_set_free(filter); |
4167 | filter = isl_union_set_empty(space); |
4168 | |
4169 | for (i = 0; i < n; ++i) { |
4170 | int parallelism; |
4171 | |
4172 | node = isl_schedule_node_child(node, i); |
4173 | parallelism = subtree_has_permutable_bands(node); |
4174 | if (parallelism < 0) { |
4175 | filter = isl_union_set_free(filter); |
4176 | } else if (!parallelism) { |
4177 | isl_union_set *filter_i; |
4178 | filter_i = isl_schedule_node_filter_get_filter(node); |
4179 | filter = isl_union_set_union(filter, filter_i); |
4180 | } else if (only_initial) |
4181 | break; |
4182 | node = isl_schedule_node_parent(node); |
4183 | } |
4184 | |
4185 | isl_schedule_node_free(node); |
4186 | |
4187 | return filter; |
4188 | } |
4189 | |
4190 | /* Given a set or sequence node, return the union of the filters of |
4191 | * the direct subtrees that do not contain any suitably permutable bands |
4192 | * (according to subtree_has_permutable_bands). |
4193 | */ |
4194 | static __isl_give isl_union_set *get_all_non_parallel_subtree_filters( |
4195 | __isl_keep isl_schedule_node *node) |
4196 | { |
4197 | return get_non_parallel_subtree_filters(node, 0); |
4198 | } |
4199 | |
4200 | /* Given a set or sequence node, return the union of the filters of |
4201 | * the initial direct subtrees that do not contain any suitably permutable |
4202 | * bands (according to subtree_has_permutable_bands). |
4203 | */ |
4204 | static __isl_give isl_union_set *get_initial_non_parallel_subtree_filters( |
4205 | __isl_keep isl_schedule_node *node) |
4206 | { |
4207 | return get_non_parallel_subtree_filters(node, 1); |
4208 | } |
4209 | |
4210 | /* Mark all variables that are accessed by the statement instances in "domain" |
4211 | * and that are local to "prog" as requiring a declaration in the host code. |
4212 | * The statement instances in "domain" correspond to (a subset of) |
4213 | * the active instances at "node". |
4214 | * "node" is not modified by this function, except that NULL is returned |
4215 | * in case of error. |
4216 | */ |
4217 | static __isl_give isl_schedule_node *declare_accessed_local_variables( |
4218 | __isl_take isl_schedule_node *node, struct gpu_prog *prog, |
4219 | __isl_keep isl_union_set *domain) |
4220 | { |
4221 | isl_union_pw_multi_aff *contraction; |
4222 | isl_union_set *arrays; |
4223 | int i; |
4224 | |
4225 | if (!ppcg_scop_any_hidden_declarations(prog->scop)) |
4226 | return node; |
4227 | contraction = isl_schedule_node_get_subtree_contraction(node); |
4228 | domain = isl_union_set_copy(domain); |
4229 | domain = isl_union_set_preimage_union_pw_multi_aff(domain, contraction); |
4230 | arrays = accessed_by_domain(domain, prog); |
4231 | |
4232 | for (i = 0; i < prog->n_array; ++i) { |
4233 | isl_space *space; |
4234 | isl_set *set; |
4235 | int empty; |
4236 | |
4237 | if (!prog->array[i].local) |
4238 | continue; |
4239 | space = isl_set_get_space(prog->array[i].extent); |
4240 | set = isl_union_set_extract_set(arrays, space); |
4241 | empty = isl_set_plain_is_empty(set); |
4242 | isl_set_free(set); |
4243 | if (empty < 0) |
4244 | goto error; |
4245 | if (!empty) |
4246 | prog->array[i].declare_local = 1; |
4247 | } |
4248 | |
4249 | isl_union_set_free(arrays); |
4250 | return node; |
4251 | error: |
4252 | isl_union_set_free(arrays); |
4253 | return isl_schedule_node_free(node); |
4254 | } |
4255 | |
4256 | /* If "node" points to a set node, then separate its children |
4257 | * into subtrees that have suitably permutable bands and |
4258 | * those that do not. |
4259 | * Adjust the schedule tree in order to execute the second group |
4260 | * after the first group and return a pointer to the first group, |
4261 | * assuming there are any such subtrees. |
4262 | * If "node" points to a sequence node, then separate the initial |
4263 | * children that do not have suitably permutable bands and |
4264 | * return a pointer to the subsequence of children that do have such bands, |
4265 | * assuming there are any such subtrees. |
4266 | * |
4267 | * In both cases, mark all local variables in "prog" that are accessed by |
4268 | * the group without permutable bands as requiring a declaration on the host. |
4269 | */ |
4270 | static __isl_give isl_schedule_node *isolate_permutable_subtrees( |
4271 | __isl_take isl_schedule_node *node, struct gpu_prog *prog) |
4272 | { |
4273 | isl_union_set *filter; |
4274 | enum isl_schedule_node_type type; |
4275 | |
4276 | if (!node) |
4277 | return NULL;
4278 | type = isl_schedule_node_get_type(node); |
4279 | if (type == isl_schedule_node_set) { |
4280 | filter = get_all_non_parallel_subtree_filters(node); |
4281 | node = declare_accessed_local_variables(node, prog, filter); |
4282 | node = isl_schedule_node_order_after(node, filter); |
4283 | } else if (type == isl_schedule_node_sequence) { |
4284 | filter = get_initial_non_parallel_subtree_filters(node); |
4285 | node = declare_accessed_local_variables(node, prog, filter); |
4286 | node = isl_schedule_node_order_before(node, filter); |
4287 | } |
4288 | |
4289 | return node; |
4290 | } |
4291 | |
4292 | /* Replace any reference to an array element in the range of "copy" |
4293 | * by a reference to all array elements (defined by the extent of the array). |
4294 | */ |
4295 | static __isl_give isl_union_map *approximate_copy_out( |
4296 | __isl_take isl_union_map *copy, struct gpu_prog *prog) |
4297 | { |
4298 | int i; |
4299 | isl_union_map *res; |
4300 | |
4301 | res = isl_union_map_empty(isl_union_map_get_space(copy)); |
4302 | |
4303 | for (i = 0; i < prog->n_array; ++i) { |
4304 | isl_space *space; |
4305 | isl_set *set; |
4306 | isl_union_map *copy_i; |
4307 | isl_union_set *extent, *domain; |
4308 | |
4309 | space = isl_space_copy(prog->array[i].space); |
4310 | extent = isl_union_set_from_set(isl_set_universe(space)); |
4311 | copy_i = isl_union_map_copy(copy); |
4312 | copy_i = isl_union_map_intersect_range(copy_i, extent); |
4313 | set = isl_set_copy(prog->array[i].extent); |
4314 | extent = isl_union_set_from_set(set); |
4315 | domain = isl_union_map_domain(copy_i); |
4316 | copy_i = isl_union_map_from_domain_and_range(domain, extent); |
4317 | res = isl_union_map_union(res, copy_i); |
4318 | } |
4319 | |
4320 | isl_union_map_free(copy); |
4321 | |
4322 | return res; |
4323 | } |
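/* A standalone sketch of the over-approximation above, using a
 * hypothetical statement S and array A and isl string parsing:
 */
static void approximate_copy_out_sketch(isl_ctx *ctx)
{
	isl_union_map *copy;
	isl_union_set *domain, *extent;

	copy = isl_union_map_read_from_str(ctx,
		"[N] -> { S[i] -> A[i] : 0 <= i < N }");
	extent = isl_union_set_read_from_str(ctx,
		"[N] -> { A[j] : 0 <= j < N }");
	domain = isl_union_map_domain(copy);
	/* every writing instance is now mapped to the entire extent */
	copy = isl_union_map_from_domain_and_range(domain, extent);
	isl_union_map_dump(copy);
	isl_union_map_free(copy);
}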
4324 | |
4325 | /* Insert "kernel" marks that point to a ppcg_kernel structure |
4326 | * in front of all outermost tilable bands that (by construction)
4327 | * have at least one parallel loop. |
4328 | */ |
4329 | static __isl_give isl_schedule_node *mark_kernels(struct gpu_gen *gen, |
4330 | __isl_take isl_schedule_node *node) |
4331 | { |
4332 | return isl_schedule_node_map_descendant_bottom_up(node, |
4333 | &mark_outer_permutable, gen); |
4334 | } |
4335 | |
4336 | /* Construct schedule constraints from the dependences in prog->scop and |
4337 | * the array order dependences in prog->array_order. |
4338 | * |
4339 | * If live range reordering is allowed, then we need to make sure |
4340 | * that live ranges on arrays are not run in parallel since doing |
4341 | * so would require array expansion. We therefore add the array |
4342 | * order dependences to the coincidence dependences. Non-zero array |
4343 | * order dependences will then prevent a schedule dimension from being |
4344 | * considered parallel. |
4345 | * Live ranges derived from scalars are allowed to be run in parallel |
4346 | * since we force the scalars to be mapped to private memory in |
4347 | * check_scalar_live_ranges. |
4348 | * If live range reordering is allowed, then the false dependences |
4349 | * are not added to the validity constraints as that would prevent |
4350 | * reordering. Instead, the external false dependences that enforce that reads |
4351 | * from potentially live-in data precede any later write and |
4352 | * that writes of potentially live-out data follow any other earlier write |
4353 | * are added to the validity and the coincidence constraints. |
4354 | * The false dependences are still added to the proximity constraints |
4355 | * for consistency with the case where live range reordering is not allowed. |
4356 | * The coincidence constraints then consist of flow dependences, |
4357 | * external false dependences and array order dependences. |
4358 | * The independences can be filtered out from the first two sets. |
4359 | * They have already been filtered out from the array order dependences |
4360 | * on a per array basis in collect_order_dependences. |
4361 | * There is no need for a per array handling of the other two sets |
4362 | * as there should be no flow or external false dependence on local |
4363 | * variables that can be filtered out. |
4364 | */ |
4365 | static __isl_give isl_schedule_constraints *construct_schedule_constraints( |
4366 | struct gpu_prog *prog) |
4367 | { |
4368 | isl_union_set *domain; |
4369 | isl_union_map *dep_raw, *dep; |
4370 | isl_union_map *validity, *proximity, *coincidence; |
4371 | isl_schedule_constraints *sc; |
4372 | |
4373 | domain = isl_union_set_copy(prog->scop->domain); |
4374 | sc = isl_schedule_constraints_on_domain(domain); |
4375 | sc = isl_schedule_constraints_set_context(sc, |
4376 | isl_set_copy(prog->scop->context)); |
4377 | if (prog->scop->options->live_range_reordering) { |
4378 | sc = isl_schedule_constraints_set_conditional_validity(sc, |
4379 | isl_union_map_copy(prog->scop->tagged_dep_flow), |
4380 | isl_union_map_copy(prog->scop->tagged_dep_order)); |
4381 | proximity = isl_union_map_copy(prog->scop->dep_flow); |
4382 | validity = isl_union_map_copy(proximity); |
4383 | validity = isl_union_map_union(validity, |
4384 | isl_union_map_copy(prog->scop->dep_forced)); |
4385 | proximity = isl_union_map_union(proximity, |
4386 | isl_union_map_copy(prog->scop->dep_false)); |
4387 | coincidence = isl_union_map_copy(validity); |
4388 | coincidence = isl_union_map_subtract(coincidence, |
4389 | isl_union_map_copy(prog->scop->independence)); |
4390 | coincidence = isl_union_map_union(coincidence, |
4391 | isl_union_map_copy(prog->array_order)); |
4392 | } else { |
4393 | dep_raw = isl_union_map_copy(prog->scop->dep_flow); |
4394 | dep = isl_union_map_copy(prog->scop->dep_false); |
4395 | dep = isl_union_map_union(dep, dep_raw); |
4396 | dep = isl_union_map_coalesce(dep); |
4397 | proximity = isl_union_map_copy(dep); |
4398 | coincidence = isl_union_map_copy(dep); |
4399 | validity = dep; |
4400 | } |
4401 | sc = isl_schedule_constraints_set_validity(sc, validity); |
4402 | sc = isl_schedule_constraints_set_coincidence(sc, coincidence); |
4403 | sc = isl_schedule_constraints_set_proximity(sc, proximity); |
4404 | |
4405 | if (prog->scop->options->debug->dump_schedule_constraints) |
4406 | isl_schedule_constraints_dump(sc); |
4407 | return sc; |
4408 | } |
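/* A minimal sketch of feeding such constraints directly to isl;
 * ppcg itself goes through ppcg_compute_schedule (see below), so this
 * helper is purely illustrative:
 */
static __isl_give isl_schedule *compute_schedule_sketch(
	struct gpu_prog *prog)
{
	isl_schedule_constraints *sc;

	sc = construct_schedule_constraints(prog);
	/* isl takes ownership of "sc" and returns a schedule with
	 * a parallel loop in each tilable band, where possible */
	return isl_schedule_constraints_compute_schedule(sc);
}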
4409 | |
4410 | /* Compute an appropriate schedule based on the accesses in |
4411 | * gen->read and gen->write. |
4412 | * |
4413 | * We derive schedule constraints from the dependences in gen->prog->scop |
4414 | * and then use isl to compute a schedule that has a parallel loop |
4415 | * in each tilable band. |
4416 | * During the schedule construction, some statement instances |
4417 | * may be grouped first based on the input schedule. |
4418 | */ |
4419 | static __isl_give isl_schedule *compute_schedule(struct gpu_gen *gen) |
4420 | { |
4421 | isl_schedule_constraints *sc; |
4422 | isl_schedule *schedule; |
4423 | |
4424 | sc = construct_schedule_constraints(gen->prog); |
4425 | schedule = gen->prog->scop->schedule; |
4426 | schedule = ppcg_compute_schedule(sc, schedule, gen->options); |
4427 | |
4428 | return schedule; |
4429 | } |
4430 | |
4431 | /* If the band node "node" has exactly one member then mark it permutable. |
4432 | */ |
4433 | static __isl_give isl_schedule_node *band_set_permutable( |
4434 | __isl_take isl_schedule_node *node, |
4435 | __isl_keep isl_schedule_constraints *sc) |
4436 | { |
4437 | if (isl_schedule_node_band_n_member(node) == 1) |
4438 | node = isl_schedule_node_band_set_permutable(node, 1); |
4439 | |
4440 | return node; |
4441 | } |
4442 | |
4443 | /* Return the coincidence constraints between pairs of instances |
4444 | * that are scheduled together by the ancestors of "node". |
4445 | * That is, select those coincidence constraints that relate |
4446 | * pairs of instances that have the same value for the prefix schedule. |
4447 | * If the schedule depth is zero, then the prefix schedule does not |
4448 | * contain any information, so we intersect domain and range |
4449 | * of the schedule constraints with the reaching domain elements instead. |
4450 | */ |
4451 | static __isl_give isl_union_map *get_local_coincidence( |
4452 | __isl_keep isl_schedule_node *node, |
4453 | __isl_keep isl_schedule_constraints *sc) |
4454 | { |
4455 | isl_union_map *coincidence; |
4456 | isl_multi_union_pw_aff *prefix; |
4457 | isl_union_pw_multi_aff *contraction; |
4458 | |
4459 | coincidence = isl_schedule_constraints_get_coincidence(sc); |
4460 | contraction = isl_schedule_node_get_subtree_contraction(node); |
4461 | if (isl_schedule_node_get_schedule_depth(node) == 0) { |
4462 | isl_union_set *domain; |
4463 | |
4464 | domain = isl_schedule_node_get_domain(node); |
4465 | domain = isl_union_set_preimage_union_pw_multi_aff(domain, |
4466 | contraction); |
4467 | coincidence = isl_union_map_intersect_domain(coincidence, |
4468 | isl_union_set_copy(domain)); |
4469 | coincidence = isl_union_map_intersect_range(coincidence, |
4470 | domain); |
4471 | return coincidence; |
4472 | } |
4473 | |
4474 | prefix = isl_schedule_node_get_prefix_schedule_multi_union_pw_aff(node); |
4475 | prefix = isl_multi_union_pw_aff_pullback_union_pw_multi_aff(prefix, |
4476 | contraction); |
4477 | return isl_union_map_eq_at_multi_union_pw_aff(coincidence, prefix); |
4478 | } |
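/* A standalone sketch (hypothetical statements S and T) of the
 * isl_union_map_eq_at_multi_union_pw_aff call above: only pairs that
 * are assigned the same value by the schedule are retained.
 */
static void eq_at_sketch(isl_ctx *ctx)
{
	isl_union_map *coincidence;
	isl_union_pw_aff *upa;
	isl_multi_union_pw_aff *prefix;

	coincidence = isl_union_map_read_from_str(ctx,
		"{ S[i] -> S[i + 1]; S[i] -> T[i] }");
	upa = isl_union_pw_aff_read_from_str(ctx,
		"{ S[i] -> [(i)]; T[i] -> [(i)] }");
	prefix = isl_multi_union_pw_aff_from_union_pw_aff(upa);
	/* S[i] -> S[i + 1] is dropped since i != i + 1;
	 * S[i] -> T[i] is retained */
	coincidence = isl_union_map_eq_at_multi_union_pw_aff(coincidence,
		prefix);
	isl_union_map_dump(coincidence);
	isl_union_map_free(coincidence);
}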
4479 | |
4480 | /* For each member in the band node "node", determine whether |
4481 | * it is coincident with respect to the outer nodes and mark |
4482 | * it accordingly. |
4483 | * |
4484 | * That is, for each coincidence constraint between pairs |
4485 | * of instances that are scheduled together by the outer nodes, |
4486 | * check that domain and range are assigned the same value |
4487 | * by the band member. This test is performed by checking |
4488 | * that imposing the same value for the band member does not |
4489 | * remove any elements from the set of coincidence constraints. |
4490 | */ |
4491 | static __isl_give isl_schedule_node *band_set_coincident( |
4492 | __isl_take isl_schedule_node *node, |
4493 | __isl_keep isl_schedule_constraints *sc) |
4494 | { |
4495 | isl_union_map *coincidence; |
4496 | isl_union_pw_multi_aff *contraction; |
4497 | isl_multi_union_pw_aff *partial; |
4498 | int i, n; |
4499 | |
4500 | coincidence = get_local_coincidence(node, sc); |
4501 | |
4502 | partial = isl_schedule_node_band_get_partial_schedule(node); |
4503 | contraction = isl_schedule_node_get_subtree_contraction(node); |
4504 | partial = isl_multi_union_pw_aff_pullback_union_pw_multi_aff(partial, |
4505 | contraction); |
4506 | n = isl_schedule_node_band_n_member(node); |
4507 | for (i = 0; i < n; ++i) { |
4508 | isl_union_map *coincidence_i; |
4509 | isl_union_pw_aff *upa; |
4510 | isl_multi_union_pw_aff *partial_i; |
4511 | int subset; |
4512 | |
4513 | upa = isl_multi_union_pw_aff_get_union_pw_aff(partial, i); |
4514 | partial_i = isl_multi_union_pw_aff_from_union_pw_aff(upa); |
4515 | coincidence_i = isl_union_map_copy(coincidence); |
4516 | coincidence_i = isl_union_map_eq_at_multi_union_pw_aff( |
4517 | coincidence_i, partial_i); |
4518 | subset = isl_union_map_is_subset(coincidence, coincidence_i); |
4519 | isl_union_map_free(coincidence_i); |
4520 | |
4521 | if (subset < 0) |
4522 | break; |
4523 | node = isl_schedule_node_band_member_set_coincident(node, i, |
4524 | subset); |
4525 | } |
4526 | if (i < n) |
4527 | node = isl_schedule_node_free(node); |
4528 | isl_multi_union_pw_aff_free(partial); |
4529 | isl_union_map_free(coincidence); |
4530 | |
4531 | return node; |
4532 | } |
4533 | |
4534 | /* If "node" is a band, then set its properties. |
4535 | * |
4536 | * In particular, if the band has exactly one member, then mark it permutable. |
4537 | * Mark the band member coincident based on the coincidence constraints |
4538 | * of "sc". |
4539 | */ |
4540 | static __isl_give isl_schedule_node *set_band_properties( |
4541 | __isl_take isl_schedule_node *node, void *user) |
4542 | { |
4543 | isl_schedule_constraints *sc = user; |
4544 | |
4545 | if (isl_schedule_node_get_type(node) != isl_schedule_node_band) |
4546 | return node; |
4547 | if (isl_schedule_node_band_n_member(node) == 0) |
4548 | return node; |
4549 | |
4550 | node = band_set_permutable(node, sc); |
4551 | node = band_set_coincident(node, sc); |
4552 | |
4553 | return node; |
4554 | } |
4555 | |
4556 | /* Return the original schedule with all bands marked permutable and |
4557 | * all band members marked coincident based on the coincidence constraints. |
4558 | * The bands are explicitly marked permutable so that they will be considered |
4559 | * by mark_outer_permutable. |
4560 | */ |
4561 | static __isl_give isl_schedule *determine_properties_original_schedule( |
4562 | struct gpu_gen *gen) |
4563 | { |
4564 | isl_schedule *schedule; |
4565 | isl_schedule_constraints *sc; |
4566 | |
4567 | schedule = isl_schedule_copy(gen->prog->scop->schedule); |
4568 | sc = construct_schedule_constraints(gen->prog); |
4569 | schedule = isl_schedule_map_schedule_node_bottom_up(schedule, |
4570 | &set_band_properties, sc); |
4571 | isl_schedule_constraints_free(sc); |
4572 | |
4573 | return schedule; |
4574 | } |
4575 | |
4576 | /* Compute a schedule or determine the properties of the original schedule |
4577 | * depending on the value of the "reschedule" option. |
4578 | */ |
4579 | static __isl_give isl_schedule *compute_or_set_properties(void *user) |
4580 | { |
4581 | struct gpu_gen *gen = user; |
4582 | |
4583 | if (gen->options->reschedule) |
4584 | return compute_schedule(gen); |
4585 | else |
4586 | return determine_properties_original_schedule(gen); |
4587 | } |
4588 | |
4589 | /* Obtain a schedule for the scop, by reading it from |
4590 | * a file, by computing one or by determining the properties |
4591 | * of the original schedule. |
4592 | */ |
4593 | __isl_give isl_schedule *get_schedule(struct gpu_gen *gen) |
4594 | { |
4595 | return ppcg_get_schedule(gen->ctx, gen->options, |
4596 | &compute_or_set_properties, gen); |
4597 | } |
4598 | |
4599 | /* Construct the string "<a>_<b>". |
4600 | */ |
4601 | static char *concat(isl_ctx *ctx, const char *a, const char *b) |
4602 | { |
4603 | isl_printer *p; |
4604 | char *s; |
4605 | |
4606 | p = isl_printer_to_str(ctx); |
4607 | p = isl_printer_print_str(p, a); |
4608 | p = isl_printer_print_str(p, "_"); |
4609 | p = isl_printer_print_str(p, b); |
4610 | s = isl_printer_get_str(p); |
4611 | isl_printer_free(p); |
4612 | |
4613 | return s; |
4614 | } |
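/* For example, concat(ctx, "to_device", "A") yields "to_device_A".
 * The caller is responsible for freeing the returned string.
 */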
4615 | |
4616 | /* For each array in "prog" of which an element appears in "accessed" and |
4617 | * that is not a read only scalar, create a zero-dimensional universe set |
4618 | * of which the tuple id has name "<prefix>_<name of array>" and a user |
4619 | * pointer pointing to the array (gpu_array_info). |
4620 | * |
4621 | * If the array is local to "prog", then make sure it will be declared |
4622 | * in the host code. |
4623 | * |
4624 | * Return the list of these universe sets. |
4625 | */ |
4626 | static __isl_give isl_union_set_list *create_copy_filters(struct gpu_prog *prog, |
4627 | const char *prefix, __isl_take isl_union_set *accessed) |
4628 | { |
4629 | int i; |
4630 | isl_ctx *ctx; |
4631 | isl_union_set_list *filters; |
4632 | |
4633 | ctx = prog->ctx; |
4634 | filters = isl_union_set_list_alloc(ctx, 0); |
4635 | for (i = 0; i < prog->n_array; ++i) { |
4636 | struct gpu_array_info *array = &prog->array[i]; |
4637 | isl_space *space; |
4638 | isl_set *accessed_i; |
4639 | int empty; |
4640 | char *name; |
4641 | isl_id *id; |
4642 | isl_union_set *uset; |
4643 | |
4644 | if (gpu_array_is_read_only_scalar(array)) |
4645 | continue; |
4646 | |
4647 | space = isl_space_copy(array->space); |
4648 | accessed_i = isl_union_set_extract_set(accessed, space); |
4649 | empty = isl_set_plain_is_empty(accessed_i); |
4650 | isl_set_free(accessed_i); |
4651 | if (empty < 0) { |
4652 | filters = isl_union_set_list_free(filters); |
4653 | break; |
4654 | } |
4655 | if (empty) |
4656 | continue; |
4657 | |
4658 | array->global = 1; |
4659 | if (array->local) |
4660 | array->declare_local = 1; |
4661 | |
4662 | name = concat(ctx, prefix, array->name); |
4663 | id = name ? isl_id_alloc(ctx, name, array) : NULL;
4664 | free(name); |
4665 | space = isl_space_set_alloc(ctx, 0, 0); |
4666 | space = isl_space_set_tuple_id(space, isl_dim_set, id); |
4667 | uset = isl_union_set_from_set(isl_set_universe(space)); |
4668 | |
4669 | filters = isl_union_set_list_add(filters, uset); |
4670 | } |
4671 | isl_union_set_free(accessed); |
4672 | |
4673 | return filters; |
4674 | } |
4675 | |
4676 | /* Make sure that code for the statements in "filters" that |
4677 | * copy arrays to or from the device is only generated when |
4678 | * the size of the corresponding array is positive. |
4679 | * That is, add a set node underneath "graft" with "filters" as children |
4680 | * and for each child add a guard that selects the parameter
4681 | * values for which the corresponding array has a positive size. |
4682 | * The array is available in the user pointer of the statement identifier. |
4683 | * "depth" is the schedule depth of the position where "graft" |
4684 | * will be added. |
4685 | */ |
4686 | static __isl_give isl_schedule_node *insert_positive_size_guards( |
4687 | __isl_take isl_schedule_node *graft, |
4688 | __isl_take isl_union_set_list *filters, int depth) |
4689 | { |
4690 | int i, n; |
4691 | |
4692 | graft = isl_schedule_node_child(graft, 0); |
4693 | graft = isl_schedule_node_insert_set(graft, filters); |
4694 | n = isl_schedule_node_n_children(graft); |
4695 | for (i = 0; i < n; ++i) { |
4696 | isl_union_set *filter; |
4697 | isl_set *domain, *guard; |
4698 | isl_id *id; |
4699 | struct gpu_array_info *array; |
4700 | |
4701 | graft = isl_schedule_node_child(graft, i); |
4702 | filter = isl_schedule_node_filter_get_filter(graft); |
4703 | domain = isl_set_from_union_set(filter); |
4704 | id = isl_set_get_tuple_id(domain); |
4705 | array = isl_id_get_user(id); |
4706 | isl_id_free(id); |
4707 | isl_set_free(domain); |
4708 | guard = gpu_array_positive_size_guard(array); |
4709 | guard = isl_set_from_params(guard); |
4710 | guard = isl_set_add_dims(guard, isl_dim_set, depth); |
4711 | graft = isl_schedule_node_child(graft, 0); |
4712 | graft = isl_schedule_node_insert_guard(graft, guard); |
4713 | graft = isl_schedule_node_parent(graft); |
4714 | graft = isl_schedule_node_parent(graft); |
4715 | } |
4716 | graft = isl_schedule_node_parent(graft); |
4717 | |
4718 | return graft; |
4719 | } |
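/* Sketch of the resulting subtree for two hypothetical arrays A and B
 * (statement names assume the "to_device" prefix):
 *
 *	extension (the graft)
 *	  set
 *	    filter: { to_device_A[] }
 *	      guard: size of A is positive
 *	    filter: { to_device_B[] }
 *	      guard: size of B is positive
 */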
4720 | |
4721 | /* Create a graft for copying arrays to or from the device, |
4722 | * whenever the size of the array is strictly positive. |
4723 | * Each statement is called "<prefix>_<name of array>" and |
4724 | * the identifier has a user pointer pointing to the array. |
4725 | * The graft will be added at the position specified by "node". |
4726 | * "copy" contains the array elements that need to be copied. |
4727 | * Only arrays of which some elements need to be copied |
4728 | * will have a corresponding statement in the graft.
4729 | * Note though that each such statement will copy the entire array. |
4730 | */ |
4731 | static __isl_give isl_schedule_node *create_copy_device(struct gpu_prog *prog, |
4732 | __isl_keep isl_schedule_node *node, const char *prefix, |
4733 | __isl_take isl_union_set *copy) |
4734 | { |
4735 | int depth; |
4736 | isl_ctx *ctx; |
4737 | isl_space *space; |
4738 | isl_union_set *all, *domain; |
4739 | isl_union_set_list *filters; |
4740 | isl_union_map *extension; |
4741 | isl_schedule_node *graft; |
4742 | |
4743 | ctx = prog->ctx; |
4744 | depth = isl_schedule_node_get_schedule_depth(node); |
4745 | filters = create_copy_filters(prog, prefix, copy); |
4746 | all = isl_union_set_list_union(isl_union_set_list_copy(filters)); |
4747 | |
4748 | space = depth < 0 ? NULL : isl_space_set_alloc(ctx, 0, depth);
4749 | domain = isl_union_set_from_set(isl_set_universe(space)); |
4750 | extension = isl_union_map_from_domain_and_range(domain, all); |
4751 | graft = isl_schedule_node_from_extension(extension); |
4752 | |
4753 | if (!filters) |
4754 | return isl_schedule_node_free(graft); |
4755 | if (isl_union_set_list_n_union_set(filters) == 0) { |
4756 | isl_union_set_list_free(filters); |
4757 | return graft; |
4758 | } |
4759 | |
4760 | return insert_positive_size_guards(graft, filters, depth); |
4761 | } |
4762 | |
4763 | /* Return (the universe spaces of) the arrays that are declared |
4764 | * inside the scop corresponding to "prog" and for which all |
4765 | * potential writes inside the scop form a subset of "domain". |
4766 | */ |
4767 | static __isl_give isl_union_set *extract_local_accesses(struct gpu_prog *prog, |
4768 | __isl_keep isl_union_set *domain) |
4769 | { |
4770 | int i; |
4771 | isl_union_set *local; |
4772 | |
4773 | local = isl_union_set_empty(isl_union_set_get_space(domain)); |
4774 | |
4775 | for (i = 0; i < prog->n_array; ++i) { |
4776 | isl_set *set; |
4777 | isl_union_map *to_outer; |
4778 | isl_union_map *may_write; |
4779 | isl_union_set *write_domain; |
4780 | isl_union_set *fields; |
4781 | int subset; |
4782 | |
4783 | if (!prog->array[i].local) |
4784 | continue; |
4785 | |
4786 | set = isl_set_universe(isl_space_copy(prog->array[i].space)); |
4787 | to_outer = isl_union_map_copy(prog->to_outer); |
4788 | to_outer = isl_union_map_intersect_range(to_outer, |
4789 | isl_union_set_from_set(isl_set_copy(set))); |
4790 | fields = isl_union_map_domain(to_outer); |
4791 | may_write = isl_union_map_copy(prog->may_write); |
4792 | may_write = isl_union_map_intersect_range(may_write, fields); |
4793 | write_domain = isl_union_map_domain(may_write); |
4794 | subset = isl_union_set_is_subset(write_domain, domain); |
4795 | isl_union_set_free(write_domain); |
4796 | |
4797 | if (subset < 0) { |
4798 | isl_set_free(set); |
4799 | return isl_union_set_free(local); |
4800 | } else if (subset) { |
4801 | local = isl_union_set_add_set(local, set); |
4802 | } else { |
4803 | isl_set_free(set); |
4804 | } |
4805 | } |
4806 | |
4807 | return local; |
4808 | } |
4809 | |
4810 | /* Internal data structure for node_may_persist. |
4811 | * |
4812 | * "tagger" maps tagged iteration domains to the corresponding untagged |
4813 | * iteration domain. |
4814 | * |
4815 | * "may_persist_flow" is the set of all tagged dataflow dependences |
4816 | * with those dependences removed that either precede or follow |
4817 | * the kernel launch in a sequence. |
4818 | * "inner_band_flow" is the set of all tagged dataflow dependences |
4819 | * that are local to a given iteration of the outer band nodes |
4820 | * with respect to the current node. |
4821 | * "local_flow" is equal to "inner_band_flow", except that the domain |
4822 | * and the range have been intersected with intermediate filters |
4823 | * on children of sets or sequences. |
4824 | */ |
4825 | struct ppcg_may_persist_data { |
4826 | isl_union_pw_multi_aff *tagger; |
4827 | |
4828 | isl_union_map *local_flow; |
4829 | isl_union_map *inner_band_flow; |
4830 | isl_union_map *may_persist_flow; |
4831 | }; |
4832 | |
4833 | /* Update the information in "data" based on the band ancestor "node". |
4834 | * |
4835 | * In particular, we restrict the dependences in data->local_flow |
4836 | * to those dependences where the source and the sink occur in
4837 | * the same iteration of the given band node. |
4838 | * We also update data->inner_band_flow to the new value of |
4839 | * data->local_flow. |
4840 | */ |
4841 | static int update_may_persist_at_band(__isl_keep isl_schedule_node *node, |
4842 | struct ppcg_may_persist_data *data) |
4843 | { |
4844 | isl_multi_union_pw_aff *partial; |
4845 | isl_union_pw_multi_aff *contraction; |
4846 | isl_union_map *flow; |
4847 | |
4848 | if (isl_schedule_node_band_n_member(node) == 0) |
4849 | return 0; |
4850 | |
4851 | partial = isl_schedule_node_band_get_partial_schedule(node); |
4852 | contraction = isl_schedule_node_get_subtree_contraction(node); |
4853 | partial = isl_multi_union_pw_aff_pullback_union_pw_multi_aff(partial, |
4854 | contraction); |
4855 | partial = isl_multi_union_pw_aff_pullback_union_pw_multi_aff(partial, |
4856 | isl_union_pw_multi_aff_copy(data->tagger)); |
4857 | |
4858 | flow = data->local_flow; |
4859 | flow = isl_union_map_eq_at_multi_union_pw_aff(flow, partial); |
4860 | data->local_flow = flow; |
4861 | |
4862 | isl_union_map_free(data->inner_band_flow); |
4863 | data->inner_band_flow = isl_union_map_copy(data->local_flow); |
4864 | |
4865 | return 0; |
4866 | } |
4867 | |
4868 | /* Given a set of local reaching domain elements "domain", |
4869 | * expand them to the corresponding leaf domain elements using "contraction" |
4870 | * and insert the array references tags using data->tagger. |
4871 | */ |
4872 | static __isl_give isl_union_set *expand_and_tag( |
4873 | __isl_take isl_union_set *domain, |
4874 | __isl_take isl_union_pw_multi_aff *contraction, |
4875 | struct ppcg_may_persist_data *data) |
4876 | { |
4877 | domain = isl_union_set_preimage_union_pw_multi_aff(domain, |
4878 | contraction); |
4879 | domain = isl_union_set_preimage_union_pw_multi_aff(domain, |
4880 | isl_union_pw_multi_aff_copy(data->tagger)); |
4881 | return domain; |
4882 | } |
4883 | |
4884 | /* Given a filter node that is the child of a set or sequence node, |
4885 | * restrict data->local_flow to refer only to those elements |
4886 | * in the filter of the node. |
4887 | * "contraction" maps the leaf domain elements of the schedule tree |
4888 | * to the corresponding domain elements at (the parent of) "node". |
4889 | */ |
4890 | static int filter_flow(__isl_keep isl_schedule_node *node, |
4891 | struct ppcg_may_persist_data *data, |
4892 | __isl_take isl_union_pw_multi_aff *contraction) |
4893 | { |
4894 | isl_union_set *filter; |
4895 | isl_union_map *flow; |
4896 | |
4897 | flow = data->local_flow; |
4898 | filter = isl_schedule_node_filter_get_filter(node); |
4899 | filter = expand_and_tag(filter, contraction, data); |
4900 | flow = isl_union_map_intersect_domain(flow, isl_union_set_copy(filter)); |
4901 | flow = isl_union_map_intersect_range(flow, filter); |
4902 | data->local_flow = flow; |
4903 | |
4904 | return 0; |
4905 | } |
4906 | |
4907 | /* Given a filter node "node", collect the filters on all preceding siblings |
4908 | * (which are also filter nodes), add them to "filters" and return the result. |
4909 | */ |
4910 | static __isl_give isl_union_set *add_previous_filters( |
4911 | __isl_take isl_union_set *filters, __isl_keep isl_schedule_node *node) |
4912 | { |
4913 | isl_schedule_node *sibling; |
4914 | |
4915 | sibling = isl_schedule_node_copy(node); |
4916 | while (sibling && isl_schedule_node_has_previous_sibling(sibling)) { |
4917 | isl_union_set *filter; |
4918 | |
4919 | sibling = isl_schedule_node_previous_sibling(sibling); |
4920 | filter = isl_schedule_node_filter_get_filter(sibling); |
4921 | filters = isl_union_set_union(filters, filter); |
4922 | } |
4923 | isl_schedule_node_free(sibling); |
4924 | if (!sibling) |
4925 | return isl_union_set_free(filters); |
4926 | |
4927 | return filters; |
4928 | } |
4929 | |
4930 | /* Given a filter node "node", collect the filters on all following siblings |
4931 | * (which are also filter nodes), add them to "filters" and return the result. |
4932 | */ |
4933 | static __isl_give isl_union_set *add_next_filters( |
4934 | __isl_take isl_union_set *filters, __isl_keep isl_schedule_node *node) |
4935 | { |
4936 | isl_schedule_node *sibling; |
4937 | |
4938 | sibling = isl_schedule_node_copy(node); |
4939 | while (sibling && isl_schedule_node_has_next_sibling(sibling)) { |
4940 | isl_union_set *filter; |
4941 | |
4942 | sibling = isl_schedule_node_next_sibling(sibling); |
4943 | filter = isl_schedule_node_filter_get_filter(sibling); |
4944 | filters = isl_union_set_union(filters, filter); |
4945 | } |
4946 | isl_schedule_node_free(sibling); |
4947 | if (!sibling) |
4948 | return isl_union_set_free(filters); |
4949 | |
4950 | return filters; |
4951 | } |
4952 | |
4953 | /* Remove those flow dependences from data->may_persist_flow |
4954 | * that flow between elements of "domain" within the same iteration |
4955 | * of all outer band nodes. |
4956 | * "contraction" maps the leaf domain elements of the schedule tree |
4957 | * to the corresponding elements of "domain".
4958 | */ |
4959 | static void remove_external_flow(struct ppcg_may_persist_data *data, |
4960 | __isl_take isl_union_set *domain, |
4961 | __isl_keep isl_union_pw_multi_aff *contraction) |
4962 | { |
4963 | isl_union_map *flow; |
4964 | |
4965 | contraction = isl_union_pw_multi_aff_copy(contraction); |
4966 | domain = expand_and_tag(domain, contraction, data); |
4967 | flow = isl_union_map_copy(data->local_flow); |
4968 | flow = isl_union_map_intersect_domain(flow, isl_union_set_copy(domain)); |
4969 | flow = isl_union_map_intersect_range(flow, domain); |
4970 | |
4971 | data->may_persist_flow = isl_union_map_subtract(data->may_persist_flow, |
4972 | flow); |
4973 | } |
4974 | |
4975 | /* Update the information in "data" based on the filter ancestor "node". |
4976 | * We only need to modify anything if the filter is the child |
4977 | * of a set or sequence node. |
4978 | * |
4979 | * In the case of a sequence, we remove the dependences between |
4980 | * statement instances that are both executed either before or |
4981 | * after the subtree that will be mapped to a kernel, within |
4982 | * the same iteration of outer bands. |
4983 | * |
4984 | * In both cases, we restrict data->local_flow to the current child. |
4985 | */ |
4986 | static int update_may_persist_at_filter(__isl_keep isl_schedule_node *node, |
4987 | struct ppcg_may_persist_data *data) |
4988 | { |
4989 | enum isl_schedule_node_type type; |
4990 | isl_schedule_node *parent; |
4991 | isl_space *space; |
4992 | isl_union_pw_multi_aff *contraction; |
4993 | isl_union_set *before, *after, *filter; |
4994 | |
4995 | type = isl_schedule_node_get_parent_type(node); |
4996 | if (type != isl_schedule_node_sequence && type != isl_schedule_node_set) |
4997 | return 0; |
4998 | |
4999 | parent = isl_schedule_node_copy(node); |
5000 | parent = isl_schedule_node_parent(parent); |
5001 | contraction = isl_schedule_node_get_subtree_contraction(parent); |
5002 | isl_schedule_node_free(parent); |
5003 | |
5004 | if (type == isl_schedule_node_set) |
5005 | return filter_flow(node, data, contraction); |
5006 | |
5007 | filter = isl_schedule_node_filter_get_filter(node); |
5008 | space = isl_union_set_get_space(filter); |
5009 | isl_union_set_free(filter); |
5010 | before = isl_union_set_empty(space); |
5011 | after = isl_union_set_copy(before); |
5012 | before = add_previous_filters(before, node); |
5013 | after = add_next_filters(after, node); |
5014 | |
5015 | remove_external_flow(data, before, contraction); |
5016 | remove_external_flow(data, after, contraction); |
5017 | |
5018 | return filter_flow(node, data, contraction); |
5019 | } |
5020 | |
5021 | /* Update the information in "data" based on the ancestor "node". |
5022 | */ |
5023 | static isl_stat update_may_persist_at(__isl_keep isl_schedule_node *node, |
5024 | void *user) |
5025 | { |
5026 | struct ppcg_may_persist_data *data = user; |
5027 | |
5028 | switch (isl_schedule_node_get_type(node)) { |
5029 | case isl_schedule_node_error: |
5030 | return isl_stat_error; |
5031 | case isl_schedule_node_context: |
5032 | case isl_schedule_node_domain: |
5033 | case isl_schedule_node_expansion: |
5034 | case isl_schedule_node_extension: |
5035 | case isl_schedule_node_guard: |
5036 | case isl_schedule_node_leaf: |
5037 | case isl_schedule_node_mark: |
5038 | case isl_schedule_node_sequence: |
5039 | case isl_schedule_node_set: |
5040 | break; |
5041 | case isl_schedule_node_band: |
5042 | if (update_may_persist_at_band(node, data) < 0) |
5043 | return isl_stat_error; |
5044 | break; |
5045 | case isl_schedule_node_filter: |
5046 | if (update_may_persist_at_filter(node, data) < 0) |
5047 | return isl_stat_error; |
5048 | break; |
5049 | } |
5050 | |
5051 | return isl_stat_ok; |
5052 | } |
5053 | |
5054 | /* Determine the set of array elements that may need to be preserved
5055 | * by a kernel constructed from the subtree at "node". |
5056 | * This includes the set of array elements that may need to be preserved |
5057 | * by the entire scop (prog->may_persist) and the elements for which |
5058 | * there is a potential flow dependence that may cross a kernel launch. |
5059 | * |
5060 | * To determine the second set, we start from all flow dependences. |
5061 | * From this set of dependences, we remove those that cannot possibly |
5062 | * require data to be preserved by a kernel launch. |
5063 | * In particular, we consider the following sets of dependences. |
5064 | * - dependences of which the write occurs inside the kernel. |
5065 | * If the data is needed outside the kernel, then it will |
5066 | * be copied out immediately after the kernel launch, so there |
5067 | * is no need for any special care. |
5068 | * - dependences of which the read occurs inside the kernel and the |
5069 | * corresponding write occurs inside the same iteration of the |
5070 | * outer band nodes. This means that the data is needed in |
5071 | * the first kernel launch after the write, which is already |
5072 | * taken care of by the standard copy-in. That is, the data |
5073 | * do not need to be preserved by any intermediate call to |
5074 | * the same kernel. |
5075 | * - dependences of which the write and the read either both occur |
5076 | * before the kernel launch or both occur after the kernel launch, |
5077 | * within the same iteration of the outer band nodes with respect |
5078 | * to the sequence that determines the ordering of the dependence |
5079 | * and the kernel launch. Such flow dependences cannot cross |
5080 | * any kernel launch. |
5081 | * |
5082 | * For the remaining (tagged) dependences, we take the domain |
5083 | * (i.e., the tagged writes) and apply the tagged access relation |
5084 | * to obtain the accessed data elements. |
5085 | * These are then combined with the elements that may need to be |
5086 | * preserved by the entire scop. |
5087 | */ |
5088 | static __isl_give isl_union_set *node_may_persist( |
5089 | __isl_keep isl_schedule_node *node, struct gpu_prog *prog) |
5090 | { |
5091 | struct ppcg_may_persist_data data; |
5092 | isl_union_pw_multi_aff *contraction; |
5093 | isl_union_set *domain; |
5094 | isl_union_set *persist; |
5095 | isl_union_map *flow, *local_flow; |
5096 | |
5097 | data.tagger = prog->scop->tagger; |
5098 | |
5099 | flow = isl_union_map_copy(prog->scop->tagged_dep_flow); |
5100 | data.local_flow = isl_union_map_copy(flow); |
5101 | data.inner_band_flow = isl_union_map_copy(flow); |
5102 | data.may_persist_flow = flow; |
5103 | if (isl_schedule_node_foreach_ancestor_top_down(node, |
5104 | &update_may_persist_at, &data) < 0) |
5105 | data.may_persist_flow = |
5106 | isl_union_map_free(data.may_persist_flow); |
5107 | flow = data.may_persist_flow; |
5108 | isl_union_map_free(data.local_flow); |
5109 | |
5110 | domain = isl_schedule_node_get_domain(node); |
5111 | contraction = isl_schedule_node_get_subtree_contraction(node); |
5112 | domain = isl_union_set_preimage_union_pw_multi_aff(domain, |
5113 | contraction); |
5114 | domain = isl_union_set_preimage_union_pw_multi_aff(domain, |
5115 | isl_union_pw_multi_aff_copy(data.tagger)); |
5116 | flow = isl_union_map_subtract_domain(flow, isl_union_set_copy(domain)); |
5117 | local_flow = data.inner_band_flow; |
5118 | local_flow = isl_union_map_intersect_range(local_flow, domain); |
5119 | flow = isl_union_map_subtract(flow, local_flow); |
5120 | |
5121 | persist = isl_union_map_domain(flow); |
5122 | persist = isl_union_set_apply(persist, |
5123 | isl_union_map_copy(prog->scop->tagged_may_writes)); |
5124 | persist = isl_union_set_union(persist, |
5125 | isl_union_set_copy(prog->may_persist)); |
5126 | |
5127 | return persist; |
5128 | } |
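/* Worked example (illustrative, not from the source): suppose a host
 * statement writes B[i] in iteration t of an outer loop and the kernel
 * constructed from "node" reads B[i] in the same iteration t.  That
 * dependence is pruned above, since the copy-in before the launch
 * already provides the value.  If the kernel instead reads the value
 * written in iteration t - 1, the dependence may cross intermediate
 * kernel launches, so the written elements of B end up in "persist".
 */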
5129 | |
5130 | /* Add nodes for copying outer arrays in and out of the device |
5131 | * before and after the subtree "node", which contains one or more kernels. |
5132 | * "domain" contains the original statement instances, i.e., |
5133 | * those that correspond to the domains of the access relations in "prog". |
5134 | * In particular, the domain has not been contracted in any way. |
5135 | * "prefix" contains the prefix schedule at that point, in terms |
5136 | * of the same original statement instances. |
5137 | * |
5138 | * We first compute the sets of outer array elements that need |
5139 | * to be copied in and out and then graft in the nodes for |
5140 | * performing this copying. |
5141 | * |
5142 | * In particular, for each array that is possibly written anywhere in |
5143 | * the subtree "node" and that may be used after "node" |
5144 | * or that may be visible outside the corresponding scop, |
5145 | * we copy out its entire extent. |
5146 | * |
5147 | * Any array element that is read without first being written inside
5148 | * the subtree "node" needs to be copied in. |
5149 | * Furthermore, if there are any array elements that |
5150 | * are copied out, but that may not be written inside "node", then
5151 | * they also need to be copied in to ensure that the value after execution |
5152 | * is the same as the value before execution, at least for those array |
5153 | * elements that may have their values preserved by the scop or that |
5154 | * may be written before "node" and read after "node". |
5155 | * In case the array elements are structures, we need to take into |
5156 | * account that all members of the structures need to be written |
5157 | * by "node" before we can avoid copying the data structure in. |
5158 | * |
5159 | * Note that the may_write relation is intersected with the domain, |
5160 | * which has been intersected with the context. |
5161 | * This helps in those cases where the arrays are declared with a fixed size, |
5162 | * while the accesses are parametric and the context assigns a fixed value |
5163 | * to the parameters. |
5164 | * |
5165 | * If an element from a local array is read without first being written, |
5166 | * then there is no point in copying it in since it cannot have been |
5167 | * written prior to the scop. Warn about the uninitialized read instead. |
5168 | */ |
5169 | static __isl_give isl_schedule_node *add_to_from_device( |
5170 | __isl_take isl_schedule_node *node, __isl_take isl_union_set *domain, |
5171 | __isl_take isl_union_map *prefix, struct gpu_prog *prog) |
5172 | { |
5173 | isl_union_set *local; |
5174 | isl_union_set *may_persist; |
5175 | isl_union_map *may_write, *must_write, *copy_out, *not_written; |
5176 | isl_union_map *read, *copy_in; |
5177 | isl_union_map *tagged; |
5178 | isl_union_map *local_uninitialized; |
5179 | isl_schedule_node *graft; |
5180 | |
5181 | tagged = isl_union_map_copy(prog->scop->tagged_reads); |
5182 | tagged = isl_union_map_union(tagged, |
5183 | isl_union_map_copy(prog->scop->tagged_may_writes)); |
5184 | |
5185 | may_write = isl_union_map_copy(prog->may_write); |
5186 | may_write = isl_union_map_intersect_domain(may_write, |
5187 | isl_union_set_copy(domain)); |
5188 | may_write = remove_local_accesses(prog, |
5189 | isl_union_map_copy(tagged), may_write, |
5190 | isl_union_map_copy(prefix), 0); |
5191 | may_write = isl_union_map_apply_range(may_write, |
5192 | isl_union_map_copy(prog->to_outer)); |
5193 | may_write = isl_union_map_apply_domain(may_write, |
5194 | isl_union_map_copy(prefix)); |
5195 | may_write = approximate_copy_out(may_write, prog); |
5196 | copy_out = isl_union_map_copy(may_write); |
5197 | may_write = isl_union_map_apply_range(may_write, |
5198 | isl_union_map_copy(prog->to_inner)); |
5199 | must_write = isl_union_map_copy(prog->must_write); |
5200 | must_write = isl_union_map_apply_domain(must_write, |
5201 | isl_union_map_copy(prefix)); |
5202 | may_persist = node_may_persist(node, prog); |
5203 | may_write = isl_union_map_intersect_range(may_write, may_persist); |
5204 | not_written = isl_union_map_subtract(may_write, must_write); |
5205 | |
5206 | local = extract_local_accesses(prog, domain); |
5207 | read = isl_union_map_copy(prog->read); |
5208 | read = isl_union_map_intersect_domain(read, domain); |
5209 | read = remove_local_accesses(prog, tagged, read, |
5210 | isl_union_map_copy(prefix), 1); |
5211 | local = isl_union_set_apply(local, isl_union_map_copy(prog->to_inner)); |
5212 | local_uninitialized = isl_union_map_copy(prog->scop->live_in); |
5213 | local_uninitialized = isl_union_map_intersect_range(local_uninitialized, |
5214 | local); |
5215 | local_uninitialized = isl_union_map_intersect(local_uninitialized, |
5216 | isl_union_map_copy(read)); |
5217 | if (!isl_union_map_is_empty(local_uninitialized)) { |
5218 | fprintf(stderr,
5219 | "possibly uninitialized reads (not copied in):\n"); |
5220 | isl_union_map_dump(local_uninitialized); |
5221 | } |
5222 | read = isl_union_map_subtract(read, local_uninitialized); |
5223 | read = isl_union_map_apply_domain(read, prefix); |
5224 | copy_in = isl_union_map_union(read, not_written); |
5225 | copy_in = isl_union_map_apply_range(copy_in, |
5226 | isl_union_map_copy(prog->to_outer)); |
5227 | |
5228 | graft = create_copy_device(prog, node, "to_device", |
5229 | isl_union_map_range(copy_in)); |
5230 | node = isl_schedule_node_graft_before(node, graft); |
5231 | graft = create_copy_device(prog, node, "from_device", |
5232 | isl_union_map_range(copy_out)); |
5233 | node = isl_schedule_node_graft_after(node, graft); |
5234 | |
5235 | return node; |
5236 | } |
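/* Worked example (illustrative, not from the source): if a kernel
 * must-writes all of A[i] for 0 <= i < N, the entire extent of A is
 * copied out and only the elements it reads are copied in.  If it
 * only may-writes, say, the even elements, then the remaining
 * elements of the copied-out extent end up in "not_written" and,
 * insofar as they may persist, are copied in as well, so that the
 * copy-out does not clobber their original values.
 */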
5237 | |
5238 | /* Add nodes for initializing ("init_device") and clearing ("clear_device") |
5239 | * the device before and after "node". |
5240 | */ |
5241 | static __isl_give isl_schedule_node *add_init_clear_device( |
5242 | __isl_take isl_schedule_node *node) |
5243 | { |
5244 | isl_ctx *ctx; |
5245 | isl_space *space; |
5246 | isl_union_set *domain; |
5247 | isl_schedule_node *graft; |
5248 | |
5249 | ctx = isl_schedule_node_get_ctx(node); |
5250 | |
5251 | space = isl_space_set_alloc(ctx, 0, 0); |
5252 | space = isl_space_set_tuple_name(space, isl_dim_set, "init_device"); |
5253 | domain = isl_union_set_from_set(isl_set_universe(space)); |
5254 | graft = isl_schedule_node_from_domain(domain); |
5255 | |
5256 | node = isl_schedule_node_graft_before(node, graft); |
5257 | |
5258 | space = isl_space_set_alloc(ctx, 0, 0); |
5259 | space = isl_space_set_tuple_name(space, isl_dim_set, "clear_device"); |
5260 | domain = isl_union_set_from_set(isl_set_universe(space)); |
5261 | graft = isl_schedule_node_from_domain(domain); |
5262 | |
5263 | node = isl_schedule_node_graft_after(node, graft); |
5264 | |
5265 | return node; |
5266 | } |
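/* The grafted domains are zero-dimensional universes, i.e.,
 * { init_device[] } and { clear_device[] }; the printer is assumed
 * to turn these statements into the actual device setup and
 * teardown code.
 */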
5267 | |
5268 | /* Update "schedule" for mapping to a GPU device. |
5269 | * |
5270 | * In particular, insert a context node, create kernels for |
5271 | * each outermost tilable band and introduce nodes for copying arrays |
5272 | * in and out of the device and for initializing and clearing the device. |
5273 | * If the child of the initial root points to a set node, |
5274 | * then children of this node that do not contain any tilable bands |
5275 | * are separated from the other children and are not mapped to |
5276 | * the device. |
5277 | * |
5278 | * The GPU code is generated in a context where at least one |
5279 | * statement instance is executed. The corresponding guard is inserted |
5280 | * around the entire schedule. |
5281 | */ |
5282 | __isl_give isl_schedule *map_to_device(struct gpu_gen *gen, |
5283 | __isl_take isl_schedule *schedule, int to_from_device) |
5284 | { |
5285 | isl_schedule_node *node; |
5286 | isl_set *context; |
5287 | isl_set *guard; |
5288 | isl_union_set *domain; |
5289 | isl_union_map *prefix; |
5290 | isl_union_pw_multi_aff *contraction; |
5291 | struct gpu_prog *prog; |
5292 | |
5293 | context = isl_set_copy(gen->prog->context); |
5294 | context = isl_set_from_params(context); |
5295 | schedule = isl_schedule_insert_context(schedule, context); |
5296 | |
5297 | prog = gen->prog; |
5298 | guard = isl_union_set_params(isl_union_set_copy(prog->scop->domain)); |
5299 | prog->context = isl_set_intersect(prog->context, isl_set_copy(guard)); |
5300 | guard = isl_set_from_params(guard); |
5301 | |
5302 | node = isl_schedule_get_root(schedule); |
5303 | isl_schedule_free(schedule); |
5304 | node = isl_schedule_node_child(node, 0); |
5305 | node = isl_schedule_node_child(node, 0); |
5306 | node = isolate_permutable_subtrees(node, gen->prog); |
5307 | domain = isl_schedule_node_get_domain(node); |
5308 | contraction = isl_schedule_node_get_subtree_contraction(node); |
5309 | domain = isl_union_set_preimage_union_pw_multi_aff(domain, |
5310 | isl_union_pw_multi_aff_copy(contraction)); |
5311 | prefix = isl_schedule_node_get_prefix_schedule_union_map(node); |
5312 | prefix = isl_union_map_preimage_domain_union_pw_multi_aff(prefix, |
5313 | contraction); |
5314 | node = mark_kernels(gen, node); |
5315 | if (to_from_device) { |
5316 | node = add_to_from_device(node, domain, prefix, gen->prog); |
5317 | } else { |
5318 | isl_union_set_free(domain); |
5319 | isl_union_map_free(prefix); |
5320 | } |
5321 | node = isl_schedule_node_root(node); |
5322 | node = isl_schedule_node_child(node, 0); |
5323 | node = isl_schedule_node_child(node, 0); |
5324 | node = isl_schedule_node_insert_guard(node, guard); |
5325 | node = isl_schedule_node_child(node, 0); |
5326 | node = add_init_clear_device(node); |
5327 | schedule = isl_schedule_node_get_schedule(node); |
5328 | isl_schedule_node_free(node); |
5329 | |
5330 | return schedule; |
5331 | } |
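/* Illustrative shape of the resulting schedule tree (simplified
 * sketch, assuming "to_from_device" is set):
 *
 *	context
 *	  guard
 *	    sequence
 *	      init_device
 *	      to_device copies
 *	      kernel subtrees
 *	      from_device copies
 *	      clear_device
 *
 * The exact nesting depends on how the graft operations merge
 * adjacent sequence nodes.
 */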
5332 | |
5333 | /* Internal data structure for extract_access. |
5334 | * "next_access" points to the end of a linked list that is extended |
5335 | * by extract_access. |
5336 | * "single_expression" is set if the access expressions belong to |
5337 | * an expression statement (i.e., a statement without internal control). |
5338 | * "any_to_outer" maps all intermediate arrays to their outer arrays. |
5339 | */ |
5340 | struct ppcg_extract_access_data { |
5341 | struct gpu_stmt_access **next_access; |
5342 | int single_expression; |
5343 | isl_union_map *any_to_outer; |
5344 | }; |
5345 | |
5346 | /* Given a tagged access relation to a single array "tagged", extract it |
5347 | * as a map, taking into account that the input may be empty. |
5348 | * If the access relation is empty, then it does not contain |
5349 | * any space information, so we try to recover it from the index |
5350 | * expression. |
5351 | * The space of the index expression is of the form I -> A, |
5352 | * with I the statement instances and A the array, or [I -> F] -> A, |
5353 | * with F the filters corresponding to arguments. |
5354 | * We first drop F, if present, obtaining I -> A. |
5355 | * Then we construct I -> R, with R the reference tag, |
5356 | * combine the two into I -> [R -> A] and uncurry to obtain |
5357 | * the final result [I -> R] -> A. |
5358 | * Note that the index expression may have a lower dimension |
5359 | * than that of the array, but this dimension is not used |
5360 | * if the access relation is empty. |
5361 | */ |
5362 | static __isl_give isl_map *extract_single_tagged_access( |
5363 | __isl_take isl_union_map *tagged, __isl_keep pet_expr *expr) |
5364 | { |
5365 | int empty; |
5366 | isl_id *id; |
5367 | isl_space *space, *space2; |
5368 | isl_multi_pw_aff *index; |
5369 | |
5370 | empty = isl_union_map_is_empty(tagged); |
5371 | if (empty < 0) |
5372 | goto error; |
5373 | if (!empty) |
5374 | return isl_map_from_union_map(tagged); |
5375 | isl_union_map_free(tagged); |
5376 | |
5377 | index = pet_expr_access_get_index(expr); |
5378 | space = isl_multi_pw_aff_get_space(index); |
5379 | isl_multi_pw_aff_free(index); |
5380 | if (isl_space_domain_is_wrapping(space)) |
5381 | space = isl_space_domain_factor_domain(space); |
5382 | space2 = isl_space_copy(space);
5383 | space2 = isl_space_from_domain(isl_space_domain(space2));
5384 | id = pet_expr_access_get_ref_id(expr); |
5385 | space2 = isl_space_set_tuple_id(space2, isl_dim_out, id); |
5386 | space = isl_space_range_product(space2, space); |
5387 | space = isl_space_uncurry(space); |
5388 | |
5389 | return isl_map_empty(space); |
5390 | error: |
5391 | isl_union_map_free(tagged); |
5392 | return NULL;
5393 | } |
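/* Worked example (illustrative, not from the source): for an index
 * expression with space { S[i] -> A[a] } and reference identifier
 * __pet_ref_0, the spaces constructed above are
 *
 *	space2:		{ S[i] -> __pet_ref_0[] }
 *	range product:	{ S[i] -> [__pet_ref_0[] -> A[a]] }
 *	uncurried:	{ [S[i] -> __pet_ref_0[]] -> A[a] }
 *
 * so the returned empty map lives in the tagged access space.
 */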
5394 | |
5395 | /* Does the index expression "index" of "expr" represent an access |
5396 | * to a single element? |
5397 | * That is, is "index" completely specified? |
5398 | * |
5399 | * If "expr" accesses elements from different spaces (i.e., fields |
5400 | * of a structure), then it does not access a single element. |
5401 | * Otherwise, if the single space of the access matches the space |
5402 | * of "index", then the index expression is completely specified |
5403 | * (no pointer to a lower-dimensional slice of the accessed array) |
5404 | * and a single element is being accessed. |
5405 | */ |
5406 | static isl_bool complete_index(__isl_keep pet_expr *expr, |
5407 | __isl_keep isl_multi_pw_aff *index) |
5408 | { |
5409 | isl_union_map *read, *write, *all; |
5410 | isl_map *map; |
5411 | isl_space *space1, *space2; |
5412 | isl_bool complete; |
5413 | |
5414 | read = pet_expr_access_get_may_read(expr); |
5415 | write = pet_expr_access_get_may_write(expr); |
5416 | all = isl_union_map_union(read, write); |
5417 | if (!all) |
5418 | return isl_bool_error; |
5419 | if (isl_union_map_n_map(all) != 1) { |
5420 | isl_union_map_free(all); |
5421 | return isl_bool_false; |
5422 | } |
5423 | map = isl_map_from_union_map(all); |
5424 | space1 = isl_map_get_space(map); |
5425 | isl_map_free(map); |
5426 | space2 = isl_multi_pw_aff_get_space(index); |
5427 | complete = isl_space_tuple_is_equal(space1, isl_dim_out, |
5428 | space2, isl_dim_out); |
5429 | isl_space_free(space1); |
5430 | isl_space_free(space2); |
5431 | |
5432 | return complete; |
5433 | } |
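/* Worked example (illustrative, not from the source): for
 * "int A[10][10]", an access A[i][j] has an index expression with
 * space { S[i,j] -> A[a,b] }, matching the space of the access
 * relation, so the index is complete.  An access that only computes
 * the slice A[i] has a one-dimensional index expression into the
 * two-dimensional array; the output tuples then differ in dimension
 * and complete_index returns false.
 */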
5434 | |
5435 | /* Does "expr" access a single, fixed element (independently of the statement |
5436 | * instance)? |
5437 | * That is, does it have a completely specified constant index expression? |
5438 | * |
5439 | * Note that it is not sufficient for the index expression to be |
5440 | * piecewise constant. isl_multi_pw_aff_is_cst can therefore not be used. |
5441 | */ |
5442 | static isl_bool accesses_fixed_element(__isl_keep pet_expr *expr) |
5443 | { |
5444 | int i, n; |
5445 | isl_multi_pw_aff *index; |
5446 | isl_bool fixed = isl_bool_true; |
5447 | |
5448 | index = pet_expr_access_get_index(expr); |
5449 | if (!index)
5450 | return isl_bool_error; |
5451 | n = isl_multi_pw_aff_dim(index, isl_dim_out); |
5452 | for (i = 0; i < n; ++i) { |
5453 | isl_pw_aff *pa; |
5454 | |
5455 | pa = isl_multi_pw_aff_get_pw_aff(index, i);
5456 | fixed = isl_pw_aff_n_piece(pa) == 1; |
5457 | if (fixed) |
5458 | fixed = isl_pw_aff_is_cst(pa); |
5459 | isl_pw_aff_free(pa); |
5460 | if (fixed < 0 || !fixed) |
5461 | break; |
5462 | } |
5463 | if (fixed >= 0 && fixed) |
5464 | fixed = complete_index(expr, index); |
5465 | isl_multi_pw_aff_free(index); |
5466 | |
5467 | return fixed; |
5468 | } |
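/* Worked examples (illustrative, not from the source), for accesses
 * in a statement S[i]:
 *
 *	A[5]			fixed: constant, single piece, complete
 *	A[i]			not fixed: depends on the instance
 *	A[i > 5 ? 0 : 1]	not fixed: only piecewise constant
 *
 * The last case shows why isl_multi_pw_aff_is_cst would not suffice.
 */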
5469 | |
5470 | /* Extract a gpu_stmt_access from "expr", append it to the list |
5471 | * that ends in *data->next_access and update the end of the list. |
5472 | * If the access expression performs a write, then it is considered |
5473 | * exact only if it appears in a single expression statement and |
5474 | * if its may access relation is equal to its must access relation. |
5475 | * |
5476 | * The combined set of may accesses may be a union if member accesses |
5477 | * are involved, but the entire set is derived from a single reference and |
5478 | * therefore from a single index expression. These accesses therefore |
5479 | * all map to the same outer array. |
5480 | */ |
5481 | static int extract_access(__isl_keep pet_expr *expr, void *user) |
5482 | { |
5483 | struct ppcg_extract_access_data *data = user; |
5484 | isl_union_map *tagged; |
5485 | struct gpu_stmt_access *access; |
5486 | isl_ctx *ctx = pet_expr_get_ctx(expr); |
5487 | isl_multi_pw_aff *index; |
5488 | |
5489 | access = isl_alloc_type(ctx, struct gpu_stmt_access);
5490 | assert(access);
5491 | access->next = NULL;
5492 | access->read = pet_expr_access_is_read(expr); |
5493 | access->write = pet_expr_access_is_write(expr); |
5494 | tagged = pet_expr_access_get_tagged_may_read(expr); |
5495 | tagged = isl_union_map_union(tagged, |
5496 | pet_expr_access_get_tagged_may_write(expr)); |
5497 | tagged = isl_union_map_apply_range(tagged, |
5498 | isl_union_map_copy(data->any_to_outer)); |
5499 | if (!access->write) { |
5500 | access->exact_write = 1; |
5501 | } else if (!data->single_expression) { |
5502 | access->exact_write = 0; |
5503 | } else { |
5504 | isl_union_map *must, *may; |
5505 | may = isl_union_map_copy(tagged); |
5506 | may = isl_union_map_domain_factor_domain(may); |
5507 | must = pet_expr_access_get_must_write(expr); |
5508 | access->exact_write = isl_union_map_is_equal(must, may); |
5509 | isl_union_map_free(must); |
5510 | isl_union_map_free(may); |
5511 | } |
5512 | index = pet_expr_access_get_index(expr); |
5513 | access->n_index = isl_multi_pw_aff_dim(index, isl_dim_out); |
5514 | isl_multi_pw_aff_free(index); |
5515 | access->ref_id = pet_expr_access_get_ref_id(expr); |
5516 | access->tagged_access = extract_single_tagged_access(tagged, expr); |
5517 | access->access = isl_map_copy(access->tagged_access); |
5518 | access->access = isl_map_domain_factor_domain(access->access); |
5519 | access->fixed_element = accesses_fixed_element(expr); |
5520 | |
5521 | *data->next_access = access; |
5522 | data->next_access = &(*data->next_access)->next; |
5523 | |
5524 | if (!access->access || access->fixed_element < 0) |
5525 | return -1; |
5526 | |
5527 | return 0; |
5528 | } |
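/* Hypothetical helper (illustrative sketch, not part of ppcg):
 * walking the singly linked list that extract_access builds through
 * data->next_access, here to count the write accesses of a statement.
 */
static int count_write_accesses(struct gpu_stmt *stmt)
{
	int n = 0;
	struct gpu_stmt_access *access;

	for (access = stmt->accesses; access; access = access->next)
		if (access->write)
			++n;

	return n;
}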
5529 | |
5530 | /* Construct a linked list of gpu_stmt_access objects, |
5531 | * one for each access expression in the statement body. |
5532 | * "any_to_outer" maps all intermediate arrays to their outer arrays. |
5533 | */ |
5534 | static int pet_stmt_extract_accesses(struct gpu_stmt *stmt, |
5535 | __isl_keep isl_union_map *any_to_outer) |
5536 | { |
5537 | struct ppcg_extract_access_data data; |
5538 | |
5539 | stmt->accesses = NULL;
5540 | data.next_access = &stmt->accesses; |
5541 | data.single_expression = |
5542 | pet_tree_get_type(stmt->stmt->body) == pet_tree_expr; |
5543 | data.any_to_outer = any_to_outer; |
5544 | return pet_tree_foreach_access_expr(stmt->stmt->body, |
5545 | &extract_access, &data); |
5546 | } |
5547 | |
5548 | /* Has statement "stmt" been killed from "scop"? |
5549 | * That is, is the instance set of "scop" free from any |
5550 | * instances of "stmt"? |
5551 | */ |
5552 | static isl_bool is_stmt_killed(struct ppcg_scop *scop, struct pet_stmt *stmt) |
5553 | { |
5554 | isl_space *space; |
5555 | isl_set *left; |
5556 | isl_bool empty; |
5557 | |
5558 | if (!scop || !stmt) |
5559 | return isl_bool_error; |
5560 | space = isl_set_get_space(stmt->domain); |
5561 | left = isl_union_set_extract_set(scop->domain, space); |
5562 | empty = isl_set_plain_is_empty(left); |
5563 | isl_set_free(left); |
5564 | |
5565 | return empty; |
5566 | } |
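/* Worked example (illustrative, not from the source): if the scop
 * instance set is { S_0[i] : 0 <= i < 10 } and "stmt" has domain
 * space S_1, then isl_union_set_extract_set returns the empty set
 * in the space of S_1, so S_1 is considered killed.
 */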
5567 | |
5568 | /* Return an array of gpu_stmt representing the statements in "scop". |
5569 | * Do not collect array accesses for statements that have been killed. |
5570 | */ |
5571 | static struct gpu_stmt *extract_stmts(isl_ctx *ctx, struct ppcg_scop *scop, |
5572 | __isl_keep isl_union_map *any_to_outer) |
5573 | { |
5574 | int i; |
5575 | struct gpu_stmt *stmts; |
5576 | |
5577 | stmts = isl_calloc_array(ctx, struct gpu_stmt, scop->pet->n_stmt);
5578 | if (!stmts)
5579 | return NULL;
5580 | |
5581 | for (i = 0; i < scop->pet->n_stmt; ++i) { |
5582 | struct gpu_stmt *s = &stmts[i]; |
5583 | isl_bool killed; |
5584 | |
5585 | s->id = isl_set_get_tuple_id(scop->pet->stmts[i]->domain); |
5586 | s->stmt = scop->pet->stmts[i]; |
5587 | killed = is_stmt_killed(scop, scop->pet->stmts[i]); |
5588 | if (killed < 0) |
5589 | return free_stmts(stmts, i + 1); |
5590 | if (killed) |
5591 | continue; |
5592 | if (pet_stmt_extract_accesses(s, any_to_outer) < 0) |
5593 | return free_stmts(stmts, i + 1); |
5594 | } |
5595 | |
5596 | return stmts; |
5597 | } |
5598 | |
5599 | /* Generate CUDA code for "scop" and print it to "p". |
5600 | * After generating an AST for the transformed scop as explained below, |
5601 | * we call "gen->print" to print the AST in the desired output format |
5602 | * to "p". |
5603 | * |
5604 | * If it turns out that it does not make sense to generate GPU code, |
5605 | * then we generate CPU code instead. |
5606 | * |
5607 | * The declarations of the arrays that are visible outside of the scop |
5608 | * are printed outside of the code generated from the schedule, |
5609 | * because the generated code may involve a guard around the entire code. |
5610 | * |
5611 | * We first compute a schedule that respects the dependences |
5612 | * of the original program and select the outermost bands |
5613 | * of tilable dimensions that have at least one parallel loop. |
5614 | * If the --load-schedule option is specified, then the loaded schedule
5615 | * is used instead of a computed schedule. |
5616 | * |
5617 | * Each of these bands B is then tiled according to "tile" sizes, resulting |
5618 | * in two nested bands, with a kernel marker on top |
5619 | * |
5620 | * K |
5621 | * | |
5622 | * T |
5623 | * | |
5624 | * P |
5625 | * |
5626 | * We then split off at most 2 parallel dimensions from the T band and |
5627 | * at most 3 parallel dimensions from the P band
5628 | * |
5629 | * K |
5630 | * | |
5631 | * T |
5632 | * T1 |
5633 | * | |
5634 | * T2 |
5635 | * | |
5636 | * P1 |
5637 | * | |
5638 | * P2 |
5639 | * |
5640 | * A filter is introduced in front of T1 that maps the domain instances |
5641 | * to block identifiers. Similarly, a filter is introduced in front of P1 |
5642 | * that maps the domain instances to thread identifiers. |
5643 | * |
5644 | * For each iteration of the T2 band and for each array, we compute |
5645 | * the array elements accessed by that iteration, construct a rectangular |
5646 | * box around it and shift it to the origin. The result is used |
5647 | * as shared memory for the array. |
5648 | * |
5649 | * Copying and synchronization statements are added to this schedule tree. |
5650 | * In principle, these are added in front of the P1 band, but some of |
5651 | * them may get hoisted up to higher levels. |
5652 | * |
5653 | * The entire AST is then generated from the single resulting schedule tree. |
5654 | * During the generation the subtrees at kernel nodes (K) are saved |
5655 | * aside and replaced by kernel calls. The result is printed as host code |
5656 | * while the saved subtrees are printed as device code. |
5657 | */ |
5658 | static __isl_give isl_printer *generate(__isl_take isl_printer *p, |
5659 | struct gpu_gen *gen, struct ppcg_scop *scop, |
5660 | struct ppcg_options *options) |
5661 | { |
5662 | struct gpu_prog *prog; |
5663 | isl_ctx *ctx; |
5664 | isl_schedule *schedule; |
5665 | int any_permutable; |
5666 | |
5667 | if (!scop) |
5668 | return isl_printer_free(p); |
5669 | |
5670 | ctx = isl_printer_get_ctx(p); |
5671 | prog = gpu_prog_alloc(ctx, scop); |
5672 | if (!prog) |
5673 | return isl_printer_free(p); |
5674 | |
5675 | gen->prog = prog; |
5676 | schedule = get_schedule(gen); |
5677 | |
5678 | any_permutable = has_any_permutable_node(schedule); |
5679 | if (any_permutable < 0 || !any_permutable) { |
5680 | if (any_permutable < 0) |
5681 | p = isl_printer_free(p); |
5682 | else |
5683 | p = print_cpu(p, scop, options); |
5684 | isl_schedule_free(schedule); |
5685 | } else { |
5686 | const int create_to_from_device = 1; |
5687 | schedule = map_to_device(gen, schedule, create_to_from_device); |
5688 | gen->tree = generate_code(gen, schedule); |
5689 | p = ppcg_set_macro_names(p); |
5690 | p = ppcg_print_exposed_declarations(p, prog->scop); |
5691 | p = gen->print(p, gen->prog, gen->tree, &gen->types, |
5692 | gen->print_user); |
5693 | isl_ast_node_free(gen->tree); |
5694 | } |
5695 | |
5696 | gpu_prog_free(prog); |
5697 | |
5698 | return p; |
5699 | } |
5700 | |
5701 | /* Wrapper around generate for use as a ppcg_transform callback. |
5702 | */ |
5703 | static __isl_give isl_printer *generate_wrap(__isl_take isl_printer *p, |
5704 | struct ppcg_scop *scop, void *user) |
5705 | { |
5706 | struct gpu_gen *gen = user; |
5707 | |
5708 | return generate(p, gen, scop, gen->options); |
5709 | } |
5710 | |
5711 | /* Transform the code in the file called "input" by replacing |
5712 | * all scops by corresponding GPU code and write the results to "out". |
5713 | */ |
5714 | int generate_gpu(isl_ctx *ctx, const char *input, FILE *out, |
5715 | struct ppcg_options *options, |
5716 | __isl_give isl_printer *(*print)(__isl_take isl_printer *p, |
5717 | struct gpu_prog *prog, __isl_keep isl_ast_node *tree, |
5718 | struct gpu_types *types, void *user), void *user) |
5719 | { |
5720 | struct gpu_gen gen; |
5721 | int r; |
5722 | int i; |
5723 | |
5724 | gen.ctx = ctx; |
5725 | gen.sizes = extract_sizes_from_str(ctx, options->sizes); |
5726 | gen.options = options; |
5727 | gen.kernel_id = 0; |
5728 | gen.print = print; |
5729 | gen.print_user = user; |
5730 | gen.types.n = 0; |
5731 | gen.types.name = NULL;
5732 | |
5733 | if (options->debug->dump_sizes) { |
5734 | isl_space *space = isl_space_params_alloc(ctx, 0); |
5735 | gen.used_sizes = isl_union_map_empty(space); |
5736 | } |
5737 | |
5738 | r = ppcg_transform(ctx, input, out, options, &generate_wrap, &gen); |
5739 | |
5740 | if (options->debug->dump_sizes) { |
5741 | isl_union_map_dump(gen.used_sizes); |
5742 | isl_union_map_free(gen.used_sizes); |
5743 | } |
5744 | |
5745 | isl_union_map_free(gen.sizes); |
5746 | for (i = 0; i < gen.types.n; ++i) |
5747 | free(gen.types.name[i]); |
5748 | free(gen.types.name); |
5749 | |
5750 | return r; |
5751 | } |
5752 | |
5753 | /* Compute the set of inner array elements that may have their values |
5754 | * preserved by "prog". In particular, collect the array elements of |
5755 | * arrays that are not local to "prog" and remove those elements that |
5756 | * are definitely killed or definitely written by "prog". |
5757 | */ |
5758 | __isl_give isl_union_set *compute_may_persist(struct gpu_prog *prog) |
5759 | { |
5760 | int i; |
5761 | isl_union_set *may_persist, *killed; |
5762 | isl_union_map *must_kill; |
5763 | |
5764 | may_persist = isl_union_set_empty(isl_set_get_space(prog->context)); |
5765 | for (i = 0; i < prog->n_array; ++i) { |
5766 | isl_set *extent; |
5767 | |
5768 | if (prog->array[i].local) |
5769 | continue; |
5770 | |
5771 | extent = isl_set_copy(prog->array[i].extent); |
5772 | may_persist = isl_union_set_add_set(may_persist, extent); |
5773 | } |
5774 | |
5775 | may_persist = isl_union_set_intersect_params(may_persist, |
5776 | isl_set_copy(prog->context)); |
5777 | may_persist = isl_union_set_apply(may_persist, |
5778 | isl_union_map_copy(prog->to_inner)); |
5779 | must_kill = isl_union_map_copy(prog->tagged_must_kill); |
5780 | killed = isl_union_map_range(must_kill); |
5781 | must_kill = isl_union_map_copy(prog->must_write); |
5782 | killed = isl_union_set_union(killed, isl_union_map_range(must_kill)); |
5783 | |
5784 | may_persist = isl_union_set_subtract(may_persist, killed); |
5785 | return may_persist; |
5786 | } |
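/* Worked example (illustrative, not from the source): for a non-local
 * array "int A[10]", if the scop must-writes A[i] for 0 <= i < 5 and
 * kills nothing else, then the result is { A[a] : 5 <= a <= 9 }, the
 * elements whose original values may survive the scop.
 */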
5787 | |
5788 | struct gpu_prog *gpu_prog_alloc(isl_ctx *ctx, struct ppcg_scop *scop) |
5789 | { |
5790 | struct gpu_prog *prog; |
5791 | isl_space *space; |
5792 | isl_map *id; |
5793 | |
5794 | if (!scop) |
5795 | return NULL;
5796 | |
5797 | prog = isl_calloc_type(ctx, struct gpu_prog);
5798 | assert(prog);
5799 | |
5800 | prog->ctx = ctx; |
5801 | prog->scop = scop; |
5802 | prog->context = isl_set_copy(scop->context); |
5803 | prog->n_stmts = scop->pet->n_stmt; |
5804 | prog->any_to_outer = pet_scop_compute_outer_to_any(scop->pet); |
5805 | prog->any_to_outer = isl_union_map_reverse(prog->any_to_outer); |
5806 | space = isl_union_map_get_space(prog->any_to_outer); |
5807 | space = isl_space_set_from_params(space); |
5808 | space = isl_space_add_dims(space, isl_dim_set, 1); |
5809 | space = isl_space_map_from_set(space); |
5810 | id = isl_map_identity(space); |
5811 | prog->any_to_outer = isl_union_map_add_map(prog->any_to_outer, id); |
5812 | prog->stmts = extract_stmts(ctx, scop, prog->any_to_outer); |
5813 | prog->read = isl_union_map_copy(scop->reads); |
5814 | prog->may_write = isl_union_map_copy(scop->may_writes); |
5815 | prog->must_write = isl_union_map_copy(scop->must_writes); |
5816 | prog->tagged_must_kill = isl_union_map_copy(scop->tagged_must_kills); |
5817 | prog->to_inner = pet_scop_compute_outer_to_inner(scop->pet); |
5818 | prog->to_outer = isl_union_map_copy(prog->to_inner); |
5819 | prog->to_outer = isl_union_map_reverse(prog->to_outer); |
5820 | |
5821 | if (!prog->stmts) |
5822 | return gpu_prog_free(prog); |
5823 | |
5824 | if (collect_array_info(prog) < 0) |
5825 | return gpu_prog_free(prog); |
5826 | prog->may_persist = compute_may_persist(prog); |
5827 | |
5828 | return prog; |
5829 | } |
5830 | |
5831 | void *gpu_prog_free(struct gpu_prog *prog) |
5832 | { |
5833 | if (!prog) |
5834 | return NULL;
5835 | free_array_info(prog); |
5836 | free_stmts(prog->stmts, prog->n_stmts); |
5837 | isl_union_map_free(prog->any_to_outer); |
5838 | isl_union_map_free(prog->to_outer); |
5839 | isl_union_map_free(prog->to_inner); |
5840 | isl_union_map_free(prog->read); |
5841 | isl_union_map_free(prog->may_write); |
5842 | isl_union_map_free(prog->must_write); |
5843 | isl_union_map_free(prog->tagged_must_kill); |
5844 | isl_union_map_free(prog->array_order); |
5845 | isl_union_set_free(prog->may_persist); |
5846 | isl_set_free(prog->context); |
5847 | free(prog); |
5848 | return NULL;
5849 | } |