1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
7 Free Software Foundation, Inc.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "gimple.h"
32 #include "tree-iterator.h"
33 #include "tree-inline.h"
34 #include "langhooks.h"
35 #include "diagnostic-core.h"
36 #include "tree-flow.h"
37 #include "timevar.h"
38 #include "flags.h"
39 #include "function.h"
40 #include "expr.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44 #include "splay-tree.h"
45 #include "optabs.h"
46 #include "cfgloop.h"
47
48
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
54 expressions.
55
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
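/* As an illustrative sketch only (exact output depends on the clauses
   and on optimization level), a simple

       #pragma omp parallel
         body;

   is outlined into a child function (say foo._omp_fn.0) holding BODY,
   and the original site is expanded into roughly

       GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
       foo._omp_fn.0 (&.omp_data_o);
       GOMP_parallel_end ();

   where .omp_data_o is the record carrying the shared and firstprivate
   data described by the clauses.  */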
59
60 /* Context structure. Used to store information about each parallel
61 directive in the code. */
62
63 typedef struct omp_context
64 {
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
69 copy_body_data cb;
70
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
73 gimple stmt;
74
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
77 splay_tree field_map;
78 tree record_type;
79 tree sender_decl;
80 tree receiver_decl;
81
82 /* These are used just by task contexts, if the task firstprivate fn is
83 needed.  srecord_type is used to communicate from the thread
84 that encountered the task construct to the task firstprivate fn;
85 record_type is allocated by GOMP_task, initialized by the task
86 firstprivate fn and passed to the task body fn. */
87 splay_tree sfield_map;
88 tree srecord_type;
89
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
92 tree block_vars;
93
94 /* What to do with variables with implicitly determined sharing
95 attributes. */
96 enum omp_clause_default_kind default_kind;
97
98 /* Nesting depth of this context. Used to beautify error messages re
99 invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
101 int depth;
102
103 /* True if this parallel directive is nested within another. */
104 bool is_nested;
105 } omp_context;
106
107
108 struct omp_for_data_loop
109 {
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
112 };
113
114 /* A structure describing the main elements of a parallel loop. */
115
116 struct omp_for_data
117 {
118 struct omp_for_data_loop loop;
119 tree chunk_size;
120 gimple for_stmt;
121 tree pre, iter_type;
122 int collapse;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
126 };
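/* As a sketch of how these fields are filled in by extract_omp_for_data:
   for a canonical loop such as

       #pragma omp for
       for (i = 0; i < n; i += 4)

   fd.loop gets roughly v = i, n1 = 0, n2 = n, step = 4 and
   cond_code = LT_EXPR; loops written with <= or >= are canonicalized to
   < or > by adjusting n2 by one.  With a collapse clause the individual
   loops land in fd.loops[] and fd.loop then describes the single
   collapsed iteration space from 0 to the total iteration count.  */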
127
128
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
133
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
136
137 #define WALK_SUBSTMTS \
138 case GIMPLE_BIND: \
139 case GIMPLE_TRY: \
140 case GIMPLE_CATCH: \
141 case GIMPLE_EH_FILTER: \
142 case GIMPLE_TRANSACTION: \
143 /* The sub-statements for these should be walked. */ \
144 *handled_ops_p = false; \
145 break;
146
147 /* Convenience function for calling scan_omp_1_op on tree operands. */
148
149 static inline tree
150 scan_omp_op (tree *tp, omp_context *ctx)
151 {
152 struct walk_stmt_info wi;
153
154 memset (&wi, 0, sizeof (wi));
155 wi.info = ctx;
156 wi.want_locations = true;
157
158 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
159 }
160
161 static void lower_omp (gimple_seq, omp_context *);
162 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
163 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
164
165 /* Find an OpenMP clause of type KIND within CLAUSES. */
166
167 tree
168 find_omp_clause (tree clauses, enum omp_clause_code kind)
169 {
170 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
171 if (OMP_CLAUSE_CODE (clauses) == kind)
172 return clauses;
173
174 return NULL_TREE;
175 }
176
177 /* Return true if CTX is for an omp parallel. */
178
179 static inline bool
180 is_parallel_ctx (omp_context *ctx)
181 {
182 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
183 }
184
185
186 /* Return true if CTX is for an omp task. */
187
188 static inline bool
189 is_task_ctx (omp_context *ctx)
190 {
191 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
192 }
193
194
195 /* Return true if CTX is for an omp parallel or omp task. */
196
197 static inline bool
198 is_taskreg_ctx (omp_context *ctx)
199 {
200 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
201 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
202 }
203
204
205 /* Return true if REGION is a combined parallel+workshare region. */
206
207 static inline bool
208 is_combined_parallel (struct omp_region *region)
209 {
210 return region->is_combined_parallel;
211 }
212
213
214 /* Extract the header elements of parallel loop FOR_STMT and store
215 them into *FD. */
216
217 static void
218 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
219 struct omp_for_data_loop *loops)
220 {
221 tree t, var, *collapse_iter, *collapse_count;
222 tree count = NULL_TREE, iter_type = long_integer_type_node;
223 struct omp_for_data_loop *loop;
224 int i;
225 struct omp_for_data_loop dummy_loop;
226 location_t loc = gimple_location (for_stmt);
227
228 fd->for_stmt = for_stmt;
229 fd->pre = NULL;
230 fd->collapse = gimple_omp_for_collapse (for_stmt);
231 if (fd->collapse > 1)
232 fd->loops = loops;
233 else
234 fd->loops = &fd->loop;
235
236 fd->have_nowait = fd->have_ordered = false;
237 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
238 fd->chunk_size = NULL_TREE;
239 collapse_iter = NULL;
240 collapse_count = NULL;
241
242 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
243 switch (OMP_CLAUSE_CODE (t))
244 {
245 case OMP_CLAUSE_NOWAIT:
246 fd->have_nowait = true;
247 break;
248 case OMP_CLAUSE_ORDERED:
249 fd->have_ordered = true;
250 break;
251 case OMP_CLAUSE_SCHEDULE:
252 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
253 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
254 break;
255 case OMP_CLAUSE_COLLAPSE:
256 if (fd->collapse > 1)
257 {
258 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
259 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
260 }
261 default:
262 break;
263 }
264
265 /* FIXME: for now map schedule(auto) to schedule(static).
266 There should be analysis to determine whether all iterations
267 are approximately the same amount of work (then schedule(static)
268 is best) or if it varies (then schedule(dynamic,N) is better). */
269 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
270 {
271 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
272 gcc_assert (fd->chunk_size == NULL);
273 }
274 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
275 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
276 gcc_assert (fd->chunk_size == NULL);
277 else if (fd->chunk_size == NULL)
278 {
279 /* We only need to compute a default chunk size for ordered
280 static loops and dynamic loops. */
281 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
282 || fd->have_ordered
283 || fd->collapse > 1)
284 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
285 ? integer_zero_node : integer_one_node;
286 }
287
288 for (i = 0; i < fd->collapse; i++)
289 {
290 if (fd->collapse == 1)
291 loop = &fd->loop;
292 else if (loops != NULL)
293 loop = loops + i;
294 else
295 loop = &dummy_loop;
296
297
298 loop->v = gimple_omp_for_index (for_stmt, i);
299 gcc_assert (SSA_VAR_P (loop->v));
300 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
301 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
302 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
303 loop->n1 = gimple_omp_for_initial (for_stmt, i);
304
305 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
306 loop->n2 = gimple_omp_for_final (for_stmt, i);
307 switch (loop->cond_code)
308 {
309 case LT_EXPR:
310 case GT_EXPR:
311 break;
312 case LE_EXPR:
313 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
314 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
315 else
316 loop->n2 = fold_build2_loc (loc,
317 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
318 build_int_cst (TREE_TYPE (loop->n2), 1));
319 loop->cond_code = LT_EXPR;
320 break;
321 case GE_EXPR:
322 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
323 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
324 else
325 loop->n2 = fold_build2_loc (loc,
326 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
327 build_int_cst (TREE_TYPE (loop->n2), 1));
328 loop->cond_code = GT_EXPR;
329 break;
330 default:
331 gcc_unreachable ();
332 }
333
334 t = gimple_omp_for_incr (for_stmt, i);
335 gcc_assert (TREE_OPERAND (t, 0) == var);
336 switch (TREE_CODE (t))
337 {
338 case PLUS_EXPR:
339 case POINTER_PLUS_EXPR:
340 loop->step = TREE_OPERAND (t, 1);
341 break;
342 case MINUS_EXPR:
343 loop->step = TREE_OPERAND (t, 1);
344 loop->step = fold_build1_loc (loc,
345 NEGATE_EXPR, TREE_TYPE (loop->step),
346 loop->step);
347 break;
348 default:
349 gcc_unreachable ();
350 }
351
352 if (iter_type != long_long_unsigned_type_node)
353 {
354 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
355 iter_type = long_long_unsigned_type_node;
356 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
357 && TYPE_PRECISION (TREE_TYPE (loop->v))
358 >= TYPE_PRECISION (iter_type))
359 {
360 tree n;
361
362 if (loop->cond_code == LT_EXPR)
363 n = fold_build2_loc (loc,
364 PLUS_EXPR, TREE_TYPE (loop->v),
365 loop->n2, loop->step);
366 else
367 n = loop->n1;
368 if (TREE_CODE (n) != INTEGER_CST
369 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
370 iter_type = long_long_unsigned_type_node;
371 }
372 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
373 > TYPE_PRECISION (iter_type))
374 {
375 tree n1, n2;
376
377 if (loop->cond_code == LT_EXPR)
378 {
379 n1 = loop->n1;
380 n2 = fold_build2_loc (loc,
381 PLUS_EXPR, TREE_TYPE (loop->v),
382 loop->n2, loop->step);
383 }
384 else
385 {
386 n1 = fold_build2_loc (loc,
387 MINUS_EXPR, TREE_TYPE (loop->v),
388 loop->n2, loop->step);
389 n2 = loop->n1;
390 }
391 if (TREE_CODE (n1) != INTEGER_CST
392 || TREE_CODE (n2) != INTEGER_CST
393 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
394 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
395 iter_type = long_long_unsigned_type_node;
396 }
397 }
398
399 if (collapse_count && *collapse_count == NULL)
400 {
401 if ((i == 0 || count != NULL_TREE)
402 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
403 && TREE_CONSTANT (loop->n1)
404 && TREE_CONSTANT (loop->n2)
405 && TREE_CODE (loop->step) == INTEGER_CST)
406 {
407 tree itype = TREE_TYPE (loop->v);
408
409 if (POINTER_TYPE_P (itype))
410 itype
411 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
412 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
413 t = fold_build2_loc (loc,
414 PLUS_EXPR, itype,
415 fold_convert_loc (loc, itype, loop->step), t);
416 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
417 fold_convert_loc (loc, itype, loop->n2));
418 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
419 fold_convert_loc (loc, itype, loop->n1));
420 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
421 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
422 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
423 fold_build1_loc (loc, NEGATE_EXPR, itype,
424 fold_convert_loc (loc, itype,
425 loop->step)));
426 else
427 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
428 fold_convert_loc (loc, itype, loop->step));
429 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
430 if (count != NULL_TREE)
431 count = fold_build2_loc (loc,
432 MULT_EXPR, long_long_unsigned_type_node,
433 count, t);
434 else
435 count = t;
436 if (TREE_CODE (count) != INTEGER_CST)
437 count = NULL_TREE;
438 }
439 else
440 count = NULL_TREE;
441 }
442 }
443
444 if (count)
445 {
446 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
447 iter_type = long_long_unsigned_type_node;
448 else
449 iter_type = long_integer_type_node;
450 }
451 else if (collapse_iter && *collapse_iter != NULL)
452 iter_type = TREE_TYPE (*collapse_iter);
453 fd->iter_type = iter_type;
454 if (collapse_iter && *collapse_iter == NULL)
455 *collapse_iter = create_tmp_var (iter_type, ".iter");
456 if (collapse_count && *collapse_count == NULL)
457 {
458 if (count)
459 *collapse_count = fold_convert_loc (loc, iter_type, count);
460 else
461 *collapse_count = create_tmp_var (iter_type, ".count");
462 }
463
464 if (fd->collapse > 1)
465 {
466 fd->loop.v = *collapse_iter;
467 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
468 fd->loop.n2 = *collapse_count;
469 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
470 fd->loop.cond_code = LT_EXPR;
471 }
472 }
473
474
475 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
476 is the immediate dominator of PAR_ENTRY_BB, return true if there
477 are no data dependencies that would prevent expanding the parallel
478 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
479
480 When expanding a combined parallel+workshare region, the call to
481 the child function may need additional arguments in the case of
482 GIMPLE_OMP_FOR regions. In some cases, these arguments are
483 computed out of variables passed in from the parent to the child
484 via 'struct .omp_data_s'. For instance:
485
486 #pragma omp parallel for schedule (guided, i * 4)
487 for (j ...)
488
489 Is lowered into:
490
491 # BLOCK 2 (PAR_ENTRY_BB)
492 .omp_data_o.i = i;
493 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
494
495 # BLOCK 3 (WS_ENTRY_BB)
496 .omp_data_i = &.omp_data_o;
497 D.1667 = .omp_data_i->i;
498 D.1598 = D.1667 * 4;
499 #pragma omp for schedule (guided, D.1598)
500
501 When we outline the parallel region, the call to the child function
502 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
503 that value is computed *after* the call site. So, in principle we
504 cannot do the transformation.
505
506 To see whether the code in WS_ENTRY_BB blocks the combined
507 parallel+workshare call, we collect all the variables used in the
508 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
509 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
510 call.
511
512 FIXME. If we had the SSA form built at this point, we could merely
513 hoist the code in block 3 into block 2 and be done with it. But at
514 this point we don't have dataflow information and though we could
515 hack something up here, it is really not worth the aggravation. */
516
517 static bool
518 workshare_safe_to_combine_p (basic_block ws_entry_bb)
519 {
520 struct omp_for_data fd;
521 gimple ws_stmt = last_stmt (ws_entry_bb);
522
523 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
524 return true;
525
526 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
527
528 extract_omp_for_data (ws_stmt, &fd, NULL);
529
530 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
531 return false;
532 if (fd.iter_type != long_integer_type_node)
533 return false;
534
535 /* FIXME. We give up too easily here. If any of these arguments
536 are not constants, they will likely involve variables that have
537 been mapped into fields of .omp_data_s for sharing with the child
538 function. With appropriate data flow, it would be possible to
539 see through this. */
540 if (!is_gimple_min_invariant (fd.loop.n1)
541 || !is_gimple_min_invariant (fd.loop.n2)
542 || !is_gimple_min_invariant (fd.loop.step)
543 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
544 return false;
545
546 return true;
547 }
548
549
550 /* Collect additional arguments needed to emit a combined
551 parallel+workshare call. WS_STMT is the workshare directive being
552 expanded. */
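/* For example (a sketch only), given the combined region

       #pragma omp parallel for schedule (dynamic, 8)
       for (i = 0; i < n; i++)

   the returned vector holds roughly { 0, n, 1, 8 }: the loop bounds and
   step converted to long plus the chunk size, which expansion later
   appends to the combined GOMP_parallel_loop_*_start call.  For a
   sections region it holds just the number of sections.  */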
553
554 static VEC(tree,gc) *
555 get_ws_args_for (gimple ws_stmt)
556 {
557 tree t;
558 location_t loc = gimple_location (ws_stmt);
559 VEC(tree,gc) *ws_args;
560
561 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
562 {
563 struct omp_for_data fd;
564
565 extract_omp_for_data (ws_stmt, &fd, NULL);
566
567 ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));
568
569 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
570 VEC_quick_push (tree, ws_args, t);
571
572 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
573 VEC_quick_push (tree, ws_args, t);
574
575 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
576 VEC_quick_push (tree, ws_args, t);
577
578 if (fd.chunk_size)
579 {
580 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
581 VEC_quick_push (tree, ws_args, t);
582 }
583
584 return ws_args;
585 }
586 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
587 {
588 /* Number of sections is equal to the number of edges from the
589 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
590 the exit of the sections region. */
591 basic_block bb = single_succ (gimple_bb (ws_stmt));
592 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
593 ws_args = VEC_alloc (tree, gc, 1);
594 VEC_quick_push (tree, ws_args, t);
595 return ws_args;
596 }
597
598 gcc_unreachable ();
599 }
600
601
602 /* Discover whether REGION is a combined parallel+workshare region. */
603
604 static void
605 determine_parallel_type (struct omp_region *region)
606 {
607 basic_block par_entry_bb, par_exit_bb;
608 basic_block ws_entry_bb, ws_exit_bb;
609
610 if (region == NULL || region->inner == NULL
611 || region->exit == NULL || region->inner->exit == NULL
612 || region->inner->cont == NULL)
613 return;
614
615 /* We only support parallel+for and parallel+sections. */
616 if (region->type != GIMPLE_OMP_PARALLEL
617 || (region->inner->type != GIMPLE_OMP_FOR
618 && region->inner->type != GIMPLE_OMP_SECTIONS))
619 return;
620
621 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
622 WS_EXIT_BB -> PAR_EXIT_BB. */
623 par_entry_bb = region->entry;
624 par_exit_bb = region->exit;
625 ws_entry_bb = region->inner->entry;
626 ws_exit_bb = region->inner->exit;
627
628 if (single_succ (par_entry_bb) == ws_entry_bb
629 && single_succ (ws_exit_bb) == par_exit_bb
630 && workshare_safe_to_combine_p (ws_entry_bb)
631 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
632 || (last_and_only_stmt (ws_entry_bb)
633 && last_and_only_stmt (par_exit_bb))))
634 {
635 gimple ws_stmt = last_stmt (ws_entry_bb);
636
637 if (region->inner->type == GIMPLE_OMP_FOR)
638 {
639 /* If this is a combined parallel loop, we need to determine
640 whether or not to use the combined library calls. There
641 are two cases where we do not apply the transformation:
642 static loops and any kind of ordered loop. In the first
643 case, we already open code the loop so there is no need
644 to do anything else. In the latter case, the combined
645 parallel loop call would still need extra synchronization
646 to implement ordered semantics, so there would not be any
647 gain in using the combined call. */
648 tree clauses = gimple_omp_for_clauses (ws_stmt);
649 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
650 if (c == NULL
651 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
652 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
653 {
654 region->is_combined_parallel = false;
655 region->inner->is_combined_parallel = false;
656 return;
657 }
658 }
659
660 region->is_combined_parallel = true;
661 region->inner->is_combined_parallel = true;
662 region->ws_args = get_ws_args_for (ws_stmt);
663 }
664 }
665
666
667 /* Return true if EXPR is variable sized. */
668
669 static inline bool
670 is_variable_sized (const_tree expr)
671 {
672 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
673 }
674
675 /* Return true if DECL is a reference type. */
676
677 static inline bool
678 is_reference (tree decl)
679 {
680 return lang_hooks.decls.omp_privatize_by_reference (decl);
681 }
682
683 /* Lookup variables in the decl or field splay trees.  The "maybe" form
684 allows the variable to not have been entered; otherwise we assert
685 that the variable has been entered. */
686
687 static inline tree
688 lookup_decl (tree var, omp_context *ctx)
689 {
690 tree *n;
691 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
692 return *n;
693 }
694
695 static inline tree
696 maybe_lookup_decl (const_tree var, omp_context *ctx)
697 {
698 tree *n;
699 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
700 return n ? *n : NULL_TREE;
701 }
702
703 static inline tree
704 lookup_field (tree var, omp_context *ctx)
705 {
706 splay_tree_node n;
707 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
708 return (tree) n->value;
709 }
710
711 static inline tree
712 lookup_sfield (tree var, omp_context *ctx)
713 {
714 splay_tree_node n;
715 n = splay_tree_lookup (ctx->sfield_map
716 ? ctx->sfield_map : ctx->field_map,
717 (splay_tree_key) var);
718 return (tree) n->value;
719 }
720
721 static inline tree
722 maybe_lookup_field (tree var, omp_context *ctx)
723 {
724 splay_tree_node n;
725 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
726 return n ? (tree) n->value : NULL_TREE;
727 }
728
729 /* Return true if DECL should be copied by pointer. SHARED_CTX is
730 the parallel context if DECL is to be shared. */
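/* For instance (an illustrative sketch): given

       int x = 0;
       #pragma omp parallel shared (x)

   a local scalar whose address never escapes can use copy-in/copy-out
   through the .omp_data_s record, so this returns false.  Aggregates,
   addressable variables, variables visible from outer scopes, and
   anything a deferred task might still reference after GOMP_task
   returns must be passed by pointer instead, so this returns true.  */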
731
732 static bool
733 use_pointer_for_field (tree decl, omp_context *shared_ctx)
734 {
735 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
736 return true;
737
738 /* We can only use copy-in/copy-out semantics for shared variables
739 when we know the value is not accessible from an outer scope. */
740 if (shared_ctx)
741 {
742 /* ??? Trivially accessible from anywhere. But why would we even
743 be passing an address in this case? Should we simply assert
744 this to be false, or should we have a cleanup pass that removes
745 these from the list of mappings? */
746 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
747 return true;
748
749 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
750 without analyzing the expression whether or not its location
751 is accessible to anyone else. In the case of nested parallel
752 regions it certainly may be. */
753 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
754 return true;
755
756 /* Do not use copy-in/copy-out for variables that have their
757 address taken. */
758 if (TREE_ADDRESSABLE (decl))
759 return true;
760
761 /* lower_send_shared_vars only uses copy-in, but not copy-out
762 for these. */
763 if (TREE_READONLY (decl)
764 || ((TREE_CODE (decl) == RESULT_DECL
765 || TREE_CODE (decl) == PARM_DECL)
766 && DECL_BY_REFERENCE (decl)))
767 return false;
768
769 /* Disallow copy-in/out in nested parallel if
770 decl is shared in outer parallel, otherwise
771 each thread could store the shared variable
772 in its own copy-in location, making the
773 variable no longer really shared. */
774 if (shared_ctx->is_nested)
775 {
776 omp_context *up;
777
778 for (up = shared_ctx->outer; up; up = up->outer)
779 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
780 break;
781
782 if (up)
783 {
784 tree c;
785
786 for (c = gimple_omp_taskreg_clauses (up->stmt);
787 c; c = OMP_CLAUSE_CHAIN (c))
788 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
789 && OMP_CLAUSE_DECL (c) == decl)
790 break;
791
792 if (c)
793 goto maybe_mark_addressable_and_ret;
794 }
795 }
796
797 /* For tasks avoid using copy-in/out. As tasks can be
798 deferred or executed in different thread, when GOMP_task
799 returns, the task hasn't necessarily terminated. */
800 if (is_task_ctx (shared_ctx))
801 {
802 tree outer;
803 maybe_mark_addressable_and_ret:
804 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
805 if (is_gimple_reg (outer))
806 {
807 /* Taking address of OUTER in lower_send_shared_vars
808 might need regimplification of everything that uses the
809 variable. */
810 if (!task_shared_vars)
811 task_shared_vars = BITMAP_ALLOC (NULL);
812 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
813 TREE_ADDRESSABLE (outer) = 1;
814 }
815 return true;
816 }
817 }
818
819 return false;
820 }
821
822 /* Create a new VAR_DECL and copy information from VAR to it. */
823
824 tree
825 copy_var_decl (tree var, tree name, tree type)
826 {
827 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
828
829 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
830 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
831 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
832 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
833 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
834 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
835 TREE_USED (copy) = 1;
836 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
837
838 return copy;
839 }
840
841 /* Construct a new automatic decl similar to VAR. */
842
843 static tree
844 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
845 {
846 tree copy = copy_var_decl (var, name, type);
847
848 DECL_CONTEXT (copy) = current_function_decl;
849 DECL_CHAIN (copy) = ctx->block_vars;
850 ctx->block_vars = copy;
851
852 return copy;
853 }
854
855 static tree
856 omp_copy_decl_1 (tree var, omp_context *ctx)
857 {
858 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
859 }
860
861 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
862 as appropriate. */
863 static tree
864 omp_build_component_ref (tree obj, tree field)
865 {
866 tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
867 if (TREE_THIS_VOLATILE (field))
868 TREE_THIS_VOLATILE (ret) |= 1;
869 if (TREE_READONLY (field))
870 TREE_READONLY (ret) |= 1;
871 return ret;
872 }
873
874 /* Build tree nodes to access the field for VAR on the receiver side. */
875
876 static tree
877 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
878 {
879 tree x, field = lookup_field (var, ctx);
880
881 /* If the receiver record type was remapped in the child function,
882 remap the field into the new record type. */
883 x = maybe_lookup_field (field, ctx);
884 if (x != NULL)
885 field = x;
886
887 x = build_simple_mem_ref (ctx->receiver_decl);
888 x = omp_build_component_ref (x, field);
889 if (by_ref)
890 x = build_simple_mem_ref (x);
891
892 return x;
893 }
894
895 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
896 of a parallel, this is a component reference; for workshare constructs
897 this is some variable. */
898
899 static tree
900 build_outer_var_ref (tree var, omp_context *ctx)
901 {
902 tree x;
903
904 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
905 x = var;
906 else if (is_variable_sized (var))
907 {
908 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
909 x = build_outer_var_ref (x, ctx);
910 x = build_simple_mem_ref (x);
911 }
912 else if (is_taskreg_ctx (ctx))
913 {
914 bool by_ref = use_pointer_for_field (var, NULL);
915 x = build_receiver_ref (var, by_ref, ctx);
916 }
917 else if (ctx->outer)
918 x = lookup_decl (var, ctx->outer);
919 else if (is_reference (var))
920 /* This can happen with orphaned constructs. If var is reference, it is
921 possible it is shared and as such valid. */
922 x = var;
923 else
924 gcc_unreachable ();
925
926 if (is_reference (var))
927 x = build_simple_mem_ref (x);
928
929 return x;
930 }
931
932 /* Build tree nodes to access the field for VAR on the sender side. */
933
934 static tree
935 build_sender_ref (tree var, omp_context *ctx)
936 {
937 tree field = lookup_sfield (var, ctx);
938 return omp_build_component_ref (ctx->sender_decl, field);
939 }
940
941 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
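/* As a sketch: for "#pragma omp parallel shared (a) firstprivate (b)",
   where A ends up passed by reference, CTX->RECORD_TYPE looks roughly
   like

       struct .omp_data_s { int *a; int b; };

   The parent fills the sender object (.omp_data_o) with &a and the value
   of b, and the child reads them back through CTX->RECEIVER_DECL
   (.omp_data_i).  MASK selects where the field goes: 1 for the receiver
   record, 2 for the task-only sender record (CTX->SRECORD_TYPE), 3 for
   both.  */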
942
943 static void
944 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
945 {
946 tree field, type, sfield = NULL_TREE;
947
948 gcc_assert ((mask & 1) == 0
949 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
950 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
951 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
952
953 type = TREE_TYPE (var);
954 if (by_ref)
955 type = build_pointer_type (type);
956 else if ((mask & 3) == 1 && is_reference (var))
957 type = TREE_TYPE (type);
958
959 field = build_decl (DECL_SOURCE_LOCATION (var),
960 FIELD_DECL, DECL_NAME (var), type);
961
962 /* Remember what variable this field was created for. This does have a
963 side effect of making dwarf2out ignore this member, so for helpful
964 debugging we clear it later in delete_omp_context. */
965 DECL_ABSTRACT_ORIGIN (field) = var;
966 if (type == TREE_TYPE (var))
967 {
968 DECL_ALIGN (field) = DECL_ALIGN (var);
969 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
970 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
971 }
972 else
973 DECL_ALIGN (field) = TYPE_ALIGN (type);
974
975 if ((mask & 3) == 3)
976 {
977 insert_field_into_struct (ctx->record_type, field);
978 if (ctx->srecord_type)
979 {
980 sfield = build_decl (DECL_SOURCE_LOCATION (var),
981 FIELD_DECL, DECL_NAME (var), type);
982 DECL_ABSTRACT_ORIGIN (sfield) = var;
983 DECL_ALIGN (sfield) = DECL_ALIGN (field);
984 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
985 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
986 insert_field_into_struct (ctx->srecord_type, sfield);
987 }
988 }
989 else
990 {
991 if (ctx->srecord_type == NULL_TREE)
992 {
993 tree t;
994
995 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
996 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
997 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
998 {
999 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1000 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
1001 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
1002 insert_field_into_struct (ctx->srecord_type, sfield);
1003 splay_tree_insert (ctx->sfield_map,
1004 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
1005 (splay_tree_value) sfield);
1006 }
1007 }
1008 sfield = field;
1009 insert_field_into_struct ((mask & 1) ? ctx->record_type
1010 : ctx->srecord_type, field);
1011 }
1012
1013 if (mask & 1)
1014 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
1015 (splay_tree_value) field);
1016 if ((mask & 2) && ctx->sfield_map)
1017 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1018 (splay_tree_value) sfield);
1019 }
1020
1021 static tree
1022 install_var_local (tree var, omp_context *ctx)
1023 {
1024 tree new_var = omp_copy_decl_1 (var, ctx);
1025 insert_decl_map (&ctx->cb, var, new_var);
1026 return new_var;
1027 }
1028
1029 /* Adjust the replacement for DECL in CTX for the new context. This means
1030 copying the DECL_VALUE_EXPR, and fixing up the type. */
1031
1032 static void
1033 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1034 {
1035 tree new_decl, size;
1036
1037 new_decl = lookup_decl (decl, ctx);
1038
1039 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1040
1041 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1042 && DECL_HAS_VALUE_EXPR_P (decl))
1043 {
1044 tree ve = DECL_VALUE_EXPR (decl);
1045 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1046 SET_DECL_VALUE_EXPR (new_decl, ve);
1047 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1048 }
1049
1050 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1051 {
1052 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1053 if (size == error_mark_node)
1054 size = TYPE_SIZE (TREE_TYPE (new_decl));
1055 DECL_SIZE (new_decl) = size;
1056
1057 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1058 if (size == error_mark_node)
1059 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1060 DECL_SIZE_UNIT (new_decl) = size;
1061 }
1062 }
1063
1064 /* The callback for remap_decl. Search all containing contexts for a
1065 mapping of the variable; this avoids having to duplicate the splay
1066 tree ahead of time. We know a mapping doesn't already exist in the
1067 given context. Create new mappings to implement default semantics. */
1068
1069 static tree
1070 omp_copy_decl (tree var, copy_body_data *cb)
1071 {
1072 omp_context *ctx = (omp_context *) cb;
1073 tree new_var;
1074
1075 if (TREE_CODE (var) == LABEL_DECL)
1076 {
1077 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1078 DECL_CONTEXT (new_var) = current_function_decl;
1079 insert_decl_map (&ctx->cb, var, new_var);
1080 return new_var;
1081 }
1082
1083 while (!is_taskreg_ctx (ctx))
1084 {
1085 ctx = ctx->outer;
1086 if (ctx == NULL)
1087 return var;
1088 new_var = maybe_lookup_decl (var, ctx);
1089 if (new_var)
1090 return new_var;
1091 }
1092
1093 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1094 return var;
1095
1096 return error_mark_node;
1097 }
1098
1099
1100 /* Return the parallel region associated with STMT. */
1101
1102 /* Debugging dumps for parallel regions. */
1103 void dump_omp_region (FILE *, struct omp_region *, int);
1104 void debug_omp_region (struct omp_region *);
1105 void debug_all_omp_regions (void);
1106
1107 /* Dump the parallel region tree rooted at REGION. */
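/* For a parallel region containing a single loop region, the dump looks
   roughly like this (a sketch; the lowercase names come from
   gimple_code_name):

       bb 2: gimple_omp_parallel
           bb 3: gimple_omp_for
           bb 5: GIMPLE_OMP_CONTINUE
           bb 6: GIMPLE_OMP_RETURN
       bb 7: GIMPLE_OMP_RETURN  */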
1108
1109 void
1110 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1111 {
1112 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1113 gimple_code_name[region->type]);
1114
1115 if (region->inner)
1116 dump_omp_region (file, region->inner, indent + 4);
1117
1118 if (region->cont)
1119 {
1120 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1121 region->cont->index);
1122 }
1123
1124 if (region->exit)
1125 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1126 region->exit->index);
1127 else
1128 fprintf (file, "%*s[no exit marker]\n", indent, "");
1129
1130 if (region->next)
1131 dump_omp_region (file, region->next, indent);
1132 }
1133
1134 DEBUG_FUNCTION void
1135 debug_omp_region (struct omp_region *region)
1136 {
1137 dump_omp_region (stderr, region, 0);
1138 }
1139
1140 DEBUG_FUNCTION void
1141 debug_all_omp_regions (void)
1142 {
1143 dump_omp_region (stderr, root_omp_region, 0);
1144 }
1145
1146
1147 /* Create a new parallel region starting at STMT inside region PARENT. */
1148
1149 struct omp_region *
1150 new_omp_region (basic_block bb, enum gimple_code type,
1151 struct omp_region *parent)
1152 {
1153 struct omp_region *region = XCNEW (struct omp_region);
1154
1155 region->outer = parent;
1156 region->entry = bb;
1157 region->type = type;
1158
1159 if (parent)
1160 {
1161 /* This is a nested region. Add it to the list of inner
1162 regions in PARENT. */
1163 region->next = parent->inner;
1164 parent->inner = region;
1165 }
1166 else
1167 {
1168 /* This is a toplevel region. Add it to the list of toplevel
1169 regions in ROOT_OMP_REGION. */
1170 region->next = root_omp_region;
1171 root_omp_region = region;
1172 }
1173
1174 return region;
1175 }
1176
1177 /* Release the memory associated with the region tree rooted at REGION. */
1178
1179 static void
1180 free_omp_region_1 (struct omp_region *region)
1181 {
1182 struct omp_region *i, *n;
1183
1184 for (i = region->inner; i ; i = n)
1185 {
1186 n = i->next;
1187 free_omp_region_1 (i);
1188 }
1189
1190 free (region);
1191 }
1192
1193 /* Release the memory for the entire omp region tree. */
1194
1195 void
1196 free_omp_regions (void)
1197 {
1198 struct omp_region *r, *n;
1199 for (r = root_omp_region; r ; r = n)
1200 {
1201 n = r->next;
1202 free_omp_region_1 (r);
1203 }
1204 root_omp_region = NULL;
1205 }
1206
1207
1208 /* Create a new context, with OUTER_CTX being the surrounding context. */
1209
1210 static omp_context *
1211 new_omp_context (gimple stmt, omp_context *outer_ctx)
1212 {
1213 omp_context *ctx = XCNEW (omp_context);
1214
1215 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1216 (splay_tree_value) ctx);
1217 ctx->stmt = stmt;
1218
1219 if (outer_ctx)
1220 {
1221 ctx->outer = outer_ctx;
1222 ctx->cb = outer_ctx->cb;
1223 ctx->cb.block = NULL;
1224 ctx->depth = outer_ctx->depth + 1;
1225 }
1226 else
1227 {
1228 ctx->cb.src_fn = current_function_decl;
1229 ctx->cb.dst_fn = current_function_decl;
1230 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1231 gcc_checking_assert (ctx->cb.src_node);
1232 ctx->cb.dst_node = ctx->cb.src_node;
1233 ctx->cb.src_cfun = cfun;
1234 ctx->cb.copy_decl = omp_copy_decl;
1235 ctx->cb.eh_lp_nr = 0;
1236 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1237 ctx->depth = 1;
1238 }
1239
1240 ctx->cb.decl_map = pointer_map_create ();
1241
1242 return ctx;
1243 }
1244
1245 static gimple_seq maybe_catch_exception (gimple_seq);
1246
1247 /* Finalize task copyfn. */
1248
1249 static void
1250 finalize_task_copyfn (gimple task_stmt)
1251 {
1252 struct function *child_cfun;
1253 tree child_fn, old_fn;
1254 gimple_seq seq, new_seq;
1255 gimple bind;
1256
1257 child_fn = gimple_omp_task_copy_fn (task_stmt);
1258 if (child_fn == NULL_TREE)
1259 return;
1260
1261 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1262
1263 /* Inform the callgraph about the new function. */
1264 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1265 = cfun->curr_properties;
1266
1267 old_fn = current_function_decl;
1268 push_cfun (child_cfun);
1269 current_function_decl = child_fn;
1270 bind = gimplify_body (child_fn, false);
1271 seq = gimple_seq_alloc ();
1272 gimple_seq_add_stmt (&seq, bind);
1273 new_seq = maybe_catch_exception (seq);
1274 if (new_seq != seq)
1275 {
1276 bind = gimple_build_bind (NULL, new_seq, NULL);
1277 seq = gimple_seq_alloc ();
1278 gimple_seq_add_stmt (&seq, bind);
1279 }
1280 gimple_set_body (child_fn, seq);
1281 pop_cfun ();
1282 current_function_decl = old_fn;
1283
1284 cgraph_add_new_function (child_fn, false);
1285 }
1286
1287 /* Destroy an omp_context data structure. Called through the splay tree
1288 value delete callback. */
1289
1290 static void
1291 delete_omp_context (splay_tree_value value)
1292 {
1293 omp_context *ctx = (omp_context *) value;
1294
1295 pointer_map_destroy (ctx->cb.decl_map);
1296
1297 if (ctx->field_map)
1298 splay_tree_delete (ctx->field_map);
1299 if (ctx->sfield_map)
1300 splay_tree_delete (ctx->sfield_map);
1301
1302 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1303 it produces corrupt debug information. */
1304 if (ctx->record_type)
1305 {
1306 tree t;
1307 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1308 DECL_ABSTRACT_ORIGIN (t) = NULL;
1309 }
1310 if (ctx->srecord_type)
1311 {
1312 tree t;
1313 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1314 DECL_ABSTRACT_ORIGIN (t) = NULL;
1315 }
1316
1317 if (is_task_ctx (ctx))
1318 finalize_task_copyfn (ctx->stmt);
1319
1320 XDELETE (ctx);
1321 }
1322
1323 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1324 context. */
1325
1326 static void
1327 fixup_child_record_type (omp_context *ctx)
1328 {
1329 tree f, type = ctx->record_type;
1330
1331 /* ??? It isn't sufficient to just call remap_type here, because
1332 variably_modified_type_p doesn't work the way we expect for
1333 record types. Testing each field for whether it needs remapping
1334 and creating a new record by hand works, however. */
1335 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1336 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1337 break;
1338 if (f)
1339 {
1340 tree name, new_fields = NULL;
1341
1342 type = lang_hooks.types.make_type (RECORD_TYPE);
1343 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1344 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1345 TYPE_DECL, name, type);
1346 TYPE_NAME (type) = name;
1347
1348 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1349 {
1350 tree new_f = copy_node (f);
1351 DECL_CONTEXT (new_f) = type;
1352 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1353 DECL_CHAIN (new_f) = new_fields;
1354 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1355 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1356 &ctx->cb, NULL);
1357 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1358 &ctx->cb, NULL);
1359 new_fields = new_f;
1360
1361 /* Arrange to be able to look up the receiver field
1362 given the sender field. */
1363 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1364 (splay_tree_value) new_f);
1365 }
1366 TYPE_FIELDS (type) = nreverse (new_fields);
1367 layout_type (type);
1368 }
1369
1370 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1371 }
1372
1373 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1374 specified by CLAUSES. */
1375
1376 static void
1377 scan_sharing_clauses (tree clauses, omp_context *ctx)
1378 {
1379 tree c, decl;
1380 bool scan_array_reductions = false;
1381
1382 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1383 {
1384 bool by_ref;
1385
1386 switch (OMP_CLAUSE_CODE (c))
1387 {
1388 case OMP_CLAUSE_PRIVATE:
1389 decl = OMP_CLAUSE_DECL (c);
1390 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1391 goto do_private;
1392 else if (!is_variable_sized (decl))
1393 install_var_local (decl, ctx);
1394 break;
1395
1396 case OMP_CLAUSE_SHARED:
1397 gcc_assert (is_taskreg_ctx (ctx));
1398 decl = OMP_CLAUSE_DECL (c);
1399 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1400 || !is_variable_sized (decl));
1401 /* Global variables don't need to be copied,
1402 the receiver side will use them directly. */
1403 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1404 break;
1405 by_ref = use_pointer_for_field (decl, ctx);
1406 if (! TREE_READONLY (decl)
1407 || TREE_ADDRESSABLE (decl)
1408 || by_ref
1409 || is_reference (decl))
1410 {
1411 install_var_field (decl, by_ref, 3, ctx);
1412 install_var_local (decl, ctx);
1413 break;
1414 }
1415 /* We don't need to copy const scalar vars back. */
1416 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1417 goto do_private;
1418
1419 case OMP_CLAUSE_LASTPRIVATE:
1420 /* Let the corresponding firstprivate clause create
1421 the variable. */
1422 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1423 break;
1424 /* FALLTHRU */
1425
1426 case OMP_CLAUSE_FIRSTPRIVATE:
1427 case OMP_CLAUSE_REDUCTION:
1428 decl = OMP_CLAUSE_DECL (c);
1429 do_private:
1430 if (is_variable_sized (decl))
1431 {
1432 if (is_task_ctx (ctx))
1433 install_var_field (decl, false, 1, ctx);
1434 break;
1435 }
1436 else if (is_taskreg_ctx (ctx))
1437 {
1438 bool global
1439 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1440 by_ref = use_pointer_for_field (decl, NULL);
1441
1442 if (is_task_ctx (ctx)
1443 && (global || by_ref || is_reference (decl)))
1444 {
1445 install_var_field (decl, false, 1, ctx);
1446 if (!global)
1447 install_var_field (decl, by_ref, 2, ctx);
1448 }
1449 else if (!global)
1450 install_var_field (decl, by_ref, 3, ctx);
1451 }
1452 install_var_local (decl, ctx);
1453 break;
1454
1455 case OMP_CLAUSE_COPYPRIVATE:
1456 case OMP_CLAUSE_COPYIN:
1457 decl = OMP_CLAUSE_DECL (c);
1458 by_ref = use_pointer_for_field (decl, NULL);
1459 install_var_field (decl, by_ref, 3, ctx);
1460 break;
1461
1462 case OMP_CLAUSE_DEFAULT:
1463 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1464 break;
1465
1466 case OMP_CLAUSE_FINAL:
1467 case OMP_CLAUSE_IF:
1468 case OMP_CLAUSE_NUM_THREADS:
1469 case OMP_CLAUSE_SCHEDULE:
1470 if (ctx->outer)
1471 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1472 break;
1473
1474 case OMP_CLAUSE_NOWAIT:
1475 case OMP_CLAUSE_ORDERED:
1476 case OMP_CLAUSE_COLLAPSE:
1477 case OMP_CLAUSE_UNTIED:
1478 case OMP_CLAUSE_MERGEABLE:
1479 break;
1480
1481 default:
1482 gcc_unreachable ();
1483 }
1484 }
1485
1486 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1487 {
1488 switch (OMP_CLAUSE_CODE (c))
1489 {
1490 case OMP_CLAUSE_LASTPRIVATE:
1491 /* Let the corresponding firstprivate clause create
1492 the variable. */
1493 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1494 scan_array_reductions = true;
1495 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1496 break;
1497 /* FALLTHRU */
1498
1499 case OMP_CLAUSE_PRIVATE:
1500 case OMP_CLAUSE_FIRSTPRIVATE:
1501 case OMP_CLAUSE_REDUCTION:
1502 decl = OMP_CLAUSE_DECL (c);
1503 if (is_variable_sized (decl))
1504 install_var_local (decl, ctx);
1505 fixup_remapped_decl (decl, ctx,
1506 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1507 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1508 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1509 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1510 scan_array_reductions = true;
1511 break;
1512
1513 case OMP_CLAUSE_SHARED:
1514 decl = OMP_CLAUSE_DECL (c);
1515 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1516 fixup_remapped_decl (decl, ctx, false);
1517 break;
1518
1519 case OMP_CLAUSE_COPYPRIVATE:
1520 case OMP_CLAUSE_COPYIN:
1521 case OMP_CLAUSE_DEFAULT:
1522 case OMP_CLAUSE_IF:
1523 case OMP_CLAUSE_NUM_THREADS:
1524 case OMP_CLAUSE_SCHEDULE:
1525 case OMP_CLAUSE_NOWAIT:
1526 case OMP_CLAUSE_ORDERED:
1527 case OMP_CLAUSE_COLLAPSE:
1528 case OMP_CLAUSE_UNTIED:
1529 case OMP_CLAUSE_FINAL:
1530 case OMP_CLAUSE_MERGEABLE:
1531 break;
1532
1533 default:
1534 gcc_unreachable ();
1535 }
1536 }
1537
1538 if (scan_array_reductions)
1539 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1540 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1541 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1542 {
1543 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1544 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1545 }
1546 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1547 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1548 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1549 }
1550
1551 /* Create a new name for omp child function. Returns an identifier. */
1552
1553 static GTY(()) unsigned int tmp_ompfn_id_num;
1554
1555 static tree
1556 create_omp_child_function_name (bool task_copy)
1557 {
1558 return (clone_function_name (current_function_decl,
1559 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1560 }
1561
1562 /* Build a decl for the omp child function. It'll not contain a body
1563 yet, just the bare decl. */
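/* The resulting decl is roughly (a sketch)

       static void foo._omp_fn.0 (void *.omp_data_i);

   or, for a task copy function,

       static void foo._omp_cpyfn.1 (void *.omp_data_o, void *.omp_data_i);

   i.e. a local, uninlinable function taking pointer(s) to the
   data-sharing record(s).  */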
1564
1565 static void
1566 create_omp_child_function (omp_context *ctx, bool task_copy)
1567 {
1568 tree decl, type, name, t;
1569
1570 name = create_omp_child_function_name (task_copy);
1571 if (task_copy)
1572 type = build_function_type_list (void_type_node, ptr_type_node,
1573 ptr_type_node, NULL_TREE);
1574 else
1575 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1576
1577 decl = build_decl (gimple_location (ctx->stmt),
1578 FUNCTION_DECL, name, type);
1579
1580 if (!task_copy)
1581 ctx->cb.dst_fn = decl;
1582 else
1583 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1584
1585 TREE_STATIC (decl) = 1;
1586 TREE_USED (decl) = 1;
1587 DECL_ARTIFICIAL (decl) = 1;
1588 DECL_NAMELESS (decl) = 1;
1589 DECL_IGNORED_P (decl) = 0;
1590 TREE_PUBLIC (decl) = 0;
1591 DECL_UNINLINABLE (decl) = 1;
1592 DECL_EXTERNAL (decl) = 0;
1593 DECL_CONTEXT (decl) = NULL_TREE;
1594 DECL_INITIAL (decl) = make_node (BLOCK);
1595
1596 t = build_decl (DECL_SOURCE_LOCATION (decl),
1597 RESULT_DECL, NULL_TREE, void_type_node);
1598 DECL_ARTIFICIAL (t) = 1;
1599 DECL_IGNORED_P (t) = 1;
1600 DECL_CONTEXT (t) = decl;
1601 DECL_RESULT (decl) = t;
1602
1603 t = build_decl (DECL_SOURCE_LOCATION (decl),
1604 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1605 DECL_ARTIFICIAL (t) = 1;
1606 DECL_NAMELESS (t) = 1;
1607 DECL_ARG_TYPE (t) = ptr_type_node;
1608 DECL_CONTEXT (t) = current_function_decl;
1609 TREE_USED (t) = 1;
1610 DECL_ARGUMENTS (decl) = t;
1611 if (!task_copy)
1612 ctx->receiver_decl = t;
1613 else
1614 {
1615 t = build_decl (DECL_SOURCE_LOCATION (decl),
1616 PARM_DECL, get_identifier (".omp_data_o"),
1617 ptr_type_node);
1618 DECL_ARTIFICIAL (t) = 1;
1619 DECL_NAMELESS (t) = 1;
1620 DECL_ARG_TYPE (t) = ptr_type_node;
1621 DECL_CONTEXT (t) = current_function_decl;
1622 TREE_USED (t) = 1;
1623 TREE_ADDRESSABLE (t) = 1;
1624 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1625 DECL_ARGUMENTS (decl) = t;
1626 }
1627
1628 /* Allocate memory for the function structure. The call to
1629 allocate_struct_function clobbers CFUN, so we need to restore
1630 it afterward. */
1631 push_struct_function (decl);
1632 cfun->function_end_locus = gimple_location (ctx->stmt);
1633 pop_cfun ();
1634 }
1635
1636
1637 /* Scan an OpenMP parallel directive. */
1638
1639 static void
1640 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1641 {
1642 omp_context *ctx;
1643 tree name;
1644 gimple stmt = gsi_stmt (*gsi);
1645
1646 /* Ignore parallel directives with empty bodies, unless there
1647 are copyin clauses. */
1648 if (optimize > 0
1649 && empty_body_p (gimple_omp_body (stmt))
1650 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1651 OMP_CLAUSE_COPYIN) == NULL)
1652 {
1653 gsi_replace (gsi, gimple_build_nop (), false);
1654 return;
1655 }
1656
1657 ctx = new_omp_context (stmt, outer_ctx);
1658 if (taskreg_nesting_level > 1)
1659 ctx->is_nested = true;
1660 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1661 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1662 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1663 name = create_tmp_var_name (".omp_data_s");
1664 name = build_decl (gimple_location (stmt),
1665 TYPE_DECL, name, ctx->record_type);
1666 DECL_ARTIFICIAL (name) = 1;
1667 DECL_NAMELESS (name) = 1;
1668 TYPE_NAME (ctx->record_type) = name;
1669 create_omp_child_function (ctx, false);
1670 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1671
1672 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1673 scan_omp (gimple_omp_body (stmt), ctx);
1674
1675 if (TYPE_FIELDS (ctx->record_type) == NULL)
1676 ctx->record_type = ctx->receiver_decl = NULL;
1677 else
1678 {
1679 layout_type (ctx->record_type);
1680 fixup_child_record_type (ctx);
1681 }
1682 }
1683
1684 /* Scan an OpenMP task directive. */
1685
1686 static void
1687 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1688 {
1689 omp_context *ctx;
1690 tree name, t;
1691 gimple stmt = gsi_stmt (*gsi);
1692 location_t loc = gimple_location (stmt);
1693
1694 /* Ignore task directives with empty bodies. */
1695 if (optimize > 0
1696 && empty_body_p (gimple_omp_body (stmt)))
1697 {
1698 gsi_replace (gsi, gimple_build_nop (), false);
1699 return;
1700 }
1701
1702 ctx = new_omp_context (stmt, outer_ctx);
1703 if (taskreg_nesting_level > 1)
1704 ctx->is_nested = true;
1705 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1706 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1707 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1708 name = create_tmp_var_name (".omp_data_s");
1709 name = build_decl (gimple_location (stmt),
1710 TYPE_DECL, name, ctx->record_type);
1711 DECL_ARTIFICIAL (name) = 1;
1712 DECL_NAMELESS (name) = 1;
1713 TYPE_NAME (ctx->record_type) = name;
1714 create_omp_child_function (ctx, false);
1715 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1716
1717 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1718
1719 if (ctx->srecord_type)
1720 {
1721 name = create_tmp_var_name (".omp_data_a");
1722 name = build_decl (gimple_location (stmt),
1723 TYPE_DECL, name, ctx->srecord_type);
1724 DECL_ARTIFICIAL (name) = 1;
1725 DECL_NAMELESS (name) = 1;
1726 TYPE_NAME (ctx->srecord_type) = name;
1727 create_omp_child_function (ctx, true);
1728 }
1729
1730 scan_omp (gimple_omp_body (stmt), ctx);
1731
1732 if (TYPE_FIELDS (ctx->record_type) == NULL)
1733 {
1734 ctx->record_type = ctx->receiver_decl = NULL;
1735 t = build_int_cst (long_integer_type_node, 0);
1736 gimple_omp_task_set_arg_size (stmt, t);
1737 t = build_int_cst (long_integer_type_node, 1);
1738 gimple_omp_task_set_arg_align (stmt, t);
1739 }
1740 else
1741 {
1742 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1743 /* Move VLA fields to the end. */
1744 p = &TYPE_FIELDS (ctx->record_type);
1745 while (*p)
1746 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1747 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1748 {
1749 *q = *p;
1750 *p = TREE_CHAIN (*p);
1751 TREE_CHAIN (*q) = NULL_TREE;
1752 q = &TREE_CHAIN (*q);
1753 }
1754 else
1755 p = &DECL_CHAIN (*p);
1756 *p = vla_fields;
1757 layout_type (ctx->record_type);
1758 fixup_child_record_type (ctx);
1759 if (ctx->srecord_type)
1760 layout_type (ctx->srecord_type);
1761 t = fold_convert_loc (loc, long_integer_type_node,
1762 TYPE_SIZE_UNIT (ctx->record_type));
1763 gimple_omp_task_set_arg_size (stmt, t);
1764 t = build_int_cst (long_integer_type_node,
1765 TYPE_ALIGN_UNIT (ctx->record_type));
1766 gimple_omp_task_set_arg_align (stmt, t);
1767 }
1768 }
1769
1770
1771 /* Scan an OpenMP loop directive. */
1772
1773 static void
1774 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1775 {
1776 omp_context *ctx;
1777 size_t i;
1778
1779 ctx = new_omp_context (stmt, outer_ctx);
1780
1781 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1782
1783 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1784 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1785 {
1786 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1787 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1788 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1789 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1790 }
1791 scan_omp (gimple_omp_body (stmt), ctx);
1792 }
1793
1794 /* Scan an OpenMP sections directive. */
1795
1796 static void
1797 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1798 {
1799 omp_context *ctx;
1800
1801 ctx = new_omp_context (stmt, outer_ctx);
1802 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1803 scan_omp (gimple_omp_body (stmt), ctx);
1804 }
1805
1806 /* Scan an OpenMP single directive. */
1807
1808 static void
1809 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1810 {
1811 omp_context *ctx;
1812 tree name;
1813
1814 ctx = new_omp_context (stmt, outer_ctx);
1815 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1816 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1817 name = create_tmp_var_name (".omp_copy_s");
1818 name = build_decl (gimple_location (stmt),
1819 TYPE_DECL, name, ctx->record_type);
1820 TYPE_NAME (ctx->record_type) = name;
1821
1822 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1823 scan_omp (gimple_omp_body (stmt), ctx);
1824
1825 if (TYPE_FIELDS (ctx->record_type) == NULL)
1826 ctx->record_type = NULL;
1827 else
1828 layout_type (ctx->record_type);
1829 }
1830
1831
1832 /* Check OpenMP nesting restrictions. */
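/* For illustration, one nesting this function rejects (with the
   "work-sharing region may not be closely nested inside of
   work-sharing, ... region" diagnostic below) is a work-sharing
   construct closely nested in another work-sharing construct; i, n
   and foo are placeholders:

     #pragma omp parallel
     #pragma omp for
     for (i = 0; i < n; i++)
       {
         #pragma omp single
         foo (i);
       }  */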
1833 static bool
1834 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1835 {
1836 switch (gimple_code (stmt))
1837 {
1838 case GIMPLE_OMP_FOR:
1839 case GIMPLE_OMP_SECTIONS:
1840 case GIMPLE_OMP_SINGLE:
1841 case GIMPLE_CALL:
1842 for (; ctx != NULL; ctx = ctx->outer)
1843 switch (gimple_code (ctx->stmt))
1844 {
1845 case GIMPLE_OMP_FOR:
1846 case GIMPLE_OMP_SECTIONS:
1847 case GIMPLE_OMP_SINGLE:
1848 case GIMPLE_OMP_ORDERED:
1849 case GIMPLE_OMP_MASTER:
1850 case GIMPLE_OMP_TASK:
1851 if (is_gimple_call (stmt))
1852 {
1853 error_at (gimple_location (stmt),
1854 "barrier region may not be closely nested inside "
1855 "of work-sharing, critical, ordered, master or "
1856 "explicit task region");
1857 return false;
1858 }
1859 error_at (gimple_location (stmt),
1860 "work-sharing region may not be closely nested inside "
1861 "of work-sharing, critical, ordered, master or explicit "
1862 "task region");
1863 return false;
1864 case GIMPLE_OMP_PARALLEL:
1865 return true;
1866 default:
1867 break;
1868 }
1869 break;
1870 case GIMPLE_OMP_MASTER:
1871 for (; ctx != NULL; ctx = ctx->outer)
1872 switch (gimple_code (ctx->stmt))
1873 {
1874 case GIMPLE_OMP_FOR:
1875 case GIMPLE_OMP_SECTIONS:
1876 case GIMPLE_OMP_SINGLE:
1877 case GIMPLE_OMP_TASK:
1878 error_at (gimple_location (stmt),
1879 "master region may not be closely nested inside "
1880 "of work-sharing or explicit task region");
1881 return false;
1882 case GIMPLE_OMP_PARALLEL:
1883 return true;
1884 default:
1885 break;
1886 }
1887 break;
1888 case GIMPLE_OMP_ORDERED:
1889 for (; ctx != NULL; ctx = ctx->outer)
1890 switch (gimple_code (ctx->stmt))
1891 {
1892 case GIMPLE_OMP_CRITICAL:
1893 case GIMPLE_OMP_TASK:
1894 error_at (gimple_location (stmt),
1895 "ordered region may not be closely nested inside "
1896 "of critical or explicit task region");
1897 return false;
1898 case GIMPLE_OMP_FOR:
1899 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1900 OMP_CLAUSE_ORDERED) == NULL)
1901 {
1902 error_at (gimple_location (stmt),
1903 "ordered region must be closely nested inside "
1904 "a loop region with an ordered clause");
1905 return false;
1906 }
1907 return true;
1908 case GIMPLE_OMP_PARALLEL:
1909 return true;
1910 default:
1911 break;
1912 }
1913 break;
1914 case GIMPLE_OMP_CRITICAL:
1915 for (; ctx != NULL; ctx = ctx->outer)
1916 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1917 && (gimple_omp_critical_name (stmt)
1918 == gimple_omp_critical_name (ctx->stmt)))
1919 {
1920 error_at (gimple_location (stmt),
1921 "critical region may not be nested inside a critical "
1922 "region with the same name");
1923 return false;
1924 }
1925 break;
1926 default:
1927 break;
1928 }
1929 return true;
1930 }
1931
1932
1933 /* Helper function for scan_omp.
1934
1935 Callback for walk_tree or operators in walk_gimple_stmt used to
1936 scan for OpenMP directives in TP. */
1937
1938 static tree
1939 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1940 {
1941 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1942 omp_context *ctx = (omp_context *) wi->info;
1943 tree t = *tp;
1944
1945 switch (TREE_CODE (t))
1946 {
1947 case VAR_DECL:
1948 case PARM_DECL:
1949 case LABEL_DECL:
1950 case RESULT_DECL:
1951 if (ctx)
1952 *tp = remap_decl (t, &ctx->cb);
1953 break;
1954
1955 default:
1956 if (ctx && TYPE_P (t))
1957 *tp = remap_type (t, &ctx->cb);
1958 else if (!DECL_P (t))
1959 {
1960 *walk_subtrees = 1;
1961 if (ctx)
1962 {
1963 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1964 if (tem != TREE_TYPE (t))
1965 {
1966 if (TREE_CODE (t) == INTEGER_CST)
1967 *tp = build_int_cst_wide (tem,
1968 TREE_INT_CST_LOW (t),
1969 TREE_INT_CST_HIGH (t));
1970 else
1971 TREE_TYPE (t) = tem;
1972 }
1973 }
1974 }
1975 break;
1976 }
1977
1978 return NULL_TREE;
1979 }
1980
1981
1982 /* Helper function for scan_omp.
1983
1984 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1985 the current statement in GSI. */
1986
1987 static tree
1988 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1989 struct walk_stmt_info *wi)
1990 {
1991 gimple stmt = gsi_stmt (*gsi);
1992 omp_context *ctx = (omp_context *) wi->info;
1993
1994 if (gimple_has_location (stmt))
1995 input_location = gimple_location (stmt);
1996
1997 /* Check the OpenMP nesting restrictions. */
1998 if (ctx != NULL)
1999 {
2000 bool remove = false;
2001 if (is_gimple_omp (stmt))
2002 remove = !check_omp_nesting_restrictions (stmt, ctx);
2003 else if (is_gimple_call (stmt))
2004 {
2005 tree fndecl = gimple_call_fndecl (stmt);
2006 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2007 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
2008 remove = !check_omp_nesting_restrictions (stmt, ctx);
2009 }
2010 if (remove)
2011 {
2012 stmt = gimple_build_nop ();
2013 gsi_replace (gsi, stmt, false);
2014 }
2015 }
2016
2017 *handled_ops_p = true;
2018
2019 switch (gimple_code (stmt))
2020 {
2021 case GIMPLE_OMP_PARALLEL:
2022 taskreg_nesting_level++;
2023 scan_omp_parallel (gsi, ctx);
2024 taskreg_nesting_level--;
2025 break;
2026
2027 case GIMPLE_OMP_TASK:
2028 taskreg_nesting_level++;
2029 scan_omp_task (gsi, ctx);
2030 taskreg_nesting_level--;
2031 break;
2032
2033 case GIMPLE_OMP_FOR:
2034 scan_omp_for (stmt, ctx);
2035 break;
2036
2037 case GIMPLE_OMP_SECTIONS:
2038 scan_omp_sections (stmt, ctx);
2039 break;
2040
2041 case GIMPLE_OMP_SINGLE:
2042 scan_omp_single (stmt, ctx);
2043 break;
2044
2045 case GIMPLE_OMP_SECTION:
2046 case GIMPLE_OMP_MASTER:
2047 case GIMPLE_OMP_ORDERED:
2048 case GIMPLE_OMP_CRITICAL:
2049 ctx = new_omp_context (stmt, ctx);
2050 scan_omp (gimple_omp_body (stmt), ctx);
2051 break;
2052
2053 case GIMPLE_BIND:
2054 {
2055 tree var;
2056
2057 *handled_ops_p = false;
2058 if (ctx)
2059 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2060 insert_decl_map (&ctx->cb, var, var);
2061 }
2062 break;
2063 default:
2064 *handled_ops_p = false;
2065 break;
2066 }
2067
2068 return NULL_TREE;
2069 }
2070
2071
2072 /* Scan all the statements starting at the current statement. CTX
2073 contains context information about the OpenMP directives and
2074 clauses found during the scan. */
2075
2076 static void
2077 scan_omp (gimple_seq body, omp_context *ctx)
2078 {
2079 location_t saved_location;
2080 struct walk_stmt_info wi;
2081
2082 memset (&wi, 0, sizeof (wi));
2083 wi.info = ctx;
2084 wi.want_locations = true;
2085
2086 saved_location = input_location;
2087 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2088 input_location = saved_location;
2089 }
2090
2091 /* Re-gimplification and code generation routines. */
2092
2093 /* Build a call to GOMP_barrier. */
2094
2095 static tree
2096 build_omp_barrier (void)
2097 {
2098 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
2099 }
2100
2101 /* If a context was created for STMT when it was scanned, return it. */
2102
2103 static omp_context *
2104 maybe_lookup_ctx (gimple stmt)
2105 {
2106 splay_tree_node n;
2107 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2108 return n ? (omp_context *) n->value : NULL;
2109 }
2110
2111
2112 /* Find the mapping for DECL in CTX or the immediately enclosing
2113 context that has a mapping for DECL.
2114
2115 If CTX is a nested parallel directive, we may have to use the decl
2116 mappings created in CTX's parent context. Suppose that we have the
2117 following parallel nesting (variable UIDs shown for clarity):
2118
2119 iD.1562 = 0;
2120 #omp parallel shared(iD.1562) -> outer parallel
2121 iD.1562 = iD.1562 + 1;
2122
2123 #omp parallel shared (iD.1562) -> inner parallel
2124 iD.1562 = iD.1562 - 1;
2125
2126 Each parallel structure will create a distinct .omp_data_s structure
2127 for copying iD.1562 in/out of the directive:
2128
2129 outer parallel .omp_data_s.1.i -> iD.1562
2130 inner parallel .omp_data_s.2.i -> iD.1562
2131
2132 A shared variable mapping will produce a copy-out operation before
2133 the parallel directive and a copy-in operation after it. So, in
2134 this case we would have:
2135
2136 iD.1562 = 0;
2137 .omp_data_o.1.i = iD.1562;
2138 #omp parallel shared(iD.1562) -> outer parallel
2139 .omp_data_i.1 = &.omp_data_o.1
2140 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2141
2142 .omp_data_o.2.i = iD.1562; -> **
2143 #omp parallel shared(iD.1562) -> inner parallel
2144 .omp_data_i.2 = &.omp_data_o.2
2145 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2146
2147
2148 ** This is a problem. The symbol iD.1562 cannot be referenced
2149 inside the body of the outer parallel region. But since we are
2150 emitting this copy operation while expanding the inner parallel
2151 directive, we need to access the CTX structure of the outer
2152 parallel directive to get the correct mapping:
2153
2154 .omp_data_o.2.i = .omp_data_i.1->i
2155
2156 Since there may be other workshare or parallel directives enclosing
2157 the parallel directive, it may be necessary to walk up the context
2158 parent chain. This is not a problem in general because nested
2159 parallelism happens only rarely. */
2160
2161 static tree
2162 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2163 {
2164 tree t;
2165 omp_context *up;
2166
2167 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2168 t = maybe_lookup_decl (decl, up);
2169
2170 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2171
2172 return t ? t : decl;
2173 }
2174
2175
2176 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2177 in outer contexts. */
2178
2179 static tree
2180 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2181 {
2182 tree t = NULL;
2183 omp_context *up;
2184
2185 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2186 t = maybe_lookup_decl (decl, up);
2187
2188 return t ? t : decl;
2189 }
2190
2191
2192 /* Construct the initialization value for reduction CLAUSE. */
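/* Roughly, the identity element of the reduction operator is returned,
   e.g. (values as produced by the switch below):

     reduction (+:x)    ->  0
     reduction (*:x)    ->  1
     reduction (&:x)    ->  ~0
     reduction (|:x)    ->  0
     reduction (max:x)  ->  minimum value of TYPE (or -INF for floats
                            when infinities are honored)
     reduction (min:x)  ->  maximum value of TYPE (or +INF)  */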
2193
2194 tree
2195 omp_reduction_init (tree clause, tree type)
2196 {
2197 location_t loc = OMP_CLAUSE_LOCATION (clause);
2198 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2199 {
2200 case PLUS_EXPR:
2201 case MINUS_EXPR:
2202 case BIT_IOR_EXPR:
2203 case BIT_XOR_EXPR:
2204 case TRUTH_OR_EXPR:
2205 case TRUTH_ORIF_EXPR:
2206 case TRUTH_XOR_EXPR:
2207 case NE_EXPR:
2208 return build_zero_cst (type);
2209
2210 case MULT_EXPR:
2211 case TRUTH_AND_EXPR:
2212 case TRUTH_ANDIF_EXPR:
2213 case EQ_EXPR:
2214 return fold_convert_loc (loc, type, integer_one_node);
2215
2216 case BIT_AND_EXPR:
2217 return fold_convert_loc (loc, type, integer_minus_one_node);
2218
2219 case MAX_EXPR:
2220 if (SCALAR_FLOAT_TYPE_P (type))
2221 {
2222 REAL_VALUE_TYPE max, min;
2223 if (HONOR_INFINITIES (TYPE_MODE (type)))
2224 {
2225 real_inf (&max);
2226 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2227 }
2228 else
2229 real_maxval (&min, 1, TYPE_MODE (type));
2230 return build_real (type, min);
2231 }
2232 else
2233 {
2234 gcc_assert (INTEGRAL_TYPE_P (type));
2235 return TYPE_MIN_VALUE (type);
2236 }
2237
2238 case MIN_EXPR:
2239 if (SCALAR_FLOAT_TYPE_P (type))
2240 {
2241 REAL_VALUE_TYPE max;
2242 if (HONOR_INFINITIES (TYPE_MODE (type)))
2243 real_inf (&max);
2244 else
2245 real_maxval (&max, 0, TYPE_MODE (type));
2246 return build_real (type, max);
2247 }
2248 else
2249 {
2250 gcc_assert (INTEGRAL_TYPE_P (type));
2251 return TYPE_MAX_VALUE (type);
2252 }
2253
2254 default:
2255 gcc_unreachable ();
2256 }
2257 }
2258
2259 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2260 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2261 private variables. Initialization statements go in ILIST, while calls
2262 to destructors go in DLIST. */
2263
2264 static void
2265 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2266 omp_context *ctx)
2267 {
2268 gimple_stmt_iterator diter;
2269 tree c, dtor, copyin_seq, x, ptr;
2270 bool copyin_by_ref = false;
2271 bool lastprivate_firstprivate = false;
2272 int pass;
2273
2274 *dlist = gimple_seq_alloc ();
2275 diter = gsi_start (*dlist);
2276 copyin_seq = NULL;
2277
2278 /* Do all the fixed sized types in the first pass, and the variable sized
2279 types in the second pass. This makes sure that the scalar arguments to
2280 the variable sized types are processed before we use them in the
2281 variable sized operations. */
2282 for (pass = 0; pass < 2; ++pass)
2283 {
2284 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2285 {
2286 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2287 tree var, new_var;
2288 bool by_ref;
2289 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2290
2291 switch (c_kind)
2292 {
2293 case OMP_CLAUSE_PRIVATE:
2294 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2295 continue;
2296 break;
2297 case OMP_CLAUSE_SHARED:
2298 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2299 {
2300 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2301 continue;
2302 }
2303 case OMP_CLAUSE_FIRSTPRIVATE:
2304 case OMP_CLAUSE_COPYIN:
2305 case OMP_CLAUSE_REDUCTION:
2306 break;
2307 case OMP_CLAUSE_LASTPRIVATE:
2308 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2309 {
2310 lastprivate_firstprivate = true;
2311 if (pass != 0)
2312 continue;
2313 }
2314 break;
2315 default:
2316 continue;
2317 }
2318
2319 new_var = var = OMP_CLAUSE_DECL (c);
2320 if (c_kind != OMP_CLAUSE_COPYIN)
2321 new_var = lookup_decl (var, ctx);
2322
2323 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2324 {
2325 if (pass != 0)
2326 continue;
2327 }
2328 else if (is_variable_sized (var))
2329 {
2330 /* For variable sized types, we need to allocate the
2331 actual storage here. Call alloca and store the
2332 result in the pointer decl that we created elsewhere. */
2333 if (pass == 0)
2334 continue;
2335
2336 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2337 {
2338 gimple stmt;
2339 tree tmp, atmp;
2340
2341 ptr = DECL_VALUE_EXPR (new_var);
2342 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2343 ptr = TREE_OPERAND (ptr, 0);
2344 gcc_assert (DECL_P (ptr));
2345 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2346
2347 /* void *tmp = __builtin_alloca */
2348 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2349 stmt = gimple_build_call (atmp, 1, x);
2350 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2351 gimple_add_tmp_var (tmp);
2352 gimple_call_set_lhs (stmt, tmp);
2353
2354 gimple_seq_add_stmt (ilist, stmt);
2355
2356 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2357 gimplify_assign (ptr, x, ilist);
2358 }
2359 }
2360 else if (is_reference (var))
2361 {
2362 /* For references that are being privatized for Fortran,
2363 allocate new backing storage for the new pointer
2364 variable. This allows us to avoid changing all the
2365 code that expects a pointer to something that expects
2366 a direct variable. Note that this doesn't apply to
2367 C++, since reference types are disallowed in data
2368 sharing clauses there, except for NRV optimized
2369 return values. */
2370 if (pass == 0)
2371 continue;
2372
2373 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2374 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2375 {
2376 x = build_receiver_ref (var, false, ctx);
2377 x = build_fold_addr_expr_loc (clause_loc, x);
2378 }
2379 else if (TREE_CONSTANT (x))
2380 {
2381 const char *name = NULL;
2382 if (DECL_NAME (var))
2383 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2384
2385 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2386 name);
2387 gimple_add_tmp_var (x);
2388 TREE_ADDRESSABLE (x) = 1;
2389 x = build_fold_addr_expr_loc (clause_loc, x);
2390 }
2391 else
2392 {
2393 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2394 x = build_call_expr_loc (clause_loc, atmp, 1, x);
2395 }
2396
2397 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2398 gimplify_assign (new_var, x, ilist);
2399
2400 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2401 }
2402 else if (c_kind == OMP_CLAUSE_REDUCTION
2403 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2404 {
2405 if (pass == 0)
2406 continue;
2407 }
2408 else if (pass != 0)
2409 continue;
2410
2411 switch (OMP_CLAUSE_CODE (c))
2412 {
2413 case OMP_CLAUSE_SHARED:
2414 /* Shared global vars are just accessed directly. */
2415 if (is_global_var (new_var))
2416 break;
2417 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2418 needs to be delayed until after fixup_child_record_type so
2419 that we get the correct type during the dereference. */
2420 by_ref = use_pointer_for_field (var, ctx);
2421 x = build_receiver_ref (var, by_ref, ctx);
2422 SET_DECL_VALUE_EXPR (new_var, x);
2423 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2424
2425 /* ??? If VAR is not passed by reference, and the variable
2426 hasn't been initialized yet, then we'll get a warning for
2427 the store into the omp_data_s structure. Ideally, we'd be
2428 able to notice this and not store anything at all, but
2429 we're generating code too early. Suppress the warning. */
2430 if (!by_ref)
2431 TREE_NO_WARNING (var) = 1;
2432 break;
2433
2434 case OMP_CLAUSE_LASTPRIVATE:
2435 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2436 break;
2437 /* FALLTHRU */
2438
2439 case OMP_CLAUSE_PRIVATE:
2440 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2441 x = build_outer_var_ref (var, ctx);
2442 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2443 {
2444 if (is_task_ctx (ctx))
2445 x = build_receiver_ref (var, false, ctx);
2446 else
2447 x = build_outer_var_ref (var, ctx);
2448 }
2449 else
2450 x = NULL;
2451 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2452 if (x)
2453 gimplify_and_add (x, ilist);
2454 /* FALLTHRU */
2455
2456 do_dtor:
2457 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2458 if (x)
2459 {
2460 gimple_seq tseq = NULL;
2461
2462 dtor = x;
2463 gimplify_stmt (&dtor, &tseq);
2464 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2465 }
2466 break;
2467
2468 case OMP_CLAUSE_FIRSTPRIVATE:
2469 if (is_task_ctx (ctx))
2470 {
2471 if (is_reference (var) || is_variable_sized (var))
2472 goto do_dtor;
2473 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2474 ctx))
2475 || use_pointer_for_field (var, NULL))
2476 {
2477 x = build_receiver_ref (var, false, ctx);
2478 SET_DECL_VALUE_EXPR (new_var, x);
2479 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2480 goto do_dtor;
2481 }
2482 }
2483 x = build_outer_var_ref (var, ctx);
2484 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2485 gimplify_and_add (x, ilist);
2486 goto do_dtor;
2487 break;
2488
2489 case OMP_CLAUSE_COPYIN:
2490 by_ref = use_pointer_for_field (var, NULL);
2491 x = build_receiver_ref (var, by_ref, ctx);
2492 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2493 append_to_statement_list (x, &copyin_seq);
2494 copyin_by_ref |= by_ref;
2495 break;
2496
2497 case OMP_CLAUSE_REDUCTION:
2498 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2499 {
2500 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2501 x = build_outer_var_ref (var, ctx);
2502
2503 if (is_reference (var))
2504 x = build_fold_addr_expr_loc (clause_loc, x);
2505 SET_DECL_VALUE_EXPR (placeholder, x);
2506 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2507 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2508 gimple_seq_add_seq (ilist,
2509 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2510 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2511 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2512 }
2513 else
2514 {
2515 x = omp_reduction_init (c, TREE_TYPE (new_var));
2516 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2517 gimplify_assign (new_var, x, ilist);
2518 }
2519 break;
2520
2521 default:
2522 gcc_unreachable ();
2523 }
2524 }
2525 }
2526
2527 /* The copyin sequence is not to be executed by the main thread, since
2528 that would result in self-copies. Perhaps not visible to scalars,
2529 but it certainly is to C++ operator=. */
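  /* What gets appended to ILIST below is, schematically,

       if (__builtin_omp_get_thread_num () != 0)
         <copyin_seq>;  */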
2530 if (copyin_seq)
2531 {
2532 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2533 0);
2534 x = build2 (NE_EXPR, boolean_type_node, x,
2535 build_int_cst (TREE_TYPE (x), 0));
2536 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2537 gimplify_and_add (x, ilist);
2538 }
2539
2540 /* If any copyin variable is passed by reference, we must ensure the
2541 master thread doesn't modify it before it is copied over in all
2542 threads. Similarly for variables in both firstprivate and
2543 lastprivate clauses we need to ensure the lastprivate copying
2544 happens after firstprivate copying in all threads. */
2545 if (copyin_by_ref || lastprivate_firstprivate)
2546 gimplify_and_add (build_omp_barrier (), ilist);
2547 }
2548
2549
2550 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2551 both parallel and workshare constructs. PREDICATE may be NULL if it's
2552 always true. */
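/* Schematically, for "lastprivate (x)" on a loop the emitted sequence is

     if (PREDICATE)       /* e.g. the iteration variable reached its end  */
       x = x.priv;        /* copy the private value back to the original  */

   with the conditional omitted when PREDICATE is NULL.  x.priv is a
   placeholder for the privatized copy looked up in CTX.  */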
2553
2554 static void
2555 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2556 omp_context *ctx)
2557 {
2558 tree x, c, label = NULL;
2559 bool par_clauses = false;
2560
2561 /* Early exit if there are no lastprivate clauses. */
2562 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2563 if (clauses == NULL)
2564 {
2565 /* If this was a workshare clause, see if it had been combined
2566 with its parallel. In that case, look for the clauses on the
2567 parallel statement itself. */
2568 if (is_parallel_ctx (ctx))
2569 return;
2570
2571 ctx = ctx->outer;
2572 if (ctx == NULL || !is_parallel_ctx (ctx))
2573 return;
2574
2575 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2576 OMP_CLAUSE_LASTPRIVATE);
2577 if (clauses == NULL)
2578 return;
2579 par_clauses = true;
2580 }
2581
2582 if (predicate)
2583 {
2584 gimple stmt;
2585 tree label_true, arm1, arm2;
2586
2587 label = create_artificial_label (UNKNOWN_LOCATION);
2588 label_true = create_artificial_label (UNKNOWN_LOCATION);
2589 arm1 = TREE_OPERAND (predicate, 0);
2590 arm2 = TREE_OPERAND (predicate, 1);
2591 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2592 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2593 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2594 label_true, label);
2595 gimple_seq_add_stmt (stmt_list, stmt);
2596 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2597 }
2598
2599 for (c = clauses; c ;)
2600 {
2601 tree var, new_var;
2602 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2603
2604 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2605 {
2606 var = OMP_CLAUSE_DECL (c);
2607 new_var = lookup_decl (var, ctx);
2608
2609 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2610 {
2611 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2612 gimple_seq_add_seq (stmt_list,
2613 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2614 }
2615 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2616
2617 x = build_outer_var_ref (var, ctx);
2618 if (is_reference (var))
2619 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2620 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2621 gimplify_and_add (x, stmt_list);
2622 }
2623 c = OMP_CLAUSE_CHAIN (c);
2624 if (c == NULL && !par_clauses)
2625 {
2626 /* If this was a workshare clause, see if it had been combined
2627 with its parallel. In that case, continue looking for the
2628 clauses also on the parallel statement itself. */
2629 if (is_parallel_ctx (ctx))
2630 break;
2631
2632 ctx = ctx->outer;
2633 if (ctx == NULL || !is_parallel_ctx (ctx))
2634 break;
2635
2636 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2637 OMP_CLAUSE_LASTPRIVATE);
2638 par_clauses = true;
2639 }
2640 }
2641
2642 if (label)
2643 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2644 }
2645
2646
2647 /* Generate code to implement the REDUCTION clauses. */
2648
2649 static void
2650 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2651 {
2652 gimple_seq sub_seq = NULL;
2653 gimple stmt;
2654 tree x, c;
2655 int count = 0;
2656
2657 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2658 update in that case, otherwise use a lock. */
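  /* E.g. for a single "reduction (+:s)" the merge emitted below is an
     atomic update, roughly

       #pragma omp atomic
       s_orig += s_priv;

     whereas with several reduction clauses (or array reductions) all the
     merges are wrapped between GOMP_atomic_start ()/GOMP_atomic_end ().
     s_orig and s_priv are placeholders for the outer and private copies.  */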
2659 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2660 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2661 {
2662 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2663 {
2664 /* Never use OMP_ATOMIC for array reductions. */
2665 count = -1;
2666 break;
2667 }
2668 count++;
2669 }
2670
2671 if (count == 0)
2672 return;
2673
2674 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2675 {
2676 tree var, ref, new_var;
2677 enum tree_code code;
2678 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2679
2680 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2681 continue;
2682
2683 var = OMP_CLAUSE_DECL (c);
2684 new_var = lookup_decl (var, ctx);
2685 if (is_reference (var))
2686 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2687 ref = build_outer_var_ref (var, ctx);
2688 code = OMP_CLAUSE_REDUCTION_CODE (c);
2689
2690 /* reduction(-:var) sums up the partial results, so it acts
2691 identically to reduction(+:var). */
2692 if (code == MINUS_EXPR)
2693 code = PLUS_EXPR;
2694
2695 if (count == 1)
2696 {
2697 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2698
2699 addr = save_expr (addr);
2700 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2701 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2702 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2703 gimplify_and_add (x, stmt_seqp);
2704 return;
2705 }
2706
2707 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2708 {
2709 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2710
2711 if (is_reference (var))
2712 ref = build_fold_addr_expr_loc (clause_loc, ref);
2713 SET_DECL_VALUE_EXPR (placeholder, ref);
2714 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2715 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2716 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2717 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2718 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2719 }
2720 else
2721 {
2722 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2723 ref = build_outer_var_ref (var, ctx);
2724 gimplify_assign (ref, x, &sub_seq);
2725 }
2726 }
2727
2728 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
2729 0);
2730 gimple_seq_add_stmt (stmt_seqp, stmt);
2731
2732 gimple_seq_add_seq (stmt_seqp, sub_seq);
2733
2734 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
2735 0);
2736 gimple_seq_add_stmt (stmt_seqp, stmt);
2737 }
2738
2739
2740 /* Generate code to implement the COPYPRIVATE clauses. */
2741
2742 static void
2743 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2744 omp_context *ctx)
2745 {
2746 tree c;
2747
2748 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2749 {
2750 tree var, new_var, ref, x;
2751 bool by_ref;
2752 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2753
2754 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2755 continue;
2756
2757 var = OMP_CLAUSE_DECL (c);
2758 by_ref = use_pointer_for_field (var, NULL);
2759
2760 ref = build_sender_ref (var, ctx);
2761 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2762 if (by_ref)
2763 {
2764 x = build_fold_addr_expr_loc (clause_loc, new_var);
2765 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2766 }
2767 gimplify_assign (ref, x, slist);
2768
2769 ref = build_receiver_ref (var, false, ctx);
2770 if (by_ref)
2771 {
2772 ref = fold_convert_loc (clause_loc,
2773 build_pointer_type (TREE_TYPE (new_var)),
2774 ref);
2775 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2776 }
2777 if (is_reference (var))
2778 {
2779 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2780 ref = build_simple_mem_ref_loc (clause_loc, ref);
2781 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2782 }
2783 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2784 gimplify_and_add (x, rlist);
2785 }
2786 }
2787
2788
2789 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2790 and REDUCTION from the sender (aka parent) side. */
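/* E.g. for "firstprivate (x)" on a parallel, the sender side emits,
   schematically,

     .omp_data_o.x = x;      /* before the region (ILIST)  */

   and for a (non-reference) "lastprivate (x)",

     x = .omp_data_o.x;      /* after the region completes (OLIST)  */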
2791
2792 static void
2793 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2794 omp_context *ctx)
2795 {
2796 tree c;
2797
2798 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2799 {
2800 tree val, ref, x, var;
2801 bool by_ref, do_in = false, do_out = false;
2802 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2803
2804 switch (OMP_CLAUSE_CODE (c))
2805 {
2806 case OMP_CLAUSE_PRIVATE:
2807 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2808 break;
2809 continue;
2810 case OMP_CLAUSE_FIRSTPRIVATE:
2811 case OMP_CLAUSE_COPYIN:
2812 case OMP_CLAUSE_LASTPRIVATE:
2813 case OMP_CLAUSE_REDUCTION:
2814 break;
2815 default:
2816 continue;
2817 }
2818
2819 val = OMP_CLAUSE_DECL (c);
2820 var = lookup_decl_in_outer_ctx (val, ctx);
2821
2822 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2823 && is_global_var (var))
2824 continue;
2825 if (is_variable_sized (val))
2826 continue;
2827 by_ref = use_pointer_for_field (val, NULL);
2828
2829 switch (OMP_CLAUSE_CODE (c))
2830 {
2831 case OMP_CLAUSE_PRIVATE:
2832 case OMP_CLAUSE_FIRSTPRIVATE:
2833 case OMP_CLAUSE_COPYIN:
2834 do_in = true;
2835 break;
2836
2837 case OMP_CLAUSE_LASTPRIVATE:
2838 if (by_ref || is_reference (val))
2839 {
2840 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2841 continue;
2842 do_in = true;
2843 }
2844 else
2845 {
2846 do_out = true;
2847 if (lang_hooks.decls.omp_private_outer_ref (val))
2848 do_in = true;
2849 }
2850 break;
2851
2852 case OMP_CLAUSE_REDUCTION:
2853 do_in = true;
2854 do_out = !(by_ref || is_reference (val));
2855 break;
2856
2857 default:
2858 gcc_unreachable ();
2859 }
2860
2861 if (do_in)
2862 {
2863 ref = build_sender_ref (val, ctx);
2864 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2865 gimplify_assign (ref, x, ilist);
2866 if (is_task_ctx (ctx))
2867 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2868 }
2869
2870 if (do_out)
2871 {
2872 ref = build_sender_ref (val, ctx);
2873 gimplify_assign (var, ref, olist);
2874 }
2875 }
2876 }
2877
2878 /* Generate code to implement SHARED from the sender (aka parent)
2879 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2880 list things that got automatically shared. */
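/* Schematically, for a variable I shared by copy this emits

     .omp_data_o.i = i;      /* before the parallel (ILIST)  */
     ...
     i = .omp_data_o.i;      /* after the parallel (OLIST)   */

   while for one shared by reference only its address is sent:

     .omp_data_o.i = &i;  */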
2881
2882 static void
2883 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2884 {
2885 tree var, ovar, nvar, f, x, record_type;
2886
2887 if (ctx->record_type == NULL)
2888 return;
2889
2890 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2891 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2892 {
2893 ovar = DECL_ABSTRACT_ORIGIN (f);
2894 nvar = maybe_lookup_decl (ovar, ctx);
2895 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2896 continue;
2897
2898 /* If CTX is a nested parallel directive, find the immediately
2899 enclosing parallel or workshare construct that contains a
2900 mapping for OVAR. */
2901 var = lookup_decl_in_outer_ctx (ovar, ctx);
2902
2903 if (use_pointer_for_field (ovar, ctx))
2904 {
2905 x = build_sender_ref (ovar, ctx);
2906 var = build_fold_addr_expr (var);
2907 gimplify_assign (x, var, ilist);
2908 }
2909 else
2910 {
2911 x = build_sender_ref (ovar, ctx);
2912 gimplify_assign (x, var, ilist);
2913
2914 if (!TREE_READONLY (var)
2915 /* We don't need to receive a new reference to a result
2916 or parm decl. In fact we may not store to it as we will
2917 invalidate any pending RSO and generate wrong gimple
2918 during inlining. */
2919 && !((TREE_CODE (var) == RESULT_DECL
2920 || TREE_CODE (var) == PARM_DECL)
2921 && DECL_BY_REFERENCE (var)))
2922 {
2923 x = build_sender_ref (ovar, ctx);
2924 gimplify_assign (var, x, olist);
2925 }
2926 }
2927 }
2928 }
2929
2930
2931 /* A convenience function to build an empty GIMPLE_COND with just the
2932 condition. */
2933
2934 static gimple
2935 gimple_build_cond_empty (tree cond)
2936 {
2937 enum tree_code pred_code;
2938 tree lhs, rhs;
2939
2940 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2941 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2942 }
2943
2944
2945 /* Build the function calls to GOMP_parallel_start etc to actually
2946 generate the parallel operation. REGION is the parallel region
2947 being expanded. BB is the block where to insert the code. WS_ARGS
2948 will be set if this is a call to a combined parallel+workshare
2949 construct, it contains the list of additional arguments needed by
2950 the workshare construct. */
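/* Schematically, the code inserted at the end of BB is

     __builtin_GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
     child_fn (&.omp_data_o);
     __builtin_GOMP_parallel_end ();

   where num_threads is 0 unless a num_threads clause was given, and an
   if clause, when present, folds into the num_threads argument.  */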
2951
2952 static void
2953 expand_parallel_call (struct omp_region *region, basic_block bb,
2954 gimple entry_stmt, VEC(tree,gc) *ws_args)
2955 {
2956 tree t, t1, t2, val, cond, c, clauses;
2957 gimple_stmt_iterator gsi;
2958 gimple stmt;
2959 enum built_in_function start_ix;
2960 int start_ix2;
2961 location_t clause_loc;
2962 VEC(tree,gc) *args;
2963
2964 clauses = gimple_omp_parallel_clauses (entry_stmt);
2965
2966 /* Determine what flavor of GOMP_parallel_start we will be
2967 emitting. */
2968 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2969 if (is_combined_parallel (region))
2970 {
2971 switch (region->inner->type)
2972 {
2973 case GIMPLE_OMP_FOR:
2974 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2975 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2976 + (region->inner->sched_kind
2977 == OMP_CLAUSE_SCHEDULE_RUNTIME
2978 ? 3 : region->inner->sched_kind));
2979 start_ix = (enum built_in_function)start_ix2;
2980 break;
2981 case GIMPLE_OMP_SECTIONS:
2982 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2983 break;
2984 default:
2985 gcc_unreachable ();
2986 }
2987 }
2988
2989 /* By default, the value of NUM_THREADS is zero (selected at run time)
2990 and there is no conditional. */
2991 cond = NULL_TREE;
2992 val = build_int_cst (unsigned_type_node, 0);
2993
2994 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2995 if (c)
2996 cond = OMP_CLAUSE_IF_EXPR (c);
2997
2998 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2999 if (c)
3000 {
3001 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
3002 clause_loc = OMP_CLAUSE_LOCATION (c);
3003 }
3004 else
3005 clause_loc = gimple_location (entry_stmt);
3006
3007 /* Ensure 'val' is of the correct type. */
3008 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
3009
3010 /* If we found the clause 'if (cond)', build either
3011 (cond != 0) or (cond ? val : 1u). */
3012 if (cond)
3013 {
3014 gimple_stmt_iterator gsi;
3015
3016 cond = gimple_boolify (cond);
3017
3018 if (integer_zerop (val))
3019 val = fold_build2_loc (clause_loc,
3020 EQ_EXPR, unsigned_type_node, cond,
3021 build_int_cst (TREE_TYPE (cond), 0));
3022 else
3023 {
3024 basic_block cond_bb, then_bb, else_bb;
3025 edge e, e_then, e_else;
3026 tree tmp_then, tmp_else, tmp_join, tmp_var;
3027
3028 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
3029 if (gimple_in_ssa_p (cfun))
3030 {
3031 tmp_then = make_ssa_name (tmp_var, NULL);
3032 tmp_else = make_ssa_name (tmp_var, NULL);
3033 tmp_join = make_ssa_name (tmp_var, NULL);
3034 }
3035 else
3036 {
3037 tmp_then = tmp_var;
3038 tmp_else = tmp_var;
3039 tmp_join = tmp_var;
3040 }
3041
3042 e = split_block (bb, NULL);
3043 cond_bb = e->src;
3044 bb = e->dest;
3045 remove_edge (e);
3046
3047 then_bb = create_empty_bb (cond_bb);
3048 else_bb = create_empty_bb (then_bb);
3049 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3050 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3051
3052 stmt = gimple_build_cond_empty (cond);
3053 gsi = gsi_start_bb (cond_bb);
3054 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3055
3056 gsi = gsi_start_bb (then_bb);
3057 stmt = gimple_build_assign (tmp_then, val);
3058 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3059
3060 gsi = gsi_start_bb (else_bb);
3061 stmt = gimple_build_assign
3062 (tmp_else, build_int_cst (unsigned_type_node, 1));
3063 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3064
3065 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3066 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3067 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3068 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3069
3070 if (gimple_in_ssa_p (cfun))
3071 {
3072 gimple phi = create_phi_node (tmp_join, bb);
3073 SSA_NAME_DEF_STMT (tmp_join) = phi;
3074 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3075 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3076 }
3077
3078 val = tmp_join;
3079 }
3080
3081 gsi = gsi_start_bb (bb);
3082 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3083 false, GSI_CONTINUE_LINKING);
3084 }
3085
3086 gsi = gsi_last_bb (bb);
3087 t = gimple_omp_parallel_data_arg (entry_stmt);
3088 if (t == NULL)
3089 t1 = null_pointer_node;
3090 else
3091 t1 = build_fold_addr_expr (t);
3092 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3093
3094 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3095 VEC_quick_push (tree, args, t2);
3096 VEC_quick_push (tree, args, t1);
3097 VEC_quick_push (tree, args, val);
3098 VEC_splice (tree, args, ws_args);
3099
3100 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3101 builtin_decl_explicit (start_ix), args);
3102
3103 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3104 false, GSI_CONTINUE_LINKING);
3105
3106 t = gimple_omp_parallel_data_arg (entry_stmt);
3107 if (t == NULL)
3108 t = null_pointer_node;
3109 else
3110 t = build_fold_addr_expr (t);
3111 t = build_call_expr_loc (gimple_location (entry_stmt),
3112 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3113 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3114 false, GSI_CONTINUE_LINKING);
3115
3116 t = build_call_expr_loc (gimple_location (entry_stmt),
3117 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3118 0);
3119 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3120 false, GSI_CONTINUE_LINKING);
3121 }
3122
3123
3124 /* Build the function call to GOMP_task to actually
3125 generate the task operation. BB is the block where to insert the code. */
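/* Schematically, the call emitted at the end of BB is

     __builtin_GOMP_task (child_fn, &.omp_data_o, cpyfn,
                          arg_size, arg_align, if_cond, flags);

   where flags encodes the untied (1), final (2) and mergeable (4)
   clauses.  */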
3126
3127 static void
3128 expand_task_call (basic_block bb, gimple entry_stmt)
3129 {
3130 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
3131 gimple_stmt_iterator gsi;
3132 location_t loc = gimple_location (entry_stmt);
3133
3134 clauses = gimple_omp_task_clauses (entry_stmt);
3135
3136 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3137 if (c)
3138 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3139 else
3140 cond = boolean_true_node;
3141
3142 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3143 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3144 flags = build_int_cst (unsigned_type_node,
3145 (c ? 1 : 0) + (c2 ? 4 : 0));
3146
3147 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3148 if (c)
3149 {
3150 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3151 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3152 build_int_cst (unsigned_type_node, 2),
3153 build_int_cst (unsigned_type_node, 0));
3154 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3155 }
3156
3157 gsi = gsi_last_bb (bb);
3158 t = gimple_omp_task_data_arg (entry_stmt);
3159 if (t == NULL)
3160 t2 = null_pointer_node;
3161 else
3162 t2 = build_fold_addr_expr_loc (loc, t);
3163 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3164 t = gimple_omp_task_copy_fn (entry_stmt);
3165 if (t == NULL)
3166 t3 = null_pointer_node;
3167 else
3168 t3 = build_fold_addr_expr_loc (loc, t);
3169
3170 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3171 7, t1, t2, t3,
3172 gimple_omp_task_arg_size (entry_stmt),
3173 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3174
3175 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3176 false, GSI_CONTINUE_LINKING);
3177 }
3178
3179
3180 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3181 catch handler and return it. This prevents programs from violating the
3182 structured block semantics with throws. */
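/* That is, BODY is rewrapped, roughly, as

     try { BODY } catch <MUST_NOT_THROW: std::terminate () or
                         __builtin_trap ()>

   so exceptions cannot propagate out of the OpenMP structured block.  */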
3183
3184 static gimple_seq
3185 maybe_catch_exception (gimple_seq body)
3186 {
3187 gimple g;
3188 tree decl;
3189
3190 if (!flag_exceptions)
3191 return body;
3192
3193 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3194 decl = lang_hooks.eh_protect_cleanup_actions ();
3195 else
3196 decl = builtin_decl_explicit (BUILT_IN_TRAP);
3197
3198 g = gimple_build_eh_must_not_throw (decl);
3199 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3200 GIMPLE_TRY_CATCH);
3201
3202 return gimple_seq_alloc_with_stmt (g);
3203 }
3204
3205 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3206
3207 static tree
3208 vec2chain (VEC(tree,gc) *v)
3209 {
3210 tree chain = NULL_TREE, t;
3211 unsigned ix;
3212
3213 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
3214 {
3215 DECL_CHAIN (t) = chain;
3216 chain = t;
3217 }
3218
3219 return chain;
3220 }
3221
3222
3223 /* Remove barriers in REGION->EXIT's block. Note that this is only
3224 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3225 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3226 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3227 removed. */
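/* E.g. in

     #pragma omp parallel
     {
       #pragma omp for
       for (i = 0; i < n; i++)
         body (i);
     }

   the barrier implied by the for construct is immediately followed by
   the implicit barrier at the end of the parallel, so (absent tasks that
   use addressable shared locals, see below) the former can be marked
   nowait.  body, i and n are placeholders.  */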
3228
3229 static void
3230 remove_exit_barrier (struct omp_region *region)
3231 {
3232 gimple_stmt_iterator gsi;
3233 basic_block exit_bb;
3234 edge_iterator ei;
3235 edge e;
3236 gimple stmt;
3237 int any_addressable_vars = -1;
3238
3239 exit_bb = region->exit;
3240
3241 /* If the parallel region doesn't return, we don't have REGION->EXIT
3242 block at all. */
3243 if (! exit_bb)
3244 return;
3245
3246 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3247 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3248 statements that can appear in between are extremely limited -- no
3249 memory operations at all. Here, we allow nothing at all, so the
3250 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3251 gsi = gsi_last_bb (exit_bb);
3252 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3253 gsi_prev (&gsi);
3254 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3255 return;
3256
3257 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3258 {
3259 gsi = gsi_last_bb (e->src);
3260 if (gsi_end_p (gsi))
3261 continue;
3262 stmt = gsi_stmt (gsi);
3263 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3264 && !gimple_omp_return_nowait_p (stmt))
3265 {
3266 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3267 in many cases. If there could be tasks queued, the barrier
3268 might be needed to let the tasks run before some local
3269 variable of the parallel that the task uses as shared
3270 runs out of scope. The task can be spawned either
3271 from within the current function (this would be easy to check)
3272 or from some function it calls and gets passed an address
3273 of such a variable. */
3274 if (any_addressable_vars < 0)
3275 {
3276 gimple parallel_stmt = last_stmt (region->entry);
3277 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3278 tree local_decls, block, decl;
3279 unsigned ix;
3280
3281 any_addressable_vars = 0;
3282 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3283 if (TREE_ADDRESSABLE (decl))
3284 {
3285 any_addressable_vars = 1;
3286 break;
3287 }
3288 for (block = gimple_block (stmt);
3289 !any_addressable_vars
3290 && block
3291 && TREE_CODE (block) == BLOCK;
3292 block = BLOCK_SUPERCONTEXT (block))
3293 {
3294 for (local_decls = BLOCK_VARS (block);
3295 local_decls;
3296 local_decls = DECL_CHAIN (local_decls))
3297 if (TREE_ADDRESSABLE (local_decls))
3298 {
3299 any_addressable_vars = 1;
3300 break;
3301 }
3302 if (block == gimple_block (parallel_stmt))
3303 break;
3304 }
3305 }
3306 if (!any_addressable_vars)
3307 gimple_omp_return_set_nowait (stmt);
3308 }
3309 }
3310 }
3311
3312 static void
3313 remove_exit_barriers (struct omp_region *region)
3314 {
3315 if (region->type == GIMPLE_OMP_PARALLEL)
3316 remove_exit_barrier (region);
3317
3318 if (region->inner)
3319 {
3320 region = region->inner;
3321 remove_exit_barriers (region);
3322 while (region->next)
3323 {
3324 region = region->next;
3325 remove_exit_barriers (region);
3326 }
3327 }
3328 }
3329
3330 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3331 calls. These can't be declared as const functions, but
3332 within one parallel body they are constant, so they can be
3333 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3334 which are declared const. Similarly for a task body, except
3335 that in an untied task omp_get_thread_num () can change at any task
3336 scheduling point. */
3337
3338 static void
3339 optimize_omp_library_calls (gimple entry_stmt)
3340 {
3341 basic_block bb;
3342 gimple_stmt_iterator gsi;
3343 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3344 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3345 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3346 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
3347 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3348 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3349 OMP_CLAUSE_UNTIED) != NULL);
3350
3351 FOR_EACH_BB (bb)
3352 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3353 {
3354 gimple call = gsi_stmt (gsi);
3355 tree decl;
3356
3357 if (is_gimple_call (call)
3358 && (decl = gimple_call_fndecl (call))
3359 && DECL_EXTERNAL (decl)
3360 && TREE_PUBLIC (decl)
3361 && DECL_INITIAL (decl) == NULL)
3362 {
3363 tree built_in;
3364
3365 if (DECL_NAME (decl) == thr_num_id)
3366 {
3367 /* In #pragma omp task untied omp_get_thread_num () can change
3368 during the execution of the task region. */
3369 if (untied_task)
3370 continue;
3371 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3372 }
3373 else if (DECL_NAME (decl) == num_thr_id)
3374 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3375 else
3376 continue;
3377
3378 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3379 || gimple_call_num_args (call) != 0)
3380 continue;
3381
3382 if (flag_exceptions && !TREE_NOTHROW (decl))
3383 continue;
3384
3385 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3386 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3387 TREE_TYPE (TREE_TYPE (built_in))))
3388 continue;
3389
3390 gimple_call_set_fndecl (call, built_in);
3391 }
3392 }
3393 }
3394
3395 /* Expand the OpenMP parallel or task directive starting at REGION. */
3396
3397 static void
3398 expand_omp_taskreg (struct omp_region *region)
3399 {
3400 basic_block entry_bb, exit_bb, new_bb;
3401 struct function *child_cfun;
3402 tree child_fn, block, t;
3403 tree save_current;
3404 gimple_stmt_iterator gsi;
3405 gimple entry_stmt, stmt;
3406 edge e;
3407 VEC(tree,gc) *ws_args;
3408
3409 entry_stmt = last_stmt (region->entry);
3410 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3411 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3412 /* If this function has already been instrumented, make sure
3413 the child function isn't instrumented again. */
3414 child_cfun->after_tree_profile = cfun->after_tree_profile;
3415
3416 entry_bb = region->entry;
3417 exit_bb = region->exit;
3418
3419 if (is_combined_parallel (region))
3420 ws_args = region->ws_args;
3421 else
3422 ws_args = NULL;
3423
3424 if (child_cfun->cfg)
3425 {
3426 /* Due to inlining, it may happen that we have already outlined
3427 the region, in which case all we need to do is make the
3428 sub-graph unreachable and emit the parallel call. */
3429 edge entry_succ_e, exit_succ_e;
3430 gimple_stmt_iterator gsi;
3431
3432 entry_succ_e = single_succ_edge (entry_bb);
3433
3434 gsi = gsi_last_bb (entry_bb);
3435 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3436 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3437 gsi_remove (&gsi, true);
3438
3439 new_bb = entry_bb;
3440 if (exit_bb)
3441 {
3442 exit_succ_e = single_succ_edge (exit_bb);
3443 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3444 }
3445 remove_edge_and_dominated_blocks (entry_succ_e);
3446 }
3447 else
3448 {
3449 unsigned srcidx, dstidx, num;
3450
3451 /* If the parallel region needs data sent from the parent
3452 function, then the very first statement (except possible
3453 tree profile counter updates) of the parallel body
3454 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3455 &.OMP_DATA_O is passed as an argument to the child function,
3456 we need to replace it with the argument as seen by the child
3457 function.
3458
3459 In most cases, this will end up being the identity assignment
3460 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3461 a function call that has been inlined, the original PARM_DECL
3462 .OMP_DATA_I may have been converted into a different local
3463 variable. In which case, we need to keep the assignment. */
3464 if (gimple_omp_taskreg_data_arg (entry_stmt))
3465 {
3466 basic_block entry_succ_bb = single_succ (entry_bb);
3467 gimple_stmt_iterator gsi;
3468 tree arg, narg;
3469 gimple parcopy_stmt = NULL;
3470
3471 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3472 {
3473 gimple stmt;
3474
3475 gcc_assert (!gsi_end_p (gsi));
3476 stmt = gsi_stmt (gsi);
3477 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3478 continue;
3479
3480 if (gimple_num_ops (stmt) == 2)
3481 {
3482 tree arg = gimple_assign_rhs1 (stmt);
3483
3484 /* We're ignoring the subcode because we're
3485 effectively doing a STRIP_NOPS. */
3486
3487 if (TREE_CODE (arg) == ADDR_EXPR
3488 && TREE_OPERAND (arg, 0)
3489 == gimple_omp_taskreg_data_arg (entry_stmt))
3490 {
3491 parcopy_stmt = stmt;
3492 break;
3493 }
3494 }
3495 }
3496
3497 gcc_assert (parcopy_stmt != NULL);
3498 arg = DECL_ARGUMENTS (child_fn);
3499
3500 if (!gimple_in_ssa_p (cfun))
3501 {
3502 if (gimple_assign_lhs (parcopy_stmt) == arg)
3503 gsi_remove (&gsi, true);
3504 else
3505 {
3506 /* ?? Is setting the subcode really necessary ?? */
3507 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3508 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3509 }
3510 }
3511 else
3512 {
3513 /* If we are in ssa form, we must load the value from the default
3514 definition of the argument. That should not be defined now,
3515 since the argument is not used uninitialized. */
3516 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3517 narg = make_ssa_name (arg, gimple_build_nop ());
3518 set_default_def (arg, narg);
3519 /* ?? Is setting the subcode really necessary ?? */
3520 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3521 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3522 update_stmt (parcopy_stmt);
3523 }
3524 }
3525
3526 /* Declare local variables needed in CHILD_CFUN. */
3527 block = DECL_INITIAL (child_fn);
3528 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3529 /* The gimplifier could record temporaries in the parallel/task block
3530 rather than in the containing function's local_decls chain,
3531 which would mean cgraph would miss finalizing them. Do it now. */
3532 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3533 if (TREE_CODE (t) == VAR_DECL
3534 && TREE_STATIC (t)
3535 && !DECL_EXTERNAL (t))
3536 varpool_finalize_decl (t);
3537 DECL_SAVED_TREE (child_fn) = NULL;
3538 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3539 TREE_USED (block) = 1;
3540
3541 /* Reset DECL_CONTEXT on function arguments. */
3542 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3543 DECL_CONTEXT (t) = child_fn;
3544
3545 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3546 so that it can be moved to the child function. */
3547 gsi = gsi_last_bb (entry_bb);
3548 stmt = gsi_stmt (gsi);
3549 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3550 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3551 gsi_remove (&gsi, true);
3552 e = split_block (entry_bb, stmt);
3553 entry_bb = e->dest;
3554 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3555
3556 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3557 if (exit_bb)
3558 {
3559 gsi = gsi_last_bb (exit_bb);
3560 gcc_assert (!gsi_end_p (gsi)
3561 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3562 stmt = gimple_build_return (NULL);
3563 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3564 gsi_remove (&gsi, true);
3565 }
3566
3567 /* Move the parallel region into CHILD_CFUN. */
3568
3569 if (gimple_in_ssa_p (cfun))
3570 {
3571 push_cfun (child_cfun);
3572 init_tree_ssa (child_cfun);
3573 init_ssa_operands ();
3574 cfun->gimple_df->in_ssa_p = true;
3575 pop_cfun ();
3576 block = NULL_TREE;
3577 }
3578 else
3579 block = gimple_block (entry_stmt);
3580
3581 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3582 if (exit_bb)
3583 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3584
3585 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3586 num = VEC_length (tree, child_cfun->local_decls);
3587 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3588 {
3589 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3590 if (DECL_CONTEXT (t) == cfun->decl)
3591 continue;
3592 if (srcidx != dstidx)
3593 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3594 dstidx++;
3595 }
3596 if (dstidx != num)
3597 VEC_truncate (tree, child_cfun->local_decls, dstidx);
3598
3599 /* Inform the callgraph about the new function. */
3600 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3601 = cfun->curr_properties;
3602 cgraph_add_new_function (child_fn, true);
3603
3604 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3605 fixed in a following pass. */
3606 push_cfun (child_cfun);
3607 save_current = current_function_decl;
3608 current_function_decl = child_fn;
3609 if (optimize)
3610 optimize_omp_library_calls (entry_stmt);
3611 rebuild_cgraph_edges ();
3612
3613 /* Some EH regions might become dead, see PR34608. If
3614 pass_cleanup_cfg isn't the first pass to happen with the
3615 new child, these dead EH edges might cause problems.
3616 Clean them up now. */
3617 if (flag_exceptions)
3618 {
3619 basic_block bb;
3620 bool changed = false;
3621
3622 FOR_EACH_BB (bb)
3623 changed |= gimple_purge_dead_eh_edges (bb);
3624 if (changed)
3625 cleanup_tree_cfg ();
3626 }
3627 if (gimple_in_ssa_p (cfun))
3628 update_ssa (TODO_update_ssa);
3629 current_function_decl = save_current;
3630 pop_cfun ();
3631 }
3632
3633 /* Emit a library call to launch the children threads. */
3634 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3635 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3636 else
3637 expand_task_call (new_bb, entry_stmt);
3638 update_ssa (TODO_update_ssa_only_virtuals);
3639 }
3640
3641
3642 /* A subroutine of expand_omp_for. Generate code for a parallel
3643 loop with any schedule. Given parameters:
3644
3645 for (V = N1; V cond N2; V += STEP) BODY;
3646
3647 where COND is "<" or ">", we generate pseudocode
3648
3649 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3650 if (more) goto L0; else goto L3;
3651 L0:
3652 V = istart0;
3653 iend = iend0;
3654 L1:
3655 BODY;
3656 V += STEP;
3657 if (V cond iend) goto L1; else goto L2;
3658 L2:
3659 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3660 L3:
3661
3662 If this is a combined omp parallel loop, instead of the call to
3663 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3664
3665 For collapsed loops, given parameters:
3666 collapse(3)
3667 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3668 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3669 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3670 BODY;
3671
3672 we generate pseudocode
3673
3674 if (cond3 is <)
3675 adj = STEP3 - 1;
3676 else
3677 adj = STEP3 + 1;
3678 count3 = (adj + N32 - N31) / STEP3;
3679 if (cond2 is <)
3680 adj = STEP2 - 1;
3681 else
3682 adj = STEP2 + 1;
3683 count2 = (adj + N22 - N21) / STEP2;
3684 if (cond1 is <)
3685 adj = STEP1 - 1;
3686 else
3687 adj = STEP1 + 1;
3688 count1 = (adj + N12 - N11) / STEP1;
3689 count = count1 * count2 * count3;
3690 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3691 if (more) goto L0; else goto L3;
3692 L0:
3693 V = istart0;
3694 T = V;
3695 V3 = N31 + (T % count3) * STEP3;
3696 T = T / count3;
3697 V2 = N21 + (T % count2) * STEP2;
3698 T = T / count2;
3699 V1 = N11 + T * STEP1;
3700 iend = iend0;
3701 L1:
3702 BODY;
3703 V += 1;
3704 if (V < iend) goto L10; else goto L2;
3705 L10:
3706 V3 += STEP3;
3707 if (V3 cond3 N32) goto L1; else goto L11;
3708 L11:
3709 V3 = N31;
3710 V2 += STEP2;
3711 if (V2 cond2 N22) goto L1; else goto L12;
3712 L12:
3713 V2 = N21;
3714 V1 += STEP1;
3715 goto L1;
3716 L2:
3717 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3718 L3:
3719
3720 */
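
/* An illustrative sketch of the "foo" placeholder above (assuming a
   dynamically scheduled loop): a directive such as

	#pragma omp for schedule(dynamic, 4)
	for (i = 0; i < n; i++)
	  body (i);

   is lowered with the dynamic variants of the loop builtins, so the calls
   become roughly

	more = GOMP_loop_dynamic_start (0, n, 1, 4, &istart0, &iend0);
	...
	if (GOMP_loop_dynamic_next (&istart0, &iend0)) goto L0; else goto L3;

   each call handing the calling thread the next chunk of at most 4
   iterations.  The concrete builtin pair is selected by expand_omp_for.  */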
3721
3722 static void
3723 expand_omp_for_generic (struct omp_region *region,
3724 struct omp_for_data *fd,
3725 enum built_in_function start_fn,
3726 enum built_in_function next_fn)
3727 {
3728 tree type, istart0, iend0, iend;
3729 tree t, vmain, vback, bias = NULL_TREE;
3730 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3731 basic_block l2_bb = NULL, l3_bb = NULL;
3732 gimple_stmt_iterator gsi;
3733 gimple stmt;
3734 bool in_combined_parallel = is_combined_parallel (region);
3735 bool broken_loop = region->cont == NULL;
3736 edge e, ne;
3737 tree *counts = NULL;
3738 int i;
3739
3740 gcc_assert (!broken_loop || !in_combined_parallel);
3741 gcc_assert (fd->iter_type == long_integer_type_node
3742 || !in_combined_parallel);
3743
3744 type = TREE_TYPE (fd->loop.v);
3745 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3746 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3747 TREE_ADDRESSABLE (istart0) = 1;
3748 TREE_ADDRESSABLE (iend0) = 1;
3749 if (gimple_in_ssa_p (cfun))
3750 {
3751 add_referenced_var (istart0);
3752 add_referenced_var (iend0);
3753 }
3754
3755 /* See if we need to bias by LLONG_MIN. */
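  /* Rationale (a brief note): the GOMP_loop_ull_* interface works on
     unsigned long long values, so the bounds of a signed iteration
     variable could be negative.  Adding TYPE_MIN_VALUE (type) maps the
     signed range monotonically into the unsigned range; the bias is
     subtracted again when istart0/iend0 are read back below.  When both
     bounds are compile-time constants of the same sign no bias is needed,
     since their unsigned images already compare in the same order.  */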
3756 if (fd->iter_type == long_long_unsigned_type_node
3757 && TREE_CODE (type) == INTEGER_TYPE
3758 && !TYPE_UNSIGNED (type))
3759 {
3760 tree n1, n2;
3761
3762 if (fd->loop.cond_code == LT_EXPR)
3763 {
3764 n1 = fd->loop.n1;
3765 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3766 }
3767 else
3768 {
3769 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3770 n2 = fd->loop.n1;
3771 }
3772 if (TREE_CODE (n1) != INTEGER_CST
3773 || TREE_CODE (n2) != INTEGER_CST
3774 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3775 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3776 }
3777
3778 entry_bb = region->entry;
3779 cont_bb = region->cont;
3780 collapse_bb = NULL;
3781 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3782 gcc_assert (broken_loop
3783 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3784 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3785 l1_bb = single_succ (l0_bb);
3786 if (!broken_loop)
3787 {
3788 l2_bb = create_empty_bb (cont_bb);
3789 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3790 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3791 }
3792 else
3793 l2_bb = NULL;
3794 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3795 exit_bb = region->exit;
3796
3797 gsi = gsi_last_bb (entry_bb);
3798
3799 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3800 if (fd->collapse > 1)
3801 {
3802 /* collapsed loops need work for expansion in SSA form. */
3803 gcc_assert (!gimple_in_ssa_p (cfun));
3804 counts = (tree *) alloca (fd->collapse * sizeof (tree));
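      /* Each counts[i] computed below is the iteration count of the i-th
	 nested loop: (STEP - 1 + N2 - N1) / STEP for a < loop (and the
	 analogous form for >), i.e. a rounded-up division.  For example,
	 "for (i = 0; i < 10; i += 3)" gives (3 - 1 + 10 - 0) / 3 = 4
	 iterations.  */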
3805 for (i = 0; i < fd->collapse; i++)
3806 {
3807 tree itype = TREE_TYPE (fd->loops[i].v);
3808
3809 if (POINTER_TYPE_P (itype))
3810 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3811 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3812 ? -1 : 1));
3813 t = fold_build2 (PLUS_EXPR, itype,
3814 fold_convert (itype, fd->loops[i].step), t);
3815 t = fold_build2 (PLUS_EXPR, itype, t,
3816 fold_convert (itype, fd->loops[i].n2));
3817 t = fold_build2 (MINUS_EXPR, itype, t,
3818 fold_convert (itype, fd->loops[i].n1));
3819 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3820 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3821 fold_build1 (NEGATE_EXPR, itype, t),
3822 fold_build1 (NEGATE_EXPR, itype,
3823 fold_convert (itype,
3824 fd->loops[i].step)));
3825 else
3826 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3827 fold_convert (itype, fd->loops[i].step));
3828 t = fold_convert (type, t);
3829 if (TREE_CODE (t) == INTEGER_CST)
3830 counts[i] = t;
3831 else
3832 {
3833 counts[i] = create_tmp_var (type, ".count");
3834 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3835 true, GSI_SAME_STMT);
3836 stmt = gimple_build_assign (counts[i], t);
3837 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3838 }
3839 if (SSA_VAR_P (fd->loop.n2))
3840 {
3841 if (i == 0)
3842 t = counts[0];
3843 else
3844 {
3845 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3846 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3847 true, GSI_SAME_STMT);
3848 }
3849 stmt = gimple_build_assign (fd->loop.n2, t);
3850 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3851 }
3852 }
3853 }
3854 if (in_combined_parallel)
3855 {
3856 /* In a combined parallel loop, emit a call to
3857 GOMP_loop_foo_next. */
3858 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
3859 build_fold_addr_expr (istart0),
3860 build_fold_addr_expr (iend0));
3861 }
3862 else
3863 {
3864 tree t0, t1, t2, t3, t4;
3865 /* If this is not a combined parallel loop, emit a call to
3866 GOMP_loop_foo_start in ENTRY_BB. */
3867 t4 = build_fold_addr_expr (iend0);
3868 t3 = build_fold_addr_expr (istart0);
3869 t2 = fold_convert (fd->iter_type, fd->loop.step);
3870 if (POINTER_TYPE_P (type)
3871 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3872 {
3873 	  /* Avoid casting pointers to an integer of a different size.  */
3874 tree itype
3875 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3876 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3877 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3878 }
3879 else
3880 {
3881 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3882 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3883 }
3884 if (bias)
3885 {
3886 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3887 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3888 }
3889 if (fd->iter_type == long_integer_type_node)
3890 {
3891 if (fd->chunk_size)
3892 {
3893 t = fold_convert (fd->iter_type, fd->chunk_size);
3894 t = build_call_expr (builtin_decl_explicit (start_fn),
3895 6, t0, t1, t2, t, t3, t4);
3896 }
3897 else
3898 t = build_call_expr (builtin_decl_explicit (start_fn),
3899 5, t0, t1, t2, t3, t4);
3900 }
3901 else
3902 {
3903 tree t5;
3904 tree c_bool_type;
3905 tree bfn_decl;
3906
3907 	  /* The GOMP_loop_ull_*start functions have an additional boolean
3908 argument, true for < loops and false for > loops.
3909 In Fortran, the C bool type can be different from
3910 boolean_type_node. */
3911 bfn_decl = builtin_decl_explicit (start_fn);
3912 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
3913 t5 = build_int_cst (c_bool_type,
3914 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3915 if (fd->chunk_size)
3916 {
3917 tree bfn_decl = builtin_decl_explicit (start_fn);
3918 t = fold_convert (fd->iter_type, fd->chunk_size);
3919 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
3920 }
3921 else
3922 t = build_call_expr (builtin_decl_explicit (start_fn),
3923 6, t5, t0, t1, t2, t3, t4);
3924 }
3925 }
3926 if (TREE_TYPE (t) != boolean_type_node)
3927 t = fold_build2 (NE_EXPR, boolean_type_node,
3928 t, build_int_cst (TREE_TYPE (t), 0));
3929 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3930 true, GSI_SAME_STMT);
3931 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3932
3933 /* Remove the GIMPLE_OMP_FOR statement. */
3934 gsi_remove (&gsi, true);
3935
3936 /* Iteration setup for sequential loop goes in L0_BB. */
3937 gsi = gsi_start_bb (l0_bb);
3938 t = istart0;
3939 if (bias)
3940 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3941 if (POINTER_TYPE_P (type))
3942 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3943 0), t);
3944 t = fold_convert (type, t);
3945 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3946 false, GSI_CONTINUE_LINKING);
3947 stmt = gimple_build_assign (fd->loop.v, t);
3948 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3949
3950 t = iend0;
3951 if (bias)
3952 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3953 if (POINTER_TYPE_P (type))
3954 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3955 0), t);
3956 t = fold_convert (type, t);
3957 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3958 false, GSI_CONTINUE_LINKING);
3959 if (fd->collapse > 1)
3960 {
3961 tree tem = create_tmp_var (type, ".tem");
3962
3963 stmt = gimple_build_assign (tem, fd->loop.v);
3964 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3965 for (i = fd->collapse - 1; i >= 0; i--)
3966 {
3967 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3968 itype = vtype;
3969 if (POINTER_TYPE_P (vtype))
3970 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3971 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3972 t = fold_convert (itype, t);
3973 t = fold_build2 (MULT_EXPR, itype, t,
3974 fold_convert (itype, fd->loops[i].step));
3975 if (POINTER_TYPE_P (vtype))
3976 t = fold_build_pointer_plus (fd->loops[i].n1, t);
3977 else
3978 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3979 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3980 false, GSI_CONTINUE_LINKING);
3981 stmt = gimple_build_assign (fd->loops[i].v, t);
3982 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3983 if (i != 0)
3984 {
3985 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3986 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3987 false, GSI_CONTINUE_LINKING);
3988 stmt = gimple_build_assign (tem, t);
3989 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3990 }
3991 }
3992 }
3993
3994 if (!broken_loop)
3995 {
3996 /* Code to control the increment and predicate for the sequential
3997 loop goes in the CONT_BB. */
3998 gsi = gsi_last_bb (cont_bb);
3999 stmt = gsi_stmt (gsi);
4000 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4001 vmain = gimple_omp_continue_control_use (stmt);
4002 vback = gimple_omp_continue_control_def (stmt);
4003
4004 if (POINTER_TYPE_P (type))
4005 t = fold_build_pointer_plus (vmain, fd->loop.step);
4006 else
4007 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4008 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4009 true, GSI_SAME_STMT);
4010 stmt = gimple_build_assign (vback, t);
4011 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4012
4013 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
4014 stmt = gimple_build_cond_empty (t);
4015 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4016
4017 /* Remove GIMPLE_OMP_CONTINUE. */
4018 gsi_remove (&gsi, true);
4019
4020 if (fd->collapse > 1)
4021 {
4022 basic_block last_bb, bb;
4023
4024 last_bb = cont_bb;
4025 for (i = fd->collapse - 1; i >= 0; i--)
4026 {
4027 tree vtype = TREE_TYPE (fd->loops[i].v);
4028
4029 bb = create_empty_bb (last_bb);
4030 gsi = gsi_start_bb (bb);
4031
4032 if (i < fd->collapse - 1)
4033 {
4034 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
4035 e->probability = REG_BR_PROB_BASE / 8;
4036
4037 t = fd->loops[i + 1].n1;
4038 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4039 false, GSI_CONTINUE_LINKING);
4040 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4041 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4042 }
4043 else
4044 collapse_bb = bb;
4045
4046 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4047
4048 if (POINTER_TYPE_P (vtype))
4049 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4050 else
4051 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
4052 fd->loops[i].step);
4053 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4054 false, GSI_CONTINUE_LINKING);
4055 stmt = gimple_build_assign (fd->loops[i].v, t);
4056 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4057
4058 if (i > 0)
4059 {
4060 t = fd->loops[i].n2;
4061 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4062 false, GSI_CONTINUE_LINKING);
4063 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4064 fd->loops[i].v, t);
4065 stmt = gimple_build_cond_empty (t);
4066 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4067 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4068 e->probability = REG_BR_PROB_BASE * 7 / 8;
4069 }
4070 else
4071 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4072 last_bb = bb;
4073 }
4074 }
4075
4076 /* Emit code to get the next parallel iteration in L2_BB. */
4077 gsi = gsi_start_bb (l2_bb);
4078
4079 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4080 build_fold_addr_expr (istart0),
4081 build_fold_addr_expr (iend0));
4082 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4083 false, GSI_CONTINUE_LINKING);
4084 if (TREE_TYPE (t) != boolean_type_node)
4085 t = fold_build2 (NE_EXPR, boolean_type_node,
4086 t, build_int_cst (TREE_TYPE (t), 0));
4087 stmt = gimple_build_cond_empty (t);
4088 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4089 }
4090
4091 /* Add the loop cleanup function. */
4092 gsi = gsi_last_bb (exit_bb);
4093 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4094 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
4095 else
4096 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
4097 stmt = gimple_build_call (t, 0);
4098 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4099 gsi_remove (&gsi, true);
4100
4101 /* Connect the new blocks. */
4102 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4103 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4104
4105 if (!broken_loop)
4106 {
4107 gimple_seq phis;
4108
4109 e = find_edge (cont_bb, l3_bb);
4110 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4111
4112 phis = phi_nodes (l3_bb);
4113 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4114 {
4115 gimple phi = gsi_stmt (gsi);
4116 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4117 PHI_ARG_DEF_FROM_EDGE (phi, e));
4118 }
4119 remove_edge (e);
4120
4121 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4122 if (fd->collapse > 1)
4123 {
4124 e = find_edge (cont_bb, l1_bb);
4125 remove_edge (e);
4126 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4127 }
4128 else
4129 {
4130 e = find_edge (cont_bb, l1_bb);
4131 e->flags = EDGE_TRUE_VALUE;
4132 }
4133 e->probability = REG_BR_PROB_BASE * 7 / 8;
4134 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4135 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4136
4137 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4138 recompute_dominator (CDI_DOMINATORS, l2_bb));
4139 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4140 recompute_dominator (CDI_DOMINATORS, l3_bb));
4141 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4142 recompute_dominator (CDI_DOMINATORS, l0_bb));
4143 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4144 recompute_dominator (CDI_DOMINATORS, l1_bb));
4145 }
4146 }
4147
4148
4149 /* A subroutine of expand_omp_for. Generate code for a parallel
4150 loop with static schedule and no specified chunk size. Given
4151 parameters:
4152
4153 for (V = N1; V cond N2; V += STEP) BODY;
4154
4155 where COND is "<" or ">", we generate pseudocode
4156
4157 if (cond is <)
4158 adj = STEP - 1;
4159 else
4160 adj = STEP + 1;
4161 if ((__typeof (V)) -1 > 0 && cond is >)
4162 n = -(adj + N2 - N1) / -STEP;
4163 else
4164 n = (adj + N2 - N1) / STEP;
4165 q = n / nthreads;
4166 tt = n % nthreads;
4167 if (threadid < tt) goto L3; else goto L4;
4168 L3:
4169 tt = 0;
4170 q = q + 1;
4171 L4:
4172 s0 = q * threadid + tt;
4173 e0 = s0 + q;
4174 V = s0 * STEP + N1;
4175 if (s0 >= e0) goto L2; else goto L0;
4176 L0:
4177 e = e0 * STEP + N1;
4178 L1:
4179 BODY;
4180 V += STEP;
4181 if (V cond e) goto L1;
4182 L2:
4183 */
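
/* A worked example of the partitioning above (illustrative only): with
   n = 103 iterations and nthreads = 4, q = 103 / 4 = 25 and
   tt = 103 % 4 = 3, so threads 0..2 each take q + 1 = 26 iterations
   ([0,26), [26,52), [52,78)) and thread 3 takes the remaining 25
   ([78,103)); the first TT threads absorb the remainder one iteration
   apiece.  */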
4184
4185 static void
4186 expand_omp_for_static_nochunk (struct omp_region *region,
4187 struct omp_for_data *fd)
4188 {
4189 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4190 tree type, itype, vmain, vback;
4191 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4192 basic_block body_bb, cont_bb;
4193 basic_block fin_bb;
4194 gimple_stmt_iterator gsi;
4195 gimple stmt;
4196 edge ep;
4197
4198 itype = type = TREE_TYPE (fd->loop.v);
4199 if (POINTER_TYPE_P (type))
4200 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4201
4202 entry_bb = region->entry;
4203 cont_bb = region->cont;
4204 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4205 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4206 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4207 body_bb = single_succ (seq_start_bb);
4208 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4209 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4210 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4211 exit_bb = region->exit;
4212
4213 /* Iteration space partitioning goes in ENTRY_BB. */
4214 gsi = gsi_last_bb (entry_bb);
4215 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4216
4217 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4218 t = fold_convert (itype, t);
4219 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4220 true, GSI_SAME_STMT);
4221
4222 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4223 t = fold_convert (itype, t);
4224 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4225 true, GSI_SAME_STMT);
4226
4227 fd->loop.n1
4228 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4229 true, NULL_TREE, true, GSI_SAME_STMT);
4230 fd->loop.n2
4231 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4232 true, NULL_TREE, true, GSI_SAME_STMT);
4233 fd->loop.step
4234 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4235 true, NULL_TREE, true, GSI_SAME_STMT);
4236
4237 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4238 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4239 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4240 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4241 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4242 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4243 fold_build1 (NEGATE_EXPR, itype, t),
4244 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4245 else
4246 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4247 t = fold_convert (itype, t);
4248 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4249
4250 q = create_tmp_var (itype, "q");
4251 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4252 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4253 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4254
4255 tt = create_tmp_var (itype, "tt");
4256 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4257 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4258 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4259
4260 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4261 stmt = gimple_build_cond_empty (t);
4262 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4263
4264 second_bb = split_block (entry_bb, stmt)->dest;
4265 gsi = gsi_last_bb (second_bb);
4266 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4267
4268 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4269 GSI_SAME_STMT);
4270 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4271 build_int_cst (itype, 1));
4272 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4273
4274 third_bb = split_block (second_bb, stmt)->dest;
4275 gsi = gsi_last_bb (third_bb);
4276 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4277
4278 t = build2 (MULT_EXPR, itype, q, threadid);
4279 t = build2 (PLUS_EXPR, itype, t, tt);
4280 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4281
4282 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4283 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4284
4285 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4286 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4287
4288 /* Remove the GIMPLE_OMP_FOR statement. */
4289 gsi_remove (&gsi, true);
4290
4291 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4292 gsi = gsi_start_bb (seq_start_bb);
4293
4294 t = fold_convert (itype, s0);
4295 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4296 if (POINTER_TYPE_P (type))
4297 t = fold_build_pointer_plus (fd->loop.n1, t);
4298 else
4299 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4300 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4301 false, GSI_CONTINUE_LINKING);
4302 stmt = gimple_build_assign (fd->loop.v, t);
4303 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4304
4305 t = fold_convert (itype, e0);
4306 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4307 if (POINTER_TYPE_P (type))
4308 t = fold_build_pointer_plus (fd->loop.n1, t);
4309 else
4310 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4311 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4312 false, GSI_CONTINUE_LINKING);
4313
4314 /* The code controlling the sequential loop replaces the
4315 GIMPLE_OMP_CONTINUE. */
4316 gsi = gsi_last_bb (cont_bb);
4317 stmt = gsi_stmt (gsi);
4318 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4319 vmain = gimple_omp_continue_control_use (stmt);
4320 vback = gimple_omp_continue_control_def (stmt);
4321
4322 if (POINTER_TYPE_P (type))
4323 t = fold_build_pointer_plus (vmain, fd->loop.step);
4324 else
4325 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4326 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4327 true, GSI_SAME_STMT);
4328 stmt = gimple_build_assign (vback, t);
4329 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4330
4331 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4332 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4333
4334 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4335 gsi_remove (&gsi, true);
4336
4337 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4338 gsi = gsi_last_bb (exit_bb);
4339 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4340 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4341 false, GSI_SAME_STMT);
4342 gsi_remove (&gsi, true);
4343
4344 /* Connect all the blocks. */
4345 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4346 ep->probability = REG_BR_PROB_BASE / 4 * 3;
4347 ep = find_edge (entry_bb, second_bb);
4348 ep->flags = EDGE_TRUE_VALUE;
4349 ep->probability = REG_BR_PROB_BASE / 4;
4350 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4351 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4352
4353 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4354 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4355
4356 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4357 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4358 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
4359 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4360 recompute_dominator (CDI_DOMINATORS, body_bb));
4361 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4362 recompute_dominator (CDI_DOMINATORS, fin_bb));
4363 }
4364
4365
4366 /* A subroutine of expand_omp_for. Generate code for a parallel
4367 loop with static schedule and a specified chunk size. Given
4368 parameters:
4369
4370 for (V = N1; V cond N2; V += STEP) BODY;
4371
4372 where COND is "<" or ">", we generate pseudocode
4373
4374 if (cond is <)
4375 adj = STEP - 1;
4376 else
4377 adj = STEP + 1;
4378 if ((__typeof (V)) -1 > 0 && cond is >)
4379 n = -(adj + N2 - N1) / -STEP;
4380 else
4381 n = (adj + N2 - N1) / STEP;
4382 trip = 0;
4383 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4384 here so that V is defined
4385 if the loop is not entered
4386 L0:
4387 s0 = (trip * nthreads + threadid) * CHUNK;
4388 e0 = min(s0 + CHUNK, n);
4389 if (s0 < n) goto L1; else goto L4;
4390 L1:
4391 V = s0 * STEP + N1;
4392 e = e0 * STEP + N1;
4393 L2:
4394 BODY;
4395 V += STEP;
4396 if (V cond e) goto L2; else goto L3;
4397 L3:
4398 trip += 1;
4399 goto L0;
4400 L4:
4401 */
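
/* A worked example of the chunked partitioning above (illustrative
   only): with nthreads = 4, CHUNK = 8 and n = 50, thread 2 gets
   s0 = (0*4 + 2) * 8 = 16, e0 = min (24, 50) = 24 on trip 0,
   s0 = (1*4 + 2) * 8 = 48, e0 = min (56, 50) = 50 on trip 1, and finds
   s0 = 80 >= n on trip 2, so it exits through L4.  */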
4402
4403 static void
4404 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4405 {
4406 tree n, s0, e0, e, t;
4407 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4408 tree type, itype, v_main, v_back, v_extra;
4409 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4410 basic_block trip_update_bb, cont_bb, fin_bb;
4411 gimple_stmt_iterator si;
4412 gimple stmt;
4413 edge se;
4414
4415 itype = type = TREE_TYPE (fd->loop.v);
4416 if (POINTER_TYPE_P (type))
4417 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4418
4419 entry_bb = region->entry;
4420 se = split_block (entry_bb, last_stmt (entry_bb));
4421 entry_bb = se->src;
4422 iter_part_bb = se->dest;
4423 cont_bb = region->cont;
4424 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4425 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4426 == FALLTHRU_EDGE (cont_bb)->dest);
4427 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4428 body_bb = single_succ (seq_start_bb);
4429 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4430 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4431 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4432 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4433 exit_bb = region->exit;
4434
4435 /* Trip and adjustment setup goes in ENTRY_BB. */
4436 si = gsi_last_bb (entry_bb);
4437 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4438
4439 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4440 t = fold_convert (itype, t);
4441 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4442 true, GSI_SAME_STMT);
4443
4444 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4445 t = fold_convert (itype, t);
4446 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4447 true, GSI_SAME_STMT);
4448
4449 fd->loop.n1
4450 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4451 true, NULL_TREE, true, GSI_SAME_STMT);
4452 fd->loop.n2
4453 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4454 true, NULL_TREE, true, GSI_SAME_STMT);
4455 fd->loop.step
4456 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4457 true, NULL_TREE, true, GSI_SAME_STMT);
4458 fd->chunk_size
4459 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4460 true, NULL_TREE, true, GSI_SAME_STMT);
4461
4462 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4463 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4464 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4465 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4466 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4467 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4468 fold_build1 (NEGATE_EXPR, itype, t),
4469 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4470 else
4471 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4472 t = fold_convert (itype, t);
4473 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4474 true, GSI_SAME_STMT);
4475
4476 trip_var = create_tmp_var (itype, ".trip");
4477 if (gimple_in_ssa_p (cfun))
4478 {
4479 add_referenced_var (trip_var);
4480 trip_init = make_ssa_name (trip_var, NULL);
4481 trip_main = make_ssa_name (trip_var, NULL);
4482 trip_back = make_ssa_name (trip_var, NULL);
4483 }
4484 else
4485 {
4486 trip_init = trip_var;
4487 trip_main = trip_var;
4488 trip_back = trip_var;
4489 }
4490
4491 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4492 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4493
4494 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4495 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4496 if (POINTER_TYPE_P (type))
4497 t = fold_build_pointer_plus (fd->loop.n1, t);
4498 else
4499 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4500 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4501 true, GSI_SAME_STMT);
4502
4503 /* Remove the GIMPLE_OMP_FOR. */
4504 gsi_remove (&si, true);
4505
4506 /* Iteration space partitioning goes in ITER_PART_BB. */
4507 si = gsi_last_bb (iter_part_bb);
4508
4509 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4510 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4511 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4512 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4513 false, GSI_CONTINUE_LINKING);
4514
4515 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4516 t = fold_build2 (MIN_EXPR, itype, t, n);
4517 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4518 false, GSI_CONTINUE_LINKING);
4519
4520 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4521 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4522
4523 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4524 si = gsi_start_bb (seq_start_bb);
4525
4526 t = fold_convert (itype, s0);
4527 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4528 if (POINTER_TYPE_P (type))
4529 t = fold_build_pointer_plus (fd->loop.n1, t);
4530 else
4531 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4532 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4533 false, GSI_CONTINUE_LINKING);
4534 stmt = gimple_build_assign (fd->loop.v, t);
4535 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4536
4537 t = fold_convert (itype, e0);
4538 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4539 if (POINTER_TYPE_P (type))
4540 t = fold_build_pointer_plus (fd->loop.n1, t);
4541 else
4542 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4543 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4544 false, GSI_CONTINUE_LINKING);
4545
4546 /* The code controlling the sequential loop goes in CONT_BB,
4547 replacing the GIMPLE_OMP_CONTINUE. */
4548 si = gsi_last_bb (cont_bb);
4549 stmt = gsi_stmt (si);
4550 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4551 v_main = gimple_omp_continue_control_use (stmt);
4552 v_back = gimple_omp_continue_control_def (stmt);
4553
4554 if (POINTER_TYPE_P (type))
4555 t = fold_build_pointer_plus (v_main, fd->loop.step);
4556 else
4557 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4558 stmt = gimple_build_assign (v_back, t);
4559 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4560
4561 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4562 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4563
4564 /* Remove GIMPLE_OMP_CONTINUE. */
4565 gsi_remove (&si, true);
4566
4567 /* Trip update code goes into TRIP_UPDATE_BB. */
4568 si = gsi_start_bb (trip_update_bb);
4569
4570 t = build_int_cst (itype, 1);
4571 t = build2 (PLUS_EXPR, itype, trip_main, t);
4572 stmt = gimple_build_assign (trip_back, t);
4573 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4574
4575 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4576 si = gsi_last_bb (exit_bb);
4577 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4578 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4579 false, GSI_SAME_STMT);
4580 gsi_remove (&si, true);
4581
4582 /* Connect the new blocks. */
4583 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4584 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4585
4586 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4587 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4588
4589 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4590
4591 if (gimple_in_ssa_p (cfun))
4592 {
4593 gimple_stmt_iterator psi;
4594 gimple phi;
4595 edge re, ene;
4596 edge_var_map_vector head;
4597 edge_var_map *vm;
4598 size_t i;
4599
4600 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4601 remove arguments of the phi nodes in fin_bb. We need to create
4602 appropriate phi nodes in iter_part_bb instead. */
4603 se = single_pred_edge (fin_bb);
4604 re = single_succ_edge (trip_update_bb);
4605 head = redirect_edge_var_map_vector (re);
4606 ene = single_succ_edge (entry_bb);
4607
4608 psi = gsi_start_phis (fin_bb);
4609 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4610 gsi_next (&psi), ++i)
4611 {
4612 gimple nphi;
4613 source_location locus;
4614
4615 phi = gsi_stmt (psi);
4616 t = gimple_phi_result (phi);
4617 gcc_assert (t == redirect_edge_var_map_result (vm));
4618 nphi = create_phi_node (t, iter_part_bb);
4619 SSA_NAME_DEF_STMT (t) = nphi;
4620
4621 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4622 locus = gimple_phi_arg_location_from_edge (phi, se);
4623
4624 	  /* A special case -- fd->loop.v is not yet computed in
4625 	     iter_part_bb; we need to use v_extra instead.  */
4626 if (t == fd->loop.v)
4627 t = v_extra;
4628 add_phi_arg (nphi, t, ene, locus);
4629 locus = redirect_edge_var_map_location (vm);
4630 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4631 }
4632 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4633 redirect_edge_var_map_clear (re);
4634 while (1)
4635 {
4636 psi = gsi_start_phis (fin_bb);
4637 if (gsi_end_p (psi))
4638 break;
4639 remove_phi_node (&psi, false);
4640 }
4641
4642 /* Make phi node for trip. */
4643 phi = create_phi_node (trip_main, iter_part_bb);
4644 SSA_NAME_DEF_STMT (trip_main) = phi;
4645 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4646 UNKNOWN_LOCATION);
4647 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4648 UNKNOWN_LOCATION);
4649 }
4650
4651 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4652 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4653 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4654 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4655 recompute_dominator (CDI_DOMINATORS, fin_bb));
4656 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4657 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4658 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4659 recompute_dominator (CDI_DOMINATORS, body_bb));
4660 }
4661
4662
4663 /* Expand the OpenMP loop defined by REGION. */
4664
4665 static void
4666 expand_omp_for (struct omp_region *region)
4667 {
4668 struct omp_for_data fd;
4669 struct omp_for_data_loop *loops;
4670
4671 loops
4672 = (struct omp_for_data_loop *)
4673 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4674 * sizeof (struct omp_for_data_loop));
4675 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4676 region->sched_kind = fd.sched_kind;
4677
4678 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4679 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4680 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4681 if (region->cont)
4682 {
4683 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4684 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4685 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4686 }
4687
4688 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4689 && !fd.have_ordered
4690 && fd.collapse == 1
4691 && region->cont != NULL)
4692 {
4693 if (fd.chunk_size == NULL)
4694 expand_omp_for_static_nochunk (region, &fd);
4695 else
4696 expand_omp_for_static_chunk (region, &fd);
4697 }
4698 else
4699 {
4700 int fn_index, start_ix, next_ix;
4701
4702 if (fd.chunk_size == NULL
4703 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
4704 fd.chunk_size = integer_zero_node;
4705 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4706 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4707 ? 3 : fd.sched_kind;
4708 fn_index += fd.have_ordered * 4;
4709 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
4710 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
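      /* The offset arithmetic above relies on the GOMP_loop_*_START/NEXT
	 builtins being declared in schedule order (static, dynamic,
	 guided, runtime), with the ordered variants following four
	 entries later; e.g. schedule(guided) combined with an ordered
	 clause yields fn_index 2 + 4 = 6.  */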
4711 if (fd.iter_type == long_long_unsigned_type_node)
4712 {
4713 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4714 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
4715 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4716 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
4717 }
4718 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4719 (enum built_in_function) next_ix);
4720 }
4721
4722 update_ssa (TODO_update_ssa_only_virtuals);
4723 }
4724
4725
4726 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4727
4728 v = GOMP_sections_start (n);
4729 L0:
4730 switch (v)
4731 {
4732 case 0:
4733 goto L2;
4734 case 1:
4735 section 1;
4736 goto L1;
4737 case 2:
4738 ...
4739 case n:
4740 ...
4741 default:
4742 abort ();
4743 }
4744 L1:
4745 v = GOMP_sections_next ();
4746 goto L0;
4747 L2:
4748 reduction;
4749
4750    If this is a combined parallel sections construct, replace the call to
4751    GOMP_sections_start with a call to GOMP_sections_next.  */
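
/* An illustrative sketch: for a directive with three explicit sections,

	#pragma omp sections
	{
	  #pragma omp section
	    a ();
	  #pragma omp section
	    b ();
	  #pragma omp section
	    c ();
	}

   the expansion calls GOMP_sections_start (3); each value 1..3 returned
   by it (or by a later GOMP_sections_next call) selects one section body
   through the switch, and 0 means no work is left, branching to L2.  */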
4752
4753 static void
4754 expand_omp_sections (struct omp_region *region)
4755 {
4756 tree t, u, vin = NULL, vmain, vnext, l2;
4757 VEC (tree,heap) *label_vec;
4758 unsigned len;
4759 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4760 gimple_stmt_iterator si, switch_si;
4761 gimple sections_stmt, stmt, cont;
4762 edge_iterator ei;
4763 edge e;
4764 struct omp_region *inner;
4765 unsigned i, casei;
4766 bool exit_reachable = region->cont != NULL;
4767
4768 gcc_assert (region->exit != NULL);
4769 entry_bb = region->entry;
4770 l0_bb = single_succ (entry_bb);
4771 l1_bb = region->cont;
4772 l2_bb = region->exit;
4773 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4774 l2 = gimple_block_label (l2_bb);
4775 else
4776 {
4777 /* This can happen if there are reductions. */
4778 len = EDGE_COUNT (l0_bb->succs);
4779 gcc_assert (len > 0);
4780 e = EDGE_SUCC (l0_bb, len - 1);
4781 si = gsi_last_bb (e->dest);
4782 l2 = NULL_TREE;
4783 if (gsi_end_p (si)
4784 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4785 l2 = gimple_block_label (e->dest);
4786 else
4787 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4788 {
4789 si = gsi_last_bb (e->dest);
4790 if (gsi_end_p (si)
4791 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4792 {
4793 l2 = gimple_block_label (e->dest);
4794 break;
4795 }
4796 }
4797 }
4798 if (exit_reachable)
4799 default_bb = create_empty_bb (l1_bb->prev_bb);
4800 else
4801 default_bb = create_empty_bb (l0_bb);
4802
4803   /* We will build a switch() with enough cases for all the
4804      GIMPLE_OMP_SECTION regions, a '0' case to indicate that no more work
4805      remains, and a default case to abort if something goes wrong.  */
4806 len = EDGE_COUNT (l0_bb->succs);
4807
4808 /* Use VEC_quick_push on label_vec throughout, since we know the size
4809 in advance. */
4810 label_vec = VEC_alloc (tree, heap, len);
4811
4812 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4813 GIMPLE_OMP_SECTIONS statement. */
4814 si = gsi_last_bb (entry_bb);
4815 sections_stmt = gsi_stmt (si);
4816 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4817 vin = gimple_omp_sections_control (sections_stmt);
4818 if (!is_combined_parallel (region))
4819 {
4820 /* If we are not inside a combined parallel+sections region,
4821 call GOMP_sections_start. */
4822 t = build_int_cst (unsigned_type_node,
4823 exit_reachable ? len - 1 : len);
4824 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
4825 stmt = gimple_build_call (u, 1, t);
4826 }
4827 else
4828 {
4829 /* Otherwise, call GOMP_sections_next. */
4830 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4831 stmt = gimple_build_call (u, 0);
4832 }
4833 gimple_call_set_lhs (stmt, vin);
4834 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4835 gsi_remove (&si, true);
4836
4837 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4838 L0_BB. */
4839 switch_si = gsi_last_bb (l0_bb);
4840 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4841 if (exit_reachable)
4842 {
4843 cont = last_stmt (l1_bb);
4844 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4845 vmain = gimple_omp_continue_control_use (cont);
4846 vnext = gimple_omp_continue_control_def (cont);
4847 }
4848 else
4849 {
4850 vmain = vin;
4851 vnext = NULL_TREE;
4852 }
4853
4854 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
4855 VEC_quick_push (tree, label_vec, t);
4856 i = 1;
4857
4858 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4859 for (inner = region->inner, casei = 1;
4860 inner;
4861 inner = inner->next, i++, casei++)
4862 {
4863 basic_block s_entry_bb, s_exit_bb;
4864
4865 /* Skip optional reduction region. */
4866 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4867 {
4868 --i;
4869 --casei;
4870 continue;
4871 }
4872
4873 s_entry_bb = inner->entry;
4874 s_exit_bb = inner->exit;
4875
4876 t = gimple_block_label (s_entry_bb);
4877 u = build_int_cst (unsigned_type_node, casei);
4878 u = build_case_label (u, NULL, t);
4879 VEC_quick_push (tree, label_vec, u);
4880
4881 si = gsi_last_bb (s_entry_bb);
4882 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4883 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4884 gsi_remove (&si, true);
4885 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4886
4887 if (s_exit_bb == NULL)
4888 continue;
4889
4890 si = gsi_last_bb (s_exit_bb);
4891 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4892 gsi_remove (&si, true);
4893
4894 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4895 }
4896
4897 /* Error handling code goes in DEFAULT_BB. */
4898 t = gimple_block_label (default_bb);
4899 u = build_case_label (NULL, NULL, t);
4900 make_edge (l0_bb, default_bb, 0);
4901
4902 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4903 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4904 gsi_remove (&switch_si, true);
4905 VEC_free (tree, heap, label_vec);
4906
4907 si = gsi_start_bb (default_bb);
4908 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
4909 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4910
4911 if (exit_reachable)
4912 {
4913 tree bfn_decl;
4914
4915 /* Code to get the next section goes in L1_BB. */
4916 si = gsi_last_bb (l1_bb);
4917 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4918
4919 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4920 stmt = gimple_build_call (bfn_decl, 0);
4921 gimple_call_set_lhs (stmt, vnext);
4922 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4923 gsi_remove (&si, true);
4924
4925 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4926 }
4927
4928 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4929 si = gsi_last_bb (l2_bb);
4930 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4931 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
4932 else
4933 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
4934 stmt = gimple_build_call (t, 0);
4935 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4936 gsi_remove (&si, true);
4937
4938 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4939 }
4940
4941
4942 /* Expand code for an OpenMP single directive.  We've already expanded
4943    much of the code; here we simply place the GOMP_barrier call.  */
4944
4945 static void
4946 expand_omp_single (struct omp_region *region)
4947 {
4948 basic_block entry_bb, exit_bb;
4949 gimple_stmt_iterator si;
4950 bool need_barrier = false;
4951
4952 entry_bb = region->entry;
4953 exit_bb = region->exit;
4954
4955 si = gsi_last_bb (entry_bb);
4956 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4957 be removed. We need to ensure that the thread that entered the single
4958 does not exit before the data is copied out by the other threads. */
4959 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4960 OMP_CLAUSE_COPYPRIVATE))
4961 need_barrier = true;
4962 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4963 gsi_remove (&si, true);
4964 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4965
4966 si = gsi_last_bb (exit_bb);
4967 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4968 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4969 false, GSI_SAME_STMT);
4970 gsi_remove (&si, true);
4971 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4972 }
4973
4974
4975 /* Generic expansion for OpenMP synchronization directives: master,
4976 ordered and critical. All we need to do here is remove the entry
4977 and exit markers for REGION. */
4978
4979 static void
4980 expand_omp_synch (struct omp_region *region)
4981 {
4982 basic_block entry_bb, exit_bb;
4983 gimple_stmt_iterator si;
4984
4985 entry_bb = region->entry;
4986 exit_bb = region->exit;
4987
4988 si = gsi_last_bb (entry_bb);
4989 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4990 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4991 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4992 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4993 gsi_remove (&si, true);
4994 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4995
4996 if (exit_bb)
4997 {
4998 si = gsi_last_bb (exit_bb);
4999 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
5000 gsi_remove (&si, true);
5001 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
5002 }
5003 }
5004
5005 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5006 operation as a normal volatile load. */
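
/* For instance (an illustrative sketch, assuming a 4-byte int X):

	#pragma omp atomic read
	v = x;

   is expanded here into a call to __atomic_load_4 (&x, MEMMODEL_RELAXED),
   with the result view-converted back to the type of V if needed.  */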
5007
5008 static bool
5009 expand_omp_atomic_load (basic_block load_bb, tree addr,
5010 tree loaded_val, int index)
5011 {
5012 enum built_in_function tmpbase;
5013 gimple_stmt_iterator gsi;
5014 basic_block store_bb;
5015 location_t loc;
5016 gimple stmt;
5017 tree decl, call, type, itype;
5018
5019 gsi = gsi_last_bb (load_bb);
5020 stmt = gsi_stmt (gsi);
5021 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5022 loc = gimple_location (stmt);
5023
5024 /* ??? If the target does not implement atomic_load_optab[mode], and mode
5025 is smaller than word size, then expand_atomic_load assumes that the load
5026 is atomic. We could avoid the builtin entirely in this case. */
5027
5028 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
5029 decl = builtin_decl_explicit (tmpbase);
5030 if (decl == NULL_TREE)
5031 return false;
5032
5033 type = TREE_TYPE (loaded_val);
5034 itype = TREE_TYPE (TREE_TYPE (decl));
5035
5036 call = build_call_expr_loc (loc, decl, 2, addr,
5037 build_int_cst (NULL, MEMMODEL_RELAXED));
5038 if (!useless_type_conversion_p (type, itype))
5039 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5040 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5041
5042 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5043 gsi_remove (&gsi, true);
5044
5045 store_bb = single_succ (load_bb);
5046 gsi = gsi_last_bb (store_bb);
5047 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5048 gsi_remove (&gsi, true);
5049
5050 if (gimple_in_ssa_p (cfun))
5051 update_ssa (TODO_update_ssa_no_phi);
5052
5053 return true;
5054 }
5055
5056 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5057 operation as a normal volatile store. */
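
/* For instance (an illustrative sketch):

	#pragma omp atomic write
	x = expr;

   becomes a call to __atomic_store_N (&x, expr, MEMMODEL_RELAXED), with N
   the size of X in bytes; when the old value of X is also needed (an
   atomic capture), the exchange builtin is used instead, as detected via
   gimple_omp_atomic_need_value_p below.  */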
5058
5059 static bool
5060 expand_omp_atomic_store (basic_block load_bb, tree addr,
5061 tree loaded_val, tree stored_val, int index)
5062 {
5063 enum built_in_function tmpbase;
5064 gimple_stmt_iterator gsi;
5065 basic_block store_bb = single_succ (load_bb);
5066 location_t loc;
5067 gimple stmt;
5068 tree decl, call, type, itype;
5069 enum machine_mode imode;
5070 bool exchange;
5071
5072 gsi = gsi_last_bb (load_bb);
5073 stmt = gsi_stmt (gsi);
5074 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5075
5076 /* If the load value is needed, then this isn't a store but an exchange. */
5077 exchange = gimple_omp_atomic_need_value_p (stmt);
5078
5079 gsi = gsi_last_bb (store_bb);
5080 stmt = gsi_stmt (gsi);
5081 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
5082 loc = gimple_location (stmt);
5083
5084 /* ??? If the target does not implement atomic_store_optab[mode], and mode
5085 is smaller than word size, then expand_atomic_store assumes that the store
5086 is atomic. We could avoid the builtin entirely in this case. */
5087
5088 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
5089 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
5090 decl = builtin_decl_explicit (tmpbase);
5091 if (decl == NULL_TREE)
5092 return false;
5093
5094 type = TREE_TYPE (stored_val);
5095
5096 /* Dig out the type of the function's second argument. */
5097 itype = TREE_TYPE (decl);
5098 itype = TYPE_ARG_TYPES (itype);
5099 itype = TREE_CHAIN (itype);
5100 itype = TREE_VALUE (itype);
5101 imode = TYPE_MODE (itype);
5102
5103 if (exchange && !can_atomic_exchange_p (imode, true))
5104 return false;
5105
5106 if (!useless_type_conversion_p (itype, type))
5107 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
5108 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
5109 build_int_cst (NULL, MEMMODEL_RELAXED));
5110 if (exchange)
5111 {
5112 if (!useless_type_conversion_p (type, itype))
5113 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5114 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5115 }
5116
5117 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5118 gsi_remove (&gsi, true);
5119
5120 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
5121 gsi = gsi_last_bb (load_bb);
5122 gsi_remove (&gsi, true);
5123
5124 if (gimple_in_ssa_p (cfun))
5125 update_ssa (TODO_update_ssa_no_phi);
5126
5127 return true;
5128 }
5129
5130 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5131 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
5132 size of the data type, and thus usable to find the index of the builtin
5133 decl. Returns false if the expression is not of the proper form. */
5134
5135 static bool
5136 expand_omp_atomic_fetch_op (basic_block load_bb,
5137 tree addr, tree loaded_val,
5138 tree stored_val, int index)
5139 {
5140 enum built_in_function oldbase, newbase, tmpbase;
5141 tree decl, itype, call;
5142 tree lhs, rhs;
5143 basic_block store_bb = single_succ (load_bb);
5144 gimple_stmt_iterator gsi;
5145 gimple stmt;
5146 location_t loc;
5147 enum tree_code code;
5148 bool need_old, need_new;
5149 enum machine_mode imode;
5150
5151 /* We expect to find the following sequences:
5152
5153 load_bb:
5154 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
5155
5156 store_bb:
5157 val = tmp OP something; (or: something OP tmp)
5158        GIMPLE_OMP_ATOMIC_STORE (val)
5159
5160 ???FIXME: Allow a more flexible sequence.
5161 Perhaps use data flow to pick the statements.
5162
5163 */
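  /* For example (an illustrative sketch, assuming a 4-byte int X):

	#pragma omp atomic
	x += 1;

     matches the pattern above with OP being PLUS_EXPR, so with INDEX == 2
     the call built below is __atomic_fetch_add_4 (&x, 1, MEMMODEL_RELAXED);
     the *_FETCH (new-value) variant is chosen instead when the result of
     the operation itself is needed.  */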
5164
5165 gsi = gsi_after_labels (store_bb);
5166 stmt = gsi_stmt (gsi);
5167 loc = gimple_location (stmt);
5168 if (!is_gimple_assign (stmt))
5169 return false;
5170 gsi_next (&gsi);
5171 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
5172 return false;
5173 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5174 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5175 gcc_checking_assert (!need_old || !need_new);
5176
5177 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
5178 return false;
5179
5180 /* Check for one of the supported fetch-op operations. */
5181 code = gimple_assign_rhs_code (stmt);
5182 switch (code)
5183 {
5184 case PLUS_EXPR:
5185 case POINTER_PLUS_EXPR:
5186 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
5187 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
5188 break;
5189 case MINUS_EXPR:
5190 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
5191 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
5192 break;
5193 case BIT_AND_EXPR:
5194 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
5195 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
5196 break;
5197 case BIT_IOR_EXPR:
5198 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
5199 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
5200 break;
5201 case BIT_XOR_EXPR:
5202 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
5203 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
5204 break;
5205 default:
5206 return false;
5207 }
5208
5209 /* Make sure the expression is of the proper form. */
5210 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5211 rhs = gimple_assign_rhs2 (stmt);
5212 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5213 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5214 rhs = gimple_assign_rhs1 (stmt);
5215 else
5216 return false;
5217
5218 tmpbase = ((enum built_in_function)
5219 ((need_new ? newbase : oldbase) + index + 1));
5220 decl = builtin_decl_explicit (tmpbase);
5221 if (decl == NULL_TREE)
5222 return false;
5223 itype = TREE_TYPE (TREE_TYPE (decl));
5224 imode = TYPE_MODE (itype);
5225
5226   /* We could test all of the various optabs involved, but the fact of the
5227      matter is that (with the exception of i486 vs i586 and xadd) all targets
5228      that support any atomic operation optab also implement compare-and-swap.
5229      Let optabs.c take care of expanding any compare-and-swap loop.  */
5230 if (!can_compare_and_swap_p (imode, true))
5231 return false;
5232
5233 gsi = gsi_last_bb (load_bb);
5234 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5235
5236 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5237 It only requires that the operation happen atomically. Thus we can
5238 use the RELAXED memory model. */
5239 call = build_call_expr_loc (loc, decl, 3, addr,
5240 fold_convert_loc (loc, itype, rhs),
5241 build_int_cst (NULL, MEMMODEL_RELAXED));
5242
5243 if (need_old || need_new)
5244 {
5245 lhs = need_old ? loaded_val : stored_val;
5246 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5247 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5248 }
5249 else
5250 call = fold_convert_loc (loc, void_type_node, call);
5251 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5252 gsi_remove (&gsi, true);
5253
5254 gsi = gsi_last_bb (store_bb);
5255 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5256 gsi_remove (&gsi, true);
5257 gsi = gsi_last_bb (store_bb);
5258 gsi_remove (&gsi, true);
5259
5260 if (gimple_in_ssa_p (cfun))
5261 update_ssa (TODO_update_ssa_no_phi);
5262
5263 return true;
5264 }
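
/* As an illustration (a hypothetical example, not lifted from any
   particular test case), a statement such as

       #pragma omp atomic
       counter += n;

   where COUNTER is, say, a 4-byte int would normally match the PLUS_EXPR
   case above and be rewritten into a single call along the lines of

       __atomic_fetch_add_4 (&counter, n, MEMMODEL_RELAXED);

   with both the GIMPLE_OMP_ATOMIC_LOAD and the GIMPLE_OMP_ATOMIC_STORE
   removed from the IL.  */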
5265
5266 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5267
5268 oldval = *addr;
5269 repeat:
5270 newval = rhs; // with oldval replacing *addr in rhs
5271 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5272 if (oldval != newval)
5273 goto repeat;
5274
5275 INDEX is log2 of the size of the data type, and thus usable to find the
5276 index of the builtin decl. */
5277
5278 static bool
5279 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5280 tree addr, tree loaded_val, tree stored_val,
5281 int index)
5282 {
5283 tree loadedi, storedi, initial, new_storedi, old_vali;
5284 tree type, itype, cmpxchg, iaddr;
5285 gimple_stmt_iterator si;
5286 basic_block loop_header = single_succ (load_bb);
5287 gimple phi, stmt;
5288 edge e;
5289 enum built_in_function fncode;
5290
5291 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5292 order to use the RELAXED memory model effectively. */
5293 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5294 + index + 1);
5295 cmpxchg = builtin_decl_explicit (fncode);
5296 if (cmpxchg == NULL_TREE)
5297 return false;
5298 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5299 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5300
5301 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
5302 return false;
5303
5304 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5305 si = gsi_last_bb (load_bb);
5306 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5307
5308 /* For floating-point values, we'll need to view-convert them to integers
5309 so that we can perform the atomic compare and swap. Simplify the
5310 following code by always setting up the "i"ntegral variables. */
5311 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5312 {
5313 tree iaddr_val;
5314
5315 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5316 true), NULL);
5317 iaddr_val
5318 = force_gimple_operand_gsi (&si,
5319 fold_convert (TREE_TYPE (iaddr), addr),
5320 false, NULL_TREE, true, GSI_SAME_STMT);
5321 stmt = gimple_build_assign (iaddr, iaddr_val);
5322 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5323 loadedi = create_tmp_var (itype, NULL);
5324 if (gimple_in_ssa_p (cfun))
5325 {
5326 add_referenced_var (iaddr);
5327 add_referenced_var (loadedi);
5328 loadedi = make_ssa_name (loadedi, NULL);
5329 }
5330 }
5331 else
5332 {
5333 iaddr = addr;
5334 loadedi = loaded_val;
5335 }
5336
5337 initial
5338 = force_gimple_operand_gsi (&si,
5339 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5340 iaddr,
5341 build_int_cst (TREE_TYPE (iaddr), 0)),
5342 true, NULL_TREE, true, GSI_SAME_STMT);
5343
5344 /* Move the value to the LOADEDI temporary. */
5345 if (gimple_in_ssa_p (cfun))
5346 {
5347 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5348 phi = create_phi_node (loadedi, loop_header);
5349 SSA_NAME_DEF_STMT (loadedi) = phi;
5350 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5351 initial);
5352 }
5353 else
5354 gsi_insert_before (&si,
5355 gimple_build_assign (loadedi, initial),
5356 GSI_SAME_STMT);
5357 if (loadedi != loaded_val)
5358 {
5359 gimple_stmt_iterator gsi2;
5360 tree x;
5361
5362 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5363 gsi2 = gsi_start_bb (loop_header);
5364 if (gimple_in_ssa_p (cfun))
5365 {
5366 gimple stmt;
5367 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5368 true, GSI_SAME_STMT);
5369 stmt = gimple_build_assign (loaded_val, x);
5370 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5371 }
5372 else
5373 {
5374 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5375 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5376 true, GSI_SAME_STMT);
5377 }
5378 }
5379 gsi_remove (&si, true);
5380
5381 si = gsi_last_bb (store_bb);
5382 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5383
5384 if (iaddr == addr)
5385 storedi = stored_val;
5386 else
5387 storedi =
5388 force_gimple_operand_gsi (&si,
5389 build1 (VIEW_CONVERT_EXPR, itype,
5390 stored_val), true, NULL_TREE, true,
5391 GSI_SAME_STMT);
5392
5393 /* Build the compare&swap statement. */
5394 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5395 new_storedi = force_gimple_operand_gsi (&si,
5396 fold_convert (TREE_TYPE (loadedi),
5397 new_storedi),
5398 true, NULL_TREE,
5399 true, GSI_SAME_STMT);
5400
5401 if (gimple_in_ssa_p (cfun))
5402 old_vali = loadedi;
5403 else
5404 {
5405 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5406 if (gimple_in_ssa_p (cfun))
5407 add_referenced_var (old_vali);
5408 stmt = gimple_build_assign (old_vali, loadedi);
5409 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5410
5411 stmt = gimple_build_assign (loadedi, new_storedi);
5412 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5413 }
5414
5415 /* Note that we always perform the comparison as an integer, even for
5416 floating point. This allows the atomic operation to properly
5417 succeed even with NaNs and -0.0. */
5418 stmt = gimple_build_cond_empty
5419 (build2 (NE_EXPR, boolean_type_node,
5420 new_storedi, old_vali));
5421 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5422
5423 /* Update cfg. */
5424 e = single_succ_edge (store_bb);
5425 e->flags &= ~EDGE_FALLTHRU;
5426 e->flags |= EDGE_FALSE_VALUE;
5427
5428 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5429
5430 /* Copy the new value to loadedi (we already did that before the condition
5431 if we are not in SSA). */
5432 if (gimple_in_ssa_p (cfun))
5433 {
5434 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5435 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5436 }
5437
5438 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5439 gsi_remove (&si, true);
5440
5441 if (gimple_in_ssa_p (cfun))
5442 update_ssa (TODO_update_ssa_no_phi);
5443
5444 return true;
5445 }
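
/* As a sketch (hypothetical source, details elided), for

       #pragma omp atomic
       f *= 2.0f;

   where F is a float and no fetch-op builtin applies, the loop built
   above behaves roughly like

       loadedi = VIEW_CONVERT_EXPR<int>(*(int *) &f);
     repeat:
       stored_val = VIEW_CONVERT_EXPR<float>(loadedi) * 2.0f;
       storedi = VIEW_CONVERT_EXPR<int>(stored_val);
       newi = __sync_val_compare_and_swap_4 (&f, loadedi, storedi);
       if (newi != loadedi)
         { loadedi = newi; goto repeat; }

   In SSA form the copy back into LOADEDI is expressed with a PHI node in
   the loop header rather than the explicit assignment shown here.  */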
5446
5447 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5448
5449 GOMP_atomic_start ();
5450 *addr = rhs;
5451 GOMP_atomic_end ();
5452
5453 The result is not globally atomic, but works so long as all parallel
5454 references are within #pragma omp atomic directives. According to
5455 responses received from omp@openmp.org, this appears to be within
5456 spec, which makes sense, since that's how several other compilers
5457 handle this situation as well.
5458 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5459 expanding. STORED_VAL is the operand of the matching
5460 GIMPLE_OMP_ATOMIC_STORE.
5461
5462 We replace
5463 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5464 loaded_val = *addr;
5465
5466 and replace
5467 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5468 *addr = stored_val;
5469 */
5470
5471 static bool
5472 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5473 tree addr, tree loaded_val, tree stored_val)
5474 {
5475 gimple_stmt_iterator si;
5476 gimple stmt;
5477 tree t;
5478
5479 si = gsi_last_bb (load_bb);
5480 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5481
5482 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
5483 t = build_call_expr (t, 0);
5484 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5485
5486 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5487 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5488 gsi_remove (&si, true);
5489
5490 si = gsi_last_bb (store_bb);
5491 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5492
5493 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5494 stored_val);
5495 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5496
5497 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
5498 t = build_call_expr (t, 0);
5499 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5500 gsi_remove (&si, true);
5501
5502 if (gimple_in_ssa_p (cfun))
5503 update_ssa (TODO_update_ssa_no_phi);
5504 return true;
5505 }
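
/* For example (a hypothetical case), "#pragma omp atomic  x /= y;" on a
   16-byte long double, for which neither a fetch-op builtin nor a usable
   compare-and-swap is likely to exist, would end up here as

       GOMP_atomic_start ();
       x = x / y;
       GOMP_atomic_end ();

   i.e. serialized through libgomp's global atomic lock.  */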
5506
5507 /* Expand a GIMPLE_OMP_ATOMIC statement.  We first try to expand it
5508    using expand_omp_atomic_fetch_op.  If that fails, we try
5509    expand_omp_atomic_pipeline, and if that fails too, the
5510    ultimate fallback is wrapping the operation in a mutex
5511    (expand_omp_atomic_mutex).  REGION is the atomic region built
5512 by build_omp_regions_1(). */
5513
5514 static void
5515 expand_omp_atomic (struct omp_region *region)
5516 {
5517 basic_block load_bb = region->entry, store_bb = region->exit;
5518 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5519 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5520 tree addr = gimple_omp_atomic_load_rhs (load);
5521 tree stored_val = gimple_omp_atomic_store_val (store);
5522 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5523 HOST_WIDE_INT index;
5524
5525 /* Make sure the type is one of the supported sizes. */
5526 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5527 index = exact_log2 (index);
5528 if (index >= 0 && index <= 4)
5529 {
5530 unsigned int align = TYPE_ALIGN_UNIT (type);
5531
5532 /* __sync builtins require strict data alignment. */
5533 if (exact_log2 (align) >= index)
5534 {
5535 /* Atomic load. */
5536 if (loaded_val == stored_val
5537 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5538 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5539 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5540 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
5541 return;
5542
5543 /* Atomic store. */
5544 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5545 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5546 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5547 && store_bb == single_succ (load_bb)
5548 && first_stmt (store_bb) == store
5549 && expand_omp_atomic_store (load_bb, addr, loaded_val,
5550 stored_val, index))
5551 return;
5552
5553 /* When possible, use specialized atomic update functions. */
5554 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5555 && store_bb == single_succ (load_bb)
5556 && expand_omp_atomic_fetch_op (load_bb, addr,
5557 loaded_val, stored_val, index))
5558 return;
5559
5560 /* If we don't have specialized __sync builtins, try and implement
5561 as a compare and swap loop. */
5562 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5563 loaded_val, stored_val, index))
5564 return;
5565 }
5566 }
5567
5568 /* The ultimate fallback is wrapping the operation in a mutex. */
5569 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5570 }
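
/* To make the size check above concrete: an 8-byte type has
   TYPE_SIZE_UNIT == 8, hence INDEX == 3, which selects the "_8" variants
   of the __atomic/__sync builtins; a type whose size is not a power of
   two, or is wider than 16 bytes, yields an INDEX outside 0..4 and goes
   straight to the mutex fallback.  */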
5571
5572
5573 /* Expand the parallel region tree rooted at REGION. Expansion
5574 proceeds in depth-first order. Innermost regions are expanded
5575 first. This way, parallel regions that require a new function to
5576 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5577 internal dependencies in their body. */
5578
5579 static void
5580 expand_omp (struct omp_region *region)
5581 {
5582 while (region)
5583 {
5584 location_t saved_location;
5585
5586 /* First, determine whether this is a combined parallel+workshare
5587 region. */
5588 if (region->type == GIMPLE_OMP_PARALLEL)
5589 determine_parallel_type (region);
5590
5591 if (region->inner)
5592 expand_omp (region->inner);
5593
5594 saved_location = input_location;
5595 if (gimple_has_location (last_stmt (region->entry)))
5596 input_location = gimple_location (last_stmt (region->entry));
5597
5598 switch (region->type)
5599 {
5600 case GIMPLE_OMP_PARALLEL:
5601 case GIMPLE_OMP_TASK:
5602 expand_omp_taskreg (region);
5603 break;
5604
5605 case GIMPLE_OMP_FOR:
5606 expand_omp_for (region);
5607 break;
5608
5609 case GIMPLE_OMP_SECTIONS:
5610 expand_omp_sections (region);
5611 break;
5612
5613 case GIMPLE_OMP_SECTION:
5614 /* Individual omp sections are handled together with their
5615 parent GIMPLE_OMP_SECTIONS region. */
5616 break;
5617
5618 case GIMPLE_OMP_SINGLE:
5619 expand_omp_single (region);
5620 break;
5621
5622 case GIMPLE_OMP_MASTER:
5623 case GIMPLE_OMP_ORDERED:
5624 case GIMPLE_OMP_CRITICAL:
5625 expand_omp_synch (region);
5626 break;
5627
5628 case GIMPLE_OMP_ATOMIC_LOAD:
5629 expand_omp_atomic (region);
5630 break;
5631
5632 default:
5633 gcc_unreachable ();
5634 }
5635
5636 input_location = saved_location;
5637 region = region->next;
5638 }
5639 }
5640
5641
5642 /* Helper for build_omp_regions. Scan the dominator tree starting at
5643 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5644    true, the function ends once a single tree is built (otherwise, the
5645    whole forest of OMP constructs may be built).  */
5646
5647 static void
5648 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5649 bool single_tree)
5650 {
5651 gimple_stmt_iterator gsi;
5652 gimple stmt;
5653 basic_block son;
5654
5655 gsi = gsi_last_bb (bb);
5656 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5657 {
5658 struct omp_region *region;
5659 enum gimple_code code;
5660
5661 stmt = gsi_stmt (gsi);
5662 code = gimple_code (stmt);
5663 if (code == GIMPLE_OMP_RETURN)
5664 {
5665 /* STMT is the return point out of region PARENT. Mark it
5666 as the exit point and make PARENT the immediately
5667 enclosing region. */
5668 gcc_assert (parent);
5669 region = parent;
5670 region->exit = bb;
5671 parent = parent->outer;
5672 }
5673 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5674 {
5675 	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5676 GIMPLE_OMP_RETURN, but matches with
5677 GIMPLE_OMP_ATOMIC_LOAD. */
5678 gcc_assert (parent);
5679 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5680 region = parent;
5681 region->exit = bb;
5682 parent = parent->outer;
5683 }
5684
5685 else if (code == GIMPLE_OMP_CONTINUE)
5686 {
5687 gcc_assert (parent);
5688 parent->cont = bb;
5689 }
5690 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5691 {
5692 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5693 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5694 ;
5695 }
5696 else
5697 {
5698 /* Otherwise, this directive becomes the parent for a new
5699 region. */
5700 region = new_omp_region (bb, code, parent);
5701 parent = region;
5702 }
5703 }
5704
5705 if (single_tree && !parent)
5706 return;
5707
5708 for (son = first_dom_son (CDI_DOMINATORS, bb);
5709 son;
5710 son = next_dom_son (CDI_DOMINATORS, son))
5711 build_omp_regions_1 (son, parent, single_tree);
5712 }
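
/* As an illustration, for

       #pragma omp parallel
       {
         #pragma omp for
         for (...) ...
       }

   this builds a GIMPLE_OMP_PARALLEL region whose ->inner is the
   GIMPLE_OMP_FOR region, with each region's ->exit pointing at the block
   holding its matching GIMPLE_OMP_RETURN and the loop region's ->cont
   pointing at the GIMPLE_OMP_CONTINUE block.  */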
5713
5714 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5715 root_omp_region. */
5716
5717 static void
5718 build_omp_regions_root (basic_block root)
5719 {
5720 gcc_assert (root_omp_region == NULL);
5721 build_omp_regions_1 (root, NULL, true);
5722 gcc_assert (root_omp_region != NULL);
5723 }
5724
5725 /* Expands omp construct (and its subconstructs) starting in HEAD. */
5726
5727 void
5728 omp_expand_local (basic_block head)
5729 {
5730 build_omp_regions_root (head);
5731 if (dump_file && (dump_flags & TDF_DETAILS))
5732 {
5733 fprintf (dump_file, "\nOMP region tree\n\n");
5734 dump_omp_region (dump_file, root_omp_region, 0);
5735 fprintf (dump_file, "\n");
5736 }
5737
5738 remove_exit_barriers (root_omp_region);
5739 expand_omp (root_omp_region);
5740
5741 free_omp_regions ();
5742 }
5743
5744 /* Scan the CFG and build a tree of OMP regions. Return the root of
5745 the OMP region tree. */
5746
5747 static void
5748 build_omp_regions (void)
5749 {
5750 gcc_assert (root_omp_region == NULL);
5751 calculate_dominance_info (CDI_DOMINATORS);
5752 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5753 }
5754
5755 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5756
5757 static unsigned int
5758 execute_expand_omp (void)
5759 {
5760 build_omp_regions ();
5761
5762 if (!root_omp_region)
5763 return 0;
5764
5765 if (dump_file)
5766 {
5767 fprintf (dump_file, "\nOMP region tree\n\n");
5768 dump_omp_region (dump_file, root_omp_region, 0);
5769 fprintf (dump_file, "\n");
5770 }
5771
5772 remove_exit_barriers (root_omp_region);
5773
5774 expand_omp (root_omp_region);
5775
5776 cleanup_tree_cfg ();
5777
5778 free_omp_regions ();
5779
5780 return 0;
5781 }
5782
5783 /* OMP expansion -- the default pass, run before creation of SSA form. */
5784
5785 static bool
5786 gate_expand_omp (void)
5787 {
5788 return (flag_openmp != 0 && !seen_error ());
5789 }
5790
5791 struct gimple_opt_pass pass_expand_omp =
5792 {
5793 {
5794 GIMPLE_PASS,
5795 "ompexp", /* name */
5796 gate_expand_omp, /* gate */
5797 execute_expand_omp, /* execute */
5798 NULL, /* sub */
5799 NULL, /* next */
5800 0, /* static_pass_number */
5801 TV_NONE, /* tv_id */
5802 PROP_gimple_any, /* properties_required */
5803 0, /* properties_provided */
5804 0, /* properties_destroyed */
5805 0, /* todo_flags_start */
5806 0 /* todo_flags_finish */
5807 }
5808 };
5809
5810 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5811
5812 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5813 CTX is the enclosing OMP context for the current statement. */
5814
5815 static void
5816 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5817 {
5818 tree block, control;
5819 gimple_stmt_iterator tgsi;
5820 unsigned i, len;
5821 gimple stmt, new_stmt, bind, t;
5822 gimple_seq ilist, dlist, olist, new_body, body;
5823 struct gimplify_ctx gctx;
5824
5825 stmt = gsi_stmt (*gsi_p);
5826
5827 push_gimplify_context (&gctx);
5828
5829 dlist = NULL;
5830 ilist = NULL;
5831 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5832 &ilist, &dlist, ctx);
5833
5834 tgsi = gsi_start (gimple_omp_body (stmt));
5835 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5836 continue;
5837
5838 tgsi = gsi_start (gimple_omp_body (stmt));
5839 body = NULL;
5840 for (i = 0; i < len; i++, gsi_next (&tgsi))
5841 {
5842 omp_context *sctx;
5843 gimple sec_start;
5844
5845 sec_start = gsi_stmt (tgsi);
5846 sctx = maybe_lookup_ctx (sec_start);
5847 gcc_assert (sctx);
5848
5849 gimple_seq_add_stmt (&body, sec_start);
5850
5851 lower_omp (gimple_omp_body (sec_start), sctx);
5852 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5853 gimple_omp_set_body (sec_start, NULL);
5854
5855 if (i == len - 1)
5856 {
5857 gimple_seq l = NULL;
5858 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5859 &l, ctx);
5860 gimple_seq_add_seq (&body, l);
5861 gimple_omp_section_set_last (sec_start);
5862 }
5863
5864 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5865 }
5866
5867 block = make_node (BLOCK);
5868 bind = gimple_build_bind (NULL, body, block);
5869
5870 olist = NULL;
5871 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5872
5873 block = make_node (BLOCK);
5874 new_stmt = gimple_build_bind (NULL, NULL, block);
5875
5876 pop_gimplify_context (new_stmt);
5877 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5878 BLOCK_VARS (block) = gimple_bind_vars (bind);
5879 if (BLOCK_VARS (block))
5880 TREE_USED (block) = 1;
5881
5882 new_body = NULL;
5883 gimple_seq_add_seq (&new_body, ilist);
5884 gimple_seq_add_stmt (&new_body, stmt);
5885 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5886 gimple_seq_add_stmt (&new_body, bind);
5887
5888 control = create_tmp_var (unsigned_type_node, ".section");
5889 t = gimple_build_omp_continue (control, control);
5890 gimple_omp_sections_set_control (stmt, control);
5891 gimple_seq_add_stmt (&new_body, t);
5892
5893 gimple_seq_add_seq (&new_body, olist);
5894 gimple_seq_add_seq (&new_body, dlist);
5895
5896 new_body = maybe_catch_exception (new_body);
5897
5898 t = gimple_build_omp_return
5899 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5900 OMP_CLAUSE_NOWAIT));
5901 gimple_seq_add_stmt (&new_body, t);
5902
5903 gimple_bind_set_body (new_stmt, new_body);
5904 gimple_omp_set_body (stmt, NULL);
5905
5906 gsi_replace (gsi_p, new_stmt, true);
5907 }
5908
5909
5910 /* A subroutine of lower_omp_single. Expand the simple form of
5911 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5912
5913 if (GOMP_single_start ())
5914 BODY;
5915 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5916
5917 FIXME. It may be better to delay expanding the logic of this until
5918 pass_expand_omp. The expanded logic may make the job more difficult
5919    for a synchronization analysis pass.  */
5920
5921 static void
5922 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5923 {
5924 location_t loc = gimple_location (single_stmt);
5925 tree tlabel = create_artificial_label (loc);
5926 tree flabel = create_artificial_label (loc);
5927 gimple call, cond;
5928 tree lhs, decl;
5929
5930 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
5931 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5932 call = gimple_build_call (decl, 0);
5933 gimple_call_set_lhs (call, lhs);
5934 gimple_seq_add_stmt (pre_p, call);
5935
5936 cond = gimple_build_cond (EQ_EXPR, lhs,
5937 fold_convert_loc (loc, TREE_TYPE (lhs),
5938 boolean_true_node),
5939 tlabel, flabel);
5940 gimple_seq_add_stmt (pre_p, cond);
5941 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5942 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5943 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5944 }
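
/* Spelled out with the labels created above, the emitted sequence is
   approximately

       lhs = GOMP_single_start ();
       if (lhs == true) goto tlabel; else goto flabel;
     tlabel:
       BODY;
     flabel:

   which is just the structured "if" from the comment before this
   function, flattened so that it can be appended to a GIMPLE sequence.  */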
5945
5946
5947 /* A subroutine of lower_omp_single.  Expand the copyprivate form of
5948 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
5949
5950 #pragma omp single copyprivate (a, b, c)
5951
5952 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5953
5954 {
5955 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5956 {
5957 BODY;
5958 copyout.a = a;
5959 copyout.b = b;
5960 copyout.c = c;
5961 	  GOMP_single_copy_end (&copyout);
5962 }
5963 else
5964 {
5965 a = copyout_p->a;
5966 b = copyout_p->b;
5967 c = copyout_p->c;
5968 }
5969 GOMP_barrier ();
5970 }
5971
5972 FIXME. It may be better to delay expanding the logic of this until
5973 pass_expand_omp. The expanded logic may make the job more difficult
5974    for a synchronization analysis pass.  */
5975
5976 static void
5977 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5978 {
5979 tree ptr_type, t, l0, l1, l2, bfn_decl;
5980 gimple_seq copyin_seq;
5981 location_t loc = gimple_location (single_stmt);
5982
5983 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5984
5985 ptr_type = build_pointer_type (ctx->record_type);
5986 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5987
5988 l0 = create_artificial_label (loc);
5989 l1 = create_artificial_label (loc);
5990 l2 = create_artificial_label (loc);
5991
5992 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
5993 t = build_call_expr_loc (loc, bfn_decl, 0);
5994 t = fold_convert_loc (loc, ptr_type, t);
5995 gimplify_assign (ctx->receiver_decl, t, pre_p);
5996
5997 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5998 build_int_cst (ptr_type, 0));
5999 t = build3 (COND_EXPR, void_type_node, t,
6000 build_and_jump (&l0), build_and_jump (&l1));
6001 gimplify_and_add (t, pre_p);
6002
6003 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
6004
6005 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
6006
6007 copyin_seq = NULL;
6008 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
6009 			     &copyin_seq, ctx);
6010
6011 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6012 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
6013 t = build_call_expr_loc (loc, bfn_decl, 1, t);
6014 gimplify_and_add (t, pre_p);
6015
6016 t = build_and_jump (&l2);
6017 gimplify_and_add (t, pre_p);
6018
6019 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
6020
6021 gimple_seq_add_seq (pre_p, copyin_seq);
6022
6023 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
6024 }
6025
6026
6027 /* Expand code for an OpenMP single directive. */
6028
6029 static void
6030 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6031 {
6032 tree block;
6033 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
6034 gimple_seq bind_body, dlist;
6035 struct gimplify_ctx gctx;
6036
6037 push_gimplify_context (&gctx);
6038
6039 bind_body = NULL;
6040 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
6041 &bind_body, &dlist, ctx);
6042 lower_omp (gimple_omp_body (single_stmt), ctx);
6043
6044 gimple_seq_add_stmt (&bind_body, single_stmt);
6045
6046 if (ctx->record_type)
6047 lower_omp_single_copy (single_stmt, &bind_body, ctx);
6048 else
6049 lower_omp_single_simple (single_stmt, &bind_body);
6050
6051 gimple_omp_set_body (single_stmt, NULL);
6052
6053 gimple_seq_add_seq (&bind_body, dlist);
6054
6055 bind_body = maybe_catch_exception (bind_body);
6056
6057 t = gimple_build_omp_return
6058 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
6059 OMP_CLAUSE_NOWAIT));
6060 gimple_seq_add_stmt (&bind_body, t);
6061
6062 block = make_node (BLOCK);
6063 bind = gimple_build_bind (NULL, bind_body, block);
6064
6065 pop_gimplify_context (bind);
6066
6067 gimple_bind_append_vars (bind, ctx->block_vars);
6068 BLOCK_VARS (block) = ctx->block_vars;
6069 gsi_replace (gsi_p, bind, true);
6070 if (BLOCK_VARS (block))
6071 TREE_USED (block) = 1;
6072 }
6073
6074
6075 /* Expand code for an OpenMP master directive. */
6076
6077 static void
6078 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6079 {
6080 tree block, lab = NULL, x, bfn_decl;
6081 gimple stmt = gsi_stmt (*gsi_p), bind;
6082 location_t loc = gimple_location (stmt);
6083 gimple_seq tseq;
6084 struct gimplify_ctx gctx;
6085
6086 push_gimplify_context (&gctx);
6087
6088 block = make_node (BLOCK);
6089 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
6090 block);
6091
6092 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6093 x = build_call_expr_loc (loc, bfn_decl, 0);
6094 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
6095 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
6096 tseq = NULL;
6097 gimplify_and_add (x, &tseq);
6098 gimple_bind_add_seq (bind, tseq);
6099
6100 lower_omp (gimple_omp_body (stmt), ctx);
6101 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6102 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6103 gimple_omp_set_body (stmt, NULL);
6104
6105 gimple_bind_add_stmt (bind, gimple_build_label (lab));
6106
6107 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6108
6109 pop_gimplify_context (bind);
6110
6111 gimple_bind_append_vars (bind, ctx->block_vars);
6112 BLOCK_VARS (block) = ctx->block_vars;
6113 gsi_replace (gsi_p, bind, true);
6114 }
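
/* The net effect is roughly

       if (omp_get_thread_num () != 0) goto lab;
       BODY;
     lab:

   so only the master thread (thread number 0) runs the body.  There is
   no implied barrier, which is why the GIMPLE_OMP_RETURN above is built
   with its nowait flag set (the TRUE argument).  */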
6115
6116
6117 /* Expand code for an OpenMP ordered directive. */
6118
6119 static void
6120 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6121 {
6122 tree block;
6123 gimple stmt = gsi_stmt (*gsi_p), bind, x;
6124 struct gimplify_ctx gctx;
6125
6126 push_gimplify_context (&gctx);
6127
6128 block = make_node (BLOCK);
6129 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
6130 block);
6131
6132 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
6133 0);
6134 gimple_bind_add_stmt (bind, x);
6135
6136 lower_omp (gimple_omp_body (stmt), ctx);
6137 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6138 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6139 gimple_omp_set_body (stmt, NULL);
6140
6141 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
6142 gimple_bind_add_stmt (bind, x);
6143
6144 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6145
6146 pop_gimplify_context (bind);
6147
6148 gimple_bind_append_vars (bind, ctx->block_vars);
6149 BLOCK_VARS (block) = gimple_bind_vars (bind);
6150 gsi_replace (gsi_p, bind, true);
6151 }
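
/* The body therefore ends up bracketed as

       GOMP_ordered_start ();
       BODY;
       GOMP_ordered_end ();

   inside a fresh GIMPLE_BIND, analogous to the lock/unlock bracketing
   done for critical sections below.  */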
6152
6153
6154 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
6155 substitution of a couple of function calls. But in the NAMED case,
6156    it requires that languages coordinate a symbol name.  It is therefore
6157 best put here in common code. */
6158
6159 static GTY((param1_is (tree), param2_is (tree)))
6160 splay_tree critical_name_mutexes;
6161
6162 static void
6163 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6164 {
6165 tree block;
6166 tree name, lock, unlock;
6167 gimple stmt = gsi_stmt (*gsi_p), bind;
6168 location_t loc = gimple_location (stmt);
6169 gimple_seq tbody;
6170 struct gimplify_ctx gctx;
6171
6172 name = gimple_omp_critical_name (stmt);
6173 if (name)
6174 {
6175 tree decl;
6176 splay_tree_node n;
6177
6178 if (!critical_name_mutexes)
6179 critical_name_mutexes
6180 = splay_tree_new_ggc (splay_tree_compare_pointers,
6181 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
6182 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
6183
6184 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
6185 if (n == NULL)
6186 {
6187 char *new_str;
6188
6189 decl = create_tmp_var_raw (ptr_type_node, NULL);
6190
6191 new_str = ACONCAT ((".gomp_critical_user_",
6192 IDENTIFIER_POINTER (name), NULL));
6193 DECL_NAME (decl) = get_identifier (new_str);
6194 TREE_PUBLIC (decl) = 1;
6195 TREE_STATIC (decl) = 1;
6196 DECL_COMMON (decl) = 1;
6197 DECL_ARTIFICIAL (decl) = 1;
6198 DECL_IGNORED_P (decl) = 1;
6199 varpool_finalize_decl (decl);
6200
6201 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
6202 (splay_tree_value) decl);
6203 }
6204 else
6205 decl = (tree) n->value;
6206
6207 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
6208 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
6209
6210 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
6211 unlock = build_call_expr_loc (loc, unlock, 1,
6212 build_fold_addr_expr_loc (loc, decl));
6213 }
6214 else
6215 {
6216 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
6217 lock = build_call_expr_loc (loc, lock, 0);
6218
6219 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
6220 unlock = build_call_expr_loc (loc, unlock, 0);
6221 }
6222
6223 push_gimplify_context (&gctx);
6224
6225 block = make_node (BLOCK);
6226 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
6227
6228 tbody = gimple_bind_body (bind);
6229 gimplify_and_add (lock, &tbody);
6230 gimple_bind_set_body (bind, tbody);
6231
6232 lower_omp (gimple_omp_body (stmt), ctx);
6233 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6234 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6235 gimple_omp_set_body (stmt, NULL);
6236
6237 tbody = gimple_bind_body (bind);
6238 gimplify_and_add (unlock, &tbody);
6239 gimple_bind_set_body (bind, tbody);
6240
6241 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6242
6243 pop_gimplify_context (bind);
6244 gimple_bind_append_vars (bind, ctx->block_vars);
6245 BLOCK_VARS (block) = gimple_bind_vars (bind);
6246 gsi_replace (gsi_p, bind, true);
6247 }
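
/* So, for example, "#pragma omp critical (io)" acquires its lock through
   a common symbol named ".gomp_critical_user_io" (TREE_PUBLIC and
   DECL_COMMON above make it shared across translation units), whereas an
   unnamed critical uses the single lock behind GOMP_critical_start and
   GOMP_critical_end.  */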
6248
6249
6250 /* A subroutine of lower_omp_for. Generate code to emit the predicate
6251 for a lastprivate clause. Given a loop control predicate of (V
6252 cond N2), we gate the clause on (!(V cond N2)). The lowered form
6253 is appended to *DLIST, iterator initialization is appended to
6254 *BODY_P. */
6255
6256 static void
6257 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6258 gimple_seq *dlist, struct omp_context *ctx)
6259 {
6260 tree clauses, cond, vinit;
6261 enum tree_code cond_code;
6262 gimple_seq stmts;
6263
6264 cond_code = fd->loop.cond_code;
6265 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6266
6267 /* When possible, use a strict equality expression. This can let VRP
6268 type optimizations deduce the value and remove a copy. */
6269 if (host_integerp (fd->loop.step, 0))
6270 {
6271 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6272 if (step == 1 || step == -1)
6273 cond_code = EQ_EXPR;
6274 }
6275
6276 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6277
6278 clauses = gimple_omp_for_clauses (fd->for_stmt);
6279 stmts = NULL;
6280 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6281 if (!gimple_seq_empty_p (stmts))
6282 {
6283 gimple_seq_add_seq (&stmts, *dlist);
6284 *dlist = stmts;
6285
6286 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6287 vinit = fd->loop.n1;
6288 if (cond_code == EQ_EXPR
6289 && host_integerp (fd->loop.n2, 0)
6290 && ! integer_zerop (fd->loop.n2))
6291 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6292
6293 /* Initialize the iterator variable, so that threads that don't execute
6294 any iterations don't execute the lastprivate clauses by accident. */
6295 gimplify_assign (fd->loop.v, vinit, body_p);
6296 }
6297 }
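
/* For instance (a hypothetical loop), with

       #pragma omp for lastprivate (x)
       for (i = 0; i < n; i++)
         x = f (i);

   the step is 1, so COND_CODE becomes EQ_EXPR and the copy-out of X is
   guarded by "if (i == n)"; VINIT initializes I so that threads which
   execute no iterations do not satisfy that guard by accident.  */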
6298
6299
6300 /* Lower code for an OpenMP loop directive. */
6301
6302 static void
6303 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6304 {
6305 tree *rhs_p, block;
6306 struct omp_for_data fd;
6307 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6308 gimple_seq omp_for_body, body, dlist;
6309 size_t i;
6310 struct gimplify_ctx gctx;
6311
6312 push_gimplify_context (&gctx);
6313
6314 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6315 lower_omp (gimple_omp_body (stmt), ctx);
6316
6317 block = make_node (BLOCK);
6318 new_stmt = gimple_build_bind (NULL, NULL, block);
6319
6320   /* Move declarations of temporaries in the loop body before we make
6321 it go away. */
6322 omp_for_body = gimple_omp_body (stmt);
6323 if (!gimple_seq_empty_p (omp_for_body)
6324 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6325 {
6326 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6327 gimple_bind_append_vars (new_stmt, vars);
6328 }
6329
6330 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6331 dlist = NULL;
6332 body = NULL;
6333 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6334 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6335
6336 /* Lower the header expressions. At this point, we can assume that
6337 the header is of the form:
6338
6339 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6340
6341 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6342 using the .omp_data_s mapping, if needed. */
6343 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6344 {
6345 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6346 if (!is_gimple_min_invariant (*rhs_p))
6347 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6348
6349 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6350 if (!is_gimple_min_invariant (*rhs_p))
6351 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6352
6353 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6354 if (!is_gimple_min_invariant (*rhs_p))
6355 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6356 }
6357
6358 /* Once lowered, extract the bounds and clauses. */
6359 extract_omp_for_data (stmt, &fd, NULL);
6360
6361 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6362
6363 gimple_seq_add_stmt (&body, stmt);
6364 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6365
6366 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6367 fd.loop.v));
6368
6369 /* After the loop, add exit clauses. */
6370 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6371 gimple_seq_add_seq (&body, dlist);
6372
6373 body = maybe_catch_exception (body);
6374
6375 /* Region exit marker goes at the end of the loop body. */
6376 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6377
6378 pop_gimplify_context (new_stmt);
6379
6380 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6381 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6382 if (BLOCK_VARS (block))
6383 TREE_USED (block) = 1;
6384
6385 gimple_bind_set_body (new_stmt, body);
6386 gimple_omp_set_body (stmt, NULL);
6387 gimple_omp_for_set_pre_body (stmt, NULL);
6388 gsi_replace (gsi_p, new_stmt, true);
6389 }
6390
6391 /* Callback for walk_stmts. Check if the current statement only contains
6392 GIMPLE_OMP_FOR or GIMPLE_OMP_PARALLEL. */
6393
6394 static tree
6395 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6396 bool *handled_ops_p,
6397 struct walk_stmt_info *wi)
6398 {
6399 int *info = (int *) wi->info;
6400 gimple stmt = gsi_stmt (*gsi_p);
6401
6402 *handled_ops_p = true;
6403 switch (gimple_code (stmt))
6404 {
6405 WALK_SUBSTMTS;
6406
6407 case GIMPLE_OMP_FOR:
6408 case GIMPLE_OMP_SECTIONS:
6409 *info = *info == 0 ? 1 : -1;
6410 break;
6411 default:
6412 *info = -1;
6413 break;
6414 }
6415 return NULL;
6416 }
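
/* E.g. a "#pragma omp parallel" whose body is nothing but a single
   "#pragma omp for" leaves *INFO at 1, and the caller then marks the
   parallel as combined; any other top-level statement in the parallel's
   body, or a second workshare, drives *INFO to -1.  Statements nested
   inside the workshare itself are not visited, since *HANDLED_OPS_P
   stays true for it.  */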
6417
6418 struct omp_taskcopy_context
6419 {
6420 /* This field must be at the beginning, as we do "inheritance": Some
6421 callback functions for tree-inline.c (e.g., omp_copy_decl)
6422 receive a copy_body_data pointer that is up-casted to an
6423 omp_context pointer. */
6424 copy_body_data cb;
6425 omp_context *ctx;
6426 };
6427
6428 static tree
6429 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6430 {
6431 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6432
6433 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6434 return create_tmp_var (TREE_TYPE (var), NULL);
6435
6436 return var;
6437 }
6438
6439 static tree
6440 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6441 {
6442 tree name, new_fields = NULL, type, f;
6443
6444 type = lang_hooks.types.make_type (RECORD_TYPE);
6445 name = DECL_NAME (TYPE_NAME (orig_type));
6446 name = build_decl (gimple_location (tcctx->ctx->stmt),
6447 TYPE_DECL, name, type);
6448 TYPE_NAME (type) = name;
6449
6450 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6451 {
6452 tree new_f = copy_node (f);
6453 DECL_CONTEXT (new_f) = type;
6454 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6455 TREE_CHAIN (new_f) = new_fields;
6456 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6457 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6458 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6459 &tcctx->cb, NULL);
6460 new_fields = new_f;
6461 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6462 }
6463 TYPE_FIELDS (type) = nreverse (new_fields);
6464 layout_type (type);
6465 return type;
6466 }
6467
6468 /* Create task copyfn. */
6469
6470 static void
6471 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6472 {
6473 struct function *child_cfun;
6474 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6475 tree record_type, srecord_type, bind, list;
6476 bool record_needs_remap = false, srecord_needs_remap = false;
6477 splay_tree_node n;
6478 struct omp_taskcopy_context tcctx;
6479 struct gimplify_ctx gctx;
6480 location_t loc = gimple_location (task_stmt);
6481
6482 child_fn = gimple_omp_task_copy_fn (task_stmt);
6483 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6484 gcc_assert (child_cfun->cfg == NULL);
6485 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6486
6487 /* Reset DECL_CONTEXT on function arguments. */
6488 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6489 DECL_CONTEXT (t) = child_fn;
6490
6491 /* Populate the function. */
6492 push_gimplify_context (&gctx);
6493 current_function_decl = child_fn;
6494
6495 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6496 TREE_SIDE_EFFECTS (bind) = 1;
6497 list = NULL;
6498 DECL_SAVED_TREE (child_fn) = bind;
6499 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6500
6501 /* Remap src and dst argument types if needed. */
6502 record_type = ctx->record_type;
6503 srecord_type = ctx->srecord_type;
6504 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6505 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6506 {
6507 record_needs_remap = true;
6508 break;
6509 }
6510 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6511 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6512 {
6513 srecord_needs_remap = true;
6514 break;
6515 }
6516
6517 if (record_needs_remap || srecord_needs_remap)
6518 {
6519 memset (&tcctx, '\0', sizeof (tcctx));
6520 tcctx.cb.src_fn = ctx->cb.src_fn;
6521 tcctx.cb.dst_fn = child_fn;
6522 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6523 gcc_checking_assert (tcctx.cb.src_node);
6524 tcctx.cb.dst_node = tcctx.cb.src_node;
6525 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6526 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6527 tcctx.cb.eh_lp_nr = 0;
6528 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6529 tcctx.cb.decl_map = pointer_map_create ();
6530 tcctx.ctx = ctx;
6531
6532 if (record_needs_remap)
6533 record_type = task_copyfn_remap_type (&tcctx, record_type);
6534 if (srecord_needs_remap)
6535 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6536 }
6537 else
6538 tcctx.cb.decl_map = NULL;
6539
6540 push_cfun (child_cfun);
6541
6542 arg = DECL_ARGUMENTS (child_fn);
6543 TREE_TYPE (arg) = build_pointer_type (record_type);
6544 sarg = DECL_CHAIN (arg);
6545 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6546
6547 /* First pass: initialize temporaries used in record_type and srecord_type
6548 sizes and field offsets. */
6549 if (tcctx.cb.decl_map)
6550 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6551 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6552 {
6553 tree *p;
6554
6555 decl = OMP_CLAUSE_DECL (c);
6556 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6557 if (p == NULL)
6558 continue;
6559 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6560 sf = (tree) n->value;
6561 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6562 src = build_simple_mem_ref_loc (loc, sarg);
6563 src = omp_build_component_ref (src, sf);
6564 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6565 append_to_statement_list (t, &list);
6566 }
6567
6568 /* Second pass: copy shared var pointers and copy construct non-VLA
6569 firstprivate vars. */
6570 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6571 switch (OMP_CLAUSE_CODE (c))
6572 {
6573 case OMP_CLAUSE_SHARED:
6574 decl = OMP_CLAUSE_DECL (c);
6575 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6576 if (n == NULL)
6577 break;
6578 f = (tree) n->value;
6579 if (tcctx.cb.decl_map)
6580 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6581 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6582 sf = (tree) n->value;
6583 if (tcctx.cb.decl_map)
6584 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6585 src = build_simple_mem_ref_loc (loc, sarg);
6586 src = omp_build_component_ref (src, sf);
6587 dst = build_simple_mem_ref_loc (loc, arg);
6588 dst = omp_build_component_ref (dst, f);
6589 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6590 append_to_statement_list (t, &list);
6591 break;
6592 case OMP_CLAUSE_FIRSTPRIVATE:
6593 decl = OMP_CLAUSE_DECL (c);
6594 if (is_variable_sized (decl))
6595 break;
6596 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6597 if (n == NULL)
6598 break;
6599 f = (tree) n->value;
6600 if (tcctx.cb.decl_map)
6601 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6602 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6603 if (n != NULL)
6604 {
6605 sf = (tree) n->value;
6606 if (tcctx.cb.decl_map)
6607 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6608 src = build_simple_mem_ref_loc (loc, sarg);
6609 src = omp_build_component_ref (src, sf);
6610 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6611 src = build_simple_mem_ref_loc (loc, src);
6612 }
6613 else
6614 src = decl;
6615 dst = build_simple_mem_ref_loc (loc, arg);
6616 dst = omp_build_component_ref (dst, f);
6617 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6618 append_to_statement_list (t, &list);
6619 break;
6620 case OMP_CLAUSE_PRIVATE:
6621 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6622 break;
6623 decl = OMP_CLAUSE_DECL (c);
6624 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6625 f = (tree) n->value;
6626 if (tcctx.cb.decl_map)
6627 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6628 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6629 if (n != NULL)
6630 {
6631 sf = (tree) n->value;
6632 if (tcctx.cb.decl_map)
6633 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6634 src = build_simple_mem_ref_loc (loc, sarg);
6635 src = omp_build_component_ref (src, sf);
6636 if (use_pointer_for_field (decl, NULL))
6637 src = build_simple_mem_ref_loc (loc, src);
6638 }
6639 else
6640 src = decl;
6641 dst = build_simple_mem_ref_loc (loc, arg);
6642 dst = omp_build_component_ref (dst, f);
6643 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6644 append_to_statement_list (t, &list);
6645 break;
6646 default:
6647 break;
6648 }
6649
6650 /* Last pass: handle VLA firstprivates. */
6651 if (tcctx.cb.decl_map)
6652 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6653 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6654 {
6655 tree ind, ptr, df;
6656
6657 decl = OMP_CLAUSE_DECL (c);
6658 if (!is_variable_sized (decl))
6659 continue;
6660 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6661 if (n == NULL)
6662 continue;
6663 f = (tree) n->value;
6664 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6665 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6666 ind = DECL_VALUE_EXPR (decl);
6667 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6668 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6669 n = splay_tree_lookup (ctx->sfield_map,
6670 (splay_tree_key) TREE_OPERAND (ind, 0));
6671 sf = (tree) n->value;
6672 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6673 src = build_simple_mem_ref_loc (loc, sarg);
6674 src = omp_build_component_ref (src, sf);
6675 src = build_simple_mem_ref_loc (loc, src);
6676 dst = build_simple_mem_ref_loc (loc, arg);
6677 dst = omp_build_component_ref (dst, f);
6678 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6679 append_to_statement_list (t, &list);
6680 n = splay_tree_lookup (ctx->field_map,
6681 (splay_tree_key) TREE_OPERAND (ind, 0));
6682 df = (tree) n->value;
6683 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6684 ptr = build_simple_mem_ref_loc (loc, arg);
6685 ptr = omp_build_component_ref (ptr, df);
6686 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6687 build_fold_addr_expr_loc (loc, dst));
6688 append_to_statement_list (t, &list);
6689 }
6690
6691 t = build1 (RETURN_EXPR, void_type_node, NULL);
6692 append_to_statement_list (t, &list);
6693
6694 if (tcctx.cb.decl_map)
6695 pointer_map_destroy (tcctx.cb.decl_map);
6696 pop_gimplify_context (NULL);
6697 BIND_EXPR_BODY (bind) = list;
6698 pop_cfun ();
6699 current_function_decl = ctx->cb.src_fn;
6700 }
6701
6702 /* Lower the OpenMP parallel or task directive in the current statement
6703 in GSI_P. CTX holds context information for the directive. */
6704
6705 static void
6706 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6707 {
6708 tree clauses;
6709 tree child_fn, t;
6710 gimple stmt = gsi_stmt (*gsi_p);
6711 gimple par_bind, bind;
6712 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6713 struct gimplify_ctx gctx;
6714 location_t loc = gimple_location (stmt);
6715
6716 clauses = gimple_omp_taskreg_clauses (stmt);
6717 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6718 par_body = gimple_bind_body (par_bind);
6719 child_fn = ctx->cb.dst_fn;
6720 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6721 && !gimple_omp_parallel_combined_p (stmt))
6722 {
6723 struct walk_stmt_info wi;
6724 int ws_num = 0;
6725
6726 memset (&wi, 0, sizeof (wi));
6727 wi.info = &ws_num;
6728 wi.val_only = true;
6729 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6730 if (ws_num == 1)
6731 gimple_omp_parallel_set_combined_p (stmt, true);
6732 }
6733 if (ctx->srecord_type)
6734 create_task_copyfn (stmt, ctx);
6735
6736 push_gimplify_context (&gctx);
6737
6738 par_olist = NULL;
6739 par_ilist = NULL;
6740 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6741 lower_omp (par_body, ctx);
6742 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6743 lower_reduction_clauses (clauses, &par_olist, ctx);
6744
6745 /* Declare all the variables created by mapping and the variables
6746 declared in the scope of the parallel body. */
6747 record_vars_into (ctx->block_vars, child_fn);
6748 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6749
6750 if (ctx->record_type)
6751 {
6752 ctx->sender_decl
6753 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6754 : ctx->record_type, ".omp_data_o");
6755 DECL_NAMELESS (ctx->sender_decl) = 1;
6756 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6757 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6758 }
6759
6760 olist = NULL;
6761 ilist = NULL;
6762 lower_send_clauses (clauses, &ilist, &olist, ctx);
6763 lower_send_shared_vars (&ilist, &olist, ctx);
6764
6765 /* Once all the expansions are done, sequence all the different
6766 fragments inside gimple_omp_body. */
6767
6768 new_body = NULL;
6769
6770 if (ctx->record_type)
6771 {
6772 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6773 /* fixup_child_record_type might have changed receiver_decl's type. */
6774 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6775 gimple_seq_add_stmt (&new_body,
6776 gimple_build_assign (ctx->receiver_decl, t));
6777 }
6778
6779 gimple_seq_add_seq (&new_body, par_ilist);
6780 gimple_seq_add_seq (&new_body, par_body);
6781 gimple_seq_add_seq (&new_body, par_olist);
6782 new_body = maybe_catch_exception (new_body);
6783 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6784 gimple_omp_set_body (stmt, new_body);
6785
6786 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6787 gimple_bind_add_stmt (bind, stmt);
6788 if (ilist || olist)
6789 {
6790 gimple_seq_add_stmt (&ilist, bind);
6791 gimple_seq_add_seq (&ilist, olist);
6792 bind = gimple_build_bind (NULL, ilist, NULL);
6793 }
6794
6795 gsi_replace (gsi_p, bind, true);
6796
6797 pop_gimplify_context (NULL);
6798 }
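
/* After lowering, the overall shape is roughly

       .omp_data_o.x = x;                  <- ILIST (send clauses/vars)
       #pragma omp parallel ... [child fn]
         .omp_data_i = &.omp_data_o;       <- receiver setup added above
         ... lowered body ...
         GIMPLE_OMP_RETURN
       ... OLIST (copy-back code, if any) ...

   with the actual outlining into CHILD_FN left to pass_expand_omp.
   The field and variable names shown here are only illustrative.  */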
6799
6800 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6801    regimplified.  If DATA is non-NULL, lower_omp_1 is being called outside
6802    of an OpenMP context, but with task_shared_vars set.  */
6803
6804 static tree
6805 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6806 void *data)
6807 {
6808 tree t = *tp;
6809
6810 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6811 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6812 return t;
6813
6814 if (task_shared_vars
6815 && DECL_P (t)
6816 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6817 return t;
6818
6819 /* If a global variable has been privatized, TREE_CONSTANT on
6820 ADDR_EXPR might be wrong. */
6821 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6822 recompute_tree_invariant_for_addr_expr (t);
6823
6824 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6825 return NULL_TREE;
6826 }
6827
6828 static void
6829 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6830 {
6831 gimple stmt = gsi_stmt (*gsi_p);
6832 struct walk_stmt_info wi;
6833
6834 if (gimple_has_location (stmt))
6835 input_location = gimple_location (stmt);
6836
6837 if (task_shared_vars)
6838 memset (&wi, '\0', sizeof (wi));
6839
6840 /* If we have issued syntax errors, avoid doing any heavy lifting.
6841 Just replace the OpenMP directives with a NOP to avoid
6842 confusing RTL expansion. */
6843 if (seen_error () && is_gimple_omp (stmt))
6844 {
6845 gsi_replace (gsi_p, gimple_build_nop (), true);
6846 return;
6847 }
6848
6849 switch (gimple_code (stmt))
6850 {
6851 case GIMPLE_COND:
6852 if ((ctx || task_shared_vars)
6853 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6854 ctx ? NULL : &wi, NULL)
6855 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6856 ctx ? NULL : &wi, NULL)))
6857 gimple_regimplify_operands (stmt, gsi_p);
6858 break;
6859 case GIMPLE_CATCH:
6860 lower_omp (gimple_catch_handler (stmt), ctx);
6861 break;
6862 case GIMPLE_EH_FILTER:
6863 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6864 break;
6865 case GIMPLE_TRY:
6866 lower_omp (gimple_try_eval (stmt), ctx);
6867 lower_omp (gimple_try_cleanup (stmt), ctx);
6868 break;
6869 case GIMPLE_TRANSACTION:
6870 lower_omp (gimple_transaction_body (stmt), ctx);
6871 break;
6872 case GIMPLE_BIND:
6873 lower_omp (gimple_bind_body (stmt), ctx);
6874 break;
6875 case GIMPLE_OMP_PARALLEL:
6876 case GIMPLE_OMP_TASK:
6877 ctx = maybe_lookup_ctx (stmt);
6878 lower_omp_taskreg (gsi_p, ctx);
6879 break;
6880 case GIMPLE_OMP_FOR:
6881 ctx = maybe_lookup_ctx (stmt);
6882 gcc_assert (ctx);
6883 lower_omp_for (gsi_p, ctx);
6884 break;
6885 case GIMPLE_OMP_SECTIONS:
6886 ctx = maybe_lookup_ctx (stmt);
6887 gcc_assert (ctx);
6888 lower_omp_sections (gsi_p, ctx);
6889 break;
6890 case GIMPLE_OMP_SINGLE:
6891 ctx = maybe_lookup_ctx (stmt);
6892 gcc_assert (ctx);
6893 lower_omp_single (gsi_p, ctx);
6894 break;
6895 case GIMPLE_OMP_MASTER:
6896 ctx = maybe_lookup_ctx (stmt);
6897 gcc_assert (ctx);
6898 lower_omp_master (gsi_p, ctx);
6899 break;
6900 case GIMPLE_OMP_ORDERED:
6901 ctx = maybe_lookup_ctx (stmt);
6902 gcc_assert (ctx);
6903 lower_omp_ordered (gsi_p, ctx);
6904 break;
6905 case GIMPLE_OMP_CRITICAL:
6906 ctx = maybe_lookup_ctx (stmt);
6907 gcc_assert (ctx);
6908 lower_omp_critical (gsi_p, ctx);
6909 break;
6910 case GIMPLE_OMP_ATOMIC_LOAD:
6911 if ((ctx || task_shared_vars)
6912 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6913 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6914 gimple_regimplify_operands (stmt, gsi_p);
6915 break;
6916 default:
6917 if ((ctx || task_shared_vars)
6918 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6919 ctx ? NULL : &wi))
6920 gimple_regimplify_operands (stmt, gsi_p);
6921 break;
6922 }
6923 }
6924
6925 static void
6926 lower_omp (gimple_seq body, omp_context *ctx)
6927 {
6928 location_t saved_location = input_location;
6929 gimple_stmt_iterator gsi = gsi_start (body);
6930 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6931 lower_omp_1 (&gsi, ctx);
6932 input_location = saved_location;
6933 }
6934
6935 /* Main entry point. */
6936
6937 static unsigned int
6938 execute_lower_omp (void)
6939 {
6940 gimple_seq body;
6941
6942 /* This pass always runs, to provide PROP_gimple_lomp.
6943 But there is nothing to do unless -fopenmp is given. */
6944 if (flag_openmp == 0)
6945 return 0;
6946
6947 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6948 delete_omp_context);
6949
6950 body = gimple_body (current_function_decl);
6951 scan_omp (body, NULL);
6952 gcc_assert (taskreg_nesting_level == 0);
6953
  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
        push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
        pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}

struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",                           /* name */
  NULL,                                 /* gate */
  execute_lower_omp,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_NONE,                              /* tv_id */
  PROP_gimple_any,                      /* properties_required */
  PROP_gimple_lomp,                     /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  0                                     /* todo_flags_finish */
 }
};

/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

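/* As an illustrative sketch (not taken from any particular test case), a
   jump that crosses the boundary of an OpenMP structured block is the kind
   of error this utility reports:

	#pragma omp parallel
	{
	  ...
	  goto fail;       <-- invalid exit from the structured block
	}
      fail:
	...

   The diagnosis is done in two walks over the gimplified body: the first
   (diagnose_sb_1) records the innermost OpenMP construct enclosing each
   label, the second (diagnose_sb_2) compares that recorded context with
   the context of every branch to the label.  */
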
static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
               gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
        {
          if (TREE_VALUE (label_ctx) == branch_ctx)
            {
              exit_p = false;
              break;
            }
          label_ctx = TREE_CHAIN (label_ctx);
        }
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
               struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
         walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
                       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

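    /* Remember the innermost OpenMP construct (or NULL) that encloses
       this label; diagnose_sb_2 compares it against the context of each
       branch targeting the label.  */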
    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
                         (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
               struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
         walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
                       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

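    /* This walk runs right after gimplification, before the CFG is built,
       so conditionals still carry explicit true/false jump labels; both
       destinations have to be checked.  */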
    case GIMPLE_COND:
      {
        tree lab = gimple_cond_true_label (stmt);
        if (lab)
          {
            n = splay_tree_lookup (all_labels,
                                   (splay_tree_key) lab);
            diagnose_sb_0 (gsi_p, context,
                           n ? (gimple) n->value : NULL);
          }
        lab = gimple_cond_false_label (stmt);
        if (lab)
          {
            n = splay_tree_lookup (all_labels,
                                   (splay_tree_key) lab);
            diagnose_sb_0 (gsi_p, context,
                           n ? (gimple) n->value : NULL);
          }
      }
      break;

    case GIMPLE_GOTO:
      {
        tree lab = gimple_goto_dest (stmt);
        if (TREE_CODE (lab) != LABEL_DECL)
          break;

        n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
        diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
        unsigned int i;
        for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
          {
            tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
            n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
            if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
              break;
          }
      }
      break;

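    /* A "return" transfers control out of whatever OpenMP construct
       encloses it, so treat it as a branch to a label with no OpenMP
       context at all.  */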
    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

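/* Entry point for the structured-block diagnostic: walk the body of the
   current function twice, first recording the OpenMP context of every
   label (diagnose_sb_1) and then validating every branch against the
   context of its destination (diagnose_sb_2).  */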
static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}

static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}

struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",               /* name */
  gate_diagnose_omp_blocks,             /* gate */
  diagnose_omp_structured_block_errors, /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_NONE,                              /* tv_id */
  PROP_gimple_any,                      /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  0,                                    /* todo_flags_finish */
 }
};

#include "gt-omp-low.h"