1 /* This file contains routines to construct OpenACC and OpenMP constructs,
2 called from parsing in the C and C++ front ends.
3
4 Copyright (C) 2005-2018 Free Software Foundation, Inc.
5 Contributed by Richard Henderson <rth@redhat.com>,
6 Diego Novillo <dnovillo@redhat.com>.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "options.h"
28 #include "c-common.h"
29 #include "gimple-expr.h"
30 #include "c-pragma.h"
31 #include "omp-general.h"
32 #include "gomp-constants.h"
33
34
35 /* Complete a #pragma oacc wait construct. LOC is the location of
36 the #pragma. */
37
38 tree
c_finish_oacc_wait(location_t loc,tree parms,tree clauses)39 c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
40 {
41 const int nparms = list_length (parms);
42 tree stmt, t;
43 vec<tree, va_gc> *args;
44
45 vec_alloc (args, nparms + 2);
46 stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);
47
48 if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC))
49 t = OMP_CLAUSE_ASYNC_EXPR (clauses);
50 else
51 t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);
52
53 args->quick_push (t);
54 args->quick_push (build_int_cst (integer_type_node, nparms));
55
56 for (t = parms; t; t = TREE_CHAIN (t))
57 {
58 if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
59 args->quick_push (build_int_cst (integer_type_node,
60 TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
61 else
62 args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
63 }
64
65 stmt = build_call_expr_loc_vec (loc, stmt, args);
66
67 vec_free (args);
68
69 return stmt;
70 }
71
/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */
74
75 tree
c_finish_omp_master(location_t loc,tree stmt)76 c_finish_omp_master (location_t loc, tree stmt)
77 {
78 tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
79 SET_EXPR_LOCATION (t, loc);
80 return t;
81 }
82
/* Complete a #pragma omp taskgroup construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */
85
86 tree
c_finish_omp_taskgroup(location_t loc,tree stmt)87 c_finish_omp_taskgroup (location_t loc, tree stmt)
88 {
89 tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt));
90 SET_EXPR_LOCATION (t, loc);
91 return t;
92 }
93
94 /* Complete a #pragma omp critical construct. STMT is the structured-block
95 that follows the pragma, NAME is the identifier in the pragma, or null
96 if it was omitted. LOC is the location of the #pragma. */
97
98 tree
c_finish_omp_critical(location_t loc,tree body,tree name,tree clauses)99 c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses)
100 {
101 tree stmt = make_node (OMP_CRITICAL);
102 TREE_TYPE (stmt) = void_type_node;
103 OMP_CRITICAL_BODY (stmt) = body;
104 OMP_CRITICAL_NAME (stmt) = name;
105 OMP_CRITICAL_CLAUSES (stmt) = clauses;
106 SET_EXPR_LOCATION (stmt, loc);
107 return add_stmt (stmt);
108 }
109
110 /* Complete a #pragma omp ordered construct. STMT is the structured-block
111 that follows the pragma. LOC is the location of the #pragma. */
112
tree
c_finish_omp_ordered (location_t loc, tree clauses, tree stmt)
{
  tree t = make_node (OMP_ORDERED);
  TREE_TYPE (t) = void_type_node;
  OMP_ORDERED_BODY (t) = stmt;
  /* Under -fopenmp-simd without full -fopenmp, only a lone simd clause
     is meaningful; any other clause combination is replaced by a plain
     simd clause so later passes see a consistent form.  */
  if (!flag_openmp	/* flag_openmp_simd */
      && (OMP_CLAUSE_CODE (clauses) != OMP_CLAUSE_SIMD
	  || OMP_CLAUSE_CHAIN (clauses)))
    clauses = build_omp_clause (loc, OMP_CLAUSE_SIMD);
  OMP_ORDERED_CLAUSES (t) = clauses;
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}
127
128
129 /* Complete a #pragma omp barrier construct. LOC is the location of
130 the #pragma. */
131
132 void
c_finish_omp_barrier(location_t loc)133 c_finish_omp_barrier (location_t loc)
134 {
135 tree x;
136
137 x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
138 x = build_call_expr_loc (loc, x, 0);
139 add_stmt (x);
140 }
141
142
143 /* Complete a #pragma omp taskwait construct. LOC is the location of the
144 pragma. */
145
146 void
c_finish_omp_taskwait(location_t loc)147 c_finish_omp_taskwait (location_t loc)
148 {
149 tree x;
150
151 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
152 x = build_call_expr_loc (loc, x, 0);
153 add_stmt (x);
154 }
155
156
157 /* Complete a #pragma omp taskyield construct. LOC is the location of the
158 pragma. */
159
160 void
c_finish_omp_taskyield(location_t loc)161 c_finish_omp_taskyield (location_t loc)
162 {
163 tree x;
164
165 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
166 x = build_call_expr_loc (loc, x, 0);
167 add_stmt (x);
168 }
169
170
171 /* Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC
172 the expression to be implemented atomically is LHS opcode= RHS.
173 For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
174 opcode= RHS with the new or old content of LHS returned.
175 LOC is the location of the atomic statement. The value returned
176 is either error_mark_node (if the construct was erroneous) or an
177 OMP_ATOMIC* node which should be added to the current statement
178 tree with add_stmt. If TEST is set, avoid calling save_expr
179 or create_tmp_var*. */
180
tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs,
		     tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst,
		     bool test)
{
  tree x, type, addr, pre = NULL_TREE;
  HOST_WIDE_INT bitpos = 0, bitsize = 0;

  /* Bail out early if any operand is already erroneous.  */
  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }
  if (TYPE_ATOMIC (type))
    {
      error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */
  tree blhs = NULL;
  if (TREE_CODE (lhs) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs, 1)))
    {
      /* LHS is a bit-field: operate atomically on the whole
	 representative field and remember the bit position/size of the
	 field within it for later extract/insert.  */
      tree field = TREE_OPERAND (lhs, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))
	  && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)))
	bitpos = (tree_to_uhwi (DECL_FIELD_OFFSET (field))
		  - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT;
      else
	bitpos = 0;
      bitpos += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
		 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
      gcc_assert (tree_fits_shwi_p (DECL_SIZE (field)));
      bitsize = tree_to_shwi (DECL_SIZE (field));
      blhs = lhs;
      type = TREE_TYPE (repr);
      lhs = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs, 0),
		    repr, TREE_OPERAND (lhs, 2));
    }

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
  if (addr == error_mark_node)
    return error_mark_node;
  if (!test)
    addr = save_expr (addr);
  if (!test
      && TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || !VAR_P (TREE_OPERAND (addr, 0))))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  tree orig_lhs = lhs;
  lhs = build_indirect_ref (loc, addr, RO_NULL);
  tree new_lhs = lhs;

  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_SEQ_CST (x) = seq_cst;
      /* For a bit-field read, extract the field's bits from the
	 atomically-loaded representative value.  */
      if (blhs)
	x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			bitsize_int (bitsize), bitsize_int (bitpos));
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (blhs)
    {
      /* For bit-fields, fold OPCODE into RHS now; the final store is a
	 plain assignment (NOP_EXPR) of the recomputed value.  */
      lhs = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), lhs,
			bitsize_int (bitsize), bitsize_int (bitpos));
      if (swapped)
	rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      else if (opcode != NOP_EXPR)
	rhs = build_binary_op (loc, opcode, lhs, rhs, true);
      opcode = NOP_EXPR;
    }
  else if (swapped)
    {
      /* "x = expr op x" form: materialize the full RHS expression.  */
      rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, blhs ? blhs : lhs, NULL_TREE, opcode,
			 loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      /* build_modify_expr may have pre-evaluated the RHS into a
	 SAVE_EXPR; keep it in PRE to emit before the atomic op.  */
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* For a bit-field store, re-insert the new bits into the
     representative's previous value.  */
  if (blhs)
    rhs = build3_loc (loc, BIT_INSERT_EXPR, type, new_lhs,
		      rhs, bitsize_int (bitpos));

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_SEQ_CST (x) = seq_cst;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && VAR_P (rhs1)
      && VAR_P (orig_lhs)
      && rhs1 != orig_lhs
      && !test)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different "
		       "variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different "
		       "variables for memory");
      return error_mark_node;
    }

  /* Rewrite bit-field references in LHS1/RHS1 to their representative
     field too, so that subsequent pointer comparisons line up with the
     rewritten ORIG_LHS.  */
  if (lhs1
      && lhs1 != orig_lhs
      && TREE_CODE (lhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs1, 1)))
    {
      tree field = TREE_OPERAND (lhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      lhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs1, 0),
		     repr, TREE_OPERAND (lhs1, 2));
    }
  if (rhs1
      && rhs1 != orig_lhs
      && TREE_CODE (rhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (rhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (rhs1, 1)))
    {
      tree field = TREE_OPERAND (rhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      rhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (rhs1, 0),
		     repr, TREE_OPERAND (rhs1, 2));
    }

  if (code != OMP_ATOMIC)
    {
      /* Capture forms: the atomic result must also be stored into V.  */
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && VAR_P (lhs1) && VAR_P (orig_lhs))
	{
	  if (lhs1 != orig_lhs && !test)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two "
			     "different variables for memory");
	      return error_mark_node;
	    }
	}
      if (blhs)
	{
	  /* Extract the captured bit-field value from the
	     representative result.  */
	  x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			  bitsize_int (bitsize), bitsize_int (bitpos));
	  type = TREE_TYPE (blhs);
	}
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      if (rhs1 && rhs1 != orig_lhs)
	{
	  /* Keep RHS1's address evaluation for its side effects only.  */
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != orig_lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      if (!test)
		x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != orig_lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  /* Emit the pre-evaluated RHS side effects (if any) before the atomic.  */
  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}
417
418
419 /* Complete a #pragma omp flush construct. We don't do anything with
420 the variable list that the syntax allows. LOC is the location of
421 the #pragma. */
422
423 void
c_finish_omp_flush(location_t loc)424 c_finish_omp_flush (location_t loc)
425 {
426 tree x;
427
428 x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
429 x = build_call_expr_loc (loc, x, 0);
430 add_stmt (x);
431 }
432
433
434 /* Check and canonicalize OMP_FOR increment expression.
435 Helper function for c_finish_omp_for. */
436
static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  /* Only integral expressions at least as wide as DECL can be
     canonicalized; anything else is rejected.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  /* DECL itself contributes a zero step; the caller adds DECL back.  */
  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      /* Look through conversions, re-applying them to the
	 canonicalized operand.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      /* DECL may only appear on the left of a subtraction.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, MINUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      /* DECL may appear on either side of an addition; try both.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
	/* cp_build_modify_expr forces preevaluation of the RHS to make
	   sure that it is evaluated before the lvalue-rvalue conversion
	   is applied to the LHS.  Reconstruct the original expression.  */
	tree op0 = TREE_OPERAND (exp, 0);
	if (TREE_CODE (op0) == TARGET_EXPR
	    && !VOID_TYPE_P (TREE_TYPE (op0)))
	  {
	    tree op1 = TREE_OPERAND (exp, 1);
	    tree temp = TARGET_EXPR_SLOT (op0);
	    if (BINARY_CLASS_P (op1)
		&& TREE_OPERAND (op1, 1) == temp)
	      {
		op1 = copy_node (op1);
		TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
		return check_omp_for_incr_expr (loc, op1, decl);
	      }
	  }
	break;
      }
    default:
      break;
    }

  /* Not a recognized increment form.  */
  return error_mark_node;
}
499
500 /* If the OMP_FOR increment expression in INCR is of pointer type,
501 canonicalize it into an expression handled by gimplify_omp_for()
502 and return it. DECL is the iteration variable. */
503
504 static tree
c_omp_for_incr_canonicalize_ptr(location_t loc,tree decl,tree incr)505 c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
506 {
507 if (POINTER_TYPE_P (TREE_TYPE (decl))
508 && TREE_OPERAND (incr, 1))
509 {
510 tree t = fold_convert_loc (loc,
511 sizetype, TREE_OPERAND (incr, 1));
512
513 if (TREE_CODE (incr) == POSTDECREMENT_EXPR
514 || TREE_CODE (incr) == PREDECREMENT_EXPR)
515 t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
516 t = fold_build_pointer_plus (decl, t);
517 incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
518 }
519 return incr;
520 }
521
522 /* Validate and generate OMP_FOR.
523 DECLV is a vector of iteration variables, for each collapsed loop.
524
525 ORIG_DECLV, if non-NULL, is a vector with the original iteration
526 variables (prior to any transformations, by say, C++ iterators).
527
528 INITV, CONDV and INCRV are vectors containing initialization
529 expressions, controlling predicates and increment expressions.
530 BODY is the body of the loop and PRE_BODY statements that go before
531 the loop. */
532
tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
		  tree orig_declv, tree initv, tree condv, tree incrv,
		  tree body, tree pre_body)
{
  location_t elocus;
  bool fail = false;
  int i;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  fail = true;
	}
      else if (TYPE_ATOMIC (TREE_TYPE (decl)))
	{
	  error_at (elocus, "%<_Atomic%> iteration variable %qE", decl);
	  fail = true;
	  /* _Atomic iterator confuses stuff too much, so we risk ICE
	     trying to diagnose it further.  */
	  continue;
	}

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
	 have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
	{
	  elocus = DECL_SOURCE_LOCATION (decl);

	  init = DECL_INITIAL (decl);
	  if (init == NULL)
	    {
	      error_at (elocus, "%qE is not initialized", decl);
	      init = integer_zero_node;
	      fail = true;
	    }
	  DECL_INITIAL (decl) = NULL_TREE;

	  init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
				    /* FIXME diagnostics: This should
				       be the location of the INIT.  */
				    elocus,
				    init,
				    NULL_TREE);
	}
      if (init != error_mark_node)
	{
	  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
	  gcc_assert (TREE_OPERAND (init, 0) == decl);
	}

      if (cond == NULL_TREE)
	{
	  error_at (elocus, "missing controlling predicate");
	  fail = true;
	}
      else
	{
	  bool cond_ok = false;

	  /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with
	     evaluation of the vla VAR_DECL.  We need to readd
	     them to the non-decl operand.  See PR45784.  */
	  while (TREE_CODE (cond) == COMPOUND_EXPR)
	    cond = TREE_OPERAND (cond, 1);

	  if (EXPR_HAS_LOCATION (cond))
	    elocus = EXPR_LOCATION (cond);

	  if (TREE_CODE (cond) == LT_EXPR
	      || TREE_CODE (cond) == LE_EXPR
	      || TREE_CODE (cond) == GT_EXPR
	      || TREE_CODE (cond) == GE_EXPR
	      || TREE_CODE (cond) == NE_EXPR
	      || TREE_CODE (cond) == EQ_EXPR)
	    {
	      tree op0 = TREE_OPERAND (cond, 0);
	      tree op1 = TREE_OPERAND (cond, 1);

	      /* 2.5.1.  The comparison in the condition is computed in
		 the type of DECL, otherwise the behavior is undefined.

		 For example:
		 long n; int i;
		 i < n;

		 according to ISO will be evaluated as:
		 (long)i < n;

		 We want to force:
		 i < (int)n;  */
	      if (TREE_CODE (op0) == NOP_EXPR
		  && decl == TREE_OPERAND (op0, 0))
		{
		  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
		  TREE_OPERAND (cond, 1)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 1));
		}
	      else if (TREE_CODE (op1) == NOP_EXPR
		       && decl == TREE_OPERAND (op1, 0))
		{
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
		  TREE_OPERAND (cond, 0)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 0));
		}

	      /* Canonicalize so that DECL is always the first operand,
		 swapping the comparison code as needed.  */
	      if (decl == TREE_OPERAND (cond, 0))
		cond_ok = true;
	      else if (decl == TREE_OPERAND (cond, 1))
		{
		  TREE_SET_CODE (cond,
				 swap_tree_comparison (TREE_CODE (cond)));
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
		  TREE_OPERAND (cond, 0) = decl;
		  cond_ok = true;
		}

	      /* NE/EQ are only acceptable when the bound is the extreme
		 value of DECL's type, where they are equivalent to an
		 ordered comparison; rewrite them accordingly.  */
	      if (TREE_CODE (cond) == NE_EXPR
		  || TREE_CODE (cond) == EQ_EXPR)
		{
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
		    {
		      cond_ok = false;
		    }
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MIN_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? GT_EXPR : LE_EXPR);
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MAX_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? LT_EXPR : GE_EXPR);
		  else
		    cond_ok = false;
		}

	      /* Re-attach any COMPOUND_EXPRs that were stripped above to
		 the non-decl operand of the canonicalized condition.  */
	      if (cond_ok && TREE_VEC_ELT (condv, i) != cond)
		{
		  tree ce = NULL_TREE, *pce = &ce;
		  tree type = TREE_TYPE (TREE_OPERAND (cond, 1));
		  for (tree c = TREE_VEC_ELT (condv, i); c != cond;
		       c = TREE_OPERAND (c, 1))
		    {
		      *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0),
				     TREE_OPERAND (cond, 1));
		      pce = &TREE_OPERAND (*pce, 1);
		    }
		  TREE_OPERAND (cond, 1) = ce;
		  TREE_VEC_ELT (condv, i) = cond;
		}
	    }

	  if (!cond_ok)
	    {
	      error_at (elocus, "invalid controlling predicate");
	      fail = true;
	    }
	}

      if (incr == NULL_TREE)
	{
	  error_at (elocus, "missing increment expression");
	  fail = true;
	}
      else
	{
	  bool incr_ok = false;

	  if (EXPR_HAS_LOCATION (incr))
	    elocus = EXPR_LOCATION (incr);

	  /* Check all the valid increment expressions: v++, v--, ++v, --v,
	     v = v + incr, v = incr + v and v = v - incr.  */
	  switch (TREE_CODE (incr))
	    {
	    case POSTINCREMENT_EXPR:
	    case PREINCREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;

	      incr_ok = true;
	      incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
	      break;

	    case COMPOUND_EXPR:
	      /* A pre-evaluated increment looks like
		 (SAVE_EXPR, decl = ...); look through to the store.  */
	      if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
		  || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
		break;
	      incr = TREE_OPERAND (incr, 1);
	      /* FALLTHRU */
	    case MODIFY_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;
	      if (TREE_OPERAND (incr, 1) == decl)
		break;
	      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
		  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
		      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
		incr_ok = true;
	      else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
			|| (TREE_CODE (TREE_OPERAND (incr, 1))
			    == POINTER_PLUS_EXPR))
		       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
		incr_ok = true;
	      else
		{
		  /* Anything else must canonicalize into decl = decl + t
		     via check_omp_for_incr_expr, or it is invalid.  */
		  tree t = check_omp_for_incr_expr (elocus,
						    TREE_OPERAND (incr, 1),
						    decl);
		  if (t != error_mark_node)
		    {
		      incr_ok = true;
		      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
		      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		    }
		}
	      break;

	    default:
	      break;
	    }
	  if (!incr_ok)
	    {
	      error_at (elocus, "invalid increment expression");
	      fail = true;
	    }
	}

      /* Store back the canonicalized init and incr expressions.  */
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (code);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;
      OMP_FOR_ORIG_DECLS (t) = orig_declv;

      SET_EXPR_LOCATION (t, locus);
      return t;
    }
}
805
806 /* Type for passing data in between c_omp_check_loop_iv and
807 c_omp_check_loop_iv_r. */
808
struct c_omp_check_loop_iv_data
{
  tree declv;		/* TREE_VEC of the associated loops' iteration vars.  */
  bool fail;		/* Set when an invalid IV reference is diagnosed.  */
  location_t stmt_loc;	/* Location of the OpenMP construct itself.  */
  location_t expr_loc;	/* Location of the expression being walked.  */
  int kind;		/* Which expression: 0 = init, 1 = cond, 2 = incr.  */
  walk_tree_lh lh;	/* Language hook passed through to walk_tree_1.  */
  hash_set<tree> *ppset;	/* Visited-node set shared across walks.  */
};
819
820 /* Helper function called via walk_tree, to diagnose uses
821 of associated loop IVs inside of lb, b and incr expressions
822 of OpenMP loops. */
823
static tree
c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data)
{
  struct c_omp_check_loop_iv_data *d
    = (struct c_omp_check_loop_iv_data *) data;
  if (DECL_P (*tp))
    {
      int i;
      /* Error if *TP is one of the collapsed loops' iteration vars;
	 d->kind selects the wording for the expression being walked.  */
      for (i = 0; i < TREE_VEC_LENGTH (d->declv); i++)
	if (*tp == TREE_VEC_ELT (d->declv, i))
	  {
	    /* Prefer the expression's own location, falling back to the
	       location of the OpenMP statement.  */
	    location_t loc = d->expr_loc;
	    if (loc == UNKNOWN_LOCATION)
	      loc = d->stmt_loc;
	    switch (d->kind)
	      {
	      case 0:
		error_at (loc, "initializer expression refers to "
			       "iteration variable %qD", *tp);
		break;
	      case 1:
		error_at (loc, "condition expression refers to "
			       "iteration variable %qD", *tp);
		break;
	      case 2:
		error_at (loc, "increment expression refers to "
			       "iteration variable %qD", *tp);
		break;
	      }
	    d->fail = true;
	  }
    }
  /* Don't walk dtors added by C++ wrap_cleanups_r.  */
  else if (TREE_CODE (*tp) == TRY_CATCH_EXPR
	   && TRY_CATCH_IS_CLEANUP (*tp))
    {
      *walk_subtrees = 0;
      /* Walk only the protected body, skipping the cleanup operand.  */
      return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data,
			  d->ppset, d->lh);
    }

  return NULL_TREE;
}
867
868 /* Diagnose invalid references to loop iterators in lb, b and incr
869 expressions. */
870
bool
c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh)
{
  hash_set<tree> pset;
  struct c_omp_check_loop_iv_data data;
  int i;

  data.declv = declv;
  data.fail = false;
  data.stmt_loc = EXPR_LOCATION (stmt);
  data.lh = lh;
  data.ppset = &pset;
  /* Walk the init, cond and incr expressions of every collapsed loop,
     diagnosing any reference to an associated iteration variable.  */
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
    {
      tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i);
      gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
      tree decl = TREE_OPERAND (init, 0);
      tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i);
      gcc_assert (COMPARISON_CLASS_P (cond));
      gcc_assert (TREE_OPERAND (cond, 0) == decl);
      tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i);
      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1));
      data.kind = 0;
      walk_tree_1 (&TREE_OPERAND (init, 1),
		   c_omp_check_loop_iv_r, &data, &pset, lh);
      /* Don't warn for C++ random access iterators here, the
	 expression then involves the subtraction and always refers
	 to the original value.  The C++ FE needs to warn on those
	 earlier.  */
      if (decl == TREE_VEC_ELT (declv, i))
	{
	  data.expr_loc = EXPR_LOCATION (cond);
	  data.kind = 1;
	  /* DECL is known to be operand 0 of COND (asserted above),
	     so only the bound operand needs checking.  */
	  walk_tree_1 (&TREE_OPERAND (cond, 1),
		       c_omp_check_loop_iv_r, &data, &pset, lh);
	}
      if (TREE_CODE (incr) == MODIFY_EXPR)
	{
	  gcc_assert (TREE_OPERAND (incr, 0) == decl);
	  incr = TREE_OPERAND (incr, 1);
	  data.kind = 2;
	  /* Check the step operand of decl = decl + step or
	     decl = step + decl — not DECL itself.  */
	  if (TREE_CODE (incr) == PLUS_EXPR
	      && TREE_OPERAND (incr, 1) == decl)
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0));
	      walk_tree_1 (&TREE_OPERAND (incr, 0),
			   c_omp_check_loop_iv_r, &data, &pset, lh);
	    }
	  else
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1));
	      walk_tree_1 (&TREE_OPERAND (incr, 1),
			   c_omp_check_loop_iv_r, &data, &pset, lh);
	    }
	}
    }
  return !data.fail;
}
929
930 /* Similar, but allows to check the init or cond expressions individually. */
931
932 bool
c_omp_check_loop_iv_exprs(location_t stmt_loc,tree declv,tree decl,tree init,tree cond,walk_tree_lh lh)933 c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, tree decl,
934 tree init, tree cond, walk_tree_lh lh)
935 {
936 hash_set<tree> pset;
937 struct c_omp_check_loop_iv_data data;
938
939 data.declv = declv;
940 data.fail = false;
941 data.stmt_loc = stmt_loc;
942 data.lh = lh;
943 data.ppset = &pset;
944 if (init)
945 {
946 data.expr_loc = EXPR_LOCATION (init);
947 data.kind = 0;
948 walk_tree_1 (&init,
949 c_omp_check_loop_iv_r, &data, &pset, lh);
950 }
951 if (cond)
952 {
953 gcc_assert (COMPARISON_CLASS_P (cond));
954 data.expr_loc = EXPR_LOCATION (init);
955 data.kind = 1;
956 if (TREE_OPERAND (cond, 0) == decl)
957 walk_tree_1 (&TREE_OPERAND (cond, 1),
958 c_omp_check_loop_iv_r, &data, &pset, lh);
959 else
960 walk_tree_1 (&TREE_OPERAND (cond, 0),
961 c_omp_check_loop_iv_r, &data, &pset, lh);
962 }
963 return !data.fail;
964 }
965
966 /* This function splits clauses for OpenACC combined loop
967 constructs. OpenACC combined loop constructs are:
968 #pragma acc kernels loop
969 #pragma acc parallel loop */
970
971 tree
c_oacc_split_loop_clauses(tree clauses,tree * not_loop_clauses,bool is_parallel)972 c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses,
973 bool is_parallel)
974 {
975 tree next, loop_clauses, nc;
976
977 loop_clauses = *not_loop_clauses = NULL_TREE;
978 for (; clauses ; clauses = next)
979 {
980 next = OMP_CLAUSE_CHAIN (clauses);
981
982 switch (OMP_CLAUSE_CODE (clauses))
983 {
984 /* Loop clauses. */
985 case OMP_CLAUSE_COLLAPSE:
986 case OMP_CLAUSE_TILE:
987 case OMP_CLAUSE_GANG:
988 case OMP_CLAUSE_WORKER:
989 case OMP_CLAUSE_VECTOR:
990 case OMP_CLAUSE_AUTO:
991 case OMP_CLAUSE_SEQ:
992 case OMP_CLAUSE_INDEPENDENT:
993 case OMP_CLAUSE_PRIVATE:
994 OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
995 loop_clauses = clauses;
996 break;
997
998 /* Reductions must be duplicated on both constructs. */
999 case OMP_CLAUSE_REDUCTION:
1000 if (is_parallel)
1001 {
1002 nc = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1003 OMP_CLAUSE_REDUCTION);
1004 OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (clauses);
1005 OMP_CLAUSE_REDUCTION_CODE (nc)
1006 = OMP_CLAUSE_REDUCTION_CODE (clauses);
1007 OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses;
1008 *not_loop_clauses = nc;
1009 }
1010
1011 OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
1012 loop_clauses = clauses;
1013 break;
1014
1015 /* Parallel/kernels clauses. */
1016 default:
1017 OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses;
1018 *not_loop_clauses = clauses;
1019 break;
1020 }
1021 }
1022
1023 return loop_clauses;
1024 }
1025
1026 /* This function attempts to split or duplicate clauses for OpenMP
1027 combined/composite constructs. Right now there are 21 different
1028 constructs. CODE is the innermost construct in the combined construct,
1029 and MASK allows to determine which constructs are combined together,
1030 as every construct has at least one clause that no other construct
1031 has (except for OMP_SECTIONS, but that can be only combined with parallel).
1032 OpenMP combined/composite constructs are:
1033 #pragma omp distribute parallel for
1034 #pragma omp distribute parallel for simd
1035 #pragma omp distribute simd
1036 #pragma omp for simd
1037 #pragma omp parallel for
1038 #pragma omp parallel for simd
1039 #pragma omp parallel sections
1040 #pragma omp target parallel
1041 #pragma omp target parallel for
1042 #pragma omp target parallel for simd
1043 #pragma omp target teams
1044 #pragma omp target teams distribute
1045 #pragma omp target teams distribute parallel for
1046 #pragma omp target teams distribute parallel for simd
1047 #pragma omp target teams distribute simd
1048 #pragma omp target simd
1049 #pragma omp taskloop simd
1050 #pragma omp teams distribute
1051 #pragma omp teams distribute parallel for
1052 #pragma omp teams distribute parallel for simd
1053 #pragma omp teams distribute simd */
1054
void
c_omp_split_clauses (location_t loc, enum tree_code code,
		     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  /* Start with an empty clause chain for every constituent construct.  */
  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
	cclauses[C_OMP_CLAUSE_SPLIT_FOR]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      case OMP_SECTIONS:
	cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      default:
	break;
      }

  /* Walk the clause chain once; S selects the constituent construct
     the clause is moved to.  Some cases additionally build duplicate
     clauses for other constituents before falling through to the
     common chain splice at the bottom of the loop.  */
  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	/* First the clauses that are unique to some constructs.  */
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_MAP:
	case OMP_CLAUSE_IS_DEVICE_PTR:
	case OMP_CLAUSE_DEFAULTMAP:
	case OMP_CLAUSE_DEPEND:
	  s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	  s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_DIST_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_PROC_BIND:
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	case OMP_CLAUSE_ORDERED:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  /* Drop the simd schedule modifier when simd is not part of
	     the combined construct.  */
	  if (code != OMP_SIMD)
	    OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0;
	  break;
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_SIMDLEN:
	case OMP_CLAUSE_ALIGNED:
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	case OMP_CLAUSE_GRAINSIZE:
	case OMP_CLAUSE_NUM_TASKS:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_NOGROUP:
	case OMP_CLAUSE_PRIORITY:
	  s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  break;
	/* Duplicate this to all of taskloop, distribute, for and simd.  */
	case OMP_CLAUSE_COLLAPSE:
	  if (code == OMP_SIMD)
	    {
	      /* Presence of schedule/dist_schedule/nogroup in MASK means
		 simd is combined with a worksharing/distribute/taskloop
		 construct; give that construct its own copy of collapse.  */
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      else
		{
		  /* This must be #pragma omp target simd */
		  s = C_OMP_CLAUSE_SPLIT_SIMD;
		  break;
		}
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
		{
		  /* distribute parallel for{, simd}: duplicate collapse
		     onto for, keep the original on distribute.  */
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
		   != 0)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else
	    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	/* Private clause is supported on all constructs,
	   it is enough to put it on the innermost one.  For
	   #pragma omp {for,sections} put it on parallel though,
	   as that's what we did for OpenMP 3.1.  */
	case OMP_CLAUSE_PRIVATE:
	  switch (code)
	    {
	    case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
	    case OMP_FOR: case OMP_SECTIONS:
	    case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
	    case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
	    case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
	    default: gcc_unreachable ();
	    }
	  break;
	/* Firstprivate clause is supported on all constructs but
	   simd.  Put it on the outermost of those and duplicate on teams
	   and parallel.  */
	case OMP_CLAUSE_FIRSTPRIVATE:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
	      != 0)
	    {
	      if (code == OMP_SIMD
		  && (mask & ((OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_THREADS)
			      | (OMP_CLAUSE_MASK_1
				 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0)
		{
		  /* This must be #pragma omp target simd.  */
		  s = C_OMP_CLAUSE_SPLIT_TARGET;
		  break;
		}
	      /* Duplicate onto the target construct.  */
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_FIRSTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
	      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
		{
		  /* teams or distribute present as well: duplicate onto
		     parallel, keep the original on the outer construct.  */
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_FIRSTPRIVATE);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  else
		    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		/* This must be
		   #pragma omp parallel{, for{, simd}, sections}
		   or
		   #pragma omp target parallel.  */
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		   != 0)
	    {
	      /* This must be one of
		 #pragma omp {,target }teams distribute
		 #pragma omp target teams
		 #pragma omp {,target }teams distribute simd.  */
	      gcc_assert (code == OMP_DISTRIBUTE
			  || code == OMP_TEAMS
			  || code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      /* This must be #pragma omp distribute simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
	    {
	      /* This must be #pragma omp taskloop simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	    }
	  else
	    {
	      /* This must be #pragma omp for simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  break;
	/* Lastprivate is allowed on distribute, for, sections and simd.  In
	   parallel {for{, simd},sections} we actually want to put it on
	   parallel rather than for or sections.  */
	case OMP_CLAUSE_LASTPRIVATE:
	  if (code == OMP_DISTRIBUTE)
	    {
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1
		       << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      /* distribute is part of the construct but not innermost:
		 duplicate lastprivate onto it.  */
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
	      cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c;
	    }
	  if (code == OMP_FOR || code == OMP_SECTIONS)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      break;
	    }
	  gcc_assert (code == OMP_SIMD);
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      /* for simd: duplicate onto for (or parallel when combined
		 with parallel); the original goes on simd below.  */
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      OMP_CLAUSE_CHAIN (c) = cclauses[s];
	      cclauses[s] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Shared and default clauses are allowed on parallel, teams and
	   taskloop.  */
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_DEFAULT:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    {
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
	      != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  == 0)
		{
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  break;
		}
	      /* Both teams and parallel present: copy onto teams, keep
		 the original for parallel.  Copy whichever payload the
		 clause kind carries (decl vs. default kind).  */
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_CODE (clauses));
	      if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      else
		OMP_CLAUSE_DEFAULT_KIND (c)
		  = OMP_CLAUSE_DEFAULT_KIND (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	      cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	/* Reduction is allowed on simd, for, parallel, sections and teams.
	   Duplicate it on all of them, but omit on for or sections if
	   parallel is present.  */
	case OMP_CLAUSE_REDUCTION:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if (code == OMP_SIMD)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if (code == OMP_SECTIONS || code == OMP_PARALLEL)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else if (code == OMP_SIMD)
	    s = C_OMP_CLAUSE_SPLIT_SIMD;
	  else
	    s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_IF:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_MAP)) != 0)
		{
		  /* target parallel ...: the if clause may carry a
		     directive-name modifier selecting which construct
		     it applies to; without a modifier it applies to
		     both, so duplicate it onto target.  */
		  if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_PARALLEL)
		    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_TARGET)
		    s = C_OMP_CLAUSE_SPLIT_TARGET;
		  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == ERROR_MARK)
		    {
		      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					    OMP_CLAUSE_IF);
		      OMP_CLAUSE_IF_MODIFIER (c)
			= OMP_CLAUSE_IF_MODIFIER (clauses);
		      OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
		      OMP_CLAUSE_CHAIN (c)
			= cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
		      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
		      s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		    }
		  else
		    {
		      error_at (OMP_CLAUSE_LOCATION (clauses),
				"expected %<parallel%> or %<target%> %<if%> "
				"clause modifier");
		      continue;
		    }
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_LINEAR:
	  /* Linear clause is allowed on simd and for.  Put it on the
	     innermost construct.  */
	  if (code == OMP_SIMD)
	    s = C_OMP_CLAUSE_SPLIT_SIMD;
	  else
	    s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_NOWAIT:
	  /* Nowait clause is allowed on target, for and sections, but
	     is not allowed on parallel for or parallel sections.  Therefore,
	     put it on target construct if present, because that can only
	     be combined with parallel for{, simd} and not with for{, simd},
	     otherwise to the worksharing construct.  */
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  else
	    s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	default:
	  gcc_unreachable ();
	}
      /* Splice the (original) clause onto the chosen constituent's
	 chain; duplicates were already chained above.  */
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }

  if (!flag_checking)
    return;

  /* With checking enabled, verify that no clauses were assigned to a
     constituent construct that is not part of the combined construct
     according to MASK.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
  if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
	       | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
      && code != OMP_SECTIONS)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
  if (code != OMP_SIMD)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
}
1481
1482
1483 /* qsort callback to compare #pragma omp declare simd clauses. */
1484
1485 static int
c_omp_declare_simd_clause_cmp(const void * p,const void * q)1486 c_omp_declare_simd_clause_cmp (const void *p, const void *q)
1487 {
1488 tree a = *(const tree *) p;
1489 tree b = *(const tree *) q;
1490 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
1491 {
1492 if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
1493 return -1;
1494 return 1;
1495 }
1496 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
1497 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
1498 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
1499 {
1500 int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
1501 int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
1502 if (c < d)
1503 return 1;
1504 if (c > d)
1505 return -1;
1506 }
1507 return 0;
1508 }
1509
/* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
   CLAUSES (for a function with parameter list PARMS) into argument
   indexes and sort them.  */
1512
1513 tree
c_omp_declare_simd_clauses_to_numbers(tree parms,tree clauses)1514 c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
1515 {
1516 tree c;
1517 vec<tree> clvec = vNULL;
1518
1519 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1520 {
1521 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1522 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1523 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
1524 {
1525 tree decl = OMP_CLAUSE_DECL (c);
1526 tree arg;
1527 int idx;
1528 for (arg = parms, idx = 0; arg;
1529 arg = TREE_CHAIN (arg), idx++)
1530 if (arg == decl)
1531 break;
1532 if (arg == NULL_TREE)
1533 {
1534 error_at (OMP_CLAUSE_LOCATION (c),
1535 "%qD is not an function argument", decl);
1536 continue;
1537 }
1538 OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
1539 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
1540 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
1541 {
1542 decl = OMP_CLAUSE_LINEAR_STEP (c);
1543 for (arg = parms, idx = 0; arg;
1544 arg = TREE_CHAIN (arg), idx++)
1545 if (arg == decl)
1546 break;
1547 if (arg == NULL_TREE)
1548 {
1549 error_at (OMP_CLAUSE_LOCATION (c),
1550 "%qD is not an function argument", decl);
1551 continue;
1552 }
1553 OMP_CLAUSE_LINEAR_STEP (c)
1554 = build_int_cst (integer_type_node, idx);
1555 }
1556 }
1557 clvec.safe_push (c);
1558 }
1559 if (!clvec.is_empty ())
1560 {
1561 unsigned int len = clvec.length (), i;
1562 clvec.qsort (c_omp_declare_simd_clause_cmp);
1563 clauses = clvec[0];
1564 for (i = 0; i < len; i++)
1565 OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
1566 }
1567 else
1568 clauses = NULL_TREE;
1569 clvec.release ();
1570 return clauses;
1571 }
1572
1573 /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */
1574
1575 void
c_omp_declare_simd_clauses_to_decls(tree fndecl,tree clauses)1576 c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
1577 {
1578 tree c;
1579
1580 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1581 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1582 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1583 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
1584 {
1585 int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
1586 tree arg;
1587 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
1588 arg = TREE_CHAIN (arg), i++)
1589 if (i == idx)
1590 break;
1591 gcc_assert (arg);
1592 OMP_CLAUSE_DECL (c) = arg;
1593 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
1594 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
1595 {
1596 idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c));
1597 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
1598 arg = TREE_CHAIN (arg), i++)
1599 if (i == idx)
1600 break;
1601 gcc_assert (arg);
1602 OMP_CLAUSE_LINEAR_STEP (c) = arg;
1603 }
1604 }
1605 }
1606
/* Return the predetermined OpenMP data sharing attribute of DECL, or
   OMP_CLAUSE_DEFAULT_UNSPECIFIED if there is none.  */

1609 enum omp_clause_default_kind
c_omp_predetermined_sharing(tree decl)1610 c_omp_predetermined_sharing (tree decl)
1611 {
1612 /* Variables with const-qualified type having no mutable member
1613 are predetermined shared. */
1614 if (TREE_READONLY (decl))
1615 return OMP_CLAUSE_DEFAULT_SHARED;
1616
1617 /* Predetermine artificial variables holding integral values, those
1618 are usually result of gimplify_one_sizepos or SAVE_EXPR
1619 gimplification. */
1620 if (VAR_P (decl)
1621 && DECL_ARTIFICIAL (decl)
1622 && INTEGRAL_TYPE_P (TREE_TYPE (decl)))
1623 return OMP_CLAUSE_DEFAULT_SHARED;
1624
1625 return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
1626 }
1627