xref: /dragonfly/contrib/gcc-8.0/gcc/c-family/c-omp.c (revision 7bcb6caf)
1 /* This file contains routines to construct OpenACC and OpenMP constructs,
2    called from parsing in the C and C++ front ends.
3 
4    Copyright (C) 2005-2018 Free Software Foundation, Inc.
5    Contributed by Richard Henderson <rth@redhat.com>,
6 		  Diego Novillo <dnovillo@redhat.com>.
7 
8 This file is part of GCC.
9 
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14 
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
18 for more details.
19 
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3.  If not see
22 <http://www.gnu.org/licenses/>.  */
23 
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "options.h"
28 #include "c-common.h"
29 #include "gimple-expr.h"
30 #include "c-pragma.h"
31 #include "omp-general.h"
32 #include "gomp-constants.h"
33 
34 
35 /* Complete a #pragma oacc wait construct.  LOC is the location of
36    the #pragma.  */
37 
38 tree
39 c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
40 {
41   const int nparms = list_length (parms);
42   tree stmt, t;
43   vec<tree, va_gc> *args;
44 
45   vec_alloc (args, nparms + 2);
46   stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);
47 
48   if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC))
49     t = OMP_CLAUSE_ASYNC_EXPR (clauses);
50   else
51     t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);
52 
53   args->quick_push (t);
54   args->quick_push (build_int_cst (integer_type_node, nparms));
55 
56   for (t = parms; t; t = TREE_CHAIN (t))
57     {
58       if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
59 	args->quick_push (build_int_cst (integer_type_node,
60 			TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
61       else
62 	args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
63     }
64 
65   stmt = build_call_expr_loc_vec (loc, stmt, args);
66 
67   vec_free (args);
68 
69   return stmt;
70 }
71 
/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */
74 
75 tree
76 c_finish_omp_master (location_t loc, tree stmt)
77 {
78   tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
79   SET_EXPR_LOCATION (t, loc);
80   return t;
81 }
82 
/* Complete a #pragma omp taskgroup construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */
85 
86 tree
87 c_finish_omp_taskgroup (location_t loc, tree stmt)
88 {
89   tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt));
90   SET_EXPR_LOCATION (t, loc);
91   return t;
92 }
93 
94 /* Complete a #pragma omp critical construct.  STMT is the structured-block
95    that follows the pragma, NAME is the identifier in the pragma, or null
96    if it was omitted.  LOC is the location of the #pragma.  */
97 
98 tree
99 c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses)
100 {
101   tree stmt = make_node (OMP_CRITICAL);
102   TREE_TYPE (stmt) = void_type_node;
103   OMP_CRITICAL_BODY (stmt) = body;
104   OMP_CRITICAL_NAME (stmt) = name;
105   OMP_CRITICAL_CLAUSES (stmt) = clauses;
106   SET_EXPR_LOCATION (stmt, loc);
107   return add_stmt (stmt);
108 }
109 
110 /* Complete a #pragma omp ordered construct.  STMT is the structured-block
111    that follows the pragma.  LOC is the location of the #pragma.  */
112 
tree
c_finish_omp_ordered (location_t loc, tree clauses, tree stmt)
{
  tree t = make_node (OMP_ORDERED);
  TREE_TYPE (t) = void_type_node;
  OMP_ORDERED_BODY (t) = stmt;
  /* When compiling with -fopenmp-simd but not -fopenmp, only a lone
     simd clause is honored; any other clause combination is replaced
     by a plain simd clause so later passes only see the simd part.  */
  if (!flag_openmp	/* flag_openmp_simd */
      && (OMP_CLAUSE_CODE (clauses) != OMP_CLAUSE_SIMD
	  || OMP_CLAUSE_CHAIN (clauses)))
    clauses = build_omp_clause (loc, OMP_CLAUSE_SIMD);
  OMP_ORDERED_CLAUSES (t) = clauses;
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}
127 
128 
129 /* Complete a #pragma omp barrier construct.  LOC is the location of
130    the #pragma.  */
131 
132 void
133 c_finish_omp_barrier (location_t loc)
134 {
135   tree x;
136 
137   x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
138   x = build_call_expr_loc (loc, x, 0);
139   add_stmt (x);
140 }
141 
142 
143 /* Complete a #pragma omp taskwait construct.  LOC is the location of the
144    pragma.  */
145 
146 void
147 c_finish_omp_taskwait (location_t loc)
148 {
149   tree x;
150 
151   x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
152   x = build_call_expr_loc (loc, x, 0);
153   add_stmt (x);
154 }
155 
156 
157 /* Complete a #pragma omp taskyield construct.  LOC is the location of the
158    pragma.  */
159 
160 void
161 c_finish_omp_taskyield (location_t loc)
162 {
163   tree x;
164 
165   x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
166   x = build_call_expr_loc (loc, x, 0);
167   add_stmt (x);
168 }
169 
170 
171 /* Complete a #pragma omp atomic construct.  For CODE OMP_ATOMIC
172    the expression to be implemented atomically is LHS opcode= RHS.
173    For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
174    opcode= RHS with the new or old content of LHS returned.
175    LOC is the location of the atomic statement.  The value returned
176    is either error_mark_node (if the construct was erroneous) or an
177    OMP_ATOMIC* node which should be added to the current statement
178    tree with add_stmt.  If TEST is set, avoid calling save_expr
179    or create_tmp_var*.  */
180 
tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs,
		     tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst,
		     bool test)
{
  tree x, type, addr, pre = NULL_TREE;
  HOST_WIDE_INT bitpos = 0, bitsize = 0;

  /* Bail out early if any operand was already diagnosed as erroneous.  */
  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }
  if (TYPE_ATOMIC (type))
    {
      error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */
  tree blhs = NULL;
  /* For a bit-field LHS, operate atomically on the whole representative
     field; record the bit position and size within the representative so
     the value can be extracted (BIT_FIELD_REF) or re-inserted
     (BIT_INSERT_EXPR) below.  */
  if (TREE_CODE (lhs) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs, 1)))
    {
      tree field = TREE_OPERAND (lhs, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))
	  && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)))
	bitpos = (tree_to_uhwi (DECL_FIELD_OFFSET (field))
		  - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT;
      else
	bitpos = 0;
      bitpos += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
		 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
      gcc_assert (tree_fits_shwi_p (DECL_SIZE (field)));
      bitsize = tree_to_shwi (DECL_SIZE (field));
      blhs = lhs;
      type = TREE_TYPE (repr);
      lhs = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs, 0),
		    repr, TREE_OPERAND (lhs, 2));
    }

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
  if (addr == error_mark_node)
    return error_mark_node;
  if (!test)
    addr = save_expr (addr);
  if (!test
      && TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || !VAR_P (TREE_OPERAND (addr, 0))))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  tree orig_lhs = lhs;
  lhs = build_indirect_ref (loc, addr, RO_NULL);
  tree new_lhs = lhs;

  /* "#pragma omp atomic read": V = *ADDR, no update part.  */
  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_SEQ_CST (x) = seq_cst;
      /* Extract the bit-field value from the representative read.  */
      if (blhs)
	x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			bitsize_int (bitsize), bitsize_int (bitpos));
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (blhs)
    {
      lhs = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), lhs,
			bitsize_int (bitsize), bitsize_int (bitpos));
      if (swapped)
	rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      else if (opcode != NOP_EXPR)
	rhs = build_binary_op (loc, opcode, lhs, rhs, true);
      opcode = NOP_EXPR;
    }
  else if (swapped)
    {
      /* LHS op= RHS was written as RHS op LHS (e.g. x = expr - x).  */
      rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, blhs ? blhs : lhs, NULL_TREE, opcode,
			 loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  /* build_modify_expr may pre-evaluate the RHS into a SAVE_EXPR; split
     that off so it can be re-attached after the atomic node is built.  */
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Re-insert the computed bit-field value into the representative.  */
  if (blhs)
    rhs = build3_loc (loc, BIT_INSERT_EXPR, type, new_lhs,
		      rhs, bitsize_int (bitpos));

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_SEQ_CST (x) = seq_cst;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && VAR_P (rhs1)
      && VAR_P (orig_lhs)
      && rhs1 != orig_lhs
      && !test)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different "
		       "variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different "
		       "variables for memory");
      return error_mark_node;
    }

  /* Rewrite bit-field references in LHS1/RHS1 to use the representative
     field, mirroring the transformation applied to LHS above.  */
  if (lhs1
      && lhs1 != orig_lhs
      && TREE_CODE (lhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs1, 1)))
    {
      tree field = TREE_OPERAND (lhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      lhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs1, 0),
		     repr, TREE_OPERAND (lhs1, 2));
    }
  if (rhs1
      && rhs1 != orig_lhs
      && TREE_CODE (rhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (rhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (rhs1, 1)))
    {
      tree field = TREE_OPERAND (rhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      rhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (rhs1, 0),
		     repr, TREE_OPERAND (rhs1, 2));
    }

  /* Capture forms: additionally store the old or new value in V.  */
  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && VAR_P (lhs1) && VAR_P (orig_lhs))
	{
	  if (lhs1 != orig_lhs && !test)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two "
			     "different variables for memory");
	      return error_mark_node;
	    }
	}
      if (blhs)
	x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			bitsize_int (bitsize), bitsize_int (bitpos));
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      /* Keep RHS1/LHS1 side effects (e.g. VLA size evaluation) while
	 discarding their values.  */
      if (rhs1 && rhs1 != orig_lhs)
	{
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != orig_lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      if (!test)
		x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != orig_lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  /* Re-attach the SAVE_EXPR pre-evaluation split off above.  */
  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}
414 
415 
416 /* Complete a #pragma omp flush construct.  We don't do anything with
417    the variable list that the syntax allows.  LOC is the location of
418    the #pragma.  */
419 
420 void
421 c_finish_omp_flush (location_t loc)
422 {
423   tree x;
424 
425   x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
426   x = build_call_expr_loc (loc, x, 0);
427   add_stmt (x);
428 }
429 
430 
431 /* Check and canonicalize OMP_FOR increment expression.
432    Helper function for c_finish_omp_for.  */
433 
/* Returns EXP - DECL as a folded expression if EXP can be recognized as
   DECL plus/minus loop-invariant parts, or error_mark_node otherwise.  */

static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  /* Only integral expressions at least as wide as DECL can be
     canonicalized safely.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  /* DECL itself contributes a zero step.  */
  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      /* Look through conversions, converting the recursive result back.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      /* DECL may only appear on the left of a subtraction.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, MINUS_EXPR,
			    TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      /* DECL may appear on either side of an addition.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
			    TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
			    TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
	/* cp_build_modify_expr forces preevaluation of the RHS to make
	   sure that it is evaluated before the lvalue-rvalue conversion
	   is applied to the LHS.  Reconstruct the original expression.  */
	tree op0 = TREE_OPERAND (exp, 0);
	if (TREE_CODE (op0) == TARGET_EXPR
	    && !VOID_TYPE_P (TREE_TYPE (op0)))
	  {
	    tree op1 = TREE_OPERAND (exp, 1);
	    tree temp = TARGET_EXPR_SLOT (op0);
	    if (BINARY_CLASS_P (op1)
		&& TREE_OPERAND (op1, 1) == temp)
	      {
		op1 = copy_node (op1);
		TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
		return check_omp_for_incr_expr (loc, op1, decl);
	      }
	  }
	break;
      }
    default:
      break;
    }

  return error_mark_node;
}
496 
497 /* If the OMP_FOR increment expression in INCR is of pointer type,
498    canonicalize it into an expression handled by gimplify_omp_for()
499    and return it.  DECL is the iteration variable.  */
500 
501 static tree
502 c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
503 {
504   if (POINTER_TYPE_P (TREE_TYPE (decl))
505       && TREE_OPERAND (incr, 1))
506     {
507       tree t = fold_convert_loc (loc,
508 				 sizetype, TREE_OPERAND (incr, 1));
509 
510       if (TREE_CODE (incr) == POSTDECREMENT_EXPR
511 	  || TREE_CODE (incr) == PREDECREMENT_EXPR)
512 	t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
513       t = fold_build_pointer_plus (decl, t);
514       incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
515     }
516   return incr;
517 }
518 
519 /* Validate and generate OMP_FOR.
520    DECLV is a vector of iteration variables, for each collapsed loop.
521 
522    ORIG_DECLV, if non-NULL, is a vector with the original iteration
523    variables (prior to any transformations, by say, C++ iterators).
524 
525    INITV, CONDV and INCRV are vectors containing initialization
526    expressions, controlling predicates and increment expressions.
527    BODY is the body of the loop and PRE_BODY statements that go before
528    the loop.  */
529 
tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
		  tree orig_declv, tree initv, tree condv, tree incrv,
		  tree body, tree pre_body)
{
  location_t elocus;
  bool fail = false;
  int i;

  /* One init/cond/incr triple per collapsed loop.  */
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  fail = true;
	}
      else if (TYPE_ATOMIC (TREE_TYPE (decl)))
	{
	  error_at (elocus, "%<_Atomic%> iteration variable %qE", decl);
	  fail = true;
	  /* _Atomic iterator confuses stuff too much, so we risk ICE
	     trying to diagnose it further.  */
	  continue;
	}

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
	 have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
	{
	  elocus = DECL_SOURCE_LOCATION (decl);

	  init = DECL_INITIAL (decl);
	  if (init == NULL)
	    {
	      error_at (elocus, "%qE is not initialized", decl);
	      init = integer_zero_node;
	      fail = true;
	    }
	  DECL_INITIAL (decl) = NULL_TREE;

	  init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
	      			    /* FIXME diagnostics: This should
				       be the location of the INIT.  */
	      			    elocus,
				    init,
				    NULL_TREE);
	}
      if (init != error_mark_node)
	{
	  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
	  gcc_assert (TREE_OPERAND (init, 0) == decl);
	}

      if (cond == NULL_TREE)
	{
	  error_at (elocus, "missing controlling predicate");
	  fail = true;
	}
      else
	{
	  bool cond_ok = false;

	  /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with
	     evaluation of the vla VAR_DECL.  We need to readd
	     them to the non-decl operand.  See PR45784.  */
	  while (TREE_CODE (cond) == COMPOUND_EXPR)
	    cond = TREE_OPERAND (cond, 1);

	  if (EXPR_HAS_LOCATION (cond))
	    elocus = EXPR_LOCATION (cond);

	  if (TREE_CODE (cond) == LT_EXPR
	      || TREE_CODE (cond) == LE_EXPR
	      || TREE_CODE (cond) == GT_EXPR
	      || TREE_CODE (cond) == GE_EXPR
	      || TREE_CODE (cond) == NE_EXPR
	      || TREE_CODE (cond) == EQ_EXPR)
	    {
	      tree op0 = TREE_OPERAND (cond, 0);
	      tree op1 = TREE_OPERAND (cond, 1);

	      /* 2.5.1.  The comparison in the condition is computed in
		 the type of DECL, otherwise the behavior is undefined.

		 For example:
		 long n; int i;
		 i < n;

		 according to ISO will be evaluated as:
		 (long)i < n;

		 We want to force:
		 i < (int)n;  */
	      if (TREE_CODE (op0) == NOP_EXPR
		  && decl == TREE_OPERAND (op0, 0))
		{
		  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
		  TREE_OPERAND (cond, 1)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				   TREE_OPERAND (cond, 1));
		}
	      else if (TREE_CODE (op1) == NOP_EXPR
		       && decl == TREE_OPERAND (op1, 0))
		{
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
		  TREE_OPERAND (cond, 0)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				   TREE_OPERAND (cond, 0));
		}

	      /* Canonicalize so DECL is always operand 0, swapping the
		 comparison direction when needed.  */
	      if (decl == TREE_OPERAND (cond, 0))
		cond_ok = true;
	      else if (decl == TREE_OPERAND (cond, 1))
		{
		  TREE_SET_CODE (cond,
				 swap_tree_comparison (TREE_CODE (cond)));
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
		  TREE_OPERAND (cond, 0) = decl;
		  cond_ok = true;
		}

	      /* NE/EQ are only usable when the bound is the type's min or
		 max value, where they are equivalent to an ordered
		 comparison.  */
	      if (TREE_CODE (cond) == NE_EXPR
		  || TREE_CODE (cond) == EQ_EXPR)
		{
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
		    {
		      cond_ok = false;
		    }
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MIN_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? GT_EXPR : LE_EXPR);
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MAX_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? LT_EXPR : GE_EXPR);
		  else
		    cond_ok = false;
		}

	      /* If COMPOUND_EXPRs were stripped above, reattach their
		 side-effect operands to the non-DECL side of COND.  */
	      if (cond_ok && TREE_VEC_ELT (condv, i) != cond)
		{
		  tree ce = NULL_TREE, *pce = &ce;
		  tree type = TREE_TYPE (TREE_OPERAND (cond, 1));
		  for (tree c = TREE_VEC_ELT (condv, i); c != cond;
		       c = TREE_OPERAND (c, 1))
		    {
		      *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0),
				     TREE_OPERAND (cond, 1));
		      pce = &TREE_OPERAND (*pce, 1);
		    }
		  TREE_OPERAND (cond, 1) = ce;
		  TREE_VEC_ELT (condv, i) = cond;
		}
	    }

	  if (!cond_ok)
	    {
	      error_at (elocus, "invalid controlling predicate");
	      fail = true;
	    }
	}

      if (incr == NULL_TREE)
	{
	  error_at (elocus, "missing increment expression");
	  fail = true;
	}
      else
	{
	  bool incr_ok = false;

	  if (EXPR_HAS_LOCATION (incr))
	    elocus = EXPR_LOCATION (incr);

	  /* Check all the valid increment expressions: v++, v--, ++v, --v,
	     v = v + incr, v = incr + v and v = v - incr.  */
	  switch (TREE_CODE (incr))
	    {
	    case POSTINCREMENT_EXPR:
	    case PREINCREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;

	      incr_ok = true;
	      incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
	      break;

	    case COMPOUND_EXPR:
	      /* Allow a SAVE_EXPR pre-evaluation wrapped around the
		 MODIFY_EXPR (as produced by the C++ FE).  */
	      if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
		  || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
		break;
	      incr = TREE_OPERAND (incr, 1);
	      /* FALLTHRU */
	    case MODIFY_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;
	      if (TREE_OPERAND (incr, 1) == decl)
		break;
	      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
		  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
		      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
		incr_ok = true;
	      else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
			|| (TREE_CODE (TREE_OPERAND (incr, 1))
			    == POINTER_PLUS_EXPR))
		       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
		incr_ok = true;
	      else
		{
		  /* Last resort: try to canonicalize the RHS as
		     DECL + invariant step.  */
		  tree t = check_omp_for_incr_expr (elocus,
						    TREE_OPERAND (incr, 1),
						    decl);
		  if (t != error_mark_node)
		    {
		      incr_ok = true;
		      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
		      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		    }
		}
	      break;

	    default:
	      break;
	    }
	  if (!incr_ok)
	    {
	      error_at (elocus, "invalid increment expression");
	      fail = true;
	    }
	}

      /* Store the possibly canonicalized expressions back.  */
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (code);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;
      OMP_FOR_ORIG_DECLS (t) = orig_declv;

      SET_EXPR_LOCATION (t, locus);
      return t;
    }
}
802 
803 /* Type for passing data in between c_omp_check_loop_iv and
804    c_omp_check_loop_iv_r.  */
805 
struct c_omp_check_loop_iv_data
{
  tree declv;			/* TREE_VEC of the loop iteration variables.  */
  bool fail;			/* Set when a diagnostic has been emitted.  */
  location_t stmt_loc;		/* Location of the whole OMP_FOR statement.  */
  location_t expr_loc;		/* Location of the expression being walked;
				   falls back to stmt_loc when unknown.  */
  int kind;			/* 0 = initializer, 1 = condition,
				   2 = increment (selects the diagnostic).  */
  walk_tree_lh lh;		/* Language-specific walk_tree hook.  */
  hash_set<tree> *ppset;	/* Visited-node set shared across walks.  */
};
816 
817 /* Helper function called via walk_tree, to diagnose uses
818    of associated loop IVs inside of lb, b and incr expressions
819    of OpenMP loops.  */
820 
static tree
c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data)
{
  struct c_omp_check_loop_iv_data *d
    = (struct c_omp_check_loop_iv_data *) data;
  /* Any decl matching one of the iteration variables is an error; which
     message is emitted depends on which expression is being walked.  */
  if (DECL_P (*tp))
    {
      int i;
      for (i = 0; i < TREE_VEC_LENGTH (d->declv); i++)
	if (*tp == TREE_VEC_ELT (d->declv, i))
	  {
	    location_t loc = d->expr_loc;
	    if (loc == UNKNOWN_LOCATION)
	      loc = d->stmt_loc;
	    switch (d->kind)
	      {
	      case 0:
		error_at (loc, "initializer expression refers to "
			       "iteration variable %qD", *tp);
		break;
	      case 1:
		error_at (loc, "condition expression refers to "
			       "iteration variable %qD", *tp);
		break;
	      case 2:
		error_at (loc, "increment expression refers to "
			       "iteration variable %qD", *tp);
		break;
	      }
	    d->fail = true;
	  }
    }
  /* Don't walk dtors added by C++ wrap_cleanups_r.  */
  else if (TREE_CODE (*tp) == TRY_CATCH_EXPR
	   && TRY_CATCH_IS_CLEANUP (*tp))
    {
      /* Only the protected operand is relevant; skip the cleanup.  */
      *walk_subtrees = 0;
      return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data,
			  d->ppset, d->lh);
    }

  return NULL_TREE;
}
864 
865 /* Diagnose invalid references to loop iterators in lb, b and incr
866    expressions.  */
867 
/* Returns true if no invalid IV reference was found, false (after
   emitting diagnostics) otherwise.  */

bool
c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh)
{
  hash_set<tree> pset;
  struct c_omp_check_loop_iv_data data;
  int i;

  data.declv = declv;
  data.fail = false;
  data.stmt_loc = EXPR_LOCATION (stmt);
  data.lh = lh;
  data.ppset = &pset;
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
    {
      tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i);
      gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
      tree decl = TREE_OPERAND (init, 0);
      tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i);
      gcc_assert (COMPARISON_CLASS_P (cond));
      gcc_assert (TREE_OPERAND (cond, 0) == decl);
      tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i);
      /* Walk the initializer value.  */
      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1));
      data.kind = 0;
      walk_tree_1 (&TREE_OPERAND (init, 1),
		   c_omp_check_loop_iv_r, &data, &pset, lh);
      /* Don't warn for C++ random access iterators here, the
	 expression then involves the subtraction and always refers
	 to the original value.  The C++ FE needs to warn on those
	 earlier.  */
      if (decl == TREE_VEC_ELT (declv, i))
	{
	  /* Walk the non-DECL side of the condition.  */
	  data.expr_loc = EXPR_LOCATION (cond);
	  data.kind = 1;
	  walk_tree_1 (&TREE_OPERAND (cond, 1),
		       c_omp_check_loop_iv_r, &data, &pset, lh);
	}
      if (TREE_CODE (incr) == MODIFY_EXPR)
	{
	  gcc_assert (TREE_OPERAND (incr, 0) == decl);
	  incr = TREE_OPERAND (incr, 1);
	  data.kind = 2;
	  /* Walk whichever side of the step expression isn't DECL itself.  */
	  if (TREE_CODE (incr) == PLUS_EXPR
	      && TREE_OPERAND (incr, 1) == decl)
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0));
	      walk_tree_1 (&TREE_OPERAND (incr, 0),
			   c_omp_check_loop_iv_r, &data, &pset, lh);
	    }
	  else
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1));
	      walk_tree_1 (&TREE_OPERAND (incr, 1),
			   c_omp_check_loop_iv_r, &data, &pset, lh);
	    }
	}
    }
  return !data.fail;
}
926 
927 /* Similar, but allows to check the init or cond expressions individually.  */
928 
929 bool
930 c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, tree decl,
931 			   tree init, tree cond, walk_tree_lh lh)
932 {
933   hash_set<tree> pset;
934   struct c_omp_check_loop_iv_data data;
935 
936   data.declv = declv;
937   data.fail = false;
938   data.stmt_loc = stmt_loc;
939   data.lh = lh;
940   data.ppset = &pset;
941   if (init)
942     {
943       data.expr_loc = EXPR_LOCATION (init);
944       data.kind = 0;
945       walk_tree_1 (&init,
946 		   c_omp_check_loop_iv_r, &data, &pset, lh);
947     }
948   if (cond)
949     {
950       gcc_assert (COMPARISON_CLASS_P (cond));
951       data.expr_loc = EXPR_LOCATION (init);
952       data.kind = 1;
953       if (TREE_OPERAND (cond, 0) == decl)
954 	walk_tree_1 (&TREE_OPERAND (cond, 1),
955 		     c_omp_check_loop_iv_r, &data, &pset, lh);
956       else
957 	walk_tree_1 (&TREE_OPERAND (cond, 0),
958 		     c_omp_check_loop_iv_r, &data, &pset, lh);
959     }
960   return !data.fail;
961 }
962 
963 /* This function splits clauses for OpenACC combined loop
964    constructs.  OpenACC combined loop constructs are:
965    #pragma acc kernels loop
966    #pragma acc parallel loop  */
967 
968 tree
969 c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses,
970 			   bool is_parallel)
971 {
972   tree next, loop_clauses, nc;
973 
974   loop_clauses = *not_loop_clauses = NULL_TREE;
975   for (; clauses ; clauses = next)
976     {
977       next = OMP_CLAUSE_CHAIN (clauses);
978 
979       switch (OMP_CLAUSE_CODE (clauses))
980         {
981 	  /* Loop clauses.  */
982 	case OMP_CLAUSE_COLLAPSE:
983 	case OMP_CLAUSE_TILE:
984 	case OMP_CLAUSE_GANG:
985 	case OMP_CLAUSE_WORKER:
986 	case OMP_CLAUSE_VECTOR:
987 	case OMP_CLAUSE_AUTO:
988 	case OMP_CLAUSE_SEQ:
989 	case OMP_CLAUSE_INDEPENDENT:
990 	case OMP_CLAUSE_PRIVATE:
991 	  OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
992 	  loop_clauses = clauses;
993 	  break;
994 
995 	  /* Reductions must be duplicated on both constructs.  */
996 	case OMP_CLAUSE_REDUCTION:
997 	  if (is_parallel)
998 	    {
999 	      nc = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1000 				     OMP_CLAUSE_REDUCTION);
1001 	      OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (clauses);
1002 	      OMP_CLAUSE_REDUCTION_CODE (nc)
1003 		= OMP_CLAUSE_REDUCTION_CODE (clauses);
1004 	      OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses;
1005 	      *not_loop_clauses = nc;
1006 	    }
1007 
1008 	  OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
1009 	  loop_clauses = clauses;
1010 	  break;
1011 
1012 	  /* Parallel/kernels clauses.  */
1013 	default:
1014 	  OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses;
1015 	  *not_loop_clauses = clauses;
1016 	  break;
1017 	}
1018     }
1019 
1020   return loop_clauses;
1021 }
1022 
1023 /* This function attempts to split or duplicate clauses for OpenMP
1024    combined/composite constructs.  Right now there are 21 different
1025    constructs.  CODE is the innermost construct in the combined construct,
1026    and MASK allows to determine which constructs are combined together,
1027    as every construct has at least one clause that no other construct
1028    has (except for OMP_SECTIONS, but that can be only combined with parallel).
1029    OpenMP combined/composite constructs are:
1030    #pragma omp distribute parallel for
1031    #pragma omp distribute parallel for simd
1032    #pragma omp distribute simd
1033    #pragma omp for simd
1034    #pragma omp parallel for
1035    #pragma omp parallel for simd
1036    #pragma omp parallel sections
1037    #pragma omp target parallel
1038    #pragma omp target parallel for
1039    #pragma omp target parallel for simd
1040    #pragma omp target teams
1041    #pragma omp target teams distribute
1042    #pragma omp target teams distribute parallel for
1043    #pragma omp target teams distribute parallel for simd
1044    #pragma omp target teams distribute simd
1045    #pragma omp target simd
1046    #pragma omp taskloop simd
1047    #pragma omp teams distribute
1048    #pragma omp teams distribute parallel for
1049    #pragma omp teams distribute parallel for simd
1050    #pragma omp teams distribute simd  */
1051 
void
c_omp_split_clauses (location_t loc, enum tree_code code,
		     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  /* Start with every per-construct clause chain empty.  */
  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
        cclauses[C_OMP_CLAUSE_SPLIT_FOR]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      case OMP_SECTIONS:
	cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      default:
	break;
      }

  /* Walk the incoming clause chain and splice each clause onto the
     chain of the construct it belongs to (choice recorded in S);
     some clauses are additionally duplicated onto other constructs.  */
  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	/* First the clauses that are unique to some constructs.  */
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_MAP:
	case OMP_CLAUSE_IS_DEVICE_PTR:
	case OMP_CLAUSE_DEFAULTMAP:
	case OMP_CLAUSE_DEPEND:
	  s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	  s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_DIST_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_PROC_BIND:
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	case OMP_CLAUSE_ORDERED:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  /* The simd schedule modifier is only meaningful when the
	     construct includes simd.  */
	  if (code != OMP_SIMD)
	    OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0;
	  break;
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_SIMDLEN:
	case OMP_CLAUSE_ALIGNED:
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	case OMP_CLAUSE_GRAINSIZE:
	case OMP_CLAUSE_NUM_TASKS:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_NOGROUP:
	case OMP_CLAUSE_PRIORITY:
	  s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  break;
	/* Duplicate this to all of taskloop, distribute, for and simd.  */
	case OMP_CLAUSE_COLLAPSE:
	  if (code == OMP_SIMD)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
		{
		  /* Combined with for/distribute/taskloop: give simd its
		     own copy and fall through to place the original.  */
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      else
		{
		  /* This must be #pragma omp target simd */
		  s = C_OMP_CLAUSE_SPLIT_SIMD;
		  break;
		}
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
		   != 0)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else
	    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	/* Private clause is supported on all constructs,
	   it is enough to put it on the innermost one.  For
	   #pragma omp {for,sections} put it on parallel though,
	   as that's what we did for OpenMP 3.1.  */
	case OMP_CLAUSE_PRIVATE:
	  switch (code)
	    {
	    case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
	    case OMP_FOR: case OMP_SECTIONS:
	    case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
	    case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
	    case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
	    default: gcc_unreachable ();
	    }
	  break;
	/* Firstprivate clause is supported on all constructs but
	   simd.  Put it on the outermost of those and duplicate on teams
	   and parallel.  */
	case OMP_CLAUSE_FIRSTPRIVATE:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
	      != 0)
	    {
	      if (code == OMP_SIMD
		  && (mask & ((OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_THREADS)
			      | (OMP_CLAUSE_MASK_1
				 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0)
		{
		  /* This must be #pragma omp target simd.  */
		  s = C_OMP_CLAUSE_SPLIT_TARGET;
		  break;
		}
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_FIRSTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
	      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_FIRSTPRIVATE);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  else
		    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		/* This must be
		   #pragma omp parallel{, for{, simd}, sections}
		   or
		   #pragma omp target parallel.  */
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		   != 0)
	    {
	      /* This must be one of
		 #pragma omp {,target }teams distribute
		 #pragma omp target teams
		 #pragma omp {,target }teams distribute simd.  */
	      gcc_assert (code == OMP_DISTRIBUTE
			  || code == OMP_TEAMS
			  || code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      /* This must be #pragma omp distribute simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
	    {
	      /* This must be #pragma omp taskloop simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	    }
	  else
	    {
	      /* This must be #pragma omp for simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  break;
	/* Lastprivate is allowed on distribute, for, sections and simd.  In
	   parallel {for{, simd},sections} we actually want to put it on
	   parallel rather than for or sections.  */
	case OMP_CLAUSE_LASTPRIVATE:
	  if (code == OMP_DISTRIBUTE)
	    {
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1
		       << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      /* Combined with distribute: duplicate onto distribute.  */
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
	      cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c;
	    }
	  if (code == OMP_FOR || code == OMP_SECTIONS)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      break;
	    }
	  gcc_assert (code == OMP_SIMD);
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      /* for simd (possibly under parallel): duplicate onto the
		 enclosing worksharing or parallel construct.  */
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      OMP_CLAUSE_CHAIN (c) = cclauses[s];
	      cclauses[s] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Shared and default clauses are allowed on parallel, teams and
	   taskloop.  */
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_DEFAULT:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    {
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
	      != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  == 0)
		{
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  break;
		}
	      /* Both teams and parallel present: duplicate onto teams.  */
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_CODE (clauses));
	      if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      else
		OMP_CLAUSE_DEFAULT_KIND (c)
		  = OMP_CLAUSE_DEFAULT_KIND (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	      cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	/* Reduction is allowed on simd, for, parallel, sections and teams.
	   Duplicate it on all of them, but omit on for or sections if
	   parallel is present.  */
	case OMP_CLAUSE_REDUCTION:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if (code == OMP_SIMD)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if (code == OMP_SECTIONS || code == OMP_PARALLEL)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else if (code == OMP_SIMD)
	    s = C_OMP_CLAUSE_SPLIT_SIMD;
	  else
	    s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_IF:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_MAP)) != 0)
		{
		  /* target parallel ...: an explicit modifier selects the
		     construct; an unmodified if applies to both.  */
		  if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_PARALLEL)
		    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_TARGET)
		    s = C_OMP_CLAUSE_SPLIT_TARGET;
		  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == ERROR_MARK)
		    {
		      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					    OMP_CLAUSE_IF);
		      OMP_CLAUSE_IF_MODIFIER (c)
			= OMP_CLAUSE_IF_MODIFIER (clauses);
		      OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
		      OMP_CLAUSE_CHAIN (c)
			= cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
		      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
		      s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		    }
		  else
		    {
		      error_at (OMP_CLAUSE_LOCATION (clauses),
				"expected %<parallel%> or %<target%> %<if%> "
				"clause modifier");
		      continue;
		    }
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_LINEAR:
	  /* Linear clause is allowed on simd and for.  Put it on the
	     innermost construct.  */
	  if (code == OMP_SIMD)
	    s = C_OMP_CLAUSE_SPLIT_SIMD;
	  else
	    s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_NOWAIT:
	  /* Nowait clause is allowed on target, for and sections, but
	     is not allowed on parallel for or parallel sections.  Therefore,
	     put it on target construct if present, because that can only
	     be combined with parallel for{, simd} and not with for{, simd},
	     otherwise to the worksharing construct.  */
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  else
	    s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	default:
	  gcc_unreachable ();
	}
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }

  if (!flag_checking)
    return;

  /* Sanity checks: no clauses should have ended up on a construct that
     is not part of the combined construct described by MASK.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
  if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
	       | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
      && code != OMP_SECTIONS)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
  if (code != OMP_SIMD)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
}
1478 
1479 
1480 /* qsort callback to compare #pragma omp declare simd clauses.  */
1481 
1482 static int
1483 c_omp_declare_simd_clause_cmp (const void *p, const void *q)
1484 {
1485   tree a = *(const tree *) p;
1486   tree b = *(const tree *) q;
1487   if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
1488     {
1489       if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
1490 	return -1;
1491       return 1;
1492     }
1493   if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
1494       && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
1495       && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
1496     {
1497       int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
1498       int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
1499       if (c < d)
1500 	return 1;
1501       if (c > d)
1502 	return -1;
1503     }
1504   return 0;
1505 }
1506 
1507 /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
1508    CLAUSES on FNDECL into argument indexes and sort them.  */
1509 
1510 tree
1511 c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
1512 {
1513   tree c;
1514   vec<tree> clvec = vNULL;
1515 
1516   for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1517     {
1518       if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1519 	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1520 	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
1521 	{
1522 	  tree decl = OMP_CLAUSE_DECL (c);
1523 	  tree arg;
1524 	  int idx;
1525 	  for (arg = parms, idx = 0; arg;
1526 	       arg = TREE_CHAIN (arg), idx++)
1527 	    if (arg == decl)
1528 	      break;
1529 	  if (arg == NULL_TREE)
1530 	    {
1531 	      error_at (OMP_CLAUSE_LOCATION (c),
1532 			"%qD is not an function argument", decl);
1533 	      continue;
1534 	    }
1535 	  OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
1536 	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
1537 	      && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
1538 	    {
1539 	      decl = OMP_CLAUSE_LINEAR_STEP (c);
1540 	      for (arg = parms, idx = 0; arg;
1541 		   arg = TREE_CHAIN (arg), idx++)
1542 		if (arg == decl)
1543 		  break;
1544 	      if (arg == NULL_TREE)
1545 		{
1546 		  error_at (OMP_CLAUSE_LOCATION (c),
1547 			    "%qD is not an function argument", decl);
1548 		  continue;
1549 		}
1550 	      OMP_CLAUSE_LINEAR_STEP (c)
1551 		= build_int_cst (integer_type_node, idx);
1552 	    }
1553 	}
1554       clvec.safe_push (c);
1555     }
1556   if (!clvec.is_empty ())
1557     {
1558       unsigned int len = clvec.length (), i;
1559       clvec.qsort (c_omp_declare_simd_clause_cmp);
1560       clauses = clvec[0];
1561       for (i = 0; i < len; i++)
1562 	OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
1563     }
1564   else
1565     clauses = NULL_TREE;
1566   clvec.release ();
1567   return clauses;
1568 }
1569 
1570 /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs.  */
1571 
1572 void
1573 c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
1574 {
1575   tree c;
1576 
1577   for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1578     if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1579 	&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1580 	&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
1581       {
1582 	int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
1583 	tree arg;
1584 	for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
1585 	     arg = TREE_CHAIN (arg), i++)
1586 	  if (i == idx)
1587 	    break;
1588 	gcc_assert (arg);
1589 	OMP_CLAUSE_DECL (c) = arg;
1590 	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
1591 	    && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
1592 	  {
1593 	    idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c));
1594 	    for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
1595 		 arg = TREE_CHAIN (arg), i++)
1596 	      if (i == idx)
1597 		break;
1598 	    gcc_assert (arg);
1599 	    OMP_CLAUSE_LINEAR_STEP (c) = arg;
1600 	  }
1601       }
1602 }
1603 
1604 /* True if OpenMP sharing attribute of DECL is predetermined.  */
1605 
1606 enum omp_clause_default_kind
1607 c_omp_predetermined_sharing (tree decl)
1608 {
1609   /* Variables with const-qualified type having no mutable member
1610      are predetermined shared.  */
1611   if (TREE_READONLY (decl))
1612     return OMP_CLAUSE_DEFAULT_SHARED;
1613 
1614   return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
1615 }
1616