/* Copyright (C) 2005 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or
   (at your option) any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
   more details.

   You should have received a copy of the GNU Lesser General Public License
   along with libgomp; see the file COPYING.LIB.  If not, write to the
   Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

/* As a special exception, if you link this library with other files, some
   of which are compiled with GCC, to produce an executable, this library
   does not by itself cause the resulting executable to be covered by the
   GNU General Public License.  This exception does not however invalidate
   any other reasons why the executable file might be covered by the GNU
   General Public License.  */

/* This file contains routines to manage the work-share queue for a team
   of threads.  */

#include "libgomp.h"
#include <stdlib.h>
#include <string.h>

/* Create a new work share structure.  If ORDERED, reserve room for the
   ordered_team_ids array of NTHREADS elements as well.  */

struct gomp_work_share *
gomp_new_work_share (bool ordered, unsigned nthreads)
{
  struct gomp_work_share *ws;
  size_t size;

  size = sizeof (*ws);
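  /* For ordered constructs, the ordered_team_ids array lives inline at
     the end of the structure, one slot per team member.  */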
  if (ordered)
    size += nthreads * sizeof (ws->ordered_team_ids[0]);

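  /* gomp_malloc_cleared zero-fills the allocation (and, like the other
     gomp allocation wrappers, dies via gomp_fatal on failure), so all
     of the scheduling state in the new work share starts out zeroed.  */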
  ws = gomp_malloc_cleared (size);
  gomp_mutex_init (&ws->lock);
  ws->ordered_owner = -1;

  return ws;
}


/* Free a work share structure.  */

static void
free_work_share (struct gomp_work_share *ws)
{
  gomp_mutex_destroy (&ws->lock);
  free (ws);
}


/* The current thread is ready to begin the next work sharing construct.
   In all cases, thr->ts.work_share is updated to point to the new
   structure, and the structure's lock is held when this function
   returns.  Return true if this was the first thread to reach this
   point, i.e. the thread that should initialize the work share.  */

bool
gomp_work_share_start (bool ordered)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws;
  unsigned ws_index, ws_gen;

  /* Work sharing constructs can be orphaned: with no enclosing team,
     this thread executes the construct alone as a team of one.  */
  if (team == NULL)
    {
      ws = gomp_new_work_share (ordered, 1);
      thr->ts.work_share = ws;
      thr->ts.static_trip = 0;
      gomp_mutex_lock (&ws->lock);
      return true;
    }

  gomp_mutex_lock (&team->work_share_lock);

  /* This thread is beginning its next generation.  */
  ws_gen = ++thr->ts.work_share_generation;

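  /* The generation comparisons below are carried out in unsigned
     arithmetic, so they remain correct even if the counters wrap.  */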
  /* If this generation is not newer than the newest generation already
     live in the team, another thread created this construct ahead of
     us; simply reference the existing entry.  */
  if (ws_gen - team->oldest_live_gen < team->num_live_gen)
    {
      ws_index = ws_gen & team->generation_mask;
      ws = team->work_shares[ws_index];
      thr->ts.work_share = ws;
      thr->ts.static_trip = 0;

      gomp_mutex_lock (&ws->lock);
      gomp_mutex_unlock (&team->work_share_lock);

      return false;
    }

  /* Resize the work shares queue if we've run out of space.  */
  if (team->num_live_gen++ == team->generation_mask)
    {
      team->work_shares = gomp_realloc (team->work_shares,
					2 * team->num_live_gen
					* sizeof (*team->work_shares));

      /* Unless oldest_live_gen is zero, the sequence of live elements
	 wraps around the end of the array.  If we do nothing, we break
	 lookup of the existing elements.  Fix that by unwrapping the
	 data from the front to the end.  */
      if (team->oldest_live_gen > 0)
	memcpy (team->work_shares + team->num_live_gen,
		team->work_shares,
		(team->oldest_live_gen & team->generation_mask)
		* sizeof (*team->work_shares));
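      /* Illustrative example: with a 4-entry table (generation_mask 3)
	 and live generations 11, 12, 13 at indices 3, 0, 1, a fourth
	 live generation triggers the resize.  The table doubles to 8
	 entries (mask 7) and the first 3 entries (11 & 3) are
	 duplicated at indices 4..6, so generation 12 (12 & 7 == 4) and
	 generation 13 (13 & 7 == 5) are still found by the masked
	 lookup.  */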

      team->generation_mask = team->generation_mask * 2 + 1;
    }

  ws_index = ws_gen & team->generation_mask;
  ws = gomp_new_work_share (ordered, team->nthreads);
  thr->ts.work_share = ws;
  thr->ts.static_trip = 0;
  team->work_shares[ws_index] = ws;

  gomp_mutex_lock (&ws->lock);
  gomp_mutex_unlock (&team->work_share_lock);

  return true;
}
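
/* A minimal sketch of how the routines in this file fit together.  The
   construct entry points (e.g. the loop routines in loop.c) follow
   roughly this pattern; the initialization step is illustrative, not a
   specific libgomp function:

     if (gomp_work_share_start (false))
       {
	 ... first thread to arrive: initialize the new work share ...
       }
     gomp_mutex_unlock (&gomp_thread ()->ts.work_share->lock);

     ... every thread: claim and execute chunks of the work ...

     gomp_work_share_end_nowait ();
*/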


/* The current thread is done with its current work sharing construct.
   This version does imply a barrier at the end of the work-share.  */

void
gomp_work_share_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  bool last;

  thr->ts.work_share = NULL;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (ws);
      return;
    }

  last = gomp_barrier_wait_start (&team->barrier);

  if (last)
    {
      unsigned ws_index;

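      /* gomp_barrier_wait_start returned true for the last thread to
	 arrive, and every other thread has already cleared its
	 work_share pointer, so this work share can be retired and the
	 queue of live generations reset to empty.  */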
      ws_index = thr->ts.work_share_generation & team->generation_mask;
      team->work_shares[ws_index] = NULL;
      team->oldest_live_gen++;
      team->num_live_gen = 0;

      free_work_share (ws);
    }

  gomp_barrier_wait_end (&team->barrier, last);
}


/* The current thread is done with its current work sharing construct.
   This version does NOT imply a barrier at the end of the work-share.  */

void
gomp_work_share_end_nowait (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned completed;

  thr->ts.work_share = NULL;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (ws);
      return;
    }

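  /* Count this thread as finished with the construct.  With the sync
     builtins, __sync_add_and_fetch performs an atomic increment and
     returns the new value, so exactly one thread observes the final
     count of team->nthreads; otherwise the work share's own lock
     serializes the increment.  */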
#ifdef HAVE_SYNC_BUILTINS
  completed = __sync_add_and_fetch (&ws->threads_completed, 1);
#else
  gomp_mutex_lock (&ws->lock);
  completed = ++ws->threads_completed;
  gomp_mutex_unlock (&ws->lock);
#endif

  if (completed == team->nthreads)
    {
      unsigned ws_index;

      gomp_mutex_lock (&team->work_share_lock);

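      /* Unlike gomp_work_share_end, there was no barrier, so only this
	 one generation is known to be finished.  Generations complete
	 in order, so the retiring generation is the oldest live one:
	 advance oldest_live_gen and decrement the live count rather
	 than resetting it to zero.  */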
      ws_index = thr->ts.work_share_generation & team->generation_mask;
      team->work_shares[ws_index] = NULL;
      team->oldest_live_gen++;
      team->num_live_gen--;

      gomp_mutex_unlock (&team->work_share_lock);

      free_work_share (ws);
    }
}