/* Copyright (C) 2005-2021 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This is the default implementation of a barrier synchronization mechanism
   for libgomp.  This type is private to the library.  Note that we rely on
   being able to adjust the barrier count while threads are blocked, so the
   POSIX pthread_barrier_t won't work.  */

#include "libgomp.h"
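
/* A minimal usage sketch, for orientation only (not part of the library;
   NTHREADS is a placeholder for whatever team size the caller picks):

     gomp_barrier_t bar;
     gomp_barrier_init (&bar, NTHREADS);

     gomp_barrier_wait (&bar);      -- called by each of the NTHREADS threads

     gomp_barrier_destroy (&bar);   -- after every thread has left the barrier

   gomp_barrier_reinit can change the thread count of a live barrier, even
   while threads are blocked in it, which is why pthread_barrier_t is not
   usable here (see the note above).  */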

void
gomp_barrier_init (gomp_barrier_t *bar, unsigned count)
{
  gomp_mutex_init (&bar->mutex1);
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_init (&bar->mutex2);
#endif
  gomp_sem_init (&bar->sem1, 0);
  gomp_sem_init (&bar->sem2, 0);
  bar->total = count;
  bar->arrived = 0;
  bar->generation = 0;
  bar->cancellable = false;
}

void
gomp_barrier_destroy (gomp_barrier_t *bar)
{
  /* Before destroying, make sure all threads have left the barrier.  */
  gomp_mutex_lock (&bar->mutex1);
  gomp_mutex_unlock (&bar->mutex1);

  gomp_mutex_destroy (&bar->mutex1);
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_destroy (&bar->mutex2);
#endif
  gomp_sem_destroy (&bar->sem1);
  gomp_sem_destroy (&bar->sem2);
}
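
/* Adjust the thread count of an existing barrier.  As noted at the top of
   this file, this may be called while other threads are blocked in the
   barrier; only BAR->total is updated, under BAR->mutex1.  */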
void
gomp_barrier_reinit (gomp_barrier_t *bar, unsigned count)
{
  gomp_mutex_lock (&bar->mutex1);
  bar->total = count;
  gomp_mutex_unlock (&bar->mutex1);
}
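
/* Finish a barrier wait.  BAR->mutex1 is expected to be held on entry
   (both paths below release it).  The last thread to arrive sees
   BAR_WAS_LAST in STATE: it wakes the other waiting threads through sem1
   and then blocks on sem2 until the last of them has decremented the
   arrival count, so the barrier can be reused immediately.  */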
void
gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  unsigned int n;

  if (state & BAR_WAS_LAST)
    {
      n = --bar->arrived;
      if (n > 0)
	{
	  do
	    gomp_sem_post (&bar->sem1);
	  while (--n != 0);
	  gomp_sem_wait (&bar->sem2);
	}
      gomp_mutex_unlock (&bar->mutex1);
    }
  else
    {
      gomp_mutex_unlock (&bar->mutex1);
      gomp_sem_wait (&bar->sem1);

#ifdef HAVE_SYNC_BUILTINS
      n = __sync_add_and_fetch (&bar->arrived, -1);
#else
      gomp_mutex_lock (&bar->mutex2);
      n = --bar->arrived;
      gomp_mutex_unlock (&bar->mutex2);
#endif

      if (n == 0)
	gomp_sem_post (&bar->sem2);
    }
}

void
gomp_barrier_wait (gomp_barrier_t *barrier)
{
  gomp_barrier_wait_end (barrier, gomp_barrier_wait_start (barrier));
}
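
/* Team-barrier variant of the above.  In addition to the plain rendezvous,
   the last arriving thread runs pending tasks via gomp_barrier_handle_tasks,
   and waiting threads re-check BAR->generation after every wakeup so they
   can help with tasks (BAR_TASK_PENDING) and only leave once the generation
   has advanced by BAR_INCR.  */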
void
gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  unsigned int n;

  state &= ~BAR_CANCELLED;
  if (state & BAR_WAS_LAST)
    {
      n = --bar->arrived;
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      team->work_share_cancelled = 0;
      if (team->task_count)
	{
	  gomp_barrier_handle_tasks (state);
	  if (n > 0)
	    gomp_sem_wait (&bar->sem2);
	  gomp_mutex_unlock (&bar->mutex1);
	  return;
	}

      bar->generation = state + BAR_INCR - BAR_WAS_LAST;
      if (n > 0)
	{
	  do
	    gomp_sem_post (&bar->sem1);
	  while (--n != 0);
	  gomp_sem_wait (&bar->sem2);
	}
      gomp_mutex_unlock (&bar->mutex1);
    }
  else
    {
      gomp_mutex_unlock (&bar->mutex1);
      int gen;
      do
	{
	  gomp_sem_wait (&bar->sem1);
	  gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
	  if (gen & BAR_TASK_PENDING)
	    {
	      gomp_barrier_handle_tasks (state);
	      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
	    }
	}
      while (gen != state + BAR_INCR);

#ifdef HAVE_SYNC_BUILTINS
      n = __sync_add_and_fetch (&bar->arrived, -1);
#else
      gomp_mutex_lock (&bar->mutex2);
      n = --bar->arrived;
      gomp_mutex_unlock (&bar->mutex2);
#endif

      if (n == 0)
	gomp_sem_post (&bar->sem2);
    }
}
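
/* Cancellable version of gomp_team_barrier_wait_end.  Returns true if the
   barrier was cancelled (BAR_CANCELLED observed in STATE or in the
   generation), false once all threads have passed the barrier normally.  */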
bool
gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
				   gomp_barrier_state_t state)
{
  unsigned int n;

  if (state & BAR_WAS_LAST)
    {
      bar->cancellable = false;
      n = --bar->arrived;
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      team->work_share_cancelled = 0;
      if (team->task_count)
	{
	  gomp_barrier_handle_tasks (state);
	  if (n > 0)
	    gomp_sem_wait (&bar->sem2);
	  gomp_mutex_unlock (&bar->mutex1);
	  return false;
	}

      bar->generation = state + BAR_INCR - BAR_WAS_LAST;
      if (n > 0)
	{
	  do
	    gomp_sem_post (&bar->sem1);
	  while (--n != 0);
	  gomp_sem_wait (&bar->sem2);
	}
      gomp_mutex_unlock (&bar->mutex1);
    }
  else
    {
      if (state & BAR_CANCELLED)
	{
	  gomp_mutex_unlock (&bar->mutex1);
	  return true;
	}
      bar->cancellable = true;
      gomp_mutex_unlock (&bar->mutex1);
      int gen;
      do
	{
	  gomp_sem_wait (&bar->sem1);
	  gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
	  if (gen & BAR_CANCELLED)
	    break;
	  if (gen & BAR_TASK_PENDING)
	    {
	      gomp_barrier_handle_tasks (state);
	      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
	      if (gen & BAR_CANCELLED)
		break;
	    }
	}
      while (gen != state + BAR_INCR);

#ifdef HAVE_SYNC_BUILTINS
      n = __sync_add_and_fetch (&bar->arrived, -1);
#else
      gomp_mutex_lock (&bar->mutex2);
      n = --bar->arrived;
      gomp_mutex_unlock (&bar->mutex2);
#endif

      if (n == 0)
	gomp_sem_post (&bar->sem2);
      if (gen & BAR_CANCELLED)
	return true;
    }
  return false;
}

void
gomp_team_barrier_wait (gomp_barrier_t *barrier)
{
  gomp_team_barrier_wait_end (barrier, gomp_barrier_wait_start (barrier));
}
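
/* Wake COUNT threads blocked on the team barrier's sem1, or all other
   team members when COUNT is 0.  */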
void
gomp_team_barrier_wake (gomp_barrier_t *bar, int count)
{
  if (count == 0)
    count = bar->total - 1;
  while (count-- > 0)
    gomp_sem_post (&bar->sem1);
}

bool
gomp_team_barrier_wait_cancel (gomp_barrier_t *bar)
{
  gomp_barrier_state_t state = gomp_barrier_wait_cancel_start (bar);
  return gomp_team_barrier_wait_cancel_end (bar, state);
}
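
/* Mark the team's barrier as cancelled and, if threads are blocked in a
   cancellable wait, release them through sem1 and wait on sem2 for the
   last of them to leave the barrier.  */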
void
gomp_team_barrier_cancel (struct gomp_team *team)
{
  if (team->barrier.generation & BAR_CANCELLED)
    return;
  gomp_mutex_lock (&team->barrier.mutex1);
  gomp_mutex_lock (&team->task_lock);
  if (team->barrier.generation & BAR_CANCELLED)
    {
      gomp_mutex_unlock (&team->task_lock);
      gomp_mutex_unlock (&team->barrier.mutex1);
      return;
    }
  team->barrier.generation |= BAR_CANCELLED;
  gomp_mutex_unlock (&team->task_lock);
  if (team->barrier.cancellable)
    {
      int n = team->barrier.arrived;
      if (n > 0)
	{
	  do
	    gomp_sem_post (&team->barrier.sem1);
	  while (--n != 0);
	  gomp_sem_wait (&team->barrier.sem2);
	}
      team->barrier.cancellable = false;
    }
  gomp_mutex_unlock (&team->barrier.mutex1);
}