1 /* Copyright (C) 2005-2021 Free Software Foundation, Inc.
2    Contributed by Sebastian Huber <sebastian.huber@embedded-brains.de>.
3 
4    This file is part of the GNU OpenMP Library (libgomp).
5 
6    Libgomp is free software; you can redistribute it and/or modify it
7    under the terms of the GNU General Public License as published by
8    the Free Software Foundation; either version 3, or (at your option)
9    any later version.
10 
11    Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
12    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
13    FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14    more details.
15 
16    Under Section 7 of GPL version 3, you are granted additional
17    permissions described in the GCC Runtime Library Exception, version
18    3.1, as published by the Free Software Foundation.
19 
20    You should have received a copy of the GNU General Public License and
21    a copy of the GCC Runtime Library Exception along with this program;
22    see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
23    <http://www.gnu.org/licenses/>.  */
24 
25 /* This is the RTEMS implementation of a barrier synchronization
26    mechanism for libgomp.  It is identical to the Linux implementation, except
27    that the futex API is slightly different.  This type is private to the
28    library.  */
29 
30 #ifndef GOMP_BARRIER_H
31 #define GOMP_BARRIER_H 1
32 
33 #include <sys/lock.h>
34 
typedef struct
{
  /* Make sure total/generation is in a mostly read cacheline, while
     awaited in a separate cacheline.  */
  unsigned total __attribute__((aligned (64)));	/* Number of participating
						   threads (see
						   gomp_barrier_init).  */
  unsigned generation;		/* Generation counter in the high bits plus
				   BAR_* flag bits (see below).  */
  struct _Futex_Control futex;	/* RTEMS futex used for blocking/waking;
				   see gomp_team_barrier_wake and bar.c.  */
  unsigned awaited __attribute__((aligned (64)));  /* Count of arrivals still
						      outstanding; hits 0 for
						      the last thread.  */
  unsigned awaited_final;	/* Like awaited, but decremented only by the
				   team-end barrier
				   (gomp_barrier_wait_final_start).  */
} gomp_barrier_t;
45 
/* Snapshot of the generation word (plus BAR_WAS_LAST) returned by the
   *_start functions and consumed by the *_end functions.  */
typedef unsigned int gomp_barrier_state_t;

/* The generation field contains a counter in the high bits, with a few
   low bits dedicated to flags.  Note that TASK_PENDING and WAS_LAST can
   share space because WAS_LAST is never stored back to generation.  */
#define BAR_TASK_PENDING	1	/* Tasks are queued on the barrier.  */
#define BAR_WAS_LAST		1	/* State-only flag: this thread was the
					   last to arrive.  */
#define BAR_WAITING_FOR_TASK	2	/* Barrier is waiting on task
					   completion.  */
#define BAR_CANCELLED		4	/* Barrier region was cancelled.  */
#define BAR_INCR		8	/* Generation counter increment; all
					   flags live below this bit.  */
56 
/* Initialize BAR for COUNT participating threads.  Must complete before
   any thread uses the barrier; performs no synchronization itself.  */
static inline void gomp_barrier_init (gomp_barrier_t *bar, unsigned count)
{
  bar->generation = 0;
  _Futex_Initialize (&bar->futex);
  bar->awaited_final = count;
  bar->awaited = count;
  bar->total = count;
}
65 
/* Change the participant count of BAR to COUNT.  The outstanding-arrival
   counter is adjusted atomically by the (possibly negative, via unsigned
   wraparound) difference so that threads already waiting stay accounted
   for.  */
static inline void gomp_barrier_reinit (gomp_barrier_t *bar, unsigned count)
{
  unsigned delta = count - bar->total;
  __atomic_add_fetch (&bar->awaited, delta, MEMMODEL_ACQ_REL);
  bar->total = count;
}
71 
/* Destroy BAR.  Nothing to do: the barrier owns no dynamically allocated
   state.  NOTE(review): this presumes an RTEMS _Futex_Control needs no
   explicit teardown -- confirm against <sys/lock.h>.  */
static inline void gomp_barrier_destroy (gomp_barrier_t *bar)
{
}
75 
/* Simple barrier entry points, implemented in bar.c.  */
extern void gomp_barrier_wait (gomp_barrier_t *);
extern void gomp_barrier_wait_last (gomp_barrier_t *);
extern void gomp_barrier_wait_end (gomp_barrier_t *, gomp_barrier_state_t);
/* Team-barrier variants; the _cancel forms return a bool, presumably
   whether the barrier was cancelled (see BAR_CANCELLED) -- behavior is
   defined in bar.c.  */
extern void gomp_team_barrier_wait (gomp_barrier_t *);
extern void gomp_team_barrier_wait_final (gomp_barrier_t *);
extern void gomp_team_barrier_wait_end (gomp_barrier_t *,
					gomp_barrier_state_t);
extern bool gomp_team_barrier_wait_cancel (gomp_barrier_t *);
extern bool gomp_team_barrier_wait_cancel_end (gomp_barrier_t *,
					       gomp_barrier_state_t);
extern void gomp_team_barrier_wake (gomp_barrier_t *, int);
struct gomp_team;
extern void gomp_team_barrier_cancel (struct gomp_team *);
89 
90 static inline gomp_barrier_state_t
gomp_barrier_wait_start(gomp_barrier_t * bar)91 gomp_barrier_wait_start (gomp_barrier_t *bar)
92 {
93   unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
94   ret &= -BAR_INCR | BAR_CANCELLED;
95   /* A memory barrier is needed before exiting from the various forms
96      of gomp_barrier_wait, to satisfy OpenMP API version 3.1 section
97      2.8.6 flush Construct, which says there is an implicit flush during
98      a barrier region.  This is a convenient place to add the barrier,
99      so we use MEMMODEL_ACQ_REL here rather than MEMMODEL_ACQUIRE.  */
100   if (__atomic_add_fetch (&bar->awaited, -1, MEMMODEL_ACQ_REL) == 0)
101     ret |= BAR_WAS_LAST;
102   return ret;
103 }
104 
105 static inline gomp_barrier_state_t
gomp_barrier_wait_cancel_start(gomp_barrier_t * bar)106 gomp_barrier_wait_cancel_start (gomp_barrier_t *bar)
107 {
108   return gomp_barrier_wait_start (bar);
109 }
110 
111 /* This is like gomp_barrier_wait_start, except it decrements
112    bar->awaited_final rather than bar->awaited and should be used
113    for the gomp_team_end barrier only.  */
114 static inline gomp_barrier_state_t
gomp_barrier_wait_final_start(gomp_barrier_t * bar)115 gomp_barrier_wait_final_start (gomp_barrier_t *bar)
116 {
117   unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
118   ret &= -BAR_INCR | BAR_CANCELLED;
119   /* See above gomp_barrier_wait_start comment.  */
120   if (__atomic_add_fetch (&bar->awaited_final, -1, MEMMODEL_ACQ_REL) == 0)
121     ret |= BAR_WAS_LAST;
122   return ret;
123 }
124 
125 static inline bool
gomp_barrier_last_thread(gomp_barrier_state_t state)126 gomp_barrier_last_thread (gomp_barrier_state_t state)
127 {
128   return state & BAR_WAS_LAST;
129 }
130 
131 /* All the inlines below must be called with team->task_lock
132    held.  */
133 
134 static inline void
gomp_team_barrier_set_task_pending(gomp_barrier_t * bar)135 gomp_team_barrier_set_task_pending (gomp_barrier_t *bar)
136 {
137   bar->generation |= BAR_TASK_PENDING;
138 }
139 
140 static inline void
gomp_team_barrier_clear_task_pending(gomp_barrier_t * bar)141 gomp_team_barrier_clear_task_pending (gomp_barrier_t *bar)
142 {
143   bar->generation &= ~BAR_TASK_PENDING;
144 }
145 
146 static inline void
gomp_team_barrier_set_waiting_for_tasks(gomp_barrier_t * bar)147 gomp_team_barrier_set_waiting_for_tasks (gomp_barrier_t *bar)
148 {
149   bar->generation |= BAR_WAITING_FOR_TASK;
150 }
151 
152 static inline bool
gomp_team_barrier_waiting_for_tasks(gomp_barrier_t * bar)153 gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar)
154 {
155   return (bar->generation & BAR_WAITING_FOR_TASK) != 0;
156 }
157 
158 static inline bool
gomp_team_barrier_cancelled(gomp_barrier_t * bar)159 gomp_team_barrier_cancelled (gomp_barrier_t *bar)
160 {
161   return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0);
162 }
163 
164 static inline void
gomp_team_barrier_done(gomp_barrier_t * bar,gomp_barrier_state_t state)165 gomp_team_barrier_done (gomp_barrier_t *bar, gomp_barrier_state_t state)
166 {
167   bar->generation = (state & -BAR_INCR) + BAR_INCR;
168 }
169 
170 #endif /* GOMP_BARRIER_H */
171