/*	$OpenBSD: atomic.h,v 1.9 2022/03/21 05:45:52 visa Exp $ */
/*
 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2022 Alexander Bluhm <bluhm@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

19 #ifndef _SYS_ATOMIC_H_
20 #define _SYS_ATOMIC_H_
21
22 #include <machine/atomic.h>
23
/*
 * an arch wanting to provide its own implementations does so by defining
 * macros.
 */

/*
 * atomic_cas_*
 */

#ifndef atomic_cas_uint
/*
 * Atomic compare-and-swap: if *p equals o, store n into *p.
 * Returns the value *p held before the operation.
 */
static inline unsigned int
atomic_cas_uint(volatile unsigned int *p, unsigned int o, unsigned int n)
{
	return __sync_val_compare_and_swap(p, o, n);
}
#endif

#ifndef atomic_cas_ulong
/*
 * Atomic compare-and-swap: if *p equals o, store n into *p.
 * Returns the value *p held before the operation.
 */
static inline unsigned long
atomic_cas_ulong(volatile unsigned long *p, unsigned long o, unsigned long n)
{
	return __sync_val_compare_and_swap(p, o, n);
}
#endif

#ifndef atomic_cas_ptr
/*
 * Atomic compare-and-swap on a pointer: if *pp equals o, store n.
 * Returns the pointer *pp held before the operation.  pp is taken as
 * volatile void * so any pointer-to-pointer can be passed without a cast.
 */
static inline void *
atomic_cas_ptr(volatile void *pp, void *o, void *n)
{
	void * volatile *p = (void * volatile *)pp;
	return __sync_val_compare_and_swap(p, o, n);
}
#endif

/*
 * atomic_swap_*
 */

#ifndef atomic_swap_uint
/*
 * Atomically exchange *p with v; returns the previous value of *p.
 */
static inline unsigned int
atomic_swap_uint(volatile unsigned int *p, unsigned int v)
{
	return __sync_lock_test_and_set(p, v);
}
#endif

#ifndef atomic_swap_ulong
/*
 * Atomically exchange *p with v; returns the previous value of *p.
 */
static inline unsigned long
atomic_swap_ulong(volatile unsigned long *p, unsigned long v)
{
	return __sync_lock_test_and_set(p, v);
}
#endif

#ifndef atomic_swap_ptr
/*
 * Atomically exchange the pointer *pp with v; returns the previous
 * pointer.  pp is volatile void * so any pointer-to-pointer can be
 * passed without a cast.
 */
static inline void *
atomic_swap_ptr(volatile void *pp, void *v)
{
	void * volatile *p = (void * volatile *)pp;
	return __sync_lock_test_and_set(p, v);
}
#endif

/*
 * atomic_add_*_nv - add and fetch
 */

#ifndef atomic_add_int_nv
/*
 * Atomically add v to *p; returns the new (post-add) value.
 */
static inline unsigned int
atomic_add_int_nv(volatile unsigned int *p, unsigned int v)
{
	return __sync_add_and_fetch(p, v);
}
#endif

#ifndef atomic_add_long_nv
/*
 * Atomically add v to *p; returns the new (post-add) value.
 */
static inline unsigned long
atomic_add_long_nv(volatile unsigned long *p, unsigned long v)
{
	return __sync_add_and_fetch(p, v);
}
#endif

/*
 * atomic_add_* - add (result discarded)
 */

#ifndef atomic_add_int
#define atomic_add_int(_p, _v)		((void)atomic_add_int_nv((_p), (_v)))
#endif

#ifndef atomic_add_long
#define atomic_add_long(_p, _v)		((void)atomic_add_long_nv((_p), (_v)))
#endif

/*
 * atomic_inc_*_nv - increment and fetch
 */

#ifndef atomic_inc_int_nv
#define atomic_inc_int_nv(_p)		atomic_add_int_nv((_p), 1)
#endif

#ifndef atomic_inc_long_nv
#define atomic_inc_long_nv(_p)		atomic_add_long_nv((_p), 1)
#endif

/*
 * atomic_inc_* - increment (result discarded)
 */

#ifndef atomic_inc_int
#define atomic_inc_int(_p)		((void)atomic_inc_int_nv(_p))
#endif

#ifndef atomic_inc_long
#define atomic_inc_long(_p)		((void)atomic_inc_long_nv(_p))
#endif

/*
 * atomic_sub_*_nv - sub and fetch
 */

#ifndef atomic_sub_int_nv
/*
 * Atomically subtract v from *p; returns the new (post-subtract) value.
 */
static inline unsigned int
atomic_sub_int_nv(volatile unsigned int *p, unsigned int v)
{
	return __sync_sub_and_fetch(p, v);
}
#endif

#ifndef atomic_sub_long_nv
/*
 * Atomically subtract v from *p; returns the new (post-subtract) value.
 */
static inline unsigned long
atomic_sub_long_nv(volatile unsigned long *p, unsigned long v)
{
	return __sync_sub_and_fetch(p, v);
}
#endif

/*
 * atomic_sub_* - sub (result discarded)
 */

#ifndef atomic_sub_int
#define atomic_sub_int(_p, _v)		((void)atomic_sub_int_nv((_p), (_v)))
#endif

#ifndef atomic_sub_long
#define atomic_sub_long(_p, _v)		((void)atomic_sub_long_nv((_p), (_v)))
#endif

/*
 * atomic_dec_*_nv - decrement and fetch
 */

#ifndef atomic_dec_int_nv
#define atomic_dec_int_nv(_p)		atomic_sub_int_nv((_p), 1)
#endif

#ifndef atomic_dec_long_nv
#define atomic_dec_long_nv(_p)		atomic_sub_long_nv((_p), 1)
#endif

/*
 * atomic_dec_* - decrement (result discarded)
 */

#ifndef atomic_dec_int
#define atomic_dec_int(_p)		((void)atomic_dec_int_nv(_p))
#endif

#ifndef atomic_dec_long
#define atomic_dec_long(_p)		((void)atomic_dec_long_nv(_p))
#endif

#ifdef _KERNEL
/*
 * atomic_load_* - read from memory
 *
 * Plain volatile loads; no memory barrier is implied.
 */

static inline unsigned int
atomic_load_int(volatile unsigned int *p)
{
	return *p;
}

static inline unsigned long
atomic_load_long(volatile unsigned long *p)
{
	return *p;
}

/*
 * atomic_store_* - write to memory
 *
 * Plain volatile stores; no memory barrier is implied.
 */

static inline void
atomic_store_int(volatile unsigned int *p, unsigned int v)
{
	*p = v;
}

static inline void
atomic_store_long(volatile unsigned long *p, unsigned long v)
{
	*p = v;
}
#endif /* _KERNEL */

/*
 * memory barriers
 */

/* Generic fallbacks: each barrier is a full __sync_synchronize(). */

#ifndef membar_enter
#define membar_enter()		__sync_synchronize()
#endif

#ifndef membar_exit
#define membar_exit()		__sync_synchronize()
#endif

#ifndef membar_producer
#define membar_producer()	__sync_synchronize()
#endif

#ifndef membar_consumer
#define membar_consumer()	__sync_synchronize()
#endif

#ifndef membar_sync
#define membar_sync()		__sync_synchronize()
#endif

/* Archs where atomics already imply ordering may define these as no-ops. */

#ifndef membar_enter_after_atomic
#define membar_enter_after_atomic()	membar_enter()
#endif

#ifndef membar_exit_before_atomic
#define membar_exit_before_atomic()	membar_exit()
#endif

265 #ifdef _KERNEL
266
267 /*
268 * Force any preceding reads to happen before any subsequent reads that
269 * depend on the value returned by the preceding reads.
270 */
271 static inline void
membar_datadep_consumer(void)272 membar_datadep_consumer(void)
273 {
274 #ifdef __alpha__
275 membar_consumer();
276 #endif
277 }
278
279 #define READ_ONCE(x) ({ \
280 typeof(x) __tmp = *(volatile typeof(x) *)&(x); \
281 membar_datadep_consumer(); \
282 __tmp; \
283 })
284
285 #define WRITE_ONCE(x, val) ({ \
286 typeof(x) __tmp = (val); \
287 *(volatile typeof(x) *)&(x) = __tmp; \
288 __tmp; \
289 })
290
291 #endif /* _KERNEL */
292
293 #endif /* _SYS_ATOMIC_H_ */
294