/*	$OpenBSD: atomic.h,v 1.13 2022/08/29 02:01:18 jsg Exp $	*/

/*
 * Copyright (c) 2015 Martin Pieuchot
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _POWERPC_ATOMIC_H_
#define _POWERPC_ATOMIC_H_

#if defined(_KERNEL)

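/*
 * The routines below are all built on the PowerPC load-reserve/
 * store-conditional pair: lwarx loads a word and takes out a
 * reservation on its address, and stwcx. performs the store only if
 * the reservation is still held, recording success in CR0.  The
 * "bne- 1b" retries the whole sequence whenever the reservation was
 * lost in the meantime.  atomic_setbits_int() and
 * atomic_clearbits_int() additionally end with "sync" and thus act as
 * full memory barriers; the other primitives in this file imply no
 * ordering of their own.
 */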
static __inline void
atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
{
	unsigned int tmp;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	or	%0, %1, %0	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    "	sync" : "=&r" (tmp) : "r" (v), "r" (uip) : "cc", "memory");
}

static __inline void
atomic_clearbits_int(volatile unsigned int *uip, unsigned int v)
{
	unsigned int tmp;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	andc	%0, %0, %1	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    "	sync" : "=&r" (tmp) : "r" (v), "r" (uip) : "cc", "memory");
}

#endif /* defined(_KERNEL) */

static inline unsigned int
_atomic_cas_uint(volatile unsigned int *p, unsigned int o, unsigned int n)
{
	unsigned int rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	cmpw	0, %0, %4	\n"
	    "	bne-	2f		\n"
	    "	stwcx.	%3, 0, %2	\n"
	    "	bne-	1b		\n"
	    "2:				\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (n), "r" (o)
	    : "cc");

	return (rv);
}
#define atomic_cas_uint(_p, _o, _n) _atomic_cas_uint((_p), (_o), (_n))
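
/*
 * A CAS returns the value that was found in *p; the exchange took
 * effect iff that value equals the expected one.  A typical retry
 * loop looks like this (illustrative sketch only, "counter" is a
 * made-up variable):
 *
 *	unsigned int old;
 *
 *	do {
 *		old = *counter;
 *	} while (atomic_cas_uint(counter, old, old + 1) != old);
 */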

static inline unsigned long
_atomic_cas_ulong(volatile unsigned long *p, unsigned long o, unsigned long n)
{
	unsigned long rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	cmpw	0, %0, %4	\n"
	    "	bne-	2f		\n"
	    "	stwcx.	%3, 0, %2	\n"
	    "	bne-	1b		\n"
	    "2:				\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (n), "r" (o)
	    : "cc");

	return (rv);
}
#define atomic_cas_ulong(_p, _o, _n) _atomic_cas_ulong((_p), (_o), (_n))

static inline void *
_atomic_cas_ptr(volatile void *pp, void *o, void *n)
{
	void * volatile *p = pp;
	void *rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	cmpw	0, %0, %4	\n"
	    "	bne-	2f		\n"
	    "	stwcx.	%3, 0, %2	\n"
	    "	bne-	1b		\n"
	    "2:				\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (n), "r" (o)
	    : "cc");

	return (rv);
}
#define atomic_cas_ptr(_p, _o, _n) _atomic_cas_ptr((_p), (_o), (_n))
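
/*
 * The pointer flavour reuses the word-sized lwarx/stwcx. sequence,
 * which is correct here because this 32-bit port is ILP32: pointers
 * and longs are both word sized.
 */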

static inline unsigned int
_atomic_swap_uint(volatile unsigned int *p, unsigned int v)
{
	unsigned int rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	stwcx.	%3, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc");

	return (rv);
}
#define atomic_swap_uint(_p, _v) _atomic_swap_uint((_p), (_v))

static inline unsigned long
_atomic_swap_ulong(volatile unsigned long *p, unsigned long v)
{
	unsigned long rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	stwcx.	%3, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc");

	return (rv);
}
#define atomic_swap_ulong(_p, _v) _atomic_swap_ulong((_p), (_v))

static inline void *
_atomic_swap_ptr(volatile void *pp, void *v)
{
	void * volatile *p = pp;
	void *rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	stwcx.	%3, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc");

	return (rv);
}
#define atomic_swap_ptr(_p, _v) _atomic_swap_ptr((_p), (_v))
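
/*
 * The swap routines store the new value unconditionally (looping only
 * if the reservation is lost) and return the previous contents.  As
 * noted above, no barrier is implied, so publishing an object through
 * a shared pointer still needs an explicit membar first (illustrative
 * sketch, "slot" and "obj" are made-up names):
 *
 *	membar_producer();
 *	prev = atomic_swap_ptr(&slot, obj);
 */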

static inline unsigned int
_atomic_add_int_nv(volatile unsigned int *p, unsigned int v)
{
	unsigned int rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	add	%0, %3, %0	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_add_int_nv(_p, _v) _atomic_add_int_nv((_p), (_v))
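
/*
 * The "_nv" suffix means the new value is returned, so e.g.
 * "refs = atomic_add_int_nv(&cnt, 1);" leaves the freshly
 * incremented count in refs, as needed for reference counting.
 */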

static inline unsigned long
_atomic_add_long_nv(volatile unsigned long *p, unsigned long v)
{
	unsigned long rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	add	%0, %3, %0	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_add_long_nv(_p, _v) _atomic_add_long_nv((_p), (_v))

static inline unsigned int
_atomic_sub_int_nv(volatile unsigned int *p, unsigned int v)
{
	unsigned int rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	subf	%0, %3, %0	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_sub_int_nv(_p, _v) _atomic_sub_int_nv((_p), (_v))
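
/*
 * subf rt, ra, rb computes rb - ra, so "subf %0, %3, %0" leaves the
 * old value minus v in %0 before it is conditionally stored back.
 */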

static inline unsigned long
_atomic_sub_long_nv(volatile unsigned long *p, unsigned long v)
{
	unsigned long rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	subf	%0, %3, %0	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_sub_long_nv(_p, _v) _atomic_sub_long_nv((_p), (_v))

static inline unsigned int
_atomic_addic_int_nv(volatile unsigned int *p, unsigned int v)
{
	unsigned int rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	addic	%0, %0, %3	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "i" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_inc_int_nv(_p) _atomic_addic_int_nv((_p), 1)
#define atomic_dec_int_nv(_p) _atomic_addic_int_nv((_p), -1)
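
/*
 * addic adds a signed 16-bit immediate and records the carry in
 * XER[CA], hence the "xer" clobber and the "i" constraint, which
 * restricts v to compile-time constants.  These helpers are therefore
 * only exposed through the inc/dec macros, with the constants 1
 * and -1.
 */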

static inline unsigned long
_atomic_addic_long_nv(volatile unsigned long *p, unsigned long v)
{
	unsigned long rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	addic	%0, %0, %3	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "i" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_inc_long_nv(_p) _atomic_addic_long_nv((_p), 1)
#define atomic_dec_long_nv(_p) _atomic_addic_long_nv((_p), -1)

#define __membar(_f) do { __asm volatile(_f ::: "memory"); } while (0)

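/*
 * On MULTIPROCESSOR kernels and in userland the membar_* macros below
 * emit real barrier instructions: the heavyweight "sync" orders all
 * earlier storage accesses before all later ones, while the cheaper
 * "isync" is used for the acquire-style barriers.  On uniprocessor
 * kernels only the compiler barrier remains (the empty asm still
 * clobbers "memory"), since a single CPU always observes its own
 * accesses in program order.
 */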
#if defined(MULTIPROCESSOR) || !defined(_KERNEL)
#define membar_enter()		__membar("isync")
#define membar_exit()		__membar("sync")
#define membar_producer()	__membar("sync")
#define membar_consumer()	__membar("isync")
#define membar_sync()		__membar("sync")
#else
#define membar_enter()		__membar("")
#define membar_exit()		__membar("")
#define membar_producer()	__membar("")
#define membar_consumer()	__membar("")
#define membar_sync()		__membar("")
#endif

#endif /* _POWERPC_ATOMIC_H_ */