/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _LINUXKPI_ASM_ATOMIC_H_
#define	_LINUXKPI_ASM_ATOMIC_H_

#include <linux/compiler.h>
#include <sys/types.h>
#include <machine/atomic.h>

#define	ATOMIC_INIT(x)	{ .counter = (x) }

typedef struct {
	volatile int counter;
} atomic_t;
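
/*
 * Illustrative usage (hypothetical names, not part of this KPI):
 *
 *	static atomic_t refcount = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refcount);
 *	if (atomic_dec_and_test(&refcount))
 *		object_free();
 */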

/*------------------------------------------------------------------------*
 *	32-bit atomic operations
 *------------------------------------------------------------------------*/

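/*
 * Map the Linux atomic_*() names onto the *_return() primitives
 * implemented below; the *_and_test() variants evaluate true when the
 * resulting counter value is zero.
 */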
#define	atomic_add(i, v)		atomic_add_return((i), (v))
#define	atomic_sub(i, v)		atomic_sub_return((i), (v))
#define	atomic_inc_return(v)		atomic_add_return(1, (v))
#define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
#define	atomic_add_and_test(i, v)	(atomic_add_return((i), (v)) == 0)
#define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define	atomic_dec_return(v)		atomic_sub_return(1, (v))
#define	atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

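/*
 * Add or subtract and return the resulting value.  Both build on
 * FreeBSD's atomic_fetchadd_int(9), which returns the previous value.
 */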
static inline int
atomic_add_return(int i, atomic_t *v)
{
	return i + atomic_fetchadd_int(&v->counter, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -i) - i;
}

static inline void
atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

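/*
 * atomic_set_release() orders all prior memory accesses before the
 * assignment by using FreeBSD's release store, atomic_store_rel_int(9).
 */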
static inline void
atomic_set_release(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_set_int(&v->counter, mask);
}

static inline int
atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}

static inline int
atomic_inc(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, 1) + 1;
}

static inline int
atomic_dec(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -1) - 1;
}

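/*
 * Atomically add "a" to "v" unless the counter holds "u"; returns
 * non-zero when the addition was performed.  The fcmpset loop re-reads
 * the counter on failure, so the test and the update form one atomic
 * step.  atomic_inc_not_zero() above is the common consumer.
 */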
static inline int
atomic_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	for (;;) {
		if (unlikely(c == u))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
			break;
	}
	return (c != u);
}

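/*
 * Same as atomic_add_unless(), but returns the value the counter held
 * before the operation, matching the Linux atomic_fetch_*() convention.
 */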
static inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	for (;;) {
		if (unlikely(c == u))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
			break;
	}
	return (c);
}

static inline void
atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_clear_int(&v->counter, mask);
}

static inline int
atomic_xchg(atomic_t *v, int i)
{
	return (atomic_swap_int(&v->counter, i));
}

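/*
 * Compare-and-exchange: atomically replace the counter with "new" if
 * it equals "old".  Returns the previous value; the exchange happened
 * if and only if that value equals "old".  The loop retries when
 * atomic_fcmpset_int(9) fails spuriously, as it may on LL/SC
 * architectures.
 */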
static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret = old;

	for (;;) {
		if (atomic_fcmpset_int(&v->counter, &ret, new))
			break;
		if (ret != old)
			break;
	}
	return (ret);
}

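/*
 * Some platforms lack 8-bit, 16-bit or 64-bit atomic primitives.  The
 * LINUXKPI_ATOMIC_{8,16,64}() wrappers expand to their argument only
 * where the corresponding width is supported, so the type-generic
 * macros below compile everywhere while their CTASSERT() rejects
 * unsupported operand sizes at compile time.
 */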
#if defined(__amd64__) || defined(__arm64__) || defined(__i386__)
#define	LINUXKPI_ATOMIC_8(...) __VA_ARGS__
#define	LINUXKPI_ATOMIC_16(...) __VA_ARGS__
#else
#define	LINUXKPI_ATOMIC_8(...)
#define	LINUXKPI_ATOMIC_16(...)
#endif

#if !(defined(i386) || (defined(__powerpc__) && !defined(__powerpc64__)))
#define	LINUXKPI_ATOMIC_64(...) __VA_ARGS__
#else
#define	LINUXKPI_ATOMIC_64(...)
#endif

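/*
 * Type-generic cmpxchg().  The union gives byte-wise access to the
 * operand at every supported width without violating strict aliasing.
 * The result is the previous value of "*ptr"; as with atomic_cmpxchg(),
 * the exchange happened iff that value equals "old".
 *
 * Illustrative usage (hypothetical names):
 *
 *	if (cmpxchg(&sc->state, OLD_STATE, NEW_STATE) == OLD_STATE) {
 *		... this caller performed the transition ...
 *	}
 */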
#define	cmpxchg(ptr, old, new) ({					\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret = { .val = (old) }, __new = { .val = (new) };		\
									\
	CTASSERT(							\
	    LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)		\
	    LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)		\
	    LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)		\
	    sizeof(__ret.val) == 4);					\
									\
	switch (sizeof(__ret.val)) {					\
	LINUXKPI_ATOMIC_8(						\
	case 1:								\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	LINUXKPI_ATOMIC_16(						\
	case 2:								\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	case 4:								\
		while (!atomic_fcmpset_32((volatile u32 *)(ptr),	\
		    __ret.u32, __new.u32[0]) && __ret.val == (old))	\
			;						\
		break;							\
	LINUXKPI_ATOMIC_64(						\
	case 8:								\
		while (!atomic_fcmpset_64((volatile u64 *)(ptr),	\
		    __ret.u64, __new.u64[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	}								\
	__ret.val;							\
})

#define	cmpxchg_relaxed(...)	cmpxchg(__VA_ARGS__)

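/*
 * Type-generic xchg(): unconditionally store "new" and return the
 * previous value.  The 32-bit and 64-bit cases map directly onto
 * atomic_swap_32(9)/atomic_swap_64(9); the 8-bit and 16-bit cases,
 * which have no native swap primitive here, emulate one with an
 * atomic_fcmpset loop.
 */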
#define	xchg(ptr, new) ({						\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret, __new = { .val = (new) };				\
									\
	CTASSERT(							\
	    LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)		\
	    LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)		\
	    LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)		\
	    sizeof(__ret.val) == 4);					\
									\
	switch (sizeof(__ret.val)) {					\
	LINUXKPI_ATOMIC_8(						\
	case 1:								\
		__ret.val = READ_ONCE(*ptr);				\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]))				\
			;						\
		break;							\
	)								\
	LINUXKPI_ATOMIC_16(						\
	case 2:								\
		__ret.val = READ_ONCE(*ptr);				\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]))				\
			;						\
		break;							\
	)								\
	case 4:								\
		__ret.u32[0] = atomic_swap_32((volatile u32 *)(ptr),	\
		    __new.u32[0]);					\
		break;							\
	LINUXKPI_ATOMIC_64(						\
	case 8:								\
		__ret.u64[0] = atomic_swap_64((volatile u64 *)(ptr),	\
		    __new.u64[0]);					\
		break;							\
	)								\
	}								\
	__ret.val;							\
})

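/*
 * Linux try_cmpxchg(ptr, oldp, new): attempt the exchange and return
 * true on success.  On failure the current value is written back to
 * "*oldp", so retry loops need not re-read the location.
 * Illustrative retry idiom (hypothetical atomic_t v):
 *
 *	int old = atomic_read(&v);
 *	do {
 *	} while (!atomic_try_cmpxchg(&v, &old, old + 1));
 */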
#define	try_cmpxchg(p, op, n)							\
({										\
	__typeof(p) __op = (__typeof((p)))(op);					\
	__typeof(*(p)) __o = *__op;						\
	__typeof(*(p)) __p = __sync_val_compare_and_swap((p), (__o), (n));	\
	if (__p != __o)								\
		*__op = __p;							\
	(__p == __o);								\
})

#define	__atomic_try_cmpxchg(type, _p, _po, _n)		\
({							\
	__typeof(_po) __po = (_po);			\
	__typeof(*(_po)) __r, __o = *__po;		\
	__r = atomic_cmpxchg##type((_p), __o, (_n));	\
	if (unlikely(__r != __o))			\
		*__po = __r;				\
	likely(__r == __o);				\
})

#define	atomic_try_cmpxchg(_p, _po, _n)	__atomic_try_cmpxchg(, _p, _po, _n)

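/*
 * Decrement "v" only if the result would not be negative.  Returns the
 * decremented value; a negative return means the counter was left
 * untouched.
 */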
static inline int
atomic_dec_if_positive(atomic_t *v)
{
	int retval;
	int old;

	old = atomic_read(v);
	for (;;) {
		retval = old - 1;
		if (unlikely(retval < 0))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &old, retval)))
			break;
	}
	return (retval);
}

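/*
 * Generators for the bitwise atomic operations (or, and, andnot, xor),
 * implemented as atomic_cmpxchg() loops.  LINUX_ATOMIC_OP() emits a
 * void atomic_<op>(int, atomic_t *); LINUX_ATOMIC_FETCH_OP() emits an
 * atomic_fetch_<op>() that also returns the previous value.  Both are
 * instantiated at the bottom of this file.
 */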
#define	LINUX_ATOMIC_OP(op, c_op)				\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
}

#define	LINUX_ATOMIC_FETCH_OP(op, c_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
								\
	return (c);						\
}

static inline int
atomic_fetch_inc(atomic_t *v)
{

	return ((atomic_inc_return(v) - 1));
}

LINUX_ATOMIC_OP(or, |)
LINUX_ATOMIC_OP(and, &)
LINUX_ATOMIC_OP(andnot, &~)
LINUX_ATOMIC_OP(xor, ^)

LINUX_ATOMIC_FETCH_OP(or, |)
LINUX_ATOMIC_FETCH_OP(and, &)
LINUX_ATOMIC_FETCH_OP(andnot, &~)
LINUX_ATOMIC_FETCH_OP(xor, ^)

#endif					/* _LINUXKPI_ASM_ATOMIC_H_ */