/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUXKPI_ASM_ATOMIC_H_
#define	_LINUXKPI_ASM_ATOMIC_H_

#include <linux/compiler.h>
#include <sys/types.h>
#include <machine/atomic.h>

#define	ATOMIC_INIT(x)	{ .counter = (x) }

typedef struct {
	volatile int counter;
} atomic_t;
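
/*
 * The Linux atomic_t is emulated as a bare volatile int; the operations
 * below map onto FreeBSD's native atomic(9) primitives.  A minimal usage
 * sketch (free_object() is a hypothetical release action):
 *
 *	static atomic_t refcount = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refcount);
 *	if (atomic_dec_and_test(&refcount))
 *		free_object();
 */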

/*------------------------------------------------------------------------*
 *	32-bit atomic operations
 *------------------------------------------------------------------------*/

#define	atomic_add(i, v)		atomic_add_return((i), (v))
#define	atomic_sub(i, v)		atomic_sub_return((i), (v))
#define	atomic_inc_return(v)		atomic_add_return(1, (v))
#define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
#define	atomic_add_and_test(i, v)	(atomic_add_return((i), (v)) == 0)
#define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define	atomic_dec_return(v)		atomic_sub_return(1, (v))
#define	atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
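
/*
 * The aliases above implement Linux's inc/dec, *_and_test and
 * *_negative variants in terms of atomic_add_return(),
 * atomic_sub_return() and atomic_add_unless() defined below; the plain
 * atomic_add()/atomic_sub() forms simply discard the returned value.
 */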

static inline int
atomic_add_return(int i, atomic_t *v)
{
	return (i + atomic_fetchadd_int(&v->counter, i));
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return (atomic_fetchadd_int(&v->counter, -i) - i);
}
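
/*
 * Note: atomic_fetchadd_int(9) guarantees atomicity but carries no
 * acquire/release ordering of its own, so on weakly ordered
 * architectures these helpers may be weaker than Linux's fully ordered
 * atomic_add_return() and atomic_sub_return().
 */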

static inline void
atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

static inline void
atomic_set_release(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}
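
/*
 * atomic_set() is an ordinary store done through WRITE_ONCE(), with no
 * ordering implied, matching Linux; atomic_set_release() additionally
 * orders all prior memory accesses before the store.
 */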

static inline void
atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_set_int(&v->counter, mask);
}

static inline int
atomic_read(const atomic_t *v)
{
	return (READ_ONCE(v->counter));
}

static inline int
atomic_inc(atomic_t *v)
{
	return (atomic_fetchadd_int(&v->counter, 1) + 1);
}

static inline int
atomic_dec(atomic_t *v)
{
	return (atomic_fetchadd_int(&v->counter, -1) - 1);
}

static inline int
atomic_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	for (;;) {
		if (unlikely(c == u))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
			break;
	}
	return (c != u);
}
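
/*
 * atomic_add_unless() adds "a" unless the counter equals "u" and
 * returns non-zero iff the addition was performed.
 * atomic_fcmpset_int(9) reloads "c" with the observed value on failure,
 * so the loop retries until it either succeeds or observes "u".
 */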

static inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	for (;;) {
		if (unlikely(c == u))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
			break;
	}
	return (c);
}
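
/*
 * Same loop as atomic_add_unless(), but returning the value observed
 * before the (possibly skipped) addition, as Linux's fetch variant does.
 */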

static inline void
atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_clear_int(&v->counter, mask);
}

static inline int
atomic_xchg(atomic_t *v, int i)
{
	return (atomic_swap_int(&v->counter, i));
}

static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret = old;

	for (;;) {
		if (atomic_fcmpset_int(&v->counter, &ret, new))
			break;
		if (ret != old)
			break;
	}
	return (ret);
}
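
/*
 * atomic_cmpxchg() returns the previous counter value; the store
 * happens only when that value equals "old".  The loop is needed
 * because atomic_fcmpset_int(9) may fail spuriously: it writes the
 * observed value back into "ret", and we retry only while that value
 * still equals "old".
 */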

#if defined(__amd64__) || defined(__arm64__) || defined(__i386__)
#define	LINUXKPI_ATOMIC_8(...) __VA_ARGS__
#define	LINUXKPI_ATOMIC_16(...) __VA_ARGS__
#else
#define	LINUXKPI_ATOMIC_8(...)
#define	LINUXKPI_ATOMIC_16(...)
#endif

#if !(defined(i386) || (defined(__powerpc__) && !defined(__powerpc64__)))
#define	LINUXKPI_ATOMIC_64(...) __VA_ARGS__
#else
#define	LINUXKPI_ATOMIC_64(...)
#endif
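
/*
 * 8- and 16-bit atomics exist only on some architectures, and 64-bit
 * atomics are unavailable on 32-bit i386 and powerpc.  These wrapper
 * macros expand their argument only where the corresponding width is
 * supported, so the unsupported switch cases below compile away and
 * the CTASSERT()s reject operands of an unsupported size at compile
 * time.
 */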

#define	cmpxchg(ptr, old, new) ({					\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret = { .val = (old) }, __new = { .val = (new) };		\
									\
	CTASSERT(							\
	    LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)		\
	    LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)		\
	    LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)		\
	    sizeof(__ret.val) == 4);					\
									\
	switch (sizeof(__ret.val)) {					\
	LINUXKPI_ATOMIC_8(						\
	case 1:								\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	LINUXKPI_ATOMIC_16(						\
	case 2:								\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	case 4:								\
		while (!atomic_fcmpset_32((volatile u32 *)(ptr),	\
		    __ret.u32, __new.u32[0]) && __ret.val == (old))	\
			;						\
		break;							\
	LINUXKPI_ATOMIC_64(						\
	case 8:								\
		while (!atomic_fcmpset_64((volatile u64 *)(ptr),	\
		    __ret.u64, __new.u64[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	}								\
	__ret.val;							\
})
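
/*
 * cmpxchg() evaluates to the value found at "ptr" before the operation;
 * the swap is performed only when that value equals "old".  A minimal
 * usage sketch (the variable names are hypothetical):
 *
 *	uint32_t prev;
 *
 *	prev = cmpxchg(&some_flag, 0, 1);
 *	if (prev == 0)
 *		... this CPU won the race ...
 */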

#define	cmpxchg64(...)		cmpxchg(__VA_ARGS__)
#define	cmpxchg_relaxed(...)	cmpxchg(__VA_ARGS__)

#define	xchg(ptr, new) ({						\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret, __new = { .val = (new) };				\
									\
	CTASSERT(							\
	    LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)		\
	    LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)		\
	    LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)		\
	    sizeof(__ret.val) == 4);					\
									\
	switch (sizeof(__ret.val)) {					\
	LINUXKPI_ATOMIC_8(						\
	case 1:								\
		__ret.val = READ_ONCE(*(ptr));				\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]))				\
			;						\
		break;							\
	)								\
	LINUXKPI_ATOMIC_16(						\
	case 2:								\
		__ret.val = READ_ONCE(*(ptr));				\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]))				\
			;						\
		break;							\
	)								\
	case 4:								\
		__ret.u32[0] = atomic_swap_32((volatile u32 *)(ptr),	\
		    __new.u32[0]);					\
		break;							\
	LINUXKPI_ATOMIC_64(						\
	case 8:								\
		__ret.u64[0] = atomic_swap_64((volatile u64 *)(ptr),	\
		    __new.u64[0]);					\
		break;							\
	)								\
	}								\
	__ret.val;							\
})
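
/*
 * xchg() stores "new" unconditionally and evaluates to the previous
 * value.  The 4- and 8-byte cases use the native atomic_swap_*(9)
 * primitives; 1- and 2-byte swaps are emulated with an fcmpset loop
 * because FreeBSD provides no atomic_swap_8()/atomic_swap_16().
 */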

#define	try_cmpxchg(p, op, n)							\
({										\
	__typeof(p) __op = (__typeof((p)))(op);					\
	__typeof(*(p)) __o = *__op;						\
	__typeof(*(p)) __p = __sync_val_compare_and_swap((p), (__o), (n));	\
	if (__p != __o)								\
		*__op = __p;							\
	(__p == __o);								\
})
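
/*
 * try_cmpxchg() implements the Linux semantics: on failure the value
 * actually found at "p" is written back through "op" and the expression
 * evaluates to false, so callers can loop without an explicit reload.
 * This generic form relies on the compiler's
 * __sync_val_compare_and_swap() builtin.
 */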

#define	__atomic_try_cmpxchg(type, _p, _po, _n)		\
({							\
	__typeof(_po) __po = (_po);			\
	__typeof(*(_po)) __r, __o = *__po;		\
	__r = atomic_cmpxchg##type((_p), __o, (_n));	\
	if (unlikely(__r != __o))			\
		*__po = __r;				\
	likely(__r == __o);				\
})

#define	atomic_try_cmpxchg(_p, _po, _n)	__atomic_try_cmpxchg(, _p, _po, _n)
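
/*
 * __atomic_try_cmpxchg() layers the same update-the-expected-value
 * protocol on top of the atomic_cmpxchg##type() helpers;
 * atomic_try_cmpxchg() is the atomic_t instantiation.
 */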

static inline int
atomic_dec_if_positive(atomic_t *v)
{
	int retval;
	int old;

	old = atomic_read(v);
	for (;;) {
		retval = old - 1;
		if (unlikely(retval < 0))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &old, retval)))
			break;
	}
	return (retval);
}
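
/*
 * atomic_dec_if_positive() decrements only when the result would stay
 * non-negative and returns the new value; a negative return value means
 * the counter was left untouched.
 */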

#define	LINUX_ATOMIC_OP(op, c_op)				\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
}
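
/*
 * LINUX_ATOMIC_OP() generates the void atomic_or/and/andnot/xor()
 * operations as a cmpxchg loop over the requested bitwise operator.
 */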

#define	LINUX_ATOMIC_FETCH_OP(op, c_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
								\
	return (c);						\
}
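
/*
 * The FETCH variants use the same loop but additionally return the
 * value the counter held before the operation was applied.
 */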

static inline int
atomic_fetch_inc(atomic_t *v)
{
	return (atomic_inc_return(v) - 1);
}

LINUX_ATOMIC_OP(or, |)
LINUX_ATOMIC_OP(and, &)
LINUX_ATOMIC_OP(andnot, &~)
LINUX_ATOMIC_OP(xor, ^)

LINUX_ATOMIC_FETCH_OP(or, |)
LINUX_ATOMIC_FETCH_OP(and, &)
LINUX_ATOMIC_FETCH_OP(andnot, &~)
LINUX_ATOMIC_FETCH_OP(xor, ^)

#endif					/* _LINUXKPI_ASM_ATOMIC_H_ */