/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _ASM_ATOMIC_H_
#define	_ASM_ATOMIC_H_

#include <linux/compiler.h>
#include <sys/types.h>
#include <machine/atomic.h>

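/*
 * Emulation of the Linux atomic_t type on top of FreeBSD's native
 * machine/atomic.h primitives.  ATOMIC_INIT() provides static
 * initialization, e.g. (illustrative usage only):
 *
 *	static atomic_t refcount = ATOMIC_INIT(1);
 */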
#define	ATOMIC_INIT(x)	{ .counter = (x) }

typedef struct {
	volatile int counter;
} atomic_t;

/*------------------------------------------------------------------------*
 *	32-bit atomic operations
 *------------------------------------------------------------------------*/

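/*
 * Map the Linux names onto the helpers defined below.  The *_and_test
 * variants report whether the counter reached zero, atomic_add_negative()
 * reports whether the result went negative, and atomic_inc_not_zero()
 * increments only when the counter is non-zero.
 */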
#define	atomic_add(i, v)		atomic_add_return((i), (v))
#define	atomic_sub(i, v)		atomic_sub_return((i), (v))
#define	atomic_inc_return(v)		atomic_add_return(1, (v))
#define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
#define	atomic_add_and_test(i, v)	(atomic_add_return((i), (v)) == 0)
#define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define	atomic_dec_return(v)		atomic_sub_return(1, (v))
#define	atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

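/*
 * atomic_fetchadd_int() returns the value the counter held before the
 * addition, so the new value is reconstructed by re-applying the delta.
 */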
static inline int
atomic_add_return(int i, atomic_t *v)
{
	return (i + atomic_fetchadd_int(&v->counter, i));
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return (atomic_fetchadd_int(&v->counter, -i) - i);
}

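/*
 * Plain store; WRITE_ONCE() guarantees the write is performed in one
 * untorn access but imposes no memory ordering.
 */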
static inline void
atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

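/*
 * Store with release semantics: all prior loads and stores become
 * visible before the counter update.
 */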
static inline void
atomic_set_release(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_set_int(&v->counter, mask);
}

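/*
 * Plain load; READ_ONCE() prevents the compiler from caching or tearing
 * the access but, like atomic_set(), imposes no ordering.
 */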
static inline int
atomic_read(const atomic_t *v)
{
	return (READ_ONCE(v->counter));
}

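/*
 * Unlike their Linux counterparts, which return void, atomic_inc() and
 * atomic_dec() return the new counter value here; callers that need the
 * Linux semantics can simply ignore it.
 */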
static inline int
atomic_inc(atomic_t *v)
{
	return (atomic_fetchadd_int(&v->counter, 1) + 1);
}

static inline int
atomic_dec(atomic_t *v)
{
	return (atomic_fetchadd_int(&v->counter, -1) - 1);
}

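/*
 * Atomically add "a" to the counter unless it currently equals "u".
 * Returns non-zero when the addition was performed.  The cmpset loop
 * retries until either the add succeeds or the excluded value is seen;
 * atomic_inc_not_zero() above is built directly on this.
 */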
static inline int
atomic_add_unless(atomic_t *v, int a, int u)
{
	int c;

	for (;;) {
		c = atomic_read(v);
		if (unlikely(c == u))
			break;
		if (likely(atomic_cmpset_int(&v->counter, c, c + a)))
			break;
	}
	return (c != u);
}

static inline void
atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_clear_int(&v->counter, mask);
}

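/*
 * Exchange the counter for "i" and return the previous value.  On
 * platforms providing atomic_swap_int() the native primitive is used;
 * elsewhere the swap is emulated with a compare-and-set loop.
 */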
static inline int
atomic_xchg(atomic_t *v, int i)
{
#if defined(__i386__) || defined(__amd64__) || \
    defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	return (atomic_swap_int(&v->counter, i));
#else
	int ret;
	for (;;) {
		ret = READ_ONCE(v->counter);
		if (atomic_cmpset_int(&v->counter, ret, i))
			break;
	}
	return (ret);
#endif
}

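/*
 * Linux cmpxchg semantics: return the value found in the counter.  The
 * swap took place if and only if the return value equals "old".  The
 * re-read-and-retry loop ensures that "old" is only returned when the
 * swap has actually been performed.
 */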
static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret = old;

	for (;;) {
		if (atomic_cmpset_int(&v->counter, old, new))
			break;
		ret = READ_ONCE(v->counter);
		if (ret != old)
			break;
	}
	return (ret);
}

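/*
 * Type-generic cmpxchg() for 1-, 2-, 4- and 8-byte objects.  The union
 * reinterprets the operands at the required width without violating
 * strict aliasing, CTASSERT() rejects unsupported sizes at compile time,
 * and atomic_fcmpset_*() reloads __ret with the current value on
 * failure, so the loop exits as soon as the swap succeeds or a value
 * other than "old" is observed.
 */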
#define	cmpxchg(ptr, old, new) ({					\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret = { .val = (old) }, __new = { .val = (new) };		\
									\
	CTASSERT(sizeof(__ret.val) == 1 || sizeof(__ret.val) == 2 ||	\
	    sizeof(__ret.val) == 4 || sizeof(__ret.val) == 8);		\
									\
	switch (sizeof(__ret.val)) {					\
	case 1:								\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]) && __ret.val == (old))	\
			;						\
		break;							\
	case 2:								\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]) && __ret.val == (old))	\
			;						\
		break;							\
	case 4:								\
		while (!atomic_fcmpset_32((volatile u32 *)(ptr),	\
		    __ret.u32, __new.u32[0]) && __ret.val == (old))	\
			;						\
		break;							\
	case 8:								\
		while (!atomic_fcmpset_64((volatile u64 *)(ptr),	\
		    __ret.u64, __new.u64[0]) && __ret.val == (old))	\
			;						\
		break;							\
	}								\
	__ret.val;							\
})

#define	cmpxchg_relaxed(...)	cmpxchg(__VA_ARGS__)

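/*
 * Type-generic xchg().  4- and 8-byte objects use the native swap
 * primitives; 1- and 2-byte objects are emulated with a fcmpset loop,
 * since sub-word atomic swaps are not available on all platforms.
 */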
#define	xchg(ptr, new) ({						\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret, __new = { .val = (new) };				\
									\
	CTASSERT(sizeof(__ret.val) == 1 || sizeof(__ret.val) == 2 ||	\
	    sizeof(__ret.val) == 4 || sizeof(__ret.val) == 8);		\
									\
	switch (sizeof(__ret.val)) {					\
	case 1:								\
		__ret.val = READ_ONCE(*(ptr));				\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]))				\
			;						\
		break;							\
	case 2:								\
		__ret.val = READ_ONCE(*(ptr));				\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]))				\
			;						\
		break;							\
	case 4:								\
		__ret.u32[0] = atomic_swap_32((volatile u32 *)(ptr),	\
		    __new.u32[0]);					\
		break;							\
	case 8:								\
		__ret.u64[0] = atomic_swap_64((volatile u64 *)(ptr),	\
		    __new.u64[0]);					\
		break;							\
	}								\
	__ret.val;							\
})

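/*
 * Generate the remaining bitwise helpers from atomic_cmpxchg():
 * atomic_{or,and,andnot,xor}() apply the operator and return nothing,
 * while the atomic_fetch_*() forms also return the value the counter
 * held beforehand.
 */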
#define	LINUX_ATOMIC_OP(op, c_op)				\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
}

#define	LINUX_ATOMIC_FETCH_OP(op, c_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
								\
	return (c);						\
}

LINUX_ATOMIC_OP(or, |)
LINUX_ATOMIC_OP(and, &)
LINUX_ATOMIC_OP(andnot, &~)
LINUX_ATOMIC_OP(xor, ^)

LINUX_ATOMIC_FETCH_OP(or, |)
LINUX_ATOMIC_FETCH_OP(and, &)
LINUX_ATOMIC_FETCH_OP(andnot, &~)
LINUX_ATOMIC_FETCH_OP(xor, ^)

#endif					/* _ASM_ATOMIC_H_ */