/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2013-2020 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_LINUX_ATOMIC_H_
#define	_LINUX_ATOMIC_H_

#include <asm/atomic.h>
#include <asm/barrier.h>

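/*
 * Emulation of the Linux atomic_t / atomic64_t API on top of the native
 * BSD atomic(9) primitives.  Only atomic_t is defined here; atomic64_t
 * is expected to be provided by <asm/atomic.h> above.
 */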
typedef struct {
	volatile u_int counter;
} atomic_t;

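/*
 * Arithmetic helpers, expressed in terms of the *_return() functions
 * defined below.  Note that atomic_add() and atomic_sub() therefore
 * return the new value, whereas their Linux counterparts return void;
 * callers that ignore the result are unaffected.
 */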
#define	atomic_add(i, v)		atomic_add_return((i), (v))
#define	atomic_sub(i, v)		atomic_sub_return((i), (v))
#define	atomic_inc_return(v)		atomic_add_return(1, (v))
#define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
#define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define	atomic_dec_return(v)		atomic_sub_return(1, (v))

#define	atomic64_add(i, v)		atomic_add_return_long((i), (v))
#define	atomic64_sub(i, v)		atomic_sub_return_long((i), (v))

#define	atomic_xchg(p, v)		atomic_swap_int(&((p)->counter), v)
#define	atomic64_xchg(p, v)		atomic_swap_long(&((p)->counter), v)

#define	atomic_cmpset(p, o, n)		atomic_cmpset_32(&((p)->counter), o, n)

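/*
 * Unlike Linux cmpxchg(), a failed exchange yields 0 rather than the
 * value observed in *p; callers that compare the result against (o)
 * should beware of the case where (o) itself is 0.
 */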
#define	atomic64_cmpxchg(p, o, n)					\
	(atomic_cmpset_long((volatile uint64_t *)(p), (o), (n)) ? (o) : (0))

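/* The *_return() functions return the new value of the counter. */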
static inline int
atomic_add_return(int i, atomic_t *v)
{
	return i + atomic_fetchadd_int(&v->counter, i);
}

static inline int64_t
atomic_add_return_long(int64_t i, atomic64_t *v)
{
	return i + atomic_fetchadd_long(&v->counter, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -i) - i;
}

static inline int64_t
atomic_sub_return_long(int64_t i, atomic64_t *v)
{
	return atomic_fetchadd_long(&v->counter, -i) - i;
}

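/* Stores use release semantics, ordering prior writes before the set. */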
static inline void
atomic_set(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic64_set(atomic64_t *v, long i)
{
	atomic_store_rel_long(&v->counter, i);
}

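/*
 * atomic_read() is a plain READ_ONCE(); the 64-bit variant uses a
 * load-acquire, which is stronger ordering than Linux's relaxed
 * atomic64_read() but harmless.
 */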
static inline int
atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}

static inline int64_t
atomic64_read(atomic64_t *v)
{
	return atomic_load_acq_long(&v->counter);
}

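/* Unlike their void Linux counterparts, these return the new value. */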
static inline int
atomic_inc(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, 1) + 1;
}

static inline int
atomic_dec(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -1) - 1;
}

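/* Returns the value of v->counter observed before the exchange. */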
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return atomic_cmpxchg_int(&v->counter, old, new);
}

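/*
 * Atomically add "add" to v unless the counter equals "unless".
 * Returns nonzero iff the addition was performed.  A hypothetical
 * refcount sketch (obj and refs are illustrative names only):
 *
 *	if (!atomic_add_unless(&obj->refs, 1, 0))
 *		return (NULL);	/- object already being torn down -/
 */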
static inline int atomic_add_unless(atomic_t *v, int add, int unless)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == unless))
			break;
		old = atomic_cmpxchg_int(&v->counter, c, c + add);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != unless;
}

#define	atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

/* atomic_clear_mask: atomically clear the bits set in mask from *addr */
#define	atomic_clear_mask(mask, addr)		\
	/* atomic *addr &= ~mask; */		\
	__asm __volatile("lock andl %1, %0"	\
	     : "+m" (*(addr))			\
	     : "r" (~(mask))			\
	     : "memory")

#define	smp_mb__before_atomic()	cpu_ccfence()
#define	smp_mb__after_atomic()	cpu_ccfence()

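/*
 * The atomics here compile to x86 lock-prefixed instructions, which
 * already act as full memory barriers, so only a compiler fence
 * (cpu_ccfence()) is needed around them.
 */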
static inline void
atomic_andnot(int i, atomic_t *v)
{
	/* v->counter = v->counter & ~i; */
	atomic_clear_int(&v->counter, i);
}

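/*
 * Generic cmpxchg(), returning the value observed at *ptr.  The fcmpset
 * primitives reload __ret with the current value on failure, so the
 * loops below retry only spurious failures, i.e. while the observed
 * value still equals (old).
 */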
#define	cmpxchg(ptr, old, new) ({					\
	__typeof(*(ptr)) __ret;						\
									\
	CTASSERT(sizeof(__ret) == 1 || sizeof(__ret) == 2 ||		\
	    sizeof(__ret) == 4 || sizeof(__ret) == 8);			\
									\
	__ret = (old);							\
	switch (sizeof(__ret)) {					\
	case 1:								\
		while (!atomic_fcmpset_8((volatile int8_t *)(ptr),	\
		    (int8_t *)&__ret, (u64)(new)) && __ret == (old))	\
			;						\
		break;							\
	case 2:								\
		while (!atomic_fcmpset_16((volatile int16_t *)(ptr),	\
		    (int16_t *)&__ret, (u64)(new)) && __ret == (old))	\
			;						\
		break;							\
	case 4:								\
		while (!atomic_fcmpset_32((volatile int32_t *)(ptr),	\
		    (int32_t *)&__ret, (u64)(new)) && __ret == (old))	\
			;						\
		break;							\
	case 8:								\
		while (!atomic_fcmpset_64((volatile int64_t *)(ptr),	\
		    (int64_t *)&__ret, (u64)(new)) && __ret == (old))	\
			;						\
		break;							\
	}								\
	__ret;								\
})

#define	cmpxchg_relaxed(...)	cmpxchg(__VA_ARGS__)

#define	atomic64_inc_return(p)	__sync_add_and_fetch(&(p)->counter, 1)

static inline void
atomic_set_release(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

/* Returns the old value of v->counter */
static inline int
atomic_fetch_xor(int i, atomic_t *v)
{
	int val = READ_ONCE(v->counter);

	/*
	 * atomic_fcmpset_int() reloads val with the observed value on
	 * failure, so the xor is recomputed from fresh state on every
	 * retry.
	 */
	while (!atomic_fcmpset_int(&v->counter, &val, val ^ i))
		;

	return val;
}

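/*
 * Decrement v only if the result stays non-negative.  Returns the
 * decremented value, or a negative value (leaving v untouched) if the
 * counter was already zero or negative.
 */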
static inline int
atomic_dec_if_positive(atomic_t *v)
{
	int retval;
	int old;

	old = atomic_read(v);
	for (;;) {
		retval = old - 1;
		if (unlikely(retval < 0))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &old, retval)))
			break;
	}
	return (retval);
}

#include <asm-generic/atomic-long.h>

#endif	/* _LINUX_ATOMIC_H_ */