/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2013-2017 François Tigeot
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_LINUX_ATOMIC_H_
#define	_LINUX_ATOMIC_H_

#include <asm/atomic.h>

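/*
 * Linux atomic_t / atomic64_t emulation for the DragonFly drm compat
 * layer: the operations below are implemented on top of the native
 * atomic(9) primitives (atomic_fetchadd_int(), atomic_swap_int(),
 * atomic_fcmpset_*() and friends).
 */
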
typedef struct {
	volatile u_int counter;
} atomic_t;

typedef struct {
	volatile u_long counter;
} atomic64_t;

#define	atomic_add(i, v)		atomic_add_return((i), (v))
#define	atomic_sub(i, v)		atomic_sub_return((i), (v))
#define	atomic_inc_return(v)		atomic_add_return(1, (v))
#define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
#define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define	atomic_dec_return(v)		atomic_sub_return(1, (v))

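/*
 * A minimal usage sketch (hypothetical refcounted object, not part of
 * this header): atomic_dec_and_test() evaluates to true only for the
 * caller that drops the last reference.
 *
 *	struct obj { atomic_t refs; };
 *
 *	static inline void
 *	obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refs))
 *			obj_destroy(o);		// hypothetical destructor
 *	}
 */
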
#define	atomic64_add(i, v)		atomic_add_return_long((i), (v))
#define	atomic64_sub(i, v)		atomic_sub_return_long((i), (v))

#define	atomic_xchg(p, v)		atomic_swap_int(&((p)->counter), v)
#define	atomic64_xchg(p, v)		atomic_swap_long(&((p)->counter), v)

#define	atomic_cmpset(p, o, n)		atomic_cmpset_32(&((p)->counter), o, n)

#define	atomic64_cmpxchg(p, o, n)					\
	(atomic_cmpset_long((volatile uint64_t *)(p), (o), (n)) ? (o) : (0))
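/*
 * Note: unlike Linux cmpxchg(), atomic64_cmpxchg() above evaluates to 0
 * when the compare fails rather than to the value actually observed in
 * *p, since atomic_cmpset_long() only reports success or failure.
 */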

static inline int
atomic_add_return(int i, atomic_t *v)
{
	return i + atomic_fetchadd_int(&v->counter, i);
}

static inline int64_t
atomic_add_return_long(int64_t i, atomic64_t *v)
{
	return i + atomic_fetchadd_long(&v->counter, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -i) - i;
}

static inline int64_t
atomic_sub_return_long(int64_t i, atomic64_t *v)
{
	return atomic_fetchadd_long(&v->counter, -i) - i;
}

static inline void
atomic_set(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic64_set(atomic64_t *v, long i)
{
	atomic_store_rel_long(&v->counter, i);
}

static inline int
atomic_read(atomic_t *v)
{
	return atomic_load_acq_int(&v->counter);
}

static inline int64_t
atomic64_read(atomic64_t *v)
{
	return atomic_load_acq_long(&v->counter);
}

static inline int
atomic_inc(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, 1) + 1;
}

static inline int
atomic_dec(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -1) - 1;
}

static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return atomic_cmpxchg_int(&v->counter, old, new);
}

static inline int
atomic_add_unless(atomic_t *v, int add, int unless)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == unless))
			break;
		old = atomic_cmpxchg_int(&v->counter, c, c + add);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != unless;
}

#define	atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

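/*
 * A hedged usage sketch (hypothetical lookup path, not part of this
 * header): atomic_inc_not_zero() takes a reference only while the
 * object still holds at least one, so a dying object is not revived.
 *
 *	if (!atomic_inc_not_zero(&obj->refs))
 *		return NULL;	// object already being torn down
 */
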
/* atomic_clear_mask: atomically clear the bits set in mask from *addr */
#define	atomic_clear_mask(mask, addr)		\
	/* atomic *addr &= ~mask; */		\
	__asm __volatile("lock andl %0, %1"	\
		:				\
		: "r" (~mask), "m" (*addr)	\
		: "memory");

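/*
 * smp_mb__before_atomic()/smp_mb__after_atomic() only need to stop the
 * compiler from reordering here: cpu_ccfence() is a compiler barrier,
 * and on x86 the locked read-modify-write instructions used by the
 * atomics above already order memory accesses.
 */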
#define	smp_mb__before_atomic()	cpu_ccfence()
#define	smp_mb__after_atomic()	cpu_ccfence()

static inline void
atomic_andnot(int i, atomic_t *v)
{
	/* v->counter = v->counter & ~i; */
	atomic_clear_int(&v->counter, i);
}

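/*
 * Generic cmpxchg(): dispatch on the operand size and loop on the
 * matching atomic_fcmpset_*() primitive until it either succeeds or
 * observes a value different from 'old'.  The macro evaluates to the
 * value found in *ptr before the operation, as Linux callers expect.
 */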
#define cmpxchg(ptr, old, new) ({				\
	__typeof(*(ptr)) __ret;					\
								\
	CTASSERT(sizeof(__ret) == 1 || sizeof(__ret) == 2 ||	\
	    sizeof(__ret) == 4 || sizeof(__ret) == 8);		\
								\
	__ret = (old);						\
	switch (sizeof(__ret)) {				\
	case 1:							\
		while (!atomic_fcmpset_8((volatile int8_t *)(ptr), \
		    (int8_t *)&__ret, (new)) && __ret == (old))	\
			;					\
		break;						\
	case 2:							\
		while (!atomic_fcmpset_16((volatile int16_t *)(ptr), \
		    (int16_t *)&__ret, (new)) && __ret == (old)) \
			;					\
		break;						\
	case 4:							\
		while (!atomic_fcmpset_32((volatile int32_t *)(ptr), \
		    (int32_t *)&__ret, (new)) && __ret == (old)) \
			;					\
		break;						\
	case 8:							\
		while (!atomic_fcmpset_64((volatile int64_t *)(ptr), \
		    (int64_t *)&__ret, (new)) && __ret == (old)) \
			;					\
		break;						\
	}							\
	__ret;							\
})

#define	cmpxchg_relaxed(...)	cmpxchg(__VA_ARGS__)
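
/*
 * A minimal sketch of cmpxchg() use (hypothetical state word, not part
 * of this header): transition STATE_IDLE -> STATE_BUSY only if nobody
 * else got there first.
 *
 *	if (cmpxchg(&sc->state, STATE_IDLE, STATE_BUSY) != STATE_IDLE)
 *		return (EBUSY);	// hypothetical: someone else owns it
 */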

#endif	/* _LINUX_ATOMIC_H_ */