xref: /dragonfly/sys/dev/drm/include/linux/atomic.h (revision 9f47dde1)
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2013-2018 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_LINUX_ATOMIC_H_
#define	_LINUX_ATOMIC_H_

#include <asm/atomic.h>
#include <asm/barrier.h>

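/*
 * Linux atomic types, emulated here as plain counters operated on by
 * the native DragonFly atomic(9) primitives.
 */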
typedef struct {
	volatile u_int counter;
} atomic_t;

typedef struct {
	volatile u_long counter;
} atomic64_t;

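/*
 * Arithmetic helpers built on the *_return primitives below; the
 * *_and_test and *_negative forms test the post-operation value.
 */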
#define	atomic_add(i, v)		atomic_add_return((i), (v))
#define	atomic_sub(i, v)		atomic_sub_return((i), (v))
#define	atomic_inc_return(v)		atomic_add_return(1, (v))
#define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
#define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define	atomic_dec_return(v)		atomic_sub_return(1, (v))

#define	atomic64_add(i, v)		atomic_add_return_long((i), (v))
#define	atomic64_sub(i, v)		atomic_sub_return_long((i), (v))

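/* Unconditional exchange: returns the previous value of the counter. */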
#define	atomic_xchg(p, v)		atomic_swap_int(&((p)->counter), v)
#define	atomic64_xchg(p, v)		atomic_swap_long(&((p)->counter), v)

#define	atomic_cmpset(p, o, n)		atomic_cmpset_32(&((p)->counter), o, n)

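/*
 * Note: unlike Linux cmpxchg(), this returns 0 rather than the current
 * counter value when the comparison fails.
 */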
#define	atomic64_cmpxchg(p, o, n)					\
	(atomic_cmpset_long((volatile uint64_t *)(p), (o), (n)) ? (o) : (0))

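/* Add/subtract and return the resulting value, as the Linux API expects. */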
static inline int
atomic_add_return(int i, atomic_t *v)
{
	return i + atomic_fetchadd_int(&v->counter, i);
}

static inline int64_t
atomic_add_return_long(int64_t i, atomic64_t *v)
{
	return i + atomic_fetchadd_long(&v->counter, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -i) - i;
}

static inline int64_t
atomic_sub_return_long(int64_t i, atomic64_t *v)
{
	return atomic_fetchadd_long(&v->counter, -i) - i;
}

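/*
 * Store with release and load with acquire semantics; slightly stronger
 * than the relaxed ordering Linux requires for atomic_set()/atomic_read().
 */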
static inline void
atomic_set(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic64_set(atomic64_t *v, long i)
{
	atomic_store_rel_long(&v->counter, i);
}

static inline int
atomic_read(atomic_t *v)
{
	return atomic_load_acq_int(&v->counter);
}

static inline int64_t
atomic64_read(atomic64_t *v)
{
	return atomic_load_acq_long(&v->counter);
}

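/* Increment/decrement and return the new value. */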
static inline int
atomic_inc(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, 1) + 1;
}

static inline int
atomic_dec(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -1) - 1;
}

static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return atomic_cmpxchg_int(&v->counter, old, new);
}

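/*
 * Atomically add 'add' to v unless v equals 'unless'; returns nonzero
 * if the addition was performed.
 */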
static inline int
atomic_add_unless(atomic_t *v, int add, int unless)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == unless))
			break;
		old = atomic_cmpxchg_int(&v->counter, c, c + add);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != unless;
}

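/* Increment v unless it is zero; returns nonzero if it was incremented. */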
#define	atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

/* atomic_clear_mask: atomically clear the bits set in mask from *addr */
#define	atomic_clear_mask(mask, addr)		\
	/* atomic *addr &= ~mask; */		\
	__asm __volatile("lock andl %1, %0"	\
		: "+m" (*(addr))		\
		: "r" (~(mask))			\
		: "memory")

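/*
 * A compiler fence suffices here: on x86_64 the locked instructions used
 * by the atomic ops already act as full memory barriers.
 */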
#define	smp_mb__before_atomic()	cpu_ccfence()
#define	smp_mb__after_atomic()	cpu_ccfence()

static inline void
atomic_andnot(int i, atomic_t *v)
{
	/* v->counter = v->counter & ~i; */
	atomic_clear_int(&v->counter, i);
}

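/*
 * Generic cmpxchg: returns the value found at *ptr; the swap happened iff
 * that value equals 'old'. The loop retries fcmpset on spurious failures,
 * i.e. while the read-back value still matches 'old'.
 */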
#define	cmpxchg(ptr, old, new) ({				\
	__typeof(*(ptr)) __ret;					\
								\
	CTASSERT(sizeof(__ret) == 1 || sizeof(__ret) == 2 ||	\
	    sizeof(__ret) == 4 || sizeof(__ret) == 8);		\
								\
	__ret = (old);						\
	switch (sizeof(__ret)) {				\
	case 1:							\
		while (!atomic_fcmpset_8((volatile int8_t *)(ptr), \
		    (int8_t *)&__ret, (new)) && __ret == (old))	\
			;					\
		break;						\
	case 2:							\
		while (!atomic_fcmpset_16((volatile int16_t *)(ptr), \
		    (int16_t *)&__ret, (new)) && __ret == (old)) \
			;					\
		break;						\
	case 4:							\
		while (!atomic_fcmpset_32((volatile int32_t *)(ptr), \
		    (int32_t *)&__ret, (new)) && __ret == (old)) \
			;					\
		break;						\
	case 8:							\
		while (!atomic_fcmpset_64((volatile int64_t *)(ptr), \
		    (int64_t *)&__ret, (new)) && __ret == (old)) \
			;					\
		break;						\
	}							\
	__ret;							\
})

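/* The relaxed variant simply maps to the fully ordered cmpxchg() above. */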
#define	cmpxchg_relaxed(...)	cmpxchg(__VA_ARGS__)

#endif	/* _LINUX_ATOMIC_H_ */