/* $OpenBSD: atomic.h,v 1.10 2020/06/17 01:03:57 jsg Exp $ */
/**
 * \file drm_atomic.h
 * Atomic operations used in the DRM which may or may not be provided by the OS.
 *
 * \author Eric Anholt <anholt@FreeBSD.org>
 */

/*-
 * Copyright 2004 Eric Anholt
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_LINUX_ATOMIC_H_
#define _DRM_LINUX_ATOMIC_H_

#include <sys/types.h>
#include <sys/mutex.h>
#include <machine/intr.h>
#include <linux/types.h>
#include <linux/compiler.h>	/* via x86/include/asm/atomic.h */

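/*
 * Linux-style atomic_t operations built on the gcc/clang __sync
 * builtins.  atomic_t is assumed to be a plain 32-bit integer type
 * provided by <linux/types.h>.  atomic_add()/atomic_sub() and friends
 * discard the fetched value; the *_return variants yield the value
 * after the operation.
 */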
#define ATOMIC_INIT(x)		(x)

#define atomic_set(p, v)	WRITE_ONCE(*(p), (v))
#define atomic_read(p)		READ_ONCE(*(p))
#define atomic_inc(p)		__sync_fetch_and_add(p, 1)
#define atomic_dec(p)		__sync_fetch_and_sub(p, 1)
#define atomic_add(n, p)	__sync_fetch_and_add(p, n)
#define atomic_sub(n, p)	__sync_fetch_and_sub(p, n)
#define atomic_and(n, p)	__sync_fetch_and_and(p, n)
#define atomic_or(n, p)		atomic_setbits_int(p, n)
#define atomic_add_return(n, p) __sync_add_and_fetch(p, n)
#define atomic_sub_return(n, p) __sync_sub_and_fetch(p, n)
#define atomic_inc_return(v)	atomic_add_return(1, (v))
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)
#define atomic_cmpxchg(p, o, n)	__sync_val_compare_and_swap(p, o, n)
#define cmpxchg(p, o, n)	__sync_val_compare_and_swap(p, o, n)
#define atomic_set_release(p, v)	atomic_set((p), (v))
#define atomic_andnot(bits, p)		atomic_clearbits_int(p, bits)
#define atomic_fetch_inc(p)		__sync_fetch_and_add(p, 1)
#define atomic_fetch_xor(n, p)		__sync_fetch_and_xor(p, n)

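/*
 * try_cmpxchg() follows the Linux convention: *op holds the expected
 * value, and on failure the value actually observed in *p is written
 * back to *op before the expression evaluates to false.  A typical
 * caller (hypothetical names, just a sketch) retries until it wins:
 *
 *	int old = atomic_read(&count), newval;
 *
 *	do {
 *		newval = old + 1;
 *	} while (!atomic_try_cmpxchg(&count, &old, newval));
 */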
#define try_cmpxchg(p, op, n)						\
({									\
	__typeof(p) __op = (__typeof((p)))(op);				\
	__typeof(*(p)) __o = *__op;					\
	__typeof(*(p)) __p = __sync_val_compare_and_swap((p), (__o), (n)); \
	if (__p != __o)							\
		*__op = __p;						\
	(__p == __o);							\
})

static inline bool
atomic_try_cmpxchg(volatile int *p, int *op, int n)
{
	return try_cmpxchg(p, op, n);
}

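/*
 * atomic_xchg() issues a full barrier first because
 * __sync_lock_test_and_set() is only documented as an acquire barrier
 * on its own; the bare xchg() macro below skips that extra barrier.
 */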
static inline int
atomic_xchg(volatile int *v, int n)
{
	__sync_synchronize();
	return __sync_lock_test_and_set(v, n);
}

#define xchg(v, n)	__sync_lock_test_and_set(v, n)

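/*
 * atomic_add_unless(v, n, u): add n to *v unless *v equals u; returns
 * 1 if the addition happened and 0 otherwise.  atomic_inc_not_zero()
 * uses it to take a reference only while the count is still non-zero.
 */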
static inline int
atomic_add_unless(volatile int *v, int n, int u)
{
	int o;

	do {
		o = *v;
		if (o == u)
			return 0;
	} while (__sync_val_compare_and_swap(v, o, o + n) != o);

	return 1;
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

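/*
 * atomic_dec_if_positive(): decrement *v only if the result stays
 * non-negative.  Returns the decremented value, which is negative when
 * no decrement was performed.
 */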
static inline int
atomic_dec_if_positive(volatile int *v)
{
	int r, o;

	do {
		o = *v;
		r = o - 1;
		if (r < 0)
			break;
	} while (__sync_val_compare_and_swap(v, o, r) != o);

	return r;
}

#define atomic_long_read(p)	READ_ONCE(*(p))

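/*
 * 64-bit atomics.  On LP64 platforms the 8-byte __sync builtins can be
 * used directly on a plain int64_t.
 */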
#ifdef __LP64__
typedef int64_t atomic64_t;

#define ATOMIC64_INIT(x)	(x)

#define atomic64_set(p, v)	WRITE_ONCE(*(p), (v))
#define atomic64_read(p)	READ_ONCE(*(p))

static inline int64_t
atomic64_xchg(volatile int64_t *v, int64_t n)
{
	__sync_synchronize();
	return __sync_lock_test_and_set(v, n);
}

#define atomic64_add(n, p)	__sync_fetch_and_add_8(p, n)
#define atomic64_sub(n, p)	__sync_fetch_and_sub_8(p, n)
#define atomic64_inc(p)		__sync_fetch_and_add_8(p, 1)
#define atomic64_add_return(n, p) __sync_add_and_fetch_8(p, n)
#define atomic64_inc_return(p)	__sync_add_and_fetch_8(p, 1)

#else

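/*
 * Without usable 64-bit atomic builtins, emulate atomic64_t with a
 * value protected by an IPL_HIGH mutex.
 */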
typedef struct {
	volatile int64_t val;
	struct mutex lock;
} atomic64_t;

#define ATOMIC64_INIT(x)	{ (x), .lock = MUTEX_INITIALIZER(IPL_HIGH) }

static inline void
atomic64_set(atomic64_t *v, int64_t i)
{
	mtx_init(&v->lock, IPL_HIGH);
	v->val = i;
}

static inline int64_t
atomic64_read(atomic64_t *v)
{
	int64_t val;

	mtx_enter(&v->lock);
	val = v->val;
	mtx_leave(&v->lock);

	return val;
}

static inline int64_t
atomic64_xchg(atomic64_t *v, int64_t n)
{
	int64_t val;

	mtx_enter(&v->lock);
	val = v->val;
	v->val = n;
	mtx_leave(&v->lock);

	return val;
}

static inline void
atomic64_add(int i, atomic64_t *v)
{
	mtx_enter(&v->lock);
	v->val += i;
	mtx_leave(&v->lock);
}

#define atomic64_inc(p)		atomic64_add(1, p)

static inline int64_t
atomic64_add_return(int i, atomic64_t *v)
{
	int64_t val;

	mtx_enter(&v->lock);
	val = v->val + i;
	v->val = val;
	mtx_leave(&v->lock);

	return val;
}

#define atomic64_inc_return(p)		atomic64_add_return(1, p)

static inline void
atomic64_sub(int i, atomic64_t *v)
{
	mtx_enter(&v->lock);
	v->val -= i;
	mtx_leave(&v->lock);
}
#endif

#ifdef __LP64__
typedef int64_t atomic_long_t;
#define atomic_long_set(p, v)		atomic64_set(p, v)
#define atomic_long_xchg(v, n)		atomic64_xchg(v, n)
#define atomic_long_cmpxchg(p, o, n)	atomic_cmpxchg(p, o, n)
#else
typedef int32_t atomic_long_t;
#define atomic_long_set(p, v)		atomic_set(p, v)
#define atomic_long_xchg(v, n)		atomic_xchg(v, n)
#define atomic_long_cmpxchg(p, o, n)	atomic_cmpxchg(p, o, n)
#endif

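/*
 * Bit operations.  The bitmap is treated as an array of 32-bit words:
 * bit b lives in word (b >> 5) under mask (1 << (b & 0x1f)).  The
 * double-underscore variants are non-atomic and may only be used when
 * the caller already prevents concurrent access to the word.
 */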
static inline int
test_and_set_bit(u_int b, volatile void *p)
{
	unsigned int m = 1 << (b & 0x1f);
	unsigned int prev = __sync_fetch_and_or((volatile u_int *)p + (b >> 5), m);
	return (prev & m) != 0;
}

static inline void
clear_bit(u_int b, volatile void *p)
{
	atomic_clearbits_int(((volatile u_int *)p) + (b >> 5), 1 << (b & 0x1f));
}

static inline void
clear_bit_unlock(u_int b, volatile void *p)
{
	membar_enter();
	clear_bit(b, p);
}

static inline void
set_bit(u_int b, volatile void *p)
{
	atomic_setbits_int(((volatile u_int *)p) + (b >> 5), 1 << (b & 0x1f));
}

static inline void
__clear_bit(u_int b, volatile void *p)
{
	volatile u_int *ptr = (volatile u_int *)p;
	ptr[b >> 5] &= ~(1 << (b & 0x1f));
}

static inline void
__set_bit(u_int b, volatile void *p)
{
	volatile u_int *ptr = (volatile u_int *)p;
	ptr[b >> 5] |= (1 << (b & 0x1f));
}

static inline int
test_bit(u_int b, const volatile void *p)
{
	return !!(((volatile u_int *)p)[b >> 5] & (1 << (b & 0x1f)));
}

static inline int
__test_and_set_bit(u_int b, volatile void *p)
{
	unsigned int m = 1 << (b & 0x1f);
	volatile u_int *ptr = (volatile u_int *)p;
	unsigned int prev = ptr[b >> 5];
	ptr[b >> 5] |= m;

	return (prev & m) != 0;
}

static inline int
test_and_clear_bit(u_int b, volatile void *p)
{
	unsigned int m = 1 << (b & 0x1f);
	unsigned int prev = __sync_fetch_and_and((volatile u_int *)p + (b >> 5), ~m);
	return (prev & m) != 0;
}

static inline int
__test_and_clear_bit(u_int b, volatile void *p)
{
	volatile u_int *ptr = (volatile u_int *)p;
	int rv = !!(ptr[b >> 5] & (1 << (b & 0x1f)));
	ptr[b >> 5] &= ~(1 << (b & 0x1f));
	return rv;
}

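/*
 * The find_* helpers scan the bitmap a 32-bit word at a time and then
 * walk bit by bit inside the first interesting word.  Callers treat a
 * return value of max (or above) as "no bit found".
 */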
static inline int
find_first_zero_bit(volatile void *p, int max)
{
	int b;
	volatile u_int *ptr = (volatile u_int *)p;

	for (b = 0; b < max; b += 32) {
		if (ptr[b >> 5] != ~0) {
			for (;;) {
				if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0)
					return b;
				b++;
			}
		}
	}
	return max;
}

static inline int
find_next_zero_bit(volatile void *p, int max, int b)
{
	volatile u_int *ptr = (volatile u_int *)p;

	for (; b < max; b += 32) {
		if (ptr[b >> 5] != ~0) {
			for (;;) {
				if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0)
					return b;
				b++;
			}
		}
	}
	return max;
}

static inline int
find_first_bit(volatile void *p, int max)
{
	int b;
	volatile u_int *ptr = (volatile u_int *)p;

	for (b = 0; b < max; b += 32) {
		if (ptr[b >> 5] != 0) {
			for (;;) {
				if (ptr[b >> 5] & (1 << (b & 0x1f)))
					return b;
				b++;
			}
		}
	}
	return max;
}

static inline int
find_next_bit(volatile void *p, int max, int b)
{
	volatile u_int *ptr = (volatile u_int *)p;

	for (; b < max; b += 32) {
		if (ptr[b >> 5] != 0) {
			for (;;) {
				if (ptr[b >> 5] & (1 << (b & 0x1f)))
					return b;
				b++;
			}
		}
	}
	return max;
}

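/*
 * Iterate over the set (or clear) bits below max.  A short sketch with
 * a hypothetical two-word bitmap (do_something() is a stand-in):
 *
 *	u_int map[2] = { 0x11, 0 };
 *	int b;
 *
 *	for_each_set_bit(b, map, 64)
 *		do_something(b);	   b takes the values 0 and 4
 */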
#define for_each_set_bit(b, p, max) \
	for ((b) = find_first_bit((p), (max));			\
	     (b) < (max);					\
	     (b) = find_next_bit((p), (max), (b) + 1))

#define for_each_clear_bit(b, p, max) \
	for ((b) = find_first_zero_bit((p), (max));		\
	     (b) < (max);					\
	     (b) = find_next_zero_bit((p), (max), (b) + 1))

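/*
 * Per-architecture memory barriers.  rmb()/wmb()/mb() order device and
 * regular memory accesses; the smp_*() variants only have to order
 * accesses as observed by other CPUs, so they can be cheaper on
 * strongly ordered architectures such as i386/amd64.
 */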
#if defined(__i386__)
#define rmb()	__asm __volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc")
#define wmb()	__asm __volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc")
#define mb()	__asm __volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc")
#define smp_mb()	__asm __volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc")
#define smp_rmb()	__asm __volatile("" : : : "memory")
#define smp_wmb()	__asm __volatile("" : : : "memory")
#define __smp_store_mb(var, value)	do { (void)xchg(&var, value); } while (0)
#define smp_mb__after_atomic()	do { } while (0)
#define smp_mb__before_atomic()	do { } while (0)
#elif defined(__alpha__)
#define rmb()	alpha_mb()
#define wmb()	alpha_wmb()
#define mb()	alpha_mb()
#elif defined(__amd64__)
#define rmb()	__asm __volatile("lfence" : : : "memory")
#define wmb()	__asm __volatile("sfence" : : : "memory")
#define mb()	__asm __volatile("mfence" : : : "memory")
#define smp_mb()	__asm __volatile("lock; addl $0,-4(%%rsp)" : : : "memory", "cc")
#define smp_rmb()	__asm __volatile("" : : : "memory")
#define smp_wmb()	__asm __volatile("" : : : "memory")
#define __smp_store_mb(var, value)	do { (void)xchg(&var, value); } while (0)
#define smp_mb__after_atomic()	do { } while (0)
#define smp_mb__before_atomic()	do { } while (0)
#elif defined(__aarch64__)
#define rmb()	__membar("dsb ld")
#define wmb()	__membar("dsb st")
#define mb()	__membar("dsb sy")
#elif defined(__mips64__)
#define rmb()	mips_sync()
#define wmb()	mips_sync()
#define mb()	mips_sync()
#elif defined(__powerpc__)
#define rmb()	__asm __volatile("sync" : : : "memory")
#define wmb()	__asm __volatile("sync" : : : "memory")
#define mb()	__asm __volatile("sync" : : : "memory")
#elif defined(__sparc64__)
#define rmb()	membar_sync()
#define wmb()	membar_sync()
#define mb()	membar_sync()
#endif

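/*
 * Architectures that did not provide their own smp_*() or mmiowb()
 * definitions above fall back to the corresponding full barrier.
 */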
#ifndef smp_rmb
#define smp_rmb()	rmb()
#endif

#ifndef smp_wmb
#define smp_wmb()	wmb()
#endif

#ifndef mmiowb
#define mmiowb()	wmb()
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	mb()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	mb()
#endif

#ifndef smp_store_mb
#define smp_store_mb(x, v)	do { x = v; mb(); } while (0)
#endif

#endif
453