/* xref: /linux/include/linux/atomic.h (revision 44f57d78) */
/* SPDX-License-Identifier: GPL-2.0 */
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */

/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 *
 * If an architecture overrides __atomic_acquire_fence() it will probably
 * want to define smp_mb__after_spinlock().
 */
/*
 * Fallback fences: an architecture that needs something other than the
 * generic smp_mb__before_atomic()/smp_mb__after_atomic() barriers can
 * provide its own definitions before including this header.
 */
#ifndef __atomic_acquire_fence
#define __atomic_acquire_fence		smp_mb__after_atomic
#endif

#ifndef __atomic_release_fence
#define __atomic_release_fence		smp_mb__before_atomic
#endif

#ifndef __atomic_pre_full_fence
#define __atomic_pre_full_fence		smp_mb__before_atomic
#endif

#ifndef __atomic_post_full_fence
#define __atomic_post_full_fence	smp_mb__after_atomic
#endif

52 #define __atomic_op_acquire(op, args...)				\
53 ({									\
54 	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
55 	__atomic_acquire_fence();					\
56 	__ret;								\
57 })
58 
/*
 * Build the RELEASE variant of @op from its _relaxed form: issue a
 * release fence first so that earlier memory accesses cannot be
 * reordered past the atomic operation, then perform the relaxed op.
 * Evaluates to the value returned by op##_relaxed(args).
 */
#define __atomic_op_release(op, args...)				\
({									\
	__atomic_release_fence();					\
	op##_relaxed(args);						\
})

65 #define __atomic_op_fence(op, args...)					\
66 ({									\
67 	typeof(op##_relaxed(args)) __ret;				\
68 	__atomic_pre_full_fence();					\
69 	__ret = op##_relaxed(args);					\
70 	__atomic_post_full_fence();					\
71 	__ret;								\
72 })

#include <linux/atomic-fallback.h>

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */