/*	$OpenBSD: atomic.h,v 1.16 2022/08/29 02:01:18 jsg Exp $	*/

/* Public Domain */

#ifndef _M88K_ATOMIC_H_
#define _M88K_ATOMIC_H_

#if defined(_KERNEL)

#ifdef MULTIPROCESSOR

/* actual implementation is hairy, see atomic.S */
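/*
 * Disabling interrupts, as the uniprocessor versions below do, only
 * protects against the local CPU, hence the separate MP implementations.
 */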
void	atomic_setbits_int(volatile unsigned int *, unsigned int);
void	atomic_clearbits_int(volatile unsigned int *, unsigned int);
unsigned int	atomic_add_int_nv_mp(volatile unsigned int *, unsigned int);
unsigned int	atomic_sub_int_nv_mp(volatile unsigned int *, unsigned int);
unsigned int	atomic_cas_uint_mp(unsigned int *, unsigned int, unsigned int);
unsigned int	atomic_swap_uint_mp(unsigned int *, unsigned int);

#define	atomic_add_int_nv	atomic_add_int_nv_mp
#define	atomic_sub_int_nv	atomic_sub_int_nv_mp
#define	atomic_cas_uint		atomic_cas_uint_mp
#define	atomic_swap_uint	atomic_swap_uint_mp

#else

#include <machine/asm_macro.h>
#include <machine/psl.h>
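
/*
 * On a uniprocessor kernel, atomicity only has to hold against
 * interrupts, so each operation below performs a plain read-modify-write
 * with interrupt delivery disabled through the PSR_IND bit of the
 * processor status register.
 */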

static __inline void
atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
{
	u_int psr;

	psr = get_psr();
	set_psr(psr | PSR_IND);
	*uip |= v;
	set_psr(psr);
}

static __inline void
atomic_clearbits_int(volatile unsigned int *uip, unsigned int v)
{
	u_int psr;

	psr = get_psr();
	set_psr(psr | PSR_IND);
	*uip &= ~v;
	set_psr(psr);
}

static __inline unsigned int
atomic_add_int_nv_sp(volatile unsigned int *uip, unsigned int v)
{
	u_int psr;
	unsigned int nv;

	psr = get_psr();
	set_psr(psr | PSR_IND);
	*uip += v;
	nv = *uip;
	set_psr(psr);

	return nv;
}

static __inline unsigned int
atomic_sub_int_nv_sp(volatile unsigned int *uip, unsigned int v)
{
	u_int psr;
	unsigned int nv;

	psr = get_psr();
	set_psr(psr | PSR_IND);
	*uip -= v;
	nv = *uip;
	set_psr(psr);

	return nv;
}

static __inline unsigned int
atomic_cas_uint_sp(unsigned int *p, unsigned int o, unsigned int n)
{
	u_int psr;
	unsigned int ov;

	psr = get_psr();
	set_psr(psr | PSR_IND);
	ov = *p;
	if (ov == o)
		*p = n;
	set_psr(psr);

	return ov;
}

static __inline unsigned int
atomic_swap_uint_sp(unsigned int *p, unsigned int v)
{
	u_int psr;
	unsigned int ov;

	psr = get_psr();
	set_psr(psr | PSR_IND);
	ov = *p;
	*p = v;
	set_psr(psr);

	return ov;
}

#define	atomic_add_int_nv	atomic_add_int_nv_sp
#define	atomic_sub_int_nv	atomic_sub_int_nv_sp
#define	atomic_cas_uint		atomic_cas_uint_sp
#define	atomic_swap_uint	atomic_swap_uint_sp

#endif	/* MULTIPROCESSOR */
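
/*
 * xmem atomically exchanges a register with a word of memory, and %r0
 * always reads as zero on the 88k, so this swaps 0 into *uip and returns
 * the previous contents: an atomic fetch-and-clear that works on MP as
 * well as UP kernels.
 */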
static __inline__ unsigned int
atomic_clear_int(volatile unsigned int *uip)
{
	u_int oldval;

	oldval = 0;
	__asm__ volatile
	    ("xmem %0, %2, %%r0" : "+r"(oldval), "+m"(*uip) : "r"(uip));
	return oldval;
}
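
/*
 * Usage sketch; sc_flags, SC_BUSY and cnt are hypothetical names, not
 * part of this API:
 *
 *	atomic_setbits_int(&sc_flags, SC_BUSY);
 *
 *	unsigned int ov;
 *	do {
 *		ov = cnt;
 *	} while (atomic_cas_uint(&cnt, ov, ov + 1) != ov);
 *
 * The loop retries until no other update sneaks in between reading cnt
 * and the compare-and-swap.
 */

/*
 * int, long and pointers are all 32 bits wide on m88k, which is what
 * makes the blunt casts in the wrappers below safe.
 */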
#define	atomic_add_long_nv(p,v) \
	((unsigned long)atomic_add_int_nv((unsigned int *)(p), (unsigned int)(v)))
#define	atomic_sub_long_nv(p,v) \
	((unsigned long)atomic_sub_int_nv((unsigned int *)(p), (unsigned int)(v)))

#define	atomic_cas_ulong(p,o,n) \
	((unsigned long)atomic_cas_uint((unsigned int *)(p), (unsigned int)(o), \
	 (unsigned int)(n)))
#define	atomic_cas_ptr(p,o,n) \
	((void *)atomic_cas_uint((void *)(p), (unsigned int)(o), (unsigned int)(n)))

#define	atomic_swap_ulong(p,o) \
	((unsigned long)atomic_swap_uint((unsigned int *)(p), (unsigned int)(o)))
#define	atomic_swap_ptr(p,o) \
	((void *)atomic_swap_uint((void *)(p), (unsigned int)(o)))
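
/*
 * tb1 tests a bit and traps if it is set; bit 0 of %r0 is always zero,
 * so this never traps, but the conditional trap still drains the
 * pipeline (hence the flush_pipeline reference below) and the "memory"
 * clobber keeps the compiler from reordering accesses across it.
 */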
static inline void
__sync_synchronize(void)
{
	/* flush_pipeline(); */
	__asm__ volatile ("tb1 0, %%r0, 0" ::: "memory");
}

#else /* _KERNEL */

#if !defined(__GNUC__) || (__GNUC__ < 4)

/*
 * Atomic routines are not available to userland, but we need to prevent
 * <sys/atomic.h> from declaring them as inline wrappers of __sync_* functions,
 * which are not available with gcc 3.
 */

#define	atomic_cas_uint		UNIMPLEMENTED
#define	atomic_cas_ulong	UNIMPLEMENTED
#define	atomic_cas_ptr		UNIMPLEMENTED

#define	atomic_swap_uint	UNIMPLEMENTED
#define	atomic_swap_ulong	UNIMPLEMENTED
#define	atomic_swap_ptr		UNIMPLEMENTED

#define	atomic_add_int_nv	UNIMPLEMENTED
#define	atomic_add_long_nv	UNIMPLEMENTED
#define	atomic_add_int		UNIMPLEMENTED
#define	atomic_add_long		UNIMPLEMENTED

#define	atomic_inc_int		UNIMPLEMENTED
#define	atomic_inc_long		UNIMPLEMENTED

#define	atomic_sub_int_nv	UNIMPLEMENTED
#define	atomic_sub_long_nv	UNIMPLEMENTED
#define	atomic_sub_int		UNIMPLEMENTED
#define	atomic_sub_long		UNIMPLEMENTED

#define	atomic_dec_int		UNIMPLEMENTED
#define	atomic_dec_long		UNIMPLEMENTED

#endif	/* gcc < 4 */

/* trap numbers below 128 would cause a privileged instruction fault */
#define	__membar() do { \
	__asm volatile("tb1 0, %%r0, 128" ::: "memory"); \
} while (0)
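
/*
 * No ordering distinction is made: every membar flavor expands to the
 * same full barrier.
 */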
#define	membar_enter()		__membar()
#define	membar_exit()		__membar()
#define	membar_producer()	__membar()
#define	membar_consumer()	__membar()
#define	membar_sync()		__membar()

#endif	/* defined(_KERNEL) */

#endif	/* _M88K_ATOMIC_H_ */