/*	$OpenBSD: atomic.h,v 1.11 2018/05/14 09:33:20 kettenis Exp $	*/

/* Public Domain */

#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#if defined(_KERNEL)
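
/*
 * The PA-RISC "load and clear" instructions (ldcw/ldcws) require a
 * 16-byte aligned operand, hence the alignment attribute.  They load
 * the old value and atomically leave zero behind, so zero has to be
 * the "locked" state, the opposite of the usual convention.
 */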
typedef volatile u_int __cpu_simple_lock_t __attribute__((__aligned__(16)));

#define __SIMPLELOCK_LOCKED	0
#define __SIMPLELOCK_UNLOCKED	1

static inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *l)
{
	*l = __SIMPLELOCK_UNLOCKED;
}
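
/*
 * Return the previous value of the lock word; ldcws ("load and clear
 * word, short displacement") atomically leaves zero behind, i.e. the
 * word is locked on return whatever its old value was.
 */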
static inline unsigned int
__cpu_simple_lock_ldcws(__cpu_simple_lock_t *l)
{
	unsigned int o;

	asm volatile("ldcws 0(%2), %0" : "=&r" (o), "+m" (*l) : "r" (l));

	return (o);
}

static inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *l)
{
	return (__cpu_simple_lock_ldcws(l) == __SIMPLELOCK_UNLOCKED);
}

static inline void
__cpu_simple_lock(__cpu_simple_lock_t *l)
{
	while (!__cpu_simple_lock_try(l))
		;
}

static inline void
__cpu_simple_unlock(__cpu_simple_lock_t *l)
{
	*l = __SIMPLELOCK_UNLOCKED;
}
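
/*
 * Example (illustrative, not part of this header): protecting a short
 * critical section with the primitives above; "mtx" is a hypothetical
 * lock word.
 *
 *	static __cpu_simple_lock_t mtx;
 *
 *	__cpu_simple_lock_init(&mtx);
 *	...
 *	__cpu_simple_lock(&mtx);
 *	... critical section ...
 *	__cpu_simple_unlock(&mtx);
 */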
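
/*
 * On MULTIPROCESSOR kernels every operation below nests inside one
 * global spinlock, so at most one CPU performs an "atomic" sequence at
 * a time; on uniprocessor kernels blocking interrupts is sufficient.
 */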
#ifdef MULTIPROCESSOR
extern __cpu_simple_lock_t atomic_lock;
#define ATOMIC_LOCK	__cpu_simple_lock(&atomic_lock);
#define ATOMIC_UNLOCK	__cpu_simple_unlock(&atomic_lock);
#else
#define ATOMIC_LOCK
#define ATOMIC_UNLOCK
#endif
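
/*
 * Control register %cr15 is the External Interrupt Enable Mask (EIEM).
 * atomic_enter() saves it, writes zero (%r0) to mask all external
 * interrupts, and then takes the global lock; atomic_leave() undoes
 * both steps in reverse order.
 */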
static inline register_t
atomic_enter(void)
{
	register_t eiem;

	__asm volatile("mfctl	%%cr15, %0": "=r" (eiem));
	__asm volatile("mtctl	%r0, %cr15");
	ATOMIC_LOCK;

	return (eiem);
}

static inline void
atomic_leave(register_t eiem)
{
	ATOMIC_UNLOCK;
	__asm volatile("mtctl	%0, %%cr15":: "r" (eiem));
}

static inline unsigned int
_atomic_cas_uint(volatile unsigned int *uip, unsigned int o, unsigned int n)
{
	register_t eiem;
	unsigned int rv;

	eiem = atomic_enter();
	rv = *uip;
	if (rv == o)
		*uip = n;
	atomic_leave(eiem);

	return (rv);
}
#define atomic_cas_uint(_p, _o, _n) _atomic_cas_uint((_p), (_o), (_n))
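
/*
 * Example (illustrative, not part of this header): the usual
 * compare-and-swap retry loop; "counter" is a hypothetical variable.
 *
 *	volatile unsigned int counter;
 *	unsigned int o;
 *
 *	do {
 *		o = counter;
 *	} while (atomic_cas_uint(&counter, o, o + 1) != o);
 */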

static inline unsigned long
_atomic_cas_ulong(volatile unsigned long *uip, unsigned long o, unsigned long n)
{
	register_t eiem;
	unsigned long rv;

	eiem = atomic_enter();
	rv = *uip;
	if (rv == o)
		*uip = n;
	atomic_leave(eiem);

	return (rv);
}
#define atomic_cas_ulong(_p, _o, _n) _atomic_cas_ulong((_p), (_o), (_n))

static inline void *
_atomic_cas_ptr(volatile void *uip, void *o, void *n)
{
	register_t eiem;
	void * volatile *uipp = (void * volatile *)uip;
	void *rv;

	eiem = atomic_enter();
	rv = *uipp;
	if (rv == o)
		*uipp = n;
	atomic_leave(eiem);

	return (rv);
}
#define atomic_cas_ptr(_p, _o, _n) _atomic_cas_ptr((_p), (_o), (_n))

static inline unsigned int
_atomic_swap_uint(volatile unsigned int *uip, unsigned int n)
{
	register_t eiem;
	unsigned int rv;

	eiem = atomic_enter();
	rv = *uip;
	*uip = n;
	atomic_leave(eiem);

	return (rv);
}
#define atomic_swap_uint(_p, _n) _atomic_swap_uint((_p), (_n))
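
/*
 * Example (illustrative, not part of this header): atomic_swap_uint()
 * as a test-and-set; "busy" is a hypothetical flag word.
 *
 *	volatile unsigned int busy;
 *
 *	if (atomic_swap_uint(&busy, 1) == 0) {
 *		... we claimed the flag; clear it when done ...
 *	}
 */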

static inline unsigned long
_atomic_swap_ulong(volatile unsigned long *uip, unsigned long n)
{
	register_t eiem;
	unsigned long rv;

	eiem = atomic_enter();
	rv = *uip;
	*uip = n;
	atomic_leave(eiem);

	return (rv);
}
#define atomic_swap_ulong(_p, _n) _atomic_swap_ulong((_p), (_n))

static inline void *
_atomic_swap_ptr(volatile void *uip, void *n)
{
	register_t eiem;
	void * volatile *uipp = (void * volatile *)uip;
	void *rv;

	eiem = atomic_enter();
	rv = *uipp;
	*uipp = n;
	atomic_leave(eiem);

	return (rv);
}
#define atomic_swap_ptr(_p, _n) _atomic_swap_ptr((_p), (_n))

static __inline unsigned int
_atomic_add_int_nv(volatile unsigned int *uip, unsigned int v)
{
	register_t eiem;
	unsigned int rv;

	eiem = atomic_enter();
	rv = *uip + v;
	*uip = rv;
	atomic_leave(eiem);

	return (rv);
}
#define atomic_add_int_nv(_uip, _v) _atomic_add_int_nv((_uip), (_v))
#define atomic_sub_int_nv(_uip, _v) _atomic_add_int_nv((_uip), 0 - (_v))
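
/*
 * The subtract forms pass 0 - (_v): unsigned arithmetic wraps around,
 * so adding the two's-complement negation of v subtracts v.
 */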

static __inline unsigned long
_atomic_add_long_nv(volatile unsigned long *uip, unsigned long v)
{
	register_t eiem;
	unsigned long rv;

	eiem = atomic_enter();
	rv = *uip + v;
	*uip = rv;
	atomic_leave(eiem);

	return (rv);
}
#define atomic_add_long_nv(_uip, _v) _atomic_add_long_nv((_uip), (_v))
#define atomic_sub_long_nv(_uip, _v) _atomic_add_long_nv((_uip), 0 - (_v))

static __inline void
atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
{
	register_t eiem;

	eiem = atomic_enter();
	*uip |= v;
	atomic_leave(eiem);
}

static __inline void
atomic_clearbits_int(volatile unsigned int *uip, unsigned int v)
{
	register_t eiem;

	eiem = atomic_enter();
	*uip &= ~v;
	atomic_leave(eiem);
}

static __inline void
atomic_setbits_long(volatile unsigned long *uip, unsigned long v)
{
	register_t eiem;

	eiem = atomic_enter();
	*uip |= v;
	atomic_leave(eiem);
}

static __inline void
atomic_clearbits_long(volatile unsigned long *uip, unsigned long v)
{
	register_t eiem;

	eiem = atomic_enter();
	*uip &= ~v;
	atomic_leave(eiem);
}
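
/*
 * Example (illustrative, not part of this header): flag manipulation
 * on a shared status word; "sc_flags" and "SCF_BUSY" are hypothetical
 * names.
 *
 *	atomic_setbits_int(&sc->sc_flags, SCF_BUSY);
 *	...
 *	atomic_clearbits_int(&sc->sc_flags, SCF_BUSY);
 */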

#endif /* defined(_KERNEL) */

/*
 * Although the PA-RISC 2.0 architecture allows an implementation to
 * be weakly ordered, all PA-RISC processors to date implement a
 * strong memory ordering model.  So all we need is a compiler
 * barrier.
 */

static inline void
__insn_barrier(void)
{
	__asm volatile("" : : : "memory");
}

#define membar_enter()		__insn_barrier()
#define membar_exit()		__insn_barrier()
#define membar_producer()	__insn_barrier()
#define membar_consumer()	__insn_barrier()
#define membar_sync()		__insn_barrier()
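
/*
 * Example (illustrative, not part of this header): publishing data
 * through a flag; "data" and "ready" are hypothetical variables shared
 * with a consumer that pairs its read of "ready" with membar_consumer()
 * before reading "data".
 *
 *	data = v;
 *	membar_producer();
 *	ready = 1;
 */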

#endif /* _MACHINE_ATOMIC_H_ */