xref: /openbsd/sys/arch/m88k/include/lock.h (revision a6445c1d)
1 #ifndef	_M88K_LOCK_H_
2 #define	_M88K_LOCK_H_
3 /*	$OpenBSD: lock.h,v 1.10 2014/07/15 16:28:11 miod Exp $	*/
4 
5 /*
6  * Copyright (c) 2005, Miodrag Vallat.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
21  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
25  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
26  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  * POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <m88k/asm.h>
31 
/*
 * Simple spin lock word.  Declared volatile so that the busy-wait
 * reads in __cpu_simple_lock() are performed from memory on every
 * iteration rather than cached in a register.
 */
typedef volatile u_int	__cpu_simple_lock_t;

/* do not change these - code below assumes r0 == __SIMPLELOCK_UNLOCKED */
#define	__SIMPLELOCK_LOCKED	1
#define	__SIMPLELOCK_UNLOCKED	0
37 
38 static __inline__ void
39 __cpu_simple_lock_init(__cpu_simple_lock_t *l)
40 {
41 	*l = __SIMPLELOCK_UNLOCKED;
42 }
43 
/*
 * Try once to acquire the lock.  Returns non-zero on success.
 */
static __inline__ int
__cpu_simple_lock_try(__cpu_simple_lock_t *l)
{
	/*
	 * The local copy of the lock value is a plain u_int, not the
	 * volatile __cpu_simple_lock_t, so that there is no pipeline
	 * synchronization around stores to it.  xmem will do the right
	 * thing regardless of the volatile qualifier.
	 */
	u_int old = __SIMPLELOCK_LOCKED;

	/*
	 * xmem atomically exchanges the `old' register with the lock
	 * word at address l (r0 is the hardwired-zero register, used
	 * here as a zero index).  Afterwards `old' holds the previous
	 * lock value and the lock word holds __SIMPLELOCK_LOCKED.
	 */
	__asm__ volatile
	    ("xmem %0, %2, %%r0" : "+r"(old), "+m"(*l) : "r"(l));

	/* We got the lock iff it was previously unlocked. */
	return (old == __SIMPLELOCK_UNLOCKED);
}
59 
60 static __inline__ void
61 __cpu_simple_lock(__cpu_simple_lock_t *l)
62 {
63 	for (;;) {
64 		if (__cpu_simple_lock_try(l) != 0)
65 			break;
66 		while (*l != __SIMPLELOCK_UNLOCKED)
67 			;	/* spin without exclusive bus access */
68 	}
69 }
70 
71 static __inline__ void
72 __cpu_simple_unlock(__cpu_simple_lock_t *l)
73 {
74 	*l = __SIMPLELOCK_UNLOCKED;
75 }
76 
#if defined(_KERNEL) && defined(MULTIPROCESSOR)
/*
 * Compare-and-swap helper for the MI rwlock code.  Evaluates to
 * non-zero on failure, i.e. when atomic_cas_ulong() returns a value
 * different from the expected old value `o'.
 */
#define	rw_cas(p, o, n) (atomic_cas_ulong(p, o, n) != o)
#endif
80 
81 #endif	/* _M88K_LOCK_H_ */
82