/*	$NetBSD: lock.h,v 1.3 2002/10/07 23:19:49 bjh21 Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 *
 * NOTE: The SWP insn used here is available only on ARM architecture
 * version 3 and later (as well as 2a).  On CPUs that lack it, we
 * expect the kernel to trap and emulate the insn.  That will be
 * slow, but it gives us the atomicity that we need.
 */

#ifndef _ARM_LOCK_H_
#define	_ARM_LOCK_H_

typedef	__volatile int		__cpu_simple_lock_t;

#define	__SIMPLELOCK_LOCKED	1
#define	__SIMPLELOCK_UNLOCKED	0

/*
 * Atomically exchange __val with *__ptr using the SWP instruction,
 * returning the old contents of *__ptr.  The output operand is marked
 * earlyclobber ("=&r") so the compiler cannot assign it the same
 * register as the pointer operand; SWP with Rd == Rn is unpredictable.
 */
static __inline int
__swp(int __val, __volatile int *__ptr)
{

	__asm __volatile("swp %0, %1, [%2]"
	    : "=&r" (__val) : "r" (__val), "r" (__ptr) : "memory");
	return __val;
}
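
/*
 * For reference, __swp() has the semantics of the following C sketch,
 * except that the real SWP instruction performs the load and store as
 * a single atomic step.  This illustrative function is NOT atomic and
 * is not compiled in; the name is hypothetical.
 */
#if 0
static __inline int
__swp_reference(int __val, __volatile int *__ptr)
{
	int __old = *__ptr;	/* fetch the old value... */

	*__ptr = __val;		/* ...and deposit the new one */
	return __old;
}
#endif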

/*
 * Initialize the lock to the unlocked state.
 */
static __inline void __attribute__((__unused__))
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

/*
 * Acquire the lock: atomically swap in LOCKED, and spin until the
 * value we displaced was UNLOCKED.
 */
static __inline void __attribute__((__unused__))
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{

	while (__swp(__SIMPLELOCK_LOCKED, alp) != __SIMPLELOCK_UNLOCKED)
		continue;
}

/*
 * Try once to acquire the lock; return non-zero if we got it.
 */
static __inline int __attribute__((__unused__))
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__swp(__SIMPLELOCK_LOCKED, alp) == __SIMPLELOCK_UNLOCKED);
}

/*
 * Release the lock.  A plain store suffices here; aligned word
 * stores are atomic on ARM.
 */
static __inline void __attribute__((__unused__))
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}
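
/*
 * Example usage (an illustrative sketch; the names below are
 * hypothetical and this block is not compiled in):
 */
#if 0
static __cpu_simple_lock_t example_slock = __SIMPLELOCK_UNLOCKED;

void
example_critical_section(void)
{

	__cpu_simple_lock(&example_slock);	/* spin until acquired */
	/* ... code that must not run on two CPUs at once ... */
	__cpu_simple_unlock(&example_slock);	/* release the lock */
}
#endif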

#endif /* _ARM_LOCK_H_ */