xref: /dragonfly/sys/platform/pc64/include/lock.h (revision 956939d5)
1 /*
2  * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/i386/include/lock.h,v 1.11.2.2 2000/09/30 02:49:34 ps Exp $
35  * $DragonFly: src/sys/platform/pc32/include/lock.h,v 1.17 2008/06/19 21:32:55 aggelos Exp $
36  */
37 
38 #ifndef _MACHINE_LOCK_H_
39 #define _MACHINE_LOCK_H_
40 
41 #ifndef _CPU_PSL_H_
42 #include <machine/psl.h>
43 #endif
44 
45 /*
46  * MP_FREE_LOCK is used by both assembly and C under SMP.
47  */
48 #ifdef SMP
49 #define MP_FREE_LOCK		0xffffffff	/* value of lock when free */
50 #endif
51 
52 #ifdef LOCORE
53 
54 /*
55  * Spinlock assembly support.  Note: rax and rcx can be tromped.  No
56  * other register will be.   Note that these routines are sometimes
57  * called with (%rdx) as the mem argument.
58  *
59  * Under UP the spinlock routines still serve to disable/restore
60  * interrupts.
61  */
62 
63 
64 #ifdef SMP
65 
66 #define SPIN_INIT(mem)		/* mark lock free (0) */	\
67 	movq	$0,mem ;					\
68 
69 #define SPIN_INIT_NOREG(mem)	/* same; tromps no regs */	\
70 	SPIN_INIT(mem) ;					\
71 
72 #define SPIN_LOCK(mem)		/* spin: swap 0 -> saved flags */ \
73 	pushfq ;						\
74 	popq	%rcx ;		/* flags */			\
75 	cli ;			/* lock is held with ints off */ \
76 	orq	$PSL_C,%rcx ;	/* make sure non-zero */	\
77 7: ;								\
78 	movq	$0,%rax ;	/* expected contents of lock */	\
79 	lock cmpxchgq %rcx,mem ; /* Z=1 (jz) on success */	\
80 	pause ;			/* NB: also runs once on success */ \
81 	jnz	7b ; 						\
82 
83 #define SPIN_LOCK_PUSH_REGS	/* save regs SPIN_LOCK tromps */ \
84 	subq	$16,%rsp ;	/* room for %rcx and %rax */	\
85 	movq	%rcx,(%rsp) ;					\
86 	movq	%rax,8(%rsp) ;					\
87 
88 #define SPIN_LOCK_POP_REGS					\
89 	movq	(%rsp),%rcx ;					\
90 	movq	8(%rsp),%rax ;					\
91 	addq	$16,%rsp ;					\
92 
93 #define SPIN_LOCK_FRAME_SIZE	16	/* %rsp delta of PUSH_REGS */
94 
95 #define SPIN_LOCK_NOREG(mem)	/* SPIN_LOCK, regs preserved */	\
96 	SPIN_LOCK_PUSH_REGS ;					\
97 	SPIN_LOCK(mem) ;					\
98 	SPIN_LOCK_POP_REGS ;					\
99 
100 #define SPIN_UNLOCK(mem)	/* free lock, restore flags */	\
101 	pushq	mem ;		/* lock held caller's rflags */	\
102 	movq	$0,mem ;	/* release before popfq */	\
103 	popfq ;							\
104 
105 #define SPIN_UNLOCK_PUSH_REGS	/* unlock tromps no registers */
106 #define SPIN_UNLOCK_POP_REGS
107 #define SPIN_UNLOCK_FRAME_SIZE	0
108 
109 #define SPIN_UNLOCK_NOREG(mem)					\
110 	SPIN_UNLOCK(mem) ;					\
111 
112 #else /* !SMP */
113 
114 #define SPIN_LOCK(mem)		/* UP: save rflags in lock, ints off */ \
115 	pushfq ;						\
116 	cli ;							\
117 	orq	$PSL_C,(%rsp) ;	/* make sure non-zero */	\
118 	popq	mem ;						\
119 
120 #define SPIN_LOCK_PUSH_REGS	/* fixed typo: was SPIN_LOCK_PUSH_RESG */
121 #define SPIN_LOCK_POP_REGS
122 #define SPIN_LOCK_FRAME_SIZE	0
123 
124 #define SPIN_UNLOCK(mem)	/* restore rflags saved in lock */ \
125 	pushq	mem ;						\
126 	movq	$0,mem ;					\
127 	popfq ;							\
128 
129 #define SPIN_UNLOCK_PUSH_REGS
130 #define SPIN_UNLOCK_POP_REGS
131 #define SPIN_UNLOCK_FRAME_SIZE	0
132 
133 #endif	/* SMP */
134 
135 #else	/* !LOCORE */
136 
137 #ifdef _KERNEL
138 
139 /*
140  * Spinlock functions (UP and SMP).  Under UP a spinlock still serves
141  * to disable/restore interrupts even if it doesn't spin.
142  */
143 struct spinlock_deprecated {
144 	volatile long	opaque;	/* 0 = free; else saved rflags|PSL_C (see asm SPIN_LOCK) */
145 };
146 
147 typedef struct spinlock_deprecated *spinlock_t;	/* NB: typedef hides the pointer */
148 
149 void	mpintr_lock(void);	/* disables int / spinlock combo */
150 void	mpintr_unlock(void);
151 void	com_lock(void);		/* disables int / spinlock combo */
152 void	com_unlock(void);
153 void	imen_lock(void);	/* disables int / spinlock combo */
154 void	imen_unlock(void);
155 void	clock_lock(void);	/* disables int / spinlock combo */
156 void	clock_unlock(void);
157 
158 extern struct spinlock_deprecated smp_rv_spinlock;	/* presumably the SMP rendezvous lock -- verify at definition */
159 
160 void	spin_lock_deprecated(spinlock_t lock);	/* spin + interrupt disable/restore */
161 void	spin_unlock_deprecated(spinlock_t lock);
162 
163 /*
164  * Inline version of spinlock routines -- overrides assembly.  Only unlock
165  * and init here please.
166  */
167 static __inline void
168 spin_lock_init(spinlock_t lock)
169 {
170 	lock->opaque = 0;	/* 0 == free; matches asm SPIN_INIT */
171 }
172 
173 #endif  /* _KERNEL */
174 
175 #if defined(_KERNEL) || defined(_UTHREAD)
176 
177 /*
178  * MP LOCK functions for SMP and UP.  Under UP the MP lock does not exist
179  * but we leave a few functions intact as macros for convenience.
180  */
181 #ifdef SMP
182 
183 void	get_mplock(void);		/* acquire the giant MP lock */
184 int	try_mplock(void);		/* nonzero on success (cf. UP stub == 1) */
185 void	rel_mplock(void);
186 int	cpu_try_mplock(void);
187 void	cpu_get_initial_mplock(void);
188 
189 extern u_int	mp_lock;		/* owner cpuid, or MP_FREE_LOCK when free */
190 
191 #define MP_LOCK_HELD()   (mp_lock == mycpu->gd_cpuid)
192 #define ASSERT_MP_LOCK_HELD(td)   KASSERT(MP_LOCK_HELD(), ("MP_LOCK_HELD(): not held thread %p", td))
193 
194 static __inline void
195 cpu_rel_mplock(void)
196 {
197 	mp_lock = MP_FREE_LOCK;	/* mark MP lock free (0xffffffff) */
198 }
199 
200 static __inline int
201 owner_mplock(void)
202 {
203 	/* NOTE(review): u_int narrowed to int; MP_FREE_LOCK reads back as -1 */
204 	return (mp_lock);
205 }
205 
206 #else /* !SMP */
207 
208 #define get_mplock()			/* no-op on UP */
209 #define try_mplock()	1		/* always succeeds */
210 #define rel_mplock()
211 #define owner_mplock()	0	/* always cpu 0 */
212 #define MP_LOCK_HELD()	(!0)	/* always true on UP */
213 #define ASSERT_MP_LOCK_HELD(td)
214 
215 #endif	/* SMP */
216 #endif  /* _KERNEL || _UTHREAD */
217 #endif	/* LOCORE */
218 #endif	/* !_MACHINE_LOCK_H_ */
219