/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL
#error "This file should not be included by userland programs."
#endif

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
#include <machine/cpufunc.h>

extern struct spinlock pmap_spin;

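/*
 * Contested-path helpers.  The inline fast paths below fall into these
 * when the lock cannot be obtained with a single atomic operation.
 */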
int spin_trylock_contested(struct spinlock *spin);
void _spin_lock_contested(struct spinlock *spin, const char *ident, int count);
void _spin_lock_shared_contested(struct spinlock *spin, const char *ident);

#define spin_lock(spin)			_spin_lock(spin, __func__)
#define spin_lock_quick(spin)		_spin_lock_quick(spin, __func__)
#define spin_lock_shared(spin)		_spin_lock_shared(spin, __func__)
#define spin_lock_shared_quick(spin)	_spin_lock_shared_quick(spin, __func__)

/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();
	if (atomic_cmpset_int(&spin->counta, 0, 1) == 0)
		return (spin_trylock_contested(spin));
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
	return (TRUE);
}
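
/*
 * Illustrative sketch (hypothetical struct and field names, not part of
 * this header): a typical try-lock pattern.  On failure the caller backs
 * off instead of spinning.
 *
 *	struct foo {
 *		struct spinlock	fo_spin;
 *		int		fo_count;
 *	};
 *
 *	static boolean_t
 *	foo_try_bump(struct foo *fo)
 *	{
 *		if (spin_trylock(&fo->fo_spin) == FALSE)
 *			return (FALSE);
 *		++fo->fo_count;
 *		spin_unlock(&fo->fo_spin);
 *		return (TRUE);
 *	}
 */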

/*
 * Return TRUE if the spinlock is held (we can't tell by whom, though)
 */
static __inline int
spin_held(struct spinlock *spin)
{
	return((spin->counta & ~SPINLOCK_SHARED) != 0);
}

/*
 * Obtain an exclusive spinlock and return.  It is possible for the
 * SPINLOCK_SHARED bit to already be set, in which case the contested
 * code is called to fix it up.
 */
static __inline void
_spin_lock_quick(globaldata_t gd, struct spinlock *spin, const char *ident)
{
	int count;

	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	count = atomic_fetchadd_int(&spin->counta, 1);
	if (__predict_false(count != 0)) {
		_spin_lock_contested(spin, ident, count);
	}
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}

static __inline void
_spin_lock(struct spinlock *spin, const char *ident)
{
	_spin_lock_quick(mycpu, spin, ident);
}

/*
 * Release an exclusive spinlock.  We can just do this passively, only
 * ensuring that our spinlock count is left intact until the spinlock
 * itself is cleared.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
	/*
	 * Don't use a locked instruction here.  To reduce latency we avoid
	 * reading spin->counta prior to writing to it.
	 */
#ifdef DEBUG_LOCKS
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);
	cpu_sfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	cpu_ccfence();
	--gd->gd_spinlocks;
	crit_exit_raw(gd->gd_curthread);
}

static __inline void
spin_unlock(struct spinlock *spin)
{
	spin_unlock_quick(mycpu, spin);
}
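
/*
 * Illustrative sketch (continuing the hypothetical struct foo above):
 * initializing a spinlock and using the exclusive lock to protect a
 * short critical section.  Exclusive spinlocks are not reentrant and
 * should only be held briefly.
 *
 *	static void
 *	foo_init(struct foo *fo)
 *	{
 *		spin_init(&fo->fo_spin, "foospin");
 *		fo->fo_count = 0;
 *	}
 *
 *	static void
 *	foo_bump(struct foo *fo)
 *	{
 *		spin_lock(&fo->fo_spin);
 *		++fo->fo_count;
 *		spin_unlock(&fo->fo_spin);
 *	}
 */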

/*
 * Shared spinlock.  Acquire a count; if SPINLOCK_SHARED is not already
 * set then try a trivial conversion and drop into the contested code if
 * the trivial conversion fails.  The SHARED bit is 'cached' when lock
 * counts go to 0 so the critical path is typically just the fetchadd.
 *
 * WARNING!  Due to the way exclusive conflict resolution works, we cannot
 *	     just unconditionally set the SHARED bit on previous-count == 0.
 *	     Doing so will interfere with the exclusive contended code.
 */
static __inline void
_spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin,
			const char *ident)
{
	int counta;

	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	counta = atomic_fetchadd_int(&spin->counta, 1);
	if (__predict_false((counta & SPINLOCK_SHARED) == 0)) {
		if (counta != 0 ||
		    !atomic_cmpset_int(&spin->counta, 1, SPINLOCK_SHARED | 1)) {
			_spin_lock_shared_contested(spin, ident);
		}
	}
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}

/*
 * Unlock a shared lock.  For convenience we allow the last transition
 * to be to (SPINLOCK_SHARED|0), leaving the SPINLOCK_SHARED bit set
 * with a count of 0, which will optimize the next shared lock obtained.
 *
 * WARNING! In order to implement shared and exclusive spinlocks, an
 *	    exclusive request will convert a multiply-held shared lock
 *	    to exclusive and wait for shared holders to unlock.  So keep
 *	    in mind that as of now the spinlock could actually be in an
 *	    exclusive state.
 */
static __inline void
spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
#ifdef DEBUG_LOCKS
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);

#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	cpu_ccfence();
	--gd->gd_spinlocks;
	crit_exit_raw(gd->gd_curthread);
}

static __inline void
_spin_lock_shared(struct spinlock *spin, const char *ident)
{
	_spin_lock_shared_quick(mycpu, spin, ident);
}

static __inline void
spin_unlock_shared(struct spinlock *spin)
{
	spin_unlock_shared_quick(mycpu, spin);
}
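
/*
 * Illustrative sketch (continuing the hypothetical struct foo above):
 * a shared lock allows concurrent readers while excluding exclusive
 * holders, which is useful for read-mostly data.
 *
 *	static int
 *	foo_read_count(struct foo *fo)
 *	{
 *		int count;
 *
 *		spin_lock_shared(&fo->fo_spin);
 *		count = fo->fo_count;
 *		spin_unlock_shared(&fo->fo_spin);
 *		return (count);
 *	}
 */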

/*
 * Attempt to upgrade a shared spinlock to exclusive.  Return non-zero
 * on success, 0 on failure.
 */
static __inline int
spin_lock_upgrade_try(struct spinlock *spin)
{
	if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED|1, 1))
		return 1;
	else
		return 0;
}

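/*
 * Illustrative sketch (continuing the hypothetical struct foo above):
 * opportunistically upgrade a shared hold to exclusive.  The upgrade
 * only succeeds when we are the sole shared holder, so the fallback
 * drops the shared lock and reacquires exclusively (the protected data
 * may have changed in between and should be revalidated if that matters).
 *
 *	static void
 *	foo_reset_count(struct foo *fo)
 *	{
 *		spin_lock_shared(&fo->fo_spin);
 *		if (!spin_lock_upgrade_try(&fo->fo_spin)) {
 *			spin_unlock_shared(&fo->fo_spin);
 *			spin_lock(&fo->fo_spin);
 *		}
 *		fo->fo_count = 0;
 *		spin_unlock(&fo->fo_spin);
 *	}
 */
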
static __inline void
spin_init(struct spinlock *spin, const char *descr __unused)
{
	spin->counta = 0;
	spin->countb = 0;
#if 0
	spin->descr  = descr;
#endif
}

static __inline void
spin_uninit(struct spinlock *spin)
{
	/* unused */
}

#endif	/* _SYS_SPINLOCK2_H_ */