xref: /dragonfly/sys/kern/kern_spinlock.c (revision dccc281f)
/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu and Matthew Dillon.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The implementation is designed to avoid looping when compatible operations
 * are executed.
 *
 * To acquire a spinlock we first increment counta.  Then we check if counta
 * meets our requirements.  For an exclusive spinlock it must be 1, for a
 * shared spinlock it must either be 1 or the SPINLOCK_SHARED bit must be set.
 *
 * Shared spinlock failure case: Decrement the count, then loop until we can
 * transition from 0 to SPINLOCK_SHARED|1, or until we find SPINLOCK_SHARED
 * already set and can increment the count.
 *
 * Exclusive spinlock failure case: While maintaining the count, clear the
 * SPINLOCK_SHARED flag unconditionally.  Then use an atomic add to transfer
 * the count from the low bits to the high bits of counta.  Then loop until
 * all low bits are 0.  Once the low bits drop to 0 we can transfer the
 * count back atomically with atomic_cmpset_int() and return.
 */
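
/*
 * Editor's note: the sketch below is an illustrative, non-authoritative
 * rendering of the fast path described above.  The real inlines live in
 * sys/spinlock2.h and additionally handle the critical section and the
 * per-cpu gd_spinlocks accounting; the *_sketch names are hypothetical.
 */
#if 0
static __inline void
spin_lock_sketch(struct spinlock *spin, const char *ident)
{
	uint32_t ovalue;

	/* A 0 -> 1 transition acquires the exclusive lock outright */
	ovalue = atomic_fetchadd_int(&spin->counta, 1);
	if (ovalue != 0)
		_spin_lock_contested(spin, ident, ovalue);
}

static __inline void
spin_lock_shared_sketch(struct spinlock *spin, const char *ident)
{
	uint32_t ovalue;

	/*
	 * The increment succeeds for a shared request if the lock was
	 * previously unheld (0) or already flagged SPINLOCK_SHARED.
	 */
	ovalue = atomic_fetchadd_int(&spin->counta, 1);
	if (ovalue != 0 && (ovalue & SPINLOCK_SHARED) == 0)
		_spin_lock_shared_contested(spin, ident);
}
#endif
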
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <sys/priv.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>
#include <sys/indefinite2.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/ktr.h>

#ifdef _KERNEL_VIRTUAL
#include <pthread.h>
#endif

struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin, "pmap_spin");

/*
 * Kernel Trace
 */
#if !defined(KTR_SPIN_CONTENTION)
#define KTR_SPIN_CONTENTION	KTR_ALL
#endif
#define SPIN_STRING	"spin=%p type=%c"
#define SPIN_ARG_SIZE	(sizeof(void *) + sizeof(int))

KTR_INFO_MASTER(spin);
#if 0
KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);
#endif

#define logspin(name, spin, type)			\
	KTR_LOG(spin_ ## name, spin, type)

#ifdef INVARIANTS
static int spin_lock_test_mode;
#endif

#ifdef DEBUG_LOCKS_LATENCY

static long spinlocks_add_latency;
SYSCTL_LONG(_debug, OID_AUTO, spinlocks_add_latency, CTLFLAG_RW,
    &spinlocks_add_latency, 0,
    "Add spinlock latency");

#endif

static long spin_backoff_max = 4096;
SYSCTL_LONG(_debug, OID_AUTO, spin_backoff_max, CTLFLAG_RW,
    &spin_backoff_max, 0,
    "Spinlock exponential backoff limit");
static long spin_window_shift = 8;	/* 1 << n clock cycles, approx */
SYSCTL_LONG(_debug, OID_AUTO, spin_window_shift, CTLFLAG_RW,
    &spin_window_shift, 0,
    "Spinlock TSC windowing");
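
/*
 * Editor's note: a minimal sketch (not part of the build) of the TSC
 * windowing test that both contested paths below use verbatim.  Each cpu
 * owns a rotating window of roughly (1 << spin_window_shift) TSC cycles:
 * the exclusive path only backs off while outside its own window, and
 * the shared path only ignores the exclusive priority mechanism while
 * inside its own window.  The helper name is hypothetical.
 */
#if 0
static __inline int
spin_in_my_window(void)
{
	return ((rdtsc() >> spin_window_shift) % ncpus == mycpuid);
}
#endif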

/*
 * We contested due to another exclusive lock holder.  We lose.
 *
 * We have to unwind the attempt and may acquire the spinlock
 * anyway while doing so.
 */
int
spin_trylock_contested(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	/*
	 * Handle the degenerate case (lock flagged SHARED with a zero
	 * count, which we can convert to exclusive), else fail.
	 */
	if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED|0, 1))
		return (TRUE);
	/*atomic_add_int(&spin->counta, -1);*/
	--gd->gd_spinlocks;
	crit_exit_raw(gd->gd_curthread);

	return (FALSE);
}
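
/*
 * Editor's note: a hedged usage sketch (not part of the build).  The
 * spin_trylock() inline in sys/spinlock2.h falls back to
 * spin_trylock_contested() above when its fast path fails; a caller
 * typically looks like this:
 */
#if 0
	if (spin_trylock(&spin)) {
		/* ... short critical section ... */
		spin_unlock(&spin);
	} else {
		/* could not get the lock without spinning; take another path */
	}
#endif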

/*
 * The spin_lock() inline was unable to acquire the lock and calls this
 * function with spin->counta already incremented, passing (spin->counta - 1)
 * to the function (the result of the inline's fetchadd).
 *
 * Note that we implement both exclusive and shared spinlocks, so we cannot
 * use atomic_swap_int().  Instead, we try to use atomic_fetchadd_int()
 * to put most of the burden on the cpu.  atomic_cmpset_int() (cmpxchg)
 * can cause a lot of unnecessary looping in situations where it is just
 * trying to increment the count.
 *
 * Similarly, we leave the SHARED flag intact and incur slightly more
 * overhead when switching from shared to exclusive.  This allows us to
 * use atomic_fetchadd_int() for both spinlock types in the critical
 * path.
 *
 * The exponential (n^1.5) backoff algorithm is designed both to reduce
 * cache bus contention between cpu cores and sockets, and to allow some
 * bursting of exclusive locks in heavily contended situations to improve
 * performance.
 *
 * The exclusive lock priority mechanism prevents even heavily contended
 * exclusive locks from being starved by shared locks.
 */
void
_spin_lock_contested(struct spinlock *spin, const char *ident, int value)
{
	indefinite_info_t info;
	uint32_t ovalue;
	long expbackoff;
	long loop;

	/*
	 * WARNING! Caller has already incremented the lock.  We must
	 *	    increment the count value (from the inline's fetch-add)
	 *	    to match.
	 *
	 * Handle the degenerate case where the spinlock is flagged SHARED
	 * with only our reference.  We can convert it to EXCLUSIVE.
	 */
	if (value == (SPINLOCK_SHARED | 1) - 1) {
		if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED | 1, 1))
			return;
	}
	/* ++value; value not used after this */
	info.type = 0;		/* avoid improper gcc warning */
	info.ident = NULL;	/* avoid improper gcc warning */
	expbackoff = 0;

	/*
	 * Transfer our exclusive request to the high bits and clear the
	 * SPINLOCK_SHARED bit if it was set.  This makes the spinlock
	 * appear exclusive, preventing any NEW shared or exclusive
	 * spinlocks from being obtained while we wait for existing
	 * shared or exclusive holders to unlock.
	 *
	 * Don't tread on earlier exclusive waiters by stealing the lock
	 * away early if the low bits happen to now be 1.
	 *
	 * The shared unlock understands that this may occur.
	 */
	ovalue = atomic_fetchadd_int(&spin->counta, SPINLOCK_EXCLWAIT - 1);
	ovalue += SPINLOCK_EXCLWAIT - 1;
	if (ovalue & SPINLOCK_SHARED) {
		atomic_clear_int(&spin->counta, SPINLOCK_SHARED);
		ovalue &= ~SPINLOCK_SHARED;
	}

	for (;;) {
		expbackoff = (expbackoff + 1) * 3 / 2;
		if (expbackoff == 6)		/* 1, 3, 6, 10, ... */
			indefinite_init(&info, ident, 0, 'S');
		if ((rdtsc() >> spin_window_shift) % ncpus != mycpuid) {
			for (loop = expbackoff; loop; --loop)
				cpu_pause();
		}
		/*cpu_lfence();*/

		/*
		 * If the low bits are zero, try to acquire the exclusive lock
		 * by transferring our high bit reservation to the low bits.
		 *
		 * NOTE: Avoid unconditional atomic op by testing ovalue,
		 *	 otherwise we get cache bus armageddon.
		 */
		ovalue = spin->counta;
		cpu_ccfence();
		if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0) {
			if (atomic_fcmpset_int(&spin->counta, &ovalue,
				      (ovalue - SPINLOCK_EXCLWAIT) | 1)) {
				break;
			}
			continue;
		}
		if (expbackoff > 6 + spin_backoff_max)
			expbackoff = 6 + spin_backoff_max;
		if (expbackoff >= 6) {
			if (indefinite_check(&info))
				break;
		}
	}
	if (expbackoff >= 6)
		indefinite_done(&info);
}
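
/*
 * Editor's note: a minimal demonstration (not part of the build) of the
 * backoff sequence generated by the update in _spin_lock_contested()
 * above.  Starting from 0, (expbackoff + 1) * 3 / 2 yields 1, 3, 6, 10,
 * 16, 25, ... growing by roughly 1.5x per pass and capped at
 * 6 + spin_backoff_max; each pass spins for expbackoff cpu_pause()s.
 * The function name is hypothetical.
 */
#if 0
static void
spin_backoff_demo(void)
{
	long expbackoff = 0;
	int i;

	for (i = 0; i < 16; ++i) {
		expbackoff = (expbackoff + 1) * 3 / 2;
		if (expbackoff > 6 + spin_backoff_max)
			expbackoff = 6 + spin_backoff_max;
		kprintf("%ld ", expbackoff);
	}
	kprintf("\n");
}
#endif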

/*
 * The spin_lock_shared() inline was unable to acquire the lock and calls
 * this function with spin->counta already incremented.
 *
 * This is not in the critical path unless there is contention between
 * shared and exclusive holders.
 *
 * Exclusive locks have priority over shared locks.  However, this can
 * cause shared locks to be starved when large numbers of threads are
 * competing for exclusive locks, so the shared lock code uses TSC-windowing
 * to selectively ignore the exclusive priority mechanism.  This has the
 * effect of allowing a limited number of shared locks to compete against
 * exclusive waiters at any given moment.
 *
 * Note that shared locks do not implement exponential backoff.  Instead,
 * the shared lock simply polls the lock value.  One cpu_pause() is built
 * into indefinite_check().
 */
void
_spin_lock_shared_contested(struct spinlock *spin, const char *ident)
{
	indefinite_info_t info;
	uint32_t ovalue;

	/*
	 * Undo the inline's increment.
	 */
	ovalue = atomic_fetchadd_int(&spin->counta, -1) - 1;

	indefinite_init(&info, ident, 0, 's');
	cpu_pause();

#ifdef DEBUG_LOCKS_LATENCY
	long j;
	for (j = spinlocks_add_latency; j > 0; --j)
		cpu_ccfence();
#endif

	for (;;) {
		/*
		 * Loop until we can acquire the shared spinlock.  Note that
		 * the low bits can be zero while the high EXCLWAIT bits are
		 * non-zero.  In this situation exclusive requesters have
		 * priority (otherwise shared users on multiple cpus can hog
		 * the spinlock).
		 *
		 * NOTE: Reading spin->counta prior to the swap is extremely
		 *	 important on multi-chip/many-core boxes.  On 48-core
		 *	 boxes this one change improves fully concurrent
		 *	 all-cores compiles by 100% or better.
		 *
		 *	 I can't emphasize enough how important the pre-read
		 *	 is in preventing hw cache bus armageddon on
		 *	 multi-chip systems.  And on single-chip/multi-core
		 *	 systems it just doesn't hurt.
		 */
		cpu_ccfence();

		/*
		 * Ignore the EXCLWAIT bits if we are inside our window.
		 */
		if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0 &&
		    (rdtsc() >> spin_window_shift) % ncpus == mycpuid) {
			if (atomic_fcmpset_int(&spin->counta, &ovalue,
					       ovalue | SPINLOCK_SHARED | 1)) {
				break;
			}
			continue;
		}

		/*
		 * Check ovalue tightly.  There is no exponential backoff for
		 * shared locks; that would result in horrible performance.
		 * Instead, shared locks depend on the exclusive priority
		 * mechanism to avoid starving exclusive locks.
		 */
		if (ovalue == 0) {
			if (atomic_fcmpset_int(&spin->counta, &ovalue,
					      SPINLOCK_SHARED | 1)) {
				break;
			}
			continue;
		}

		/*
		 * If SHARED is already set, go for the increment, improving
		 * the exclusive to multiple-readers transition.
		 */
		if (ovalue & SPINLOCK_SHARED) {
			ovalue = atomic_fetchadd_int(&spin->counta, 1);
			/* ovalue += 1; NOT NEEDED */
			if (ovalue & SPINLOCK_SHARED)
				break;
			ovalue = atomic_fetchadd_int(&spin->counta, -1);
			ovalue += -1;
			continue;
		}
		if (indefinite_check(&info))
			break;
		/*
		 * ovalue was wrong anyway, just reload
		 */
		ovalue = spin->counta;
	}
	indefinite_done(&info);
}
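
/*
 * Editor's note: a hedged sketch (not part of the build) of how the two
 * contested paths above carve up counta.  The authoritative definitions
 * are in sys/spinlock.h; the helper names are hypothetical, and the
 * exclwait helper assumes SPINLOCK_EXCLWAIT is a power of 2 (it is used
 * as a low-bits mask via SPINLOCK_EXCLWAIT - 1 throughout this file).
 *
 *	SPINLOCK_SHARED		flag bit, lock is currently held shared
 *	low bits		shared/exclusive hold count
 *	high bits		pending exclusive requesters (EXCLWAIT)
 */
#if 0
static __inline uint32_t
spin_hold_count(uint32_t counta)
{
	return (counta & (SPINLOCK_EXCLWAIT - 1));
}

static __inline uint32_t
spin_exclwait_count(uint32_t counta)
{
	return ((counta & ~SPINLOCK_SHARED) / SPINLOCK_EXCLWAIT);
}
#endif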

/*
 * If INVARIANTS is enabled various spinlock timing tests can be run
 * by setting debug.spin_lock_test:
 *
 *	1	Test the indefinite wait code
 *	2	Time the best-case exclusive lock overhead (spin_test_count
 *		iterations)
 *	3	Time the best-case shared lock overhead (not implemented
 *		in this revision)
 */
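
/*
 * Editor's note: example usage from userland, assuming a kernel built
 * with INVARIANTS (the sysctl names come from the declarations below):
 *
 *	sysctl debug.spin_test_count=1000000
 *	sysctl debug.spin_lock_test=2
 */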

#ifdef INVARIANTS

static int spin_test_count = 10000000;
SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0,
    "Number of iterations to use for spinlock wait code test");

static int
sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
{
	struct spinlock spin;
	int error;
	int value = 0;
	int i;

	if ((error = priv_check(curthread, PRIV_ROOT)) != 0)
		return (error);
	if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
		return (error);

	/*
	 * Indefinite wait test
	 */
	if (value == 1) {
		spin_init(&spin, "sysctllock");
		spin_lock(&spin);	/* force an indefinite wait */
		spin_lock_test_mode = 1;
		spin_lock(&spin);
		spin_unlock(&spin);	/* Clean up the spinlock count */
		spin_unlock(&spin);
		spin_lock_test_mode = 0;
	}

	/*
	 * Time best-case exclusive spinlocks
	 */
	if (value == 2) {
		globaldata_t gd = mycpu;

		spin_init(&spin, "sysctllocktest");
		for (i = spin_test_count; i > 0; --i) {
			_spin_lock_quick(gd, &spin, "test");
			spin_unlock_quick(gd, &spin);
		}
	}

	return (0);
}

SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
	0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");

#endif	/* INVARIANTS */