xref: /dragonfly/sys/kern/kern_spinlock.c (revision a78dfe84)
1 /*
2  * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Jeffrey M. Hsu. and Matthew Dillon
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of The DragonFly Project nor the names of its
16  *    contributors may be used to endorse or promote products derived
17  *    from this software without specific, prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 /*
34  * The implementation is designed to avoid looping when compatible operations
35  * are executed.
36  *
37  * To acquire a spinlock we first increment counta.  Then we check if counta
38  * meets our requirements.  For an exclusive spinlock it must be 1, of a
39  * shared spinlock it must either be 1 or the SHARED_SPINLOCK bit must be set.
40  *
41  * Shared spinlock failure case: Decrement the count, loop until we can
42  * transition from 0 to SHARED_SPINLOCK|1, or until we find SHARED_SPINLOCK
43  * is set and increment the count.
44  *
45  * Exclusive spinlock failure case: While maintaining the count, clear the
46  * SHARED_SPINLOCK flag unconditionally.  Then use an atomic add to transfer
47  * the count from the low bits to the high bits of counta.  Then loop until
48  * all low bits are 0.  Once the low bits drop to 0 we can transfer the
49  * count back with an atomic_cmpset_int(), atomically, and return.
50  */
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/types.h>
54 #include <sys/kernel.h>
55 #include <sys/sysctl.h>
56 #ifdef INVARIANTS
57 #include <sys/proc.h>
58 #endif
59 #include <sys/priv.h>
60 #include <machine/atomic.h>
61 #include <machine/cpu.h>
62 #include <machine/cpufunc.h>
63 #include <machine/specialreg.h>
64 #include <machine/clock.h>
65 #include <sys/spinlock.h>
66 #include <sys/spinlock2.h>
67 #include <sys/ktr.h>
68 
69 #ifdef _KERNEL_VIRTUAL
70 #include <pthread.h>
71 #endif
72 
73 struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin, "pmap_spin");
74 
75 struct indefinite_info {
76 	sysclock_t	base;
77 	int		secs;
78 	const char	*ident;
79 };
80 
81 /*
82  * Kernal Trace
83  */
84 #if !defined(KTR_SPIN_CONTENTION)
85 #define KTR_SPIN_CONTENTION	KTR_ALL
86 #endif
87 #define SPIN_STRING	"spin=%p type=%c"
88 #define SPIN_ARG_SIZE	(sizeof(void *) + sizeof(int))
89 
90 KTR_INFO_MASTER(spin);
91 #if 0
92 KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
93 KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);
94 #endif
95 
96 #define logspin(name, spin, type)			\
97 	KTR_LOG(spin_ ## name, spin, type)
98 
99 #ifdef INVARIANTS
100 static int spin_lock_test_mode;
101 #endif
102 
103 #ifdef DEBUG_LOCKS_LATENCY
104 
105 static long spinlocks_add_latency;
106 SYSCTL_LONG(_debug, OID_AUTO, spinlocks_add_latency, CTLFLAG_RW,
107     &spinlocks_add_latency, 0,
108     "Add spinlock latency");
109 
110 #endif
111 
112 static int spin_indefinite_check(struct spinlock *spin,
113 				  struct indefinite_info *info);
114 
115 /*
116  * We contested due to another exclusive lock holder.  We lose.
117  *
118  * We have to unwind the attempt and may acquire the spinlock
119  * anyway while doing so.
120  */
121 int
122 spin_trylock_contested(struct spinlock *spin)
123 {
124 	globaldata_t gd = mycpu;
125 
126 	/*
127 	 * Handle degenerate case, else fail.
128 	 */
129 	if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED|0, 1))
130 		return TRUE;
131 	/*atomic_add_int(&spin->counta, -1);*/
132 	--gd->gd_spinlocks;
133 	--gd->gd_curthread->td_critcount;
134 	return (FALSE);
135 }
136 
137 /*
138  * The spin_lock() inline was unable to acquire the lock and calls this
139  * function with spin->counta already incremented, passing (spin->counta - 1)
140  * to the function (the result of the inline's fetchadd).
141  *
142  * atomic_swap_int() is the absolute fastest spinlock instruction, at
143  * least on multi-socket systems.  All instructions seem to be about
144  * the same on single-socket multi-core systems.  However, atomic_swap_int()
145  * does not result in an even distribution of successful acquisitions.
146  *
147  * UNFORTUNATELY we cannot really use atomic_swap_int() when also implementing
148  * shared spin locks, so as we do a better job removing contention we've
149  * moved to atomic_cmpset_int() to be able handle multiple states.
150  *
151  * Another problem we have is that (at least on the 48-core opteron we test
152  * with) having all 48 cores contesting the same spin lock reduces
153  * performance to around 600,000 ops/sec, verses millions when fewer cores
154  * are going after the same lock.
155  *
156  * Backoff algorithms can create even worse starvation problems, and don't
157  * really improve performance when a lot of cores are contending.
158  *
159  * Our solution is to allow the data cache to lazy-update by reading it
160  * non-atomically and only attempting to acquire the lock if the lazy read
161  * looks good.  This effectively limits cache bus bandwidth.  A cpu_pause()
162  * (for intel/amd anyhow) is not strictly needed as cache bus resource use
163  * is governed by the lazy update.
164  *
165  * WARNING!!!!  Performance matters here, by a huge margin.
166  *
167  *	48-core test with pre-read / -j 48 no-modules kernel compile
168  *	with fanned-out inactive and active queues came in at 55 seconds.
169  *
170  *	48-core test with pre-read / -j 48 no-modules kernel compile
171  *	came in at 75 seconds.  Without pre-read it came in at 170 seconds.
172  *
173  *	4-core test with pre-read / -j 48 no-modules kernel compile
174  *	came in at 83 seconds.  Without pre-read it came in at 83 seconds
175  *	as well (no difference).
176  */
177 void
178 _spin_lock_contested(struct spinlock *spin, const char *ident, int value)
179 {
180 	struct indefinite_info info = { 0, 0, ident };
181 	int i;
182 
183 	/*
184 	 * WARNING! Caller has already incremented the lock.  We must
185 	 *	    increment the count value (from the inline's fetch-add)
186 	 *	    to match.
187 	 *
188 	 * Handle the degenerate case where the spinlock is flagged SHARED
189 	 * with only our reference.  We can convert it to EXCLUSIVE.
190 	 */
191 	++value;
192 	if (value == (SPINLOCK_SHARED | 1)) {
193 		if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED | 1, 1))
194 			return;
195 	}
196 
197 	/*
198 	 * Transfer our exclusive request to the high bits and clear the
199 	 * SPINLOCK_SHARED bit if it was set.  This makes the spinlock
200 	 * appear exclusive, preventing any NEW shared or exclusive
201 	 * spinlocks from being obtained while we wait for existing
202 	 * shared or exclusive holders to unlock.
203 	 *
204 	 * Don't tread on earlier exclusive waiters by stealing the lock
205 	 * away early if the low bits happen to now be 1.
206 	 *
207 	 * The shared unlock understands that this may occur.
208 	 */
209 	atomic_add_int(&spin->counta, SPINLOCK_EXCLWAIT - 1);
210 	if (value & SPINLOCK_SHARED)
211 		atomic_clear_int(&spin->counta, SPINLOCK_SHARED);
212 
213 #ifdef DEBUG_LOCKS_LATENCY
214 	long j;
215 	for (j = spinlocks_add_latency; j > 0; --j)
216 		cpu_ccfence();
217 #endif
218 	/*
219 	 * Spin until we can acquire a low-count of 1.
220 	 */
221 	i = 0;
222 	/*logspin(beg, spin, 'w');*/
223 	for (;;) {
224 		/*
225 		 * If the low bits are zero, try to acquire the exclusive lock
226 		 * by transfering our high bit reservation to the low bits.
227 		 *
228 		 * NOTE: Reading spin->counta prior to the swap is extremely
229 		 *	 important on multi-chip/many-core boxes.  On 48-core
230 		 *	 this one change improves fully concurrent all-cores
231 		 *	 compiles by 100% or better.
232 		 *
233 		 *	 I can't emphasize enough how important the pre-read
234 		 *	 is in preventing hw cache bus armageddon on
235 		 *	 multi-chip systems.  And on single-chip/multi-core
236 		 *	 systems it just doesn't hurt.
237 		 */
238 		uint32_t ovalue = spin->counta;
239 		cpu_ccfence();
240 		if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0 &&
241 		    atomic_cmpset_int(&spin->counta, ovalue,
242 				      (ovalue - SPINLOCK_EXCLWAIT) | 1)) {
243 			break;
244 		}
245 		if ((++i & 0x7F) == 0x7F) {
246 			mycpu->gd_cnt.v_lock_name[0] = 'X';
247 			strncpy(mycpu->gd_cnt.v_lock_name + 1,
248 				ident,
249 				sizeof(mycpu->gd_cnt.v_lock_name) - 2);
250 			++mycpu->gd_cnt.v_lock_colls;
251 			if (spin_indefinite_check(spin, &info))
252 				break;
253 		}
254 #ifdef _KERNEL_VIRTUAL
255 		pthread_yield();
256 #endif
257 	}
258 	/*logspin(end, spin, 'w');*/
259 }
260 
261 /*
262  * The spin_lock_shared() inline was unable to acquire the lock and calls
263  * this function with spin->counta already incremented.
264  *
265  * This is not in the critical path unless there is contention between
266  * shared and exclusive holders.
267  */
268 void
269 _spin_lock_shared_contested(struct spinlock *spin, const char *ident)
270 {
271 	struct indefinite_info info = { 0, 0, ident };
272 	int i;
273 
274 	/*
275 	 * Undo the inline's increment.
276 	 */
277 	atomic_add_int(&spin->counta, -1);
278 
279 #ifdef DEBUG_LOCKS_LATENCY
280 	long j;
281 	for (j = spinlocks_add_latency; j > 0; --j)
282 		cpu_ccfence();
283 #endif
284 
285 	/*logspin(beg, spin, 'w');*/
286 	i = 0;
287 	for (;;) {
288 		/*
289 		 * Loop until we can acquire the shared spinlock.  Note that
290 		 * the low bits can be zero while the high EXCLWAIT bits are
291 		 * non-zero.  In this situation exclusive requesters have
292 		 * priority (otherwise shared users on multiple cpus can hog
293 		 * the spinlnock).
294 		 *
295 		 * NOTE: Reading spin->counta prior to the swap is extremely
296 		 *	 important on multi-chip/many-core boxes.  On 48-core
297 		 *	 this one change improves fully concurrent all-cores
298 		 *	 compiles by 100% or better.
299 		 *
300 		 *	 I can't emphasize enough how important the pre-read
301 		 *	 is in preventing hw cache bus armageddon on
302 		 *	 multi-chip systems.  And on single-chip/multi-core
303 		 *	 systems it just doesn't hurt.
304 		 */
305 		uint32_t ovalue = spin->counta;
306 
307 		cpu_ccfence();
308 		if (ovalue == 0) {
309 			if (atomic_cmpset_int(&spin->counta, 0,
310 					      SPINLOCK_SHARED | 1))
311 				break;
312 		} else if (ovalue & SPINLOCK_SHARED) {
313 			if (atomic_cmpset_int(&spin->counta, ovalue,
314 					      ovalue + 1))
315 				break;
316 		}
317 		if ((++i & 0x7F) == 0x7F) {
318 			mycpu->gd_cnt.v_lock_name[0] = 'S';
319 			strncpy(mycpu->gd_cnt.v_lock_name + 1,
320 				ident,
321 				sizeof(mycpu->gd_cnt.v_lock_name) - 2);
322 			++mycpu->gd_cnt.v_lock_colls;
323 			if (spin_indefinite_check(spin, &info))
324 				break;
325 		}
326 #ifdef _KERNEL_VIRTUAL
327 		pthread_yield();
328 #endif
329 	}
330 	/*logspin(end, spin, 'w');*/
331 }
332 
333 static
334 int
335 spin_indefinite_check(struct spinlock *spin, struct indefinite_info *info)
336 {
337 	sysclock_t count;
338 
339 	cpu_spinlock_contested();
340 
341 	count = sys_cputimer->count();
342 	if (info->secs == 0) {
343 		info->base = count;
344 		++info->secs;
345 	} else if (count - info->base > sys_cputimer->freq) {
346 		kprintf("spin_lock: %s(%p), indefinite wait (%d secs)!\n",
347 			info->ident, spin, info->secs);
348 		info->base = count;
349 		++info->secs;
350 		if (panicstr)
351 			return (TRUE);
352 #if defined(INVARIANTS)
353 		if (spin_lock_test_mode) {
354 			print_backtrace(-1);
355 			return (TRUE);
356 		}
357 #endif
358 #if defined(INVARIANTS)
359 		if (info->secs == 11)
360 			print_backtrace(-1);
361 #endif
362 		if (info->secs == 60)
363 			panic("spin_lock: %s(%p), indefinite wait!",
364 			      info->ident, spin);
365 	}
366 	return (FALSE);
367 }
368 
369 /*
370  * If INVARIANTS is enabled various spinlock timing tests can be run
371  * by setting debug.spin_lock_test:
372  *
373  *	1	Test the indefinite wait code
374  *	2	Time the best-case exclusive lock overhead (spin_test_count)
375  *	3	Time the best-case shared lock overhead (spin_test_count)
376  */
377 
378 #ifdef INVARIANTS
379 
380 static int spin_test_count = 10000000;
381 SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0,
382     "Number of iterations to use for spinlock wait code test");
383 
384 static int
385 sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
386 {
387         struct spinlock spin;
388 	int error;
389 	int value = 0;
390 	int i;
391 
392 	if ((error = priv_check(curthread, PRIV_ROOT)) != 0)
393 		return (error);
394 	if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
395 		return (error);
396 
397 	/*
398 	 * Indefinite wait test
399 	 */
400 	if (value == 1) {
401 		spin_init(&spin, "sysctllock");
402 		spin_lock(&spin);	/* force an indefinite wait */
403 		spin_lock_test_mode = 1;
404 		spin_lock(&spin);
405 		spin_unlock(&spin);	/* Clean up the spinlock count */
406 		spin_unlock(&spin);
407 		spin_lock_test_mode = 0;
408 	}
409 
410 	/*
411 	 * Time best-case exclusive spinlocks
412 	 */
413 	if (value == 2) {
414 		globaldata_t gd = mycpu;
415 
416 		spin_init(&spin, "sysctllocktest");
417 		for (i = spin_test_count; i > 0; --i) {
418 		    _spin_lock_quick(gd, &spin, "test");
419 		    spin_unlock_quick(gd, &spin);
420 		}
421 	}
422 
423         return (0);
424 }
425 
426 SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
427         0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");
428 
429 #endif	/* INVARIANTS */
430