1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Berkeley Software Design Inc's name may not be used to endorse or
15 * promote products derived from this software without specific prior
16 * written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
31 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
32 */
33
34 /*
35 * Machine independent bits of mutex implementation.
36 */
37
38 #include <sys/cdefs.h>
39 #include "opt_adaptive_mutexes.h"
40 #include "opt_ddb.h"
41 #include "opt_hwpmc_hooks.h"
42 #include "opt_sched.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/bus.h>
47 #include <sys/conf.h>
48 #include <sys/kdb.h>
49 #include <sys/kernel.h>
50 #include <sys/ktr.h>
51 #include <sys/lock.h>
52 #include <sys/malloc.h>
53 #include <sys/mutex.h>
54 #include <sys/proc.h>
55 #include <sys/resourcevar.h>
56 #include <sys/sched.h>
57 #include <sys/sbuf.h>
58 #include <sys/smp.h>
59 #include <sys/sysctl.h>
60 #include <sys/turnstile.h>
61 #include <sys/vmmeter.h>
62 #include <sys/lock_profile.h>
63
64 #include <machine/atomic.h>
65 #include <machine/bus.h>
66 #include <machine/cpu.h>
67
68 #include <ddb/ddb.h>
69
70 #include <fs/devfs/devfs_int.h>
71
72 #include <vm/vm.h>
73 #include <vm/vm_extern.h>
74
75 #if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
76 #define ADAPTIVE_MUTEXES
77 #endif
78
79 #ifdef HWPMC_HOOKS
80 #include <sys/pmckern.h>
81 PMC_SOFT_DEFINE( , , lock, failed);
82 #endif
83
84 /*
85 * Return the mutex address when the lock cookie address is provided.
86 * This functionality assumes that struct mtx has a member named mtx_lock.
87 */
88 #define mtxlock2mtx(c) (__containerof(c, struct mtx, mtx_lock))
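
/*
 * Illustrative sketch (hypothetical names): the mutex KPI macros pass the
 * address of the mtx_lock word as the lock cookie, and mtxlock2mtx()
 * recovers the enclosing mutex from it via __containerof():
 *
 *	struct mtx foo_mtx;
 *	volatile uintptr_t *c = &foo_mtx.mtx_lock;
 *	struct mtx *m = mtxlock2mtx(c);		now m == &foo_mtx
 */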
89
90 /*
91 * Internal utility macros.
92 */
93 #define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
94
95 #define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)
96
97 static void assert_mtx(const struct lock_object *lock, int what);
98 #ifdef DDB
99 static void db_show_mtx(const struct lock_object *lock);
100 #endif
101 static void lock_mtx(struct lock_object *lock, uintptr_t how);
102 static void lock_spin(struct lock_object *lock, uintptr_t how);
103 #ifdef KDTRACE_HOOKS
104 static int owner_mtx(const struct lock_object *lock,
105 struct thread **owner);
106 #endif
107 static uintptr_t unlock_mtx(struct lock_object *lock);
108 static uintptr_t unlock_spin(struct lock_object *lock);
109
110 /*
111 * Lock classes for sleep and spin mutexes.
112 */
113 struct lock_class lock_class_mtx_sleep = {
114 .lc_name = "sleep mutex",
115 .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
116 .lc_assert = assert_mtx,
117 #ifdef DDB
118 .lc_ddb_show = db_show_mtx,
119 #endif
120 .lc_lock = lock_mtx,
121 .lc_unlock = unlock_mtx,
122 #ifdef KDTRACE_HOOKS
123 .lc_owner = owner_mtx,
124 #endif
125 };
126 struct lock_class lock_class_mtx_spin = {
127 .lc_name = "spin mutex",
128 .lc_flags = LC_SPINLOCK | LC_RECURSABLE,
129 .lc_assert = assert_mtx,
130 #ifdef DDB
131 .lc_ddb_show = db_show_mtx,
132 #endif
133 .lc_lock = lock_spin,
134 .lc_unlock = unlock_spin,
135 #ifdef KDTRACE_HOOKS
136 .lc_owner = owner_mtx,
137 #endif
138 };
139
140 #ifdef ADAPTIVE_MUTEXES
141 #ifdef MUTEX_CUSTOM_BACKOFF
142 static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
143 "mtx debugging");
144
145 static struct lock_delay_config __read_frequently mtx_delay;
146
147 SYSCTL_U16(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
148 0, "");
149 SYSCTL_U16(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
150 0, "");
151
152 LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
153 #else
154 #define mtx_delay locks_delay
155 #endif
156 #endif
157
158 #ifdef MUTEX_SPIN_CUSTOM_BACKOFF
159 static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin,
160 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
161 "mtx spin debugging");
162
163 static struct lock_delay_config __read_frequently mtx_spin_delay;
164
165 SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
166 &mtx_spin_delay.base, 0, "");
167 SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
168 &mtx_spin_delay.max, 0, "");
169
170 LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);
171 #else
172 #define mtx_spin_delay locks_delay
173 #endif
174
175 /*
176 * System-wide mutexes
177 */
178 struct mtx blocked_lock;
179 struct mtx __exclusive_cache_line Giant;
180
181 static void _mtx_lock_indefinite_check(struct mtx *, struct lock_delay_arg *);
182
183 void
184 assert_mtx(const struct lock_object *lock, int what)
185 {
186
187 /*
188 * Treat LA_LOCKED as if LA_XLOCKED was asserted.
189 *
190 * Some callers of lc_assert use LA_LOCKED to indicate that either
191 * a shared lock or a write lock is held, while other callers use
192 * the stricter LA_XLOCKED (used as MA_OWNED).
193 *
194 * Mutexes are the only lock class that cannot be shared; as a result,
195 * we can reasonably assume that the caller really intends to assert
196 * LA_XLOCKED when asserting LA_LOCKED on a mutex object.
197 */
198 if (what & LA_LOCKED) {
199 what &= ~LA_LOCKED;
200 what |= LA_XLOCKED;
201 }
202 mtx_assert((const struct mtx *)lock, what);
203 }
204
205 void
206 lock_mtx(struct lock_object *lock, uintptr_t how)
207 {
208
209 mtx_lock((struct mtx *)lock);
210 }
211
212 void
213 lock_spin(struct lock_object *lock, uintptr_t how)
214 {
215
216 mtx_lock_spin((struct mtx *)lock);
217 }
218
219 uintptr_t
220 unlock_mtx(struct lock_object *lock)
221 {
222 struct mtx *m;
223
224 m = (struct mtx *)lock;
225 mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
226 mtx_unlock(m);
227 return (0);
228 }
229
230 uintptr_t
231 unlock_spin(struct lock_object *lock)
232 {
233 struct mtx *m;
234
235 m = (struct mtx *)lock;
236 mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
237 mtx_unlock_spin(m);
238 return (0);
239 }
240
241 #ifdef KDTRACE_HOOKS
242 int
243 owner_mtx(const struct lock_object *lock, struct thread **owner)
244 {
245 const struct mtx *m;
246 uintptr_t x;
247
248 m = (const struct mtx *)lock;
249 x = m->mtx_lock;
250 *owner = (struct thread *)(x & ~MTX_FLAGMASK);
251 return (*owner != NULL);
252 }
253 #endif
254
255 /*
256 * Function versions of the inlined __mtx_* macros. These are used by
257 * modules and can also be called from assembly language if needed.
258 */
259 void
260 __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
261 {
262 struct mtx *m;
263 uintptr_t tid, v;
264
265 m = mtxlock2mtx(c);
266
267 KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
268 !TD_IS_IDLETHREAD(curthread),
269 ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
270 curthread, m->lock_object.lo_name, file, line));
271 KASSERT(m->mtx_lock != MTX_DESTROYED,
272 ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
273 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
274 ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
275 file, line));
276 WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
277 LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
278
279 tid = (uintptr_t)curthread;
280 v = MTX_UNOWNED;
281 if (!_mtx_obtain_lock_fetch(m, &v, tid))
282 _mtx_lock_sleep(m, v, opts, file, line);
283 else
284 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
285 m, 0, 0, file, line);
286 LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
287 line);
288 WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
289 file, line);
290 TD_LOCKS_INC(curthread);
291 }
292
293 void
294 __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
295 {
296 struct mtx *m;
297
298 m = mtxlock2mtx(c);
299
300 KASSERT(m->mtx_lock != MTX_DESTROYED,
301 ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
302 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
303 ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
304 file, line));
305 WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
306 LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
307 line);
308 mtx_assert(m, MA_OWNED);
309
310 #ifdef LOCK_PROFILING
311 __mtx_unlock_sleep(c, (uintptr_t)curthread, opts, file, line);
312 #else
313 __mtx_unlock(m, curthread, opts, file, line);
314 #endif
315 TD_LOCKS_DEC(curthread);
316 }
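
/*
 * A minimal, hypothetical usage sketch of the sleep-mutex KPI backed by the
 * two functions above ("foo_mtx" is an illustrative name only):
 *
 *	mtx_lock(&foo_mtx);
 *	... access the data protected by foo_mtx ...
 *	mtx_unlock(&foo_mtx);
 */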
317
318 void
319 __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
320 int line)
321 {
322 struct mtx *m;
323 #ifdef SMP
324 uintptr_t tid, v;
325 #endif
326
327 m = mtxlock2mtx(c);
328
329 KASSERT(m->mtx_lock != MTX_DESTROYED,
330 ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
331 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
332 ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
333 m->lock_object.lo_name, file, line));
334 if (mtx_owned(m))
335 KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
336 (opts & MTX_RECURSE) != 0,
337 ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
338 m->lock_object.lo_name, file, line));
339 opts &= ~MTX_RECURSE;
340 WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
341 file, line, NULL);
342 #ifdef SMP
343 spinlock_enter();
344 tid = (uintptr_t)curthread;
345 v = MTX_UNOWNED;
346 if (!_mtx_obtain_lock_fetch(m, &v, tid))
347 _mtx_lock_spin(m, v, opts, file, line);
348 else
349 LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire,
350 m, 0, 0, file, line);
351 #else
352 __mtx_lock_spin(m, curthread, opts, file, line);
353 #endif
354 LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
355 line);
356 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
357 }
358
359 int
360 __mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
361 int line)
362 {
363 struct mtx *m;
364
365 if (SCHEDULER_STOPPED())
366 return (1);
367
368 m = mtxlock2mtx(c);
369
370 KASSERT(m->mtx_lock != MTX_DESTROYED,
371 ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
372 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
373 ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
374 m->lock_object.lo_name, file, line));
375 KASSERT((opts & MTX_RECURSE) == 0,
376 ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
377 m->lock_object.lo_name, file, line));
378 if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
379 LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
380 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
381 return (1);
382 }
383 LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
384 return (0);
385 }
386
387 void
388 __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
389 int line)
390 {
391 struct mtx *m;
392
393 m = mtxlock2mtx(c);
394
395 KASSERT(m->mtx_lock != MTX_DESTROYED,
396 ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
397 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
398 ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
399 m->lock_object.lo_name, file, line));
400 WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
401 LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
402 line);
403 mtx_assert(m, MA_OWNED);
404
405 __mtx_unlock_spin(m);
406 }
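
/*
 * A hypothetical sketch of the spin-mutex counterpart; spin mutexes disable
 * interrupts via spinlock_enter() for the duration of the hold, so the
 * critical section must be short and must never sleep:
 *
 *	mtx_lock_spin(&foo_spin_mtx);
 *	... short, non-sleeping critical section ...
 *	mtx_unlock_spin(&foo_spin_mtx);
 */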
407
408 /*
409 * The important part of mtx_trylock{,_flags}().
410 * Tries to acquire lock `m.' If this function is called on a mutex that
411 * is already owned, it will recursively acquire the lock.
412 */
413 int
414 _mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
415 {
416 struct thread *td;
417 uintptr_t tid, v;
418 #ifdef LOCK_PROFILING
419 uint64_t waittime = 0;
420 int contested = 0;
421 #endif
422 int rval;
423 bool recursed;
424
425 td = curthread;
426 tid = (uintptr_t)td;
427 if (SCHEDULER_STOPPED())
428 return (1);
429
430 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
431 ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
432 curthread, m->lock_object.lo_name, file, line));
433 KASSERT(m->mtx_lock != MTX_DESTROYED,
434 ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
435 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
436 ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
437 file, line));
438
439 rval = 1;
440 recursed = false;
441 v = MTX_UNOWNED;
442 for (;;) {
443 if (_mtx_obtain_lock_fetch(m, &v, tid))
444 break;
445 if (v == MTX_UNOWNED)
446 continue;
447 if (v == tid &&
448 ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
449 (opts & MTX_RECURSE) != 0)) {
450 m->mtx_recurse++;
451 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
452 recursed = true;
453 break;
454 }
455 rval = 0;
456 break;
457 }
458
459 opts &= ~MTX_RECURSE;
460
461 LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
462 if (rval) {
463 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
464 file, line);
465 TD_LOCKS_INC(curthread);
466 if (!recursed)
467 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
468 m, contested, waittime, file, line);
469 }
470
471 return (rval);
472 }
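
/*
 * A hypothetical caller pattern for the trylock KPI backed above; the return
 * value must be checked and the mutex is only unlocked on success:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... work that must not block on foo_mtx ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... fall back, e.g. defer the work ...
 *	}
 */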
473
474 int
475 _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
476 {
477 struct mtx *m;
478
479 m = mtxlock2mtx(c);
480 return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG));
481 }
482
483 /*
484 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
485 *
486 * We call this if the lock is either contested (i.e. we need to go to
487 * sleep waiting for it), or if we need to recurse on it.
488 */
489 #if LOCK_DEBUG > 0
490 void
491 __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
492 int line)
493 #else
494 void
495 __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
496 #endif
497 {
498 struct thread *td;
499 struct mtx *m;
500 struct turnstile *ts;
501 uintptr_t tid;
502 struct thread *owner;
503 #ifdef LOCK_PROFILING
504 int contested = 0;
505 uint64_t waittime = 0;
506 #endif
507 #if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
508 struct lock_delay_arg lda;
509 #endif
510 #ifdef KDTRACE_HOOKS
511 u_int sleep_cnt = 0;
512 int64_t sleep_time = 0;
513 int64_t all_time = 0;
514 #endif
515 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
516 int doing_lockprof = 0;
517 #endif
518
519 td = curthread;
520 tid = (uintptr_t)td;
521 m = mtxlock2mtx(c);
522
523 #ifdef KDTRACE_HOOKS
524 if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
525 while (v == MTX_UNOWNED) {
526 if (_mtx_obtain_lock_fetch(m, &v, tid))
527 goto out_lockstat;
528 }
529 doing_lockprof = 1;
530 all_time -= lockstat_nsecs(&m->lock_object);
531 }
532 #endif
533 #ifdef LOCK_PROFILING
534 doing_lockprof = 1;
535 #endif
536
537 if (SCHEDULER_STOPPED())
538 return;
539
540 if (__predict_false(v == MTX_UNOWNED))
541 v = MTX_READ_VALUE(m);
542
543 if (__predict_false(lv_mtx_owner(v) == td)) {
544 KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
545 (opts & MTX_RECURSE) != 0,
546 ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
547 m->lock_object.lo_name, file, line));
548 #if LOCK_DEBUG > 0
549 opts &= ~MTX_RECURSE;
550 #endif
551 m->mtx_recurse++;
552 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
553 if (LOCK_LOG_TEST(&m->lock_object, opts))
554 CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
555 return;
556 }
557 #if LOCK_DEBUG > 0
558 opts &= ~MTX_RECURSE;
559 #endif
560
561 #if defined(ADAPTIVE_MUTEXES)
562 lock_delay_arg_init(&lda, &mtx_delay);
563 #elif defined(KDTRACE_HOOKS)
564 lock_delay_arg_init_noadapt(&lda);
565 #endif
566
567 #ifdef HWPMC_HOOKS
568 PMC_SOFT_CALL( , , lock, failed);
569 #endif
570 lock_profile_obtain_lock_failed(&m->lock_object, false,
571 &contested, &waittime);
572 if (LOCK_LOG_TEST(&m->lock_object, opts))
573 CTR4(KTR_LOCK,
574 "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
575 m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
576
577 THREAD_CONTENDS_ON_LOCK(&m->lock_object);
578
579 for (;;) {
580 if (v == MTX_UNOWNED) {
581 if (_mtx_obtain_lock_fetch(m, &v, tid))
582 break;
583 continue;
584 }
585 #ifdef KDTRACE_HOOKS
586 lda.spin_cnt++;
587 #endif
588 #ifdef ADAPTIVE_MUTEXES
589 /*
590 * If the owner is running on another CPU, spin until the
591 * owner stops running or the state of the lock changes.
592 */
593 owner = lv_mtx_owner(v);
594 if (TD_IS_RUNNING(owner)) {
595 if (LOCK_LOG_TEST(&m->lock_object, 0))
596 CTR3(KTR_LOCK,
597 "%s: spinning on %p held by %p",
598 __func__, m, owner);
599 KTR_STATE1(KTR_SCHED, "thread",
600 sched_tdname((struct thread *)tid),
601 "spinning", "lockname:\"%s\"",
602 m->lock_object.lo_name);
603 do {
604 lock_delay(&lda);
605 v = MTX_READ_VALUE(m);
606 owner = lv_mtx_owner(v);
607 } while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
608 KTR_STATE0(KTR_SCHED, "thread",
609 sched_tdname((struct thread *)tid),
610 "running");
611 continue;
612 }
613 #endif
614
615 ts = turnstile_trywait(&m->lock_object);
616 v = MTX_READ_VALUE(m);
617 retry_turnstile:
618
619 /*
620 * Check if the lock has been released while spinning for
621 * the turnstile chain lock.
622 */
623 if (v == MTX_UNOWNED) {
624 turnstile_cancel(ts);
625 continue;
626 }
627
628 #ifdef ADAPTIVE_MUTEXES
629 /*
630 * The current lock owner might have started executing
631 * on another CPU (or the lock could have changed
632 * owners) while we were waiting on the turnstile
633 * chain lock. If so, drop the turnstile lock and try
634 * again.
635 */
636 owner = lv_mtx_owner(v);
637 if (TD_IS_RUNNING(owner)) {
638 turnstile_cancel(ts);
639 continue;
640 }
641 #endif
642
643 /*
644 * If the mutex isn't already contested and a failure occurs
645 * setting the contested bit, the mutex was either released
646 * or the state of the MTX_RECURSED bit changed.
647 */
648 if ((v & MTX_CONTESTED) == 0 &&
649 !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
650 goto retry_turnstile;
651 }
652
653 /*
654 * We definitely must sleep for this lock.
655 */
656 mtx_assert(m, MA_NOTOWNED);
657
658 /*
659 * Block on the turnstile.
660 */
661 #ifdef KDTRACE_HOOKS
662 sleep_time -= lockstat_nsecs(&m->lock_object);
663 #endif
664 #ifndef ADAPTIVE_MUTEXES
665 owner = mtx_owner(m);
666 #endif
667 MPASS(owner == mtx_owner(m));
668 turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
669 #ifdef KDTRACE_HOOKS
670 sleep_time += lockstat_nsecs(&m->lock_object);
671 sleep_cnt++;
672 #endif
673 v = MTX_READ_VALUE(m);
674 }
675 THREAD_CONTENTION_DONE(&m->lock_object);
676 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
677 if (__predict_true(!doing_lockprof))
678 return;
679 #endif
680 #ifdef KDTRACE_HOOKS
681 all_time += lockstat_nsecs(&m->lock_object);
682 if (sleep_time)
683 LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);
684
685 /*
686 * Only record the loops spinning and not sleeping.
687 */
688 if (lda.spin_cnt > sleep_cnt)
689 LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
690 out_lockstat:
691 #endif
692 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
693 waittime, file, line);
694 }
695
696 #ifdef SMP
697 /*
698 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
699 *
700 * This is only called if we need to actually spin for the lock. Recursion
701 * is handled inline.
702 */
703 #if LOCK_DEBUG > 0
704 void
705 _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
706 const char *file, int line)
707 #else
708 void
709 _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
710 #endif
711 {
712 struct mtx *m;
713 struct lock_delay_arg lda;
714 uintptr_t tid;
715 #ifdef LOCK_PROFILING
716 int contested = 0;
717 uint64_t waittime = 0;
718 #endif
719 #ifdef KDTRACE_HOOKS
720 int64_t spin_time = 0;
721 #endif
722 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
723 int doing_lockprof = 0;
724 #endif
725
726 tid = (uintptr_t)curthread;
727 m = mtxlock2mtx(c);
728
729 #ifdef KDTRACE_HOOKS
730 if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
731 while (v == MTX_UNOWNED) {
732 if (_mtx_obtain_lock_fetch(m, &v, tid))
733 goto out_lockstat;
734 }
735 doing_lockprof = 1;
736 spin_time -= lockstat_nsecs(&m->lock_object);
737 }
738 #endif
739 #ifdef LOCK_PROFILING
740 doing_lockprof = 1;
741 #endif
742
743 if (__predict_false(v == MTX_UNOWNED))
744 v = MTX_READ_VALUE(m);
745
746 if (__predict_false(v == tid)) {
747 m->mtx_recurse++;
748 return;
749 }
750
751 if (SCHEDULER_STOPPED())
752 return;
753
754 if (LOCK_LOG_TEST(&m->lock_object, opts))
755 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
756 KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
757 "spinning", "lockname:\"%s\"", m->lock_object.lo_name);
758
759 lock_delay_arg_init(&lda, &mtx_spin_delay);
760
761 #ifdef HWPMC_HOOKS
762 PMC_SOFT_CALL( , , lock, failed);
763 #endif
764 lock_profile_obtain_lock_failed(&m->lock_object, true, &contested, &waittime);
765
766 for (;;) {
767 if (v == MTX_UNOWNED) {
768 if (_mtx_obtain_lock_fetch(m, &v, tid))
769 break;
770 continue;
771 }
772 /* Give interrupts a chance while we spin. */
773 spinlock_exit();
774 do {
775 if (__predict_true(lda.spin_cnt < 10000000)) {
776 lock_delay(&lda);
777 } else {
778 _mtx_lock_indefinite_check(m, &lda);
779 }
780 v = MTX_READ_VALUE(m);
781 } while (v != MTX_UNOWNED);
782 spinlock_enter();
783 }
784
785 if (LOCK_LOG_TEST(&m->lock_object, opts))
786 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
787 KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
788 "running");
789
790 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
791 if (__predict_true(!doing_lockprof))
792 return;
793 #endif
794 #ifdef KDTRACE_HOOKS
795 spin_time += lockstat_nsecs(&m->lock_object);
796 if (lda.spin_cnt != 0)
797 LOCKSTAT_RECORD1(spin__spin, m, spin_time);
798 out_lockstat:
799 #endif
800 LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m,
801 contested, waittime, file, line);
802 }
803 #endif /* SMP */
804
805 #ifdef INVARIANTS
806 static void
807 thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
808 {
809
810 KASSERT(m->mtx_lock != MTX_DESTROYED,
811 ("thread_lock() of destroyed mutex @ %s:%d", file, line));
812 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
813 ("thread_lock() of sleep mutex %s @ %s:%d",
814 m->lock_object.lo_name, file, line));
815 KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) == 0,
816 ("thread_lock: got a recursive mutex %s @ %s:%d\n",
817 m->lock_object.lo_name, file, line));
818 WITNESS_CHECKORDER(&m->lock_object,
819 opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
820 }
821 #else
822 #define thread_lock_validate(m, opts, file, line) do { } while (0)
823 #endif
824
825 #ifndef LOCK_PROFILING
826 #if LOCK_DEBUG > 0
827 void
828 _thread_lock(struct thread *td, int opts, const char *file, int line)
829 #else
830 void
831 _thread_lock(struct thread *td)
832 #endif
833 {
834 struct mtx *m;
835 uintptr_t tid;
836
837 tid = (uintptr_t)curthread;
838
839 if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire)))
840 goto slowpath_noirq;
841 spinlock_enter();
842 m = td->td_lock;
843 thread_lock_validate(m, 0, file, line);
844 if (__predict_false(m == &blocked_lock))
845 goto slowpath_unlocked;
846 if (__predict_false(!_mtx_obtain_lock(m, tid)))
847 goto slowpath_unlocked;
848 if (__predict_true(m == td->td_lock)) {
849 WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
850 return;
851 }
852 _mtx_release_lock_quick(m);
853 slowpath_unlocked:
854 spinlock_exit();
855 slowpath_noirq:
856 #if LOCK_DEBUG > 0
857 thread_lock_flags_(td, opts, file, line);
858 #else
859 thread_lock_flags_(td, 0, 0, 0);
860 #endif
861 }
862 #endif
863
864 void
865 thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
866 {
867 struct mtx *m;
868 uintptr_t tid, v;
869 struct lock_delay_arg lda;
870 #ifdef LOCK_PROFILING
871 int contested = 0;
872 uint64_t waittime = 0;
873 #endif
874 #ifdef KDTRACE_HOOKS
875 int64_t spin_time = 0;
876 #endif
877 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
878 int doing_lockprof = 1;
879 #endif
880
881 tid = (uintptr_t)curthread;
882
883 if (SCHEDULER_STOPPED()) {
884 /*
885 * Ensure that spinlock sections are balanced even when the
886 * scheduler is stopped, since we may otherwise inadvertently
887 * re-enable interrupts while dumping core.
888 */
889 spinlock_enter();
890 return;
891 }
892
893 lock_delay_arg_init(&lda, &mtx_spin_delay);
894
895 #ifdef HWPMC_HOOKS
896 PMC_SOFT_CALL( , , lock, failed);
897 #endif
898
899 #ifdef LOCK_PROFILING
900 doing_lockprof = 1;
901 #elif defined(KDTRACE_HOOKS)
902 doing_lockprof = lockstat_enabled;
903 #endif
904 #ifdef KDTRACE_HOOKS
905 if (__predict_false(doing_lockprof))
906 spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
907 #endif
908 spinlock_enter();
909
910 for (;;) {
911 retry:
912 m = td->td_lock;
913 thread_lock_validate(m, opts, file, line);
914 v = MTX_READ_VALUE(m);
915 for (;;) {
916 if (v == MTX_UNOWNED) {
917 if (_mtx_obtain_lock_fetch(m, &v, tid))
918 break;
919 continue;
920 }
921 MPASS(v != tid);
922 lock_profile_obtain_lock_failed(&m->lock_object, true,
923 &contested, &waittime);
924 /* Give interrupts a chance while we spin. */
925 spinlock_exit();
926 do {
927 if (__predict_true(lda.spin_cnt < 10000000)) {
928 lock_delay(&lda);
929 } else {
930 _mtx_lock_indefinite_check(m, &lda);
931 }
932 if (m != td->td_lock) {
933 spinlock_enter();
934 goto retry;
935 }
936 v = MTX_READ_VALUE(m);
937 } while (v != MTX_UNOWNED);
938 spinlock_enter();
939 }
940 if (m == td->td_lock)
941 break;
942 _mtx_release_lock_quick(m);
943 }
944 LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
945 line);
946 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
947
948 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
949 if (__predict_true(!doing_lockprof))
950 return;
951 #endif
952 #ifdef KDTRACE_HOOKS
953 spin_time += lockstat_nsecs(&m->lock_object);
954 #endif
955 LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m, contested,
956 waittime, file, line);
957 #ifdef KDTRACE_HOOKS
958 if (lda.spin_cnt != 0)
959 LOCKSTAT_RECORD1(thread__spin, m, spin_time);
960 #endif
961 }
962
963 struct mtx *
964 thread_lock_block(struct thread *td)
965 {
966 struct mtx *lock;
967
968 lock = td->td_lock;
969 mtx_assert(lock, MA_OWNED);
970 td->td_lock = &blocked_lock;
971
972 return (lock);
973 }
974
975 void
976 thread_lock_unblock(struct thread *td, struct mtx *new)
977 {
978
979 mtx_assert(new, MA_OWNED);
980 KASSERT(td->td_lock == &blocked_lock,
981 ("thread %p lock %p not blocked_lock %p",
982 td, td->td_lock, &blocked_lock));
983 atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
984 }
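
/*
 * A rough, hypothetical sketch of the hand-off enabled by the two helpers
 * above; the real callers are the schedulers when migrating a thread
 * between run queues ("new_lock" stands in for the destination queue's
 * spin lock):
 *
 *	old = thread_lock_block(td);		td->td_lock now points at blocked_lock
 *	... move td to its new run queue ...
 *	thread_lock_unblock(td, new_lock);	td->td_lock now points at new_lock
 *
 * The caller still owns "old" afterwards and releases it through its normal
 * unlock path.
 */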
985
986 void
987 thread_lock_block_wait(struct thread *td)
988 {
989
990 while (td->td_lock == &blocked_lock)
991 cpu_spinwait();
992
993 /* Acquire fence to be certain that all thread state is visible. */
994 atomic_thread_fence_acq();
995 }
996
997 void
998 thread_lock_set(struct thread *td, struct mtx *new)
999 {
1000 struct mtx *lock;
1001
1002 mtx_assert(new, MA_OWNED);
1003 lock = td->td_lock;
1004 mtx_assert(lock, MA_OWNED);
1005 td->td_lock = new;
1006 mtx_unlock_spin(lock);
1007 }
1008
1009 /*
1010 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
1011 *
1012 * We are only called here if the lock is recursed, contested (i.e. we
1013 * need to wake up a blocked thread) or a lockstat probe is active.
1014 */
1015 #if LOCK_DEBUG > 0
1016 void
1017 __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
1018 const char *file, int line)
1019 #else
1020 void
1021 __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
1022 #endif
1023 {
1024 struct mtx *m;
1025 struct turnstile *ts;
1026 uintptr_t tid;
1027
1028 if (SCHEDULER_STOPPED())
1029 return;
1030
1031 tid = (uintptr_t)curthread;
1032 m = mtxlock2mtx(c);
1033
1034 if (__predict_false(v == tid))
1035 v = MTX_READ_VALUE(m);
1036
1037 if (__predict_false(v & MTX_RECURSED)) {
1038 if (--(m->mtx_recurse) == 0)
1039 atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
1040 if (LOCK_LOG_TEST(&m->lock_object, opts))
1041 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
1042 return;
1043 }
1044
1045 LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
1046 if (v == tid && _mtx_release_lock(m, tid))
1047 return;
1048
1049 /*
1050 * We have to lock the chain before the turnstile so this turnstile
1051 * can be removed from the hash list if it is empty.
1052 */
1053 turnstile_chain_lock(&m->lock_object);
1054 _mtx_release_lock_quick(m);
1055 ts = turnstile_lookup(&m->lock_object);
1056 MPASS(ts != NULL);
1057 if (LOCK_LOG_TEST(&m->lock_object, opts))
1058 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
1059 turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
1060
1061 /*
1062 * This turnstile is now no longer associated with the mutex. We can
1063 * unlock the chain lock so a new turnstile may take its place.
1064 */
1065 turnstile_unpend(ts);
1066 turnstile_chain_unlock(&m->lock_object);
1067 }
1068
1069 /*
1070 * All the unlocking of MTX_SPIN locks is done inline.
1071 * See the __mtx_unlock_spin() macro for the details.
1072 */
1073
1074 /*
1075 * The backing function for the INVARIANTS-enabled mtx_assert()
1076 */
1077 #ifdef INVARIANT_SUPPORT
1078 void
1079 __mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
1080 {
1081 const struct mtx *m;
1082
1083 if (KERNEL_PANICKED() || dumping || SCHEDULER_STOPPED())
1084 return;
1085
1086 m = mtxlock2mtx(c);
1087
1088 switch (what) {
1089 case MA_OWNED:
1090 case MA_OWNED | MA_RECURSED:
1091 case MA_OWNED | MA_NOTRECURSED:
1092 if (!mtx_owned(m))
1093 panic("mutex %s not owned at %s:%d",
1094 m->lock_object.lo_name, file, line);
1095 if (mtx_recursed(m)) {
1096 if ((what & MA_NOTRECURSED) != 0)
1097 panic("mutex %s recursed at %s:%d",
1098 m->lock_object.lo_name, file, line);
1099 } else if ((what & MA_RECURSED) != 0) {
1100 panic("mutex %s unrecursed at %s:%d",
1101 m->lock_object.lo_name, file, line);
1102 }
1103 break;
1104 case MA_NOTOWNED:
1105 if (mtx_owned(m))
1106 panic("mutex %s owned at %s:%d",
1107 m->lock_object.lo_name, file, line);
1108 break;
1109 default:
1110 panic("unknown mtx_assert at %s:%d", file, line);
1111 }
1112 }
1113 #endif
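
/*
 * A minimal, hypothetical example of the assertion KPI backed above; with
 * INVARIANTS enabled the assertion panics if it does not hold:
 *
 *	mtx_lock(&foo_mtx);
 *	mtx_assert(&foo_mtx, MA_OWNED | MA_NOTRECURSED);
 *	...
 *	mtx_unlock(&foo_mtx);
 */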
1114
1115 /*
1116 * General init routine used by the MTX_SYSINIT() macro.
1117 */
1118 void
1119 mtx_sysinit(void *arg)
1120 {
1121 struct mtx_args *margs = arg;
1122
1123 mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
1124 margs->ma_opts);
1125 }
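
/*
 * A hypothetical example of the MTX_SYSINIT() macro that funnels into
 * mtx_sysinit() above; the mutex is set up automatically during boot:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx_init, &foo_mtx, "foo mutex", MTX_DEF);
 */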
1126
1127 /*
1128 * Mutex initialization routine; initialize lock `m' with the type and
1129 * options contained in `opts' and name `name.' The optional
1130 * lock type `type' is used as a general lock category name for use with
1131 * witness.
1132 */
1133 void
1134 _mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
1135 {
1136 struct mtx *m;
1137 struct lock_class *class;
1138 int flags;
1139
1140 m = mtxlock2mtx(c);
1141
1142 MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
1143 MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
1144 ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
1145 ("%s: mtx_lock not aligned for %s: %p", __func__, name,
1146 &m->mtx_lock));
1147
1148 /* Determine lock class and lock flags. */
1149 if (opts & MTX_SPIN)
1150 class = &lock_class_mtx_spin;
1151 else
1152 class = &lock_class_mtx_sleep;
1153 flags = 0;
1154 if (opts & MTX_QUIET)
1155 flags |= LO_QUIET;
1156 if (opts & MTX_RECURSE)
1157 flags |= LO_RECURSABLE;
1158 if ((opts & MTX_NOWITNESS) == 0)
1159 flags |= LO_WITNESS;
1160 if (opts & MTX_DUPOK)
1161 flags |= LO_DUPOK;
1162 if (opts & MTX_NOPROFILE)
1163 flags |= LO_NOPROFILE;
1164 if (opts & MTX_NEW)
1165 flags |= LO_NEW;
1166
1167 /* Initialize mutex. */
1168 lock_init(&m->lock_object, class, name, type, flags);
1169
1170 m->mtx_lock = MTX_UNOWNED;
1171 m->mtx_recurse = 0;
1172 }
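
/*
 * Hypothetical examples of the initialization KPI above, showing how the
 * MTX_* options select the lock class and flags:
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	mtx_init(&foo_rec_mtx, "foo rec", NULL, MTX_DEF | MTX_RECURSE);
 *	mtx_init(&foo_spin_mtx, "foo spin", NULL, MTX_SPIN | MTX_NOWITNESS);
 */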
1173
1174 /*
1175 * Destroy lock `m'. We don't allow MTX_QUIET to be
1176 * passed in as a flag here because if the corresponding mtx_init() was
1177 * called with MTX_QUIET set, then it will already be set in the mutex's
1178 * flags.
1179 */
1180 void
1181 _mtx_destroy(volatile uintptr_t *c)
1182 {
1183 struct mtx *m;
1184
1185 m = mtxlock2mtx(c);
1186
1187 if (!mtx_owned(m))
1188 MPASS(mtx_unowned(m));
1189 else {
1190 MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
1191
1192 /* Perform the non-mtx related part of mtx_unlock_spin(). */
1193 if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) {
1194 lock_profile_release_lock(&m->lock_object, true);
1195 spinlock_exit();
1196 } else {
1197 TD_LOCKS_DEC(curthread);
1198 lock_profile_release_lock(&m->lock_object, false);
1199 }
1200
1201 /* Tell witness this isn't locked to make it happy. */
1202 WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
1203 __LINE__);
1204 }
1205
1206 m->mtx_lock = MTX_DESTROYED;
1207 lock_destroy(&m->lock_object);
1208 }
1209
1210 /*
1211 * Initialize the mutex code and system mutexes. This is called from the MD
1212 * startup code prior to mi_startup(). The per-CPU data space needs to be
1213 * set up before this is called.
1214 */
1215 void
1216 mutex_init(void)
1217 {
1218
1219 /* Setup turnstiles so that sleep mutexes work. */
1220 init_turnstiles();
1221
1222 /*
1223 * Initialize mutexes.
1224 */
1225 mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
1226 mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
1227 blocked_lock.mtx_lock = 0xdeadc0de; /* Always blocked. */
1228 mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
1229 mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
1230 mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
1231 mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
1232 mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
1233 mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
1234 mtx_lock(&Giant);
1235 }
1236
1237 static void __noinline
1238 _mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
1239 {
1240 struct thread *td;
1241
1242 ldap->spin_cnt++;
1243 if (ldap->spin_cnt < 60000000 || kdb_active || KERNEL_PANICKED())
1244 cpu_lock_delay();
1245 else {
1246 td = mtx_owner(m);
1247
1248 /* If the mutex is unlocked, try again. */
1249 if (td == NULL)
1250 return;
1251
1252 printf( "spin lock %p (%s) held by %p (tid %d) too long\n",
1253 m, m->lock_object.lo_name, td, td->td_tid);
1254 #ifdef WITNESS
1255 witness_display_spinlock(&m->lock_object, td, printf);
1256 #endif
1257 panic("spin lock held too long");
1258 }
1259 cpu_spinwait();
1260 }
1261
1262 void
1263 mtx_spin_wait_unlocked(struct mtx *m)
1264 {
1265 struct lock_delay_arg lda;
1266
1267 KASSERT(m->mtx_lock != MTX_DESTROYED,
1268 ("%s() of destroyed mutex %p", __func__, m));
1269 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
1270 ("%s() of sleep mutex %p (%s)", __func__, m,
1271 m->lock_object.lo_name));
1272 KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
1273 m->lock_object.lo_name));
1274
1275 lda.spin_cnt = 0;
1276
1277 while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) {
1278 if (__predict_true(lda.spin_cnt < 10000000)) {
1279 cpu_spinwait();
1280 lda.spin_cnt++;
1281 } else {
1282 _mtx_lock_indefinite_check(m, &lda);
1283 }
1284 }
1285 }
1286
1287 void
1288 mtx_wait_unlocked(struct mtx *m)
1289 {
1290 struct thread *owner;
1291 uintptr_t v;
1292
1293 KASSERT(m->mtx_lock != MTX_DESTROYED,
1294 ("%s() of destroyed mutex %p", __func__, m));
1295 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
1296 ("%s() not a sleep mutex %p (%s)", __func__, m,
1297 m->lock_object.lo_name));
1298 KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
1299 m->lock_object.lo_name));
1300
1301 for (;;) {
1302 v = atomic_load_acq_ptr(&m->mtx_lock);
1303 if (v == MTX_UNOWNED) {
1304 break;
1305 }
1306 owner = lv_mtx_owner(v);
1307 if (!TD_IS_RUNNING(owner)) {
1308 mtx_lock(m);
1309 mtx_unlock(m);
1310 break;
1311 }
1312 cpu_spinwait();
1313 }
1314 }
1315
1316 #ifdef DDB
1317 void
1318 db_show_mtx(const struct lock_object *lock)
1319 {
1320 struct thread *td;
1321 const struct mtx *m;
1322
1323 m = (const struct mtx *)lock;
1324
1325 db_printf(" flags: {");
1326 if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
1327 db_printf("SPIN");
1328 else
1329 db_printf("DEF");
1330 if (m->lock_object.lo_flags & LO_RECURSABLE)
1331 db_printf(", RECURSE");
1332 if (m->lock_object.lo_flags & LO_DUPOK)
1333 db_printf(", DUPOK");
1334 db_printf("}\n");
1335 db_printf(" state: {");
1336 if (mtx_unowned(m))
1337 db_printf("UNOWNED");
1338 else if (mtx_destroyed(m))
1339 db_printf("DESTROYED");
1340 else {
1341 db_printf("OWNED");
1342 if (m->mtx_lock & MTX_CONTESTED)
1343 db_printf(", CONTESTED");
1344 if (m->mtx_lock & MTX_RECURSED)
1345 db_printf(", RECURSED");
1346 }
1347 db_printf("}\n");
1348 if (!mtx_unowned(m) && !mtx_destroyed(m)) {
1349 td = mtx_owner(m);
1350 db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
1351 td->td_tid, td->td_proc->p_pid, td->td_name);
1352 if (mtx_recursed(m))
1353 db_printf(" recursed: %d\n", m->mtx_recurse);
1354 }
1355 }
1356 #endif
1357