xref: /freebsd/sys/kern/kern_lock.c (revision e28a4053)
1 /*-
2  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice(s), this list of conditions and the following disclaimer as
10  *    the first lines of this file unmodified other than the possible
11  *    addition of one or more copyright notices.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice(s), this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26  * DAMAGE.
27  */
28 
29 #include "opt_adaptive_lockmgrs.h"
30 #include "opt_ddb.h"
31 #include "opt_kdtrace.h"
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/ktr.h>
38 #include <sys/linker_set.h>
39 #include <sys/lock.h>
40 #include <sys/lock_profile.h>
41 #include <sys/lockmgr.h>
42 #include <sys/mutex.h>
43 #include <sys/proc.h>
44 #include <sys/sleepqueue.h>
45 #ifdef DEBUG_LOCKS
46 #include <sys/stack.h>
47 #endif
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50 
51 #include <machine/cpu.h>
52 
53 #ifdef DDB
54 #include <ddb/ddb.h>
55 #endif
56 
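/*
 * Compile-time checks: the lockmgr-specific flags stored in lo_flags must
 * fit into the lock class flag bits (LO_CLASSFLAGS), and LK_UNLOCKED must
 * not carry any waiters or spinners bits.
 */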
57 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
58     (LK_ADAPTIVE | LK_NOSHARE));
59 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
60     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
61 
62 #define	SQ_EXCLUSIVE_QUEUE	0
63 #define	SQ_SHARED_QUEUE		1
64 
65 #ifndef INVARIANTS
66 #define	_lockmgr_assert(lk, what, file, line)
67 #define	TD_LOCKS_INC(td)
68 #define	TD_LOCKS_DEC(td)
69 #else
70 #define	TD_LOCKS_INC(td)	((td)->td_locks++)
71 #define	TD_LOCKS_DEC(td)	((td)->td_locks--)
72 #endif
73 #define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
74 #define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
75 
76 #ifndef DEBUG_LOCKS
77 #define	STACK_PRINT(lk)
78 #define	STACK_SAVE(lk)
79 #define	STACK_ZERO(lk)
80 #else
81 #define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
82 #define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
83 #define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
84 #endif
85 
86 #define	LOCK_LOG2(lk, string, arg1, arg2)				\
87 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
88 		CTR2(KTR_LOCK, (string), (arg1), (arg2))
89 #define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
90 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
91 		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
92 
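/*
 * Giant handling: a thread may hold Giant (possibly recursively) while
 * acquiring a lockmgr lock.  GIANT_SAVE() fully releases Giant before
 * sleeping or spinning and records the recursion depth in _i, so that
 * GIANT_RESTORE() can reacquire it the same number of times afterwards.
 */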
93 #define	GIANT_DECLARE							\
94 	int _i = 0;							\
95 	WITNESS_SAVE_DECL(Giant)
96 #define	GIANT_RESTORE() do {						\
97 	if (_i > 0) {							\
98 		while (_i--)						\
99 			mtx_lock(&Giant);				\
100 		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
101 	}								\
102 } while (0)
103 #define	GIANT_SAVE() do {						\
104 	if (mtx_owned(&Giant)) {					\
105 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
106 		while (mtx_owned(&Giant)) {				\
107 			_i++;						\
108 			mtx_unlock(&Giant);				\
109 		}							\
110 	}								\
111 } while (0)
112 
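/*
 * A shared lock request can be granted immediately only if the lock is
 * already in shared mode and at least one of the following holds: there
 * are no exclusive waiters, there are no exclusive spinners, the
 * requesting thread already holds shared lockmgr locks, or it has
 * deadlock treatment (TDP_DEADLKTREAT) enabled.
 */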
113 #define	LK_CAN_SHARE(x)							\
114 	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
115 	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
116 	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
117 #define	LK_TRYOP(x)							\
118 	((x) & LK_NOWAIT)
119 
120 #define	LK_CAN_WITNESS(x)						\
121 	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
122 #define	LK_TRYWIT(x)							\
123 	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
124 
125 #define	LK_CAN_ADAPT(lk, f)						\
126 	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
127 	((f) & LK_SLEEPFAIL) == 0)
128 
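/*
 * The lock word (lk_lock) encodes either the number of shared holders
 * (with LK_SHARE set) or the thread pointer of the exclusive owner, plus
 * the waiters/spinners flag bits.  The macros below mask off all flag
 * bits except LK_SHARE before comparing the lock word against
 * LK_KERNPROC or curthread.
 */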
129 #define	lockmgr_disowned(lk)						\
130 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
131 
132 #define	lockmgr_xlocked(lk)						\
133 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
134 
135 static void	 assert_lockmgr(struct lock_object *lock, int how);
136 #ifdef DDB
137 static void	 db_show_lockmgr(struct lock_object *lock);
138 #endif
139 static void	 lock_lockmgr(struct lock_object *lock, int how);
140 #ifdef KDTRACE_HOOKS
141 static int	 owner_lockmgr(struct lock_object *lock, struct thread **owner);
142 #endif
143 static int	 unlock_lockmgr(struct lock_object *lock);
144 
145 struct lock_class lock_class_lockmgr = {
146 	.lc_name = "lockmgr",
147 	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
148 	.lc_assert = assert_lockmgr,
149 #ifdef DDB
150 	.lc_ddb_show = db_show_lockmgr,
151 #endif
152 	.lc_lock = lock_lockmgr,
153 	.lc_unlock = unlock_lockmgr,
154 #ifdef KDTRACE_HOOKS
155 	.lc_owner = owner_lockmgr,
156 #endif
157 };
158 
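/*
 * When ADAPTIVE_LOCKMGRS is enabled, the debug.lockmgr.retries and
 * debug.lockmgr.loops sysctls bound the adaptive spinning performed while
 * the lock is held in shared mode, before the requester falls back to
 * sleeping.
 */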
159 #ifdef ADAPTIVE_LOCKMGRS
160 static u_int alk_retries = 10;
161 static u_int alk_loops = 10000;
162 SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
163 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
164 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
165 #endif
166 
167 static __inline struct thread *
168 lockmgr_xholder(struct lock *lk)
169 {
170 	uintptr_t x;
171 
172 	x = lk->lk_lock;
173 	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
174 }
175 
176 /*
177  * This function assumes the sleepqueue chain lock is held on entry and
178  * returns with it released.  It also assumes the generic interlock is
179  * sane and has already been checked.  If LK_INTERLOCK is specified, the
180  * interlock is not reacquired after the sleep.
181  */
182 static __inline int
183 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
184     const char *wmesg, int pri, int timo, int queue)
185 {
186 	GIANT_DECLARE;
187 	struct lock_class *class;
188 	int catch, error;
189 
190 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
191 	catch = pri & PCATCH;
192 	pri &= PRIMASK;
193 	error = 0;
194 
195 	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
196 	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
197 
198 	if (flags & LK_INTERLOCK)
199 		class->lc_unlock(ilk);
200 	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
201 		lk->lk_exslpfail++;
202 	GIANT_SAVE();
203 	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
204 	    SLEEPQ_INTERRUPTIBLE : 0), queue);
205 	if ((flags & LK_TIMELOCK) && timo)
206 		sleepq_set_timeout(&lk->lock_object, timo);
207 
208 	/*
209 	 * Select the appropriate type of sleep.
210 	 */
211 	if ((flags & LK_TIMELOCK) && timo && catch)
212 		error = sleepq_timedwait_sig(&lk->lock_object, pri);
213 	else if ((flags & LK_TIMELOCK) && timo)
214 		error = sleepq_timedwait(&lk->lock_object, pri);
215 	else if (catch)
216 		error = sleepq_wait_sig(&lk->lock_object, pri);
217 	else
218 		sleepq_wait(&lk->lock_object, pri);
219 	GIANT_RESTORE();
220 	if ((flags & LK_SLEEPFAIL) && error == 0)
221 		error = ENOLCK;
222 
223 	return (error);
224 }
225 
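/*
 * Release one shared lock reference.  If this is the last shared holder,
 * wake up the appropriate sleep queue (preferring exclusive waiters) and
 * clear the waiters bits.  Returns non-zero if the caller must wake up
 * the swapper via kick_proc0().
 */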
226 static __inline int
227 wakeupshlk(struct lock *lk, const char *file, int line)
228 {
229 	uintptr_t v, x;
230 	u_int realexslp;
231 	int queue, wakeup_swapper;
232 
233 	TD_LOCKS_DEC(curthread);
234 	TD_SLOCKS_DEC(curthread);
235 	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
236 	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
237 
238 	wakeup_swapper = 0;
239 	for (;;) {
240 		x = lk->lk_lock;
241 
242 		/*
243 		 * If there is more than one shared lock held, just drop one
244 		 * and return.
245 		 */
246 		if (LK_SHARERS(x) > 1) {
247 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
248 			    x - LK_ONE_SHARER))
249 				break;
250 			continue;
251 		}
252 
253 		/*
254 		 * If there are no waiters on the exclusive queue, drop the
255 		 * lock quickly.
256 		 */
257 		if ((x & LK_ALL_WAITERS) == 0) {
258 			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
259 			    LK_SHARERS_LOCK(1));
260 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
261 				break;
262 			continue;
263 		}
264 
265 		/*
266 		 * We should have a sharer with waiters, so enter the hard
267 		 * path in order to handle wakeups correctly.
268 		 */
269 		sleepq_lock(&lk->lock_object);
270 		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
271 		v = LK_UNLOCKED;
272 
273 		/*
274 		 * If the lock has exclusive waiters, give them preference in
275 		 * order to avoid deadlock with shared runners-up.
276 		 * If interruptible sleeps left the exclusive queue empty,
277 		 * avoid starvation of the threads sleeping on the shared
278 		 * queue by giving them precedence and clearing the
279 		 * exclusive waiters bit anyway.
280 		 * Note that the lk_exslpfail count may overstate the real
281 		 * number of waiters with the LK_SLEEPFAIL flag set, because
282 		 * such waiters may also be using interruptible sleeps, so
283 		 * lk_exslpfail should be treated as an upper bound,
284 		 * including the edge cases.
285 		 */
286 		realexslp = sleepq_sleepcnt(&lk->lock_object,
287 		    SQ_EXCLUSIVE_QUEUE);
288 		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
289 			if (lk->lk_exslpfail < realexslp) {
290 				lk->lk_exslpfail = 0;
291 				queue = SQ_EXCLUSIVE_QUEUE;
292 				v |= (x & LK_SHARED_WAITERS);
293 			} else {
294 				lk->lk_exslpfail = 0;
295 				LOCK_LOG2(lk,
296 				    "%s: %p has only LK_SLEEPFAIL sleepers",
297 				    __func__, lk);
298 				LOCK_LOG2(lk,
299 			    "%s: %p waking up threads on the exclusive queue",
300 				    __func__, lk);
301 				wakeup_swapper =
302 				    sleepq_broadcast(&lk->lock_object,
303 				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
304 				queue = SQ_SHARED_QUEUE;
305 			}
306 
307 		} else {
308 
309 			/*
310 			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
311 			 * and using interruptible sleeps/timeout may have
312 			 * left spurious lk_exslpfail counts on, so clean
313 			 * it up anyway.
314 			 */
315 			lk->lk_exslpfail = 0;
316 			queue = SQ_SHARED_QUEUE;
317 		}
318 
319 		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
320 		    v)) {
321 			sleepq_release(&lk->lock_object);
322 			continue;
323 		}
324 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
325 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
326 		    "exclusive");
327 		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
328 		    0, queue);
329 		sleepq_release(&lk->lock_object);
330 		break;
331 	}
332 
333 	lock_profile_release_lock(&lk->lock_object);
334 	return (wakeup_swapper);
335 }
336 
337 static void
338 assert_lockmgr(struct lock_object *lock, int what)
339 {
340 
341 	panic("lockmgr locks do not support assertions");
342 }
343 
344 static void
345 lock_lockmgr(struct lock_object *lock, int how)
346 {
347 
348 	panic("lockmgr locks do not support sleep interlocking");
349 }
350 
351 static int
352 unlock_lockmgr(struct lock_object *lock)
353 {
354 
355 	panic("lockmgr locks do not support sleep interlocking");
356 }
357 
358 #ifdef KDTRACE_HOOKS
359 static int
360 owner_lockmgr(struct lock_object *lock, struct thread **owner)
361 {
362 
363 	panic("lockmgr locks do not support owner inquiring");
364 }
365 #endif
366 
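/*
 * Initialize a lockmgr lock.  The given priority and timeout become the
 * defaults used when __lockmgr_args() is invoked with LK_PRIO_DEFAULT or
 * LK_TIMO_DEFAULT.
 */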
367 void
368 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
369 {
370 	int iflags;
371 
372 	MPASS((flags & ~LK_INIT_MASK) == 0);
373 	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
374 	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
375 	    &lk->lk_lock));
376 
377 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
378 	if (flags & LK_CANRECURSE)
379 		iflags |= LO_RECURSABLE;
380 	if ((flags & LK_NODUP) == 0)
381 		iflags |= LO_DUPOK;
382 	if (flags & LK_NOPROFILE)
383 		iflags |= LO_NOPROFILE;
384 	if ((flags & LK_NOWITNESS) == 0)
385 		iflags |= LO_WITNESS;
386 	if (flags & LK_QUIET)
387 		iflags |= LO_QUIET;
388 	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
389 
390 	lk->lk_lock = LK_UNLOCKED;
391 	lk->lk_recurse = 0;
392 	lk->lk_exslpfail = 0;
393 	lk->lk_timo = timo;
394 	lk->lk_pri = pri;
395 	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
396 	STACK_ZERO(lk);
397 }
398 
399 /*
400  * XXX: Gross hacks to manipulate external lock flags after
401  * initialization.  Used for certain vnode and buf locks.
402  */
403 void
404 lockallowshare(struct lock *lk)
405 {
406 
407 	lockmgr_assert(lk, KA_XLOCKED);
408 	lk->lock_object.lo_flags &= ~LK_NOSHARE;
409 }
410 
411 void
412 lockallowrecurse(struct lock *lk)
413 {
414 
415 	lockmgr_assert(lk, KA_XLOCKED);
416 	lk->lock_object.lo_flags |= LO_RECURSABLE;
417 }
418 
419 void
420 lockdisablerecurse(struct lock *lk)
421 {
422 
423 	lockmgr_assert(lk, KA_XLOCKED);
424 	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
425 }
426 
427 void
428 lockdestroy(struct lock *lk)
429 {
430 
431 	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
432 	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
433 	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
434 	lock_destroy(&lk->lock_object);
435 }
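
/*
 * A minimal usage sketch (the lock name, wait message and priority below
 * are illustrative only):
 *
 *	struct lock foo_lock;
 *
 *	lockinit(&foo_lock, PVFS, "foolck", 0, LK_CANRECURSE);
 *	...
 *	lockmgr(&foo_lock, LK_EXCLUSIVE, NULL);
 *	... access the data protected by foo_lock ...
 *	lockmgr(&foo_lock, LK_RELEASE, NULL);
 *	...
 *	lockdestroy(&foo_lock);
 */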
436 
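/*
 * General purpose lock manager entry point.  Dispatches on the operation
 * encoded in LK_TYPE_MASK (LK_SHARED, LK_UPGRADE, LK_EXCLUSIVE,
 * LK_DOWNGRADE, LK_RELEASE or LK_DRAIN) and returns 0 on success or an
 * errno value on failure.
 */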
437 int
438 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
439     const char *wmesg, int pri, int timo, const char *file, int line)
440 {
441 	GIANT_DECLARE;
442 	struct lock_class *class;
443 	const char *iwmesg;
444 	uintptr_t tid, v, x;
445 	u_int op, realexslp;
446 	int error, ipri, itimo, queue, wakeup_swapper;
447 #ifdef LOCK_PROFILING
448 	uint64_t waittime = 0;
449 	int contested = 0;
450 #endif
451 #ifdef ADAPTIVE_LOCKMGRS
452 	volatile struct thread *owner;
453 	u_int i, spintries = 0;
454 #endif
455 
456 	error = 0;
457 	tid = (uintptr_t)curthread;
458 	op = (flags & LK_TYPE_MASK);
459 	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
460 	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
461 	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
462 
463 	MPASS((flags & ~LK_TOTAL_MASK) == 0);
464 	KASSERT((op & (op - 1)) == 0,
465 	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
466 	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
467 	    (op != LK_DOWNGRADE && op != LK_RELEASE),
468 	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
469 	    __func__, file, line));
470 	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
471 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
472 	    __func__, file, line));
473 
474 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
475 	if (panicstr != NULL) {
476 		if (flags & LK_INTERLOCK)
477 			class->lc_unlock(ilk);
478 		return (0);
479 	}
480 
481 	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
482 		op = LK_EXCLUSIVE;
483 
484 	wakeup_swapper = 0;
485 	switch (op) {
486 	case LK_SHARED:
487 		if (LK_CAN_WITNESS(flags))
488 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
489 			    file, line, ilk);
490 		for (;;) {
491 			x = lk->lk_lock;
492 
493 			/*
494 			 * If no other thread has an exclusive lock, or
495 			 * no exclusive waiter is present, bump the count of
496 			 * sharers.  Since we have to preserve the state of
497 			 * waiters, if we fail to acquire the shared lock
498 			 * loop back and retry.
499 			 */
500 			if (LK_CAN_SHARE(x)) {
501 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
502 				    x + LK_ONE_SHARER))
503 					break;
504 				continue;
505 			}
506 			lock_profile_obtain_lock_failed(&lk->lock_object,
507 			    &contested, &waittime);
508 
509 			/*
510 			 * If the lock is already held by curthread in
511 			 * exclusive mode, avoid a deadlock.
512 			 */
513 			if (LK_HOLDER(x) == tid) {
514 				LOCK_LOG2(lk,
515 				    "%s: %p already held in exclusive mode",
516 				    __func__, lk);
517 				error = EDEADLK;
518 				break;
519 			}
520 
521 			/*
522 			 * If the caller does not want to sleep, just give up
523 			 * and return.
524 			 */
525 			if (LK_TRYOP(flags)) {
526 				LOCK_LOG2(lk, "%s: %p fails the try operation",
527 				    __func__, lk);
528 				error = EBUSY;
529 				break;
530 			}
531 
532 #ifdef ADAPTIVE_LOCKMGRS
533 			/*
534 			 * If the owner is running on another CPU, spin until
535 			 * the owner stops running or the state of the lock
536 			 * changes.  Both lock states must be handled here
537 			 * because, after a failed acquisition, the lock can be
538 			 * held either in exclusive mode or in shared mode
539 			 * (for the writer starvation avoidance technique).
540 			 */
541 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
542 			    LK_HOLDER(x) != LK_KERNPROC) {
543 				owner = (struct thread *)LK_HOLDER(x);
544 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
545 					CTR3(KTR_LOCK,
546 					    "%s: spinning on %p held by %p",
547 					    __func__, lk, owner);
548 
549 				/*
550 				 * If we are also holding an interlock, drop it
551 				 * in order to avoid a deadlock if the lockmgr
552 				 * owner is adaptively spinning on the
553 				 * interlock itself.
554 				 */
555 				if (flags & LK_INTERLOCK) {
556 					class->lc_unlock(ilk);
557 					flags &= ~LK_INTERLOCK;
558 				}
559 				GIANT_SAVE();
560 				while (LK_HOLDER(lk->lk_lock) ==
561 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
562 					cpu_spinwait();
563 				GIANT_RESTORE();
564 				continue;
565 			} else if (LK_CAN_ADAPT(lk, flags) &&
566 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
567 			    spintries < alk_retries) {
568 				if (flags & LK_INTERLOCK) {
569 					class->lc_unlock(ilk);
570 					flags &= ~LK_INTERLOCK;
571 				}
572 				GIANT_SAVE();
573 				spintries++;
574 				for (i = 0; i < alk_loops; i++) {
575 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
576 						CTR4(KTR_LOCK,
577 				    "%s: shared spinning on %p with %u and %u",
578 						    __func__, lk, spintries, i);
579 					x = lk->lk_lock;
580 					if ((x & LK_SHARE) == 0 ||
581 					    LK_CAN_SHARE(x) != 0)
582 						break;
583 					cpu_spinwait();
584 				}
585 				GIANT_RESTORE();
586 				if (i != alk_loops)
587 					continue;
588 			}
589 #endif
590 
591 			/*
592 			 * Acquire the sleepqueue chain lock because we
593 			 * will probably need to manipulate waiters flags.
594 			 */
595 			sleepq_lock(&lk->lock_object);
596 			x = lk->lk_lock;
597 
598 			/*
599 			 * If the lock can be acquired in shared mode, try
600 			 * again.
601 			 */
602 			if (LK_CAN_SHARE(x)) {
603 				sleepq_release(&lk->lock_object);
604 				continue;
605 			}
606 
607 #ifdef ADAPTIVE_LOCKMGRS
608 			/*
609 			 * The current lock owner might have started executing
610 			 * on another CPU (or the lock could have changed
611 			 * owner) while we were waiting on the sleepqueue
612 			 * chain lock.  If so, drop the sleepqueue lock and try
613 			 * again.
614 			 */
615 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
616 			    LK_HOLDER(x) != LK_KERNPROC) {
617 				owner = (struct thread *)LK_HOLDER(x);
618 				if (TD_IS_RUNNING(owner)) {
619 					sleepq_release(&lk->lock_object);
620 					continue;
621 				}
622 			}
623 #endif
624 
625 			/*
626 			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
627 			 * loop back and retry.
628 			 */
629 			if ((x & LK_SHARED_WAITERS) == 0) {
630 				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
631 				    x | LK_SHARED_WAITERS)) {
632 					sleepq_release(&lk->lock_object);
633 					continue;
634 				}
635 				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
636 				    __func__, lk);
637 			}
638 
639 			/*
640 			 * Since we have been unable to acquire the
641 			 * shared lock and the shared waiters flag is set,
642 			 * we will sleep.
643 			 */
644 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
645 			    SQ_SHARED_QUEUE);
646 			flags &= ~LK_INTERLOCK;
647 			if (error) {
648 				LOCK_LOG3(lk,
649 				    "%s: interrupted sleep for %p with %d",
650 				    __func__, lk, error);
651 				break;
652 			}
653 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
654 			    __func__, lk);
655 		}
656 		if (error == 0) {
657 			lock_profile_obtain_lock_success(&lk->lock_object,
658 			    contested, waittime, file, line);
659 			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
660 			    line);
661 			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
662 			    line);
663 			TD_LOCKS_INC(curthread);
664 			TD_SLOCKS_INC(curthread);
665 			STACK_SAVE(lk);
666 		}
667 		break;
668 	case LK_UPGRADE:
669 		_lockmgr_assert(lk, KA_SLOCKED, file, line);
670 		v = lk->lk_lock;
671 		x = v & LK_ALL_WAITERS;
672 		v &= LK_EXCLUSIVE_SPINNERS;
673 
674 		/*
675 		 * Try to switch from one shared lock to an exclusive one.
676 		 * We need to preserve waiters flags during the operation.
677 		 */
678 		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
679 		    tid | x)) {
680 			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
681 			    line);
682 			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
683 			    LK_TRYWIT(flags), file, line);
684 			TD_SLOCKS_DEC(curthread);
685 			break;
686 		}
687 
688 		/*
689 		 * We have been unable to succeed in upgrading, so just
690 		 * give up the shared lock.
691 		 */
692 		wakeup_swapper |= wakeupshlk(lk, file, line);
693 
694 		/* FALLTHROUGH */
695 	case LK_EXCLUSIVE:
696 		if (LK_CAN_WITNESS(flags))
697 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
698 			    LOP_EXCLUSIVE, file, line, ilk);
699 
700 		/*
701 		 * If curthread already holds the lock and this one is
702 		 * allowed to recurse, simply recurse on it.
703 		 */
704 		if (lockmgr_xlocked(lk)) {
705 			if ((flags & LK_CANRECURSE) == 0 &&
706 			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
707 
708 				/*
709 				 * If this is a try operation, just give up
710 				 * and return instead of panicking.
711 				 */
712 				if (LK_TRYOP(flags)) {
713 					LOCK_LOG2(lk,
714 					    "%s: %p fails the try operation",
715 					    __func__, lk);
716 					error = EBUSY;
717 					break;
718 				}
719 				if (flags & LK_INTERLOCK)
720 					class->lc_unlock(ilk);
721 		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
722 				    __func__, iwmesg, file, line);
723 			}
724 			lk->lk_recurse++;
725 			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
726 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
727 			    lk->lk_recurse, file, line);
728 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
729 			    LK_TRYWIT(flags), file, line);
730 			TD_LOCKS_INC(curthread);
731 			break;
732 		}
733 
734 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
735 		    tid)) {
736 			lock_profile_obtain_lock_failed(&lk->lock_object,
737 			    &contested, &waittime);
738 
739 			/*
740 			 * If the caller does not want to sleep, just give up
741 			 * and return.
742 			 */
743 			if (LK_TRYOP(flags)) {
744 				LOCK_LOG2(lk, "%s: %p fails the try operation",
745 				    __func__, lk);
746 				error = EBUSY;
747 				break;
748 			}
749 
750 #ifdef ADAPTIVE_LOCKMGRS
751 			/*
752 			 * If the owner is running on another CPU, spin until
753 			 * the owner stops running or the state of the lock
754 			 * changes.
755 			 */
756 			x = lk->lk_lock;
757 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
758 			    LK_HOLDER(x) != LK_KERNPROC) {
759 				owner = (struct thread *)LK_HOLDER(x);
760 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
761 					CTR3(KTR_LOCK,
762 					    "%s: spinning on %p held by %p",
763 					    __func__, lk, owner);
764 
765 				/*
766 				 * If we are also holding an interlock, drop it
767 				 * in order to avoid a deadlock if the lockmgr
768 				 * owner is adaptively spinning on the
769 				 * interlock itself.
770 				 */
771 				if (flags & LK_INTERLOCK) {
772 					class->lc_unlock(ilk);
773 					flags &= ~LK_INTERLOCK;
774 				}
775 				GIANT_SAVE();
776 				while (LK_HOLDER(lk->lk_lock) ==
777 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
778 					cpu_spinwait();
779 				GIANT_RESTORE();
780 				continue;
781 			} else if (LK_CAN_ADAPT(lk, flags) &&
782 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
783 			    spintries < alk_retries) {
784 				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
785 				    !atomic_cmpset_ptr(&lk->lk_lock, x,
786 				    x | LK_EXCLUSIVE_SPINNERS))
787 					continue;
788 				if (flags & LK_INTERLOCK) {
789 					class->lc_unlock(ilk);
790 					flags &= ~LK_INTERLOCK;
791 				}
792 				GIANT_SAVE();
793 				spintries++;
794 				for (i = 0; i < alk_loops; i++) {
795 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
796 						CTR4(KTR_LOCK,
797 				    "%s: shared spinning on %p with %u and %u",
798 						    __func__, lk, spintries, i);
799 					if ((lk->lk_lock &
800 					    LK_EXCLUSIVE_SPINNERS) == 0)
801 						break;
802 					cpu_spinwait();
803 				}
804 				GIANT_RESTORE();
805 				if (i != alk_loops)
806 					continue;
807 			}
808 #endif
809 
810 			/*
811 			 * Acquire the sleepqueue chain lock because we
812 			 * will probably need to manipulate waiters flags.
813 			 */
814 			sleepq_lock(&lk->lock_object);
815 			x = lk->lk_lock;
816 
817 			/*
818 			 * If the lock has been released while we spun on
819 			 * the sleepqueue chain lock just try again.
820 			 */
821 			if (x == LK_UNLOCKED) {
822 				sleepq_release(&lk->lock_object);
823 				continue;
824 			}
825 
826 #ifdef ADAPTIVE_LOCKMGRS
827 			/*
828 			 * The current lock owner might have started executing
829 			 * on another CPU (or the lock could have changed
830 			 * owner) while we were waiting on the sleepqueue
831 			 * chain lock.  If so, drop the sleepqueue lock and try
832 			 * again.
833 			 */
834 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
835 			    LK_HOLDER(x) != LK_KERNPROC) {
836 				owner = (struct thread *)LK_HOLDER(x);
837 				if (TD_IS_RUNNING(owner)) {
838 					sleepq_release(&lk->lock_object);
839 					continue;
840 				}
841 			}
842 #endif
843 
844 			/*
845 			 * The lock can be in the state where there is a
846 			 * pending queue of waiters, but still no owner.
847 			 * This happens when the lock is contested and an
848 			 * owner is going to claim the lock.
849 			 * If curthread is the one that successfully acquires
850 			 * it, claim lock ownership and return, preserving the
851 			 * waiters flags.
852 			 */
853 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
854 			if ((x & ~v) == LK_UNLOCKED) {
855 				v &= ~LK_EXCLUSIVE_SPINNERS;
856 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
857 				    tid | v)) {
858 					sleepq_release(&lk->lock_object);
859 					LOCK_LOG2(lk,
860 					    "%s: %p claimed by a new writer",
861 					    __func__, lk);
862 					break;
863 				}
864 				sleepq_release(&lk->lock_object);
865 				continue;
866 			}
867 
868 			/*
869 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
870 			 * fail, loop back and retry.
871 			 */
872 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
873 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
874 				    x | LK_EXCLUSIVE_WAITERS)) {
875 					sleepq_release(&lk->lock_object);
876 					continue;
877 				}
878 				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
879 				    __func__, lk);
880 			}
881 
882 			/*
883 			 * Since we have been unable to acquire the
884 			 * exclusive lock and the exclusive waiters flag
885 			 * is set, we will sleep.
886 			 */
887 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
888 			    SQ_EXCLUSIVE_QUEUE);
889 			flags &= ~LK_INTERLOCK;
890 			if (error) {
891 				LOCK_LOG3(lk,
892 				    "%s: interrupted sleep for %p with %d",
893 				    __func__, lk, error);
894 				break;
895 			}
896 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
897 			    __func__, lk);
898 		}
899 		if (error == 0) {
900 			lock_profile_obtain_lock_success(&lk->lock_object,
901 			    contested, waittime, file, line);
902 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
903 			    lk->lk_recurse, file, line);
904 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
905 			    LK_TRYWIT(flags), file, line);
906 			TD_LOCKS_INC(curthread);
907 			STACK_SAVE(lk);
908 		}
909 		break;
910 	case LK_DOWNGRADE:
911 		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
912 		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
913 		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
914 		TD_SLOCKS_INC(curthread);
915 
916 		/*
917 		 * In order to preserve waiters flags, just spin.
918 		 */
919 		for (;;) {
920 			x = lk->lk_lock;
921 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
922 			x &= LK_ALL_WAITERS;
923 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
924 			    LK_SHARERS_LOCK(1) | x))
925 				break;
926 			cpu_spinwait();
927 		}
928 		break;
929 	case LK_RELEASE:
930 		_lockmgr_assert(lk, KA_LOCKED, file, line);
931 		x = lk->lk_lock;
932 
933 		if ((x & LK_SHARE) == 0) {
934 
935 			/*
936 			 * As a first option, treat the lock as if it has no
937 			 * waiters.
938 			 * Fix-up the tid var if the lock has been disowned.
939 			 */
940 			if (LK_HOLDER(x) == LK_KERNPROC)
941 				tid = LK_KERNPROC;
942 			else {
943 				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
944 				    file, line);
945 				TD_LOCKS_DEC(curthread);
946 			}
947 			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
948 			    lk->lk_recurse, file, line);
949 
950 			/*
951 			 * The lock is held in exclusive mode.
952 			 * If the lock is recursed also, then unrecurse it.
953 			 */
954 			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
955 				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
956 				    lk);
957 				lk->lk_recurse--;
958 				break;
959 			}
960 			if (tid != LK_KERNPROC)
961 				lock_profile_release_lock(&lk->lock_object);
962 
963 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
964 			    LK_UNLOCKED))
965 				break;
966 
967 			sleepq_lock(&lk->lock_object);
968 			x = lk->lk_lock;
969 			v = LK_UNLOCKED;
970 
971 			/*
972 			 * If the lock has exclusive waiters, give them
973 			 * preference in order to avoid deadlock with
974 			 * shared runners-up.
975 			 * If interruptible sleeps left the exclusive queue
976 			 * empty, avoid starvation of the threads sleeping
977 			 * on the shared queue by giving them precedence
978 			 * and clearing the exclusive waiters bit anyway.
979 			 * Note that the lk_exslpfail count may overstate
980 			 * the real number of waiters with the LK_SLEEPFAIL
981 			 * flag set, because such waiters may also be using
982 			 * interruptible sleeps, so lk_exslpfail should be
983 			 * treated as an upper bound, including the edge
984 			 * cases.
985 			 */
986 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
987 			realexslp = sleepq_sleepcnt(&lk->lock_object,
988 			    SQ_EXCLUSIVE_QUEUE);
989 			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
990 				if (lk->lk_exslpfail < realexslp) {
991 					lk->lk_exslpfail = 0;
992 					queue = SQ_EXCLUSIVE_QUEUE;
993 					v |= (x & LK_SHARED_WAITERS);
994 				} else {
995 					lk->lk_exslpfail = 0;
996 					LOCK_LOG2(lk,
997 					"%s: %p has only LK_SLEEPFAIL sleepers",
998 					    __func__, lk);
999 					LOCK_LOG2(lk,
1000 			"%s: %p waking up threads on the exclusive queue",
1001 					    __func__, lk);
1002 					wakeup_swapper =
1003 					    sleepq_broadcast(&lk->lock_object,
1004 					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1005 					queue = SQ_SHARED_QUEUE;
1006 				}
1007 			} else {
1008 
1009 				/*
1010 				 * Exclusive waiters sleeping with LK_SLEEPFAIL
1011 				 * on and using interruptible sleeps/timeout
1012 				 * may have left spurious lk_exslpfail counts
1013 				 * on, so clean it up anyway.
1014 				 */
1015 				lk->lk_exslpfail = 0;
1016 				queue = SQ_SHARED_QUEUE;
1017 			}
1018 
1019 			LOCK_LOG3(lk,
1020 			    "%s: %p waking up threads on the %s queue",
1021 			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1022 			    "exclusive");
1023 			atomic_store_rel_ptr(&lk->lk_lock, v);
1024 			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
1025 			    SLEEPQ_LK, 0, queue);
1026 			sleepq_release(&lk->lock_object);
1027 			break;
1028 		} else
1029 			wakeup_swapper = wakeupshlk(lk, file, line);
1030 		break;
1031 	case LK_DRAIN:
1032 		if (LK_CAN_WITNESS(flags))
1033 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1034 			    LOP_EXCLUSIVE, file, line, ilk);
1035 
1036 		/*
1037 		 * Trying to drain a lock we already own will result in a
1038 		 * deadlock.
1039 		 */
1040 		if (lockmgr_xlocked(lk)) {
1041 			if (flags & LK_INTERLOCK)
1042 				class->lc_unlock(ilk);
1043 			panic("%s: draining %s with the lock held @ %s:%d\n",
1044 			    __func__, iwmesg, file, line);
1045 		}
1046 
1047 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1048 			lock_profile_obtain_lock_failed(&lk->lock_object,
1049 			    &contested, &waittime);
1050 
1051 			/*
1052 			 * If the caller does not want to sleep, just give up
1053 			 * and return.
1054 			 */
1055 			if (LK_TRYOP(flags)) {
1056 				LOCK_LOG2(lk, "%s: %p fails the try operation",
1057 				    __func__, lk);
1058 				error = EBUSY;
1059 				break;
1060 			}
1061 
1062 			/*
1063 			 * Acquire the sleepqueue chain lock because we
1064 			 * will probably need to manipulate waiters flags.
1065 			 */
1066 			sleepq_lock(&lk->lock_object);
1067 			x = lk->lk_lock;
1068 
1069 			/*
1070 			 * If the lock has been released while we spun on
1071 			 * the sleepqueue chain lock just try again.
1072 			 */
1073 			if (x == LK_UNLOCKED) {
1074 				sleepq_release(&lk->lock_object);
1075 				continue;
1076 			}
1077 
1078 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1079 			if ((x & ~v) == LK_UNLOCKED) {
1080 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
1081 
1082 				/*
1083 				 * If interruptible sleeps left the exclusive
1084 				 * queue empty, avoid starvation of the
1085 				 * threads sleeping on the shared queue by
1086 				 * giving them precedence and clearing the
1087 				 * exclusive waiters bit anyway.
1088 				 * Note that the lk_exslpfail count may
1089 				 * overstate the real number of waiters with
1090 				 * the LK_SLEEPFAIL flag set, because such
1091 				 * waiters may also be using interruptible
1092 				 * sleeps, so lk_exslpfail should be treated
1093 				 * as an upper bound, including the edge
1094 				 * cases.
1095 				 */
1096 				if (v & LK_EXCLUSIVE_WAITERS) {
1097 					queue = SQ_EXCLUSIVE_QUEUE;
1098 					v &= ~LK_EXCLUSIVE_WAITERS;
1099 				} else {
1100 
1101 					/*
1102 					 * Exclusive waiters sleeping with
1103 					 * LK_SLEEPFAIL on and using
1104 					 * interruptible sleeps/timeout may
1105 					 * have left spurious lk_exslpfail
1106 					 * counts on, so clean it up anyway.
1107 					 */
1108 					MPASS(v & LK_SHARED_WAITERS);
1109 					lk->lk_exslpfail = 0;
1110 					queue = SQ_SHARED_QUEUE;
1111 					v &= ~LK_SHARED_WAITERS;
1112 				}
1113 				if (queue == SQ_EXCLUSIVE_QUEUE) {
1114 					realexslp =
1115 					    sleepq_sleepcnt(&lk->lock_object,
1116 					    SQ_EXCLUSIVE_QUEUE);
1117 					if (lk->lk_exslpfail >= realexslp) {
1118 						lk->lk_exslpfail = 0;
1119 						queue = SQ_SHARED_QUEUE;
1120 						v &= ~LK_SHARED_WAITERS;
1121 						if (realexslp != 0) {
1122 							LOCK_LOG2(lk,
1123 					"%s: %p has only LK_SLEEPFAIL sleepers",
1124 							    __func__, lk);
1125 							LOCK_LOG2(lk,
1126 			"%s: %p waking up threads on the exclusive queue",
1127 							    __func__, lk);
1128 							wakeup_swapper =
1129 							    sleepq_broadcast(
1130 							    &lk->lock_object,
1131 							    SLEEPQ_LK, 0,
1132 							    SQ_EXCLUSIVE_QUEUE);
1133 						}
1134 					} else
1135 						lk->lk_exslpfail = 0;
1136 				}
1137 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1138 					sleepq_release(&lk->lock_object);
1139 					continue;
1140 				}
1141 				LOCK_LOG3(lk,
1142 				"%s: %p waking up all threads on the %s queue",
1143 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1144 				    "shared" : "exclusive");
1145 				wakeup_swapper |= sleepq_broadcast(
1146 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1147 
1148 				/*
1149 				 * If shared waiters have been woken up, we need
1150 				 * to wait for one of them to acquire the lock
1151 				 * before setting the exclusive waiters flag, in
1152 				 * order to avoid a deadlock.
1153 				 */
1154 				if (queue == SQ_SHARED_QUEUE) {
1155 					for (v = lk->lk_lock;
1156 					    (v & LK_SHARE) && !LK_SHARERS(v);
1157 					    v = lk->lk_lock)
1158 						cpu_spinwait();
1159 				}
1160 			}
1161 
1162 			/*
1163 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1164 			 * fail, loop back and retry.
1165 			 */
1166 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1167 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1168 				    x | LK_EXCLUSIVE_WAITERS)) {
1169 					sleepq_release(&lk->lock_object);
1170 					continue;
1171 				}
1172 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1173 				    __func__, lk);
1174 			}
1175 
1176 			/*
1177 			 * Since we have been unable to acquire the
1178 			 * exclusive lock and the exclusive waiters flag
1179 			 * is set, we will sleep.
1180 			 */
1181 			if (flags & LK_INTERLOCK) {
1182 				class->lc_unlock(ilk);
1183 				flags &= ~LK_INTERLOCK;
1184 			}
1185 			GIANT_SAVE();
1186 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1187 			    SQ_EXCLUSIVE_QUEUE);
1188 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1189 			GIANT_RESTORE();
1190 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1191 			    __func__, lk);
1192 		}
1193 
1194 		if (error == 0) {
1195 			lock_profile_obtain_lock_success(&lk->lock_object,
1196 			    contested, waittime, file, line);
1197 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1198 			    lk->lk_recurse, file, line);
1199 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1200 			    LK_TRYWIT(flags), file, line);
1201 			TD_LOCKS_INC(curthread);
1202 			STACK_SAVE(lk);
1203 		}
1204 		break;
1205 	default:
1206 		if (flags & LK_INTERLOCK)
1207 			class->lc_unlock(ilk);
1208 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1209 	}
1210 
1211 	if (flags & LK_INTERLOCK)
1212 		class->lc_unlock(ilk);
1213 	if (wakeup_swapper)
1214 		kick_proc0();
1215 
1216 	return (error);
1217 }
1218 
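/*
 * Disown an exclusively held lock: ownership is transferred to the
 * fictitious LK_KERNPROC owner, so that the lock may later be released
 * by a different thread.
 */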
1219 void
1220 _lockmgr_disown(struct lock *lk, const char *file, int line)
1221 {
1222 	uintptr_t tid, x;
1223 
1224 	tid = (uintptr_t)curthread;
1225 	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
1226 
1227 	/*
1228 	 * If the owner is already LK_KERNPROC just skip the whole operation.
1229 	 */
1230 	if (LK_HOLDER(lk->lk_lock) != tid)
1231 		return;
1232 	lock_profile_release_lock(&lk->lock_object);
1233 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1234 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1235 	TD_LOCKS_DEC(curthread);
1236 	STACK_SAVE(lk);
1237 
1238 	/*
1239 	 * In order to preserve waiters flags, just spin.
1240 	 */
1241 	for (;;) {
1242 		x = lk->lk_lock;
1243 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1244 		x &= LK_ALL_WAITERS;
1245 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1246 		    LK_KERNPROC | x))
1247 			return;
1248 		cpu_spinwait();
1249 	}
1250 }
1251 
1252 void
1253 lockmgr_printinfo(struct lock *lk)
1254 {
1255 	struct thread *td;
1256 	uintptr_t x;
1257 
1258 	if (lk->lk_lock == LK_UNLOCKED)
1259 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1260 	else if (lk->lk_lock & LK_SHARE)
1261 		printf("lock type %s: SHARED (count %ju)\n",
1262 		    lk->lock_object.lo_name,
1263 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1264 	else {
1265 		td = lockmgr_xholder(lk);
1266 		printf("lock type %s: EXCL by thread %p (pid %d)\n",
1267 		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
1268 	}
1269 
1270 	x = lk->lk_lock;
1271 	if (x & LK_EXCLUSIVE_WAITERS)
1272 		printf(" with exclusive waiters pending\n");
1273 	if (x & LK_SHARED_WAITERS)
1274 		printf(" with shared waiters pending\n");
1275 	if (x & LK_EXCLUSIVE_SPINNERS)
1276 		printf(" with exclusive spinners pending\n");
1277 
1278 	STACK_PRINT(lk);
1279 }
1280 
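/*
 * Report the state of the lock with respect to curthread: LK_EXCLUSIVE if
 * held exclusively by curthread (or disowned to LK_KERNPROC), LK_EXCLOTHER
 * if held exclusively by another thread, LK_SHARED if held in shared mode,
 * or 0 if unlocked.
 */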
1281 int
1282 lockstatus(struct lock *lk)
1283 {
1284 	uintptr_t v, x;
1285 	int ret;
1286 
1287 	ret = LK_SHARED;
1288 	x = lk->lk_lock;
1289 	v = LK_HOLDER(x);
1290 
1291 	if ((x & LK_SHARE) == 0) {
1292 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1293 			ret = LK_EXCLUSIVE;
1294 		else
1295 			ret = LK_EXCLOTHER;
1296 	} else if (x == LK_UNLOCKED)
1297 		ret = 0;
1298 
1299 	return (ret);
1300 }
1301 
1302 #ifdef INVARIANT_SUPPORT
1303 #ifndef INVARIANTS
1304 #undef	_lockmgr_assert
1305 #endif
1306 
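/*
 * Assertion helper: panic if the lock is not in the state described by
 * "what" (KA_LOCKED, KA_SLOCKED, KA_XLOCKED or KA_UNLOCKED, optionally
 * combined with KA_RECURSED or KA_NOTRECURSED).
 */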
1307 void
1308 _lockmgr_assert(struct lock *lk, int what, const char *file, int line)
1309 {
1310 	int slocked = 0;
1311 
1312 	if (panicstr != NULL)
1313 		return;
1314 	switch (what) {
1315 	case KA_SLOCKED:
1316 	case KA_SLOCKED | KA_NOTRECURSED:
1317 	case KA_SLOCKED | KA_RECURSED:
1318 		slocked = 1;
1319 	case KA_LOCKED:
1320 	case KA_LOCKED | KA_NOTRECURSED:
1321 	case KA_LOCKED | KA_RECURSED:
1322 #ifdef WITNESS
1323 
1324 		/*
1325 		 * We cannot trust WITNESS if the lock is held in exclusive
1326 		 * mode and a call to lockmgr_disown() happened.
1327 		 * Work around this by skipping the check if the lock is held
1328 		 * in exclusive mode, even for the KA_LOCKED case.
1329 		 */
1330 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1331 			witness_assert(&lk->lock_object, what, file, line);
1332 			break;
1333 		}
1334 #endif
1335 		if (lk->lk_lock == LK_UNLOCKED ||
1336 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1337 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1338 			panic("Lock %s not %slocked @ %s:%d\n",
1339 			    lk->lock_object.lo_name, slocked ? "share" : "",
1340 			    file, line);
1341 
1342 		if ((lk->lk_lock & LK_SHARE) == 0) {
1343 			if (lockmgr_recursed(lk)) {
1344 				if (what & KA_NOTRECURSED)
1345 					panic("Lock %s recursed @ %s:%d\n",
1346 					    lk->lock_object.lo_name, file,
1347 					    line);
1348 			} else if (what & KA_RECURSED)
1349 				panic("Lock %s not recursed @ %s:%d\n",
1350 				    lk->lock_object.lo_name, file, line);
1351 		}
1352 		break;
1353 	case KA_XLOCKED:
1354 	case KA_XLOCKED | KA_NOTRECURSED:
1355 	case KA_XLOCKED | KA_RECURSED:
1356 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1357 			panic("Lock %s not exclusively locked @ %s:%d\n",
1358 			    lk->lock_object.lo_name, file, line);
1359 		if (lockmgr_recursed(lk)) {
1360 			if (what & KA_NOTRECURSED)
1361 				panic("Lock %s recursed @ %s:%d\n",
1362 				    lk->lock_object.lo_name, file, line);
1363 		} else if (what & KA_RECURSED)
1364 			panic("Lock %s not recursed @ %s:%d\n",
1365 			    lk->lock_object.lo_name, file, line);
1366 		break;
1367 	case KA_UNLOCKED:
1368 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1369 			panic("Lock %s exclusively locked @ %s:%d\n",
1370 			    lk->lock_object.lo_name, file, line);
1371 		break;
1372 	default:
1373 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1374 		    line);
1375 	}
1376 }
1377 #endif
1378 
1379 #ifdef DDB
1380 int
1381 lockmgr_chain(struct thread *td, struct thread **ownerp)
1382 {
1383 	struct lock *lk;
1384 
1385 	lk = td->td_wchan;
1386 
1387 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1388 		return (0);
1389 	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1390 	if (lk->lk_lock & LK_SHARE)
1391 		db_printf("SHARED (count %ju)\n",
1392 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1393 	else
1394 		db_printf("EXCL\n");
1395 	*ownerp = lockmgr_xholder(lk);
1396 
1397 	return (1);
1398 }
1399 
1400 static void
1401 db_show_lockmgr(struct lock_object *lock)
1402 {
1403 	struct thread *td;
1404 	struct lock *lk;
1405 
1406 	lk = (struct lock *)lock;
1407 
1408 	db_printf(" state: ");
1409 	if (lk->lk_lock == LK_UNLOCKED)
1410 		db_printf("UNLOCKED\n");
1411 	else if (lk->lk_lock & LK_SHARE)
1412 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1413 	else {
1414 		td = lockmgr_xholder(lk);
1415 		if (td == (struct thread *)LK_KERNPROC)
1416 			db_printf("XLOCK: LK_KERNPROC\n");
1417 		else
1418 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1419 			    td->td_tid, td->td_proc->p_pid,
1420 			    td->td_proc->p_comm);
1421 		if (lockmgr_recursed(lk))
1422 			db_printf(" recursed: %d\n", lk->lk_recurse);
1423 	}
1424 	db_printf(" waiters: ");
1425 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1426 	case LK_SHARED_WAITERS:
1427 		db_printf("shared\n");
1428 		break;
1429 	case LK_EXCLUSIVE_WAITERS:
1430 		db_printf("exclusive\n");
1431 		break;
1432 	case LK_ALL_WAITERS:
1433 		db_printf("shared and exclusive\n");
1434 		break;
1435 	default:
1436 		db_printf("none\n");
1437 	}
1438 	db_printf(" spinners: ");
1439 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1440 		db_printf("exclusive\n");
1441 	else
1442 		db_printf("none\n");
1443 }
1444 #endif
1445