xref: /dragonfly/sys/kern/kern_lock.c (revision 65ebff40)
1 /*
2  * Copyright (c) 1995
3  *	The Regents of the University of California.  All rights reserved.
4  * Copyright (C) 1997
5  *	John S. Dyson.  All rights reserved.
6  * Copyright (C) 2013-2017
7  *	Matthew Dillon, All rights reserved.
8  *
9  * This code contains ideas from software contributed to Berkeley by
10  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
11  * System project at Carnegie-Mellon University.
12  *
13  * This code is derived from software contributed to The DragonFly Project
14  * by Matthew Dillon <dillon@backplane.com>.  Extensively rewritten.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  */
40 
41 #include "opt_lint.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/proc.h>
47 #include <sys/lock.h>
48 #include <sys/sysctl.h>
49 #include <sys/spinlock.h>
50 #include <sys/thread2.h>
51 #include <sys/spinlock2.h>
52 #include <sys/indefinite2.h>
53 
54 static void undo_shreq(struct lock *lkp);
55 static int undo_upreq(struct lock *lkp);
56 static int undo_exreq(struct lock *lkp);
57 
58 #ifdef DEBUG_CANCEL_LOCKS
59 
60 static int sysctl_cancel_lock(SYSCTL_HANDLER_ARGS);
61 static int sysctl_cancel_test(SYSCTL_HANDLER_ARGS);
62 
63 static struct lock cancel_lk;
64 LOCK_SYSINIT(cancellk, &cancel_lk, "cancel", 0);
65 SYSCTL_PROC(_kern, OID_AUTO, cancel_lock, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
66 	    sysctl_cancel_lock, "I", "test cancelable locks");
67 SYSCTL_PROC(_kern, OID_AUTO, cancel_test, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
68 	    sysctl_cancel_test, "I", "test cancelable locks");
69 
70 #endif
71 
72 int lock_test_mode;
73 SYSCTL_INT(_debug, OID_AUTO, lock_test_mode, CTLFLAG_RW,
74 	   &lock_test_mode, 0, "");
75 
76 /*
77  * Locking primitives implementation.
78  * Locks provide shared/exclusive sychronization.
79  */
80 
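/*
 * Rough summary of how the lk_count fields (see sys/lock.h for the
 * authoritative definitions) are used by the code below:
 *
 *	LKC_SHARED	lock is granted (or preconditioned) shared
 *	LKC_SMASK	shared-holder count, in units of LKC_SCOUNT
 *			(LKC_SSHIFT is the shift into this field)
 *	LKC_XMASK	exclusive recursion count, owner in lk_lockholder
 *	LKC_EXREQ	a single pending exclusive request
 *	LKC_EXREQ2	aggregation bit set by exclusive requesters that
 *			could not set EXREQ and want a wakeup
 *	LKC_UPREQ	pending shared-to-exclusive upgrade request
 *	LKC_CANCEL	cancellation in progress; blocked LK_CANCELABLE
 *			requests fail with ENOLCK
 */
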
81 #ifdef DEBUG_LOCKS
82 #define COUNT(td, x) (td)->td_locks += (x)
83 #else
84 #define COUNT(td, x) do { } while (0)
85 #endif
86 
87 /*
88  * Helper, assert basic conditions
89  */
90 static __inline void
91 _lockmgr_assert(struct lock *lkp, u_int flags)
92 {
93 	if (mycpu->gd_intr_nesting_level &&
94 	    (flags & LK_NOWAIT) == 0 &&
95 	    (flags & LK_TYPE_MASK) != LK_RELEASE &&
96 	    panic_cpu_gd != mycpu
97 	) {
98 		panic("lockmgr %s from %p: called from interrupt, ipi, "
99 		      "or hard code section",
100 		      lkp->lk_wmesg, ((int **)&lkp)[-1]);
101 	}
102 }
103 
104 /*
105  * Acquire a shared lock
106  */
107 int
108 lockmgr_shared(struct lock *lkp, u_int flags)
109 {
110 	uint32_t extflags;
111 	thread_t td;
112 	uint64_t count;
113 	int error;
114 	int pflags;
115 	int timo;
116 	int didloop;
117 
118 	_lockmgr_assert(lkp, flags);
119 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
120 	td = curthread;
121 
122 	count = lkp->lk_count;
123 	cpu_ccfence();
124 
125 	/*
126 	 * If the caller already holds the lock exclusively then
127 	 * we silently obtain another count on the exclusive lock.
128 	 * Avoid accessing lk_lockholder until testing exclusivity.
129 	 *
130 	 * WARNING!  The old FreeBSD behavior was to downgrade,
131 	 *	     but this creates a problem when recursions
132 	 *	     return to the caller and the caller expects
133 	 *	     its original exclusive lock to remain exclusively
134 	 *	     locked.
135 	 */
136 	if ((count & LKC_XMASK) && lkp->lk_lockholder == td) {
137 		KKASSERT(lkp->lk_count & LKC_XMASK);
138 		if ((extflags & LK_CANRECURSE) == 0) {
139 			if (extflags & LK_NOWAIT)
140 				return EBUSY;
141 			panic("lockmgr: locking against myself");
142 		}
143 		atomic_add_64(&lkp->lk_count, 1);
144 		COUNT(td, 1);
145 		return 0;
146 	}
147 
148 	/*
149 	 * Unless TDF_DEADLKTREAT is set, we cannot add LKC_SCOUNT while
150 	 * SHARED is set and either EXREQ or UPREQ is set.
151 	 *
152 	 * NOTE: In the race-to-0 case (see undo_shreq()), we could
153 	 *	 theoretically work the SMASK == 0 case here.
154 	 */
155 	if ((td->td_flags & TDF_DEADLKTREAT) == 0) {
156 		while ((count & LKC_SHARED) &&
157 		       (count & (LKC_EXREQ | LKC_UPREQ))) {
158 			/*
159 			 * Immediate failure conditions
160 			 */
161 			if (extflags & LK_CANCELABLE) {
162 				if (count & LKC_CANCEL)
163 					return ENOLCK;
164 			}
165 			if (extflags & LK_NOWAIT)
166 				return EBUSY;
167 
168 			/*
169 			 * Interlocked tsleep
170 			 */
171 			pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
172 			timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
173 
174 			tsleep_interlock(lkp, pflags);
175 			count = atomic_fetchadd_long(&lkp->lk_count, 0);
176 
177 			if ((count & LKC_SHARED) &&
178 			    (count & (LKC_EXREQ | LKC_UPREQ))) {
179 				error = tsleep(lkp, pflags | PINTERLOCKED,
180 					       lkp->lk_wmesg, timo);
181 				if (error)
182 					return error;
183 				count = lkp->lk_count;
184 				cpu_ccfence();
185 				continue;
186 			}
187 			break;
188 		}
189 	}
190 
191 	/*
192 	 * Bump the SCOUNT field.  The shared lock is granted only once
193 	 * the SHARED flag gets set.  If it is already set, we are done.
194 	 *
195 	 * (Racing an EXREQ or UPREQ operation is ok here, we already did
196 	 * our duty above).
197 	 */
198 	count = atomic_fetchadd_64(&lkp->lk_count, LKC_SCOUNT) + LKC_SCOUNT;
199 	error = 0;
200 	didloop = 0;
201 
202 	for (;;) {
203 		/*
204 		 * We may be able to grant ourselves the bit trivially.
205 		 * We're done once the SHARED bit is granted.
206 		 */
207 		if ((count & (LKC_XMASK | LKC_EXREQ |
208 			      LKC_UPREQ | LKC_SHARED)) == 0) {
209 			if (atomic_fcmpset_64(&lkp->lk_count,
210 					      &count, count | LKC_SHARED)) {
211 				/* count |= LKC_SHARED; NOT USED */
212 				break;
213 			}
214 			continue;
215 		}
216 		if ((td->td_flags & TDF_DEADLKTREAT) &&
217 		    (count & (LKC_XMASK | LKC_SHARED)) == 0) {
218 			if (atomic_fcmpset_64(&lkp->lk_count,
219 					      &count, count | LKC_SHARED)) {
220 				/* count |= LKC_SHARED; NOT USED */
221 				break;
222 			}
223 			continue;
224 		}
225 		if (count & LKC_SHARED)
226 			break;
227 
228 		/*
229 		 * Slow path
230 		 */
231 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
232 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
233 
234 		if (extflags & LK_CANCELABLE) {
235 			if (count & LKC_CANCEL) {
236 				undo_shreq(lkp);
237 				error = ENOLCK;
238 				break;
239 			}
240 		}
241 		if (extflags & LK_NOWAIT) {
242 			undo_shreq(lkp);
243 			error = EBUSY;
244 			break;
245 		}
246 
247 		/*
248 		 * Interlocked after the first loop.
249 		 */
250 		if (didloop) {
251 			error = tsleep(lkp, pflags | PINTERLOCKED,
252 				       lkp->lk_wmesg, timo);
253 			if (extflags & LK_SLEEPFAIL) {
254 				undo_shreq(lkp);
255 				error = ENOLCK;
256 				break;
257 			}
258 			if (error) {
259 				undo_shreq(lkp);
260 				break;
261 			}
262 		}
263 		didloop = 1;
264 
265 		/*
266 		 * Reload, shortcut grant case, then loop interlock
267 		 * and loop.
268 		 */
269 		count = lkp->lk_count;
270 		if (count & LKC_SHARED)
271 			break;
272 		tsleep_interlock(lkp, pflags);
273 		count = atomic_fetchadd_64(&lkp->lk_count, 0);
274 	}
275 	if (error == 0)
276 		COUNT(td, 1);
277 
278 	return error;
279 }
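
/*
 * Illustrative caller sketch (not part of the implementation proper):
 * these entry points are normally reached through the lockmgr() wrapper,
 * as in the DEBUG_CANCEL_LOCKS handlers at the bottom of this file:
 *
 *	if (lockmgr(&lk, LK_SHARED | LK_NOWAIT) == 0) {
 *		...
 *		lockmgr(&lk, LK_RELEASE);
 *	}
 */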
280 
281 /*
282  * Acquire an exclusive lock
283  */
284 int
285 lockmgr_exclusive(struct lock *lkp, u_int flags)
286 {
287 	uint64_t count;
288 	uint64_t ncount;
289 	uint32_t extflags;
290 	thread_t td;
291 	int error;
292 	int pflags;
293 	int timo;
294 
295 	_lockmgr_assert(lkp, flags);
296 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
297 	td = curthread;
298 
299 	error = 0;
300 	count = lkp->lk_count;
301 	cpu_ccfence();
302 
303 	/*
304 	 * Recursive lock if we already hold it exclusively.  Avoid testing
305 	 * lk_lockholder until after testing lk_count.
306 	 */
307 	if ((count & LKC_XMASK) && lkp->lk_lockholder == td) {
308 		if ((extflags & LK_CANRECURSE) == 0) {
309 			if (extflags & LK_NOWAIT)
310 				return EBUSY;
311 			panic("lockmgr: locking against myself");
312 		}
313 		count = atomic_fetchadd_64(&lkp->lk_count, 1) + 1;
314 		KKASSERT((count & LKC_XMASK) > 1);
315 		COUNT(td, 1);
316 		return 0;
317 	}
318 
319 	/*
320 	 * Trivially acquire the lock, or block until we can set EXREQ.
321 	 * Set EXREQ2 if EXREQ is already set or the lock is already
322 	 * held exclusively.  EXREQ2 is an aggregation bit to request
323 	 * a wakeup.
324 	 *
325 	 * WARNING! We cannot set EXREQ if the lock is already held
326 	 *	    exclusively because it may race another EXREQ
327 	 *	    being cleared and granted.  We use the exclusivity
328 	 *	    to prevent both EXREQ and UPREQ from being set.
329 	 *
330 	 *	    This means that both shared and exclusive requests
331 	 *	    have equal priority against a current exclusive holder's
332 	 *	    release.  Exclusive requests still have priority over
333 	 *	    new shared requests when the lock is already held shared.
334 	 */
335 	for (;;) {
336 		/*
337 		 * Normal trivial case
338 		 */
339 		if ((count & (LKC_UPREQ | LKC_EXREQ |
340 			      LKC_XMASK)) == 0 &&
341 		    ((count & LKC_SHARED) == 0 ||
342 		     (count & LKC_SMASK) == 0)) {
343 			ncount = (count + 1) & ~LKC_SHARED;
344 			if (atomic_fcmpset_64(&lkp->lk_count,
345 					      &count, ncount)) {
346 				lkp->lk_lockholder = td;
347 				COUNT(td, 1);
348 				return 0;
349 			}
350 			continue;
351 		}
352 
353 		if (extflags & LK_CANCELABLE) {
354 			if (count & LKC_CANCEL)
355 				return ENOLCK;
356 		}
357 		if (extflags & LK_NOWAIT)
358 			return EBUSY;
359 
360 		/*
361 		 * Interlock to set EXREQ or EXREQ2
362 		 */
363 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
364 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
365 
366 		if (count & (LKC_EXREQ | LKC_XMASK))
367 			ncount = count | LKC_EXREQ2;
368 		else
369 			ncount = count | LKC_EXREQ;
370 		tsleep_interlock(lkp, pflags);
371 		if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
372 			/*
373 			 * If we successfully transitioned to EXREQ we
374 			 * can break out, otherwise we had set EXREQ2 and
375 			 * we block.
376 			 */
377 			if ((count & (LKC_EXREQ | LKC_XMASK)) == 0) {
378 				count = ncount;
379 				break;
380 			}
381 
382 			error = tsleep(lkp, pflags | PINTERLOCKED,
383 				       lkp->lk_wmesg, timo);
384 			count = lkp->lk_count;	/* reload */
385 			cpu_ccfence();
386 		}
387 #ifdef INVARIANTS
388 		if (lock_test_mode > 0) {
389 			--lock_test_mode;
390 			print_backtrace(8);
391 		}
392 #endif
393 		if (error)
394 			return error;
395 		if (extflags & LK_SLEEPFAIL)
396 			return ENOLCK;
397 	}
398 
399 	/*
400  * Once EXREQ has been set, wait for it to be granted.
401 	 * We enter the loop with tsleep_interlock() already called.
402 	 */
403 	for (;;) {
404 		/*
405 		 * Waiting for EXREQ to be granted to us.
406 		 *
407 		 * NOTE! If we try to trivially get the exclusive lock
408 		 *	 (basically by racing undo_shreq()) and succeed,
409 		 *	 we must still wakeup(lkp) for another exclusive
410 		 *	 lock trying to acquire EXREQ.  Easier to simply
411 		 *	 wait for our own wakeup.
412 		 */
413 		if ((count & LKC_EXREQ) == 0) {
414 			KKASSERT(count & LKC_XMASK);
415 			lkp->lk_lockholder = td;
416 			COUNT(td, 1);
417 			break;
418 		}
419 
420 		/*
421 		 * Block waiting for our exreq to be granted.
422 		 * Check cancellation.  NOWAIT was already dealt with.
423 		 */
424 		if (extflags & LK_CANCELABLE) {
425 			if (count & LKC_CANCEL) {
426 				if (undo_exreq(lkp) == 0) {
427 					lkp->lk_lockholder = LK_KERNTHREAD;
428 					lockmgr_release(lkp, 0);
429 				}
430 				error = ENOLCK;
431 				break;
432 			}
433 		}
434 
435 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
436 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
437 
438 		error = tsleep(lkp, pflags | PINTERLOCKED, lkp->lk_wmesg, timo);
439 #ifdef INVARIANTS
440 		if (lock_test_mode > 0) {
441 			--lock_test_mode;
442 			print_backtrace(8);
443 		}
444 #endif
445 		/*
446 		 * A tsleep error is uncommon.  If it occurs we have to
447 		 * undo our EXREQ.  If we are granted the exclusive lock
448 		 * as we try to undo, we have to deal with it.
449 		 */
450 		if (extflags & LK_SLEEPFAIL) {
451 			if (undo_exreq(lkp) == 0) {
452 				lkp->lk_lockholder = LK_KERNTHREAD;
453 				lockmgr_release(lkp, 0);
454 			}
455 			if (error == 0)
456 				error = ENOLCK;
457 			break;
458 		}
459 		if (error) {
460 			if (undo_exreq(lkp))
461 				break;
462 			lkp->lk_lockholder = td;
463 			COUNT(td, 1);
464 			error = 0;
465 			break;
466 		}
467 
468 		/*
469 		 * Reload after sleep, shortcut grant case.
470 		 * Then set the interlock and loop.
471 		 */
472 		count = lkp->lk_count;
473 		cpu_ccfence();
474 		if ((count & LKC_EXREQ) == 0) {
475 			KKASSERT(count & LKC_XMASK);
476 			lkp->lk_lockholder = td;
477 			COUNT(td, 1);
478 			break;
479 		}
480 		tsleep_interlock(lkp, pflags);
481 		count = atomic_fetchadd_64(&lkp->lk_count, 0);
482 	}
483 	return error;
484 }
485 
486 /*
487  * Downgrade an exclusive lock to shared.
488  *
489  * This function always succeeds as long as the caller owns a legal
490  * exclusive lock with one reference.  UPREQ and EXREQ are ignored.
491  */
492 int
493 lockmgr_downgrade(struct lock *lkp, u_int flags)
494 {
495 	uint64_t count;
496 	uint64_t ncount;
497 	uint32_t extflags;
498 	thread_t otd;
499 	thread_t td;
500 
501 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
502 	td = curthread;
503 	count = lkp->lk_count;
504 
505 	for (;;) {
506 		cpu_ccfence();
507 
508 		/*
509 		 * Downgrade an exclusive lock into a shared lock.  All
510 		 * counts on a recursive exclusive lock become shared.
511 		 *
512 		 * NOTE: Currently to reduce confusion we only allow
513 		 *	 there to be one exclusive lock count, and panic
514 		 *	 if there are more.
515 		 */
516 		if (lkp->lk_lockholder != td || (count & LKC_XMASK) != 1) {
517 			panic("lockmgr: not holding exclusive lock: "
518 			      "%p/%p %016jx", lkp->lk_lockholder, td, count);
519 		}
520 
521 		/*
522 		 * NOTE! Must NULL-out lockholder before releasing the
523 		 *	 exclusive lock.
524 		 *
525 		 * NOTE! There might be pending shared requests, check
526 		 *	 and wake them up.
527 		 */
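		/*
		 * Convert the single exclusive count into the equivalent
		 * shared count: clear XMASK and EXREQ2, add the exclusive
		 * count shifted into the shared count field, and set
		 * SHARED.
		 */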
528 		otd = lkp->lk_lockholder;
529 		lkp->lk_lockholder = NULL;
530 		ncount = (count & ~(LKC_XMASK | LKC_EXREQ2)) +
531 			 ((count & LKC_XMASK) << LKC_SSHIFT);
532 		ncount |= LKC_SHARED;
533 
534 		if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
535 			/*
536 			 * Wakeup any shared waiters (prior SMASK), or
537 			 * any exclusive requests that couldn't set EXREQ
538 			 * because the lock had been held exclusively.
539 			 */
540 			if (count & (LKC_SMASK | LKC_EXREQ2))
541 				wakeup(lkp);
542 			/* count = ncount; NOT USED */
543 			break;
544 		}
545 		lkp->lk_lockholder = otd;
546 		/* retry */
547 	}
548 	return 0;
549 }
550 
551 /*
552  * Upgrade a shared lock to exclusive.  If LK_EXCLUPGRADE is set, guarantee
553  * that no other exclusive requester can get in front of us, and fail
554  * immediately if another upgrade is pending.  If we fail, the shared
555  * lock is released.
556  *
557  * If LK_EXCLUPGRADE is not set and we cannot upgrade because someone
558  * else is in front of us, we release the shared lock and acquire the
559  * exclusive lock normally.  If a failure occurs, the shared lock is
560  * released.
561  */
562 int
563 lockmgr_upgrade(struct lock *lkp, u_int flags)
564 {
565 	uint64_t count;
566 	uint64_t ncount;
567 	uint32_t extflags;
568 	thread_t td;
569 	int error;
570 	int pflags;
571 	int timo;
572 
573 	_lockmgr_assert(lkp, flags);
574 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
575 	td = curthread;
576 	error = 0;
577 	count = lkp->lk_count;
578 	cpu_ccfence();
579 
580 	/*
581 	 * If we already hold the lock exclusively this operation
582 	 * succeeds and is a NOP.
583 	 */
584 	if (count & LKC_XMASK) {
585 		if (lkp->lk_lockholder == td)
586 			return 0;
587 		panic("lockmgr: upgrade unowned lock");
588 	}
589 	if ((count & LKC_SMASK) == 0)
590 		panic("lockmgr: upgrade unowned lock");
591 
592 	/*
593 	 * Loop to acquire LKC_UPREQ
594 	 */
595 	for (;;) {
596 		/*
597 		 * If UPREQ is already pending, release the shared lock
598 		 * and acquire an exclusive lock normally.
599 		 *
600 		 * If NOWAIT or EXCLUPGRADE the operation must be atomic,
601 		 * and this isn't, so we fail.
602 		 */
603 		if (count & LKC_UPREQ) {
604 			lockmgr_release(lkp, 0);
605 			if ((flags & LK_TYPE_MASK) == LK_EXCLUPGRADE)
606 				error = EBUSY;
607 			else if (extflags & LK_NOWAIT)
608 				error = EBUSY;
609 			else
610 				error = lockmgr_exclusive(lkp, flags);
611 			return error;
612 		}
613 
614 		/*
615 		 * Try to immediately grant the upgrade, handle NOWAIT,
616 		 * or release the shared lock and simultaneously set UPREQ.
617 		 */
618 		if ((count & LKC_SMASK) == LKC_SCOUNT) {
619 			/*
620 			 * Immediate grant
621 			 */
622 			ncount = (count - LKC_SCOUNT + 1) & ~LKC_SHARED;
623 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
624 				lkp->lk_lockholder = td;
625 				return 0;
626 			}
627 		} else if (extflags & LK_NOWAIT) {
628 			/*
629 			 * Early EBUSY if an immediate grant is impossible
630 			 */
631 			lockmgr_release(lkp, 0);
632 			return EBUSY;
633 		} else {
634 			/*
635 			 * Multiple shared locks present, request the
636 			 * upgrade and break to the next loop.
637 			 */
638 			pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
639 			tsleep_interlock(lkp, pflags);
640 			ncount = (count - LKC_SCOUNT) | LKC_UPREQ;
641 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
642 				count = ncount;
643 				break;
644 			}
645 		}
646 		/* retry */
647 	}
648 
649 	/*
650 	 * We have acquired LKC_UPREQ, wait until the upgrade is granted
651 	 * or the tsleep fails.
652 	 *
653 	 * NOWAIT and EXCLUPGRADE have already been handled.  The first
654 	 * tsleep_interlock() has already been associated.
655 	 */
656 	for (;;) {
657 		cpu_ccfence();
658 
659 		/*
660 		 * We were granted our upgrade.  No other UPREQ can be
661 		 * made pending because we are now exclusive.
662 		 */
663 		if ((count & LKC_UPREQ) == 0) {
664 			KKASSERT((count & LKC_XMASK) == 1);
665 			lkp->lk_lockholder = td;
666 			break;
667 		}
668 
669 		if (extflags & LK_CANCELABLE) {
670 			if (count & LKC_CANCEL) {
671 				if (undo_upreq(lkp) == 0) {
672 					lkp->lk_lockholder = LK_KERNTHREAD;
673 					lockmgr_release(lkp, 0);
674 				}
675 				error = ENOLCK;
676 				break;
677 			}
678 		}
679 
680 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
681 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
682 
683 		error = tsleep(lkp, pflags | PINTERLOCKED, lkp->lk_wmesg, timo);
684 		if (extflags & LK_SLEEPFAIL) {
685 			if (undo_upreq(lkp) == 0) {
686 				lkp->lk_lockholder = LK_KERNTHREAD;
687 				lockmgr_release(lkp, 0);
688 			}
689 			if (error == 0)
690 				error = ENOLCK;
691 			break;
692 		}
693 		if (error) {
694 			if (undo_upreq(lkp))
695 				break;
696 			error = 0;
697 		}
698 
699 		/*
700 		 * Reload the lock, short-cut the UPGRANT code before
701 		 * taking the time to interlock and loop.
702 		 */
703 		count = lkp->lk_count;
704 		if ((count & LKC_UPREQ) == 0) {
705 			KKASSERT((count & LKC_XMASK) == 1);
706 			lkp->lk_lockholder = td;
707 			break;
708 		}
709 		tsleep_interlock(lkp, pflags);
710 		count = atomic_fetchadd_64(&lkp->lk_count, 0);
711 		/* retry */
712 	}
713 	return error;
714 }
715 
716 /*
717  * Release a held lock
718  *
719  * NOTE: When releasing to an unlocked state, we set the SHARED bit
720  *	 to optimize shared lock requests.
721  */
722 int
723 lockmgr_release(struct lock *lkp, u_int flags)
724 {
725 	uint64_t count;
726 	uint64_t ncount;
727 	uint32_t extflags;
728 	thread_t otd;
729 	thread_t td;
730 
731 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
732 	td = curthread;
733 
734 	count = lkp->lk_count;
735 	cpu_ccfence();
736 
737 	for (;;) {
738 		/*
739 		 * Release the currently held lock, grant all requests
740 		 * possible.
741 		 *
742 		 * WARNING! lksleep() assumes that LK_RELEASE does not
743 		 *	    block.
744 		 *
745 		 * Always succeeds.
746 		 * Never blocks.
747 		 */
748 		if ((count & (LKC_SMASK | LKC_XMASK)) == 0)
749 			panic("lockmgr: LK_RELEASE: no lock held");
750 
751 		if (count & LKC_XMASK) {
752 			/*
753 			 * Release exclusively held lock
754 			 */
755 			if (lkp->lk_lockholder != LK_KERNTHREAD &&
756 			    lkp->lk_lockholder != td) {
757 				panic("lockmgr: pid %d, not exclusive "
758 				      "lock holder thr %p/%p unlocking",
759 				    (td->td_proc ? td->td_proc->p_pid : -1),
760 				    td, lkp->lk_lockholder);
761 			}
762 			if ((count & (LKC_UPREQ | LKC_EXREQ |
763 				      LKC_XMASK)) == 1) {
764 				/*
765 				 * Last exclusive count is being released
766 				 * with no UPREQ or EXREQ.  The SHARED
767 				 * bit can be set or not without messing
768 				 * anything up, so precondition it to
769 				 * SHARED (which is the most cpu-optimal).
770 				 *
771 				 * Wakeup any EXREQ2.  EXREQ cannot be
772 				 * set while an exclusive count is present
773 				 * so we have to wakeup any EXREQ2 we find.
774 				 *
775 				 * We could hint the EXREQ2 by leaving
776 				 * SHARED unset, but atm I don't see any
777 				 * usefulness.
778 				 */
779 				otd = lkp->lk_lockholder;
780 				lkp->lk_lockholder = NULL;
781 				ncount = (count - 1);
782 				ncount &= ~(LKC_CANCEL | LKC_EXREQ2);
783 				ncount |= LKC_SHARED;
784 				if (atomic_fcmpset_64(&lkp->lk_count,
785 						      &count, ncount)) {
786 					if (count & (LKC_SMASK | LKC_EXREQ2))
787 						wakeup(lkp);
788 					if (otd != LK_KERNTHREAD)
789 						COUNT(td, -1);
790 					/* count = ncount; NOT USED */
791 					break;
792 				}
793 				lkp->lk_lockholder = otd;
794 				/* retry */
795 			} else if ((count & (LKC_UPREQ | LKC_XMASK)) ==
796 				   (LKC_UPREQ | 1)) {
797 				/*
798 				 * Last exclusive count is being released but
799 				 * an upgrade request is present, automatically
800 				 * grant an exclusive state to the owner of
801 				 * the upgrade request.  Transfer count to
802 				 * grant.
803 				 *
804 				 * EXREQ cannot be set while an exclusive
805 				 * holder exists, so do not clear EXREQ2.
806 				 */
807 				otd = lkp->lk_lockholder;
808 				lkp->lk_lockholder = NULL;
809 				ncount = count & ~LKC_UPREQ;
810 				if (atomic_fcmpset_64(&lkp->lk_count,
811 						      &count, ncount)) {
812 					wakeup(lkp);
813 					if (otd != LK_KERNTHREAD)
814 						COUNT(td, -1);
815 					/* count = ncount; NOT USED */
816 					break;
817 				}
818 				lkp->lk_lockholder = otd;
819 				/* retry */
820 			} else if ((count & (LKC_EXREQ | LKC_XMASK)) ==
821 				   (LKC_EXREQ | 1)) {
822 				/*
823 				 * Last exclusive count is being released but
824 				 * an exclusive request is present.  We
825 				 * automatically grant an exclusive state to
826 				 * the owner of the exclusive request,
827 				 * transferring our count.
828 				 *
829 				 * This case virtually never occurs because
830 				 * EXREQ is not set while exclusive holders
831 				 * exist.  However, it might be set if an
832 				 * exclusive request is pending and a
833 				 * shared holder upgrades.
834 				 *
835 				 * Don't bother clearing EXREQ2.  A thread
836 				 * waiting to set EXREQ can't do it while
837 				 * an exclusive lock is present.
838 				 */
839 				otd = lkp->lk_lockholder;
840 				lkp->lk_lockholder = NULL;
841 				ncount = count & ~LKC_EXREQ;
842 				if (atomic_fcmpset_64(&lkp->lk_count,
843 						      &count, ncount)) {
844 					wakeup(lkp);
845 					if (otd != LK_KERNTHREAD)
846 						COUNT(td, -1);
847 					/* count = ncount; NOT USED */
848 					break;
849 				}
850 				lkp->lk_lockholder = otd;
851 				/* retry */
852 			} else {
853 				/*
854 				 * Multiple exclusive counts, drop by 1.
855 				 * Since we are the holder and there is more
856 				 * than one count, we can just decrement it.
857 				 */
858 				count =
859 				    atomic_fetchadd_long(&lkp->lk_count, -1);
860 				/* count = count - 1  NOT NEEDED */
861 				if (lkp->lk_lockholder != LK_KERNTHREAD)
862 					COUNT(td, -1);
863 				break;
864 			}
865 			/* retry */
866 		} else {
867 			/*
868 			 * Release shared lock
869 			 */
870 			KKASSERT((count & LKC_SHARED) && (count & LKC_SMASK));
871 			if ((count & (LKC_EXREQ | LKC_UPREQ | LKC_SMASK)) ==
872 			    LKC_SCOUNT) {
873 				/*
874 				 * Last shared count is being released,
875 				 * no exclusive or upgrade request present.
876 				 * Generally leave the shared bit set.
877 				 * Clear the CANCEL bit.
878 				 */
879 				ncount = (count - LKC_SCOUNT) & ~LKC_CANCEL;
880 				if (atomic_fcmpset_64(&lkp->lk_count,
881 						      &count, ncount)) {
882 					COUNT(td, -1);
883 					/* count = ncount; NOT USED */
884 					break;
885 				}
886 				/* retry */
887 			} else if ((count & (LKC_UPREQ | LKC_SMASK)) ==
888 				   (LKC_UPREQ | LKC_SCOUNT)) {
889 				/*
890 				 * Last shared count is being released but
891 				 * an upgrade request is present, automatically
892 				 * grant an exclusive state to the owner of
893 				 * the upgrade request and transfer the count.
894 				 */
895 				ncount = (count - LKC_SCOUNT + 1) &
896 					 ~(LKC_UPREQ | LKC_CANCEL | LKC_SHARED);
897 				if (atomic_fcmpset_64(&lkp->lk_count,
898 						      &count, ncount)) {
899 					wakeup(lkp);
900 					COUNT(td, -1);
901 					/* count = ncount; NOT USED */
902 					break;
903 				}
904 				/* retry */
905 			} else if ((count & (LKC_EXREQ | LKC_SMASK)) ==
906 				   (LKC_EXREQ | LKC_SCOUNT)) {
907 				/*
908 				 * Last shared count is being released but
909 				 * an exclusive request is present, we
910 				 * automatically grant an exclusive state to
911 				 * the owner of the request and transfer
912 				 * the count.
913 				 */
914 				ncount = (count - LKC_SCOUNT + 1) &
915 					 ~(LKC_EXREQ | LKC_EXREQ2 |
916 					   LKC_CANCEL | LKC_SHARED);
917 				if (atomic_fcmpset_64(&lkp->lk_count,
918 						      &count, ncount)) {
919 					wakeup(lkp);
920 					COUNT(td, -1);
921 					/* count = ncount; NOT USED */
922 					break;
923 				}
924 				/* retry */
925 			} else {
926 				/*
927 				 * Shared count is greater than 1.  We can
928 				 * just use undo_shreq() to clean things up.
929 				 * undo_shreq() will also handle races to 0
930 				 * after the fact.
931 				 */
932 				undo_shreq(lkp);
933 				COUNT(td, -1);
934 				break;
935 			}
936 			/* retry */
937 		}
938 		/* retry */
939 	}
940 	return 0;
941 }
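
/*
 * Grant-priority note (descriptive): when the last count is released,
 * a pending UPREQ is granted in preference to a pending EXREQ, and only
 * when neither is pending does the lock return to the unlocked,
 * SHARED-preconditioned state.
 */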
942 
943 /*
944  * Start canceling blocked requesters or later requesters.
945  * Only blocked requesters using CANCELABLE can be canceled.
946  *
947  * This is intended to then allow other requesters (usually the
948  * caller) to obtain a non-cancelable lock.
949  *
950  * Don't waste time issuing a wakeup if nobody is pending.
951  */
952 int
953 lockmgr_cancel_beg(struct lock *lkp, u_int flags)
954 {
955 	uint64_t count;
956 
957 	count = lkp->lk_count;
958 	for (;;) {
959 		cpu_ccfence();
960 
961 		KKASSERT((count & LKC_CANCEL) == 0);	/* disallowed case */
962 
963 		/* issue w/lock held */
964 		KKASSERT((count & (LKC_XMASK | LKC_SMASK)) != 0);
965 
966 		if (!atomic_fcmpset_64(&lkp->lk_count,
967 				       &count, count | LKC_CANCEL)) {
968 			continue;
969 		}
970 		/* count |= LKC_CANCEL; NOT USED */
971 
972 		/*
973 		 * Wakeup any waiters.
974 		 *
975 		 * NOTE: EXREQ2 only matters when EXREQ is set, so don't
976 		 *	 bother checking EXREQ2.
977 		 */
978 		if (count & (LKC_EXREQ | LKC_SMASK | LKC_UPREQ)) {
979 			wakeup(lkp);
980 		}
981 		break;
982 	}
983 	return 0;
984 }
985 
986 /*
987  * End our cancel request (typically after we have acquired
988  * the lock ourselves).
989  */
990 int
991 lockmgr_cancel_end(struct lock *lkp, u_int flags)
992 {
993 	atomic_clear_long(&lkp->lk_count, LKC_CANCEL);
994 
995 	return 0;
996 }
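
/*
 * Illustrative sequence (mirrors the DEBUG_CANCEL_LOCKS sysctl handler
 * below).  Cancellation must be started with the lock held; blocked
 * LK_CANCELABLE requesters then fail with ENOLCK until the CANCEL state
 * is ended or the lock is fully released:
 *
 *	lockmgr(&lk, LK_EXCLUSIVE);
 *	lockmgr(&lk, LK_CANCEL_BEG);
 *	...
 *	lockmgr(&lk, LK_RELEASE);
 */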
997 
998 /*
999  * Back out SCOUNT from a failed shared lock attempt and handle any race
1000  * to 0.  This function is also used by the release code for the less
1001  * optimal race to 0 case.
1002  *
1003  * Always succeeds
1004  * Must not block
1005  */
1006 static void
1007 undo_shreq(struct lock *lkp)
1008 {
1009 	uint64_t count;
1010 	uint64_t ncount;
1011 
1012 	count = atomic_fetchadd_64(&lkp->lk_count, -LKC_SCOUNT) - LKC_SCOUNT;
1013 	while ((count & (LKC_EXREQ | LKC_UPREQ | LKC_CANCEL)) &&
1014 	       (count & (LKC_SMASK | LKC_XMASK)) == 0) {
1015 		/*
1016 		 * Note that UPREQ must have priority over EXREQ, and EXREQ
1017 		 * over CANCEL, so if the atomic op fails we have to loop up.
1018 		 */
1019 		if (count & LKC_UPREQ) {
1020 			ncount = (count + 1) & ~(LKC_UPREQ | LKC_CANCEL |
1021 						 LKC_SHARED);
1022 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1023 				wakeup(lkp);
1024 				/* count = ncount; NOT USED */
1025 				break;
1026 			}
1027 			continue;
1028 		}
1029 		if (count & LKC_EXREQ) {
1030 			ncount = (count + 1) & ~(LKC_EXREQ | LKC_EXREQ2 |
1031 						 LKC_CANCEL | LKC_SHARED);
1032 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1033 				wakeup(lkp);
1034 				/* count = ncount; NOT USED */
1035 				break;
1036 			}
1037 			continue;
1038 		}
1039 		if (count & LKC_CANCEL) {
1040 			ncount = count & ~LKC_CANCEL;
1041 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1042 				/* count = ncount; NOT USED */
1043 				break;
1044 			}
1045 		}
1046 		/* retry */
1047 	}
1048 }
1049 
1050 /*
1051  * Undo an exclusive request.  Returns EBUSY if we were able to undo the
1052  * request, and 0 if the request was granted before we could undo it.
1053  * When 0 is returned, the lock state has not been modified.  The caller
1054  * is responsible for setting the lockholder to curthread.
1055  */
1056 static
1057 int
1058 undo_exreq(struct lock *lkp)
1059 {
1060 	uint64_t count;
1061 	uint64_t ncount;
1062 	int error;
1063 
1064 	count = lkp->lk_count;
1065 	error = 0;
1066 
1067 	for (;;) {
1068 		cpu_ccfence();
1069 
1070 		if ((count & LKC_EXREQ) == 0) {
1071 			/*
1072 			 * EXREQ was granted.  We own the exclusive lock.
1073 			 */
1074 			break;
1075 		}
1076 		if (count & LKC_XMASK) {
1077 			/*
1078 			 * Clear the EXREQ we still own.  Only wakeup on
1079 			 * EXREQ2 if no UPREQ.  There are still exclusive
1080 			 * holders so do not wake up any shared locks or
1081 			 * any UPREQ.
1082 			 *
1083 			 * If there is an UPREQ it will issue a wakeup()
1084 			 * for any EXREQ wait loops, so we can clear EXREQ2
1085 			 * now.
1086 			 */
1087 			ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
1088 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1089 				if ((count & (LKC_EXREQ2 | LKC_UPREQ)) ==
1090 				    LKC_EXREQ2) {
1091 					wakeup(lkp);
1092 				}
1093 				error = EBUSY;
1094 				/* count = ncount; NOT USED */
1095 				break;
1096 			}
1097 			/* retry */
1098 		} else if (count & LKC_UPREQ) {
1099 			/*
1100 			 * Clear the EXREQ we still own.  We cannot wakeup any
1101 			 * shared or exclusive waiters because there is an
1102 			 * uprequest pending (that we do not handle here).
1103 			 *
1104 			 * If there is an UPREQ it will issue a wakeup()
1105 			 * for any EXREQ wait loops, so we can clear EXREQ2
1106 			 * now.
1107 			 */
1108 			ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
1109 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1110 				error = EBUSY;
1111 				break;
1112 			}
1113 			/* retry */
1114 		} else if ((count & LKC_SHARED) && (count & LKC_SMASK)) {
1115 			/*
1116 			 * No UPREQ, lock not held exclusively, but the lock
1117 			 * is held shared.  Clear EXREQ, wakeup anyone trying
1118 			 * to get the EXREQ bit (they have to set it
1119 			 * themselves, EXREQ2 is an aggregation).
1120 			 */
1121 			ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
1122 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1123 				if (count & LKC_EXREQ2)
1124 					wakeup(lkp);
1125 				error = EBUSY;
1126 				/* count = ncount; NOT USED */
1127 				break;
1128 			}
1129 			/* retry */
1130 		} else {
1131 			/*
1132 			 * No UPREQ, lock not held exclusively or shared.
1133 			 * Grant the EXREQ and wakeup anyone waiting on
1134 			 * EXREQ2.
1135 			 */
1136 			ncount = (count + 1) & ~(LKC_EXREQ | LKC_EXREQ2);
1137 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1138 				if (count & LKC_EXREQ2)
1139 					wakeup(lkp);
1140 				/* count = ncount; NOT USED */
1141 				/* we are granting, error == 0 */
1142 				break;
1143 			}
1144 			/* retry */
1145 		}
1146 		/* retry */
1147 	}
1148 	return error;
1149 }
1150 
1151 /*
1152  * Undo an upgrade request.  Returns EBUSY if we were able to undo the
1153  * request, and 0 if the request was granted before we could undo it.
1154  * When 0 is returned, the lock state has not been modified.  The caller
1155  * is responsible for setting the lockholder to curthread.
1156  */
1157 static
1158 int
1159 undo_upreq(struct lock *lkp)
1160 {
1161 	uint64_t count;
1162 	uint64_t ncount;
1163 	int error;
1164 
1165 	count = lkp->lk_count;
1166 	error = 0;
1167 
1168 	for (;;) {
1169 		cpu_ccfence();
1170 
1171 		if ((count & LKC_UPREQ) == 0) {
1172 			/*
1173 			 * UPREQ was granted
1174 			 */
1175 			break;
1176 		}
1177 		if (count & LKC_XMASK) {
1178 			/*
1179 			 * Clear the UPREQ we still own.  Nobody to wakeup
1180 			 * here because there is an existing exclusive
1181 			 * holder.
1182 			 */
1183 			if (atomic_fcmpset_64(&lkp->lk_count, &count,
1184 					      count & ~LKC_UPREQ)) {
1185 				error = EBUSY;
1186 				/* count &= ~LKC_UPREQ; NOT USED */
1187 				break;
1188 			}
1189 		} else if (count & LKC_EXREQ) {
1190 			/*
1191 			 * Clear the UPREQ we still own.  Grant the exclusive
1192 			 * request and wake it up.
1193 			 */
1194 			ncount = (count + 1);
1195 			ncount &= ~(LKC_EXREQ | LKC_EXREQ2 | LKC_UPREQ);
1196 
1197 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1198 				wakeup(lkp);
1199 				error = EBUSY;
1200 				/* count = ncount; NOT USED */
1201 				break;
1202 			}
1203 		} else {
1204 			/*
1205 			 * Clear the UPREQ we still own.  Wakeup any shared
1206 			 * waiters.
1207 			 */
1208 			ncount = count & ~LKC_UPREQ;
1209 			if (count & LKC_SMASK)
1210 				ncount |= LKC_SHARED;
1211 
1212 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1213 				if ((count & LKC_SHARED) == 0 &&
1214 				    (ncount & LKC_SHARED)) {
1215 					wakeup(lkp);
1216 				}
1217 				error = EBUSY;
1218 				/* count = ncount; NOT USED */
1219 				break;
1220 			}
1221 		}
1222 		/* retry */
1223 	}
1224 	return error;
1225 }
1226 
1227 void
1228 lockmgr_kernproc(struct lock *lp)
1229 {
1230 	struct thread *td __debugvar = curthread;
1231 
1232 	if (lp->lk_lockholder != LK_KERNTHREAD) {
1233 		KASSERT(lp->lk_lockholder == td,
1234 		    ("lockmgr_kernproc: lock not owned by curthread %p: %p",
1235 		    td, lp->lk_lockholder));
1236 		lp->lk_lockholder = LK_KERNTHREAD;
1237 		COUNT(td, -1);
1238 	}
1239 }
1240 
1241 /*
1242  * Initialize a lock; required before use.
1243  */
1244 void
1245 lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
1246 {
1247 	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
1248 	lkp->lk_count = 0;
1249 	lkp->lk_wmesg = wmesg;
1250 	lkp->lk_timo = timo;
1251 	lkp->lk_lockholder = NULL;
1252 }
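
/*
 * Example (illustrative, hypothetical lock name): a statically declared
 * lock can also be initialized at boot via LOCK_SYSINIT(), as done for
 * cancel_lk above:
 *
 *	static struct lock example_lk;
 *	LOCK_SYSINIT(examplelk, &example_lk, "example", 0);
 */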
1253 
1254 /*
1255  * Reinitialize a lock that is being reused for a different purpose, but
1256  * which may have pending (blocked) threads sitting on it.  The caller
1257  * must already hold the interlock.
1258  */
1259 void
1260 lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
1261 {
1262 	lkp->lk_wmesg = wmesg;
1263 	lkp->lk_timo = timo;
1264 }
1265 
1266 /*
1267  * De-initialize a lock.  The structure must no longer be used by anyone.
1268  */
1269 void
1270 lockuninit(struct lock *lkp)
1271 {
1272 	uint64_t count __unused;
1273 
1274 	count = lkp->lk_count;
1275 	cpu_ccfence();
1276 	KKASSERT((count & (LKC_EXREQ | LKC_UPREQ)) == 0 &&
1277 		 ((count & LKC_SHARED) || (count & LKC_SMASK) == 0));
1278 }
1279 
1280 /*
1281  * Determine the status of a lock.
1282  */
1283 int
1284 lockstatus(struct lock *lkp, struct thread *td)
1285 {
1286 	int lock_type = 0;
1287 	uint64_t count;
1288 
1289 	count = lkp->lk_count;
1290 	cpu_ccfence();
1291 
1292 	if (count & (LKC_XMASK | LKC_SMASK | LKC_EXREQ | LKC_UPREQ)) {
1293 		if (count & LKC_XMASK) {
1294 			if (td == NULL || lkp->lk_lockholder == td)
1295 				lock_type = LK_EXCLUSIVE;
1296 			else
1297 				lock_type = LK_EXCLOTHER;
1298 		} else if ((count & LKC_SMASK) && (count & LKC_SHARED)) {
1299 			lock_type = LK_SHARED;
1300 		}
1301 	}
1302 	return (lock_type);
1303 }
1304 
1305 /*
1306  * Return non-zero if the caller owns the lock shared or exclusive.
1307  * We can only guess re: shared locks.
1308  */
1309 int
1310 lockowned(struct lock *lkp)
1311 {
1312 	thread_t td = curthread;
1313 	uint64_t count;
1314 
1315 	count = lkp->lk_count;
1316 	cpu_ccfence();
1317 
1318 	if (count & LKC_XMASK)
1319 		return(lkp->lk_lockholder == td);
1320 	else
1321 		return((count & LKC_SMASK) != 0);
1322 }
1323 
1324 #if 0
1325 /*
1326  * Determine the number of holders of a lock.
1327  *
1328  * REMOVED - Cannot be used due to our use of atomic_fetchadd_64()
1329  *	     for shared locks.  Caller can only test if the lock has
1330  *	     a count or not using lockinuse(lk) (sys/lock.h)
1331  */
1332 int
1333 lockcount(struct lock *lkp)
1334 {
1335 	panic("lockcount cannot be used");
1336 }
1337 
1338 int
1339 lockcountnb(struct lock *lkp)
1340 {
1341 	panic("lockcount cannot be used");
1342 }
1343 #endif
1344 
1345 /*
1346  * Print out information about the state of a lock.  Used by VOP_PRINT
1347  * routines to display status about contained locks.
1348  */
1349 void
1350 lockmgr_printinfo(struct lock *lkp)
1351 {
1352 	struct thread *td = lkp->lk_lockholder;
1353 	struct proc *p;
1354 	uint64_t count;
1355 
1356 	count = lkp->lk_count;
1357 	cpu_ccfence();
1358 
1359 	if (td && td != LK_KERNTHREAD)
1360 		p = td->td_proc;
1361 	else
1362 		p = NULL;
1363 
1364 	if (count & LKC_XMASK) {
1365 		kprintf(" lock type %s: EXCLUS (count %016jx) by td %p pid %d",
1366 		    lkp->lk_wmesg, (intmax_t)count, td,
1367 		    p ? p->p_pid : -99);
1368 	} else if ((count & LKC_SMASK) && (count & LKC_SHARED)) {
1369 		kprintf(" lock type %s: SHARED (count %016jx)",
1370 		    lkp->lk_wmesg, (intmax_t)count);
1371 	} else {
1372 		kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
1373 	}
1374 	if ((count & (LKC_EXREQ | LKC_UPREQ)) ||
1375 	    ((count & LKC_XMASK) && (count & LKC_SMASK)))
1376 		kprintf(" with waiters\n");
1377 	else
1378 		kprintf("\n");
1379 }
1380 
1381 void
1382 lock_sysinit(struct lock_args *arg)
1383 {
1384 	lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
1385 }
1386 
1387 #ifdef DEBUG_CANCEL_LOCKS
1388 
1389 static
1390 int
1391 sysctl_cancel_lock(SYSCTL_HANDLER_ARGS)
1392 {
1393 	int error;
1394 
1395 	if (req->newptr) {
1396 		SYSCTL_XUNLOCK();
1397 		lockmgr(&cancel_lk, LK_EXCLUSIVE);
1398 		error = tsleep(&error, PCATCH, "canmas", hz * 5);
1399 		lockmgr(&cancel_lk, LK_CANCEL_BEG);
1400 		error = tsleep(&error, PCATCH, "canmas", hz * 5);
1401 		lockmgr(&cancel_lk, LK_RELEASE);
1402 		SYSCTL_XLOCK();
1403 		SYSCTL_OUT(req, &error, sizeof(error));
1404 	}
1405 	error = 0;
1406 
1407 	return error;
1408 }
1409 
1410 static
1411 int
1412 sysctl_cancel_test(SYSCTL_HANDLER_ARGS)
1413 {
1414 	int error;
1415 
1416 	if (req->newptr) {
1417 		error = lockmgr(&cancel_lk, LK_EXCLUSIVE|LK_CANCELABLE);
1418 		if (error == 0)
1419 			lockmgr(&cancel_lk, LK_RELEASE);
1420 		SYSCTL_OUT(req, &error, sizeof(error));
1421 		kprintf("test %d\n", error);
1422 	}
1423 
1424 	return 0;
1425 }
1426 
1427 #endif
1428