xref: /dragonfly/sys/kern/kern_lock.c (revision dcb5d66b)
1 /*
2  * Copyright (c) 1995
3  *	The Regents of the University of California.  All rights reserved.
4  * Copyright (C) 1997
5  *	John S. Dyson.  All rights reserved.
6  * Copyright (C) 2013-2017
7  *	Matthew Dillon, All rights reserved.
8  *
9  * This code contains ideas from software contributed to Berkeley by
10  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
11  * System project at Carnegie-Mellon University.
12  *
13  * This code is derived from software contributed to The DragonFly Project
14  * by Matthew Dillon <dillon@backplane.com>.  Extensively rewritten.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  */
40 
41 #include "opt_lint.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/proc.h>
47 #include <sys/lock.h>
48 #include <sys/sysctl.h>
49 #include <sys/spinlock.h>
50 #include <sys/thread2.h>
51 #include <sys/spinlock2.h>
52 #include <sys/indefinite2.h>
53 
54 static void undo_shreq(struct lock *lkp);
55 static int undo_upreq(struct lock *lkp);
56 static int undo_exreq(struct lock *lkp);
57 
58 #ifdef DEBUG_CANCEL_LOCKS
59 
60 static int sysctl_cancel_lock(SYSCTL_HANDLER_ARGS);
61 static int sysctl_cancel_test(SYSCTL_HANDLER_ARGS);
62 
63 static struct lock cancel_lk;
64 LOCK_SYSINIT(cancellk, &cancel_lk, "cancel", 0);
65 SYSCTL_PROC(_kern, OID_AUTO, cancel_lock, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
66 	    sysctl_cancel_lock, "I", "test cancelable locks");
67 SYSCTL_PROC(_kern, OID_AUTO, cancel_test, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
68 	    sysctl_cancel_test, "I", "test cancelable locks");
69 
70 #endif
71 
72 int lock_test_mode;
73 SYSCTL_INT(_debug, OID_AUTO, lock_test_mode, CTLFLAG_RW,
74 	   &lock_test_mode, 0, "");
75 
76 /*
77  * Locking primitives implementation.
78  * Locks provide shared/exclusive synchronization.
79  */
80 
81 #ifdef DEBUG_LOCKS
82 #define COUNT(td, x) (td)->td_locks += (x)
83 #else
84 #define COUNT(td, x) do { } while (0)
85 #endif
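
/*
 * Quick reference for the lk_count word manipulated throughout this
 * file (a summary only; the authoritative field definitions are in
 * <sys/lock.h>):
 *
 *	LKC_XMASK	exclusive hold (recursion) count
 *	LKC_SMASK	shared hold count, adjusted in LKC_SCOUNT units
 *	LKC_SHARED	lock is in (or preconditioned to) shared mode
 *	LKC_EXREQ	a blocked exclusive request is pending
 *	LKC_EXREQ2	aggregation bit: wake me so I can try to set EXREQ
 *	LKC_UPREQ	a blocked shared->exclusive upgrade is pending
 *	LKC_CANCEL	cancel in progress, fails LK_CANCELABLE requests
 *
 * All state transitions below operate on this single 64-bit word via
 * atomic_fcmpset_64() / atomic_fetchadd_64() loops.
 */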
86 
87 /*
88  * Helper, assert basic conditions
89  */
90 static __inline void
91 _lockmgr_assert(struct lock *lkp, u_int flags)
92 {
93 	if (mycpu->gd_intr_nesting_level &&
94 	    (flags & LK_NOWAIT) == 0 &&
95 	    (flags & LK_TYPE_MASK) != LK_RELEASE &&
96 	    panic_cpu_gd != mycpu
97 	) {
98 		panic("lockmgr %s from %p: called from interrupt, ipi, "
99 		      "or hard code section",
100 		      lkp->lk_wmesg, ((int **)&lkp)[-1]);
101 	}
102 }
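
/*
 * Illustrative caller-side usage (a sketch, not code from this file;
 * callers normally go through the lockmgr() wrapper in <sys/lock.h>,
 * which dispatches to the lockmgr_*() functions below based on the
 * LK_TYPE_MASK portion of the flags):
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, "examplk", 0, 0);
 *	lockmgr(&lk, LK_EXCLUSIVE);		blocking exclusive acquire
 *	lockmgr(&lk, LK_DOWNGRADE);		drop to shared
 *	lockmgr(&lk, LK_RELEASE);
 *	if (lockmgr(&lk, LK_SHARED | LK_NOWAIT) == 0)
 *		lockmgr(&lk, LK_RELEASE);
 *	lockuninit(&lk);
 */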
103 
104 /*
105  * Acquire a shared lock
106  */
107 int
108 lockmgr_shared(struct lock *lkp, u_int flags)
109 {
110 	uint32_t extflags;
111 	thread_t td;
112 	uint64_t count;
113 	int error;
114 	int pflags;
115 	int timo;
116 	int didloop;
117 
118 	_lockmgr_assert(lkp, flags);
119 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
120 	td = curthread;
121 
122 	count = lkp->lk_count;
123 	cpu_ccfence();
124 
125 	/*
126 	 * If the caller already holds the lock exclusively then
127 	 * we silently obtain another count on the exclusive lock.
128 	 * Avoid accessing lk_lockholder until testing exclusivity.
129 	 *
130 	 * WARNING!  The old FreeBSD behavior was to downgrade,
131 	 *	     but this creates a problem when recursions
132 	 *	     return to the caller and the caller expects
133 	 *	     its original exclusive lock to remain exclusively
134 	 *	     locked.
135 	 */
136 	if ((count & LKC_XMASK) && lkp->lk_lockholder == td) {
137 		KKASSERT(lkp->lk_count & LKC_XMASK);
138 		if ((extflags & LK_CANRECURSE) == 0) {
139 			if (extflags & LK_NOWAIT)
140 				return EBUSY;
141 			panic("lockmgr: locking against myself");
142 		}
143 		atomic_add_64(&lkp->lk_count, 1);
144 		COUNT(td, 1);
145 		return 0;
146 	}
147 
148 	/*
149 	 * Unless TDF_DEADLKTREAT is set, we cannot add LKC_SCOUNT while
150 	 * SHARED is set and either EXREQ or UPREQ is set.
151 	 *
152 	 * NOTE: In the race-to-0 case (see undo_shreq()), we could
153 	 *	 theoretically work the SMASK == 0 case here.
154 	 */
155 	if ((td->td_flags & TDF_DEADLKTREAT) == 0) {
156 		while ((count & LKC_SHARED) &&
157 		       (count & (LKC_EXREQ | LKC_UPREQ))) {
158 			/*
159 			 * Immediate failure conditions
160 			 */
161 			if (extflags & LK_CANCELABLE) {
162 				if (count & LKC_CANCEL)
163 					return ENOLCK;
164 			}
165 			if (extflags & LK_NOWAIT)
166 				return EBUSY;
167 
168 			/*
169 			 * Interlocked tsleep
170 			 */
171 			pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
172 			timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
173 
174 			tsleep_interlock(lkp, pflags);
175 			count = atomic_fetchadd_long(&lkp->lk_count, 0);
176 
177 			if ((count & LKC_SHARED) &&
178 			    (count & (LKC_EXREQ | LKC_UPREQ))) {
179 				error = tsleep(lkp, pflags | PINTERLOCKED,
180 					       lkp->lk_wmesg, timo);
181 				if (error)
182 					return error;
183 				count = lkp->lk_count;
184 				cpu_ccfence();
185 				continue;
186 			}
187 			break;
188 		}
189 	}
190 
191 	/*
192 	 * Bump the SCOUNT field.  The shared lock is granted only once
193 	 * the SHARED flag gets set.  If it is already set, we are done.
194 	 *
195 	 * (Racing an EXREQ or UPREQ operation is ok here, we already did
196 	 * our duty above).
197 	 */
198 	count = atomic_fetchadd_64(&lkp->lk_count, LKC_SCOUNT) + LKC_SCOUNT;
199 	error = 0;
200 	didloop = 0;
201 
202 	for (;;) {
203 		/*
204 		 * We may be able to grant ourselves the bit trivially.
205 		 * We're done once the SHARED bit is granted.
206 		 */
207 		if ((count & (LKC_XMASK | LKC_EXREQ |
208 			      LKC_UPREQ | LKC_SHARED)) == 0) {
209 			if (atomic_fcmpset_64(&lkp->lk_count,
210 					      &count, count | LKC_SHARED)) {
211 				/* count |= LKC_SHARED; NOT USED */
212 				break;
213 			}
214 			continue;
215 		}
216 		if ((td->td_flags & TDF_DEADLKTREAT) &&
217 		    (count & (LKC_XMASK | LKC_SHARED)) == 0) {
218 			if (atomic_fcmpset_64(&lkp->lk_count,
219 					      &count, count | LKC_SHARED)) {
220 				/* count |= LKC_SHARED; NOT USED */
221 				break;
222 			}
223 			continue;
224 		}
225 		if (count & LKC_SHARED)
226 			break;
227 
228 		/*
229 		 * Slow path
230 		 */
231 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
232 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
233 
234 		if (extflags & LK_CANCELABLE) {
235 			if (count & LKC_CANCEL) {
236 				undo_shreq(lkp);
237 				error = ENOLCK;
238 				break;
239 			}
240 		}
241 		if (extflags & LK_NOWAIT) {
242 			undo_shreq(lkp);
243 			error = EBUSY;
244 			break;
245 		}
246 
247 		/*
248 		 * Interlocked after the first loop.
249 		 */
250 		if (didloop) {
251 			error = tsleep(lkp, pflags | PINTERLOCKED,
252 				       lkp->lk_wmesg, timo);
253 			if (extflags & LK_SLEEPFAIL) {
254 				undo_shreq(lkp);
255 				error = ENOLCK;
256 				break;
257 			}
258 			if (error) {
259 				undo_shreq(lkp);
260 				break;
261 			}
262 		}
263 		didloop = 1;
264 
265 		/*
266 		 * Reload, shortcut grant case, then loop interlock
267 		 * and loop.
268 		 */
269 		count = lkp->lk_count;
270 		if (count & LKC_SHARED)
271 			break;
272 		tsleep_interlock(lkp, pflags);
273 		count = atomic_fetchadd_64(&lkp->lk_count, 0);
274 	}
275 	if (error == 0)
276 		COUNT(td, 1);
277 
278 	return error;
279 }
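
/*
 * Common error returns from the shared path above (illustrative; see
 * the code for the exact conditions):
 *
 *	LK_SHARED | LK_NOWAIT		EBUSY instead of blocking
 *	LK_SHARED | LK_CANCELABLE	ENOLCK once LKC_CANCEL is set
 *	LK_SHARED | LK_PCATCH		tsleep() may fail on a signal
 *	LK_SHARED | LK_SLEEPFAIL	ENOLCK after any sleep
 */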
280 
281 /*
282  * Acquire an exclusive lock
283  */
284 int
285 lockmgr_exclusive(struct lock *lkp, u_int flags)
286 {
287 	uint64_t count;
288 	uint64_t ncount;
289 	uint32_t extflags;
290 	thread_t td;
291 	int error;
292 	int pflags;
293 	int timo;
294 
295 	_lockmgr_assert(lkp, flags);
296 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
297 	td = curthread;
298 
299 	error = 0;
300 	count = lkp->lk_count;
301 	cpu_ccfence();
302 
303 	/*
304 	 * Recursive lock if we already hold it exclusively.  Avoid testing
305 	 * lk_lockholder until after testing lk_count.
306 	 */
307 	if ((count & LKC_XMASK) && lkp->lk_lockholder == td) {
308 		if ((extflags & LK_CANRECURSE) == 0) {
309 			if (extflags & LK_NOWAIT)
310 				return EBUSY;
311 			panic("lockmgr: locking against myself");
312 		}
313 		count = atomic_fetchadd_64(&lkp->lk_count, 1) + 1;
314 		KKASSERT((count & LKC_XMASK) > 1);
315 		COUNT(td, 1);
316 		return 0;
317 	}
318 
319 	/*
320 	 * Trivially acquire the lock, or block until we can set EXREQ.
321 	 * Set EXREQ2 if EXREQ is already set or the lock is already
322 	 * held exclusively.  EXREQ2 is an aggregation bit to request
323 	 * a wakeup.
324 	 *
325 	 * WARNING! We cannot set EXREQ if the lock is already held
326 	 *	    exclusively because it may race another EXREQ
327 	 *	    being cleared and granted.  We use the exclusivity
328 	 *	    to prevent both EXREQ and UPREQ from being set.
329 	 *
330 	 *	    This means that both shared and exclusive requests
331 	 *	    have equal priority against a current exclusive holder's
332 	 *	    release.  Exclusive requests still have priority over
333 	 *	    new shared requests when the lock is already held shared.
334 	 */
335 	for (;;) {
336 		/*
337 		 * Normal trivial case
338 		 */
339 		if ((count & (LKC_UPREQ | LKC_EXREQ |
340 			      LKC_XMASK)) == 0 &&
341 		    ((count & LKC_SHARED) == 0 ||
342 		     (count & LKC_SMASK) == 0)) {
343 			ncount = (count + 1) & ~LKC_SHARED;
344 			if (atomic_fcmpset_64(&lkp->lk_count,
345 					      &count, ncount)) {
346 				lkp->lk_lockholder = td;
347 				COUNT(td, 1);
348 				return 0;
349 			}
350 			continue;
351 		}
352 
353 		if (extflags & LK_CANCELABLE) {
354 			if (count & LKC_CANCEL)
355 				return ENOLCK;
356 		}
357 		if (extflags & LK_NOWAIT)
358 			return EBUSY;
359 
360 		/*
361 		 * Interlock to set EXREQ or EXREQ2
362 		 */
363 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
364 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
365 
366 		if (count & (LKC_EXREQ | LKC_XMASK))
367 			ncount = count | LKC_EXREQ2;
368 		else
369 			ncount = count | LKC_EXREQ;
370 		tsleep_interlock(lkp, pflags);
371 		if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
372 			/*
373 			 * If we successfully transitioned to EXREQ we
374 			 * can break out, otherwise we had set EXREQ2 and
375 			 * we block.
376 			 */
377 			if ((count & (LKC_EXREQ | LKC_XMASK)) == 0) {
378 				count = ncount;
379 				break;
380 			}
381 
382 			error = tsleep(lkp, pflags | PINTERLOCKED,
383 				       lkp->lk_wmesg, timo);
384 			count = lkp->lk_count;	/* reload */
385 			cpu_ccfence();
386 		}
387 #ifdef INVARIANTS
388 		if (lock_test_mode > 0) {
389 			--lock_test_mode;
390 			print_backtrace(8);
391 		}
392 #endif
393 		if (error)
394 			return error;
395 		if (extflags & LK_SLEEPFAIL)
396 			return ENOLCK;
397 	}
398 
399 	/*
400  * Once EXREQ has been set, wait for it to be granted.
401 	 * We enter the loop with tsleep_interlock() already called.
402 	 */
403 	for (;;) {
404 		/*
405 		 * Waiting for EXREQ to be granted to us.
406 		 *
407 		 * NOTE! If we try to trivially get the exclusive lock
408 		 *	 (basically by racing undo_shreq()) and succeed,
409 		 *	 we must still wakeup(lkp) for another exclusive
410 		 *	 lock trying to acquire EXREQ.  Easier to simply
411 		 *	 wait for our own wakeup.
412 		 */
413 		if ((count & LKC_EXREQ) == 0) {
414 			KKASSERT(count & LKC_XMASK);
415 			lkp->lk_lockholder = td;
416 			COUNT(td, 1);
417 			break;
418 		}
419 
420 		/*
421 		 * Block waiting for our exreq to be granted.
422 		 * Check cancellation.  NOWAIT was already dealt with.
423 		 */
424 		if (extflags & LK_CANCELABLE) {
425 			if (count & LKC_CANCEL) {
426 				if (undo_exreq(lkp) == 0) {
427 					lkp->lk_lockholder = LK_KERNTHREAD;
428 					lockmgr_release(lkp, 0);
429 				}
430 				error = ENOLCK;
431 				break;
432 			}
433 		}
434 
435 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
436 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
437 
438 		error = tsleep(lkp, pflags | PINTERLOCKED, lkp->lk_wmesg, timo);
439 #ifdef INVARIANTS
440 		if (lock_test_mode > 0) {
441 			--lock_test_mode;
442 			print_backtrace(8);
443 		}
444 #endif
445 		/*
446 		 * A tsleep error is uncommon.  If it occurs we have to
447 		 * undo our EXREQ.  If we are granted the exclusive lock
448 		 * as we try to undo we have to deal with it.
449 		 */
450 		if (extflags & LK_SLEEPFAIL) {
451 			if (undo_exreq(lkp) == 0) {
452 				lkp->lk_lockholder = LK_KERNTHREAD;
453 				lockmgr_release(lkp, 0);
454 			}
455 			if (error == 0)
456 				error = ENOLCK;
457 			break;
458 		}
459 		if (error) {
460 			if (undo_exreq(lkp))
461 				break;
462 			lkp->lk_lockholder = td;
463 			COUNT(td, 1);
464 			error = 0;
465 			break;
466 		}
467 
468 		/*
469 		 * Reload after sleep, shortcut grant case.
470 		 * Then set the interlock and loop.
471 		 */
472 		count = lkp->lk_count;
473 		cpu_ccfence();
474 		if ((count & LKC_EXREQ) == 0) {
475 			KKASSERT(count & LKC_XMASK);
476 			lkp->lk_lockholder = td;
477 			COUNT(td, 1);
478 			break;
479 		}
480 		tsleep_interlock(lkp, pflags);
481 		count = atomic_fetchadd_64(&lkp->lk_count, 0);
482 	}
483 	return error;
484 }
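
/*
 * Recursive exclusive acquisition requires LK_CANRECURSE, either
 * passed in or or'd into lk_flags at lockinit() time (illustrative):
 *
 *	lockmgr(&lk, LK_EXCLUSIVE);
 *	lockmgr(&lk, LK_EXCLUSIVE | LK_CANRECURSE);	bumps the XMASK count
 *	lockmgr(&lk, LK_RELEASE);
 *	lockmgr(&lk, LK_RELEASE);
 *
 * Without LK_CANRECURSE the second acquisition panics, or returns
 * EBUSY if LK_NOWAIT is also specified.
 */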
485 
486 /*
487  * Downgrade an exclusive lock to shared.
488  *
489  * This function always succeeds as long as the caller owns a legal
490  * exclusive lock with one reference.  UPREQ and EXREQ are ignored.
491  */
492 int
493 lockmgr_downgrade(struct lock *lkp, u_int flags)
494 {
495 	uint64_t count;
496 	uint64_t ncount;
497 	uint32_t extflags;
498 	thread_t otd;
499 	thread_t td;
500 
501 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
502 	td = curthread;
503 	count = lkp->lk_count;
504 
505 	for (;;) {
506 		cpu_ccfence();
507 
508 		/*
509 		 * Downgrade an exclusive lock into a shared lock.  All
510 		 * counts on a recursive exclusive lock become shared.
511 		 *
512 		 * NOTE: Currently to reduce confusion we only allow
513 		 *	 there to be one exclusive lock count, and panic
514 		 *	 if there are more.
515 		 */
516 		if (lkp->lk_lockholder != td || (count & LKC_XMASK) != 1) {
517 			panic("lockmgr: not holding exclusive lock: "
518 			      "%p/%p %016jx", lkp->lk_lockholder, td, count);
519 		}
520 
521 		/*
522 		 * NOTE! Must NULL-out lockholder before releasing the
523 		 *	 exclusive lock.
524 		 *
525 		 * NOTE! There might be pending shared requests, check
526 		 *	 and wake them up.
527 		 */
528 		otd = lkp->lk_lockholder;
529 		lkp->lk_lockholder = NULL;
530 		ncount = (count & ~(LKC_XMASK | LKC_EXREQ2)) +
531 			 ((count & LKC_XMASK) << LKC_SSHIFT);
532 		ncount |= LKC_SHARED;
533 
534 		if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
535 			/*
536 			 * Wakeup any shared waiters (prior SMASK), or
537 			 * any exclusive requests that couldn't set EXREQ
538 			 * because the lock had been held exclusively.
539 			 */
540 			if (count & (LKC_SMASK | LKC_EXREQ2))
541 				wakeup(lkp);
542 			/* count = ncount; NOT USED */
543 			break;
544 		}
545 		lkp->lk_lockholder = otd;
546 		/* retry */
547 	}
548 	return 0;
549 }
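
/*
 * Per the NOTE and panic above, a downgrade currently requires exactly
 * one exclusive count held by the caller (illustrative):
 *
 *	lockmgr(&lk, LK_EXCLUSIVE);
 *	lockmgr(&lk, LK_DOWNGRADE);	now held shared, waiters woken
 *	lockmgr(&lk, LK_RELEASE);
 */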
550 
551 /*
552  * Upgrade a shared lock to exclusive.  If LK_EXCLUPGRADE is specified,
553  * guarantee that no other exclusive requester can get in front of us,
554  * and fail immediately if another upgrade is already pending.  If we
555  * fail, the shared lock is released.
556  *
557  * If LK_EXCLUPGRADE is not set and we cannot upgrade because someone
558  * else is in front of us, we release the shared lock and acquire the
559  * exclusive lock normally.  If a failure occurs, the shared lock is
560  * released.
561  */
562 int
563 lockmgr_upgrade(struct lock *lkp, u_int flags)
564 {
565 	uint64_t count;
566 	uint64_t ncount;
567 	uint32_t extflags;
568 	thread_t td;
569 	int error;
570 	int pflags;
571 	int timo;
572 
573 	_lockmgr_assert(lkp, flags);
574 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
575 	td = curthread;
576 	error = 0;
577 	count = lkp->lk_count;
578 	cpu_ccfence();
579 
580 	/*
581 	 * If we already hold the lock exclusively this operation
582 	 * succeeds and is a NOP.
583 	 */
584 	if (count & LKC_XMASK) {
585 		if (lkp->lk_lockholder == td)
586 			return 0;
587 		panic("lockmgr: upgrade unowned lock");
588 	}
589 	if ((count & LKC_SMASK) == 0)
590 		panic("lockmgr: upgrade unowned lock");
591 
592 	/*
593 	 * Loop to acquire LKC_UPREQ
594 	 */
595 	for (;;) {
596 		/*
597 		 * If UPREQ is already pending, release the shared lock
598 		 * and acquire an exclusive lock normally.
599 		 *
600 		 * If NOWAIT or EXCLUPGRADE is specified the operation must
601 		 * be atomic, and this path is not, so we fail.
602 		 */
603 		if (count & LKC_UPREQ) {
604 			lockmgr_release(lkp, 0);
605 			if ((flags & LK_TYPE_MASK) == LK_EXCLUPGRADE)
606 				error = EBUSY;
607 			else if (extflags & LK_NOWAIT)
608 				error = EBUSY;
609 			else
610 				error = lockmgr_exclusive(lkp, flags);
611 			return error;
612 		}
613 
614 		/*
615 		 * Try to immediately grant the upgrade, handle NOWAIT,
616 		 * or release the shared lock and simultaneously set UPREQ.
617 		 */
618 		if ((count & LKC_SMASK) == LKC_SCOUNT) {
619 			/*
620 			 * Immediate grant
621 			 */
622 			ncount = (count - LKC_SCOUNT + 1) & ~LKC_SHARED;
623 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
624 				lkp->lk_lockholder = td;
625 				return 0;
626 			}
627 		} else if (extflags & LK_NOWAIT) {
628 			/*
629 			 * Early EBUSY if an immediate grant is impossible
630 			 */
631 			lockmgr_release(lkp, 0);
632 			return EBUSY;
633 		} else {
634 			/*
635 			 * Multiple shared locks present, request the
636 			 * upgrade and break to the next loop.
637 			 */
638 			pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
639 			tsleep_interlock(lkp, pflags);
640 			ncount = (count - LKC_SCOUNT) | LKC_UPREQ;
641 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
642 				count = ncount;
643 				break;
644 			}
645 		}
646 		/* retry */
647 	}
648 
649 	/*
650 	 * We have acquired LKC_UPREQ, wait until the upgrade is granted
651 	 * or the tsleep fails.
652 	 *
653 	 * NOWAIT and EXCLUPGRADE have already been handled.  The first
654 	 * tsleep_interlock() has already been associated.
655 	 */
656 	for (;;) {
657 		cpu_ccfence();
658 
659 		/*
660 		 * We were granted our upgrade.  No other UPREQ can be
661 		 * made pending because we are now exclusive.
662 		 */
663 		if ((count & LKC_UPREQ) == 0) {
664 			KKASSERT((count & LKC_XMASK) == 1);
665 			lkp->lk_lockholder = td;
666 			break;
667 		}
668 
669 		if (extflags & LK_CANCELABLE) {
670 			if (count & LKC_CANCEL) {
671 				if (undo_upreq(lkp) == 0) {
672 					lkp->lk_lockholder = LK_KERNTHREAD;
673 					lockmgr_release(lkp, 0);
674 				}
675 				error = ENOLCK;
676 				break;
677 			}
678 		}
679 
680 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
681 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
682 
683 		error = tsleep(lkp, pflags | PINTERLOCKED, lkp->lk_wmesg, timo);
684 		if (extflags & LK_SLEEPFAIL) {
685 			if (undo_upreq(lkp) == 0) {
686 				lkp->lk_lockholder = LK_KERNTHREAD;
687 				lockmgr_release(lkp, 0);
688 			}
689 			if (error == 0)
690 				error = ENOLCK;
691 			break;
692 		}
693 		if (error) {
694 			if (undo_upreq(lkp))
695 				break;
696 			error = 0;
697 		}
698 
699 		/*
700 		 * Reload the lock, short-cut the UPGRANT code before
701 		 * taking the time to interlock and loop.
702 		 */
703 		count = lkp->lk_count;
704 		if ((count & LKC_UPREQ) == 0) {
705 			KKASSERT((count & LKC_XMASK) == 1);
706 			lkp->lk_lockholder = td;
707 			break;
708 		}
709 		tsleep_interlock(lkp, pflags);
710 		count = atomic_fetchadd_64(&lkp->lk_count, 0);
711 		/* retry */
712 	}
713 	return error;
714 }
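
/*
 * Illustrative upgrade usage (a sketch; LK_UPGRADE releases the shared
 * lock and falls back to a normal exclusive acquisition if another
 * upgrade is already pending, whereas LK_EXCLUPGRADE fails with EBUSY
 * instead):
 *
 *	lockmgr(&lk, LK_SHARED);
 *	if (lockmgr(&lk, LK_UPGRADE) != 0) {
 *		... the shared lock has been released; re-acquire or bail
 *	}
 */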
715 
716 /*
717  * Release a held lock
718  *
719  * NOTE: When releasing to an unlocked state, we set the SHARED bit
720  *	 to optimize shared lock requests.
721  */
722 int
723 lockmgr_release(struct lock *lkp, u_int flags)
724 {
725 	uint64_t count;
726 	uint64_t ncount;
727 	uint32_t extflags;
728 	thread_t otd;
729 	thread_t td;
730 
731 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
732 	td = curthread;
733 
734 	count = lkp->lk_count;
735 	cpu_ccfence();
736 
737 	for (;;) {
738 		/*
739 		 * Release the currently held lock, grant all requests
740 		 * possible.
741 		 *
742 		 * WARNING! lksleep() assumes that LK_RELEASE does not
743 		 *	    block.
744 		 *
745 		 * Always succeeds.
746 		 * Never blocks.
747 		 */
748 		if ((count & (LKC_SMASK | LKC_XMASK)) == 0)
749 			panic("lockmgr: LK_RELEASE: no lock held");
750 
751 		if (count & LKC_XMASK) {
752 			/*
753 			 * Release exclusively held lock
754 			 */
755 			if (lkp->lk_lockholder != LK_KERNTHREAD &&
756 			    lkp->lk_lockholder != td) {
757 				panic("lockmgr: pid %d, not exclusive "
758 				      "lock holder thr %p/%p unlocking",
759 				    (td->td_proc ? td->td_proc->p_pid : -1),
760 				    td, lkp->lk_lockholder);
761 			}
762 			if ((count & (LKC_UPREQ | LKC_EXREQ |
763 				      LKC_XMASK)) == 1) {
764 				/*
765 				 * Last exclusive count is being released
766 				 * with no UPREQ or EXREQ.  The SHARED
767 				 * bit can be set or not without messing
768 				 * anything up, so precondition it to
769 				 * SHARED (which is the most cpu-optimal).
770 				 *
771 				 * Wakeup any EXREQ2.  EXREQ cannot be
772 				 * set while an exclusive count is present
773 				 * so we have to wakeup any EXREQ2 we find.
774 				 *
775 				 * We could hint the EXREQ2 by leaving
776 				 * SHARED unset, but atm I don't see any
777 				 * usefulness.
778 				 */
779 				otd = lkp->lk_lockholder;
780 				lkp->lk_lockholder = NULL;
781 				ncount = (count - 1);
782 				ncount &= ~(LKC_CANCEL | LKC_EXREQ2);
783 				ncount |= LKC_SHARED;
784 				if (atomic_fcmpset_64(&lkp->lk_count,
785 						      &count, ncount)) {
786 					if (count & (LKC_SMASK | LKC_EXREQ2))
787 						wakeup(lkp);
788 					if (otd != LK_KERNTHREAD)
789 						COUNT(td, -1);
790 					/* count = ncount; NOT USED */
791 					break;
792 				}
793 				lkp->lk_lockholder = otd;
794 				/* retry */
795 			} else if ((count & (LKC_UPREQ | LKC_XMASK)) ==
796 				   (LKC_UPREQ | 1)) {
797 				/*
798 				 * Last exclusive count is being released but
799 				 * an upgrade request is present, automatically
800 				 * grant an exclusive state to the owner of
801 				 * the upgrade request.  Transfer count to
802 				 * grant.
803 				 *
804 				 * EXREQ cannot be set while an exclusive
805 				 * holder exists, so do not clear EXREQ2.
806 				 */
807 				otd = lkp->lk_lockholder;
808 				lkp->lk_lockholder = NULL;
809 				ncount = count & ~LKC_UPREQ;
810 				if (atomic_fcmpset_64(&lkp->lk_count,
811 						      &count, ncount)) {
812 					wakeup(lkp);
813 					if (otd != LK_KERNTHREAD)
814 						COUNT(td, -1);
815 					/* count = ncount; NOT USED */
816 					break;
817 				}
818 				lkp->lk_lockholder = otd;
819 				/* retry */
820 			} else if ((count & (LKC_EXREQ | LKC_XMASK)) ==
821 				   (LKC_EXREQ | 1)) {
822 				/*
823 				 * Last exclusive count is being released but
824 				 * an exclusive request is present.  We
825 				 * automatically grant an exclusive state to
826 				 * the owner of the exclusive request,
827 				 * transferring our count.
828 				 *
829 				 * This case virtually never occurs because
830 				 * EXREQ is not set while exclusive holders
831 				 * exist.  However, it might be set if an
832 				 * exclusive request is pending and a
833 				 * shared holder upgrades.
834 				 *
835 				 * Don't bother clearing EXREQ2.  A thread
836 				 * waiting to set EXREQ can't do it while
837 				 * an exclusive lock is present.
838 				 */
839 				otd = lkp->lk_lockholder;
840 				lkp->lk_lockholder = NULL;
841 				ncount = count & ~LKC_EXREQ;
842 				if (atomic_fcmpset_64(&lkp->lk_count,
843 						      &count, ncount)) {
844 					wakeup(lkp);
845 					if (otd != LK_KERNTHREAD)
846 						COUNT(td, -1);
847 					/* count = ncount; NOT USED */
848 					break;
849 				}
850 				lkp->lk_lockholder = otd;
851 				/* retry */
852 			} else {
853 				/*
854 				 * Multiple exclusive counts, drop by 1.
855 				 * Since we are the holder and there is more
856 				 * than one count, we can just decrement it.
857 				 */
858 				count =
859 				    atomic_fetchadd_long(&lkp->lk_count, -1);
860 				/* count = count - 1  NOT NEEDED */
861 				if (lkp->lk_lockholder != LK_KERNTHREAD)
862 					COUNT(td, -1);
863 				break;
864 			}
865 			/* retry */
866 		} else {
867 			/*
868 			 * Release shared lock
869 			 */
870 			KKASSERT((count & LKC_SHARED) && (count & LKC_SMASK));
871 			if ((count & (LKC_EXREQ | LKC_UPREQ | LKC_SMASK)) ==
872 			    LKC_SCOUNT) {
873 				/*
874 				 * Last shared count is being released,
875 				 * no exclusive or upgrade request present.
876 				 * Generally leave the shared bit set.
877 				 * Clear the CANCEL bit.
878 				 */
879 				ncount = (count - LKC_SCOUNT) & ~LKC_CANCEL;
880 				if (atomic_fcmpset_64(&lkp->lk_count,
881 						      &count, ncount)) {
882 					COUNT(td, -1);
883 					/* count = ncount; NOT USED */
884 					break;
885 				}
886 				/* retry */
887 			} else if ((count & (LKC_UPREQ | LKC_SMASK)) ==
888 				   (LKC_UPREQ | LKC_SCOUNT)) {
889 				/*
890 				 * Last shared count is being released but
891 				 * an upgrade request is present, automatically
892 				 * grant an exclusive state to the owner of
893 				 * the upgrade request and transfer the count.
894 				 */
895 				ncount = (count - LKC_SCOUNT + 1) &
896 					 ~(LKC_UPREQ | LKC_CANCEL | LKC_SHARED);
897 				if (atomic_fcmpset_64(&lkp->lk_count,
898 						      &count, ncount)) {
899 					wakeup(lkp);
900 					COUNT(td, -1);
901 					/* count = ncount; NOT USED */
902 					break;
903 				}
904 				/* retry */
905 			} else if ((count & (LKC_EXREQ | LKC_SMASK)) ==
906 				   (LKC_EXREQ | LKC_SCOUNT)) {
907 				/*
908 				 * Last shared count is being released but
909 				 * an exclusive request is present, we
910 				 * automatically grant an exclusive state to
911 				 * the owner of the request and transfer
912 				 * the count.
913 				 */
914 				ncount = (count - LKC_SCOUNT + 1) &
915 					 ~(LKC_EXREQ | LKC_EXREQ2 |
916 					   LKC_CANCEL | LKC_SHARED);
917 				if (atomic_fcmpset_64(&lkp->lk_count,
918 						      &count, ncount)) {
919 					wakeup(lkp);
920 					COUNT(td, -1);
921 					/* count = ncount; NOT USED */
922 					break;
923 				}
924 				/* retry */
925 			} else {
926 				/*
927 				 * Shared count is greater than 1.  We can
928 				 * just use undo_shreq() to clean things up.
929 				 * undo_shreq() will also handle races to 0
930 				 * after the fact.
931 				 */
932 				undo_shreq(lkp);
933 				COUNT(td, -1);
934 				break;
935 			}
936 			/* retry */
937 		}
938 		/* retry */
939 	}
940 	return 0;
941 }
942 
943 /*
944  * Start canceling blocked requesters or later requesters.
945  * Only blocked requesters using CANCELABLE can be canceled.
946  *
947  * This is intended to then allow other requesters (usually the
948  * caller) to obtain a non-cancelable lock.
949  *
950  * Don't waste time issuing a wakeup if nobody is pending.
951  */
952 int
953 lockmgr_cancel_beg(struct lock *lkp, u_int flags)
954 {
955 	uint64_t count;
956 
957 	count = lkp->lk_count;
958 	for (;;) {
959 		cpu_ccfence();
960 
961 		KKASSERT((count & LKC_CANCEL) == 0);	/* disallowed case */
962 
963 		/* issue w/lock held */
964 		KKASSERT((count & (LKC_XMASK | LKC_SMASK)) != 0);
965 
966 		if (!atomic_fcmpset_64(&lkp->lk_count,
967 				       &count, count | LKC_CANCEL)) {
968 			continue;
969 		}
970 		/* count |= LKC_CANCEL; NOT USED */
971 
972 		/*
973 		 * Wakeup any waiters.
974 		 *
975 		 * NOTE: EXREQ2 only matters when EXREQ is set, so don't
976 		 *	 bother checking EXREQ2.
977 		 */
978 		if (count & (LKC_EXREQ | LKC_SMASK | LKC_UPREQ)) {
979 			wakeup(lkp);
980 		}
981 		break;
982 	}
983 	return 0;
984 }
985 
986 /*
987  * End our cancel request (typically after we have acquired
988  * the lock ourselves).
989  */
990 int
991 lockmgr_cancel_end(struct lock *lkp, u_int flags)
992 {
993 	atomic_clear_long(&lkp->lk_count, LKC_CANCEL);
994 
995 	return 0;
996 }
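
/*
 * Typical cancel pattern, modeled on the DEBUG_CANCEL_LOCKS sysctl
 * code at the end of this file (illustrative):
 *
 *	Canceling thread (already holds the lock):
 *		lockmgr(&lk, LK_CANCEL_BEG);	fail CANCELABLE waiters
 *		...
 *		lockmgr(&lk, LK_RELEASE);	final release clears LKC_CANCEL
 *
 *	Cancelable requester:
 *		error = lockmgr(&lk, LK_EXCLUSIVE | LK_CANCELABLE);
 *		(error == ENOLCK once the cancel is flagged)
 *
 * lockmgr_cancel_end() clears LKC_CANCEL explicitly when the lock is
 * being retained rather than released.
 */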
997 
998 /*
999  * Back out SCOUNT from a failed shared lock attempt and handle any race
1000  * to 0.  This function is also used by the release code for the less
1001  * optimal race to 0 case.
1002  *
1003  * WARNING! Since we are unconditionally decrementing LKC_SCOUNT, it is
1004  *	    possible for the lock to get into a LKC_SHARED + ZERO SCOUNT
1005  *	    situation.  A shared request can block with a ZERO SCOUNT if
1006  *	    EXREQ or UPREQ is pending in this situation.  Be sure to always
1007  *	    issue a wakeup() in this situation if we are unable to
1008  *	    transition to an exclusive lock, to handle the race.
1009  *
1010  * Always succeeds
1011  * Must not block
1012  */
1013 static void
1014 undo_shreq(struct lock *lkp)
1015 {
1016 	uint64_t count;
1017 	uint64_t ncount;
1018 
1019 	count = atomic_fetchadd_64(&lkp->lk_count, -LKC_SCOUNT) - LKC_SCOUNT;
1020 	while ((count & (LKC_EXREQ | LKC_UPREQ | LKC_CANCEL)) &&
1021 	       (count & (LKC_SMASK | LKC_XMASK)) == 0) {
1022 		/*
1023 		 * Note that UPREQ must have priority over EXREQ, and EXREQ
1024 		 * over CANCEL, so if the atomic op fails we have to loop up.
1025 		 */
1026 		if (count & LKC_UPREQ) {
1027 			ncount = (count + 1) & ~(LKC_UPREQ | LKC_CANCEL |
1028 						 LKC_SHARED);
1029 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1030 				wakeup(lkp);
1031 				/* count = ncount; NOT USED */
1032 				break;
1033 			}
1034 			wakeup(lkp);
1035 			continue;
1036 		}
1037 		if (count & LKC_EXREQ) {
1038 			ncount = (count + 1) & ~(LKC_EXREQ | LKC_EXREQ2 |
1039 						 LKC_CANCEL | LKC_SHARED);
1040 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1041 				wakeup(lkp);
1042 				/* count = ncount; NOT USED */
1043 				break;
1044 			}
1045 			wakeup(lkp);
1046 			continue;
1047 		}
1048 		if (count & LKC_CANCEL) {
1049 			ncount = count & ~LKC_CANCEL;
1050 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1051 				wakeup(lkp);
1052 				/* count = ncount; NOT USED */
1053 				break;
1054 			}
1055 		}
1056 		/* retry */
1057 	}
1058 }
1059 
1060 /*
1061  * Undo an exclusive request.  Returns EBUSY if we were able to undo the
1062  * request, and 0 if the request was granted before we could undo it.
1063  * When 0 is returned, the lock state has not been modified.  The caller
1064  * is responsible for setting the lockholder to curthread.
1065  */
1066 static
1067 int
1068 undo_exreq(struct lock *lkp)
1069 {
1070 	uint64_t count;
1071 	uint64_t ncount;
1072 	int error;
1073 
1074 	count = lkp->lk_count;
1075 	error = 0;
1076 
1077 	for (;;) {
1078 		cpu_ccfence();
1079 
1080 		if ((count & LKC_EXREQ) == 0) {
1081 			/*
1082 			 * EXREQ was granted.  We own the exclusive lock.
1083 			 */
1084 			break;
1085 		}
1086 		if (count & LKC_XMASK) {
1087 			/*
1088 			 * Clear the EXREQ we still own.  Only wakeup on
1089 			 * EXREQ2 if no UPREQ.  There are still exclusive
1090 			 * holders so do not wake up any shared locks or
1091 			 * any UPREQ.
1092 			 *
1093 			 * If there is an UPREQ it will issue a wakeup()
1094 			 * for any EXREQ wait loops, so we can clear EXREQ2
1095 			 * now.
1096 			 */
1097 			ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
1098 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1099 				if ((count & (LKC_EXREQ2 | LKC_UPREQ)) ==
1100 				    LKC_EXREQ2) {
1101 					wakeup(lkp);
1102 				}
1103 				error = EBUSY;
1104 				/* count = ncount; NOT USED */
1105 				break;
1106 			}
1107 			/* retry */
1108 		} else if (count & LKC_UPREQ) {
1109 			/*
1110 			 * Clear the EXREQ we still own.  We cannot wakeup any
1111 			 * shared or exclusive waiters because there is an
1112 			 * upgrade request pending (that we do not handle here).
1113 			 *
1114 			 * If there is an UPREQ it will issue a wakeup()
1115 			 * for any EXREQ wait loops, so we can clear EXREQ2
1116 			 * now.
1117 			 */
1118 			ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
1119 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1120 				error = EBUSY;
1121 				break;
1122 			}
1123 			/* retry */
1124 		} else if ((count & LKC_SHARED) && (count & LKC_SMASK)) {
1125 			/*
1126 			 * No UPREQ, lock not held exclusively, but the lock
1127 			 * is held shared.  Clear EXREQ, wakeup anyone trying
1128 			 * to get the EXREQ bit (they have to set it
1129 			 * themselves, EXREQ2 is an aggregation).
1130 			 */
1131 			ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
1132 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1133 				if (count & LKC_EXREQ2)
1134 					wakeup(lkp);
1135 				error = EBUSY;
1136 				/* count = ncount; NOT USED */
1137 				break;
1138 			}
1139 			/* retry */
1140 		} else {
1141 			/*
1142 			 * No UPREQ, lock not held exclusively or shared.
1143 			 * Grant the EXREQ and wakeup anyone waiting on
1144 			 * EXREQ2.
1145 			 */
1146 			ncount = (count + 1) & ~(LKC_EXREQ | LKC_EXREQ2);
1147 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1148 				if (count & LKC_EXREQ2)
1149 					wakeup(lkp);
1150 				/* count = ncount; NOT USED */
1151 				/* we are granting, error == 0 */
1152 				break;
1153 			}
1154 			/* retry */
1155 		}
1156 		/* retry */
1157 	}
1158 	return error;
1159 }
1160 
1161 /*
1162  * Undo an upgrade request.  Returns EBUSY if we were able to undo the
1163  * request, and 0 if the request was granted before we could undo it.
1164  * When 0 is returned, the lock state has not been modified.  The caller
1165  * is responsible for setting the lockholder to curthread.
1166  */
1167 static
1168 int
1169 undo_upreq(struct lock *lkp)
1170 {
1171 	uint64_t count;
1172 	uint64_t ncount;
1173 	int error;
1174 
1175 	count = lkp->lk_count;
1176 	error = 0;
1177 
1178 	for (;;) {
1179 		cpu_ccfence();
1180 
1181 		if ((count & LKC_UPREQ) == 0) {
1182 			/*
1183 			 * UPREQ was granted
1184 			 */
1185 			break;
1186 		}
1187 		if (count & LKC_XMASK) {
1188 			/*
1189 			 * Clear the UPREQ we still own.  Nobody to wakeup
1190 			 * here because there is an existing exclusive
1191 			 * holder.
1192 			 */
1193 			if (atomic_fcmpset_64(&lkp->lk_count, &count,
1194 					      count & ~LKC_UPREQ)) {
1195 				error = EBUSY;
1196 				/* count &= ~LKC_UPREQ; NOT USED */
1197 				break;
1198 			}
1199 		} else if (count & LKC_EXREQ) {
1200 			/*
1201 			 * Clear the UPREQ we still own.  Grant the exclusive
1202 			 * request and wake it up.
1203 			 */
1204 			ncount = (count + 1);
1205 			ncount &= ~(LKC_EXREQ | LKC_EXREQ2 | LKC_UPREQ);
1206 
1207 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1208 				wakeup(lkp);
1209 				error = EBUSY;
1210 				/* count = ncount; NOT USED */
1211 				break;
1212 			}
1213 		} else {
1214 			/*
1215 			 * Clear the UPREQ we still own.  Wakeup any shared
1216 			 * waiters.
1217 			 */
1218 			ncount = count & ~LKC_UPREQ;
1219 			if (count & LKC_SMASK)
1220 				ncount |= LKC_SHARED;
1221 
1222 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1223 				if ((count & LKC_SHARED) == 0 &&
1224 				    (ncount & LKC_SHARED)) {
1225 					wakeup(lkp);
1226 				}
1227 				error = EBUSY;
1228 				/* count = ncount; NOT USED */
1229 				break;
1230 			}
1231 		}
1232 		/* retry */
1233 	}
1234 	return error;
1235 }
1236 
1237 void
1238 lockmgr_kernproc(struct lock *lp)
1239 {
1240 	struct thread *td __debugvar = curthread;
1241 
1242 	if (lp->lk_lockholder != LK_KERNTHREAD) {
1243 		KASSERT(lp->lk_lockholder == td,
1244 		    ("lockmgr_kernproc: lock not owned by curthread %p: %p",
1245 		    td, lp->lk_lockholder));
1246 		lp->lk_lockholder = LK_KERNTHREAD;
1247 		COUNT(td, -1);
1248 	}
1249 }
1250 
1251 /*
1252  * Initialize a lock; required before use.
1253  */
1254 void
1255 lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
1256 {
1257 	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
1258 	lkp->lk_count = 0;
1259 	lkp->lk_wmesg = wmesg;
1260 	lkp->lk_timo = timo;
1261 	lkp->lk_lockholder = NULL;
1262 }
1263 
1264 /*
1265  * Reinitialize a lock that is being reused for a different purpose, but
1266  * which may have pending (blocked) threads sitting on it.  The caller
1267  * must already hold the interlock.
1268  */
1269 void
1270 lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
1271 {
1272 	lkp->lk_wmesg = wmesg;
1273 	lkp->lk_timo = timo;
1274 }
1275 
1276 /*
1277  * De-initialize a lock.  The structure must no longer be used by anyone.
1278  */
1279 void
1280 lockuninit(struct lock *lkp)
1281 {
1282 	uint64_t count __unused;
1283 
1284 	count = lkp->lk_count;
1285 	cpu_ccfence();
1286 	KKASSERT((count & (LKC_EXREQ | LKC_UPREQ)) == 0 &&
1287 		 ((count & LKC_SHARED) || (count & LKC_SMASK) == 0));
1288 }
1289 
1290 /*
1291  * Determine the status of a lock.
1292  */
1293 int
1294 lockstatus(struct lock *lkp, struct thread *td)
1295 {
1296 	int lock_type = 0;
1297 	uint64_t count;
1298 
1299 	count = lkp->lk_count;
1300 	cpu_ccfence();
1301 
1302 	if (count & (LKC_XMASK | LKC_SMASK | LKC_EXREQ | LKC_UPREQ)) {
1303 		if (count & LKC_XMASK) {
1304 			if (td == NULL || lkp->lk_lockholder == td)
1305 				lock_type = LK_EXCLUSIVE;
1306 			else
1307 				lock_type = LK_EXCLOTHER;
1308 		} else if ((count & LKC_SMASK) && (count & LKC_SHARED)) {
1309 			lock_type = LK_SHARED;
1310 		}
1311 	}
1312 	return (lock_type);
1313 }
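
/*
 * Example (illustrative): assert that the caller holds the lock
 * exclusively before touching a structure it protects.
 *
 *	KKASSERT(lockstatus(&lk, curthread) == LK_EXCLUSIVE);
 */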
1314 
1315 /*
1316  * Return non-zero if the caller owns the lock shared or exclusive.
1317  * We can only guess re: shared locks.
1318  */
1319 int
1320 lockowned(struct lock *lkp)
1321 {
1322 	thread_t td = curthread;
1323 	uint64_t count;
1324 
1325 	count = lkp->lk_count;
1326 	cpu_ccfence();
1327 
1328 	if (count & LKC_XMASK)
1329 		return(lkp->lk_lockholder == td);
1330 	else
1331 		return((count & LKC_SMASK) != 0);
1332 }
1333 
1334 #if 0
1335 /*
1336  * Determine the number of holders of a lock.
1337  *
1338  * REMOVED - Cannot be used due to our use of atomic_fetchadd_64()
1339  *	     for shared locks.  Caller can only test if the lock has
1340  *	     a count or not using lockinuse(lk) (sys/lock.h)
1341  */
1342 int
1343 lockcount(struct lock *lkp)
1344 {
1345 	panic("lockcount cannot be used");
1346 }
1347 
1348 int
1349 lockcountnb(struct lock *lkp)
1350 {
1351 	panic("lockcount cannot be used");
1352 }
1353 #endif
1354 
1355 /*
1356  * Print out information about the state of a lock. Used by VOP_PRINT
1357  * routines to display status about contained locks.
1358  */
1359 void
1360 lockmgr_printinfo(struct lock *lkp)
1361 {
1362 	struct thread *td = lkp->lk_lockholder;
1363 	struct proc *p;
1364 	uint64_t count;
1365 
1366 	count = lkp->lk_count;
1367 	cpu_ccfence();
1368 
1369 	if (td && td != LK_KERNTHREAD)
1370 		p = td->td_proc;
1371 	else
1372 		p = NULL;
1373 
1374 	if (count & LKC_XMASK) {
1375 		kprintf(" lock type %s: EXCLUS (count %016jx) by td %p pid %d",
1376 		    lkp->lk_wmesg, (intmax_t)count, td,
1377 		    p ? p->p_pid : -99);
1378 	} else if ((count & LKC_SMASK) && (count & LKC_SHARED)) {
1379 		kprintf(" lock type %s: SHARED (count %016jx)",
1380 		    lkp->lk_wmesg, (intmax_t)count);
1381 	} else {
1382 		kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
1383 	}
1384 	if ((count & (LKC_EXREQ | LKC_UPREQ)) ||
1385 	    ((count & LKC_XMASK) && (count & LKC_SMASK)))
1386 		kprintf(" with waiters\n");
1387 	else
1388 		kprintf("\n");
1389 }
1390 
1391 void
1392 lock_sysinit(struct lock_args *arg)
1393 {
1394 	lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
1395 }
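
/*
 * lock_sysinit() is normally reached via the LOCK_SYSINIT() macro, as
 * with cancel_lk near the top of this file, e.g. (illustrative):
 *
 *	static struct lock example_lk;
 *	LOCK_SYSINIT(examplelk, &example_lk, "exmplk", 0);
 */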
1396 
1397 #ifdef DEBUG_CANCEL_LOCKS
1398 
1399 static
1400 int
1401 sysctl_cancel_lock(SYSCTL_HANDLER_ARGS)
1402 {
1403 	int error;
1404 
1405 	if (req->newptr) {
1406 		SYSCTL_XUNLOCK();
1407 		lockmgr(&cancel_lk, LK_EXCLUSIVE);
1408 		error = tsleep(&error, PCATCH, "canmas", hz * 5);
1409 		lockmgr(&cancel_lk, LK_CANCEL_BEG);
1410 		error = tsleep(&error, PCATCH, "canmas", hz * 5);
1411 		lockmgr(&cancel_lk, LK_RELEASE);
1412 		SYSCTL_XLOCK();
1413 		SYSCTL_OUT(req, &error, sizeof(error));
1414 	}
1415 	error = 0;
1416 
1417 	return error;
1418 }
1419 
1420 static
1421 int
1422 sysctl_cancel_test(SYSCTL_HANDLER_ARGS)
1423 {
1424 	int error;
1425 
1426 	if (req->newptr) {
1427 		error = lockmgr(&cancel_lk, LK_EXCLUSIVE|LK_CANCELABLE);
1428 		if (error == 0)
1429 			lockmgr(&cancel_lk, LK_RELEASE);
1430 		SYSCTL_OUT(req, &error, sizeof(error));
1431 		kprintf("test %d\n", error);
1432 	}
1433 
1434 	return 0;
1435 }
1436 
1437 #endif
1438