xref: /dragonfly/sys/kern/kern_lock.c (revision 092c2dd1)
1 /*
2  * Copyright (c) 1995
3  *	The Regents of the University of California.  All rights reserved.
4  * Copyright (C) 1997
5  *	John S. Dyson.  All rights reserved.
6  * Copyright (C) 2013-2017
7  *	Matthew Dillon, All rights reserved.
8  *
9  * This code contains ideas from software contributed to Berkeley by
10  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
11  * System project at Carnegie-Mellon University.
12  *
13  * This code is derived from software contributed to The DragonFly Project
14  * by Matthew Dillon <dillon@backplane.com>.  Extensively rewritten.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  */
40 
41 #include "opt_lint.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/proc.h>
47 #include <sys/lock.h>
48 #include <sys/sysctl.h>
49 #include <sys/spinlock.h>
50 #include <sys/spinlock2.h>
51 #include <sys/indefinite2.h>
52 
53 static void undo_shreq(struct lock *lkp);
54 static int undo_upreq(struct lock *lkp);
55 static int undo_exreq(struct lock *lkp);
56 
57 #ifdef DEBUG_CANCEL_LOCKS
58 
59 static int sysctl_cancel_lock(SYSCTL_HANDLER_ARGS);
60 static int sysctl_cancel_test(SYSCTL_HANDLER_ARGS);
61 
62 static struct lock cancel_lk;
63 LOCK_SYSINIT(cancellk, &cancel_lk, "cancel", 0);
64 SYSCTL_PROC(_kern, OID_AUTO, cancel_lock, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
65 	    sysctl_cancel_lock, "I", "test cancelable locks");
66 SYSCTL_PROC(_kern, OID_AUTO, cancel_test, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
67 	    sysctl_cancel_test, "I", "test cancelable locks");
68 
69 #endif
70 
71 __read_frequently int lock_test_mode;
72 SYSCTL_INT(_debug, OID_AUTO, lock_test_mode, CTLFLAG_RW,
73 	   &lock_test_mode, 0, "");
74 
75 /*
76  * Locking primitives implementation.
77  * Locks provide shared/exclusive synchronization.
78  */
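
/*
 * Usage sketch (illustrative only; the example_* names are hypothetical
 * and not part of this file).  A typical caller initializes the lock
 * once and then brackets its critical sections with shared or exclusive
 * requests through the lockmgr() wrapper and LK_* flags from
 * <sys/lock.h>.
 */
#if 0
static struct lock example_lk;

static void
example_setup(void)
{
	/* "exmplk" is the wmesg shown while threads sleep on the lock */
	lockinit(&example_lk, "exmplk", 0, 0);
}

static void
example_reader(void)
{
	lockmgr(&example_lk, LK_SHARED);	/* many concurrent holders */
	/* ... read the protected data ... */
	lockmgr(&example_lk, LK_RELEASE);
}

static void
example_writer(void)
{
	lockmgr(&example_lk, LK_EXCLUSIVE);	/* single exclusive holder */
	/* ... modify the protected data ... */
	lockmgr(&example_lk, LK_RELEASE);
}
#endif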
79 
80 #ifdef DEBUG_LOCKS
81 #define COUNT(td, x) (td)->td_locks += (x)
82 #else
83 #define COUNT(td, x) do { } while (0)
84 #endif
85 
86 /*
87  * Helper, assert basic conditions
88  */
89 static __inline void
90 _lockmgr_assert(struct lock *lkp, u_int flags)
91 {
92 	if (mycpu->gd_intr_nesting_level &&
93 	    (flags & LK_NOWAIT) == 0 &&
94 	    (flags & LK_TYPE_MASK) != LK_RELEASE &&
95 	    panic_cpu_gd != mycpu
96 	) {
97 		panic("lockmgr %s from %p: called from interrupt, ipi, "
98 		      "or hard code section",
99 		      lkp->lk_wmesg, ((int **)&lkp)[-1]);
100 	}
101 }
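
/*
 * Illustrative sketch (hypothetical caller): in an interrupt, ipi, or
 * other hard code section blocking is illegal, so the assertion above
 * only tolerates non-blocking requests.  Such callers must pass
 * LK_NOWAIT and handle EBUSY.
 */
#if 0
static int
example_hard_context(struct lock *lkp)
{
	int error;

	error = lockmgr(lkp, LK_EXCLUSIVE | LK_NOWAIT);
	if (error)
		return error;		/* EBUSY: cannot sleep here */
	/* ... */
	lockmgr(lkp, LK_RELEASE);
	return 0;
}
#endif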
102 
103 /*
104  * Acquire a shared lock
105  */
106 int
107 lockmgr_shared(struct lock *lkp, u_int flags)
108 {
109 	uint32_t extflags;
110 	thread_t td;
111 	uint64_t count;
112 	int error;
113 	int pflags;
114 	int timo;
115 	int didloop;
116 
117 	_lockmgr_assert(lkp, flags);
118 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
119 	td = curthread;
120 
121 	count = lkp->lk_count;
122 	cpu_ccfence();
123 
124 	/*
125 	 * If the caller already holds the lock exclusively then
126 	 * we silently obtain another count on the exclusive lock.
127 	 * Avoid accessing lk_lockholder until testing exclusivity.
128 	 *
129 	 * WARNING!  The old FreeBSD behavior was to downgrade,
130 	 *	     but this creates a problem when recursions
131 	 *	     return to the caller and the caller expects
132 	 *	     its original exclusive lock to remain exclusively
133 	 *	     locked.
134 	 */
135 	if ((count & LKC_XMASK) && lkp->lk_lockholder == td) {
136 		KKASSERT(lkp->lk_count & LKC_XMASK);
137 		if ((extflags & LK_CANRECURSE) == 0) {
138 			if (extflags & LK_NOWAIT)
139 				return EBUSY;
140 			panic("lockmgr: locking against myself");
141 		}
142 		atomic_add_64(&lkp->lk_count, 1);
143 		COUNT(td, 1);
144 		return 0;
145 	}
146 
147 	/*
148 	 * Unless TDF_DEADLKTREAT is set, we cannot add LKC_SCOUNT while
149  * SHARED is set and either EXREQ or UPREQ is set.
150 	 *
151 	 * NOTE: In the race-to-0 case (see undo_shreq()), we could
152 	 *	 theoretically work the SMASK == 0 case here.
153 	 */
154 	if ((td->td_flags & TDF_DEADLKTREAT) == 0) {
155 		while ((count & LKC_SHARED) &&
156 		       (count & (LKC_EXREQ | LKC_UPREQ))) {
157 			/*
158 			 * Immediate failure conditions
159 			 */
160 			if (extflags & LK_CANCELABLE) {
161 				if (count & LKC_CANCEL)
162 					return ENOLCK;
163 			}
164 			if (extflags & LK_NOWAIT)
165 				return EBUSY;
166 
167 			/*
168 			 * Interlocked tsleep
169 			 */
170 			pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
171 			timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
172 
173 			tsleep_interlock(lkp, pflags);
174 			count = atomic_fetchadd_long(&lkp->lk_count, 0);
175 
176 			if ((count & LKC_SHARED) &&
177 			    (count & (LKC_EXREQ | LKC_UPREQ))) {
178 				error = tsleep(lkp, pflags | PINTERLOCKED,
179 					       lkp->lk_wmesg, timo);
180 				if (error)
181 					return error;
182 				count = lkp->lk_count;
183 				cpu_ccfence();
184 				continue;
185 			}
186 			break;
187 		}
188 	}
189 
190 	/*
191 	 * Bump the SCOUNT field.  The shared lock is granted only once
192 	 * the SHARED flag gets set.  If it is already set, we are done.
193 	 *
194 	 * (Racing an EXREQ or UPREQ operation is ok here, we already did
195 	 * our duty above).
196 	 */
197 	count = atomic_fetchadd_64(&lkp->lk_count, LKC_SCOUNT) + LKC_SCOUNT;
198 	error = 0;
199 	didloop = 0;
200 
201 	for (;;) {
202 		/*
203 		 * We may be able to grant ourselves the bit trivially.
204 		 * We're done once the SHARED bit is granted.
205 		 */
206 		if ((count & (LKC_XMASK | LKC_EXREQ |
207 			      LKC_UPREQ | LKC_SHARED)) == 0) {
208 			if (atomic_fcmpset_64(&lkp->lk_count,
209 					      &count, count | LKC_SHARED)) {
210 				/* count |= LKC_SHARED; NOT USED */
211 				break;
212 			}
213 			continue;
214 		}
215 		if ((td->td_flags & TDF_DEADLKTREAT) &&
216 		    (count & (LKC_XMASK | LKC_SHARED)) == 0) {
217 			if (atomic_fcmpset_64(&lkp->lk_count,
218 					      &count, count | LKC_SHARED)) {
219 				/* count |= LKC_SHARED; NOT USED */
220 				break;
221 			}
222 			continue;
223 		}
224 		if (count & LKC_SHARED)
225 			break;
226 
227 		/*
228 		 * Slow path
229 		 */
230 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
231 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
232 
233 		if (extflags & LK_CANCELABLE) {
234 			if (count & LKC_CANCEL) {
235 				undo_shreq(lkp);
236 				error = ENOLCK;
237 				break;
238 			}
239 		}
240 		if (extflags & LK_NOWAIT) {
241 			undo_shreq(lkp);
242 			error = EBUSY;
243 			break;
244 		}
245 
246 		/*
247 		 * Interlocked after the first loop.
248 		 */
249 		if (didloop) {
250 			error = tsleep(lkp, pflags | PINTERLOCKED,
251 				       lkp->lk_wmesg, timo);
252 			if (extflags & LK_SLEEPFAIL) {
253 				undo_shreq(lkp);
254 				error = ENOLCK;
255 				break;
256 			}
257 			if (error) {
258 				undo_shreq(lkp);
259 				break;
260 			}
261 		}
262 		didloop = 1;
263 
264 		/*
265 		 * Reload, shortcut the grant case, then set the
266 		 * interlock and loop.
267 		 */
268 		count = lkp->lk_count;
269 		if (count & LKC_SHARED)
270 			break;
271 		tsleep_interlock(lkp, pflags);
272 		count = atomic_fetchadd_64(&lkp->lk_count, 0);
273 	}
274 	if (error == 0)
275 		COUNT(td, 1);
276 
277 	return error;
278 }
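
/*
 * Illustrative sketch (hypothetical caller): a cancelable shared request
 * returns ENOLCK instead of blocking indefinitely once another thread
 * has issued LK_CANCEL_BEG on the lock.
 */
#if 0
static int
example_cancelable_read(struct lock *lkp)
{
	int error;

	error = lockmgr(lkp, LK_SHARED | LK_CANCELABLE);
	if (error)
		return error;		/* typically ENOLCK when canceled */
	/* ... read the protected data ... */
	lockmgr(lkp, LK_RELEASE);
	return 0;
}
#endif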
279 
280 /*
281  * Acquire an exclusive lock
282  */
283 int
284 lockmgr_exclusive(struct lock *lkp, u_int flags)
285 {
286 	uint64_t count;
287 	uint64_t ncount;
288 	uint32_t extflags;
289 	thread_t td;
290 	int error;
291 	int pflags;
292 	int timo;
293 
294 	_lockmgr_assert(lkp, flags);
295 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
296 	td = curthread;
297 
298 	error = 0;
299 	count = lkp->lk_count;
300 	cpu_ccfence();
301 
302 	/*
303 	 * Recursive lock if we already hold it exclusively.  Avoid testing
304 	 * lk_lockholder until after testing lk_count.
305 	 */
306 	if ((count & LKC_XMASK) && lkp->lk_lockholder == td) {
307 		if ((extflags & LK_CANRECURSE) == 0) {
308 			if (extflags & LK_NOWAIT)
309 				return EBUSY;
310 			panic("lockmgr: locking against myself");
311 		}
312 		count = atomic_fetchadd_64(&lkp->lk_count, 1) + 1;
313 		KKASSERT((count & LKC_XMASK) > 1);
314 		COUNT(td, 1);
315 		return 0;
316 	}
317 
318 	/*
319 	 * Trivially acquire the lock, or block until we can set EXREQ.
320 	 * Set EXREQ2 if EXREQ is already set or the lock is already
321 	 * held exclusively.  EXREQ2 is an aggregation bit to request
322 	 * a wakeup.
323 	 *
324 	 * WARNING! We cannot set EXREQ if the lock is already held
325 	 *	    exclusively because it may race another EXREQ
326 	 *	    being cleared and granted.  We use the exclusivity
327 	 *	    to prevent both EXREQ and UPREQ from being set.
328 	 *
329 	 *	    This means that both shared and exclusive requests
330 	 *	    have equal priority against a current exclusive holder's
331 	 *	    release.  Exclusive requests still have priority over
332 	 *	    new shared requests when the lock is already held shared.
333 	 */
334 	for (;;) {
335 		/*
336 		 * Normal trivial case
337 		 */
338 		if ((count & (LKC_UPREQ | LKC_EXREQ |
339 			      LKC_XMASK)) == 0 &&
340 		    ((count & LKC_SHARED) == 0 ||
341 		     (count & LKC_SMASK) == 0)) {
342 			ncount = (count + 1) & ~LKC_SHARED;
343 			if (atomic_fcmpset_64(&lkp->lk_count,
344 					      &count, ncount)) {
345 				lkp->lk_lockholder = td;
346 				COUNT(td, 1);
347 				return 0;
348 			}
349 			continue;
350 		}
351 
352 		if (extflags & LK_CANCELABLE) {
353 			if (count & LKC_CANCEL)
354 				return ENOLCK;
355 		}
356 		if (extflags & LK_NOWAIT)
357 			return EBUSY;
358 
359 		/*
360 		 * Interlock to set EXREQ or EXREQ2
361 		 */
362 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
363 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
364 
365 		if (count & (LKC_EXREQ | LKC_XMASK))
366 			ncount = count | LKC_EXREQ2;
367 		else
368 			ncount = count | LKC_EXREQ;
369 		tsleep_interlock(lkp, pflags);
370 		if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
371 			/*
372 			 * If we successfully transitioned to EXREQ we
373 			 * can break out, otherwise we had set EXREQ2 and
374 			 * we block.
375 			 */
376 			if ((count & (LKC_EXREQ | LKC_XMASK)) == 0) {
377 				count = ncount;
378 				break;
379 			}
380 
381 			error = tsleep(lkp, pflags | PINTERLOCKED,
382 				       lkp->lk_wmesg, timo);
383 			count = lkp->lk_count;	/* reload */
384 			cpu_ccfence();
385 		}
386 #ifdef INVARIANTS
387 		if (lock_test_mode > 0) {
388 			--lock_test_mode;
389 			print_backtrace(8);
390 		}
391 #endif
392 		if (error)
393 			return error;
394 		if (extflags & LK_SLEEPFAIL)
395 			return ENOLCK;
396 	}
397 
398 	/*
399  * Once EXREQ has been set, wait for it to be granted.
400 	 * We enter the loop with tsleep_interlock() already called.
401 	 */
402 	for (;;) {
403 		/*
404 		 * Waiting for EXREQ to be granted to us.
405 		 *
406 		 * NOTE! If we try to trivially get the exclusive lock
407 		 *	 (basically by racing undo_shreq()) and succeed,
408 		 *	 we must still wakeup(lkp) for another exclusive
409 		 *	 lock trying to acquire EXREQ.  Easier to simply
410 		 *	 wait for our own wakeup.
411 		 */
412 		if ((count & LKC_EXREQ) == 0) {
413 			KKASSERT(count & LKC_XMASK);
414 			lkp->lk_lockholder = td;
415 			COUNT(td, 1);
416 			break;
417 		}
418 
419 		/*
420 		 * Block waiting for our exreq to be granted.
421 		 * Check cancelation.  NOWAIT was already dealt with.
422 		 */
423 		if (extflags & LK_CANCELABLE) {
424 			if (count & LKC_CANCEL) {
425 				if (undo_exreq(lkp) == 0) {
426 					lkp->lk_lockholder = LK_KERNTHREAD;
427 					lockmgr_release(lkp, 0);
428 				}
429 				error = ENOLCK;
430 				break;
431 			}
432 		}
433 
434 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
435 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
436 
437 		error = tsleep(lkp, pflags | PINTERLOCKED, lkp->lk_wmesg, timo);
438 #ifdef INVARIANTS
439 		if (lock_test_mode > 0) {
440 			--lock_test_mode;
441 			print_backtrace(8);
442 		}
443 #endif
444 		/*
445 		 * A tsleep error is uncommon.  If it occurs we have to
446 		 * undo our EXREQ.  If we are granted the exclusive lock
447 		 * as we try to undo we have to deal with it.
448 		 */
449 		if (extflags & LK_SLEEPFAIL) {
450 			if (undo_exreq(lkp) == 0) {
451 				lkp->lk_lockholder = LK_KERNTHREAD;
452 				lockmgr_release(lkp, 0);
453 			}
454 			if (error == 0)
455 				error = ENOLCK;
456 			break;
457 		}
458 		if (error) {
459 			if (undo_exreq(lkp))
460 				break;
461 			lkp->lk_lockholder = td;
462 			COUNT(td, 1);
463 			error = 0;
464 			break;
465 		}
466 
467 		/*
468 		 * Reload after sleep, shortcut grant case.
469 		 * Then set the interlock and loop.
470 		 */
471 		count = lkp->lk_count;
472 		cpu_ccfence();
473 		if ((count & LKC_EXREQ) == 0) {
474 			KKASSERT(count & LKC_XMASK);
475 			lkp->lk_lockholder = td;
476 			COUNT(td, 1);
477 			break;
478 		}
479 		tsleep_interlock(lkp, pflags);
480 		count = atomic_fetchadd_64(&lkp->lk_count, 0);
481 	}
482 	return error;
483 }
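
/*
 * Illustrative sketch (hypothetical caller): re-acquiring an exclusive
 * lock the current thread already holds requires LK_CANRECURSE,
 * otherwise lockmgr panics (or returns EBUSY when LK_NOWAIT is given).
 * Each recursion must be paired with its own LK_RELEASE.
 */
#if 0
static void
example_recursion(struct lock *lkp)
{
	lockmgr(lkp, LK_EXCLUSIVE);
	/* bumps the exclusive count instead of deadlocking */
	lockmgr(lkp, LK_EXCLUSIVE | LK_CANRECURSE);
	/* ... */
	lockmgr(lkp, LK_RELEASE);	/* drop the recursed count */
	lockmgr(lkp, LK_RELEASE);	/* drop the original count */
}
#endif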
484 
485 /*
486  * Downgrade an exclusive lock to shared.
487  *
488  * This function always succeeds as long as the caller owns a legal
489  * exclusive lock with one reference.  UPREQ and EXREQ are ignored.
490  */
491 int
492 lockmgr_downgrade(struct lock *lkp, u_int flags)
493 {
494 	uint64_t count;
495 	uint64_t ncount;
496 	uint32_t extflags;
497 	thread_t otd;
498 	thread_t td;
499 
500 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
501 	td = curthread;
502 	count = lkp->lk_count;
503 
504 	for (;;) {
505 		cpu_ccfence();
506 
507 		/*
508 		 * Downgrade an exclusive lock into a shared lock.  All
509 		 * counts on a recursive exclusive lock become shared.
510 		 *
511 		 * NOTE: Currently to reduce confusion we only allow
512 		 *	 there to be one exclusive lock count, and panic
513 		 *	 if there are more.
514 		 */
515 		if (lkp->lk_lockholder != td || (count & LKC_XMASK) != 1) {
516 			panic("lockmgr: not holding exclusive lock: "
517 			      "%p/%p %016jx", lkp->lk_lockholder, td, count);
518 		}
519 
520 		/*
521 		 * NOTE! Must NULL-out lockholder before releasing the
522 		 *	 exclusive lock.
523 		 *
524 		 * NOTE! There might be pending shared requests, check
525 		 *	 and wake them up.
526 		 */
527 		otd = lkp->lk_lockholder;
528 		lkp->lk_lockholder = NULL;
529 		ncount = (count & ~(LKC_XMASK | LKC_EXREQ2)) +
530 			 ((count & LKC_XMASK) << LKC_SSHIFT);
531 		ncount |= LKC_SHARED;
532 
533 		if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
534 			/*
535 			 * Wakeup any shared waiters (prior SMASK), or
536 			 * any exclusive requests that couldn't set EXREQ
537 			 * because the lock had been held exclusively.
538 			 */
539 			if (count & (LKC_SMASK | LKC_EXREQ2))
540 				wakeup(lkp);
541 			/* count = ncount; NOT USED */
542 			break;
543 		}
544 		lkp->lk_lockholder = otd;
545 		/* retry */
546 	}
547 	return 0;
548 }
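
/*
 * Illustrative sketch (hypothetical caller; the LK_DOWNGRADE request is
 * assumed to be routed here by the lockmgr() wrapper in <sys/lock.h>):
 * downgrading keeps the structure continuously locked while letting
 * other readers back in.
 */
#if 0
static void
example_modify_then_scan(struct lock *lkp)
{
	lockmgr(lkp, LK_EXCLUSIVE);
	/* ... modify the structure ... */
	lockmgr(lkp, LK_DOWNGRADE);	/* now shared, never unlocked */
	/* ... long read-only scan, concurrent readers allowed ... */
	lockmgr(lkp, LK_RELEASE);
}
#endif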
549 
550 /*
551  * Upgrade a shared lock to exclusive.  With LK_EXCLUPGRADE we guarantee
552  * that no other exclusive requester can get in front of us, and we fail
553  * immediately if another upgrade is already pending.  If we fail, the
554  * shared lock is released.
555  *
556  * If LK_EXCLUPGRADE is not set and we cannot upgrade because someone
557  * else is in front of us, we release the shared lock and acquire the
558  * exclusive lock normally.  If a failure occurs, the shared lock is
559  * released.
560  */
561 int
562 lockmgr_upgrade(struct lock *lkp, u_int flags)
563 {
564 	uint64_t count;
565 	uint64_t ncount;
566 	uint32_t extflags;
567 	thread_t td;
568 	int error;
569 	int pflags;
570 	int timo;
571 
572 	_lockmgr_assert(lkp, flags);
573 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
574 	td = curthread;
575 	error = 0;
576 	count = lkp->lk_count;
577 	cpu_ccfence();
578 
579 	/*
580 	 * If we already hold the lock exclusively this operation
581 	 * succeeds and is a NOP.
582 	 */
583 	if (count & LKC_XMASK) {
584 		if (lkp->lk_lockholder == td)
585 			return 0;
586 		panic("lockmgr: upgrade unowned lock");
587 	}
588 	if ((count & LKC_SMASK) == 0)
589 		panic("lockmgr: upgrade unowned lock");
590 
591 	/*
592 	 * Loop to acquire LKC_UPREQ
593 	 */
594 	for (;;) {
595 		/*
596 		 * If UPREQ is already pending, release the shared lock
597 		 * and acquire an exclusive lock normally.
598 		 *
599 		 * With NOWAIT or EXCLUPGRADE the operation must be atomic,
600 		 * and this path is not, so we fail.
601 		 */
602 		if (count & LKC_UPREQ) {
603 			lockmgr_release(lkp, 0);
604 			if ((flags & LK_TYPE_MASK) == LK_EXCLUPGRADE)
605 				error = EBUSY;
606 			else if (extflags & LK_NOWAIT)
607 				error = EBUSY;
608 			else
609 				error = lockmgr_exclusive(lkp, flags);
610 			return error;
611 		}
612 
613 		/*
614 		 * Try to immediately grant the upgrade, handle NOWAIT,
615 		 * or release the shared lock and simultaneously set UPREQ.
616 		 */
617 		if ((count & LKC_SMASK) == LKC_SCOUNT) {
618 			/*
619 			 * Immediate grant
620 			 */
621 			ncount = (count - LKC_SCOUNT + 1) & ~LKC_SHARED;
622 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
623 				lkp->lk_lockholder = td;
624 				return 0;
625 			}
626 		} else if (extflags & LK_NOWAIT) {
627 			/*
628 			 * Early EBUSY if an immediate grant is impossible
629 			 */
630 			lockmgr_release(lkp, 0);
631 			return EBUSY;
632 		} else {
633 			/*
634 			 * Multiple shared locks present, request the
635 			 * upgrade and break to the next loop.
636 			 */
637 			pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
638 			tsleep_interlock(lkp, pflags);
639 			ncount = (count - LKC_SCOUNT) | LKC_UPREQ;
640 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
641 				count = ncount;
642 				break;
643 			}
644 		}
645 		/* retry */
646 	}
647 
648 	/*
649 	 * We have acquired LKC_UPREQ, wait until the upgrade is granted
650 	 * or the tsleep fails.
651 	 *
652 	 * NOWAIT and EXCLUPGRADE have already been handled.  The first
653 	 * tsleep_interlock() has already been associated.
654 	 */
655 	for (;;) {
656 		cpu_ccfence();
657 
658 		/*
659 		 * We were granted our upgrade.  No other UPREQ can be
660 		 * made pending because we are now exclusive.
661 		 */
662 		if ((count & LKC_UPREQ) == 0) {
663 			KKASSERT((count & LKC_XMASK) == 1);
664 			lkp->lk_lockholder = td;
665 			break;
666 		}
667 
668 		if (extflags & LK_CANCELABLE) {
669 			if (count & LKC_CANCEL) {
670 				if (undo_upreq(lkp) == 0) {
671 					lkp->lk_lockholder = LK_KERNTHREAD;
672 					lockmgr_release(lkp, 0);
673 				}
674 				error = ENOLCK;
675 				break;
676 			}
677 		}
678 
679 		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
680 		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
681 
682 		error = tsleep(lkp, pflags | PINTERLOCKED, lkp->lk_wmesg, timo);
683 		if (extflags & LK_SLEEPFAIL) {
684 			if (undo_upreq(lkp) == 0) {
685 				lkp->lk_lockholder = LK_KERNTHREAD;
686 				lockmgr_release(lkp, 0);
687 			}
688 			if (error == 0)
689 				error = ENOLCK;
690 			break;
691 		}
692 		if (error) {
693 			if (undo_upreq(lkp))
694 				break;
695 			error = 0;
696 		}
697 
698 		/*
699 		 * Reload the lock, short-cut the upgrade-granted case before
700 		 * taking the time to interlock and loop.
701 		 */
702 		count = lkp->lk_count;
703 		if ((count & LKC_UPREQ) == 0) {
704 			KKASSERT((count & LKC_XMASK) == 1);
705 			lkp->lk_lockholder = td;
706 			break;
707 		}
708 		tsleep_interlock(lkp, pflags);
709 		count = atomic_fetchadd_64(&lkp->lk_count, 0);
710 		/* retry */
711 	}
712 	return error;
713 }
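
/*
 * Illustrative sketch (hypothetical caller; LK_UPGRADE/LK_EXCLUPGRADE
 * are assumed to be routed here by the lockmgr() wrapper): a plain
 * LK_UPGRADE may drop the shared lock while queueing behind another
 * upgrader, so protected state must be revalidated afterwards, and the
 * shared lock is gone if the upgrade fails.
 */
#if 0
static int
example_upgrade(struct lock *lkp)
{
	int error;

	lockmgr(lkp, LK_SHARED);
	/* ... decide that a modification is needed ... */
	error = lockmgr(lkp, LK_UPGRADE);
	if (error)
		return error;		/* shared lock already released */
	/*
	 * Exclusive now, but the lock may have cycled through other
	 * holders; recheck the protected state before relying on it.
	 */
	lockmgr(lkp, LK_RELEASE);
	return 0;
}
#endif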
714 
715 /*
716  * Release a held lock
717  *
718  * NOTE: When releasing to an unlocked state, we set the SHARED bit
719  *	 to optimize shared lock requests.
720  */
721 int
722 lockmgr_release(struct lock *lkp, u_int flags)
723 {
724 	uint64_t count;
725 	uint64_t ncount;
726 	uint32_t extflags;
727 	thread_t otd;
728 	thread_t td;
729 
730 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
731 	td = curthread;
732 
733 	count = lkp->lk_count;
734 	cpu_ccfence();
735 
736 	for (;;) {
737 		/*
738 		 * Release the currently held lock, grant all requests
739 		 * possible.
740 		 *
741 		 * WARNING! lksleep() assumes that LK_RELEASE does not
742 		 *	    block.
743 		 *
744 		 * Always succeeds.
745 		 * Never blocks.
746 		 */
747 		if ((count & (LKC_SMASK | LKC_XMASK)) == 0)
748 			panic("lockmgr: LK_RELEASE: no lock held");
749 
750 		if (count & LKC_XMASK) {
751 			/*
752 			 * Release exclusively held lock
753 			 */
754 			if (lkp->lk_lockholder != LK_KERNTHREAD &&
755 			    lkp->lk_lockholder != td) {
756 				panic("lockmgr: pid %d, not exclusive "
757 				      "lock holder thr %p/%p unlocking",
758 				    (td->td_proc ? td->td_proc->p_pid : -1),
759 				    td, lkp->lk_lockholder);
760 			}
761 			if ((count & (LKC_UPREQ | LKC_EXREQ |
762 				      LKC_XMASK)) == 1) {
763 				/*
764 				 * Last exclusive count is being released
765 				 * with no UPREQ or EXREQ.  The SHARED
766 				 * bit can be set or not without messing
767 				 * anything up, so precondition it to
768 				 * SHARED (which is the most cpu-optimal).
769 				 *
770 				 * Wakeup any EXREQ2.  EXREQ cannot be
771 				 * set while an exclusive count is present
772 				 * so we have to wakeup any EXREQ2 we find.
773 				 *
774 				 * We could hint the EXREQ2 by leaving
775 				 * SHARED unset, but atm I don't see any
776 				 * usefulness.
777 				 */
778 				otd = lkp->lk_lockholder;
779 				lkp->lk_lockholder = NULL;
780 				ncount = (count - 1);
781 				ncount &= ~(LKC_CANCEL | LKC_EXREQ2);
782 				ncount |= LKC_SHARED;
783 				if (atomic_fcmpset_64(&lkp->lk_count,
784 						      &count, ncount)) {
785 					if (count & (LKC_SMASK | LKC_EXREQ2))
786 						wakeup(lkp);
787 					if (otd != LK_KERNTHREAD)
788 						COUNT(td, -1);
789 					/* count = ncount; NOT USED */
790 					break;
791 				}
792 				lkp->lk_lockholder = otd;
793 				/* retry */
794 			} else if ((count & (LKC_UPREQ | LKC_XMASK)) ==
795 				   (LKC_UPREQ | 1)) {
796 				/*
797 				 * Last exclusive count is being released but
798 				 * an upgrade request is present, automatically
799 				 * grant an exclusive state to the owner of
800 				 * the upgrade request.  Transfer count to
801 				 * grant.
802 				 *
803 				 * EXREQ cannot be set while an exclusive
804 				 * holder exists, so do not clear EXREQ2.
805 				 */
806 				otd = lkp->lk_lockholder;
807 				lkp->lk_lockholder = NULL;
808 				ncount = count & ~LKC_UPREQ;
809 				if (atomic_fcmpset_64(&lkp->lk_count,
810 						      &count, ncount)) {
811 					wakeup(lkp);
812 					if (otd != LK_KERNTHREAD)
813 						COUNT(td, -1);
814 					/* count = ncount; NOT USED */
815 					break;
816 				}
817 				lkp->lk_lockholder = otd;
818 				/* retry */
819 			} else if ((count & (LKC_EXREQ | LKC_XMASK)) ==
820 				   (LKC_EXREQ | 1)) {
821 				/*
822 				 * Last exclusive count is being released but
823 				 * an exclusive request is present.  We
824 				 * automatically grant an exclusive state to
825 				 * the owner of the exclusive request,
826 				 * transferring our count.
827 				 *
828 				 * This case virtually never occurs because
829 				 * EXREQ is not set while exclusive holders
830 				 * exist.  However, it might be set if an
831 				 * exclusive request is pending and a
832 				 * shared holder upgrades.
833 				 *
834 				 * Don't bother clearing EXREQ2.  A thread
835 				 * waiting to set EXREQ can't do it while
836 				 * an exclusive lock is present.
837 				 */
838 				otd = lkp->lk_lockholder;
839 				lkp->lk_lockholder = NULL;
840 				ncount = count & ~LKC_EXREQ;
841 				if (atomic_fcmpset_64(&lkp->lk_count,
842 						      &count, ncount)) {
843 					wakeup(lkp);
844 					if (otd != LK_KERNTHREAD)
845 						COUNT(td, -1);
846 					/* count = ncount; NOT USED */
847 					break;
848 				}
849 				lkp->lk_lockholder = otd;
850 				/* retry */
851 			} else {
852 				/*
853 				 * Multiple exclusive counts, drop by 1.
854 				 * Since we are the holder and there is more
855 				 * than one count, we can just decrement it.
856 				 */
857 				count =
858 				    atomic_fetchadd_long(&lkp->lk_count, -1);
859 				/* count = count - 1  NOT NEEDED */
860 				if (lkp->lk_lockholder != LK_KERNTHREAD)
861 					COUNT(td, -1);
862 				break;
863 			}
864 			/* retry */
865 		} else {
866 			/*
867 			 * Release shared lock
868 			 */
869 			KKASSERT((count & LKC_SHARED) && (count & LKC_SMASK));
870 			if ((count & (LKC_EXREQ | LKC_UPREQ | LKC_SMASK)) ==
871 			    LKC_SCOUNT) {
872 				/*
873 				 * Last shared count is being released,
874 				 * no exclusive or upgrade request present.
875 				 * Generally leave the shared bit set.
876 				 * Clear the CANCEL bit.
877 				 */
878 				ncount = (count - LKC_SCOUNT) & ~LKC_CANCEL;
879 				if (atomic_fcmpset_64(&lkp->lk_count,
880 						      &count, ncount)) {
881 					COUNT(td, -1);
882 					/* count = ncount; NOT USED */
883 					break;
884 				}
885 				/* retry */
886 			} else if ((count & (LKC_UPREQ | LKC_SMASK)) ==
887 				   (LKC_UPREQ | LKC_SCOUNT)) {
888 				/*
889 				 * Last shared count is being released but
890 				 * an upgrade request is present, automatically
891 				 * grant an exclusive state to the owner of
892 				 * the upgrade request and transfer the count.
893 				 */
894 				ncount = (count - LKC_SCOUNT + 1) &
895 					 ~(LKC_UPREQ | LKC_CANCEL | LKC_SHARED);
896 				if (atomic_fcmpset_64(&lkp->lk_count,
897 						      &count, ncount)) {
898 					wakeup(lkp);
899 					COUNT(td, -1);
900 					/* count = ncount; NOT USED */
901 					break;
902 				}
903 				/* retry */
904 			} else if ((count & (LKC_EXREQ | LKC_SMASK)) ==
905 				   (LKC_EXREQ | LKC_SCOUNT)) {
906 				/*
907 				 * Last shared count is being released but
908 				 * an exclusive request is present, we
909 				 * automatically grant an exclusive state to
910 				 * the owner of the request and transfer
911 				 * the count.
912 				 */
913 				ncount = (count - LKC_SCOUNT + 1) &
914 					 ~(LKC_EXREQ | LKC_EXREQ2 |
915 					   LKC_CANCEL | LKC_SHARED);
916 				if (atomic_fcmpset_64(&lkp->lk_count,
917 						      &count, ncount)) {
918 					wakeup(lkp);
919 					COUNT(td, -1);
920 					/* count = ncount; NOT USED */
921 					break;
922 				}
923 				/* retry */
924 			} else {
925 				/*
926 				 * Shared count is greater than 1.  We can
927 				 * just use undo_shreq() to clean things up.
928 				 * undo_shreq() will also handle races to 0
929 				 * after the fact.
930 				 */
931 				undo_shreq(lkp);
932 				COUNT(td, -1);
933 				break;
934 			}
935 			/* retry */
936 		}
937 		/* retry */
938 	}
939 	return 0;
940 }
941 
942 /*
943  * Start canceling blocked requesters or later requesters.
944  * Only blocked requesters using CANCELABLE can be canceled.
945  *
946  * This is intended to then allow other requesters (usually the
947  * caller) to obtain a non-cancelable lock.
948  *
949  * Don't waste time issuing a wakeup if nobody is pending.
950  */
951 int
952 lockmgr_cancel_beg(struct lock *lkp, u_int flags)
953 {
954 	uint64_t count;
955 
956 	count = lkp->lk_count;
957 	for (;;) {
958 		cpu_ccfence();
959 
960 		KKASSERT((count & LKC_CANCEL) == 0);	/* disallowed case */
961 
962 		/* issue w/lock held */
963 		KKASSERT((count & (LKC_XMASK | LKC_SMASK)) != 0);
964 
965 		if (!atomic_fcmpset_64(&lkp->lk_count,
966 				       &count, count | LKC_CANCEL)) {
967 			continue;
968 		}
969 		/* count |= LKC_CANCEL; NOT USED */
970 
971 		/*
972 		 * Wakeup any waiters.
973 		 *
974 		 * NOTE: EXREQ2 only matters when EXREQ is set, so don't
975 		 *	 bother checking EXREQ2.
976 		 */
977 		if (count & (LKC_EXREQ | LKC_SMASK | LKC_UPREQ)) {
978 			wakeup(lkp);
979 		}
980 		break;
981 	}
982 	return 0;
983 }
984 
985 /*
986  * End our cancel request (typically after we have acquired
987  * the lock ourselves).
988  */
989 int
990 lockmgr_cancel_end(struct lock *lkp, u_int flags)
991 {
992 	atomic_clear_long(&lkp->lk_count, LKC_CANCEL);
993 
994 	return 0;
995 }
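
/*
 * Illustrative sketch (hypothetical caller) of the cancel pattern, after
 * the DEBUG_CANCEL_LOCKS sysctl handlers later in this file: the
 * canceling thread holds the lock, flags it, does its work, then clears
 * the flag; blocked LK_CANCELABLE requesters receive ENOLCK.
 */
#if 0
static void
example_canceler(struct lock *lkp)
{
	lockmgr(lkp, LK_EXCLUSIVE);	/* cancel is issued w/lock held */
	lockmgr(lkp, LK_CANCEL_BEG);	/* cancelable waiters get ENOLCK */
	/* ... tear down or reconfigure the object ... */
	lockmgr(lkp, LK_CANCEL_END);	/* clear LKC_CANCEL */
	lockmgr(lkp, LK_RELEASE);
}

static int
example_cancelable_waiter(struct lock *lkp)
{
	int error;

	error = lockmgr(lkp, LK_EXCLUSIVE | LK_CANCELABLE);
	if (error == 0)
		lockmgr(lkp, LK_RELEASE);
	return error;			/* ENOLCK if canceled */
}
#endif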
996 
997 /*
998  * Backout SCOUNT from a failed shared lock attempt and handle any race
999  * to 0.  This function is also used by the release code for the less
1000  * optimal race to 0 case.
1001  *
1002  * WARNING! Since we are unconditionally decrementing LKC_SCOUNT, it is
1003  *	    possible for the lock to get into a LKC_SHARED + ZERO SCOUNT
1004  *	    situation.  A shared request can block with a ZERO SCOUNT if
1005  *	    EXREQ or UPREQ is pending in this situation.  Be sure to always
1006  *	    issue a wakeup() in this situation if we are unable to
1007  *	    transition to an exclusive lock, to handle the race.
1008  *
1009  * Always succeeds
1010  * Must not block
1011  */
1012 static void
1013 undo_shreq(struct lock *lkp)
1014 {
1015 	uint64_t count;
1016 	uint64_t ncount;
1017 
1018 	count = atomic_fetchadd_64(&lkp->lk_count, -LKC_SCOUNT) - LKC_SCOUNT;
1019 	while ((count & (LKC_EXREQ | LKC_UPREQ | LKC_CANCEL)) &&
1020 	       (count & (LKC_SMASK | LKC_XMASK)) == 0) {
1021 		/*
1022 		 * Note that UPREQ must have priority over EXREQ, and EXREQ
1023 		 * over CANCEL, so if the atomic op fails we have to loop up.
1024 		 */
1025 		if (count & LKC_UPREQ) {
1026 			ncount = (count + 1) & ~(LKC_UPREQ | LKC_CANCEL |
1027 						 LKC_SHARED);
1028 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1029 				wakeup(lkp);
1030 				/* count = ncount; NOT USED */
1031 				break;
1032 			}
1033 			wakeup(lkp);
1034 			continue;
1035 		}
1036 		if (count & LKC_EXREQ) {
1037 			ncount = (count + 1) & ~(LKC_EXREQ | LKC_EXREQ2 |
1038 						 LKC_CANCEL | LKC_SHARED);
1039 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1040 				wakeup(lkp);
1041 				/* count = ncount; NOT USED */
1042 				break;
1043 			}
1044 			wakeup(lkp);
1045 			continue;
1046 		}
1047 		if (count & LKC_CANCEL) {
1048 			ncount = count & ~LKC_CANCEL;
1049 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1050 				wakeup(lkp);
1051 				/* count = ncount; NOT USED */
1052 				break;
1053 			}
1054 		}
1055 		/* retry */
1056 	}
1057 }
1058 
1059 /*
1060  * Undo an exclusive request.  Returns EBUSY if we were able to undo the
1061  * request, and 0 if the request was granted before we could undo it.
1062  * When 0 is returned, the lock state has not been modified.  The caller
1063  * is responsible for setting the lockholder to curthread.
1064  */
1065 static
1066 int
1067 undo_exreq(struct lock *lkp)
1068 {
1069 	uint64_t count;
1070 	uint64_t ncount;
1071 	int error;
1072 
1073 	count = lkp->lk_count;
1074 	error = 0;
1075 
1076 	for (;;) {
1077 		cpu_ccfence();
1078 
1079 		if ((count & LKC_EXREQ) == 0) {
1080 			/*
1081 			 * EXREQ was granted.  We own the exclusive lock.
1082 			 */
1083 			break;
1084 		}
1085 		if (count & LKC_XMASK) {
1086 			/*
1087 			 * Clear the EXREQ we still own.  Only wakeup on
1088 			 * EXREQ2 if no UPREQ.  There are still exclusive
1089 			 * holders so do not wake up any shared locks or
1090 			 * any UPREQ.
1091 			 *
1092 			 * If there is an UPREQ it will issue a wakeup()
1093 			 * for any EXREQ wait loops, so we can clear EXREQ2
1094 			 * now.
1095 			 */
1096 			ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
1097 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1098 				if ((count & (LKC_EXREQ2 | LKC_UPREQ)) ==
1099 				    LKC_EXREQ2) {
1100 					wakeup(lkp);
1101 				}
1102 				error = EBUSY;
1103 				/* count = ncount; NOT USED */
1104 				break;
1105 			}
1106 			/* retry */
1107 		} else if (count & LKC_UPREQ) {
1108 			/*
1109 			 * Clear the EXREQ we still own.  We cannot wakeup any
1110 			 * shared or exclusive waiters because there is an
1111 			 * uprequest pending (that we do not handle here).
1112 			 *
1113 			 * If there is an UPREQ it will issue a wakeup()
1114 			 * for any EXREQ wait loops, so we can clear EXREQ2
1115 			 * now.
1116 			 */
1117 			ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
1118 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1119 				error = EBUSY;
1120 				break;
1121 			}
1122 			/* retry */
1123 		} else if ((count & LKC_SHARED) && (count & LKC_SMASK)) {
1124 			/*
1125 			 * No UPREQ, lock not held exclusively, but the lock
1126 			 * is held shared.  Clear EXREQ, wakeup anyone trying
1127 			 * to get the EXREQ bit (they have to set it
1128 			 * themselves, EXREQ2 is an aggregation).
1129 			 *
1130 			 * We must also wakeup any shared locks blocked
1131 			 * by the EXREQ, so just issue the wakeup
1132 			 * unconditionally.  See lockmgr_shared() + 76 lines
1133 			 * or so.
1134 			 */
1135 			ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
1136 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1137 				wakeup(lkp);
1138 				error = EBUSY;
1139 				/* count = ncount; NOT USED */
1140 				break;
1141 			}
1142 			/* retry */
1143 		} else {
1144 			/*
1145 			 * No UPREQ, lock not held exclusively or shared.
1146 			 * Grant the EXREQ and wakeup anyone waiting on
1147 			 * EXREQ2.
1148 			 *
1149 			 * We must also issue a wakeup if SHARED is set,
1150 			 * even without an SCOUNT, due to pre-shared blocking
1151 			 * that can occur on EXREQ in lockmgr_shared().
1152 			 */
1153 			ncount = (count + 1) & ~(LKC_EXREQ | LKC_EXREQ2);
1154 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1155 				if (count & (LKC_EXREQ2 | LKC_SHARED))
1156 					wakeup(lkp);
1157 				/* count = ncount; NOT USED */
1158 				/* we are granting, error == 0 */
1159 				break;
1160 			}
1161 			/* retry */
1162 		}
1163 		/* retry */
1164 	}
1165 	return error;
1166 }
1167 
1168 /*
1169  * Undo an upgrade request.  Returns EBUSY if we were able to undo the
1170  * request, and 0 if the request was granted before we could undo it.
1171  * When 0 is returned, the lock state has not been modified.  The caller
1172  * is responsible for setting the lockholder to curthread.
1173  */
1174 static
1175 int
1176 undo_upreq(struct lock *lkp)
1177 {
1178 	uint64_t count;
1179 	uint64_t ncount;
1180 	int error;
1181 
1182 	count = lkp->lk_count;
1183 	error = 0;
1184 
1185 	for (;;) {
1186 		cpu_ccfence();
1187 
1188 		if ((count & LKC_UPREQ) == 0) {
1189 			/*
1190 			 * UPREQ was granted
1191 			 */
1192 			break;
1193 		}
1194 		if (count & LKC_XMASK) {
1195 			/*
1196 			 * Clear the UPREQ we still own.  Nobody to wakeup
1197 			 * here because there is an existing exclusive
1198 			 * holder.
1199 			 */
1200 			if (atomic_fcmpset_64(&lkp->lk_count, &count,
1201 					      count & ~LKC_UPREQ)) {
1202 				error = EBUSY;
1203 				/* count &= ~LKC_UPREQ; NOT USED */
1204 				break;
1205 			}
1206 		} else if (count & LKC_EXREQ) {
1207 			/*
1208 			 * Clear the UPREQ we still own.  Grant the exclusive
1209 			 * request and wake it up.
1210 			 */
1211 			ncount = (count + 1);
1212 			ncount &= ~(LKC_EXREQ | LKC_EXREQ2 | LKC_UPREQ);
1213 
1214 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1215 				wakeup(lkp);
1216 				error = EBUSY;
1217 				/* count = ncount; NOT USED */
1218 				break;
1219 			}
1220 		} else {
1221 			/*
1222 			 * Clear the UPREQ we still own.  Wakeup any shared
1223 			 * waiters.
1224 			 *
1225 			 * We must also issue a wakeup if SHARED was set,
1226 			 * even if there are no shared waiters, due to the
1227 			 * pre-shared blocking that can occur on UPREQ.
1228 			 */
1229 			ncount = count & ~LKC_UPREQ;
1230 			if (count & LKC_SMASK)
1231 				ncount |= LKC_SHARED;
1232 
1233 			if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1234 				if ((count & LKC_SHARED) ||
1235 				    (ncount & LKC_SHARED)) {
1236 					wakeup(lkp);
1237 				}
1238 				error = EBUSY;
1239 				/* count = ncount; NOT USED */
1240 				break;
1241 			}
1242 		}
1243 		/* retry */
1244 	}
1245 	return error;
1246 }
1247 
1248 void
1249 lockmgr_kernproc(struct lock *lp)
1250 {
1251 	struct thread *td __debugvar = curthread;
1252 
1253 	if (lp->lk_lockholder != LK_KERNTHREAD) {
1254 		KASSERT(lp->lk_lockholder == td,
1255 		    ("lockmgr_kernproc: lock not owned by curthread %p: %p",
1256 		    td, lp->lk_lockholder));
1257 		lp->lk_lockholder = LK_KERNTHREAD;
1258 		COUNT(td, -1);
1259 	}
1260 }
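
/*
 * Illustrative sketch (hypothetical caller): handing an exclusively-held
 * lock to the kernel lets a different thread issue the eventual
 * LK_RELEASE, e.g. when the locked object is queued to an asynchronous
 * consumer.
 */
#if 0
static void
example_handoff(struct lock *lkp)
{
	lockmgr(lkp, LK_EXCLUSIVE);
	lockmgr_kernproc(lkp);		/* owner becomes LK_KERNTHREAD */
	/* ... queue the object; the consumer releases the lock ... */
}
#endif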
1261 
1262 /*
1263  * Initialize a lock; required before use.
1264  */
1265 void
1266 lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
1267 {
1268 	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
1269 	lkp->lk_count = 0;
1270 	lkp->lk_wmesg = wmesg;
1271 	lkp->lk_timo = timo;
1272 	lkp->lk_lockholder = NULL;
1273 }
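
/*
 * Illustrative sketch: a lock with static storage duration can also be
 * initialized at boot with LOCK_SYSINIT(), as the DEBUG_CANCEL_LOCKS
 * code above does for cancel_lk.  The trailing arguments mirror
 * lockinit(); the example_* names are hypothetical.
 */
#if 0
static struct lock example_static_lk;
LOCK_SYSINIT(examplelk, &example_static_lk, "exstlk", 0);
#endif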
1274 
1275 /*
1276  * Reinitialize a lock that is being reused for a different purpose, but
1277  * which may have pending (blocked) threads sitting on it.  The caller
1278  * must already hold the interlock.
1279  */
1280 void
1281 lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
1282 {
1283 	lkp->lk_wmesg = wmesg;
1284 	lkp->lk_timo = timo;
1285 }
1286 
1287 /*
1288  * De-initialize a lock.  The structure must no longer be used by anyone.
1289  */
1290 void
1291 lockuninit(struct lock *lkp)
1292 {
1293 	uint64_t count __unused;
1294 
1295 	count = lkp->lk_count;
1296 	cpu_ccfence();
1297 	KKASSERT((count & (LKC_EXREQ | LKC_UPREQ)) == 0 &&
1298 		 ((count & LKC_SHARED) || (count & LKC_SMASK) == 0));
1299 }
1300 
1301 /*
1302  * Determine the status of a lock.
1303  */
1304 int
1305 lockstatus(struct lock *lkp, struct thread *td)
1306 {
1307 	int lock_type = 0;
1308 	uint64_t count;
1309 
1310 	count = lkp->lk_count;
1311 	cpu_ccfence();
1312 
1313 	if (count & (LKC_XMASK | LKC_SMASK | LKC_EXREQ | LKC_UPREQ)) {
1314 		if (count & LKC_XMASK) {
1315 			if (td == NULL || lkp->lk_lockholder == td)
1316 				lock_type = LK_EXCLUSIVE;
1317 			else
1318 				lock_type = LK_EXCLOTHER;
1319 		} else if ((count & LKC_SMASK) && (count & LKC_SHARED)) {
1320 			lock_type = LK_SHARED;
1321 		}
1322 	}
1323 	return (lock_type);
1324 }
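
/*
 * Illustrative sketch (hypothetical caller): lockstatus() is typically
 * used in assertions to verify the expected lock mode before touching
 * protected state; LK_EXCLOTHER means exclusively held by someone else.
 */
#if 0
static void
example_assert_exclusive(struct lock *lkp)
{
	KKASSERT(lockstatus(lkp, curthread) == LK_EXCLUSIVE);
	/* lockowned() below is the weaker "held by us or shared" test */
	KKASSERT(lockowned(lkp));
}
#endif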
1325 
1326 /*
1327  * Return non-zero if the caller owns the lock shared or exclusive.
1328  * We can only guess re: shared locks.
1329  */
1330 int
1331 lockowned(struct lock *lkp)
1332 {
1333 	thread_t td = curthread;
1334 	uint64_t count;
1335 
1336 	count = lkp->lk_count;
1337 	cpu_ccfence();
1338 
1339 	if (count & LKC_XMASK)
1340 		return(lkp->lk_lockholder == td);
1341 	else
1342 		return((count & LKC_SMASK) != 0);
1343 }
1344 
1345 #if 0
1346 /*
1347  * Determine the number of holders of a lock.
1348  *
1349  * REMOVED - Cannot be used due to our use of atomic_fetchadd_64()
1350  *	     for shared locks.  Caller can only test if the lock has
1351  *	     a count or not using lockinuse(lk) (sys/lock.h)
1352  */
1353 int
1354 lockcount(struct lock *lkp)
1355 {
1356 	panic("lockcount cannot be used");
1357 }
1358 
1359 int
1360 lockcountnb(struct lock *lkp)
1361 {
1362 	panic("lockcount cannot be used");
1363 }
1364 #endif
1365 
1366 /*
1367  * Print out information about state of a lock. Used by VOP_PRINT
1368  * routines to display status about contained locks.
1369  */
1370 void
1371 lockmgr_printinfo(struct lock *lkp)
1372 {
1373 	struct thread *td = lkp->lk_lockholder;
1374 	struct proc *p;
1375 	uint64_t count;
1376 
1377 	count = lkp->lk_count;
1378 	cpu_ccfence();
1379 
1380 	if (td && td != LK_KERNTHREAD)
1381 		p = td->td_proc;
1382 	else
1383 		p = NULL;
1384 
1385 	if (count & LKC_XMASK) {
1386 		kprintf(" lock type %s: EXCLUS (count %016jx) by td %p pid %d",
1387 		    lkp->lk_wmesg, (intmax_t)count, td,
1388 		    p ? p->p_pid : -99);
1389 	} else if ((count & LKC_SMASK) && (count & LKC_SHARED)) {
1390 		kprintf(" lock type %s: SHARED (count %016jx)",
1391 		    lkp->lk_wmesg, (intmax_t)count);
1392 	} else {
1393 		kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
1394 	}
1395 	if ((count & (LKC_EXREQ | LKC_UPREQ)) ||
1396 	    ((count & LKC_XMASK) && (count & LKC_SMASK)))
1397 		kprintf(" with waiters\n");
1398 	else
1399 		kprintf("\n");
1400 }
1401 
1402 void
1403 lock_sysinit(struct lock_args *arg)
1404 {
1405 	lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
1406 }
1407 
1408 #ifdef DEBUG_CANCEL_LOCKS
1409 
1410 static
1411 int
1412 sysctl_cancel_lock(SYSCTL_HANDLER_ARGS)
1413 {
1414 	int error;
1415 
1416 	if (req->newptr) {
1417 		SYSCTL_XUNLOCK();
1418 		lockmgr(&cancel_lk, LK_EXCLUSIVE);
1419 		error = tsleep(&error, PCATCH, "canmas", hz * 5);
1420 		lockmgr(&cancel_lk, LK_CANCEL_BEG);
1421 		error = tsleep(&error, PCATCH, "canmas", hz * 5);
1422 		lockmgr(&cancel_lk, LK_RELEASE);
1423 		SYSCTL_XLOCK();
1424 		SYSCTL_OUT(req, &error, sizeof(error));
1425 	}
1426 	error = 0;
1427 
1428 	return error;
1429 }
1430 
1431 static
1432 int
1433 sysctl_cancel_test(SYSCTL_HANDLER_ARGS)
1434 {
1435 	int error;
1436 
1437 	if (req->newptr) {
1438 		error = lockmgr(&cancel_lk, LK_EXCLUSIVE|LK_CANCELABLE);
1439 		if (error == 0)
1440 			lockmgr(&cancel_lk, LK_RELEASE);
1441 		SYSCTL_OUT(req, &error, sizeof(error));
1442 		kprintf("test %d\n", error);
1443 	}
1444 
1445 	return 0;
1446 }
1447 
1448 #endif
1449