/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $
 * $DragonFly: src/sys/kern/kern_lock.c,v 1.27 2008/01/09 10:59:12 corecode Exp $
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);

/*
 * Bump the shared count.  Called with the lock's spinlock held.
 */
static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

/*
 * Drop the shared count.  Returns non-zero if an exclusive waiter
 * should be woken up.  Called with the lock's spinlock held.
 */
static LOCK_INLINE int
shareunlock(struct lock *lkp, int decr)
{
	int dowakeup = 0;

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			dowakeup = 1;
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
	return(dowakeup);
}

/*
 * Lock acquisition helper routine.  Called and returns with the lock's
 * spinlock held; the spinlock is dropped atomically while sleeping.
 */
static int
acquire(struct lock *lkp, int extflags, int wanted)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;

		/*
		 * Atomic spinlock release/sleep/reacquire.
		 */
		error = ssleep(lkp, &lkp->lk_spinlock,
			       ((extflags & LK_PCATCH) ? PCATCH : 0),
			       lkp->lk_wmesg,
			       ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL)
			return ENOLCK;
	}
	return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks) and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * A spinlock is held for most of the procedure.  We must not do anything
 * fancy while holding the spinlock.
 */
int
#ifndef	DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
	     const char *name, const char *file, int line)
#endif
{
	thread_t td;
	int error;
	int extflags;
	int dowakeup;
#ifdef DEBUG_LOCKS
	int i;
#endif

	error = 0;
	dowakeup = 0;

	if (mycpu->gd_intr_nesting_level &&
	    (flags & LK_NOWAIT) == 0 &&
	    (flags & LK_TYPE_MASK) != LK_RELEASE &&
	    panic_cpu_gd != mycpu
	) {
#ifndef DEBUG_LOCKS
		panic("lockmgr %s from %p: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
		panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, file, line);
#endif
	}

#ifdef DEBUG_LOCKS
	if (mycpu->gd_spinlocks_wr &&
	    ((flags & LK_NOWAIT) == 0)
	) {
		panic("lockmgr %s from %s:%d: called with %d spinlocks held",
		      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks_wr);
	}
#endif

	spin_lock(&lkp->lk_spinlock);

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	td = curthread;

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDF_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != td) {
			if (td->td_flags & TDF_DEADLKTREAT) {
				error = acquire(lkp, extflags,
						LK_HAVE_EXCL);
			} else {
				error = acquire(lkp, extflags,
						LK_HAVE_EXCL | LK_WANT_EXCL |
						LK_WANT_UPGRADE);
			}
			if (error)
				break;
			sharelock(lkp, 1);
			COUNT(td, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		COUNT(td, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr: not holding exclusive lock");
		}

#ifdef DEBUG_LOCKS
		for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
			if (td->td_lockmgr_stack[i] == lkp &&
			    td->td_lockmgr_stack_id[i] > 0
			) {
				td->td_lockmgr_stack_id[i]--;
				break;
			}
		}
#endif

		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOTHREAD;
		if (lkp->lk_waitcount)
			dowakeup = 1;
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			dowakeup = shareunlock(lkp, 1);
			COUNT(td, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the lock
		 * will always have been released.
		 */
		if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr: upgrade exclusive lock");
		}
		dowakeup += shareunlock(lkp, 1);
		COUNT(td, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive
			 * lock.
			 *
			 * Although I don't think this can occur, for
			 * robustness we also wait for any exclusive locks
			 * to be released.  LK_WANT_UPGRADE is supposed to
			 * prevent new exclusive locks but might not in the
			 * future.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags,
					LK_HAVE_EXCL | LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = td;
			if (lkp->lk_exclusivecount != 0) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr(1): non-zero exclusive count");
			}
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;

			for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
				/*
				 * Recursive lockmgr path
				 */
				if (td->td_lockmgr_stack[i] == lkp &&
				    td->td_lockmgr_stack_id[i] != 0
				) {
					td->td_lockmgr_stack_id[i]++;
					goto lkmatch2;
				}
			}

			for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
				/*
				 * Use new lockmgr tracking slot
				 */
				if (td->td_lockmgr_stack_id[i] == 0) {
					td->td_lockmgr_stack_id[i]++;
					td->td_lockmgr_stack[i] = lkp;
					break;
				}
			}
lkmatch2:
			;
#endif
			COUNT(td, 1);
			break;
		}
		/*
		 * Someone else has requested an upgrade.  Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO) {
			++dowakeup;
		}
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr: locking against myself");
			}
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
				      LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Wait for exclusive lock holders to release and try to
		 * acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;

		/*
		 * Wait for shared locks and upgrades to finish.  We can lose
		 * the race against a successful shared lock upgrade in which
		 * case LK_HAVE_EXCL will get set regardless of our
		 * acquisition of LK_WANT_EXCL, so we have to acquire
		 * LK_HAVE_EXCL here as well.
		 */
		error = acquire(lkp, extflags, LK_HAVE_EXCL |
					       LK_WANT_UPGRADE |
					       LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = td;
		if (lkp->lk_exclusivecount != 0) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr(2): non-zero exclusive count");
		}
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;

		for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
			/*
			 * Recursive lockmgr path
			 */
			if (td->td_lockmgr_stack[i] == lkp &&
			    td->td_lockmgr_stack_id[i] != 0
			) {
				td->td_lockmgr_stack_id[i]++;
				goto lkmatch1;
			}
		}

		for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
			/*
			 * Use new lockmgr tracking slot
			 */
			if (td->td_lockmgr_stack_id[i] == 0) {
				td->td_lockmgr_stack_id[i]++;
				td->td_lockmgr_stack[i] = lkp;
				break;
			}
		}
lkmatch1:
		;
#endif
		COUNT(td, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != td &&
			    lkp->lk_lockholder != LK_KERNTHREAD) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr: pid %d, not %s thr %p/%p unlocking",
				    (td->td_proc ? td->td_proc->p_pid : -1),
				    "exclusive lock holder",
				    td, lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNTHREAD) {
				COUNT(td, -1);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOTHREAD;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
#ifdef DEBUG_LOCKS
			for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
				if (td->td_lockmgr_stack[i] == lkp &&
				    td->td_lockmgr_stack_id[i] > 0
				) {
					td->td_lockmgr_stack_id[i]--;
					lkp->lk_filename = file;
					lkp->lk_lineno = line;
					break;
				}
			}
#endif
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
			dowakeup += shareunlock(lkp, 1);
			COUNT(td, -1);
		} else {
			panic("lockmgr: LK_RELEASE: no lock held");
		}
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			++dowakeup;
		break;

	default:
		spin_unlock(&lkp->lk_spinlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	spin_unlock(&lkp->lk_spinlock);
	if (dowakeup)
		wakeup(lkp);
	return (error);
}
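
#if 0
/*
 * Usage sketch (illustrative only, kept under #if 0 like the unused
 * code below): a typical shared acquisition followed by an upgrade to
 * exclusive.  The helper name and call sites are hypothetical.  Note
 * that a failed LK_UPGRADE leaves the lock already released, so
 * LK_RELEASE may only be issued on the success path.
 */
static void
example_lockmgr_usage(struct lock *lkp)
{
	if (lockmgr(lkp, LK_SHARED) != 0)
		return;			/* e.g. interrupted via LK_PCATCH */

	/* ... read-only access to the protected structure ... */

	if (lockmgr(lkp, LK_UPGRADE) == 0) {
		/* ... modify the protected structure ... */
		lockmgr(lkp, LK_RELEASE);
	}
}
#endif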

/*
 * Transfer ownership of an exclusively held lock to the kernel
 * (LK_KERNTHREAD), allowing a different thread to release it later.
 */
void
lockmgr_kernproc(struct lock *lp)
{
	struct thread *td __debugvar = curthread;

	if (lp->lk_lockholder != LK_KERNTHREAD) {
		KASSERT(lp->lk_lockholder == td,
		    ("lockmgr_kernproc: lock not owned by curthread %p", td));
		COUNT(td, -1);
		lp->lk_lockholder = LK_KERNTHREAD;
	}
}
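
#if 0
/*
 * Sketch (illustrative only): hand an exclusive lock off to the
 * kernel, e.g. across an asynchronous operation, so that whichever
 * thread finishes the work may issue the LK_RELEASE.  The helper
 * name is hypothetical.
 */
static void
example_kernproc_handoff(struct lock *lkp)
{
	lockmgr(lkp, LK_EXCLUSIVE);
	lockmgr_kernproc(lkp);		/* owner is now LK_KERNTHREAD */
	/* ... another thread later calls lockmgr(lkp, LK_RELEASE) ... */
}
#endif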

#if 0
/*
 * Set the lock to be exclusively held.  The caller is holding the lock's
 * spinlock and the spinlock remains held on return.  A panic will occur
 * if the lock cannot be set to exclusive.
 *
 * XXX not only unused but these functions also break EXCLUPGRADE's
 * atomicity.
 */
void
lockmgr_setexclusive_interlocked(struct lock *lkp)
{
	thread_t td = curthread;

	KKASSERT((lkp->lk_flags & (LK_HAVE_EXCL|LK_SHARE_NONZERO)) == 0);
	KKASSERT(lkp->lk_exclusivecount == 0);
	lkp->lk_flags |= LK_HAVE_EXCL;
	lkp->lk_lockholder = td;
	lkp->lk_exclusivecount = 1;
	COUNT(td, 1);
}

/*
 * Clear the caller's exclusive lock.  The caller is holding the lock's
 * spinlock.  THIS FUNCTION WILL UNLOCK THE SPINLOCK.
 *
 * A panic will occur if the caller does not hold the lock.
 */
void
lockmgr_clrexclusive_interlocked(struct lock *lkp)
{
	thread_t td __debugvar = curthread;
	int dowakeup = 0;

	KKASSERT((lkp->lk_flags & LK_HAVE_EXCL) && lkp->lk_exclusivecount == 1
		 && lkp->lk_lockholder == td);
	lkp->lk_lockholder = LK_NOTHREAD;
	lkp->lk_flags &= ~LK_HAVE_EXCL;
	lkp->lk_exclusivecount = 0;
	if (lkp->lk_flags & LK_WAIT_NONZERO)
		dowakeup = 1;
	COUNT(td, -1);
	spin_unlock(&lkp->lk_spinlock);
	if (dowakeup)
		wakeup((void *)lkp);
}

#endif

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
	spin_init(&lkp->lk_spinlock);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOTHREAD;
}
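
#if 0
/*
 * Sketch (illustrative only): initialize a recursable lock before
 * first use.  The lock object, wmesg string and flag choice are
 * hypothetical; external flags such as LK_CANRECURSE set here are
 * sticky and get OR'd into every subsequent lockmgr() request via
 * lk_flags.
 */
static struct lock example_lk;

static void
example_lock_setup(void)
{
	lockinit(&example_lk, "exmplk", 0, LK_CANRECURSE);
}
#endif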

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
	spin_lock(&lkp->lk_spinlock);
	lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) |
			(flags & LK_EXTFLG_MASK);
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	spin_unlock(&lkp->lk_spinlock);
}

/*
 * Deinitialize a lock.  Requires that the caller is the exclusive owner
 * of this lock.
 */
void
lockuninit(struct lock *l)
{
	/*
	 * At this point we should have removed all the references to this
	 * lock so there can't be anyone waiting on it.
	 */
	KKASSERT(l->lk_waitcount == 0);

	spin_uninit(&l->lk_spinlock);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
	int lock_type = 0;

	spin_lock(&lkp->lk_spinlock);
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0) {
		lock_type = LK_SHARED;
	}
	spin_unlock(&lkp->lk_spinlock);
	return (lock_type);
}
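
#if 0
/*
 * Sketch (illustrative only): lockstatus() can back an assertion that
 * the current thread holds the lock exclusively before it touches
 * state protected by the lock.  The helper name is hypothetical.
 */
static void
example_assert_exclusive(struct lock *lkp)
{
	KKASSERT(lockstatus(lkp, curthread) == LK_EXCLUSIVE);
}
#endif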

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
	thread_t td = curthread;

	if (lkp->lk_exclusivecount)
		return(lkp->lk_lockholder == td);
	return(lkp->lk_sharecount != 0);
}

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
	int count;

	spin_lock(&lkp->lk_spinlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	spin_unlock(&lkp->lk_spinlock);
	return (count);
}

int
lockcountnb(struct lock *lkp)
{
	return (lkp->lk_exclusivecount + lkp->lk_sharecount);
}
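
#if 0
/*
 * Sketch (illustrative only): the non-blocking count is what the
 * "can usually be used for assertions" comment above refers to; it
 * avoids taking lk_spinlock, e.g. during structure teardown.  The
 * helper name is hypothetical.
 */
static void
example_assert_unheld(struct lock *lkp)
{
	KKASSERT(lockcountnb(lkp) == 0);
}
#endif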

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
	struct thread *td = lkp->lk_lockholder;
	struct proc *p;

	if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
		p = td->td_proc;
	else
		p = NULL;

	if (lkp->lk_sharecount)
		kprintf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		kprintf(" lock type %s: EXCL (count %d) by td %p pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, td,
		    p ? p->p_pid : -99);
	if (lkp->lk_waitcount > 0)
		kprintf(" with %d pending", lkp->lk_waitcount);
}
710