/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $
 * $DragonFly: src/sys/kern/kern_lock.c,v 1.27 2008/01/09 10:59:12 corecode Exp $
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);

/*
 * Bump the shared count by incr and make sure LK_SHARE_NONZERO reflects
 * the non-zero count.  Called with the lock's spinlock held.
 */
static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

/*
 * Drop the shared count by decr.  If the count reaches zero, clear
 * LK_SHARE_NONZERO and tell the caller to wake up any waiters blocked
 * on an exclusive or upgrade request.  Called with the lock's spinlock
 * held.
 */
static LOCK_INLINE int
shareunlock(struct lock *lkp, int decr)
{
	int dowakeup = 0;

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			dowakeup = 1;
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
	return(dowakeup);
}

/*
 * Lock acquisition helper routine.  Called with the lock's spinlock held.
 * Sleeps while any of the flag bits in wanted remain set, unless LK_NOWAIT
 * requests a polling attempt.
 */
static int
acquire(struct lock *lkp, int extflags, int wanted)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;

		/*
		 * Atomic spinlock release/sleep/reacquire.
		 */
		error = ssleep(lkp, &lkp->lk_spinlock,
			       ((extflags & LK_PCATCH) ? PCATCH : 0),
			       lkp->lk_wmesg,
			       ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL)
			return ENOLCK;
	}
	return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks) and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * A spinlock is held for most of the procedure.  We must not do anything
 * fancy while holding the spinlock.
 */
int
#ifndef	DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
	     const char *name, const char *file, int line)
#endif
{
	thread_t td;
	int error;
	int extflags;
	int dowakeup;
#ifdef DEBUG_LOCKS
	int i;
#endif

	error = 0;
	dowakeup = 0;

	if (mycpu->gd_intr_nesting_level &&
	    (flags & LK_NOWAIT) == 0 &&
	    (flags & LK_TYPE_MASK) != LK_RELEASE &&
	    panic_cpu_gd != mycpu
	) {
#ifndef DEBUG_LOCKS
		panic("lockmgr %s from %p: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
		panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, file, line);
#endif
	}

#ifdef DEBUG_LOCKS
	if (mycpu->gd_spinlocks_wr &&
	    ((flags & LK_NOWAIT) == 0)
	) {
		panic("lockmgr %s from %s:%d: called with %d spinlocks held",
		      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks_wr);
	}
#endif

	/*
	 * Grab the interlock.  Try without spinning first; if that fails
	 * and the caller asked for LK_NOSPINWAIT, return EBUSY rather
	 * than spinning.
	 */
	if (spin_trylock(&lkp->lk_spinlock) == FALSE) {
		if (flags & LK_NOSPINWAIT)
			return(EBUSY);
		spin_lock(&lkp->lk_spinlock);
	}

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	td = curthread;

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDF_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != td) {
			if (td->td_flags & TDF_DEADLKTREAT) {
				error = acquire(lkp, extflags, LK_HAVE_EXCL);
			} else {
				error = acquire(lkp, extflags,
						LK_HAVE_EXCL | LK_WANT_EXCL |
						LK_WANT_UPGRADE);
			}
			if (error)
				break;
			sharelock(lkp, 1);
			COUNT(td, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		COUNT(td, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr: not holding exclusive lock");
		}

#ifdef DEBUG_LOCKS
		for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
			if (td->td_lockmgr_stack[i] == lkp &&
			    td->td_lockmgr_stack_id[i] > 0
			) {
				td->td_lockmgr_stack_id[i]--;
				break;
			}
		}
#endif

		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOTHREAD;
		if (lkp->lk_waitcount)
			dowakeup = 1;
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			dowakeup = shareunlock(lkp, 1);
			COUNT(td, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the lock
		 * will always be unlocked.
		 */
		if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr: upgrade exclusive lock");
		}
		dowakeup += shareunlock(lkp, 1);
		COUNT(td, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an
			 * upgrade, so request the upgrade and wait for
			 * the shared count to drop to zero, then take
			 * the exclusive lock.
			 *
			 * Although I don't think this can occur, for
			 * robustness we also wait for any exclusive locks
			 * to be released.  LK_WANT_UPGRADE is supposed to
			 * prevent new exclusive locks but might not in the
			 * future.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags,
					LK_HAVE_EXCL | LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = td;
			if (lkp->lk_exclusivecount != 0) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr(1): non-zero exclusive count");
			}
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;

			for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
				/*
				 * Recursive lockmgr path
				 */
				if (td->td_lockmgr_stack[i] == lkp &&
				    td->td_lockmgr_stack_id[i] != 0
				) {
					td->td_lockmgr_stack_id[i]++;
					goto lkmatch2;
				}
			}

			for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
				/*
				 * Use new lockmgr tracking slot
				 */
				if (td->td_lockmgr_stack_id[i] == 0) {
					td->td_lockmgr_stack_id[i]++;
					td->td_lockmgr_stack[i] = lkp;
					break;
				}
			}
lkmatch2:
			;
#endif
			COUNT(td, 1);
			break;
		}
		/*
		 * Someone else has requested an upgrade.  Release our
		 * shared lock, awaken the upgrade requestor if we are
		 * the last shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO) {
			++dowakeup;
		}
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr: locking against myself");
			}
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
				      LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Wait for exclusive lock holders to release and try to
		 * acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;

		/*
		 * Wait for shared locks and upgrades to finish.  We can lose
		 * the race against a successful shared lock upgrade in which
		 * case LK_HAVE_EXCL will get set regardless of our
		 * acquisition of LK_WANT_EXCL, so we have to acquire
		 * LK_HAVE_EXCL here as well.
		 */
		error = acquire(lkp, extflags, LK_HAVE_EXCL |
					       LK_WANT_UPGRADE |
					       LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = td;
		if (lkp->lk_exclusivecount != 0) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr(2): non-zero exclusive count");
		}
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;

		for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
			/*
			 * Recursive lockmgr path
			 */
			if (td->td_lockmgr_stack[i] == lkp &&
			    td->td_lockmgr_stack_id[i] != 0
			) {
				td->td_lockmgr_stack_id[i]++;
				goto lkmatch1;
			}
		}

		for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
			/*
			 * Use new lockmgr tracking slot
			 */
			if (td->td_lockmgr_stack_id[i] == 0) {
				td->td_lockmgr_stack_id[i]++;
				td->td_lockmgr_stack[i] = lkp;
				break;
			}
		}
lkmatch1:
		;
#endif
		COUNT(td, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != td &&
			    lkp->lk_lockholder != LK_KERNTHREAD) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr: pid %d, not %s thr %p/%p unlocking",
				    (td->td_proc ? td->td_proc->p_pid : -1),
				    "exclusive lock holder",
				    td, lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNTHREAD) {
				COUNT(td, -1);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOTHREAD;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
#ifdef DEBUG_LOCKS
			for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
				if (td->td_lockmgr_stack[i] == lkp &&
				    td->td_lockmgr_stack_id[i] > 0
				) {
					td->td_lockmgr_stack_id[i]--;
					break;
				}
			}
#endif
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
			dowakeup += shareunlock(lkp, 1);
			COUNT(td, -1);
		}
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			++dowakeup;
		break;

	default:
		spin_unlock(&lkp->lk_spinlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	spin_unlock(&lkp->lk_spinlock);
	if (dowakeup)
		wakeup(lkp);
	return (error);
}
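
/*
 * Illustrative sketch (not compiled, hence the #if 0): the lockmgr
 * request types handled above.  The function and its argument are
 * hypothetical; the lock is assumed to be initialized and not held
 * by the caller on entry.
 */
#if 0
static void
lockmgr_requests_example(struct lock *lkp)
{
	/* Blocking exclusive lock and release. */
	lockmgr(lkp, LK_EXCLUSIVE);
	lockmgr(lkp, LK_RELEASE);

	/*
	 * Shared lock, then a blocking upgrade.  If the upgrade fails
	 * (see LK_UPGRADE above) the lock is left unlocked, so only
	 * release on success.
	 */
	lockmgr(lkp, LK_SHARED);
	if (lockmgr(lkp, LK_UPGRADE) == 0)
		lockmgr(lkp, LK_RELEASE);

	/* Polling request: LK_NOWAIT returns EBUSY instead of sleeping. */
	if (lockmgr(lkp, LK_EXCLUSIVE | LK_NOWAIT) == 0)
		lockmgr(lkp, LK_RELEASE);
}
#endif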

/*
 * Transfer ownership of an exclusively held lock to the kernel (the
 * special LK_KERNTHREAD owner), allowing the lock to be released by a
 * thread other than the one that acquired it.
 */
void
lockmgr_kernproc(struct lock *lp)
{
	struct thread *td __debugvar = curthread;

	if (lp->lk_lockholder != LK_KERNTHREAD) {
		KASSERT(lp->lk_lockholder == td,
		    ("lockmgr_kernproc: lock not owned by curthread %p", td));
		COUNT(td, -1);
		lp->lk_lockholder = LK_KERNTHREAD;
	}
}

#if 0
/*
 * Set the lock to be exclusively held.  The caller is holding the lock's
 * spinlock and the spinlock remains held on return.  A panic will occur
 * if the lock cannot be set to exclusive.
 */
void
lockmgr_setexclusive_interlocked(struct lock *lkp)
{
	thread_t td = curthread;

	KKASSERT((lkp->lk_flags & (LK_HAVE_EXCL|LK_SHARE_NONZERO)) == 0);
	KKASSERT(lkp->lk_exclusivecount == 0);
	lkp->lk_flags |= LK_HAVE_EXCL;
	lkp->lk_lockholder = td;
	lkp->lk_exclusivecount = 1;
	COUNT(td, 1);
}

/*
 * Clear the caller's exclusive lock.  The caller is holding the lock's
 * spinlock.  THIS FUNCTION WILL UNLOCK THE SPINLOCK.
 *
 * A panic will occur if the caller does not hold the lock.
 */
void
lockmgr_clrexclusive_interlocked(struct lock *lkp)
{
	thread_t td __debugvar = curthread;
	int dowakeup = 0;

	KKASSERT((lkp->lk_flags & LK_HAVE_EXCL) && lkp->lk_exclusivecount == 1
		 && lkp->lk_lockholder == td);
	lkp->lk_lockholder = LK_NOTHREAD;
	lkp->lk_flags &= ~LK_HAVE_EXCL;
	lkp->lk_exclusivecount = 0;
	if (lkp->lk_flags & LK_WAIT_NONZERO)
		dowakeup = 1;
	COUNT(td, -1);
	spin_unlock(&lkp->lk_spinlock);
	if (dowakeup)
		wakeup((void *)lkp);
}

#endif

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
	spin_init(&lkp->lk_spinlock);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOTHREAD;
}
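
/*
 * Illustrative sketch (not compiled): lock lifecycle with an extended
 * flag.  The lock and function names are hypothetical; LK_CANRECURSE
 * passed at lockinit() time permits the recursive exclusive acquisition
 * handled in lockmgr() above.
 */
#if 0
static void
lock_lifecycle_example(void)
{
	struct lock examplelk;

	lockinit(&examplelk, "exmplk", 0, LK_CANRECURSE);
	lockmgr(&examplelk, LK_EXCLUSIVE);
	lockmgr(&examplelk, LK_EXCLUSIVE);	/* recursion allowed */
	lockmgr(&examplelk, LK_RELEASE);
	lockmgr(&examplelk, LK_RELEASE);
	lockuninit(&examplelk);
}
#endif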

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
	spin_lock(&lkp->lk_spinlock);
	lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) |
			(flags & LK_EXTFLG_MASK);
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	spin_unlock(&lkp->lk_spinlock);
}

/*
 * Deinitialize a lock.  Requires that the caller is the exclusive owner
 * of this lock.
 */
void
lockuninit(struct lock *l)
{
	/*
	 * At this point we should have removed all the references to this
	 * lock so there can't be anyone waiting on it.
	 */
	KKASSERT(l->lk_waitcount == 0);

	spin_uninit(&l->lk_spinlock);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
	int lock_type = 0;

	spin_lock(&lkp->lk_spinlock);
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0) {
		lock_type = LK_SHARED;
	}
	spin_unlock(&lkp->lk_spinlock);
	return (lock_type);
}
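
/*
 * Illustrative sketch (not compiled): interpreting lockstatus().  The
 * function name is hypothetical.  LK_EXCLOTHER distinguishes another
 * thread's exclusive hold from our own; 0 means not locked.
 */
#if 0
static int
lock_is_mine_example(struct lock *lkp)
{
	return (lockstatus(lkp, curthread) == LK_EXCLUSIVE);
}
#endif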

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
	thread_t td = curthread;

	if (lkp->lk_exclusivecount)
		return(lkp->lk_lockholder == td);
	return(lkp->lk_sharecount != 0);
}

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
	int count;

	spin_lock(&lkp->lk_spinlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	spin_unlock(&lkp->lk_spinlock);
	return (count);
}

int
lockcountnb(struct lock *lkp)
{
	return (lkp->lk_exclusivecount + lkp->lk_sharecount);
}
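
/*
 * Illustrative note: the non-blocking count is handy in assertions,
 * where taking the interlock would be unnecessary overhead, e.g.
 * (examplelk being a hypothetical held lock):
 *
 *	KKASSERT(lockcountnb(&examplelk) > 0);
 */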

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
	struct thread *td = lkp->lk_lockholder;
	struct proc *p;

	if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
		p = td->td_proc;
	else
		p = NULL;

	if (lkp->lk_sharecount)
		kprintf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		kprintf(" lock type %s: EXCL (count %d) by td %p pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, td,
		    p ? p->p_pid : -99);
	if (lkp->lk_waitcount > 0)
		kprintf(" with %d pending", lkp->lk_waitcount);
}
709