/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $
 * $DragonFly: src/sys/kern/kern_lock.c,v 1.27 2008/01/09 10:59:12 corecode Exp $
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
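
/*
 * Illustrative usage sketch (hypothetical lock "foo_lock"; not part of
 * this file):
 *
 *	struct lock foo_lock;
 *
 *	lockinit(&foo_lock, "foolck", 0, 0);
 *
 *	lockmgr(&foo_lock, LK_SHARED);		(readers may overlap)
 *	... read the protected data ...
 *	lockmgr(&foo_lock, LK_RELEASE);
 *
 *	lockmgr(&foo_lock, LK_EXCLUSIVE);	(writers are exclusive)
 *	... modify the protected data ...
 *	lockmgr(&foo_lock, LK_RELEASE);
 */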

#ifdef SIMPLELOCK_DEBUG
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

static LOCK_INLINE int
shareunlock(struct lock *lkp, int decr)
{
	int dowakeup = 0;

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			dowakeup = 1;
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
	return(dowakeup);
}

/*
 * Lock acquisition helper routine.  Called with the lock's spinlock
 * held; the spinlock may be released and reacquired across the sleep
 * but is held again on return.
 */
static int
acquire(struct lock *lkp, int extflags, int wanted)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;

		/*
		 * Atomic spinlock release/sleep/reacquire.
		 */
		error = ssleep(lkp, &lkp->lk_spinlock,
			       ((extflags & LK_PCATCH) ? PCATCH : 0),
			       lkp->lk_wmesg,
			       ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL)
			return ENOLCK;
	}
	return 0;
}
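
/*
 * Caller-side sketch of the acquire() contract (illustration only;
 * not part of this file).  The lock's spinlock must be held on entry
 * and is held again on return, but it may have been dropped while
 * sleeping, so flag state must be re-tested after the call.
 *
 *	spin_lock(&lkp->lk_spinlock);
 *	error = acquire(lkp, extflags, LK_HAVE_EXCL | LK_WANT_EXCL);
 *	if (error == 0) {
 *		... flags in "wanted" are now clear; claim the lock ...
 *	}
 *	spin_unlock(&lkp->lk_spinlock);
 */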

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * A spinlock is held for most of the procedure.  We must not do anything
 * fancy while holding the spinlock.
 */
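/*
 * Illustrative call sketch (hypothetical lock "lk"; not part of this
 * file): a polling request with LK_NOWAIT fails with EBUSY instead of
 * sleeping when the lock cannot be granted immediately.
 *
 *	if (lockmgr(&lk, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
 *		... lock granted without blocking ...
 *		lockmgr(&lk, LK_RELEASE);
 *	}
 */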
int
#ifndef	DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
	     const char *name, const char *file, int line)
#endif
{
	thread_t td;
	int error;
	int extflags;
	int dowakeup;

	error = 0;
	dowakeup = 0;

	if (mycpu->gd_intr_nesting_level &&
	    (flags & LK_NOWAIT) == 0 &&
	    (flags & LK_TYPE_MASK) != LK_RELEASE &&
	    panic_cpu_gd != mycpu
	) {
#ifndef DEBUG_LOCKS
		panic("lockmgr %s from %p: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
		panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, file, line);
#endif
	}

	/*
	 * Acquire the lock's spinlock.  If LK_NOSPINWAIT is set and the
	 * spinlock is contested, fail with EBUSY rather than spinning.
	 */
	if (spin_trylock(&lkp->lk_spinlock) == FALSE) {
		if (flags & LK_NOSPINWAIT)
			return(EBUSY);
		spin_lock(&lkp->lk_spinlock);
	}

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	td = curthread;

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDF_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != td) {
			if (td->td_flags & TDF_DEADLKTREAT) {
				error = acquire(lkp, extflags, LK_HAVE_EXCL);
			} else {
				error = acquire(lkp, extflags,
						LK_HAVE_EXCL | LK_WANT_EXCL |
						LK_WANT_UPGRADE);
			}
			if (error)
				break;
			sharelock(lkp, 1);
			COUNT(td, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		COUNT(td, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr: not holding exclusive lock");
		}
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOTHREAD;
		if (lkp->lk_waitcount)
			dowakeup = 1;
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			dowakeup = shareunlock(lkp, 1);
			COUNT(td, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the lock
		 * will always have been released.
		 */
		if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr: upgrade exclusive lock");
		}
		dowakeup += shareunlock(lkp, 1);
		COUNT(td, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an
			 * upgrade, so request the upgrade and wait for the
			 * shared count to drop to zero, then take the
			 * exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = td;
			if (lkp->lk_exclusivecount != 0) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr: non-zero exclusive count");
			}
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			COUNT(td, 1);
			break;
		}
		/*
		 * Someone else has requested an upgrade.  Release our
		 * shared lock, awaken the upgrade requestor if we are
		 * the last shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO) {
			++dowakeup;
		}
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr: locking against myself");
			}
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
				      LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(lkp, extflags, LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = td;
		if (lkp->lk_exclusivecount != 0) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr: non-zero exclusive count");
		}
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		COUNT(td, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != td &&
			    lkp->lk_lockholder != LK_KERNTHREAD) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr: pid %d, not %s thr %p/%p unlocking",
				    (td->td_proc ? td->td_proc->p_pid : -1),
				    "exclusive lock holder",
				    td, lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNTHREAD) {
				COUNT(td, -1);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOTHREAD;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
			dowakeup += shareunlock(lkp, 1);
			COUNT(td, -1);
		}
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			++dowakeup;
		break;

	default:
		spin_unlock(&lkp->lk_spinlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	spin_unlock(&lkp->lk_spinlock);
	if (dowakeup)
		wakeup(lkp);
	return (error);
}
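
/*
 * Illustrative upgrade sketch (hypothetical lock "lk"; not part of
 * this file).  LK_UPGRADE releases the caller's shared hold before
 * acquiring exclusively, so on failure the caller no longer holds the
 * lock at all.  LK_EXCLUPGRADE additionally fails with EBUSY when
 * another thread's upgrade is already pending.
 *
 *	lockmgr(&lk, LK_SHARED);
 *	if (lockmgr(&lk, LK_UPGRADE) == 0) {
 *		... now held exclusively ...
 *		lockmgr(&lk, LK_RELEASE);
 *	} else {
 *		... the shared hold is gone; do not LK_RELEASE ...
 *	}
 */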

/*
 * Transfer an exclusively held lock to the special LK_KERNTHREAD
 * owner, so that any thread (not just the original holder) may
 * release it later, e.g. from an I/O completion context.
 */
void
lockmgr_kernproc(struct lock *lp)
{
	struct thread *td __debugvar = curthread;

	if (lp->lk_lockholder != LK_KERNTHREAD) {
		KASSERT(lp->lk_lockholder == td,
		    ("lockmgr_kernproc: lock not owned by curthread %p", td));
		COUNT(td, -1);
		lp->lk_lockholder = LK_KERNTHREAD;
	}
}

/*
 * Set the lock to be exclusively held.  The caller is holding the lock's
 * spinlock and the spinlock remains held on return.  A panic will occur
 * if the lock cannot be set to exclusive.
 */
void
lockmgr_setexclusive_interlocked(struct lock *lkp)
{
	thread_t td = curthread;

	KKASSERT((lkp->lk_flags & (LK_HAVE_EXCL|LK_SHARE_NONZERO)) == 0);
	KKASSERT(lkp->lk_exclusivecount == 0);
	lkp->lk_flags |= LK_HAVE_EXCL;
	lkp->lk_lockholder = td;
	lkp->lk_exclusivecount = 1;
	COUNT(td, 1);
}

/*
 * Clear the caller's exclusive lock.  The caller is holding the lock's
 * spinlock.  THIS FUNCTION WILL UNLOCK THE SPINLOCK.
 *
 * A panic will occur if the caller does not hold the lock.
 */
void
lockmgr_clrexclusive_interlocked(struct lock *lkp)
{
	thread_t td __debugvar = curthread;
	int dowakeup = 0;

	KKASSERT((lkp->lk_flags & LK_HAVE_EXCL) &&
		 lkp->lk_exclusivecount == 1 &&
		 lkp->lk_lockholder == td);
	lkp->lk_lockholder = LK_NOTHREAD;
	lkp->lk_flags &= ~LK_HAVE_EXCL;
	lkp->lk_exclusivecount = 0;
	if (lkp->lk_flags & LK_WAIT_NONZERO)
		dowakeup = 1;
	COUNT(td, -1);
	spin_unlock(&lkp->lk_spinlock);
	if (dowakeup)
		wakeup((void *)lkp);
}
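
/*
 * Illustrative sketch of the interlocked pair (hypothetical lock "lk";
 * not part of this file).  The caller supplies the spinlock around the
 * set side; the clear side consumes and releases it.
 *
 *	spin_lock(&lk.lk_spinlock);
 *	lockmgr_setexclusive_interlocked(&lk);
 *	spin_unlock(&lk.lk_spinlock);
 *	... critical work ...
 *	spin_lock(&lk.lk_spinlock);
 *	lockmgr_clrexclusive_interlocked(&lk);	(spinlock now released)
 */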

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
	spin_init(&lkp->lk_spinlock);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOTHREAD;
}
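
/*
 * Illustrative initialization sketch (hypothetical values; not part of
 * this file): "wmesg" is the message shown while sleeping, "timo" is
 * the ssleep() timeout in ticks used when LK_TIMELOCK is passed, and
 * "flags" may preset external flags such as LK_CANRECURSE.
 *
 *	lockinit(&lk, "foolck", hz / 10, LK_CANRECURSE);
 */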

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
	spin_lock(&lkp->lk_spinlock);
	lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) |
			(flags & LK_EXTFLG_MASK);
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	spin_unlock(&lkp->lk_spinlock);
}
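
/*
 * Illustrative reuse sketch (hypothetical values; not part of this
 * file): give a recycled lock a new name and timeout without
 * disturbing threads already blocked on it.
 *
 *	lockreinit(&lk, "newlck", 0, 0);
 */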

/*
 * Requires that the caller is the exclusive owner of this lock.
 */
void
lockuninit(struct lock *l)
{
	/*
	 * At this point we should have removed all the references to this
	 * lock so there can't be anyone waiting on it.
	 */
	KKASSERT(l->lk_waitcount == 0);

	spin_uninit(&l->lk_spinlock);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
	int lock_type = 0;

	spin_lock(&lkp->lk_spinlock);
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0) {
		lock_type = LK_SHARED;
	}
	spin_unlock(&lkp->lk_spinlock);
	return (lock_type);
}
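
/*
 * Illustrative check (hypothetical lock "lk"; not part of this file):
 * distinguish our own exclusive hold from someone else's.
 *
 *	if (lockstatus(&lk, curthread) == LK_EXCLUSIVE)
 *		... curthread holds it exclusively ...
 *	else if (lockstatus(&lk, curthread) == LK_EXCLOTHER)
 *		... another thread holds it exclusively ...
 */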

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
	thread_t td = curthread;

	if (lkp->lk_exclusivecount)
		return(lkp->lk_lockholder == td);
	return(lkp->lk_sharecount != 0);
}
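
/*
 * Typical assertion sketch (hypothetical lock "lk").  Because shared
 * holders are not tracked per-thread, a nonzero share count is only a
 * guess that the caller is among them.
 *
 *	KKASSERT(lockowned(&lk));
 */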

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
	int count;

	spin_lock(&lkp->lk_spinlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	spin_unlock(&lkp->lk_spinlock);
	return (count);
}

int
lockcountnb(struct lock *lkp)
{
	return (lkp->lk_exclusivecount + lkp->lk_sharecount);
}
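
/*
 * Illustrative assertion (hypothetical lock "lk"; not part of this
 * file): the non-blocking count avoids taking the spinlock, e.g.
 * before tearing the lock down.
 *
 *	KKASSERT(lockcountnb(&lk) == 0);
 *	lockuninit(&lk);
 */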

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
	struct thread *td = lkp->lk_lockholder;
	struct proc *p;

	if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
		p = td->td_proc;
	else
		p = NULL;

	if (lkp->lk_sharecount)
		kprintf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		kprintf(" lock type %s: EXCL (count %d) by td %p pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, td,
		    p ? p->p_pid : -99);
	if (lkp->lk_waitcount > 0)
		kprintf(" with %d pending", lkp->lk_waitcount);
}
606