1 /* 2 * Copyright (c) 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Copyright (C) 1997 6 * John S. Dyson. All rights reserved. 7 * 8 * This code contains ideas from software contributed to Berkeley by 9 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating 10 * System project at Carnegie-Mellon University. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. All advertising materials mentioning features or use of this software 21 * must display the following acknowledgement: 22 * This product includes software developed by the University of 23 * California, Berkeley and its contributors. 24 * 4. Neither the name of the University nor the names of its contributors 25 * may be used to endorse or promote products derived from this software 26 * without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * SUCH DAMAGE. 39 * 40 * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95 41 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $ 42 * $DragonFly: src/sys/kern/kern_lock.c,v 1.22 2006/05/25 02:46:38 dillon Exp $ 43 */ 44 45 #include "opt_lint.h" 46 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/kernel.h> 50 #include <sys/proc.h> 51 #include <sys/lock.h> 52 #include <sys/sysctl.h> 53 #include <sys/spinlock.h> 54 #include <sys/thread2.h> 55 #include <sys/spinlock2.h> 56 57 /* 58 * 0: no warnings, 1: warnings, 2: panic 59 */ 60 static int lockmgr_from_int = 1; 61 SYSCTL_INT(_debug, OID_AUTO, lockmgr_from_int, CTLFLAG_RW, &lockmgr_from_int, 0, ""); 62 63 /* 64 * Locking primitives implementation. 65 * Locks provide shared/exclusive sychronization. 
 */

#ifdef SIMPLELOCK_DEBUG
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

/* Union of every flag bit a lock request may have to wait on. */
#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);

/*
 * Bump the shared-hold count by 'incr' and mark the lock as having at
 * least one shared holder.  Caller must hold lk_spinlock.
 */
static LOCK_INLINE void
sharelock(struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

/*
 * Drop the shared-hold count by 'decr'.  When the count reaches zero,
 * clear LK_SHARE_NONZERO and wake up any thread waiting to acquire or
 * upgrade to the exclusive lock.  Caller must hold lk_spinlock.
 */
static LOCK_INLINE void
shareunlock(struct lock *lkp, int decr) {

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

/*
 * Lock acquisition helper routine.  Called with the lock's spinlock held.
 *
 * Sleeps until none of the flag bits in 'wanted' remain set in lk_flags,
 * releasing lk_spinlock around the tsleep and reacquiring it afterwards.
 * Returns 0 on success, EBUSY when LK_NOWAIT is set and the lock is
 * contended, ENOLCK when LK_SLEEPFAIL is set and a sleep occurred, or
 * the error returned by tsleep() itself.
 */
static int
acquire(struct lock *lkp, int extflags, int wanted)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	if (((lkp->lk_flags | extflags) & LK_NOPAUSE) == 0) {
		if ((lkp->lk_flags & wanted) == 0)
			return 0;
	}

	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;

		/*
		 * Use the _quick version so the critical section is left
		 * intact, protecting the tsleep interlock.  See
		 * tsleep_interlock() for a description of what is
		 * happening here.
		 */
		tsleep_interlock(lkp);
		spin_unlock_wr(&lkp->lk_spinlock);
		error = tsleep(lkp,
			       ((extflags & LK_PCATCH) ? PCATCH : 0),
			       lkp->lk_wmesg,
			       ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		spin_lock_wr(&lkp->lk_spinlock);
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL)
			return ENOLCK;
	}
	return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * A spinlock is held for most of the procedure.  We must not do anything
 * fancy while holding the spinlock.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
	     const char *name, const char *file, int line)
#endif
{
	thread_t td;
	int error;
	int extflags;
	static int didpanic;

	error = 0;

	/*
	 * Blocking lockmgr requests from interrupt context are dangerous;
	 * warn or panic according to the debug.lockmgr_from_int sysctl
	 * (0: silent, 1: warn, 2: panic).
	 */
	if (lockmgr_from_int && mycpu->gd_intr_nesting_level &&
	    (flags & LK_NOWAIT) == 0 &&
	    (flags & LK_TYPE_MASK) != LK_RELEASE && didpanic == 0) {
#ifndef DEBUG_LOCKS
		if (lockmgr_from_int == 2) {
			didpanic = 1;
			panic(
			    "lockmgr %s from %p: called from interrupt",
			    lkp->lk_wmesg, ((int **)&lkp)[-1]);
			didpanic = 0;
		} else {
			printf(
			    "lockmgr %s from %p: called from interrupt\n",
			    lkp->lk_wmesg, ((int **)&lkp)[-1]);
		}
#else
		if (lockmgr_from_int == 2) {
			didpanic = 1;
			panic(
			    "lockmgr %s from %s:%d: called from interrupt",
			    lkp->lk_wmesg, file, line);
			didpanic = 0;
		} else {
			printf(
			    "lockmgr %s from %s:%d: called from interrupt\n",
			    lkp->lk_wmesg, file, line);
		}
#endif
	}

	spin_lock_wr(&lkp->lk_spinlock);

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	td = curthread;

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDF_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests ( but not the exclusive
		 * lock itself ).
		 */
		if (lkp->lk_lockholder != td) {
			if (td->td_flags & TDF_DEADLKTREAT) {
				error = acquire(
					    lkp,
					    extflags,
					    LK_HAVE_EXCL
					);
			} else {
				error = acquire(
					    lkp,
					    extflags,
					    LK_HAVE_EXCL | LK_WANT_EXCL |
					     LK_WANT_UPGRADE
					);
			}
			if (error)
				break;
			sharelock(lkp, 1);
			COUNT(td, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		COUNT(td, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
			spin_unlock_wr(&lkp->lk_spinlock);
			panic("lockmgr: not holding exclusive lock");
		}
		/* convert the entire exclusive recursion depth to shared */
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOTHREAD;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(lkp, 1);
			COUNT(td, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
		if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
			spin_unlock_wr(&lkp->lk_spinlock);
			panic("lockmgr: upgrade exclusive lock");
		}
		shareunlock(lkp, 1);
		COUNT(td, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = td;
			if (lkp->lk_exclusivecount != 0) {
				spin_unlock_wr(&lkp->lk_spinlock);
				panic("lockmgr: non-zero exclusive count");
			}
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			COUNT(td, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade.  Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ( (lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
				spin_unlock_wr(&lkp->lk_spinlock);
				panic("lockmgr: locking against myself");
			}
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(lkp, extflags, LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = td;
		if (lkp->lk_exclusivecount != 0) {
			spin_unlock_wr(&lkp->lk_spinlock);
			panic("lockmgr: non-zero exclusive count");
		}
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		COUNT(td, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			/*
			 * Only the holder (or LK_KERNTHREAD, see
			 * lockmgr_kernproc()) may release an exclusive lock.
			 */
			if (lkp->lk_lockholder != td &&
			    lkp->lk_lockholder != LK_KERNTHREAD) {
				spin_unlock_wr(&lkp->lk_spinlock);
				panic("lockmgr: pid %d, not %s thr %p unlocking",
				    (td->td_proc ? td->td_proc->p_pid : -99),
				    "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNTHREAD) {
				COUNT(td, -1);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOTHREAD;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
			shareunlock(lkp, 1);
			COUNT(td, -1);
		}
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	default:
		spin_unlock_wr(&lkp->lk_spinlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	spin_unlock_wr(&lkp->lk_spinlock);
	return (error);
}

/*
 * Transfer ownership of an exclusively held lock to the LK_KERNTHREAD
 * sentinel.  The caller must currently own the lock (KASSERT enforces
 * this); afterwards LK_RELEASE accepts the unlock from any thread.
 */
void
lockmgr_kernproc(struct lock *lp)
{
	struct thread *td = curthread;

	if (lp->lk_lockholder != LK_KERNTHREAD) {
		KASSERT(lp->lk_lockholder == td,
		    ("lockmgr_kernproc: lock not owned by curthread %p", td));
		COUNT(td, -1);
		lp->lk_lockholder = LK_KERNTHREAD;
	}
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
	spin_init(&lkp->lk_spinlock);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOTHREAD;
}

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
469 */ 470 void 471 lockreinit(struct lock *lkp, char *wmesg, int timo, int flags) 472 { 473 spin_lock_wr(&lkp->lk_spinlock); 474 lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) | 475 (flags & LK_EXTFLG_MASK); 476 lkp->lk_wmesg = wmesg; 477 lkp->lk_timo = timo; 478 spin_unlock_wr(&lkp->lk_spinlock); 479 } 480 481 /* 482 * Determine the status of a lock. 483 */ 484 int 485 lockstatus(struct lock *lkp, struct thread *td) 486 { 487 int lock_type = 0; 488 489 spin_lock_wr(&lkp->lk_spinlock); 490 if (lkp->lk_exclusivecount != 0) { 491 if (td == NULL || lkp->lk_lockholder == td) 492 lock_type = LK_EXCLUSIVE; 493 else 494 lock_type = LK_EXCLOTHER; 495 } else if (lkp->lk_sharecount != 0) { 496 lock_type = LK_SHARED; 497 } 498 spin_unlock_wr(&lkp->lk_spinlock); 499 return (lock_type); 500 } 501 502 /* 503 * Determine the number of holders of a lock. 504 * 505 * The non-blocking version can usually be used for assertions. 506 */ 507 int 508 lockcount(struct lock *lkp) 509 { 510 int count; 511 512 spin_lock_wr(&lkp->lk_spinlock); 513 count = lkp->lk_exclusivecount + lkp->lk_sharecount; 514 spin_unlock_wr(&lkp->lk_spinlock); 515 return (count); 516 } 517 518 int 519 lockcountnb(struct lock *lkp) 520 { 521 return (lkp->lk_exclusivecount + lkp->lk_sharecount); 522 } 523 524 /* 525 * Print out information about state of a lock. Used by VOP_PRINT 526 * routines to display status about contained locks. 527 */ 528 void 529 lockmgr_printinfo(struct lock *lkp) 530 { 531 struct thread *td = lkp->lk_lockholder; 532 struct proc *p; 533 534 if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD) 535 p = td->td_proc; 536 else 537 p = NULL; 538 539 if (lkp->lk_sharecount) 540 printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg, 541 lkp->lk_sharecount); 542 else if (lkp->lk_flags & LK_HAVE_EXCL) 543 printf(" lock type %s: EXCL (count %d) by td %p pid %d", 544 lkp->lk_wmesg, lkp->lk_exclusivecount, td, 545 p ? 
p->p_pid : -99); 546 if (lkp->lk_waitcount > 0) 547 printf(" with %d pending", lkp->lk_waitcount); 548 } 549 550