/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $
 * $DragonFly: src/sys/kern/kern_lock.c,v 1.27 2008/01/09 10:59:12 corecode Exp $
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
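/*
 * A minimal usage sketch (hypothetical caller, not part of this file;
 * the name "mylock" and the zero timeout and flags are illustrative
 * assumptions):
 *
 *	struct lock mylock;
 *
 *	lockinit(&mylock, "mylock", 0, 0);
 *	lockmgr(&mylock, LK_EXCLUSIVE);
 *	... critical section ...
 *	lockmgr(&mylock, LK_RELEASE);
 *	lockuninit(&mylock);
 */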
#ifdef SIMPLELOCK_DEBUG
#define	COUNT(td, x)	(td)->td_locks += (x)
#else
#define	COUNT(td, x)
#endif

#define	LOCK_WAIT_TIME		100
#define	LOCK_SAMPLE_WAIT	7

#if defined(DIAGNOSTIC)
#define	LOCK_INLINE
#else
#define	LOCK_INLINE	__inline
#endif

#define	LK_ALL	(LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
		 LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

static LOCK_INLINE int
shareunlock(struct lock *lkp, int decr)
{
	int dowakeup = 0;

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			dowakeup = 1;
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
	return (dowakeup);
}

/*
 * Lock acquisition helper routine.  Called with the lock's spinlock held.
 */
static int
acquire(struct lock *lkp, int extflags, int wanted)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;

		/*
		 * Atomic spinlock release/sleep/reacquire.
		 */
		error = ssleep(lkp, &lkp->lk_spinlock,
			       ((extflags & LK_PCATCH) ? PCATCH : 0),
			       lkp->lk_wmesg,
			       ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL)
			return ENOLCK;
	}
	return 0;
}
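/*
 * Example: a non-blocking acquisition attempt (hypothetical caller,
 * sketch only).  With LK_NOWAIT, lockmgr() returns EBUSY instead of
 * sleeping when the lock is contested:
 *
 *	if (lockmgr(&mylock, LK_EXCLUSIVE | LK_NOWAIT) == EBUSY) {
 *		... fall back to other work and retry later ...
 *	}
 */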
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks) and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * A spinlock is held for most of the procedure.  We must not do anything
 * fancy while holding the spinlock.
 */
int
#ifndef	DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
	     const char *name, const char *file, int line)
#endif
{
	thread_t td;
	int error;
	int extflags;
	int dowakeup;

	error = 0;
	dowakeup = 0;

	if (mycpu->gd_intr_nesting_level &&
	    (flags & LK_NOWAIT) == 0 &&
	    (flags & LK_TYPE_MASK) != LK_RELEASE &&
	    panic_cpu_gd != mycpu
	) {
#ifndef DEBUG_LOCKS
		panic("lockmgr %s from %p: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
		panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, file, line);
#endif
	}

#ifdef DEBUG_LOCKS
	if (mycpu->gd_spinlocks_wr &&
	    ((flags & LK_NOWAIT) == 0)
	) {
		panic("lockmgr %s from %s:%d: called with %d spinlocks held",
		      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks_wr);
	}
#endif

	/*
	 * Acquire the lock's spinlock.  If the caller cannot afford to
	 * spin, honor LK_NOSPINWAIT and fail with EBUSY instead.
	 */
	if (spin_trylock(&lkp->lk_spinlock) == FALSE) {
		if (flags & LK_NOSPINWAIT)
			return (EBUSY);
		spin_lock(&lkp->lk_spinlock);
	}

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	td = curthread;

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDF_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != td) {
			if (td->td_flags & TDF_DEADLKTREAT) {
				error = acquire(lkp, extflags, LK_HAVE_EXCL);
			} else {
				error = acquire(lkp, extflags,
						LK_HAVE_EXCL | LK_WANT_EXCL |
						LK_WANT_UPGRADE);
			}
			if (error)
				break;
			sharelock(lkp, 1);
			COUNT(td, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		COUNT(td, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr: not holding exclusive lock");
		}
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOTHREAD;
		if (lkp->lk_waitcount)
			dowakeup = 1;
		break;
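	/*
	 * Example: upgrading a shared lock (hypothetical caller, sketch
	 * only).  If LK_EXCLUPGRADE fails with EBUSY the shared lock has
	 * already been released, so the caller must re-acquire from
	 * scratch:
	 *
	 *	lockmgr(&mylock, LK_SHARED);
	 *	if (lockmgr(&mylock, LK_EXCLUPGRADE) == EBUSY)
	 *		lockmgr(&mylock, LK_EXCLUSIVE);
	 */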
	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			dowakeup = shareunlock(lkp, 1);
			COUNT(td, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
		if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr: upgrade exclusive lock");
		}
		dowakeup += shareunlock(lkp, 1);
		COUNT(td, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an
			 * upgrade, so request the upgrade and wait for the
			 * shared count to drop to zero, then take the
			 * exclusive lock.
			 *
			 * Although I don't think this can occur, for
			 * robustness we also wait for any exclusive locks
			 * to be released.  LK_WANT_UPGRADE is supposed to
			 * prevent new exclusive locks but might not in the
			 * future.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags,
					LK_HAVE_EXCL | LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = td;
			if (lkp->lk_exclusivecount != 0) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr(1): non-zero exclusive count");
			}
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			COUNT(td, 1);
			break;
		}
		/*
		 * Someone else has requested an upgrade.  Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO) {
			++dowakeup;
		}
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr: locking against myself");
			}
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
				      LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Wait for exclusive lock holders to release and try to
		 * acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;

		/*
		 * Wait for shared locks and upgrades to finish.  We can lose
		 * the race against a successful shared lock upgrade in which
		 * case LK_HAVE_EXCL will get set regardless of our
		 * acquisition of LK_WANT_EXCL, so we have to acquire
		 * LK_HAVE_EXCL here as well.
		 */
		error = acquire(lkp, extflags, LK_HAVE_EXCL |
					       LK_WANT_UPGRADE |
					       LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = td;
		if (lkp->lk_exclusivecount != 0) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr(2): non-zero exclusive count");
		}
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		COUNT(td, 1);
		break;
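	/*
	 * Example: recursive exclusive locking (hypothetical caller,
	 * sketch only).  Re-acquiring an exclusive lock the thread
	 * already holds panics unless LK_CANRECURSE (recurse) or
	 * LK_NOWAIT (fail with EBUSY) is given; each recursion must be
	 * paired with its own LK_RELEASE:
	 *
	 *	lockmgr(&mylock, LK_EXCLUSIVE);
	 *	lockmgr(&mylock, LK_EXCLUSIVE | LK_CANRECURSE);
	 *	lockmgr(&mylock, LK_RELEASE);
	 *	lockmgr(&mylock, LK_RELEASE);
	 */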
	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != td &&
			    lkp->lk_lockholder != LK_KERNTHREAD) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr: pid %d, not %s thr %p/%p unlocking",
				      (td->td_proc ? td->td_proc->p_pid : -1),
				      "exclusive lock holder",
				      td, lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNTHREAD) {
				COUNT(td, -1);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOTHREAD;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
			dowakeup += shareunlock(lkp, 1);
			COUNT(td, -1);
		}
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			++dowakeup;
		break;

	default:
		spin_unlock(&lkp->lk_spinlock);
		panic("lockmgr: unknown locktype request %d",
		      flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	spin_unlock(&lkp->lk_spinlock);
	if (dowakeup)
		wakeup(lkp);
	return (error);
}

void
lockmgr_kernproc(struct lock *lp)
{
	struct thread *td __debugvar = curthread;

	if (lp->lk_lockholder != LK_KERNTHREAD) {
		KASSERT(lp->lk_lockholder == td,
			("lockmgr_kernproc: lock not owned by curthread %p",
			 td));
		COUNT(td, -1);
		lp->lk_lockholder = LK_KERNTHREAD;
	}
}

#if 0
/*
 * Set the lock to be exclusively held.  The caller is holding the lock's
 * spinlock and the spinlock remains held on return.  A panic will occur
 * if the lock cannot be set to exclusive.
 */
void
lockmgr_setexclusive_interlocked(struct lock *lkp)
{
	thread_t td = curthread;

	KKASSERT((lkp->lk_flags & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0);
	KKASSERT(lkp->lk_exclusivecount == 0);
	lkp->lk_flags |= LK_HAVE_EXCL;
	lkp->lk_lockholder = td;
	lkp->lk_exclusivecount = 1;
	COUNT(td, 1);
}

/*
 * Clear the caller's exclusive lock.  The caller is holding the lock's
 * spinlock.  THIS FUNCTION WILL UNLOCK THE SPINLOCK.
 *
 * A panic will occur if the caller does not hold the lock.
 */
void
lockmgr_clrexclusive_interlocked(struct lock *lkp)
{
	thread_t td __debugvar = curthread;
	int dowakeup = 0;

	KKASSERT((lkp->lk_flags & LK_HAVE_EXCL) &&
		 lkp->lk_exclusivecount == 1 &&
		 lkp->lk_lockholder == td);
	lkp->lk_lockholder = LK_NOTHREAD;
	lkp->lk_flags &= ~LK_HAVE_EXCL;
	lkp->lk_exclusivecount = 0;
	if (lkp->lk_flags & LK_WAIT_NONZERO)
		dowakeup = 1;
	COUNT(td, -1);
	spin_unlock(&lkp->lk_spinlock);
	if (dowakeup)
		wakeup((void *)lkp);
}
#endif

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
	spin_init(&lkp->lk_spinlock);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOTHREAD;
}

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
	spin_lock(&lkp->lk_spinlock);
	lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) |
			(flags & LK_EXTFLG_MASK);
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	spin_unlock(&lkp->lk_spinlock);
}
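/*
 * Example: initializing a lock whose acquisitions time out
 * (hypothetical; the name, the 5 second timeout, and the use of
 * LK_TIMELOCK as an initialization flag here are illustrative
 * assumptions):
 *
 *	lockinit(&mylock, "mylock", 5 * hz, LK_TIMELOCK);
 */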
/*
 * Requires that the caller is the exclusive owner of this lock.
 */
void
lockuninit(struct lock *l)
{
	/*
	 * At this point we should have removed all the references to this
	 * lock so there can't be anyone waiting on it.
	 */
	KKASSERT(l->lk_waitcount == 0);

	spin_uninit(&l->lk_spinlock);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
	int lock_type = 0;

	spin_lock(&lkp->lk_spinlock);
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0) {
		lock_type = LK_SHARED;
	}
	spin_unlock(&lkp->lk_spinlock);
	return (lock_type);
}

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
	thread_t td = curthread;

	if (lkp->lk_exclusivecount)
		return (lkp->lk_lockholder == td);
	return (lkp->lk_sharecount != 0);
}

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
	int count;

	spin_lock(&lkp->lk_spinlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	spin_unlock(&lkp->lk_spinlock);
	return (count);
}

int
lockcountnb(struct lock *lkp)
{
	return (lkp->lk_exclusivecount + lkp->lk_sharecount);
}

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
	struct thread *td = lkp->lk_lockholder;
	struct proc *p;

	if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
		p = td->td_proc;
	else
		p = NULL;

	if (lkp->lk_sharecount)
		kprintf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
			lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		kprintf(" lock type %s: EXCL (count %d) by td %p pid %d",
			lkp->lk_wmesg, lkp->lk_exclusivecount, td,
			p ? p->p_pid : -99);
	if (lkp->lk_waitcount > 0)
		kprintf(" with %d pending", lkp->lk_waitcount);
}
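/*
 * Example: using the status helpers in assertions (hypothetical caller,
 * sketch only).  lockcountnb() does not take the spinlock, so it is
 * suitable in contexts that already hold spinlocks:
 *
 *	KKASSERT(lockstatus(&mylock, curthread) == LK_EXCLUSIVE);
 *	KKASSERT(lockcountnb(&mylock) != 0);
 *	KKASSERT(lockowned(&mylock));
 */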