/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
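
/*
 * Editor's note (illustrative, not from the original sources): per the
 * expression above, a new shared request is admitted while the lock is in
 * shared mode and is turned away only when exclusive waiters and exclusive
 * spinners are both present, unless the requesting thread already holds
 * lockmgr shared locks (td_lk_slocks != 0) or runs with TDP_DEADLKTREAT
 * set; those escapes keep a reader from deadlocking against the
 * writer-preference policy.  For instance, a fresh reader evaluating
 *
 *	LK_CAN_SHARE(LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)
 *
 * fails, while the same state with td_lk_slocks != 0 succeeds.
 */
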
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * This function assumes the sleepqueue chain lock is held on entry and
 * returns with it released.  It also assumes the generic interlock is sane
 * and has already been checked by the caller.  If LK_INTERLOCK is specified,
 * the interlock is not reacquired after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decisional switch for real sleeping.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
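
/*
 * Editor's note (illustrative sketch, not from the original sources): the
 * ENOLCK conversion above is what gives LK_SLEEPFAIL its meaning.  A caller
 * that must revalidate state after any sleep would use it roughly as
 *
 *	error = lockmgr(&examplelk, LK_EXCLUSIVE | LK_SLEEPFAIL, NULL);
 *	if (error == ENOLCK)
 *		goto retry;		(slept: revalidate and retry)
 *
 * where "examplelk" and "retry" are hypothetical.  The lock is never handed
 * to the caller on the ENOLCK path.
 */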

static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	u_int realexslp;
	int queue, wakeup_swapper;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starving the threads sleeping on the shared queue by
		 * giving them precedence and cleaning up the exclusive
		 * waiters bit anyway.
		 * Please note that the lk_exslpfail count may be lying about
		 * the real number of waiters with the LK_SLEEPFAIL flag on
		 * because they may be used in conjunction with interruptible
		 * sleeps, so lk_exslpfail should be considered an upper
		 * bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}
		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
}
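
/*
 * Editor's note (illustrative, not from the original sources): a concrete
 * case of the "upper bound" caveat above.  Suppose two threads queue on the
 * exclusive queue with LK_SLEEPFAIL and an interruptible sleep, so
 * lk_exslpfail == 2, and one of them is then interrupted by a signal:
 * realexslp drops to 1 while lk_exslpfail stays 2.  The
 * lk_exslpfail >= realexslp branch therefore treats the queue as holding
 * only LK_SLEEPFAIL sleepers, broadcasts on it (those sleepers fail with
 * ENOLCK anyway) and serves the shared queue, which is exactly the
 * starvation-avoidance behaviour the comment describes.
 */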

static void
assert_lockmgr(const struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif

void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	    &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_exslpfail = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}

/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}
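
/*
 * Editor's note (illustrative sketch, not from the original sources): the
 * typical life cycle of a lockmgr lock, using hypothetical names.  The
 * lockmgr() and lockmgr_args() entry points are thin wrappers around
 * __lockmgr_args() below:
 *
 *	struct lock examplelk;
 *
 *	lockinit(&examplelk, PVFS, "example", 0, LK_CANRECURSE);
 *	...
 *	lockmgr(&examplelk, LK_EXCLUSIVE, NULL);
 *	... exclusive section ...
 *	lockmgr(&examplelk, LK_RELEASE, NULL);
 *	...
 *	lockdestroy(&examplelk);
 */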

int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags for the operation requested @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
	    lk->lock_object.lo_name, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			return (0);
		}
	}

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, ilk);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the lock is expected not to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  We need a double-state handle here
			 * because for a failed acquisition the lock can be
			 * either held in exclusive mode or shared mode
			 * (for the writer starvation avoidance technique).
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x) != 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
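
	/*
	 * Editor's note (illustrative, not from the original sources): on a
	 * failed LK_UPGRADE the shared lock is given up via wakeupshlk() and
	 * control falls through to LK_EXCLUSIVE below, so the caller may
	 * briefly hold nothing at all.  Any state that was protected by the
	 * shared hold must therefore be revalidated once
	 *
	 *	lockmgr(&examplelk, LK_UPGRADE, NULL);
	 *
	 * returns successfully, because the lock may have been dropped and
	 * reacquired during the upgrade ("examplelk" is hypothetical).
	 */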
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We have been unable to succeed in upgrading, so just
		 * give up the shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If the lock is expected not to panic,
				 * just give up and return.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is expected not to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it,
			 * claim lock ownership and return, preserving waiters
			 * flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the exclusive
			 * lock and the exclusive waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

		/*
		 * Panic if the lock is recursed.
		 */
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it has no
			 * waiters.
			 * Fix up the tid var if the lock has been disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is recursed also, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 * If interruptible sleeps left the exclusive queue
			 * empty, avoid starving the threads sleeping
			 * on the shared queue by giving them precedence
			 * and cleaning up the exclusive waiters bit anyway.
			 * Please note that the lk_exslpfail count may be
			 * lying about the real number of waiters with the
			 * LK_SLEEPFAIL flag on because they may be used in
			 * conjunction with interruptible sleeps, so
			 * lk_exslpfail should be considered an upper bound,
			 * including the edge cases.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			realexslp = sleepq_sleepcnt(&lk->lock_object,
			    SQ_EXCLUSIVE_QUEUE);
			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
				if (lk->lk_exslpfail < realexslp) {
					lk->lk_exslpfail = 0;
					queue = SQ_EXCLUSIVE_QUEUE;
					v |= (x & LK_SHARED_WAITERS);
				} else {
					lk->lk_exslpfail = 0;
					LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
					    __func__, lk);
					LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
					    __func__, lk);
					wakeup_swapper =
					    sleepq_broadcast(&lk->lock_object,
					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
					queue = SQ_SHARED_QUEUE;
				}
			} else {

				/*
				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeout
				 * may have left spurious lk_exslpfail counts
				 * on, so clean it up anyway.
				 */
				lk->lk_exslpfail = 0;
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
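
	/*
	 * Editor's note (illustrative, not from the original sources):
	 * LK_DRAIN below acquires the lock exclusively while also waiting
	 * out every queued waiter, so when it returns successfully nobody
	 * else holds or is sleeping on the lock.  It is meant for teardown
	 * paths (e.g. releasing a vnode or buf for reuse) where the caller
	 * intends to invalidate or lockdestroy() the lock afterwards.
	 */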
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is expected not to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starving the threads
				 * sleeping on the shared queue by giving
				 * them precedence and cleaning up the
				 * exclusive waiters bit anyway.
				 * Please note that the lk_exslpfail count
				 * may be lying about the real number of
				 * waiters with the LK_SLEEPFAIL flag on
				 * because they may be used in conjunction
				 * with interruptible sleeps, so lk_exslpfail
				 * should be considered an upper bound,
				 * including the edge cases.
				 */
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {

					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean it up anyway.
					 */
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					realexslp =
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
							    __func__, lk);
							LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
							    __func__, lk);
							wakeup_swapper =
							    sleepq_broadcast(
							    &lk->lock_object,
							    SLEEPQ_LK, 0,
							    SQ_EXCLUSIVE_QUEUE);
						}
					} else
						lk->lk_exslpfail = 0;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up we
				 * need to wait for one of them to acquire
				 * the lock before setting the exclusive
				 * waiters bit in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the exclusive
			 * lock and the exclusive waiters flag is set, we will
			 * sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}

void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED, file, line);

	/*
	 * Panic if the lock is recursed.
	 */
	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
		panic("%s: disown a recursed lockmgr @ %s:%d\n",
		    __func__, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	STACK_SAVE(lk);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}
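
/*
 * Editor's note (illustrative sketch, not from the original sources):
 * disowning transfers an exclusively held lock to the anonymous LK_KERNPROC
 * owner so that a thread other than the locker may release it later, e.g.
 * a buf locked by one thread and unlocked from an interrupt-driven
 * completion path:
 *
 *	lockmgr(&examplelk, LK_EXCLUSIVE, NULL);
 *	lockmgr_disown(&examplelk);
 *	(any thread may now issue the matching LK_RELEASE)
 *
 * where "examplelk" is hypothetical.
 */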

void
lockmgr_printinfo(const struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		printf("lock type %s: EXCL by thread %p "
		    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td,
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

	STACK_PRINT(lk);
}

int
lockstatus(const struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}
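
/*
 * Editor's note (illustrative, not from the original sources): lockstatus()
 * reports the state relative to curthread, so callers typically use it for
 * assertions or ownership checks rather than for synchronization, e.g.:
 *
 *	if (lockstatus(&examplelk) == LK_EXCLOTHER)
 *		return (EBUSY);		(held exclusively by someone else)
 *
 * Possible returns are LK_EXCLUSIVE, LK_EXCLOTHER, LK_SHARED and 0 for an
 * unlocked lock; the answer is inherently racy unless the caller itself
 * holds the lock.
 */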
"share" : "", 1397 file, line); 1398 1399 if ((lk->lk_lock & LK_SHARE) == 0) { 1400 if (lockmgr_recursed(lk)) { 1401 if (what & KA_NOTRECURSED) 1402 panic("Lock %s recursed @ %s:%d\n", 1403 lk->lock_object.lo_name, file, 1404 line); 1405 } else if (what & KA_RECURSED) 1406 panic("Lock %s not recursed @ %s:%d\n", 1407 lk->lock_object.lo_name, file, line); 1408 } 1409 break; 1410 case KA_XLOCKED: 1411 case KA_XLOCKED | KA_NOTRECURSED: 1412 case KA_XLOCKED | KA_RECURSED: 1413 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)) 1414 panic("Lock %s not exclusively locked @ %s:%d\n", 1415 lk->lock_object.lo_name, file, line); 1416 if (lockmgr_recursed(lk)) { 1417 if (what & KA_NOTRECURSED) 1418 panic("Lock %s recursed @ %s:%d\n", 1419 lk->lock_object.lo_name, file, line); 1420 } else if (what & KA_RECURSED) 1421 panic("Lock %s not recursed @ %s:%d\n", 1422 lk->lock_object.lo_name, file, line); 1423 break; 1424 case KA_UNLOCKED: 1425 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk)) 1426 panic("Lock %s exclusively locked @ %s:%d\n", 1427 lk->lock_object.lo_name, file, line); 1428 break; 1429 default: 1430 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file, 1431 line); 1432 } 1433 } 1434 #endif 1435 1436 #ifdef DDB 1437 int 1438 lockmgr_chain(struct thread *td, struct thread **ownerp) 1439 { 1440 struct lock *lk; 1441 1442 lk = td->td_wchan; 1443 1444 if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr) 1445 return (0); 1446 db_printf("blocked on lockmgr %s", lk->lock_object.lo_name); 1447 if (lk->lk_lock & LK_SHARE) 1448 db_printf("SHARED (count %ju)\n", 1449 (uintmax_t)LK_SHARERS(lk->lk_lock)); 1450 else 1451 db_printf("EXCL\n"); 1452 *ownerp = lockmgr_xholder(lk); 1453 1454 return (1); 1455 } 1456 1457 static void 1458 db_show_lockmgr(const struct lock_object *lock) 1459 { 1460 struct thread *td; 1461 const struct lock *lk; 1462 1463 lk = (const struct lock *)lock; 1464 1465 db_printf(" state: "); 1466 if (lk->lk_lock == LK_UNLOCKED) 1467 db_printf("UNLOCKED\n"); 1468 else if (lk->lk_lock & LK_SHARE) 1469 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock)); 1470 else { 1471 td = lockmgr_xholder(lk); 1472 if (td == (struct thread *)LK_KERNPROC) 1473 db_printf("XLOCK: LK_KERNPROC\n"); 1474 else 1475 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1476 td->td_tid, td->td_proc->p_pid, 1477 td->td_proc->p_comm); 1478 if (lockmgr_recursed(lk)) 1479 db_printf(" recursed: %d\n", lk->lk_recurse); 1480 } 1481 db_printf(" waiters: "); 1482 switch (lk->lk_lock & LK_ALL_WAITERS) { 1483 case LK_SHARED_WAITERS: 1484 db_printf("shared\n"); 1485 break; 1486 case LK_EXCLUSIVE_WAITERS: 1487 db_printf("exclusive\n"); 1488 break; 1489 case LK_ALL_WAITERS: 1490 db_printf("shared and exclusive\n"); 1491 break; 1492 default: 1493 db_printf("none\n"); 1494 } 1495 db_printf(" spinners: "); 1496 if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS) 1497 db_printf("exclusive\n"); 1498 else 1499 db_printf("none\n"); 1500 } 1501 #endif 1502