/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 * Copyright (C) 2013-2017
 *	Matthew Dillon, All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>.  Extensively rewritten.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/indefinite2.h>

static void undo_shreq(struct lock *lkp);
static int undo_upreq(struct lock *lkp);
static int undo_exreq(struct lock *lkp);

#ifdef DEBUG_CANCEL_LOCKS

static int sysctl_cancel_lock(SYSCTL_HANDLER_ARGS);
static int sysctl_cancel_test(SYSCTL_HANDLER_ARGS);

static struct lock cancel_lk;
LOCK_SYSINIT(cancellk, &cancel_lk, "cancel", 0);
SYSCTL_PROC(_kern, OID_AUTO, cancel_lock, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
            sysctl_cancel_lock, "I", "test cancelable locks");
SYSCTL_PROC(_kern, OID_AUTO, cancel_test, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
            sysctl_cancel_test, "I", "test cancelable locks");

#endif

__read_frequently int lock_test_mode;
SYSCTL_INT(_debug, OID_AUTO, lock_test_mode, CTLFLAG_RW,
           &lock_test_mode, 0, "");

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
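/*
 * Basic usage sketch (hypothetical consumer; the names below are
 * examples only and error handling is elided):
 *
 *	struct lock blk;
 *
 *	lockinit(&blk, "examplk", 0, 0);
 *	lockmgr(&blk, LK_EXCLUSIVE);		- acquire, may block
 *	...modify the protected object...
 *	lockmgr(&blk, LK_RELEASE);		- release
 */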
#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x) do { } while (0)
#endif

/*
 * Helper, assert basic conditions
 */
static __inline void
_lockmgr_assert(struct lock *lkp, u_int flags)
{
        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu
        ) {
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
        }
}

/*
 * Acquire a shared lock
 */
int
lockmgr_shared(struct lock *lkp, u_int flags)
{
        uint32_t extflags;
        thread_t td;
        uint64_t count;
        int error;
        int pflags;
        int timo;
        int didloop;

        _lockmgr_assert(lkp, flags);
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

        count = lkp->lk_count;
        cpu_ccfence();

        /*
         * If the caller already holds the lock exclusively then
         * we silently obtain another count on the exclusive lock.
         * Avoid accessing lk_lockholder until testing exclusivity.
         *
         * WARNING!  The old FreeBSD behavior was to downgrade,
         *           but this creates a problem when recursions
         *           return to the caller and the caller expects
         *           its original exclusive lock to remain exclusively
         *           locked.
         */
        if ((count & LKC_XMASK) && lkp->lk_lockholder == td) {
                KKASSERT(lkp->lk_count & LKC_XMASK);
                if ((extflags & LK_CANRECURSE) == 0) {
                        if (extflags & LK_NOWAIT)
                                return EBUSY;
                        panic("lockmgr: locking against myself");
                }
                atomic_add_64(&lkp->lk_count, 1);
                COUNT(td, 1);
                return 0;
        }

        /*
         * Unless TDF_DEADLKTREAT is set, we cannot add LKC_SCOUNT while
         * SHARED is set and either EXREQ or UPREQ is set.
         *
         * NOTE: In the race-to-0 case (see undo_shreq()), we could
         *       theoretically work the SMASK == 0 case here.
         */
        if ((td->td_flags & TDF_DEADLKTREAT) == 0) {
                while ((count & LKC_SHARED) &&
                       (count & (LKC_EXREQ | LKC_UPREQ))) {
                        /*
                         * Immediate failure conditions
                         */
                        if (extflags & LK_CANCELABLE) {
                                if (count & LKC_CANCEL)
                                        return ENOLCK;
                        }
                        if (extflags & LK_NOWAIT)
                                return EBUSY;

                        /*
                         * Interlocked tsleep
                         */
                        pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                        timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                        tsleep_interlock(lkp, pflags);
                        count = atomic_fetchadd_64(&lkp->lk_count, 0);

                        if ((count & LKC_SHARED) &&
                            (count & (LKC_EXREQ | LKC_UPREQ))) {
                                error = tsleep(lkp, pflags | PINTERLOCKED,
                                               lkp->lk_wmesg, timo);
                                if (error)
                                        return error;
                                count = lkp->lk_count;
                                cpu_ccfence();
                                continue;
                        }
                        break;
                }
        }
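        /*
         * Worked example of the rule above (hypothetical threads):
         * T1 holds the lock shared and T2 has posted EXREQ.  A new
         * shared request from T3 arriving here would starve T2 if it
         * were granted immediately, so T3 blocks until the
         * SHARED+(EXREQ|UPREQ) condition clears, unless T3 runs with
         * TDF_DEADLKTREAT.
         */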
        /*
         * Bump the SCOUNT field.  The shared lock is granted only once
         * the SHARED flag gets set.  If it is already set, we are done.
         *
         * (Racing an EXREQ or UPREQ operation is ok here, we already did
         * our duty above).
         */
        count = atomic_fetchadd_64(&lkp->lk_count, LKC_SCOUNT) + LKC_SCOUNT;
        error = 0;
        didloop = 0;

        for (;;) {
                /*
                 * We may be able to grant ourselves the bit trivially.
                 * We're done once the SHARED bit is granted.
                 */
                if ((count & (LKC_XMASK | LKC_EXREQ |
                              LKC_UPREQ | LKC_SHARED)) == 0) {
                        if (atomic_fcmpset_64(&lkp->lk_count,
                                              &count, count | LKC_SHARED)) {
                                /* count |= LKC_SHARED; NOT USED */
                                break;
                        }
                        continue;
                }
                if ((td->td_flags & TDF_DEADLKTREAT) &&
                    (count & (LKC_XMASK | LKC_SHARED)) == 0) {
                        if (atomic_fcmpset_64(&lkp->lk_count,
                                              &count, count | LKC_SHARED)) {
                                /* count |= LKC_SHARED; NOT USED */
                                break;
                        }
                        continue;
                }
                if (count & LKC_SHARED)
                        break;

                /*
                 * Slow path
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                undo_shreq(lkp);
                                error = ENOLCK;
                                break;
                        }
                }
                if (extflags & LK_NOWAIT) {
                        undo_shreq(lkp);
                        error = EBUSY;
                        break;
                }

                /*
                 * Interlocked after the first loop.
                 */
                if (didloop) {
                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (extflags & LK_SLEEPFAIL) {
                                undo_shreq(lkp);
                                error = ENOLCK;
                                break;
                        }
                        if (error) {
                                undo_shreq(lkp);
                                break;
                        }
                }
                didloop = 1;

                /*
                 * Reload, shortcut grant case, then loop interlock
                 * and loop.
                 */
                count = lkp->lk_count;
                if (count & LKC_SHARED)
                        break;
                tsleep_interlock(lkp, pflags);
                count = atomic_fetchadd_64(&lkp->lk_count, 0);
        }
        if (error == 0)
                COUNT(td, 1);

        return error;
}
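/*
 * A minimal sketch (hypothetical helper, using only flags that appear
 * elsewhere in this file): attempt a non-blocking shared acquisition
 * and fall back to a blocking one.
 */
#if 0
static int
example_shared_get(struct lock *lkp)
{
        if (lockmgr(lkp, LK_SHARED | LK_NOWAIT) == 0)
                return 0;                       /* uncontended fast path */
        return lockmgr(lkp, LK_SHARED);         /* may block */
}
#endif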
/*
 * Acquire an exclusive lock
 */
int
lockmgr_exclusive(struct lock *lkp, u_int flags)
{
        uint64_t count;
        uint64_t ncount;
        uint32_t extflags;
        thread_t td;
        int error;
        int pflags;
        int timo;

        _lockmgr_assert(lkp, flags);
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

        error = 0;
        count = lkp->lk_count;
        cpu_ccfence();

        /*
         * Recursive lock if we already hold it exclusively.  Avoid testing
         * lk_lockholder until after testing lk_count.
         */
        if ((count & LKC_XMASK) && lkp->lk_lockholder == td) {
                if ((extflags & LK_CANRECURSE) == 0) {
                        if (extflags & LK_NOWAIT)
                                return EBUSY;
                        panic("lockmgr: locking against myself");
                }
                count = atomic_fetchadd_64(&lkp->lk_count, 1) + 1;
                KKASSERT((count & LKC_XMASK) > 1);
                COUNT(td, 1);
                return 0;
        }

        /*
         * Trivially acquire the lock, or block until we can set EXREQ.
         * Set EXREQ2 if EXREQ is already set or the lock is already
         * held exclusively.  EXREQ2 is an aggregation bit to request
         * a wakeup.
         *
         * WARNING!  We cannot set EXREQ if the lock is already held
         *           exclusively because it may race another EXREQ
         *           being cleared and granted.  We use the exclusivity
         *           to prevent both EXREQ and UPREQ from being set.
         *
         *           This means that both shared and exclusive requests
         *           have equal priority against a current exclusive holder's
         *           release.  Exclusive requests still have priority over
         *           new shared requests when the lock is already held shared.
         */
        for (;;) {
                /*
                 * Normal trivial case
                 */
                if ((count & (LKC_UPREQ | LKC_EXREQ |
                              LKC_XMASK)) == 0 &&
                    ((count & LKC_SHARED) == 0 ||
                     (count & LKC_SMASK) == 0)) {
                        ncount = (count + 1) & ~LKC_SHARED;
                        if (atomic_fcmpset_64(&lkp->lk_count,
                                              &count, ncount)) {
                                lkp->lk_lockholder = td;
                                COUNT(td, 1);
                                return 0;
                        }
                        continue;
                }

                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL)
                                return ENOLCK;
                }
                if (extflags & LK_NOWAIT)
                        return EBUSY;

                /*
                 * Interlock to set EXREQ or EXREQ2
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                if (count & (LKC_EXREQ | LKC_XMASK))
                        ncount = count | LKC_EXREQ2;
                else
                        ncount = count | LKC_EXREQ;
                tsleep_interlock(lkp, pflags);
                if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                        /*
                         * If we successfully transitioned to EXREQ we
                         * can break out, otherwise we had set EXREQ2 and
                         * we block.
                         */
                        if ((count & (LKC_EXREQ | LKC_XMASK)) == 0) {
                                count = ncount;
                                break;
                        }

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        count = lkp->lk_count;  /* reload */
                        cpu_ccfence();
                }
#ifdef INVARIANTS
                if (lock_test_mode > 0) {
                        --lock_test_mode;
                        print_backtrace(8);
                }
#endif
                if (error)
                        return error;
                if (extflags & LK_SLEEPFAIL)
                        return ENOLCK;
        }

        /*
         * Once EXREQ has been set, wait for it to be granted.
         * We enter the loop with tsleep_interlock() already called.
         */
        for (;;) {
                /*
                 * Waiting for EXREQ to be granted to us.
                 *
                 * The granting thread will handle the count for us, but we
                 * still have to set lk_lockholder.
                 *
                 * NOTE!  If we try to trivially get the exclusive lock
                 *        (basically by racing undo_shreq()) and succeed,
                 *        we must still wakeup(lkp) for another exclusive
                 *        lock trying to acquire EXREQ.  Easier to simply
                 *        wait for our own wakeup.
                 */
                if ((count & LKC_EXREQ) == 0) {
                        KKASSERT(count & LKC_XMASK);
                        lkp->lk_lockholder = td;
                        COUNT(td, 1);
                        break;
                }

                /*
                 * Block waiting for our exreq to be granted.
                 * Check cancellation.  NOWAIT was already dealt with.
                 */
                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                if (undo_exreq(lkp) == 0) {
                                        lkp->lk_lockholder = LK_KERNTHREAD;
                                        lockmgr_release(lkp, 0);
                                }
                                error = ENOLCK;
                                break;
                        }
                }

                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                error = tsleep(lkp, pflags | PINTERLOCKED, lkp->lk_wmesg, timo);
#ifdef INVARIANTS
                if (lock_test_mode > 0) {
                        --lock_test_mode;
                        print_backtrace(8);
                }
#endif
                /*
                 * A tsleep error is uncommon.  If it occurs we have to
                 * undo our EXREQ.  If we are granted the exclusive lock
                 * as we try to undo we have to deal with it.
                 */
                if (extflags & LK_SLEEPFAIL) {
                        if (undo_exreq(lkp) == 0) {
                                lkp->lk_lockholder = LK_KERNTHREAD;
                                lockmgr_release(lkp, 0);
                        }
                        if (error == 0)
                                error = ENOLCK;
                        break;
                }
                if (error) {
                        if (undo_exreq(lkp))
                                break;
                        lkp->lk_lockholder = td;
                        COUNT(td, 1);
                        error = 0;
                        break;
                }

                /*
                 * Reload after sleep, shortcut grant case.
                 * Then set the interlock and loop.
                 *
                 * The granting thread will handle the count for us, but we
                 * still have to set lk_lockholder.
                 */
                count = lkp->lk_count;
                cpu_ccfence();
                if ((count & LKC_EXREQ) == 0) {
                        KKASSERT(count & LKC_XMASK);
                        lkp->lk_lockholder = td;
                        COUNT(td, 1);
                        break;
                }
                tsleep_interlock(lkp, pflags);
                count = atomic_fetchadd_64(&lkp->lk_count, 0);
        }
        return error;
}
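/*
 * Example of the EXREQ/EXREQ2 split above (hypothetical threads):
 * T1 holds the lock exclusively and T2 blocks with EXREQ set.  A
 * third exclusive requester, T3, cannot also set EXREQ, so it sets
 * the aggregation bit EXREQ2 and sleeps.  When T2's request is
 * granted or undone the EXREQ2 sleepers are woken and retry from
 * the top of the acquisition loop, where one of them may set EXREQ.
 */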
/*
 * Downgrade an exclusive lock to shared.
 *
 * This function always succeeds as long as the caller owns a legal
 * exclusive lock with one reference.  UPREQ and EXREQ are ignored.
 */
int
lockmgr_downgrade(struct lock *lkp, u_int flags)
{
        uint64_t count;
        uint64_t ncount;
        uint32_t extflags;
        thread_t otd;
        thread_t td;

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;
        count = lkp->lk_count;

        for (;;) {
                cpu_ccfence();

                /*
                 * Downgrade an exclusive lock into a shared lock.  All
                 * counts on a recursive exclusive lock become shared.
                 *
                 * NOTE: Currently to reduce confusion we only allow
                 *       there to be one exclusive lock count, and panic
                 *       if there are more.
                 */
                if (lkp->lk_lockholder != td || (count & LKC_XMASK) != 1) {
                        panic("lockmgr: not holding exclusive lock: "
                              "%p/%p %016jx", lkp->lk_lockholder, td, count);
                }

                /*
                 * NOTE!  Must NULL-out lockholder before releasing the
                 *        exclusive lock.
                 *
                 * NOTE!  There might be pending shared requests, check
                 *        and wake them up.
                 */
                otd = lkp->lk_lockholder;
                lkp->lk_lockholder = NULL;
                ncount = (count & ~(LKC_XMASK | LKC_EXREQ2)) +
                         ((count & LKC_XMASK) << LKC_SSHIFT);
                ncount |= LKC_SHARED;

                if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                        /*
                         * Wakeup any shared waiters (prior SMASK), or
                         * any exclusive requests that couldn't set EXREQ
                         * because the lock had been held exclusively.
                         */
                        if (count & (LKC_SMASK | LKC_EXREQ2))
                                wakeup(lkp);
                        /* count = ncount; NOT USED */
                        break;
                }
                lkp->lk_lockholder = otd;
                /* retry */
        }
        return 0;
}
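/*
 * Downgrade example (values inferred from the code above): a holder
 * with one exclusive count (XMASK == 1) ends up with one shared
 * count (SCOUNT) and the SHARED bit set.  Pending shared waiters and
 * any EXREQ2 aggregators are woken so they can proceed against the
 * now-shared lock.
 */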
/*
 * Upgrade a shared lock to exclusive.  If LK_EXCLUPGRADE then guarantee
 * that no other exclusive requester can get in front of us and fail
 * immediately if another upgrade is pending.  If we fail, the shared
 * lock is released.
 *
 * If LK_EXCLUPGRADE is not set and we cannot upgrade because someone
 * else is in front of us, we release the shared lock and acquire the
 * exclusive lock normally.  If a failure occurs, the shared lock is
 * released.
 *
 * The way this works is that if we cannot instantly upgrade the
 * shared lock due to various conditions, but we can acquire UPREQ,
 * we then set UPREQ and wait for the thread blocking us to grant
 * our upgrade.  The other thread grants our upgrade by incrementing
 * the excl count (to 1) and clearing UPREQ, but it doesn't know 'who'
 * requested the upgrade so it can't set lk_lockholder.  Our thread notices
 * that LKC_UPREQ is now clear and finishes up by setting lk_lockholder.
 */
int
lockmgr_upgrade(struct lock *lkp, u_int flags)
{
        uint64_t count;
        uint64_t ncount;
        uint32_t extflags;
        thread_t td;
        int error;
        int pflags;
        int timo;

        _lockmgr_assert(lkp, flags);
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;
        error = 0;
        count = lkp->lk_count;
        cpu_ccfence();

        /*
         * If we already hold the lock exclusively this operation
         * succeeds and is a NOP.
         */
        if (count & LKC_XMASK) {
                if (lkp->lk_lockholder == td)
                        return 0;
                panic("lockmgr: upgrade unowned lock");
        }
        if ((count & LKC_SMASK) == 0)
                panic("lockmgr: upgrade unowned lock");

        /*
         * Loop to acquire LKC_UPREQ
         */
        for (;;) {
                /*
                 * If UPREQ is already pending, release the shared lock
                 * and acquire an exclusive lock normally.
                 *
                 * If NOWAIT or EXCLUPGRADE the operation must be atomic,
                 * and this isn't, so we fail.
                 */
                if (count & LKC_UPREQ) {
                        lockmgr_release(lkp, 0);
                        if ((flags & LK_TYPE_MASK) == LK_EXCLUPGRADE)
                                error = EBUSY;
                        else if (extflags & LK_NOWAIT)
                                error = EBUSY;
                        else
                                error = lockmgr_exclusive(lkp, flags);
                        return error;
                }

                /*
                 * Try to immediately grant the upgrade, handle NOWAIT,
                 * or release the shared lock and simultaneously set UPREQ.
                 */
                if ((count & LKC_SMASK) == LKC_SCOUNT) {
                        /*
                         * Immediate grant
                         */
                        ncount = (count - LKC_SCOUNT + 1) & ~LKC_SHARED;
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                lkp->lk_lockholder = td;
                                return 0;
                        }
                } else if (extflags & LK_NOWAIT) {
                        /*
                         * Early EBUSY if an immediate grant is impossible
                         */
                        lockmgr_release(lkp, 0);
                        return EBUSY;
                } else {
                        /*
                         * Multiple shared locks present, request the
                         * upgrade and break to the next loop.
                         */
                        pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                        tsleep_interlock(lkp, pflags);
                        ncount = (count - LKC_SCOUNT) | LKC_UPREQ;
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                count = ncount;
                                break;
                        }
                }
                /* retry */
        }

        /*
         * We have acquired LKC_UPREQ, wait until the upgrade is granted
         * or the tsleep fails.
         *
         * NOWAIT and EXCLUPGRADE have already been handled.  The first
         * tsleep_interlock() has already been associated.
         */
        for (;;) {
                cpu_ccfence();

                /*
                 * We were granted our upgrade.  No other UPREQ can be
                 * made pending because we are now exclusive.
                 *
                 * The granting thread will handle the count for us, but we
                 * still have to set lk_lockholder.
                 */
                if ((count & LKC_UPREQ) == 0) {
                        KKASSERT((count & LKC_XMASK) == 1);
                        lkp->lk_lockholder = td;
                        break;
                }

                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                if (undo_upreq(lkp) == 0) {
                                        lkp->lk_lockholder = LK_KERNTHREAD;
                                        lockmgr_release(lkp, 0);
                                }
                                error = ENOLCK;
                                break;
                        }
                }

                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                error = tsleep(lkp, pflags | PINTERLOCKED, lkp->lk_wmesg, timo);
                if (extflags & LK_SLEEPFAIL) {
                        if (undo_upreq(lkp) == 0) {
                                lkp->lk_lockholder = LK_KERNTHREAD;
                                lockmgr_release(lkp, 0);
                        }
                        if (error == 0)
                                error = ENOLCK;
                        break;
                }
                if (error) {
                        if (undo_upreq(lkp))
                                break;
                        error = 0;
                }

                /*
                 * Reload the lock, short-cut the UPGRANT code before
                 * taking the time to interlock and loop.
                 *
                 * The granting thread will handle the count for us, but we
                 * still have to set lk_lockholder.
                 */
                count = lkp->lk_count;
                if ((count & LKC_UPREQ) == 0) {
                        KKASSERT((count & LKC_XMASK) == 1);
                        lkp->lk_lockholder = td;
                        break;
                }
                tsleep_interlock(lkp, pflags);
                count = atomic_fetchadd_64(&lkp->lk_count, 0);
                /* retry */
        }
        return error;
}
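/*
 * Upgrade usage sketch (hypothetical; assumes LK_UPGRADE dispatches
 * to lockmgr_upgrade() the way LK_EXCLUPGRADE does):
 *
 *	lockmgr(&blk, LK_SHARED);
 *	if (lockmgr(&blk, LK_EXCLUPGRADE) != 0) {
 *		...the shared lock was released on failure...
 *		lockmgr(&blk, LK_EXCLUSIVE);
 *	}
 */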
/*
 * Release a held lock
 *
 * NOTE: When releasing to an unlocked state, we set the SHARED bit
 *       to optimize shared lock requests.
 */
int
lockmgr_release(struct lock *lkp, u_int flags)
{
        uint64_t count;
        uint64_t ncount;
        uint32_t extflags;
        thread_t otd;
        thread_t td;

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

        count = lkp->lk_count;
        cpu_ccfence();

        for (;;) {
                /*
                 * Release the currently held lock, grant all requests
                 * possible.
                 *
                 * WARNING!  lksleep() assumes that LK_RELEASE does not
                 *           block.
                 *
                 * Always succeeds.
                 * Never blocks.
                 */
                if ((count & (LKC_SMASK | LKC_XMASK)) == 0)
                        panic("lockmgr: LK_RELEASE: no lock held");
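                /*
                 * Grant-transfer summary (restating the cases below):
                 * when the last exclusive or shared count is released,
                 * a pending UPREQ is preferred over a pending EXREQ,
                 * and the released count is transferred directly to
                 * the new owner instead of passing through zero.
                 */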
                if (count & LKC_XMASK) {
                        /*
                         * Release exclusively held lock
                         */
                        if (lkp->lk_lockholder != LK_KERNTHREAD &&
                            lkp->lk_lockholder != td) {
                                panic("lockmgr: pid %d, not exclusive "
                                      "lock holder thr %p/%p unlocking",
                                      (td->td_proc ? td->td_proc->p_pid : -1),
                                      td, lkp->lk_lockholder);
                        }
                        if ((count & (LKC_UPREQ | LKC_EXREQ |
                                      LKC_XMASK)) == 1) {
                                /*
                                 * Last exclusive count is being released
                                 * with no UPREQ or EXREQ.  The SHARED
                                 * bit can be set or not without messing
                                 * anything up, so precondition it to
                                 * SHARED (which is the most cpu-optimal).
                                 *
                                 * Wakeup any EXREQ2.  EXREQ cannot be
                                 * set while an exclusive count is present
                                 * so we have to wakeup any EXREQ2 we find.
                                 *
                                 * We could hint the EXREQ2 by leaving
                                 * SHARED unset, but atm I don't see any
                                 * usefulness.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                ncount = (count - 1);
                                ncount &= ~(LKC_CANCEL | LKC_EXREQ2);
                                ncount |= LKC_SHARED;
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        if (count & (LKC_SMASK | LKC_EXREQ2))
                                                wakeup(lkp);
                                        if (otd != LK_KERNTHREAD)
                                                COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                                lkp->lk_lockholder = otd;
                                /* retry */
                        } else if ((count & (LKC_UPREQ | LKC_XMASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last exclusive count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.  Transfer count to
                                 * grant.
                                 *
                                 * The owner of LKC_UPREQ is still responsible
                                 * for setting lk_lockholder.
                                 *
                                 * EXREQ cannot be set while an exclusive
                                 * holder exists, so do not clear EXREQ2.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                ncount = count & ~LKC_UPREQ;
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        wakeup(lkp);
                                        if (otd != LK_KERNTHREAD)
                                                COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                                lkp->lk_lockholder = otd;
                                /* retry */
                        } else if ((count & (LKC_EXREQ | LKC_XMASK)) ==
                                   (LKC_EXREQ | 1)) {
                                /*
                                 * Last exclusive count is being released but
                                 * an exclusive request is present.  We
                                 * automatically grant an exclusive state to
                                 * the owner of the exclusive request,
                                 * transferring our count.
                                 *
                                 * This case virtually never occurs because
                                 * EXREQ is not set while exclusive holders
                                 * exist.  However, it might be set if an
                                 * exclusive request is pending and a
                                 * shared holder upgrades.
                                 *
                                 * Don't bother clearing EXREQ2.  A thread
                                 * waiting to set EXREQ can't do it while
                                 * an exclusive lock is present.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                ncount = count & ~LKC_EXREQ;
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        wakeup(lkp);
                                        if (otd != LK_KERNTHREAD)
                                                COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                                lkp->lk_lockholder = otd;
                                /* retry */
                        } else {
                                /*
                                 * Multiple exclusive counts, drop by 1.
                                 * Since we are the holder and there is more
                                 * than one count, we can just decrement it.
                                 */
                                count =
                                    atomic_fetchadd_64(&lkp->lk_count, -1);
                                /* count = count - 1  NOT NEEDED */
                                if (lkp->lk_lockholder != LK_KERNTHREAD)
                                        COUNT(td, -1);
                                break;
                        }
                        /* retry */
                } else {
                        /*
                         * Release shared lock
                         */
                        KKASSERT((count & LKC_SHARED) && (count & LKC_SMASK));
                        if ((count & (LKC_EXREQ | LKC_UPREQ | LKC_SMASK)) ==
                            LKC_SCOUNT) {
                                /*
                                 * Last shared count is being released,
                                 * no exclusive or upgrade request present.
                                 * Generally leave the shared bit set.
                                 * Clear the CANCEL bit.
                                 */
                                ncount = (count - LKC_SCOUNT) & ~LKC_CANCEL;
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                                /* retry */
                        } else if ((count & (LKC_UPREQ | LKC_SMASK)) ==
                                   (LKC_UPREQ | LKC_SCOUNT)) {
                                /*
                                 * Last shared count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request and transfer the count.
                                 *
                                 * The owner of the upgrade request is still
                                 * responsible for setting lk_lockholder.
                                 */
                                ncount = (count - LKC_SCOUNT + 1) &
                                         ~(LKC_UPREQ | LKC_CANCEL | LKC_SHARED);
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        wakeup(lkp);
                                        COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                                /* retry */
                        } else if ((count & (LKC_EXREQ | LKC_SMASK)) ==
                                   (LKC_EXREQ | LKC_SCOUNT)) {
                                /*
                                 * Last shared count is being released but
                                 * an exclusive request is present, we
                                 * automatically grant an exclusive state to
                                 * the owner of the request and transfer
                                 * the count.
                                 */
                                ncount = (count - LKC_SCOUNT + 1) &
                                         ~(LKC_EXREQ | LKC_EXREQ2 |
                                           LKC_CANCEL | LKC_SHARED);
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        wakeup(lkp);
                                        COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                                /* retry */
                        } else {
                                /*
                                 * Shared count is greater than 1.  We can
                                 * just use undo_shreq() to clean things up.
                                 * undo_shreq() will also handle races to 0
                                 * after the fact.
                                 */
                                undo_shreq(lkp);
                                COUNT(td, -1);
                                break;
                        }
                        /* retry */
                }
                /* retry */
        }
        return 0;
}
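/*
 * Cancellation usage sketch, mirroring the DEBUG_CANCEL_LOCKS test
 * code at the bottom of this file:
 *
 *	lockmgr(&blk, LK_EXCLUSIVE);
 *	lockmgr(&blk, LK_CANCEL_BEG);	- cancelable waiters now get
 *	...				  ENOLCK instead of blocking
 *	lockmgr(&blk, LK_RELEASE);
 *
 * A requester opts in with LK_CANCELABLE, e.g.
 * lockmgr(&blk, LK_EXCLUSIVE | LK_CANCELABLE).
 */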
/*
 * Start canceling blocked or future requesters.  Only blocked/future
 * requesters who pass the CANCELABLE flag can be canceled.
 *
 * This is intended to then allow other requesters (usually the
 * caller) to obtain a non-cancelable lock.
 *
 * Don't waste time issuing a wakeup if nobody is pending.
 */
int
lockmgr_cancel_beg(struct lock *lkp, u_int flags)
{
        uint64_t count;

        count = lkp->lk_count;
        for (;;) {
                cpu_ccfence();

                KKASSERT((count & LKC_CANCEL) == 0);    /* disallowed case */

                /* issue w/lock held */
                KKASSERT((count & (LKC_XMASK | LKC_SMASK)) != 0);

                if (!atomic_fcmpset_64(&lkp->lk_count,
                                       &count, count | LKC_CANCEL)) {
                        continue;
                }
                /* count |= LKC_CANCEL; NOT USED */

                /*
                 * Wakeup any waiters.
                 *
                 * NOTE: EXREQ2 must be checked in addition to standard
                 *       wait sources, it is possible for EXREQ2 to be
                 *       set when EXREQ is clear.
                 */
                if (count & (LKC_EXREQ | LKC_EXREQ2 | LKC_SMASK | LKC_UPREQ)) {
                        wakeup(lkp);
                }
                break;
        }
        return 0;
}

/*
 * End our cancel request (typically after we have acquired
 * the lock ourselves).
 */
int
lockmgr_cancel_end(struct lock *lkp, u_int flags)
{
        atomic_clear_long(&lkp->lk_count, LKC_CANCEL);

        return 0;
}

/*
 * Back out SCOUNT from a failed shared lock attempt and handle any race
 * to 0.  This function is also used by the release code for the less
 * optimal race-to-0 case.
 *
 * WARNING! Since we are unconditionally decrementing LKC_SCOUNT, it is
 *          possible for the lock to get into a LKC_SHARED + ZERO SCOUNT
 *          situation.  A shared request can block with a ZERO SCOUNT if
 *          EXREQ or UPREQ is pending in this situation.  Be sure to always
 *          issue a wakeup() in this situation if we are unable to
 *          transition to an exclusive lock, to handle the race.
 *
 * Always succeeds
 * Must not block
 */
static void
undo_shreq(struct lock *lkp)
{
        uint64_t count;
        uint64_t ncount;

        count = atomic_fetchadd_64(&lkp->lk_count, -LKC_SCOUNT) - LKC_SCOUNT;
        while ((count & (LKC_EXREQ | LKC_UPREQ | LKC_CANCEL)) &&
               (count & (LKC_SMASK | LKC_XMASK)) == 0) {
                /*
                 * Grant any UPREQ here.  This is handled in two parts.
                 * We grant the UPREQ by incrementing the excl count and
                 * clearing UPREQ and SHARED (and also CANCEL).
                 *
                 * The owner of UPREQ is still responsible for setting
                 * lockholder.
                 *
                 * Note that UPREQ must have priority over EXREQ, and EXREQ
                 * over CANCEL, so if the atomic op fails we have to loop up.
                 */
                if (count & LKC_UPREQ) {
                        ncount = (count + 1) & ~(LKC_UPREQ | LKC_CANCEL |
                                                 LKC_SHARED);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                wakeup(lkp);
                                /* count = ncount; NOT USED */
                                break;
                        }
                        wakeup(lkp);    /* XXX probably not needed */
                        continue;
                }
                if (count & LKC_EXREQ) {
                        ncount = (count + 1) & ~(LKC_EXREQ | LKC_EXREQ2 |
                                                 LKC_CANCEL | LKC_SHARED);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                wakeup(lkp);
                                /* count = ncount; NOT USED */
                                break;
                        }
                        wakeup(lkp);    /* XXX probably not needed */
                        continue;
                }
                if (count & LKC_CANCEL) {
                        ncount = count & ~LKC_CANCEL;
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                wakeup(lkp);
                                /* count = ncount; NOT USED */
                                break;
                        }
                }
                /* retry */
        }
}
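/*
 * Race-to-0 example for undo_shreq() (hypothetical interleaving):
 * two shared holders release concurrently and both decrement SCOUNT;
 * the one that observes SCOUNT reaching 0 with EXREQ, UPREQ, or
 * CANCEL pending is responsible for transferring ownership (or
 * clearing CANCEL) in the loop above.
 */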
/*
 * Undo an exclusive request.  Returns EBUSY if we were able to undo the
 * request, and 0 if the request was granted before we could undo it.
 * When 0 is returned, the lock state has not been modified.  The caller
 * is responsible for setting the lockholder to curthread.
 */
static
int
undo_exreq(struct lock *lkp)
{
        uint64_t count;
        uint64_t ncount;
        int error;

        count = lkp->lk_count;
        error = 0;

        for (;;) {
                cpu_ccfence();

                if ((count & LKC_EXREQ) == 0) {
                        /*
                         * EXREQ was granted.  We own the exclusive lock.
                         */
                        break;
                }
                if (count & LKC_XMASK) {
                        /*
                         * Clear the EXREQ we still own.  Only wakeup on
                         * EXREQ2 if no UPREQ.  There are still exclusive
                         * holders so do not wake up any shared locks or
                         * any UPREQ.
                         *
                         * If there is an UPREQ it will issue a wakeup()
                         * for any EXREQ wait loops, so we can clear EXREQ2
                         * now.
                         */
                        ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                if ((count & (LKC_EXREQ2 | LKC_UPREQ)) ==
                                    LKC_EXREQ2) {
                                        wakeup(lkp);
                                }
                                error = EBUSY;
                                /* count = ncount; NOT USED */
                                break;
                        }
                        /* retry */
                } else if (count & LKC_UPREQ) {
                        /*
                         * Clear the EXREQ we still own.  We cannot wakeup any
                         * shared or exclusive waiters because there is an
                         * upgrade request pending (that we do not handle
                         * here).
                         *
                         * If there is an UPREQ it will issue a wakeup()
                         * for any EXREQ wait loops, so we can clear EXREQ2
                         * now.
                         */
                        ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                error = EBUSY;
                                break;
                        }
                        /* retry */
                } else if ((count & LKC_SHARED) && (count & LKC_SMASK)) {
                        /*
                         * No UPREQ, lock not held exclusively, but the lock
                         * is held shared.  Clear EXREQ, wakeup anyone trying
                         * to get the EXREQ bit (they have to set it
                         * themselves, EXREQ2 is an aggregation).
                         *
                         * We must also wakeup any shared locks blocked
                         * by the EXREQ, so just issue the wakeup
                         * unconditionally.  See lockmgr_shared() + 76 lines
                         * or so.
                         */
                        ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                wakeup(lkp);
                                error = EBUSY;
                                /* count = ncount; NOT USED */
                                break;
                        }
                        /* retry */
                } else {
                        /*
                         * No UPREQ, lock not held exclusively or shared.
                         * Grant the EXREQ and wakeup anyone waiting on
                         * EXREQ2.
                         *
                         * We must also issue a wakeup if SHARED is set,
                         * even without an SCOUNT, due to pre-shared blocking
                         * that can occur on EXREQ in lockmgr_shared().
                         */
                        ncount = (count + 1) & ~(LKC_EXREQ | LKC_EXREQ2);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                if (count & (LKC_EXREQ2 | LKC_SHARED))
                                        wakeup(lkp);
                                /* count = ncount; NOT USED */
                                /* we are granting, error == 0 */
                                break;
                        }
                        /* retry */
                }
                /* retry */
        }
        return error;
}
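/*
 * Caller pattern for the undo helpers (copied from the uses in
 * lockmgr_exclusive() and lockmgr_upgrade() above): when the undo
 * loses the race and the lock is granted anyway, the caller owns it
 * and must dispose of it.
 *
 *	if (undo_exreq(lkp) == 0) {
 *		lkp->lk_lockholder = LK_KERNTHREAD;
 *		lockmgr_release(lkp, 0);
 *	}
 */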
/*
 * Undo an upgrade request.  Returns EBUSY if we were able to undo the
 * request, and 0 if the request was granted before we could undo it.
 * When 0 is returned, the lock state has not been modified.  The caller
 * is responsible for setting the lockholder to curthread.
 */
static
int
undo_upreq(struct lock *lkp)
{
        uint64_t count;
        uint64_t ncount;
        int error;

        count = lkp->lk_count;
        error = 0;

        for (;;) {
                cpu_ccfence();

                if ((count & LKC_UPREQ) == 0) {
                        /*
                         * UPREQ was granted
                         */
                        break;
                }
                if (count & LKC_XMASK) {
                        /*
                         * Clear the UPREQ we still own.  Nobody to wakeup
                         * here because there is an existing exclusive
                         * holder.
                         */
                        if (atomic_fcmpset_64(&lkp->lk_count, &count,
                                              count & ~LKC_UPREQ)) {
                                error = EBUSY;
                                /* count &= ~LKC_UPREQ; NOT USED */
                                break;
                        }
                } else if (count & LKC_EXREQ) {
                        /*
                         * Clear the UPREQ we still own.  Grant the exclusive
                         * request and wake it up.
                         */
                        ncount = (count + 1);
                        ncount &= ~(LKC_EXREQ | LKC_EXREQ2 | LKC_UPREQ);

                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                wakeup(lkp);
                                error = EBUSY;
                                /* count = ncount; NOT USED */
                                break;
                        }
                } else {
                        /*
                         * Clear the UPREQ we still own.  Wakeup any shared
                         * waiters.
                         *
                         * We must also issue a wakeup if SHARED was set,
                         * even if there are no shared waiters, due to
                         * pre-shared blocking that can occur on UPREQ.
                         */
                        ncount = count & ~LKC_UPREQ;
                        if (count & LKC_SMASK)
                                ncount |= LKC_SHARED;

                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                if ((count & LKC_SHARED) ||
                                    (ncount & LKC_SHARED)) {
                                        wakeup(lkp);
                                }
                                error = EBUSY;
                                /* count = ncount; NOT USED */
                                break;
                        }
                }
                /* retry */
        }
        return error;
}

void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p: %p",
                     td, lp->lk_lockholder));
                lp->lk_lockholder = LK_KERNTHREAD;
                COUNT(td, -1);
        }
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_count = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = NULL;
}
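/*
 * Initialization sketch (hypothetical values): a lock whose
 * LK_TIMELOCK requests time out after 5 seconds:
 *
 *	lockinit(&blk, "examplk", 5 * hz, 0);
 *	error = lockmgr(&blk, LK_EXCLUSIVE | LK_TIMELOCK);
 */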
/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
}

/*
 * De-initialize a lock.  The structure must no longer be used by anyone.
 */
void
lockuninit(struct lock *lkp)
{
        uint64_t count __unused;

        count = lkp->lk_count;
        cpu_ccfence();
        KKASSERT((count & (LKC_EXREQ | LKC_UPREQ)) == 0 &&
                 ((count & LKC_SHARED) || (count & LKC_SMASK) == 0));
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;
        uint64_t count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & (LKC_XMASK | LKC_SMASK | LKC_EXREQ | LKC_UPREQ)) {
                if (count & LKC_XMASK) {
                        if (td == NULL || lkp->lk_lockholder == td)
                                lock_type = LK_EXCLUSIVE;
                        else
                                lock_type = LK_EXCLOTHER;
                } else if ((count & LKC_SMASK) && (count & LKC_SHARED)) {
                        lock_type = LK_SHARED;
                }
        }
        return (lock_type);
}

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;
        uint64_t count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_XMASK)
                return(lkp->lk_lockholder == td);
        else
                return((count & LKC_SMASK) != 0);
}

#if 0
/*
 * Determine the number of holders of a lock.
 *
 * REMOVED - Cannot be used due to our use of atomic_fetchadd_64()
 *           for shared locks.  Caller can only test if the lock has
 *           a count or not using lockinuse(lk) (sys/lock.h)
 */
int
lockcount(struct lock *lkp)
{
        panic("lockcount cannot be used");
}

int
lockcountnb(struct lock *lkp)
{
        panic("lockcount cannot be used");
}
#endif

/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;
        uint64_t count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (td && td != LK_KERNTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (count & LKC_XMASK) {
                kprintf(" lock type %s: EXCLUS (count %016jx) by td %p pid %d",
                        lkp->lk_wmesg, (intmax_t)count, td,
                        p ? p->p_pid : -99);
        } else if ((count & LKC_SMASK) && (count & LKC_SHARED)) {
                kprintf(" lock type %s: SHARED (count %016jx)",
                        lkp->lk_wmesg, (intmax_t)count);
        } else {
                kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
        }
        if ((count & (LKC_EXREQ | LKC_UPREQ)) ||
            ((count & LKC_XMASK) && (count & LKC_SMASK)))
                kprintf(" with waiters\n");
        else
                kprintf("\n");
}

void
lock_sysinit(struct lock_args *arg)
{
        lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}

#ifdef DEBUG_CANCEL_LOCKS

static
int
sysctl_cancel_lock(SYSCTL_HANDLER_ARGS)
{
        int error;

        if (req->newptr) {
                SYSCTL_XUNLOCK();
                lockmgr(&cancel_lk, LK_EXCLUSIVE);
                error = tsleep(&error, PCATCH, "canmas", hz * 5);
                lockmgr(&cancel_lk, LK_CANCEL_BEG);
                error = tsleep(&error, PCATCH, "canmas", hz * 5);
                lockmgr(&cancel_lk, LK_RELEASE);
                SYSCTL_XLOCK();
                SYSCTL_OUT(req, &error, sizeof(error));
        }
        error = 0;

        return error;
}

static
int
sysctl_cancel_test(SYSCTL_HANDLER_ARGS)
{
        int error;

        if (req->newptr) {
                error = lockmgr(&cancel_lk, LK_EXCLUSIVE|LK_CANCELABLE);
                if (error == 0)
                        lockmgr(&cancel_lk, LK_RELEASE);
                SYSCTL_OUT(req, &error, sizeof(error));
                kprintf("test %d\n", error);
        }

        return 0;
}

#endif