1 /* 2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by John Birrell. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
31 * 32 * $FreeBSD: src/lib/libpthread/thread/thr_mutex.c,v 1.46 2004/10/31 05:03:50 green Exp $ 33 * $DragonFly: src/lib/libthread_xu/thread/thr_mutex.c,v 1.8 2006/03/12 12:02:28 davidxu Exp $ 34 */ 35 36 #include <machine/tls.h> 37 38 #include <stdlib.h> 39 #include <errno.h> 40 #include <string.h> 41 #include <sys/param.h> 42 #include <sys/queue.h> 43 #include <pthread.h> 44 #include "thr_private.h" 45 46 #if defined(_PTHREADS_INVARIANTS) 47 #define MUTEX_INIT_LINK(m) do { \ 48 (m)->m_qe.tqe_prev = NULL; \ 49 (m)->m_qe.tqe_next = NULL; \ 50 } while (0) 51 #define MUTEX_ASSERT_IS_OWNED(m) do { \ 52 if ((m)->m_qe.tqe_prev == NULL) \ 53 PANIC("mutex is not on list"); \ 54 } while (0) 55 #define MUTEX_ASSERT_NOT_OWNED(m) do { \ 56 if (((m)->m_qe.tqe_prev != NULL) || \ 57 ((m)->m_qe.tqe_next != NULL)) \ 58 PANIC("mutex is on list"); \ 59 } while (0) 60 #define THR_ASSERT_NOT_IN_SYNCQ(thr) do { \ 61 THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \ 62 "thread in syncq when it shouldn't be."); \ 63 } while (0); 64 #else 65 #define MUTEX_INIT_LINK(m) 66 #define MUTEX_ASSERT_IS_OWNED(m) 67 #define MUTEX_ASSERT_NOT_OWNED(m) 68 #define THR_ASSERT_NOT_IN_SYNCQ(thr) 69 #endif 70 71 #define THR_IN_MUTEXQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0) 72 #define MUTEX_DESTROY(m) do { \ 73 free(m); \ 74 } while (0) 75 76 77 /* 78 * Prototypes 79 */ 80 static long mutex_handoff(struct pthread *, struct pthread_mutex *); 81 static int mutex_self_trylock(struct pthread *, pthread_mutex_t); 82 static int mutex_self_lock(struct pthread *, pthread_mutex_t, 83 const struct timespec *abstime); 84 static int mutex_unlock_common(pthread_mutex_t *, int); 85 static void mutex_priority_adjust(struct pthread *, pthread_mutex_t); 86 static void mutex_rescan_owned (struct pthread *, struct pthread *, 87 struct pthread_mutex *); 88 #if 0 89 static pthread_t mutex_queue_deq(pthread_mutex_t); 90 #endif 91 static void mutex_queue_remove(pthread_mutex_t, pthread_t); 92 static void 
mutex_queue_enq(pthread_mutex_t, pthread_t);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */

__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

/*
 * Allocate and initialize a mutex.
 *
 * mutex_attr of NULL selects the defaults (PTHREAD_MUTEX_DEFAULT,
 * PTHREAD_PRIO_NONE).  When 'private' is non-zero the mutex is marked
 * MUTEX_FLAGS_PRIVATE (delete-safe, libc-internal usage).
 *
 * Returns 0 on success, EINVAL for a bad type/protocol in the
 * attributes, or ENOMEM if allocation fails.  On success *mutex is
 * set to the new mutex.
 */
static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	static const struct pthread_mutex_attr default_attr = {
		.m_type = PTHREAD_MUTEX_DEFAULT,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = THR_MAX_PRIORITY,
		.m_flags = 0
	};
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &default_attr;
	} else {
		/* Validate caller-supplied attributes before use. */
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}

	if ((pmutex = (pthread_mutex_t)
	    malloc(sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	_thr_umtx_init(&pmutex->m_lock);
	pmutex->m_type = attr->m_type;
	pmutex->m_protocol = attr->m_protocol;
	TAILQ_INIT(&pmutex->m_queue);
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	/*
	 * Only PRIO_PROTECT mutexes carry a meaningful ceiling;
	 * -1 marks "no priority" for the other protocols.
	 */
	if (attr->m_protocol == PTHREAD_PRIO_PROTECT)
		pmutex->m_prio = attr->m_ceiling;
	else
		pmutex->m_prio = -1;
	pmutex->m_saved_prio = 0;
	MUTEX_INIT_LINK(pmutex);
	*mutex = pmutex;
	return (0);
}

/*
 * Lazily initialize a statically-allocated (PTHREAD_MUTEX_INITIALIZER)
 * mutex.  Serialized by _mutex_static_lock so two racing threads do
 * not both allocate.  Public (non-private) variant.
 */
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

/*
 * Same as init_static() but marks the mutex private (delete safe),
 * for libc-internal static mutexes.
 */
static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

/* libc-internal entry point: private (delete safe) mutex. */
int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1);
}

/* Application entry point: ordinary (non-private) mutex. */
int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0);
}

/*
 * Re-initialize an existing mutex in place (used after fork, when
 * the child must reset inherited lock state).  Always succeeds.
 */
int
_mutex_reinit(pthread_mutex_t *mutex)
{
	_thr_umtx_init(&(*mutex)->m_lock);
	TAILQ_INIT(&(*mutex)->m_queue);
	MUTEX_INIT_LINK(*mutex);
	(*mutex)->m_owner = NULL;
	(*mutex)->m_count = 0;
	(*mutex)->m_refcount = 0;
	(*mutex)->m_prio = 0;
	(*mutex)->m_saved_prio = 0;
	return (0);
}

/*
 * Fork handler: in the child only the forking thread survives, so
 * mutexes it owns are re-marked locked by it and any queued waiters
 * on its priority mutexes are discarded.
 */
void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock = UMTX_LOCKED;

	/* Clear contender for priority mutexes */
	TAILQ_FOREACH(m, &curthread->pri_mutexq, m_qe) {
		/* clear another thread locked us */
		_thr_umtx_init(&m->m_lock);
		TAILQ_INIT(&m->m_queue);
	}
}

/*
 * Destroy a mutex.  Fails with EINVAL for a NULL/uninitialized mutex
 * and EBUSY if the mutex is locked, has waiters, or is referenced by
 * a condition variable.
 */
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = tls_get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/*
		 * Try to lock the mutex structure, we only need
		 * to try once; if that fails, the mutex is in use.
		 */
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret)
			return (ret);

		/*
		 * Check mutex other fields to see if this mutex is
		 * in use.  Mostly for priority mutex types, or there
		 * are condition variables referencing it.
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be free'd
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			_thr_umtx_unlock(&m->m_lock, curthread->tid);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}

/*
 * Common trylock path.  Fast path for PTHREAD_PRIO_NONE mutexes goes
 * straight to the umtx trylock; priority-protocol mutexes fall through
 * to the bookkeeping code below.  Returns 0, EBUSY, EDEADLK-family
 * codes from mutex_self_trylock(), or EINVAL.
 */
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_trylock_common");

	/* Short cut for simple mutex. */
	if ((*mutex)->m_protocol == PTHREAD_PRIO_NONE) {
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret == 0) {
			(*mutex)->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread) {
			/* Relock by the owner: type-dependent result. */
			ret = mutex_self_trylock(curthread, *mutex);
		} /* else {} */

		return (ret);
	}

	/* Code for priority mutex */

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex type: */
	switch ((*mutex)->m_protocol) {
	/* POSIX priority inheritance mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_LOCK(curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority = (*mutex)->m_prio;
			THR_UNLOCK(curthread);

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_LOCK(curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;
			THR_UNLOCK(curthread);
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex types: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}

/* Application trylock entry point (non-private static init). */
int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = tls_get_curthread();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*mutex != NULL) ||
	    ((ret = init_static(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}

/* libc-internal trylock entry point (private static init). */
int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = tls_get_curthread();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if ((*mutex != NULL) ||
	    ((ret = init_static_private(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}

/*
 * Common blocking-lock path, shared by pthread_mutex_lock (abstime
 * == NULL) and pthread_mutex_timedlock (absolute CLOCK_REALTIME
 * deadline).  Returns 0, EINVAL, ETIMEDOUT, EDEADLK or EBUSY-family
 * codes depending on protocol and type.
 */
static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
    const struct timespec * abstime)
{
	struct timespec ts, ts2;
	long cycle;
	int ret = 0;

	THR_ASSERT((m != NULL) && (*m != NULL),
	    "Uninitialized mutex in mutex_lock_common");

	/* Reject an out-of-range timeout up front, per POSIX. */
	if (abstime != NULL &&
	    (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000))
		return (EINVAL);

	/* Short cut for simple mutex. */

	if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
		/* Default POSIX mutex: */
		ret = THR_UMTX_TRYLOCK(curthread, &(*m)->m_lock);
		if (ret == 0) {
			(*m)->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*m);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*m), m_qe);
		} else if ((*m)->m_owner == curthread) {
			/* Relock by the owner: type-dependent result. */
			ret = mutex_self_lock(curthread, *m, abstime);
		} else {
			if (abstime == NULL) {
				THR_UMTX_LOCK(curthread, &(*m)->m_lock);
				ret = 0;
			} else {
				/* Convert the absolute deadline to relative. */
				clock_gettime(CLOCK_REALTIME, &ts);
				TIMESPEC_SUB(&ts2, abstime, &ts);
				ret = THR_UMTX_TIMEDLOCK(curthread,
				    &(*m)->m_lock, &ts2);
				/*
				 * Timed out wait is not restarted if
				 * it was interrupted, not worth to do it.
				 */
				if (ret == EINTR)
					ret = ETIMEDOUT;
			}
			if (ret == 0) {
				(*m)->m_owner = curthread;
				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);
			}
		}
		return (ret);
	}

	/* Code for priority mutex */

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*m)->m_queue);
			(*m)->m_flags |= MUTEX_FLAGS_INITED;
			MUTEX_INIT_LINK(*m);
		}

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*m)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*m)->m_owner = curthread;

				THR_LOCK(curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 * Make sure the thread's scheduling lock is
				 * held while priorities are adjusted.
				 */
				(*m)->m_prio = curthread->active_priority;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_UNLOCK(curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m, abstime);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
561 */ 562 mutex_queue_enq(*m, curthread); 563 curthread->data.mutex = *m; 564 565 if (curthread->active_priority > (*m)->m_prio) 566 /* Adjust priorities: */ 567 mutex_priority_adjust(curthread, *m); 568 569 THR_LOCK(curthread); 570 cycle = curthread->cycle; 571 THR_UNLOCK(curthread); 572 573 /* Unlock the mutex structure: */ 574 THR_LOCK_RELEASE(curthread, &(*m)->m_lock); 575 576 clock_gettime(CLOCK_REALTIME, &ts); 577 TIMESPEC_SUB(&ts2, abstime, &ts); 578 ret = _thr_umtx_wait(&curthread->cycle, cycle, 579 &ts2, CLOCK_REALTIME); 580 if (ret == EINTR) 581 ret = 0; 582 583 if (THR_IN_MUTEXQ(curthread)) { 584 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock); 585 mutex_queue_remove(*m, curthread); 586 THR_LOCK_RELEASE(curthread, &(*m)->m_lock); 587 } 588 /* 589 * Only clear these after assuring the 590 * thread is dequeued. 591 */ 592 curthread->data.mutex = NULL; 593 } 594 break; 595 596 /* POSIX priority protection mutex: */ 597 case PTHREAD_PRIO_PROTECT: 598 /* Check for a priority ceiling violation: */ 599 if (curthread->active_priority > (*m)->m_prio) { 600 /* Unlock the mutex structure: */ 601 THR_LOCK_RELEASE(curthread, &(*m)->m_lock); 602 ret = EINVAL; 603 } 604 /* Check if this mutex is not locked: */ 605 else if ((*m)->m_owner == NULL) { 606 /* 607 * Lock the mutex for the running 608 * thread: 609 */ 610 (*m)->m_owner = curthread; 611 612 THR_LOCK(curthread); 613 /* Track number of priority mutexes owned: */ 614 curthread->priority_mutex_count++; 615 616 /* 617 * The running thread inherits the ceiling 618 * priority of the mutex and executes at that 619 * priority. Make sure the thread's 620 * scheduling lock is held while priorities 621 * are adjusted. 
622 */ 623 curthread->active_priority = (*m)->m_prio; 624 (*m)->m_saved_prio = 625 curthread->inherited_priority; 626 curthread->inherited_priority = (*m)->m_prio; 627 THR_UNLOCK(curthread); 628 629 /* Add to the list of owned mutexes: */ 630 MUTEX_ASSERT_NOT_OWNED(*m); 631 TAILQ_INSERT_TAIL(&curthread->pri_mutexq, 632 (*m), m_qe); 633 634 /* Unlock the mutex structure: */ 635 THR_LOCK_RELEASE(curthread, &(*m)->m_lock); 636 } else if ((*m)->m_owner == curthread) { 637 ret = mutex_self_lock(curthread, *m, abstime); 638 639 /* Unlock the mutex structure: */ 640 THR_LOCK_RELEASE(curthread, &(*m)->m_lock); 641 } else { 642 /* 643 * Join the queue of threads waiting to lock 644 * the mutex and save a pointer to the mutex. 645 */ 646 mutex_queue_enq(*m, curthread); 647 curthread->data.mutex = *m; 648 649 /* Clear any previous error: */ 650 curthread->error = 0; 651 652 THR_LOCK(curthread); 653 cycle = curthread->cycle; 654 THR_UNLOCK(curthread); 655 656 /* Unlock the mutex structure: */ 657 THR_LOCK_RELEASE(curthread, &(*m)->m_lock); 658 659 clock_gettime(CLOCK_REALTIME, &ts); 660 TIMESPEC_SUB(&ts2, abstime, &ts); 661 ret = _thr_umtx_wait(&curthread->cycle, cycle, 662 &ts2, CLOCK_REALTIME); 663 if (ret == EINTR) 664 ret = 0; 665 666 curthread->data.mutex = NULL; 667 if (THR_IN_MUTEXQ(curthread)) { 668 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock); 669 mutex_queue_remove(*m, curthread); 670 THR_LOCK_RELEASE(curthread, &(*m)->m_lock); 671 } 672 /* 673 * Only clear these after assuring the 674 * thread is dequeued. 675 */ 676 curthread->data.mutex = NULL; 677 678 /* 679 * The threads priority may have changed while 680 * waiting for the mutex causing a ceiling 681 * violation. 
				 */
				ret = curthread->error;
				curthread->error = 0;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/* Loop until we own the mutex or an error occurred. */
	} while (((*m)->m_owner != curthread) && (ret == 0));

	/* Return the completion status: */
	return (ret);
}

/* Application lock entry point (non-private static init). */
int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = tls_get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

/* libc-internal lock entry point (private static init). */
int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = tls_get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

/* Application timedlock entry point (non-private static init). */
int
__pthread_mutex_timedlock(pthread_mutex_t *m,
    const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = tls_get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}

/* libc-internal timedlock entry point (private static init). */
int
_pthread_mutex_timedlock(pthread_mutex_t *m,
    const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = tls_get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}

/* Public unlock: no condition-variable reference is taken. */
int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 0));
}

/*
 * Condition-variable internal unlock: additionally bumps m_refcount
 * so the mutex cannot be destroyed while the cond wait is in flight.
 */
int
_mutex_cv_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 1));
}

/*
 * Condition-variable internal relock: drops the reference taken by
 * _mutex_cv_unlock() once the mutex is re-acquired.
 */
int
_mutex_cv_lock(pthread_mutex_t *m)
{
	int ret;

	if ((ret = _pthread_mutex_lock(m)) == 0)
		(*m)->m_refcount--;
	return (ret);
}

/*
 * Handle trylock of a mutex the caller already owns.  Result depends
 * on the mutex type: EBUSY for normal/errorcheck, recursion for
 * recursive (EAGAIN once the count would overflow).
 */
static int
mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types; */
		ret = EINVAL;
	}

	return (ret);
}

/*
 * Handle a blocking lock of a mutex the caller already owns.
 * ERRORCHECK returns EDEADLK (or sleeps out the timeout and returns
 * ETIMEDOUT); NORMAL deliberately deadlocks; RECURSIVE recurses.
 */
static int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m,
    const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			/* Sleep until the deadline, then report timeout. */
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 define as a 'normal' mutex.
		 * Intentionally deadlock on attempts to get a lock
		 * you already own.
		 */
		ret = 0;
		if (m->m_protocol != PTHREAD_PRIO_NONE) {
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
		if (abstime) {
			/* Sleep out the timeout, then report it. */
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/* Deadlock deliberately: sleep forever. */
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types; */
		ret = EINVAL;
	}

	return (ret);
}

/*
 * Common unlock path.  When add_reference is non-zero the mutex
 * reference count is bumped on success (condition-variable usage).
 * Returns 0, EINVAL for a bad mutex, or EPERM if the caller does not
 * own it.
 */
static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread *curthread = tls_get_curthread();
	long tid = -1;
	int ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Short cut for simple mutex. */

		if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if (__predict_false((*m)->m_owner != curthread)) {
				ret = EPERM;
			} else if (__predict_false(
				  (*m)->m_type == PTHREAD_MUTEX_RECURSIVE &&
			          (*m)->m_count > 0)) {
				/* Decrement the count: */
				(*m)->m_count--;
				if (add_reference)
					(*m)->m_refcount++;
			} else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;
				(*m)->m_owner = NULL;
				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&curthread->mutexq, (*m), m_qe);
				MUTEX_INIT_LINK(*m);
				if (add_reference)
					(*m)->m_refcount++;
				/*
				 * Hand off the mutex to the next waiting
				 * thread.
				 */
				_thr_umtx_unlock(&(*m)->m_lock, curthread->tid);
			}
			return (ret);
		}

		/* Code for priority mutex */

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_LOCK(curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_UNLOCK(curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				tid = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_LOCK(curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_UNLOCK(curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				tid = mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
	}

	/* Return the completion status: */
	return (ret);
}


/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a threads base priority can effect
 * changes to active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called without the target thread's scheduling lock held.
 */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorites of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this threads change
		 * in priority.  This has the side effect of changing
		 * the threads active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the threads priority
		 * and from the owning thread releasing the mutex.
1109 */ 1110 m = TAILQ_FIRST(&pthread->pri_mutexq); 1111 if (m != NULL) { 1112 THR_LOCK_ACQUIRE(curthread, &m->m_lock); 1113 /* 1114 * Make sure the thread still owns the lock. 1115 */ 1116 if (m == TAILQ_FIRST(&pthread->pri_mutexq)) 1117 mutex_rescan_owned(curthread, pthread, 1118 /* rescan all owned */ NULL); 1119 THR_LOCK_RELEASE(curthread, &m->m_lock); 1120 } 1121 } 1122 1123 /* 1124 * If this thread is waiting on a priority inheritence mutex, 1125 * check for priority adjustments. A change in priority can 1126 * also cause a ceiling violation(*) for a thread waiting on 1127 * a priority protection mutex; we don't perform the check here 1128 * as it is done in pthread_mutex_unlock. 1129 * 1130 * (*) It should be noted that a priority change to a thread 1131 * _after_ taking and owning a priority ceiling mutex 1132 * does not affect ownership of that mutex; the ceiling 1133 * priority is only checked before mutex ownership occurs. 1134 */ 1135 if (propagate_prio != 0) { 1136 /* 1137 * Lock the thread's scheduling queue. This is a bit 1138 * convoluted; the "in synchronization queue flag" can 1139 * only be cleared with both the thread's scheduling and 1140 * mutex locks held. The thread's pointer to the wanted 1141 * mutex is guaranteed to be valid during this time. 1142 */ 1143 THR_THREAD_LOCK(curthread, pthread); 1144 1145 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) || 1146 ((m = pthread->data.mutex) == NULL)) 1147 THR_THREAD_UNLOCK(curthread, pthread); 1148 else { 1149 /* 1150 * This thread is currently waiting on a mutex; unlock 1151 * the scheduling queue lock and lock the mutex. We 1152 * can't hold both at the same time because the locking 1153 * order could cause a deadlock. 
			 */
			THR_THREAD_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}

/*
 * Called when a new thread is added to the mutex waiting queue or
 * when a thread's priority changes that is already in the mutex
 * waiting queue.
 *
 * Walks up the chain of priority-inheritance mutexes/owners, raising
 * (or lowering) each mutex's m_prio and each owner's effective
 * priority until no further change is needed.
 *
 * This must be called with the mutex locked by the current thread.
 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
	pthread_mutex_t m = mutex;
	struct pthread *pthread_next, *pthread = mutex->m_owner;
	int done, temp_prio;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);	/* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Don't unlock the mutex passed in as an argument.  It is
	 * expected to be locked and unlocked by the caller.
	 */
	done = 1;
	do {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all mutexes held by the owning
		 * thread since taking this mutex.  This also has a
		 * potential side-effect of changing the thread's priority.
		 *
		 * At this point the mutex is locked by the current thread.
		 * The owning thread can't release the mutex until it is
		 * unlocked, so we should be able to safely walk its list
		 * of owned mutexes.
		 */
		mutex_rescan_owned(curthread, pthread, m);

		/*
		 * If this isn't the first time through the loop,
		 * the current mutex needs to be unlocked.
		 */
		if (done == 0)
			THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Assume we're done unless told otherwise: */
		done = 1;

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
		    ((m = pthread->data.mutex) != NULL) &&
		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Lock the mutex structure: */
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Make sure the thread is still waiting on the
			 * mutex:
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (m == pthread->data.mutex)) {
				/*
				 * The priority for this thread has changed.
				 * Remove and reinsert this thread into the
				 * list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				/*
				 * Grab the waiting thread with highest
				 * priority:
				 */
				pthread_next = TAILQ_FIRST(&m->m_queue);

				/*
				 * Calculate the mutex priority as the maximum
				 * of the highest active priority of any
				 * waiting threads and the owning thread's
				 * active priority.
				 */
				temp_prio = MAX(pthread_next->active_priority,
				    MAX(m->m_saved_prio,
				    m->m_owner->base_priority));

				if (temp_prio != m->m_prio) {
					/*
					 * The priority needs to be propagated
					 * to the mutex this thread is waiting
					 * on and up to the owner of that mutex.
					 */
					m->m_prio = temp_prio;
					pthread = m->m_owner;

					/* We're not done yet: */
					done = 0;
				}
			}
			/* Only release the mutex if we're done: */
			if (done != 0)
				THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	} while (done == 0);
}

/*
 * Recompute the priorities of the priority-inheritance mutexes owned
 * by 'pthread', starting after 'mutex' (or from the head of the owned
 * list when 'mutex' is NULL), and then fix the thread's inherited and
 * active priorities to match.
 *
 * Caller must hold the relevant mutex lock (see callers above); the
 * target thread's scheduling lock is taken here only when its active
 * priority actually changes.
 */
static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
	struct pthread_mutex *m;
	struct pthread *pthread_next;
	int active_prio, inherited_prio;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->pri_mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	} else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				    pthread_next->active_priority);
			else
				/* No waiters; mutex carries only the inherited priority. */
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/* Lock the thread's scheduling queue: */
		THR_THREAD_LOCK(curthread, pthread);

		/*
		 * NOTE(review): the run-queue handling below is stubbed out
		 * (the original condition and THR_RUNQ_* calls are commented
		 * away), so only the first branch ever executes here.
		 */
		/* if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) */
		if (1) {
			/*
			 * This thread is not in a run queue.  Just set
			 * its active priority.
			 */
			pthread->active_priority = active_prio;
		}
		else {
			/*
			 * This thread is in a run queue.  Remove it from
			 * the queue before changing its priority:
			 */
			/* THR_RUNQ_REMOVE(pthread);*/
			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;
				/* THR_RUNQ_INSERT_HEAD(pthread); */
			} else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;
				/* THR_RUNQ_INSERT_TAIL(pthread);*/
			}
		}
		THR_THREAD_UNLOCK(curthread, pthread);
	}
}

/*
 * Unlock every process-private mutex still held by 'pthread'.
 * Typically used at thread teardown; the next mutex is fetched
 * before unlocking so the walk survives the list entry being
 * removed by the unlock.
 */
void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	for (m = TAILQ_FIRST(&pthread->pri_mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			pthread_mutex_unlock(&m);
	}
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 *
 * In order to properly dequeue a thread from the mutex queue and
 * make it runnable without the possibility of errant wakeups, it
 * is necessary to lock the thread's scheduling queue while also
 * holding the mutex lock.
 */
static long
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct pthread *pthread;
	/*
	 * NOTE(review): tid is initialized to -1 and never reassigned in
	 * this function, so -1 is returned unconditionally; confirm that
	 * callers ignore the value.
	 */
	long tid = -1;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_THREAD_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);

			/* Track number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning thread's inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning thread's inherited priority now becomes
			 * his active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this thread's priority
				 * has been raised subsequent to the thread
				 * being queued on the waiting list.
				 */
				pthread->error = EINVAL;
			}
			else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->pri_mutexq,
				    mutex, m_qe);

				/* Track number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning thread's inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;

			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		pthread->cycle++;
		_thr_umtx_wake(&pthread->cycle, 1);

		THR_THREAD_UNLOCK(curthread, pthread);
		if (mutex->m_owner == pthread)
			/* We're done; a valid owner was found. */
			break;
		else
			/* Get the next thread from the waiting queue: */
			pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* This mutex has no priority: */
		mutex->m_prio = 0;
	return (tid);
}

#if 0
/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
1574 */ 1575 static pthread_t 1576 mutex_queue_deq(struct pthread_mutex *mutex) 1577 { 1578 pthread_t pthread; 1579 1580 while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) { 1581 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); 1582 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ; 1583 } 1584 1585 return (pthread); 1586 } 1587 #endif 1588 1589 /* 1590 * Remove a waiting thread from a mutex queue in descending priority order. 1591 */ 1592 static void 1593 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread) 1594 { 1595 if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) { 1596 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); 1597 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ; 1598 } 1599 } 1600 1601 /* 1602 * Enqueue a waiting thread to a queue in descending priority order. 1603 */ 1604 static void 1605 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread) 1606 { 1607 pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head); 1608 1609 THR_ASSERT_NOT_IN_SYNCQ(pthread); 1610 /* 1611 * For the common case of all threads having equal priority, 1612 * we perform a quick check against the priority of the thread 1613 * at the tail of the queue. 1614 */ 1615 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) 1616 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe); 1617 else { 1618 tid = TAILQ_FIRST(&mutex->m_queue); 1619 while (pthread->active_priority <= tid->active_priority) 1620 tid = TAILQ_NEXT(tid, sqe); 1621 TAILQ_INSERT_BEFORE(tid, pthread, sqe); 1622 } 1623 pthread->sflags |= THR_FLAGS_IN_SYNCQ; 1624 } 1625