/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
32 * 33 */ 34 35 #include "namespace.h" 36 #include <machine/tls.h> 37 #include <errno.h> 38 #include <stdlib.h> 39 #include <string.h> 40 #include <sys/queue.h> 41 #include <pthread.h> 42 #include "un-namespace.h" 43 44 #include "thr_private.h" 45 46 #ifdef _PTHREADS_DEBUGGING 47 48 #include <stdio.h> 49 #include <stdarg.h> 50 #include <sys/file.h> 51 52 #endif 53 54 #if defined(_PTHREADS_INVARIANTS) 55 #define MUTEX_INIT_LINK(m) do { \ 56 (m)->m_qe.tqe_prev = NULL; \ 57 (m)->m_qe.tqe_next = NULL; \ 58 } while (0) 59 #define MUTEX_ASSERT_IS_OWNED(m) do { \ 60 if ((m)->m_qe.tqe_prev == NULL) \ 61 PANIC("mutex is not on list"); \ 62 } while (0) 63 #define MUTEX_ASSERT_NOT_OWNED(m) do { \ 64 if (((m)->m_qe.tqe_prev != NULL) || \ 65 ((m)->m_qe.tqe_next != NULL)) \ 66 PANIC("mutex is on list"); \ 67 } while (0) 68 #define THR_ASSERT_NOT_IN_SYNCQ(thr) do { \ 69 THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \ 70 "thread in syncq when it shouldn't be."); \ 71 } while (0); 72 #else 73 #define MUTEX_INIT_LINK(m) 74 #define MUTEX_ASSERT_IS_OWNED(m) 75 #define MUTEX_ASSERT_NOT_OWNED(m) 76 #define THR_ASSERT_NOT_IN_SYNCQ(thr) 77 #endif 78 79 #define THR_IN_MUTEXQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0) 80 #define MUTEX_DESTROY(m) do { \ 81 __free(m); \ 82 } while (0) 83 84 umtx_t _mutex_static_lock; 85 86 #ifdef _PTHREADS_DEBUGGING 87 88 static 89 void 90 mutex_log(const char *ctl, ...) 91 { 92 char buf[256]; 93 va_list va; 94 size_t len; 95 96 va_start(va, ctl); 97 len = vsnprintf(buf, sizeof(buf), ctl, va); 98 va_end(va); 99 _thr_log(buf, len); 100 } 101 102 #else 103 104 static __inline 105 void 106 mutex_log(const char *ctl __unused, ...) 
107 { 108 } 109 110 #endif 111 112 #ifdef _PTHREADS_DEBUGGING2 113 114 static void 115 mutex_log2(struct pthread *curthread, struct pthread_mutex *m, int op) 116 { 117 if (curthread) { 118 if (curthread->tid < 32) 119 m->m_lastop[curthread->tid] = 120 (__sys_getpid() << 16) | op; 121 } else { 122 m->m_lastop[0] = 123 (__sys_getpid() << 16) | op; 124 } 125 } 126 127 #else 128 129 static __inline 130 void 131 mutex_log2(struct pthread *curthread __unused, 132 struct pthread_mutex *m __unused, int op __unused) 133 { 134 } 135 136 #endif 137 138 /* 139 * Prototypes 140 */ 141 static int mutex_self_trylock(pthread_mutex_t); 142 static int mutex_self_lock(pthread_mutex_t, 143 const struct timespec *abstime); 144 static int mutex_unlock_common(pthread_mutex_t *); 145 146 int __pthread_mutex_init(pthread_mutex_t *mutex, 147 const pthread_mutexattr_t *mutex_attr); 148 int __pthread_mutex_trylock(pthread_mutex_t *mutex); 149 int __pthread_mutex_lock(pthread_mutex_t *mutex); 150 int __pthread_mutex_timedlock(pthread_mutex_t *mutex, 151 const struct timespec *abs_timeout); 152 153 static int 154 mutex_check_attr(const struct pthread_mutex_attr *attr) 155 { 156 if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK || 157 attr->m_type >= PTHREAD_MUTEX_TYPE_MAX) 158 return (EINVAL); 159 if (attr->m_protocol < PTHREAD_PRIO_NONE || 160 attr->m_protocol > PTHREAD_PRIO_PROTECT) 161 return (EINVAL); 162 return (0); 163 } 164 165 static void 166 mutex_init_body(struct pthread_mutex *pmutex, 167 const struct pthread_mutex_attr *attr, int private) 168 { 169 _thr_umtx_init(&pmutex->m_lock); 170 pmutex->m_type = attr->m_type; 171 pmutex->m_protocol = attr->m_protocol; 172 TAILQ_INIT(&pmutex->m_queue); 173 mutex_log2(tls_get_curthread(), pmutex, 32); 174 pmutex->m_owner = NULL; 175 pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED; 176 if (private) 177 pmutex->m_flags |= MUTEX_FLAGS_PRIVATE; 178 pmutex->m_count = 0; 179 pmutex->m_refcount = 0; 180 if (attr->m_protocol == PTHREAD_PRIO_PROTECT) 181 
pmutex->m_prio = attr->m_ceiling; 182 else 183 pmutex->m_prio = -1; 184 pmutex->m_saved_prio = 0; 185 MUTEX_INIT_LINK(pmutex); 186 } 187 188 static int 189 mutex_init(pthread_mutex_t *mutex, 190 const pthread_mutexattr_t *mutex_attr, int private) 191 { 192 const struct pthread_mutex_attr *attr; 193 struct pthread_mutex *pmutex; 194 int error; 195 196 if (mutex_attr == NULL) { 197 attr = &_pthread_mutexattr_default; 198 } else { 199 attr = *mutex_attr; 200 error = mutex_check_attr(attr); 201 if (error != 0) 202 return (error); 203 } 204 205 pmutex = __malloc(sizeof(struct pthread_mutex)); 206 if (pmutex == NULL) 207 return (ENOMEM); 208 mutex_init_body(pmutex, attr, private); 209 *mutex = pmutex; 210 return (0); 211 } 212 213 static int 214 init_static(struct pthread *thread, pthread_mutex_t *mutex) 215 { 216 int ret; 217 218 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock); 219 220 if (*mutex == NULL) 221 ret = mutex_init(mutex, NULL, 0); 222 else 223 ret = 0; 224 THR_LOCK_RELEASE(thread, &_mutex_static_lock); 225 226 return (ret); 227 } 228 229 static int 230 init_static_private(struct pthread *thread, pthread_mutex_t *mutex) 231 { 232 int ret; 233 234 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock); 235 236 if (*mutex == NULL) 237 ret = mutex_init(mutex, NULL, 1); 238 else 239 ret = 0; 240 241 THR_LOCK_RELEASE(thread, &_mutex_static_lock); 242 243 return (ret); 244 } 245 246 int 247 _pthread_mutex_init(pthread_mutex_t * __restrict mutex, 248 const pthread_mutexattr_t * __restrict mutex_attr) 249 { 250 return mutex_init(mutex, mutex_attr, 1); 251 } 252 253 int 254 __pthread_mutex_init(pthread_mutex_t *mutex, 255 const pthread_mutexattr_t *mutex_attr) 256 { 257 return mutex_init(mutex, mutex_attr, 0); 258 } 259 260 #if 0 261 int 262 _mutex_reinit(pthread_mutex_t *mutexp) 263 { 264 pthread_mutex_t mutex = *mutexp; 265 266 _thr_umtx_init(&mutex->m_lock); 267 TAILQ_INIT(&mutex->m_queue); 268 MUTEX_INIT_LINK(mutex); 269 mutex_log2(tls_get_curthread(), mutex, 33); 270 
mutex->m_owner = NULL; 271 mutex->m_count = 0; 272 mutex->m_refcount = 0; 273 mutex->m_prio = 0; 274 mutex->m_saved_prio = 0; 275 276 return (0); 277 } 278 #endif 279 280 void 281 _mutex_fork(struct pthread *curthread) 282 { 283 struct pthread_mutex *m; 284 285 TAILQ_FOREACH(m, &curthread->mutexq, m_qe) 286 m->m_lock = UMTX_LOCKED; 287 } 288 289 int 290 _pthread_mutex_destroy(pthread_mutex_t *mutex) 291 { 292 struct pthread *curthread = tls_get_curthread(); 293 pthread_mutex_t m; 294 int ret = 0; 295 296 if (mutex == NULL) { 297 ret = EINVAL; 298 } else if (*mutex == NULL) { 299 ret = 0; 300 } else { 301 /* 302 * Try to lock the mutex structure, we only need to 303 * try once, if failed, the mutex is in use. 304 */ 305 ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock); 306 if (ret) 307 return (ret); 308 309 /* 310 * Check mutex other fields to see if this mutex is 311 * in use. Mostly for prority mutex types, or there 312 * are condition variables referencing it. 313 */ 314 if (((*mutex)->m_owner != NULL) || 315 (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) || 316 ((*mutex)->m_refcount != 0)) { 317 THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock); 318 ret = EBUSY; 319 } else { 320 /* 321 * Save a pointer to the mutex so it can be free'd 322 * and set the caller's pointer to NULL: 323 */ 324 m = *mutex; 325 *mutex = NULL; 326 327 /* Unlock the mutex structure: */ 328 THR_UMTX_UNLOCK(curthread, &m->m_lock); 329 330 /* 331 * Free the memory allocated for the mutex 332 * structure: 333 */ 334 MUTEX_ASSERT_NOT_OWNED(m); 335 MUTEX_DESTROY(m); 336 } 337 } 338 339 /* Return the completion status: */ 340 return (ret); 341 } 342 343 static int 344 mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex) 345 { 346 struct pthread_mutex *m; 347 int ret; 348 349 m = *mutex; 350 mutex_log("mutex_lock_trylock_common %p\n", m); 351 ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock); 352 if (ret == 0) { 353 mutex_log2(curthread, m, 1); 354 m->m_owner = curthread; 355 /* Add to 
the list of owned mutexes: */ 356 MUTEX_ASSERT_NOT_OWNED(m); 357 TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe); 358 } else if (m->m_owner == curthread) { 359 mutex_log2(curthread, m, 2); 360 ret = mutex_self_trylock(m); 361 } /* else {} */ 362 mutex_log("mutex_lock_trylock_common %p (returns %d)\n", m, ret); 363 364 return (ret); 365 } 366 367 int 368 __pthread_mutex_trylock(pthread_mutex_t *m) 369 { 370 struct pthread *curthread = tls_get_curthread(); 371 int ret; 372 373 if (__predict_false(m == NULL)) 374 return(EINVAL); 375 /* 376 * If the mutex is statically initialized, perform the dynamic 377 * initialization: 378 */ 379 if (__predict_false(*m == NULL)) { 380 ret = init_static(curthread, m); 381 if (__predict_false(ret != 0)) 382 return (ret); 383 } 384 return (mutex_trylock_common(curthread, m)); 385 } 386 387 int 388 _pthread_mutex_trylock(pthread_mutex_t *m) 389 { 390 struct pthread *curthread = tls_get_curthread(); 391 int ret = 0; 392 393 /* 394 * If the mutex is statically initialized, perform the dynamic 395 * initialization marking the mutex private (delete safe): 396 */ 397 if (__predict_false(*m == NULL)) { 398 ret = init_static_private(curthread, m); 399 if (__predict_false(ret != 0)) 400 return (ret); 401 } 402 return (mutex_trylock_common(curthread, m)); 403 } 404 405 static int 406 mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex, 407 const struct timespec * abstime) 408 { 409 struct timespec ts, ts2; 410 struct pthread_mutex *m; 411 int ret = 0; 412 413 m = *mutex; 414 mutex_log("mutex_lock_common %p\n", m); 415 ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock); 416 if (ret == 0) { 417 mutex_log2(curthread, m, 3); 418 m->m_owner = curthread; 419 /* Add to the list of owned mutexes: */ 420 MUTEX_ASSERT_NOT_OWNED(m); 421 TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe); 422 } else if (m->m_owner == curthread) { 423 ret = mutex_self_lock(m, abstime); 424 } else { 425 if (abstime == NULL) { 426 THR_UMTX_LOCK(curthread, &m->m_lock); 
427 ret = 0; 428 } else if (__predict_false( 429 abstime->tv_sec < 0 || abstime->tv_nsec < 0 || 430 abstime->tv_nsec >= 1000000000)) { 431 ret = EINVAL; 432 } else { 433 clock_gettime(CLOCK_REALTIME, &ts); 434 TIMESPEC_SUB(&ts2, abstime, &ts); 435 ret = THR_UMTX_TIMEDLOCK(curthread, &m->m_lock, &ts2); 436 } 437 if (ret == 0) { 438 mutex_log2(curthread, m, 4); 439 m->m_owner = curthread; 440 /* Add to the list of owned mutexes: */ 441 MUTEX_ASSERT_NOT_OWNED(m); 442 TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe); 443 } 444 } 445 mutex_log("mutex_lock_common %p (returns %d) lock %d,%d\n", 446 m, ret, m->m_lock, m->m_count); 447 return (ret); 448 } 449 450 int 451 __pthread_mutex_lock(pthread_mutex_t *m) 452 { 453 struct pthread *curthread; 454 int ret; 455 456 if (__predict_false(m == NULL)) 457 return(EINVAL); 458 459 /* 460 * If the mutex is statically initialized, perform the dynamic 461 * initialization: 462 */ 463 curthread = tls_get_curthread(); 464 if (__predict_false(*m == NULL)) { 465 ret = init_static(curthread, m); 466 if (__predict_false(ret)) 467 return (ret); 468 } 469 return (mutex_lock_common(curthread, m, NULL)); 470 } 471 472 int 473 _pthread_mutex_lock(pthread_mutex_t *m) 474 { 475 struct pthread *curthread; 476 int ret; 477 478 if (__predict_false(m == NULL)) 479 return(EINVAL); 480 481 /* 482 * If the mutex is statically initialized, perform the dynamic 483 * initialization marking it private (delete safe): 484 */ 485 curthread = tls_get_curthread(); 486 if (__predict_false(*m == NULL)) { 487 ret = init_static_private(curthread, m); 488 if (__predict_false(ret)) 489 return (ret); 490 } 491 return (mutex_lock_common(curthread, m, NULL)); 492 } 493 494 int 495 __pthread_mutex_timedlock(pthread_mutex_t * __restrict m, 496 const struct timespec * __restrict abs_timeout) 497 { 498 struct pthread *curthread; 499 int ret; 500 501 if (__predict_false(m == NULL)) 502 return(EINVAL); 503 504 /* 505 * If the mutex is statically initialized, perform the 
dynamic 506 * initialization: 507 */ 508 curthread = tls_get_curthread(); 509 if (__predict_false(*m == NULL)) { 510 ret = init_static(curthread, m); 511 if (__predict_false(ret)) 512 return (ret); 513 } 514 return (mutex_lock_common(curthread, m, abs_timeout)); 515 } 516 517 int 518 _pthread_mutex_timedlock(pthread_mutex_t *m, 519 const struct timespec *abs_timeout) 520 { 521 struct pthread *curthread; 522 int ret; 523 524 if (__predict_false(m == NULL)) 525 return(EINVAL); 526 527 curthread = tls_get_curthread(); 528 529 /* 530 * If the mutex is statically initialized, perform the dynamic 531 * initialization marking it private (delete safe): 532 */ 533 if (__predict_false(*m == NULL)) { 534 ret = init_static_private(curthread, m); 535 if (__predict_false(ret)) 536 return (ret); 537 } 538 return (mutex_lock_common(curthread, m, abs_timeout)); 539 } 540 541 int 542 _pthread_mutex_unlock(pthread_mutex_t *m) 543 { 544 if (__predict_false(m == NULL)) 545 return(EINVAL); 546 return (mutex_unlock_common(m)); 547 } 548 549 static int 550 mutex_self_trylock(pthread_mutex_t m) 551 { 552 int ret; 553 554 switch (m->m_type) { 555 /* case PTHREAD_MUTEX_DEFAULT: */ 556 case PTHREAD_MUTEX_ERRORCHECK: 557 case PTHREAD_MUTEX_NORMAL: 558 ret = EBUSY; 559 break; 560 561 case PTHREAD_MUTEX_RECURSIVE: 562 /* Increment the lock count: */ 563 if (m->m_count + 1 > 0) { 564 m->m_count++; 565 ret = 0; 566 } else 567 ret = EAGAIN; 568 break; 569 570 default: 571 /* Trap invalid mutex types; */ 572 ret = EINVAL; 573 } 574 575 return (ret); 576 } 577 578 static int 579 mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime) 580 { 581 struct timespec ts1, ts2; 582 int ret; 583 584 switch (m->m_type) { 585 /* case PTHREAD_MUTEX_DEFAULT: */ 586 case PTHREAD_MUTEX_ERRORCHECK: 587 if (abstime) { 588 clock_gettime(CLOCK_REALTIME, &ts1); 589 TIMESPEC_SUB(&ts2, abstime, &ts1); 590 __sys_nanosleep(&ts2, NULL); 591 ret = ETIMEDOUT; 592 } else { 593 /* 594 * POSIX specifies that mutexes 
should return 595 * EDEADLK if a recursive lock is detected. 596 */ 597 ret = EDEADLK; 598 } 599 break; 600 601 case PTHREAD_MUTEX_NORMAL: 602 /* 603 * What SS2 define as a 'normal' mutex. Intentionally 604 * deadlock on attempts to get a lock you already own. 605 */ 606 ret = 0; 607 if (abstime) { 608 clock_gettime(CLOCK_REALTIME, &ts1); 609 TIMESPEC_SUB(&ts2, abstime, &ts1); 610 __sys_nanosleep(&ts2, NULL); 611 ret = ETIMEDOUT; 612 } else { 613 ts1.tv_sec = 30; 614 ts1.tv_nsec = 0; 615 for (;;) 616 __sys_nanosleep(&ts1, NULL); 617 } 618 break; 619 620 case PTHREAD_MUTEX_RECURSIVE: 621 /* Increment the lock count: */ 622 if (m->m_count + 1 > 0) { 623 m->m_count++; 624 ret = 0; 625 } else 626 ret = EAGAIN; 627 break; 628 629 default: 630 /* Trap invalid mutex types; */ 631 ret = EINVAL; 632 } 633 634 return (ret); 635 } 636 637 static int 638 mutex_unlock_common(pthread_mutex_t *mutex) 639 { 640 struct pthread *curthread = tls_get_curthread(); 641 struct pthread_mutex *m; 642 643 if (__predict_false((m = *mutex) == NULL)) { 644 mutex_log2(curthread, m, 252); 645 return (EINVAL); 646 } 647 mutex_log("mutex_unlock_common %p\n", m); 648 if (__predict_false(m->m_owner != curthread)) { 649 mutex_log("mutex_unlock_common %p (failedA)\n", m); 650 mutex_log2(curthread, m, 253); 651 return (EPERM); 652 } 653 654 if (__predict_false(m->m_type == PTHREAD_MUTEX_RECURSIVE && 655 m->m_count > 0)) { 656 m->m_count--; 657 mutex_log("mutex_unlock_common %p (returns 0, partial)\n", m); 658 mutex_log2(curthread, m, 254); 659 } else { 660 /* 661 * Clear the count in case this is a recursive mutex. 662 */ 663 m->m_count = 0; 664 m->m_owner = NULL; 665 /* Remove the mutex from the threads queue. */ 666 MUTEX_ASSERT_IS_OWNED(m); 667 TAILQ_REMOVE(&curthread->mutexq, m, m_qe); 668 mutex_log2(tls_get_curthread(), m, 35); 669 MUTEX_INIT_LINK(m); 670 mutex_log2(tls_get_curthread(), m, 36); 671 /* 672 * Hand off the mutex to the next waiting thread. 
673 */ 674 mutex_log("mutex_unlock_common %p (returns 0) lock %d\n", 675 m, m->m_lock); 676 THR_UMTX_UNLOCK(curthread, &m->m_lock); 677 mutex_log2(tls_get_curthread(), m, 37); 678 mutex_log2(curthread, m, 255); 679 } 680 return (0); 681 } 682 683 int 684 _pthread_mutex_getprioceiling(const pthread_mutex_t * __restrict mutex, 685 int * __restrict prioceiling) 686 { 687 if ((mutex == NULL) || (*mutex == NULL)) 688 return (EINVAL); 689 if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT) 690 return (EINVAL); 691 *prioceiling = (*mutex)->m_prio; 692 return (0); 693 } 694 695 int 696 _pthread_mutex_setprioceiling(pthread_mutex_t * __restrict mutex, 697 int prioceiling, int * __restrict old_ceiling) 698 { 699 int ret = 0; 700 int tmp; 701 702 if ((mutex == NULL) || (*mutex == NULL)) 703 ret = EINVAL; 704 else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT) 705 ret = EINVAL; 706 else if ((ret = _pthread_mutex_lock(mutex)) == 0) { 707 tmp = (*mutex)->m_prio; 708 (*mutex)->m_prio = prioceiling; 709 ret = _pthread_mutex_unlock(mutex); 710 *old_ceiling = tmp; 711 } 712 return(ret); 713 } 714 715 int 716 _mutex_cv_lock(pthread_mutex_t *m, int count) 717 { 718 int ret; 719 720 if ((ret = _pthread_mutex_lock(m)) == 0) { 721 (*m)->m_refcount--; 722 (*m)->m_count += count; 723 } 724 return (ret); 725 } 726 727 int 728 _mutex_cv_unlock(pthread_mutex_t *mutex, int *count) 729 { 730 struct pthread *curthread = tls_get_curthread(); 731 struct pthread_mutex *m; 732 733 if (__predict_false(mutex == NULL)) 734 return (EINVAL); 735 if (__predict_false((m = *mutex) == NULL)) 736 return (EINVAL); 737 if (__predict_false(m->m_owner != curthread)) 738 return (EPERM); 739 740 *count = m->m_count; 741 m->m_count = 0; 742 m->m_refcount++; 743 mutex_log2(tls_get_curthread(), m, 45); 744 m->m_owner = NULL; 745 /* Remove the mutex from the threads queue. 
*/ 746 MUTEX_ASSERT_IS_OWNED(m); 747 TAILQ_REMOVE(&curthread->mutexq, m, m_qe); 748 MUTEX_INIT_LINK(m); 749 THR_UMTX_UNLOCK(curthread, &m->m_lock); 750 mutex_log2(curthread, m, 250); 751 return (0); 752 } 753 754 void 755 _mutex_unlock_private(pthread_t pthread) 756 { 757 struct pthread_mutex *m, *m_next; 758 759 for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) { 760 m_next = TAILQ_NEXT(m, m_qe); 761 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0) 762 _pthread_mutex_unlock(&m); 763 } 764 } 765 766 __strong_reference(__pthread_mutex_init, pthread_mutex_init); 767 __strong_reference(__pthread_mutex_lock, pthread_mutex_lock); 768 __strong_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock); 769 __strong_reference(__pthread_mutex_trylock, pthread_mutex_trylock); 770 771 /* Single underscore versions provided for libc internal usage: */ 772 /* No difference between libc and application usage of these: */ 773 __strong_reference(_pthread_mutex_destroy, pthread_mutex_destroy); 774 __strong_reference(_pthread_mutex_unlock, pthread_mutex_unlock); 775 __strong_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling); 776 __strong_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling); 777