/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "namespace.h"
#include <machine/tls.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#ifdef _PTHREADS_DEBUGGING

#include <stdio.h>
#include <stdarg.h>
#include <sys/file.h>

#endif

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) do { \
        (m)->m_qe.tqe_prev = NULL; \
        (m)->m_qe.tqe_next = NULL; \
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m) do { \
        if ((m)->m_qe.tqe_prev == NULL) \
                PANIC("mutex is not on list"); \
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m) do { \
        if (((m)->m_qe.tqe_prev != NULL) || \
            ((m)->m_qe.tqe_next != NULL)) \
                PANIC("mutex is on list"); \
} while (0)
#define THR_ASSERT_NOT_IN_SYNCQ(thr) do { \
        THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
            "thread in syncq when it shouldn't be."); \
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define MUTEX_DESTROY(m) do { \
        __free(m); \
} while (0)

umtx_t _mutex_static_lock;

#ifdef _PTHREADS_DEBUGGING

static
void
mutex_log(const char *ctl, ...)
{
        char buf[256];
        va_list va;
        size_t len;

        va_start(va, ctl);
        len = vsnprintf(buf, sizeof(buf), ctl, va);
        va_end(va);
        _thr_log(buf, len);
}

#else

static __inline
void
mutex_log(const char *ctl __unused, ...)
{
}

#endif

#ifdef _PTHREADS_DEBUGGING2

static void
mutex_log2(pthread_t curthread, pthread_mutex_t m, int op)
{
        if (curthread) {
                if (curthread->tid < 32)
                        m->m_lastop[curthread->tid] =
                                (__sys_getpid() << 16) | op;
        } else {
                m->m_lastop[0] =
                        (__sys_getpid() << 16) | op;
        }
}

#else

static __inline
void
mutex_log2(pthread_t curthread __unused,
    pthread_mutex_t m __unused, int op __unused)
{
}

#endif

/*
 * Prototypes
 */
static int mutex_self_trylock(pthread_mutex_t);
static int mutex_self_lock(pthread_mutex_t,
    const struct timespec *abstime);
static int mutex_unlock_common(pthread_mutex_t *);

int __pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr);
int __pthread_mutex_trylock(pthread_mutex_t *mutex);
int __pthread_mutex_lock(pthread_mutex_t *mutex);
int __pthread_mutex_timedlock(pthread_mutex_t *mutex,
    const struct timespec *abs_timeout);

static int
mutex_check_attr(const struct __pthread_mutexattr_s *attr)
{
        if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
            attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
                return (EINVAL);
        if (attr->m_protocol < PTHREAD_PRIO_NONE ||
            attr->m_protocol > PTHREAD_PRIO_PROTECT)
                return (EINVAL);
        return (0);
}
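
/*
 * Illustrative only (not part of the library): an attribute setup
 * that passes mutex_check_attr() above, using the standard pthread
 * attribute API.  "attr" and "mtx" are hypothetical locals.
 *
 *        pthread_mutexattr_t attr;
 *        pthread_mutex_t mtx;
 *
 *        pthread_mutexattr_init(&attr);
 *        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *        pthread_mutex_init(&mtx, &attr);
 *        pthread_mutexattr_destroy(&attr);
 */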

static void
mutex_init_body(pthread_mutex_t pmutex,
    const struct __pthread_mutexattr_s *attr, int private)
{
        _thr_umtx_init(&pmutex->m_lock);
        pmutex->m_type = attr->m_type;
        pmutex->m_protocol = attr->m_protocol;
        TAILQ_INIT(&pmutex->m_queue);
        mutex_log2(tls_get_curthread(), pmutex, 32);
        pmutex->m_owner = NULL;
        pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
        if (private)
                pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
        pmutex->m_count = 0;
        pmutex->m_refcount = 0;
        if (attr->m_protocol == PTHREAD_PRIO_PROTECT)
                pmutex->m_prio = attr->m_ceiling;
        else
                pmutex->m_prio = -1;
        pmutex->m_saved_prio = 0;
        MUTEX_INIT_LINK(pmutex);
}

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
        const struct __pthread_mutexattr_s *attr;
        pthread_mutex_t pmutex;
        int error;

        if (mutex_attr == NULL) {
                attr = &_pthread_mutexattr_default;
        } else {
                attr = *mutex_attr;
                error = mutex_check_attr(attr);
                if (error != 0)
                        return (error);
        }

        pmutex = __malloc(sizeof(struct __pthread_mutex_s));
        if (pmutex == NULL)
                return (ENOMEM);
        mutex_init_body(pmutex, attr, private);
        *mutex = pmutex;
        return (0);
}

static int
init_static(pthread_t thread, pthread_mutex_t *mutex)
{
        int ret;

        THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

        if (*mutex == NULL)
                ret = mutex_init(mutex, NULL, 0);
        else
                ret = 0;
        THR_LOCK_RELEASE(thread, &_mutex_static_lock);

        return (ret);
}

static int
init_static_private(pthread_t thread, pthread_mutex_t *mutex)
{
        int ret;

        THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

        if (*mutex == NULL)
                ret = mutex_init(mutex, NULL, 1);
        else
                ret = 0;

        THR_LOCK_RELEASE(thread, &_mutex_static_lock);

        return (ret);
}
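
/*
 * Sketch of how static initialization interacts with the helpers
 * above (illustrative, not part of the library).  A statically
 * initialized mutex is a NULL pointer until first use; the first
 * lock or trylock then allocates the real structure under
 * _mutex_static_lock:
 *
 *        static pthread_mutex_t lazy = PTHREAD_MUTEX_INITIALIZER;
 *
 *        pthread_mutex_lock(&lazy);    // first lock runs init_static()
 *        pthread_mutex_unlock(&lazy);
 */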

int
_pthread_mutex_init(pthread_mutex_t * __restrict mutex,
    const pthread_mutexattr_t * __restrict mutex_attr)
{
        return mutex_init(mutex, mutex_attr, 1);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
        return mutex_init(mutex, mutex_attr, 0);
}

#if 0
int
_mutex_reinit(pthread_mutex_t *mutexp)
{
        pthread_mutex_t mutex = *mutexp;

        _thr_umtx_init(&mutex->m_lock);
        TAILQ_INIT(&mutex->m_queue);
        MUTEX_INIT_LINK(mutex);
        mutex_log2(tls_get_curthread(), mutex, 33);
        mutex->m_owner = NULL;
        mutex->m_count = 0;
        mutex->m_refcount = 0;
        mutex->m_prio = 0;
        mutex->m_saved_prio = 0;

        return (0);
}
#endif

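/*
 * After a fork() only the forking thread survives in the child.
 * Re-seat the low-level lock word of every mutex that thread owns
 * so the locks are held under the child's new lwp tid.
 */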
void
_mutex_fork(pthread_t curthread, lwpid_t tid)
{
        pthread_mutex_t m;

        TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
                m->m_lock = tid;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
        pthread_t curthread = tls_get_curthread();
        pthread_mutex_t m;
        int ret = 0;

        if (mutex == NULL) {
                ret = EINVAL;
        } else if (*mutex == NULL) {
                ret = 0;
        } else {
                /*
                 * Try to lock the mutex structure.  One attempt is
                 * enough; if it fails, the mutex is in use.
                 */
                ret = THR_UMTX_TRYLOCK_PERSIST(curthread, &(*mutex)->m_lock);
                if (ret)
                        return (ret);

                /*
                 * Check the mutex's other fields to see if it is still
                 * in use.  This mostly matters for priority mutex types
                 * and for mutexes referenced by condition variables.
                 */
                if (((*mutex)->m_owner != NULL) ||
                    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
                    ((*mutex)->m_refcount != 0)) {
                        THR_UMTX_UNLOCK_PERSIST(curthread, &(*mutex)->m_lock);
                        ret = EBUSY;
                } else {
                        /*
                         * Save a pointer to the mutex so it can be freed
                         * and set the caller's pointer to NULL:
                         */
                        m = *mutex;
                        *mutex = NULL;

                        /* Unlock the mutex structure: */
                        THR_UMTX_UNLOCK_PERSIST(curthread, &m->m_lock);

                        /*
                         * Free the memory allocated for the mutex
                         * structure:
                         */
                        MUTEX_ASSERT_NOT_OWNED(m);
                        MUTEX_DESTROY(m);
                }
        }

        /* Return the completion status: */
        return (ret);
}
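
/*
 * Illustrative only: how a caller might react to the EBUSY result
 * above ("mtx" is a hypothetical mutex still referenced elsewhere).
 *
 *        if (pthread_mutex_destroy(&mtx) == EBUSY) {
 *                // Locked, contested, or referenced by a condition
 *                // variable; quiesce all users, then destroy again.
 *        }
 */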

static int
mutex_trylock_common(pthread_t curthread, pthread_mutex_t *mutex)
{
        pthread_mutex_t m;
        int ret;

        m = *mutex;
        mutex_log("mutex_trylock_common %p\n", m);
        ret = THR_UMTX_TRYLOCK_PERSIST(curthread, &m->m_lock);
        if (ret == 0) {
                mutex_log2(curthread, m, 1);
                m->m_owner = curthread;
                /* Add to the list of owned mutexes: */
                MUTEX_ASSERT_NOT_OWNED(m);
                TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
        } else if (m->m_owner == curthread) {
                mutex_log2(curthread, m, 2);
                ret = mutex_self_trylock(m);
        } /* else {} */
        mutex_log("mutex_trylock_common %p (returns %d)\n", m, ret);

        return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *m)
{
        pthread_t curthread = tls_get_curthread();
        int ret;

        if (__predict_false(m == NULL))
                return (EINVAL);

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        if (__predict_false(*m == NULL)) {
                ret = init_static(curthread, m);
                if (__predict_false(ret != 0))
                        return (ret);
        }
        return (mutex_trylock_common(curthread, m));
}

int
_pthread_mutex_trylock(pthread_mutex_t *m)
{
        pthread_t curthread = tls_get_curthread();
        int ret = 0;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization marking the mutex private (delete safe):
         */
        if (__predict_false(*m == NULL)) {
                ret = init_static_private(curthread, m);
                if (__predict_false(ret != 0))
                        return (ret);
        }
        return (mutex_trylock_common(curthread, m));
}

static int
mutex_lock_common(pthread_t curthread, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
        struct timespec ts, ts2;
        pthread_mutex_t m;
        int ret = 0;

        m = *mutex;
        mutex_log("mutex_lock_common %p\n", m);
        ret = THR_UMTX_TRYLOCK_PERSIST(curthread, &m->m_lock);
        if (ret == 0) {
                mutex_log2(curthread, m, 3);
                m->m_owner = curthread;
                /* Add to the list of owned mutexes: */
                MUTEX_ASSERT_NOT_OWNED(m);
                TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
        } else if (m->m_owner == curthread) {
                ret = mutex_self_lock(m, abstime);
        } else {
                if (abstime == NULL) {
                        THR_UMTX_LOCK_PERSIST(curthread, &m->m_lock);
                        ret = 0;
                } else if (__predict_false(
                    abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
                    abstime->tv_nsec >= 1000000000)) {
                        ret = EINVAL;
                } else {
                        clock_gettime(CLOCK_REALTIME, &ts);
                        timespecsub(abstime, &ts, &ts2);
                        ret = THR_UMTX_TIMEDLOCK_PERSIST(curthread,
                            &m->m_lock, &ts2);
                }
                if (ret == 0) {
                        mutex_log2(curthread, m, 4);
                        m->m_owner = curthread;
                        /* Add to the list of owned mutexes: */
                        MUTEX_ASSERT_NOT_OWNED(m);
                        TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
                }
        }
        mutex_log("mutex_lock_common %p (returns %d) lock %d,%d\n",
            m, ret, m->m_lock, m->m_count);
        return (ret);
}
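
/*
 * Usage sketch (illustrative): the timeout passed down to
 * mutex_lock_common() is an absolute CLOCK_REALTIME time, which the
 * code above converts to the relative interval the umtx layer
 * expects.  "mtx" is a hypothetical mutex.
 *
 *        struct timespec abs;
 *
 *        clock_gettime(CLOCK_REALTIME, &abs);
 *        abs.tv_sec += 5;        // give up roughly 5 seconds from now
 *        if (pthread_mutex_timedlock(&mtx, &abs) == ETIMEDOUT)
 *                ;               // handle the timeout
 */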

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
        pthread_t curthread;
        int ret;

        if (__predict_false(m == NULL))
                return (EINVAL);

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        curthread = tls_get_curthread();
        if (__predict_false(*m == NULL)) {
                ret = init_static(curthread, m);
                if (__predict_false(ret))
                        return (ret);
        }
        return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
        pthread_t curthread;
        int ret;

        _thr_check_init();

        if (__predict_false(m == NULL))
                return (EINVAL);

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization marking it private (delete safe):
         */
        curthread = tls_get_curthread();
        if (__predict_false(*m == NULL)) {
                ret = init_static_private(curthread, m);
                if (__predict_false(ret))
                        return (ret);
        }
        return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t * __restrict m,
    const struct timespec * __restrict abs_timeout)
{
        pthread_t curthread;
        int ret;

        _thr_check_init();

        if (__predict_false(m == NULL))
                return (EINVAL);

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        curthread = tls_get_curthread();
        if (__predict_false(*m == NULL)) {
                ret = init_static(curthread, m);
                if (__predict_false(ret))
                        return (ret);
        }
        return (mutex_lock_common(curthread, m, abs_timeout));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
    const struct timespec *abs_timeout)
{
        pthread_t curthread;
        int ret;

        if (__predict_false(m == NULL))
                return (EINVAL);

        curthread = tls_get_curthread();

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization marking it private (delete safe):
         */
        if (__predict_false(*m == NULL)) {
                ret = init_static_private(curthread, m);
                if (__predict_false(ret))
                        return (ret);
        }
        return (mutex_lock_common(curthread, m, abs_timeout));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
        if (__predict_false(m == NULL))
                return (EINVAL);
        return (mutex_unlock_common(m));
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
        int ret;

        switch (m->m_type) {
        /* case PTHREAD_MUTEX_DEFAULT: */
        case PTHREAD_MUTEX_ERRORCHECK:
        case PTHREAD_MUTEX_NORMAL:
                ret = EBUSY;
                break;

        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                if (m->m_count + 1 > 0) {
                        m->m_count++;
                        ret = 0;
                } else
                        ret = EAGAIN;
                break;

        default:
                /* Trap invalid mutex types. */
                ret = EINVAL;
        }

        return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
        struct timespec ts1, ts2;
        int ret;

        switch (m->m_type) {
        /* case PTHREAD_MUTEX_DEFAULT: */
        case PTHREAD_MUTEX_ERRORCHECK:
                if (abstime) {
                        clock_gettime(CLOCK_REALTIME, &ts1);
                        timespecsub(abstime, &ts1, &ts2);
                        __sys_nanosleep(&ts2, NULL);
                        ret = ETIMEDOUT;
                } else {
                        /*
                         * POSIX specifies that mutexes should return
                         * EDEADLK if a recursive lock is detected.
                         */
                        ret = EDEADLK;
                }
                break;

        case PTHREAD_MUTEX_NORMAL:
                /*
                 * What SS2 defines as a 'normal' mutex: intentionally
                 * deadlock on attempts to acquire a lock you already own.
                 */
                ret = 0;
                if (abstime) {
                        clock_gettime(CLOCK_REALTIME, &ts1);
                        timespecsub(abstime, &ts1, &ts2);
                        __sys_nanosleep(&ts2, NULL);
                        ret = ETIMEDOUT;
                } else {
                        ts1.tv_sec = 30;
                        ts1.tv_nsec = 0;
                        for (;;)
                                __sys_nanosleep(&ts1, NULL);
                }
                break;

        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                if (m->m_count + 1 > 0) {
                        m->m_count++;
                        ret = 0;
                } else
                        ret = EAGAIN;
                break;

        default:
                /* Trap invalid mutex types. */
                ret = EINVAL;
        }

        return (ret);
}
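
/*
 * The per-type self-lock behavior above, as seen by a caller
 * (illustrative only; "mtx" is a hypothetical mutex):
 *
 *        pthread_mutex_lock(&mtx);
 *        error = pthread_mutex_lock(&mtx);    // relock by the owner
 *
 * ERRORCHECK: error == EDEADLK.  RECURSIVE: error == 0 and the mutex
 * must then be unlocked twice.  NORMAL: deadlocks by design (or
 * returns ETIMEDOUT when a timeout was supplied via
 * pthread_mutex_timedlock()).
 */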

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
        pthread_t curthread = tls_get_curthread();
        pthread_mutex_t m;

        if (__predict_false((m = *mutex) == NULL)) {
                mutex_log2(curthread, m, 252);
                return (EINVAL);
        }
        mutex_log("mutex_unlock_common %p\n", m);
        if (__predict_false(m->m_owner != curthread)) {
                mutex_log("mutex_unlock_common %p (failedA)\n", m);
                mutex_log2(curthread, m, 253);
                return (EPERM);
        }

        if (__predict_false(m->m_type == PTHREAD_MUTEX_RECURSIVE &&
            m->m_count > 0)) {
                m->m_count--;
                mutex_log("mutex_unlock_common %p (returns 0, partial)\n", m);
                mutex_log2(curthread, m, 254);
        } else {
                /*
                 * Clear the count in case this is a recursive mutex.
                 */
                m->m_count = 0;
                m->m_owner = NULL;
                /* Remove the mutex from the thread's queue. */
                MUTEX_ASSERT_IS_OWNED(m);
                TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
                mutex_log2(tls_get_curthread(), m, 35);
                MUTEX_INIT_LINK(m);
                mutex_log2(tls_get_curthread(), m, 36);
                /*
                 * Hand off the mutex to the next waiting thread.
                 */
                mutex_log("mutex_unlock_common %p (returns 0) lock %d\n",
                    m, m->m_lock);
                THR_UMTX_UNLOCK_PERSIST(curthread, &m->m_lock);
                mutex_log2(tls_get_curthread(), m, 37);
                mutex_log2(curthread, m, 255);
        }
        return (0);
}

int
_pthread_mutex_getprioceiling(const pthread_mutex_t * __restrict mutex,
    int * __restrict prioceiling)
{
        if ((mutex == NULL) || (*mutex == NULL))
                return (EINVAL);
        if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
                return (EINVAL);
        *prioceiling = (*mutex)->m_prio;
        return (0);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t * __restrict mutex,
    int prioceiling, int * __restrict old_ceiling)
{
        int ret = 0;
        int tmp;

        if ((mutex == NULL) || (*mutex == NULL))
                ret = EINVAL;
        else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
                ret = EINVAL;
        else if ((ret = _pthread_mutex_lock(mutex)) == 0) {
                tmp = (*mutex)->m_prio;
                (*mutex)->m_prio = prioceiling;
                ret = _pthread_mutex_unlock(mutex);
                *old_ceiling = tmp;
        }
        return (ret);
}

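/*
 * The two helpers below pair up around a condition variable wait:
 * _mutex_cv_unlock() saves the recursion count and releases the
 * mutex for the duration of the wait, bumping m_refcount so that
 * pthread_mutex_destroy() keeps returning EBUSY and the mutex cannot
 * be freed out from under a waiter; _mutex_cv_lock() re-acquires the
 * mutex, drops the reference, and restores the saved count.
 */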
int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
        int ret;

        if ((ret = _pthread_mutex_lock(m)) == 0) {
                (*m)->m_refcount--;
                (*m)->m_count += count;
        }
        return (ret);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
        pthread_t curthread = tls_get_curthread();
        pthread_mutex_t m;

        if (__predict_false(mutex == NULL))
                return (EINVAL);
        if (__predict_false((m = *mutex) == NULL))
                return (EINVAL);
        if (__predict_false(m->m_owner != curthread))
                return (EPERM);

        *count = m->m_count;
        m->m_count = 0;
        m->m_refcount++;
        mutex_log2(tls_get_curthread(), m, 45);
        m->m_owner = NULL;
        /* Remove the mutex from the thread's queue. */
        MUTEX_ASSERT_IS_OWNED(m);
        TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
        MUTEX_INIT_LINK(m);
        THR_UMTX_UNLOCK_PERSIST(curthread, &m->m_lock);
        mutex_log2(curthread, m, 250);
        return (0);
}

void
_mutex_unlock_private(pthread_t pthread)
{
        pthread_mutex_t m, m_next;

        for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
                m_next = TAILQ_NEXT(m, m_qe);
                if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
                        _pthread_mutex_unlock(&m);
        }
}

__strong_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__strong_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__strong_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
__strong_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__strong_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);