/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "namespace.h"
#include <machine/tls.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#ifdef _PTHREADS_DEBUGGING

#include <stdio.h>
#include <stdarg.h>
#include <sys/file.h>

#endif

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define	MUTEX_DESTROY(m) do {		\
	free(m);			\
} while (0)

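/*
 * Serializes the one-time initialization of statically allocated
 * mutexes (see init_static() and init_static_private() below).
 */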
umtx_t	_mutex_static_lock;

#ifdef _PTHREADS_DEBUGGING

static
void
mutex_log(const char *ctl, ...)
{
	char buf[256];
	va_list va;
	int len;

	va_start(va, ctl);
	len = vsnprintf(buf, sizeof(buf), ctl, va);
	va_end(va);
	if (len < 0)
		return;
	/* vsnprintf() returns the untruncated length; clamp to the buffer */
	if ((size_t)len >= sizeof(buf))
		len = sizeof(buf) - 1;
	_thr_log(buf, len);
}

#else

static __inline
void
mutex_log(const char *ctl __unused, ...)
{
}

#endif

#ifdef _PTHREADS_DEBUGGING2

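/*
 * Record the most recent operation each thread performed on a mutex so
 * a debugger can reconstruct recent history: the per-thread slot in
 * m_lastop[] stores the current pid in the high bits and an operation
 * code in the low 16 bits.
 */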
static void
mutex_log2(struct pthread *curthread, struct pthread_mutex *m, int op)
{
	if (m == NULL)	/* e.g. unlock of a NULL mutex, see op 252 */
		return;
	if (curthread) {
		if (curthread->tid < 32)
			m->m_lastop[curthread->tid] =
				(__sys_getpid() << 16) | op;
	} else {
		m->m_lastop[0] =
			(__sys_getpid() << 16) | op;
	}
}

#else

static __inline
void
mutex_log2(struct pthread *curthread __unused,
	   struct pthread_mutex *m __unused, int op __unused)
{
}

#endif

/*
 * Prototypes
 */
static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

int __pthread_mutex_init(pthread_mutex_t *mutex,
	const pthread_mutexattr_t *mutex_attr);
int __pthread_mutex_trylock(pthread_mutex_t *mutex);
int __pthread_mutex_lock(pthread_mutex_t *mutex);
int __pthread_mutex_timedlock(pthread_mutex_t *mutex,
	const struct timespec *abs_timeout);

static int
mutex_check_attr(const struct pthread_mutex_attr *attr)
{
	if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
	    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
		return (EINVAL);
	if (attr->m_protocol < PTHREAD_PRIO_NONE ||
	    attr->m_protocol > PTHREAD_PRIO_PROTECT)
		return (EINVAL);
	return (0);
}
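
/*
 * For illustration, an attribute setup that passes mutex_check_attr():
 * a recursive mutex with the default (PTHREAD_PRIO_NONE) protocol.
 * Example application code, not compiled here.
 */
#if 0
	pthread_mutexattr_t attr;
	pthread_mutex_t mtx;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&mtx, &attr);   /* mutex_check_attr() accepts */
	pthread_mutexattr_destroy(&attr);
#endif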

static void
mutex_init_body(struct pthread_mutex *pmutex,
    const struct pthread_mutex_attr *attr, int private)
{
	_thr_umtx_init(&pmutex->m_lock);
	pmutex->m_type = attr->m_type;
	pmutex->m_protocol = attr->m_protocol;
	TAILQ_INIT(&pmutex->m_queue);
	mutex_log2(tls_get_curthread(), pmutex, 32);
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	if (attr->m_protocol == PTHREAD_PRIO_PROTECT)
		pmutex->m_prio = attr->m_ceiling;
	else
		pmutex->m_prio = -1;
	pmutex->m_saved_prio = 0;
	MUTEX_INIT_LINK(pmutex);
}

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;
	int error;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		error = mutex_check_attr(attr);
		if (error != 0)
			return (error);
	}
	if ((pmutex = (pthread_mutex_t)
		malloc(sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);
	mutex_init_body(pmutex, attr, private);
	*mutex = pmutex;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;
	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}
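
/*
 * In this implementation a statically initialized mutex is one whose
 * pointer is still NULL; the first lock/trylock/timedlock allocates
 * the real structure via the functions above.  Illustrative
 * application code, not compiled here:
 */
#if 0
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_lock(&lock);	/* first use runs init_static() */
	pthread_mutex_unlock(&lock);
#endif

/*
 * The single-underscore entry points below are used internally by libc
 * and mark the mutex private (delete safe); the double-underscore entry
 * points service the public pthread_mutex_*() names via the strong
 * references at the bottom of this file.
 */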

int
_pthread_mutex_init(pthread_mutex_t * __restrict mutex,
    const pthread_mutexattr_t * __restrict mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 1));
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 0));
}

#if 0
int
_mutex_reinit(pthread_mutex_t *mutexp)
{
	pthread_mutex_t mutex = *mutexp;

	_thr_umtx_init(&mutex->m_lock);
	TAILQ_INIT(&mutex->m_queue);
	MUTEX_INIT_LINK(mutex);
	mutex_log2(tls_get_curthread(), mutex, 33);
	mutex->m_owner = NULL;
	mutex->m_count = 0;
	mutex->m_refcount = 0;
	mutex->m_prio = 0;
	mutex->m_saved_prio = 0;

	return (0);
}
#endif

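/*
 * Called in the child process after a fork().  Only the forking thread
 * survives in the child, so every mutex it owns is reset to a plain
 * locked state; waiters recorded in the parent do not exist here.
 */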
void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock = UMTX_LOCKED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = tls_get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL) {
		ret = EINVAL;
	} else if (*mutex == NULL) {
		ret = 0;
	} else {
		/*
		 * Try to lock the mutex structure.  One attempt is
		 * enough; if it fails, the mutex is in use.
		 */
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret)
			return (ret);

		/*
		 * Check the mutex's other fields to see if it is still
		 * in use: an owner or queued waiters (mostly relevant
		 * for priority mutex types), or condition variables
		 * referencing it.
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			THR_UMTX_UNLOCK(curthread, &m->m_lock);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	mutex_log("mutex_trylock_common %p\n", m);
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		mutex_log2(curthread, m, 1);
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		mutex_log2(curthread, m, 2);
		ret = mutex_self_trylock(m);
	}
	mutex_log("mutex_trylock_common %p (returns %d)\n", m, ret);

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *m)
{
	struct pthread *curthread = tls_get_curthread();
	int ret;

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret != 0))
			return (ret);
	}
	return (mutex_trylock_common(curthread, m));
}

int
_pthread_mutex_trylock(pthread_mutex_t *m)
{
	struct pthread	*curthread = tls_get_curthread();
	int	ret;

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret != 0))
			return (ret);
	}
	return (mutex_trylock_common(curthread, m));
}

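/*
 * Common lock path.  A NULL abstime blocks indefinitely; otherwise the
 * absolute CLOCK_REALTIME deadline is validated and converted to a
 * relative timeout for THR_UMTX_TIMEDLOCK().
 */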
static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
	const struct timespec *abstime)
{
	struct	timespec ts, ts2;
	struct	pthread_mutex *m;
	int	ret = 0;

	m = *mutex;
	mutex_log("mutex_lock_common %p\n", m);
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		mutex_log2(curthread, m, 3);
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		if (abstime == NULL) {
			THR_UMTX_LOCK(curthread, &m->m_lock);
			ret = 0;
		} else if (__predict_false(
		    abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
		    abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = THR_UMTX_TIMEDLOCK(curthread, &m->m_lock, &ts2);
		}
		if (ret == 0) {
			mutex_log2(curthread, m, 4);
			m->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(m);
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
		}
	}
	mutex_log("mutex_lock_common %p (returns %d) lock %d,%d\n",
		  m, ret, m->m_lock, m->m_count);
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	curthread = tls_get_curthread();
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	curthread = tls_get_curthread();
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t * __restrict m,
    const struct timespec * __restrict abs_timeout)
{
	struct pthread *curthread;
	int	ret;

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	curthread = tls_get_curthread();
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abs_timeout));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
	const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int	ret;

	if (__predict_false(m == NULL))
		return (EINVAL);

	curthread = tls_get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abs_timeout));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	if (__predict_false(m == NULL))
		return (EINVAL);
	return (mutex_unlock_common(m));
}

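/*
 * Relock handling when the calling thread already owns the mutex:
 * error-checking mutexes fail with EDEADLK (or time out), normal
 * mutexes deliberately deadlock, and recursive mutexes bump m_count.
 */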
static int
mutex_self_trylock(pthread_mutex_t m)
{
	int	ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

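/*
 * Release a mutex owned by the calling thread.  For a recursive mutex
 * with a positive count this only decrements the count; otherwise the
 * mutex is removed from the owner's queue and the underlying umtx is
 * unlocked, handing the mutex to the next waiting thread, if any.
 */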
static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = tls_get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL)) {
		mutex_log2(curthread, m, 252);
		return (EINVAL);
	}
	mutex_log("mutex_unlock_common %p\n", m);
	if (__predict_false(m->m_owner != curthread)) {
		mutex_log("mutex_unlock_common %p (failedA)\n", m);
		mutex_log2(curthread, m, 253);
		return (EPERM);
	}

	if (__predict_false(m->m_type == PTHREAD_MUTEX_RECURSIVE &&
			    m->m_count > 0)) {
		m->m_count--;
		mutex_log("mutex_unlock_common %p (returns 0, partial)\n", m);
		mutex_log2(curthread, m, 254);
	} else {
		/*
		 * Clear the count in case this is a recursive mutex.
		 */
		m->m_count = 0;
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		mutex_log2(tls_get_curthread(), m, 35);
		MUTEX_INIT_LINK(m);
		mutex_log2(tls_get_curthread(), m, 36);
		/*
		 * Hand off the mutex to the next waiting thread.
		 */
		mutex_log("mutex_unlock_common %p (returns 0) lock %d\n",
			  m, m->m_lock);
		THR_UMTX_UNLOCK(curthread, &m->m_lock);
		mutex_log2(tls_get_curthread(), m, 37);
		mutex_log2(curthread, m, 255);
	}
	return (0);
}

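/*
 * Priority-ceiling accessors; valid only for PTHREAD_PRIO_PROTECT
 * mutexes.  Illustrative usage, not compiled here:
 */
#if 0
	pthread_mutex_t mtx;	/* assume PTHREAD_PRIO_PROTECT, ceiling 10 */
	int old_ceiling;

	pthread_mutex_setprioceiling(&mtx, 20, &old_ceiling);
	/* old_ceiling == 10; the mutex was locked/unlocked internally */
#endif
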
int
_pthread_mutex_getprioceiling(const pthread_mutex_t * __restrict mutex,
    int * __restrict prioceiling)
{
	if ((mutex == NULL) || (*mutex == NULL))
		return (EINVAL);
	if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
		return (EINVAL);
	*prioceiling = (*mutex)->m_prio;
	return (0);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t * __restrict mutex,
    int prioceiling, int * __restrict old_ceiling)
{
	int ret = 0;
	int tmp;

	if ((mutex == NULL) || (*mutex == NULL))
		ret = EINVAL;
	else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
		ret = EINVAL;
	else if ((ret = _pthread_mutex_lock(mutex)) == 0) {
		tmp = (*mutex)->m_prio;
		(*mutex)->m_prio = prioceiling;
		ret = _pthread_mutex_unlock(mutex);
		*old_ceiling = tmp;
	}
	return (ret);
}

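/*
 * Condition-variable support.  Before blocking, the condition variable
 * code saves the recursion count and releases the mutex via
 * _mutex_cv_unlock(), bumping m_refcount so the mutex cannot be
 * destroyed while waiters still reference it; _mutex_cv_lock()
 * reacquires the mutex and restores the saved count.
 */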
int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int	ret;

	if ((ret = _pthread_mutex_lock(m)) == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = tls_get_curthread();
	struct pthread_mutex *m;

	if (__predict_false(mutex == NULL))
		return (EINVAL);
	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	*count = m->m_count;
	m->m_count = 0;
	m->m_refcount++;
	mutex_log2(tls_get_curthread(), m, 45);
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	MUTEX_INIT_LINK(m);
	THR_UMTX_UNLOCK(curthread, &m->m_lock);
	mutex_log2(curthread, m, 250);
	return (0);
}

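/*
 * Unlock any private (libc-internal, delete-safe) mutexes a thread
 * still holds, typically as part of winding the thread down.
 */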
void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

__strong_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__strong_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__strong_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
__strong_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__strong_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);