1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by John Birrell.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  */
34 
#include "namespace.h"
#include <machine/tls.h>
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"
45 
#if defined(_PTHREADS_INVARIANTS)
/* Clear the queue linkage so MUTEX_ASSERT_* can detect list membership. */
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
/*
 * No trailing semicolon after "while (0)": the invocation site supplies
 * it, and an extra one here would break use inside an unbraced if/else.
 */
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
/* Release the storage of a mutex that is known to be unreferenced. */
#define	MUTEX_DESTROY(m) do {		\
	free(m);			\
} while (0)
75 
/* Serializes one-time initialization of statically allocated mutexes. */
umtx_t	_mutex_static_lock;

/*
 * Prototypes
 */
static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

/* Public (non delete-safe) entry points, aliased to pthread_* below. */
int __pthread_mutex_init(pthread_mutex_t *mutex,
	const pthread_mutexattr_t *mutex_attr);
int __pthread_mutex_trylock(pthread_mutex_t *mutex);
int __pthread_mutex_lock(pthread_mutex_t *mutex);
int __pthread_mutex_timedlock(pthread_mutex_t *mutex,
	const struct timespec *abs_timeout);
92 
93 static int
94 mutex_check_attr(const struct pthread_mutex_attr *attr)
95 {
96 	if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
97 	    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
98 		return (EINVAL);
99 	if (attr->m_protocol < PTHREAD_PRIO_NONE ||
100 	    attr->m_protocol > PTHREAD_PRIO_PROTECT)
101 		return (EINVAL);
102 	return (0);
103 }
104 
105 static void
106 mutex_init_body(struct pthread_mutex *pmutex,
107     const struct pthread_mutex_attr *attr, int private)
108 {
109 	_thr_umtx_init(&pmutex->m_lock);
110 	pmutex->m_type = attr->m_type;
111 	pmutex->m_protocol = attr->m_protocol;
112 	TAILQ_INIT(&pmutex->m_queue);
113 	pmutex->m_owner = NULL;
114 	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
115 	if (private)
116 		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
117 	pmutex->m_count = 0;
118 	pmutex->m_refcount = 0;
119 	if (attr->m_protocol == PTHREAD_PRIO_PROTECT)
120 		pmutex->m_prio = attr->m_ceiling;
121 	else
122 		pmutex->m_prio = -1;
123 	pmutex->m_saved_prio = 0;
124 	MUTEX_INIT_LINK(pmutex);
125 }
126 
127 static int
128 mutex_init(pthread_mutex_t *mutex,
129     const pthread_mutexattr_t *mutex_attr, int private)
130 {
131 	const struct pthread_mutex_attr *attr;
132 	struct pthread_mutex *pmutex;
133 	int error;
134 
135 	if (mutex_attr == NULL) {
136 		attr = &_pthread_mutexattr_default;
137 	} else {
138 		attr = *mutex_attr;
139 		error = mutex_check_attr(attr);
140 		if (error != 0)
141 			return (error);
142 	}
143 	if ((pmutex = (pthread_mutex_t)
144 		malloc(sizeof(struct pthread_mutex))) == NULL)
145 		return (ENOMEM);
146 	mutex_init_body(pmutex, attr, private);
147 	*mutex = pmutex;
148 	return (0);
149 }
150 
151 static int
152 init_static(struct pthread *thread, pthread_mutex_t *mutex)
153 {
154 	int ret;
155 
156 	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
157 
158 	if (*mutex == NULL)
159 		ret = mutex_init(mutex, NULL, 0);
160 	else
161 		ret = 0;
162 	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
163 
164 	return (ret);
165 }
166 
167 static int
168 init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
169 {
170 	int ret;
171 
172 	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
173 
174 	if (*mutex == NULL)
175 		ret = mutex_init(mutex, NULL, 1);
176 	else
177 		ret = 0;
178 
179 	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
180 
181 	return (ret);
182 }
183 
/* libc-internal init: the mutex is created private (delete safe). */
int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 1));
}
190 
/* Application-visible init: the mutex is created non-private. */
int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 0));
}
197 
/*
 * Re-initialize an already-allocated mutex in place, clearing all
 * ownership/waiter state; the type, protocol and flags are preserved.
 */
int
_mutex_reinit(pthread_mutex_t *mutex)
{
	_thr_umtx_init(&(*mutex)->m_lock);
	TAILQ_INIT(&(*mutex)->m_queue);
	MUTEX_INIT_LINK(*mutex);
	(*mutex)->m_owner = NULL;
	(*mutex)->m_count = 0;
	(*mutex)->m_refcount = 0;
	/*
	 * NOTE(review): mutex_init_body() uses -1 for non
	 * priority-protect mutexes but 0 is stored here -- confirm
	 * whether that difference is intentional.
	 */
	(*mutex)->m_prio = 0;
	(*mutex)->m_saved_prio = 0;
	return (0);
}
211 
/*
 * Called in the child after fork(): reset the lock word of every mutex
 * the (sole surviving) thread owns to the plain LOCKED state.
 */
void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock = UMTX_LOCKED;
}
220 
221 int
222 _pthread_mutex_destroy(pthread_mutex_t *mutex)
223 {
224 	struct pthread *curthread = tls_get_curthread();
225 	pthread_mutex_t m;
226 	int ret = 0;
227 
228 	if (mutex == NULL)
229 		ret = EINVAL;
230 	else if (*mutex == NULL)
231 		ret = 0;
232 	else {
233 		/*
234 		 * Try to lock the mutex structure, we only need to
235 		 * try once, if failed, the mutex is in used.
236 		 */
237 		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
238 		if (ret)
239 			return (ret);
240 
241 		/*
242 		 * Check mutex other fields to see if this mutex is
243 		 * in use. Mostly for prority mutex types, or there
244 		 * are condition variables referencing it.
245 		 */
246 		if (((*mutex)->m_owner != NULL) ||
247 		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
248 		    ((*mutex)->m_refcount != 0)) {
249 			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
250 			ret = EBUSY;
251 		} else {
252 			/*
253 			 * Save a pointer to the mutex so it can be free'd
254 			 * and set the caller's pointer to NULL:
255 			 */
256 			m = *mutex;
257 			*mutex = NULL;
258 
259 			/* Unlock the mutex structure: */
260 			THR_UMTX_UNLOCK(curthread, &m->m_lock);
261 
262 			/*
263 			 * Free the memory allocated for the mutex
264 			 * structure:
265 			 */
266 			MUTEX_ASSERT_NOT_OWNED(m);
267 			MUTEX_DESTROY(m);
268 		}
269 	}
270 
271 	/* Return the completion status: */
272 	return (ret);
273 }
274 
275 static int
276 mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
277 {
278 	struct pthread_mutex *m;
279 	int ret;
280 
281 	m = *mutex;
282 	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
283 	if (ret == 0) {
284 		m->m_owner = curthread;
285 		/* Add to the list of owned mutexes: */
286 		MUTEX_ASSERT_NOT_OWNED(m);
287 		TAILQ_INSERT_TAIL(&curthread->mutexq,
288 		    m, m_qe);
289 	} else if (m->m_owner == curthread) {
290 		ret = mutex_self_trylock(m);
291 	} /* else {} */
292 
293 	return (ret);
294 }
295 
296 int
297 __pthread_mutex_trylock(pthread_mutex_t *m)
298 {
299 	struct pthread *curthread = tls_get_curthread();
300 	int ret;
301 
302 	if (__predict_false(m == NULL))
303 		return(EINVAL);
304 	/*
305 	 * If the mutex is statically initialized, perform the dynamic
306 	 * initialization:
307 	 */
308 	if (__predict_false(*m == NULL)) {
309 		ret = init_static(curthread, m);
310 		if (__predict_false(ret != 0))
311 			return (ret);
312 	}
313 	return (mutex_trylock_common(curthread, m));
314 }
315 
316 int
317 _pthread_mutex_trylock(pthread_mutex_t *m)
318 {
319 	struct pthread	*curthread = tls_get_curthread();
320 	int	ret = 0;
321 
322 	/*
323 	 * If the mutex is statically initialized, perform the dynamic
324 	 * initialization marking the mutex private (delete safe):
325 	 */
326 	if (__predict_false(*m == NULL)) {
327 		ret = init_static_private(curthread, m);
328 		if (__predict_false(ret != 0))
329 			return (ret);
330 	}
331 	return (mutex_trylock_common(curthread, m));
332 }
333 
/*
 * Blocking acquisition, optionally bounded by an absolute
 * CLOCK_REALTIME deadline (abstime == NULL means wait forever).
 * Returns 0, EINVAL (malformed timespec), ETIMEDOUT, or a per-type
 * self-lock error when curthread already owns the mutex.
 */
static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
	const struct timespec * abstime)
{
	struct  timespec ts, ts2;
	struct  pthread_mutex *m;
	int	ret = 0;

	m = *mutex;
	/* Fast path: try to take the lock without blocking. */
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq,
		    m, m_qe);
	} else if (m->m_owner == curthread) {
		/* Recursive acquisition: per-type handling. */
		ret = mutex_self_lock(m, abstime);
	} else {
		if (abstime == NULL) {
			/* Untimed: block until the lock is granted. */
			THR_UMTX_LOCK(curthread, &m->m_lock);
			ret = 0;
		} else if (__predict_false(
			abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			abstime->tv_nsec >= 1000000000)) {
				/* Malformed timespec. */
				ret = EINVAL;
		} else {
			/* Convert the absolute deadline to a relative wait. */
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = THR_UMTX_TIMEDLOCK(curthread,
				&m->m_lock, &ts2);
			/*
			 * Timed out wait is not restarted if
			 * it was interrupted, not worth to do it.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
		if (ret == 0) {
			m->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(m);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    m, m_qe);
		}
	}
	return (ret);
}
382 
383 int
384 __pthread_mutex_lock(pthread_mutex_t *m)
385 {
386 	struct pthread *curthread;
387 	int	ret;
388 
389 	if (__predict_false(m == NULL))
390 		return(EINVAL);
391 
392 	/*
393 	 * If the mutex is statically initialized, perform the dynamic
394 	 * initialization:
395 	 */
396 	curthread = tls_get_curthread();
397 	if (__predict_false(*m == NULL)) {
398 		ret = init_static(curthread, m);
399 		if (__predict_false(ret))
400 			return (ret);
401 	}
402 	return (mutex_lock_common(curthread, m, NULL));
403 }
404 
405 int
406 _pthread_mutex_lock(pthread_mutex_t *m)
407 {
408 	struct pthread *curthread;
409 	int	ret;
410 
411 	if (__predict_false(m == NULL))
412 		return(EINVAL);
413 
414 	/*
415 	 * If the mutex is statically initialized, perform the dynamic
416 	 * initialization marking it private (delete safe):
417 	 */
418 	curthread = tls_get_curthread();
419 	if (__predict_false(*m == NULL)) {
420 		ret = init_static_private(curthread, m);
421 		if (__predict_false(ret))
422 			return (ret);
423 	}
424 	return (mutex_lock_common(curthread, m, NULL));
425 }
426 
427 int
428 __pthread_mutex_timedlock(pthread_mutex_t *m,
429 	const struct timespec *abs_timeout)
430 {
431 	struct pthread *curthread;
432 	int	ret;
433 
434 	if (__predict_false(m == NULL))
435 		return(EINVAL);
436 
437 	/*
438 	 * If the mutex is statically initialized, perform the dynamic
439 	 * initialization:
440 	 */
441 	curthread = tls_get_curthread();
442 	if (__predict_false(*m == NULL)) {
443 		ret = init_static(curthread, m);
444 		if (__predict_false(ret))
445 			return (ret);
446 	}
447 	return (mutex_lock_common(curthread, m, abs_timeout));
448 }
449 
450 int
451 _pthread_mutex_timedlock(pthread_mutex_t *m,
452 	const struct timespec *abs_timeout)
453 {
454 	struct pthread *curthread;
455 	int	ret;
456 
457 	if (__predict_false(m == NULL))
458 		return(EINVAL);
459 
460 	curthread = tls_get_curthread();
461 
462 	/*
463 	 * If the mutex is statically initialized, perform the dynamic
464 	 * initialization marking it private (delete safe):
465 	 */
466 	if (__predict_false(*m == NULL)) {
467 		ret = init_static_private(curthread, m);
468 		if (__predict_false(ret))
469 			return (ret);
470 	}
471 	return (mutex_lock_common(curthread, m, abs_timeout));
472 }
473 
/* Unlock entry point; rejects a NULL handle, then defers to the core. */
int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (__predict_false(m == NULL) ?
	    EINVAL : mutex_unlock_common(m));
}
481 
482 static int
483 mutex_self_trylock(pthread_mutex_t m)
484 {
485 	int	ret;
486 
487 	switch (m->m_type) {
488 	/* case PTHREAD_MUTEX_DEFAULT: */
489 	case PTHREAD_MUTEX_ERRORCHECK:
490 	case PTHREAD_MUTEX_NORMAL:
491 		ret = EBUSY;
492 		break;
493 
494 	case PTHREAD_MUTEX_RECURSIVE:
495 		/* Increment the lock count: */
496 		if (m->m_count + 1 > 0) {
497 			m->m_count++;
498 			ret = 0;
499 		} else
500 			ret = EAGAIN;
501 		break;
502 
503 	default:
504 		/* Trap invalid mutex types; */
505 		ret = EINVAL;
506 	}
507 
508 	return (ret);
509 }
510 
511 static int
512 mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
513 {
514 	struct timespec ts1, ts2;
515 	int ret;
516 
517 	switch (m->m_type) {
518 	/* case PTHREAD_MUTEX_DEFAULT: */
519 	case PTHREAD_MUTEX_ERRORCHECK:
520 		if (abstime) {
521 			clock_gettime(CLOCK_REALTIME, &ts1);
522 			TIMESPEC_SUB(&ts2, abstime, &ts1);
523 			__sys_nanosleep(&ts2, NULL);
524 			ret = ETIMEDOUT;
525 		} else {
526 			/*
527 			 * POSIX specifies that mutexes should return
528 			 * EDEADLK if a recursive lock is detected.
529 			 */
530 			ret = EDEADLK;
531 		}
532 		break;
533 
534 	case PTHREAD_MUTEX_NORMAL:
535 		/*
536 		 * What SS2 define as a 'normal' mutex.  Intentionally
537 		 * deadlock on attempts to get a lock you already own.
538 		 */
539 		ret = 0;
540 		if (abstime) {
541 			clock_gettime(CLOCK_REALTIME, &ts1);
542 			TIMESPEC_SUB(&ts2, abstime, &ts1);
543 			__sys_nanosleep(&ts2, NULL);
544 			ret = ETIMEDOUT;
545 		} else {
546 			ts1.tv_sec = 30;
547 			ts1.tv_nsec = 0;
548 			for (;;)
549 				__sys_nanosleep(&ts1, NULL);
550 		}
551 		break;
552 
553 	case PTHREAD_MUTEX_RECURSIVE:
554 		/* Increment the lock count: */
555 		if (m->m_count + 1 > 0) {
556 			m->m_count++;
557 			ret = 0;
558 		} else
559 			ret = EAGAIN;
560 		break;
561 
562 	default:
563 		/* Trap invalid mutex types; */
564 		ret = EINVAL;
565 	}
566 
567 	return (ret);
568 }
569 
/*
 * Core unlock: validates ownership, unwinds one recursion level if
 * applicable, otherwise releases the lock word and hands the mutex
 * off to the next waiter.
 */
static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = tls_get_curthread();
	struct pthread_mutex *m;

	/* An uninitialized (still static) mutex cannot be locked. */
	if (__predict_false((m = *mutex)== NULL))
		return (EINVAL);
	/* Only the owning thread may unlock. */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		/* Still recursively held: just drop one level. */
		m->m_count--;
	} else {
		/*
		 * Clear the count in case this is a recursive mutex.
		 */
		m->m_count = 0;
		m->m_owner = NULL;
		/* Remove the mutex from the threads queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		MUTEX_INIT_LINK(m);
		/*
		 * Hand off the mutex to the next waiting thread.
		 */
		THR_UMTX_UNLOCK(curthread, &m->m_lock);
	}
	return (0);
}
602 
603 int
604 _pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
605 			      int *prioceiling)
606 {
607 	if ((mutex == NULL) || (*mutex == NULL))
608 		return (EINVAL);
609 	if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
610 		return (EINVAL);
611 	*prioceiling = (*mutex)->m_prio;
612 	return (0);
613 }
614 
615 int
616 _pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
617 			      int prioceiling, int *old_ceiling)
618 {
619 	int ret = 0;
620 	int tmp;
621 
622 	if ((mutex == NULL) || (*mutex == NULL))
623 		ret = EINVAL;
624 	else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
625 		ret = EINVAL;
626 	else if ((ret = _pthread_mutex_lock(mutex)) == 0) {
627 		tmp = (*mutex)->m_prio;
628 		(*mutex)->m_prio = prioceiling;
629 		ret = _pthread_mutex_unlock(mutex);
630 		*old_ceiling = tmp;
631 	}
632 	return(ret);
633 }
634 
635 int
636 _mutex_cv_lock(pthread_mutex_t *m, int count)
637 {
638 	int	ret;
639 
640 	if ((ret = _pthread_mutex_lock(m)) == 0) {
641 		(*m)->m_refcount--;
642 		(*m)->m_count += count;
643 	}
644 	return (ret);
645 }
646 
/*
 * Release a mutex on entry to a condition-variable wait: save the
 * recursion count through *count for _mutex_cv_lock() to restore, and
 * bump m_refcount so the mutex cannot be destroyed while the condition
 * variable still references it.
 */
int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = tls_get_curthread();
	struct pthread_mutex *m;

	if (__predict_false(mutex == NULL))
		return (EINVAL);
	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);
	/* Only the owning thread may release it. */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	*count = m->m_count;
	m->m_count = 0;
	m->m_refcount++;
	m->m_owner = NULL;
	/* Remove the mutex from the threads queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	MUTEX_INIT_LINK(m);
	THR_UMTX_UNLOCK(curthread, &m->m_lock);
	return (0);
}
671 
/*
 * Unlock every private (delete-safe) mutex the thread still owns.
 * m_next is captured before unlocking because the unlock removes the
 * mutex from the queue.  Passing &m (a local) is safe here because
 * mutex_unlock_common() only reads through the pointer, never stores
 * to it.
 */
void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}
683 
/*
 * Export the standard pthread_* names: the double-underscore (public,
 * non delete-safe) variants back the lock/init entry points.
 */
__strong_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__strong_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__strong_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
__strong_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__strong_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);
695