xref: /openbsd/lib/libc/thread/rthread_sync.c (revision d556a964)
1*d556a964Scheloha /*	$OpenBSD: rthread_sync.c,v 1.6 2024/01/10 04:28:43 cheloha Exp $ */
27e321ac1Sguenther /*
37e321ac1Sguenther  * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
47e321ac1Sguenther  * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
57e321ac1Sguenther  * All Rights Reserved.
67e321ac1Sguenther  *
77e321ac1Sguenther  * Permission to use, copy, modify, and distribute this software for any
87e321ac1Sguenther  * purpose with or without fee is hereby granted, provided that the above
97e321ac1Sguenther  * copyright notice and this permission notice appear in all copies.
107e321ac1Sguenther  *
117e321ac1Sguenther  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
127e321ac1Sguenther  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
137e321ac1Sguenther  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
147e321ac1Sguenther  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
157e321ac1Sguenther  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
167e321ac1Sguenther  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
177e321ac1Sguenther  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
187e321ac1Sguenther  */
197e321ac1Sguenther /*
207e321ac1Sguenther  * Mutexes and conditions - synchronization functions.
217e321ac1Sguenther  */
227e321ac1Sguenther 
237e321ac1Sguenther #include <assert.h>
245be66c01Sguenther #include <errno.h>
255be66c01Sguenther #include <pthread.h>
267e321ac1Sguenther #include <stdlib.h>
277e321ac1Sguenther #include <string.h>
287e321ac1Sguenther #include <unistd.h>
297e321ac1Sguenther 
307e321ac1Sguenther #include "rthread.h"
317e321ac1Sguenther #include "cancel.h"		/* in libc/include */
327e321ac1Sguenther 
/* Serializes lazy initialization of statically-initialized mutexes. */
static _atomic_lock_t static_init_lock = _SPINLOCK_UNLOCKED;
347e321ac1Sguenther 
357e321ac1Sguenther /*
367e321ac1Sguenther  * mutexen
377e321ac1Sguenther  */
387e321ac1Sguenther int
pthread_mutex_init(pthread_mutex_t * mutexp,const pthread_mutexattr_t * attr)397e321ac1Sguenther pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr)
407e321ac1Sguenther {
417e321ac1Sguenther 	struct pthread_mutex *mutex;
427e321ac1Sguenther 
437e321ac1Sguenther 	mutex = calloc(1, sizeof(*mutex));
447e321ac1Sguenther 	if (!mutex)
457e321ac1Sguenther 		return (errno);
467e321ac1Sguenther 	mutex->lock = _SPINLOCK_UNLOCKED;
477e321ac1Sguenther 	TAILQ_INIT(&mutex->lockers);
487e321ac1Sguenther 	if (attr == NULL) {
497e321ac1Sguenther 		mutex->type = PTHREAD_MUTEX_DEFAULT;
507e321ac1Sguenther 		mutex->prioceiling = -1;
517e321ac1Sguenther 	} else {
527e321ac1Sguenther 		mutex->type = (*attr)->ma_type;
537e321ac1Sguenther 		mutex->prioceiling = (*attr)->ma_protocol ==
547e321ac1Sguenther 		    PTHREAD_PRIO_PROTECT ? (*attr)->ma_prioceiling : -1;
557e321ac1Sguenther 	}
567e321ac1Sguenther 	*mutexp = mutex;
577e321ac1Sguenther 
587e321ac1Sguenther 	return (0);
597e321ac1Sguenther }
608ae31f71Sguenther DEF_STRONG(pthread_mutex_init);
617e321ac1Sguenther 
627e321ac1Sguenther int
pthread_mutex_destroy(pthread_mutex_t * mutexp)637e321ac1Sguenther pthread_mutex_destroy(pthread_mutex_t *mutexp)
647e321ac1Sguenther {
657e321ac1Sguenther 	struct pthread_mutex *mutex;
667e321ac1Sguenther 
677e321ac1Sguenther 	assert(mutexp);
687e321ac1Sguenther 	mutex = (struct pthread_mutex *)*mutexp;
697e321ac1Sguenther 	if (mutex) {
707e321ac1Sguenther 		if (mutex->count || mutex->owner != NULL ||
717e321ac1Sguenther 		    !TAILQ_EMPTY(&mutex->lockers)) {
727e321ac1Sguenther #define MSG "pthread_mutex_destroy on mutex with waiters!\n"
737e321ac1Sguenther 			write(2, MSG, sizeof(MSG) - 1);
747e321ac1Sguenther #undef MSG
757e321ac1Sguenther 			return (EBUSY);
767e321ac1Sguenther 		}
777e321ac1Sguenther 		free(mutex);
787e321ac1Sguenther 		*mutexp = NULL;
797e321ac1Sguenther 	}
807e321ac1Sguenther 	return (0);
817e321ac1Sguenther }
828ae31f71Sguenther DEF_STRONG(pthread_mutex_destroy);
837e321ac1Sguenther 
/*
 * Common acquisition path for pthread_mutex_lock(), _trylock(), and
 * _timedlock().  Non-zero 'trywait' means fail with EBUSY rather than
 * block; a non-NULL 'abstime' is an absolute CLOCK_REALTIME deadline
 * for the sleep.  Returns 0 or an errno value.
 */
static int
_rthread_mutex_lock(pthread_mutex_t *mutexp, int trywait,
    const struct timespec *abstime)
{
	struct pthread_mutex *mutex;
	pthread_t self = pthread_self();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization. Note: _thread_mutex_lock() in libc requires
	 * _rthread_mutex_lock() to perform the mutex init when *mutexp
	 * is NULL.
	 */
	if (*mutexp == NULL) {
		_spinlock(&static_init_lock);
		if (*mutexp == NULL)
			ret = pthread_mutex_init(mutexp, NULL);
		_spinunlock(&static_init_lock);
		if (ret != 0)
			return (EINVAL);
	}
	mutex = (struct pthread_mutex *)*mutexp;

	_rthread_debug(5, "%p: mutex_lock %p\n", (void *)self, (void *)mutex);
	_spinlock(&mutex->lock);
	if (mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers)) {
		/* uncontended: take ownership directly */
		assert(mutex->count == 0);
		mutex->owner = self;
	} else if (mutex->owner == self) {
		assert(mutex->count > 0);

		/* already owner?  handle recursive behavior */
		if (mutex->type != PTHREAD_MUTEX_RECURSIVE)
		{
			if (trywait ||
			    mutex->type == PTHREAD_MUTEX_ERRORCHECK) {
				_spinunlock(&mutex->lock);
				return (trywait ? EBUSY : EDEADLK);
			}

			/* self-deadlock is disallowed by strict */
			if (mutex->type == PTHREAD_MUTEX_STRICT_NP &&
			    abstime == NULL)
				abort();

			/* self-deadlock, possibly until timeout */
			while (__thrsleep(self, CLOCK_REALTIME, abstime,
			    &mutex->lock, NULL) != EWOULDBLOCK)
				_spinlock(&mutex->lock);
			/* __thrsleep() released mutex->lock before EWOULDBLOCK */
			return (ETIMEDOUT);
		}
		/* recursive: cap the count to avoid overflow */
		if (mutex->count == INT_MAX) {
			_spinunlock(&mutex->lock);
			return (EAGAIN);
		}
	} else if (trywait) {
		/* try failed */
		_spinunlock(&mutex->lock);
		return (EBUSY);
	} else {
		/* add to the wait queue and block until at the head */
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
		while (mutex->owner != self) {
			/* __thrsleep() atomically drops mutex->lock */
			ret = __thrsleep(self, CLOCK_REALTIME, abstime,
			    &mutex->lock, NULL);
			_spinlock(&mutex->lock);
			assert(mutex->owner != NULL);
			if (ret == EWOULDBLOCK) {
				/*
				 * Timed out; but if unlock handed us the
				 * mutex in the meantime, take it anyway.
				 */
				if (mutex->owner == self)
					break;
				TAILQ_REMOVE(&mutex->lockers, self, waiting);
				_spinunlock(&mutex->lock);
				return (ETIMEDOUT);
			}
		}
	}

	mutex->count++;
	_spinunlock(&mutex->lock);

	return (0);
}
1677e321ac1Sguenther 
/*
 * pthread_mutex_lock(3): block indefinitely until the mutex is ours.
 */
int
pthread_mutex_lock(pthread_mutex_t *p)
{
	return (_rthread_mutex_lock(p, /* trywait */ 0, /* abstime */ NULL));
}
DEF_STRONG(pthread_mutex_lock);
1747e321ac1Sguenther 
/*
 * pthread_mutex_trylock(3): acquire if free, else fail with EBUSY.
 */
int
pthread_mutex_trylock(pthread_mutex_t *p)
{
	return (_rthread_mutex_lock(p, /* trywait */ 1, /* abstime */ NULL));
}
1807e321ac1Sguenther 
/*
 * pthread_mutex_timedlock(3): block until acquired or the absolute
 * CLOCK_REALTIME deadline 'abstime' passes (then ETIMEDOUT).
 */
int
pthread_mutex_timedlock(pthread_mutex_t *p, const struct timespec *abstime)
{
	return (_rthread_mutex_lock(p, /* trywait */ 0, abstime));
}
1867e321ac1Sguenther 
/*
 * Release a mutex.  For a recursive mutex this only decrements the
 * recursion count; ownership is handed off (and one waiter woken)
 * when the count reaches zero.  Returns 0 or an errno value.
 */
int
pthread_mutex_unlock(pthread_mutex_t *mutexp)
{
	pthread_t self = pthread_self();
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;

	_rthread_debug(5, "%p: mutex_unlock %p\n", (void *)self,
	    (void *)mutex);

	/*
	 * Statically-initialized mutex that was never locked: the
	 * outcome depends on what the default mutex type requires.
	 */
	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#elif PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
		return(0);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK ||
		    mutex->type == PTHREAD_MUTEX_RECURSIVE)
			return (EPERM);
		else {
			/*
			 * For mutex type NORMAL our undefined behavior for
			 * unlocking an unlocked mutex is to succeed without
			 * error.  All other undefined behaviors are to
			 * abort() immediately.
			 */
			if (mutex->owner == NULL &&
			    mutex->type == PTHREAD_MUTEX_NORMAL)
				return (0);
			else
				abort();
		}
	}

	if (--mutex->count == 0) {
		pthread_t next;

		/* hand ownership directly to the first queued waiter */
		_spinlock(&mutex->lock);
		mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
		if (next != NULL)
			TAILQ_REMOVE(&mutex->lockers, next, waiting);
		_spinunlock(&mutex->lock);
		if (next != NULL)
			__thrwakeup(next, 1);
	}

	return (0);
}
DEF_STRONG(pthread_mutex_unlock);
2397e321ac1Sguenther 
2407e321ac1Sguenther /*
2417e321ac1Sguenther  * condition variables
2427e321ac1Sguenther  */
2437e321ac1Sguenther int
pthread_cond_init(pthread_cond_t * condp,const pthread_condattr_t * attr)2447e321ac1Sguenther pthread_cond_init(pthread_cond_t *condp, const pthread_condattr_t *attr)
2457e321ac1Sguenther {
2467e321ac1Sguenther 	pthread_cond_t cond;
2477e321ac1Sguenther 
2487e321ac1Sguenther 	cond = calloc(1, sizeof(*cond));
2497e321ac1Sguenther 	if (!cond)
2507e321ac1Sguenther 		return (errno);
2517e321ac1Sguenther 	cond->lock = _SPINLOCK_UNLOCKED;
2527e321ac1Sguenther 	TAILQ_INIT(&cond->waiters);
2537e321ac1Sguenther 	if (attr == NULL)
2547e321ac1Sguenther 		cond->clock = CLOCK_REALTIME;
2557e321ac1Sguenther 	else
2567e321ac1Sguenther 		cond->clock = (*attr)->ca_clock;
2577e321ac1Sguenther 	*condp = cond;
2587e321ac1Sguenther 
2597e321ac1Sguenther 	return (0);
2607e321ac1Sguenther }
2618ae31f71Sguenther DEF_STRONG(pthread_cond_init);
2627e321ac1Sguenther 
2637e321ac1Sguenther int
pthread_cond_destroy(pthread_cond_t * condp)2647e321ac1Sguenther pthread_cond_destroy(pthread_cond_t *condp)
2657e321ac1Sguenther {
2667e321ac1Sguenther 	pthread_cond_t cond;
2677e321ac1Sguenther 
2687e321ac1Sguenther 	assert(condp);
2697e321ac1Sguenther 	cond = *condp;
2707e321ac1Sguenther 	if (cond) {
2717e321ac1Sguenther 		if (!TAILQ_EMPTY(&cond->waiters)) {
2727e321ac1Sguenther #define MSG "pthread_cond_destroy on condvar with waiters!\n"
2737e321ac1Sguenther 			write(2, MSG, sizeof(MSG) - 1);
2747e321ac1Sguenther #undef MSG
2757e321ac1Sguenther 			return (EBUSY);
2767e321ac1Sguenther 		}
2777e321ac1Sguenther 		free(cond);
2787e321ac1Sguenther 	}
2797e321ac1Sguenther 	*condp = NULL;
2807e321ac1Sguenther 
2817e321ac1Sguenther 	return (0);
2827e321ac1Sguenther }
2837e321ac1Sguenther 
/*
 * Wait on 'condp' until signaled/broadcast or until the absolute
 * deadline 'abstime' (measured against the condvar's clock) passes.
 * The caller must own '*mutexp'; it is released while waiting and
 * re-acquired before returning.  This is a delayed cancellation
 * point.  Returns 0, ETIMEDOUT, EINVAL, or EPERM.
 */
int
pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
    const struct timespec *abstime)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;
	pthread_t next;
	int mutex_count;
	int canceled = 0;
	int rv = 0;
	int error;
	PREP_CANCEL_POINT(tib);

	/* statically initialized condvar?  finish the initialization */
	if (!*condp)
		if ((error = pthread_cond_init(condp, NULL)))
			return (error);
	cond = *condp;
	_rthread_debug(5, "%p: cond_timed %p,%p\n", (void *)self,
	    (void *)cond, (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
			return (EPERM);
		else
			abort();
	}

	/* POSIX requires EINVAL for a missing or malformed timeout */
	if (abstime == NULL || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	ENTER_DELAYED_CANCEL_POINT(tib, self);

	_spinlock(&cond->lock);

	/* mark the condvar as being associated with this mutex */
	if (cond->mutex == NULL) {
		cond->mutex = mutex;
		assert(TAILQ_EMPTY(&cond->waiters));
	} else if (cond->mutex != mutex) {
		/* waiters must all use the same mutex with a condvar */
		assert(cond->mutex == mutex);
		_spinunlock(&cond->lock);
		LEAVE_CANCEL_POINT_INNER(tib, 1);
		return (EINVAL);
	} else
		assert(! TAILQ_EMPTY(&cond->waiters));

	/* snag the count in case this is a recursive mutex */
	mutex_count = mutex->count;

	/* transfer from the mutex queue to the condvar queue */
	_spinlock(&mutex->lock);
	self->blocking_cond = cond;
	TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
	_spinunlock(&cond->lock);

	/* wake the next guy blocked on the mutex */
	mutex->count = 0;
	mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
	if (next != NULL) {
		TAILQ_REMOVE(&mutex->lockers, next, waiting);
		__thrwakeup(next, 1);
	}

	/* wait until we're the owner of the mutex again */
	while (mutex->owner != self) {
		/* __thrsleep() atomically drops mutex->lock */
		error = __thrsleep(self, cond->clock, abstime,
		    &mutex->lock, &self->delayed_cancel);

		/*
		 * If abstime == NULL, then we're definitely waiting
		 * on the mutex instead of the condvar, and are
		 * just waiting for mutex ownership, regardless of
		 * why we woke up.
		 */
		if (abstime == NULL) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * If we took a normal signal (not from
		 * cancellation) then we should just go back to
		 * sleep without changing state (timeouts, etc).
		 */
		if ((error == EINTR || error == ECANCELED) &&
		    (tib->tib_canceled == 0 ||
		    (tib->tib_cantcancel & CANCEL_DISABLED))) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * The remaining reasons for waking up (normal
		 * wakeup, timeout, and cancellation) all mean that
		 * we won't be staying in the condvar queue and
		 * we'll no longer time out or be cancelable.
		 */
		abstime = NULL;
		LEAVE_CANCEL_POINT_INNER(tib, 0);

		/*
		 * If we're no longer in the condvar's queue then
		 * we're just waiting for mutex ownership.  Need
		 * cond->lock here to prevent race with cond_signal().
		 */
		_spinlock(&cond->lock);
		if (self->blocking_cond == NULL) {
			_spinunlock(&cond->lock);
			_spinlock(&mutex->lock);
			continue;
		}
		assert(self->blocking_cond == cond);

		/* if timeout or canceled, make note of that */
		if (error == EWOULDBLOCK)
			rv = ETIMEDOUT;
		else if (error == EINTR)
			canceled = 1;

		/* transfer between the queues */
		TAILQ_REMOVE(&cond->waiters, self, waiting);
		assert(mutex == cond->mutex);
		if (TAILQ_EMPTY(&cond->waiters))
			cond->mutex = NULL;
		self->blocking_cond = NULL;
		_spinunlock(&cond->lock);
		_spinlock(&mutex->lock);

		/* mutex unlocked right now? */
		if (mutex->owner == NULL &&
		    TAILQ_EMPTY(&mutex->lockers)) {
			assert(mutex->count == 0);
			mutex->owner = self;
			break;
		}
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
	}

	/* restore the mutex's count */
	mutex->count = mutex_count;
	_spinunlock(&mutex->lock);

	LEAVE_CANCEL_POINT_INNER(tib, canceled);

	return (rv);
}
4407e321ac1Sguenther 
/*
 * Wait on 'condp' with no timeout.  The caller must own '*mutexp';
 * it is released while waiting and re-acquired before returning.
 * This is a delayed cancellation point.  Same protocol as
 * pthread_cond_timedwait() minus all timeout handling.
 * Returns 0, EINVAL, or EPERM.
 */
int
pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;
	pthread_t next;
	int mutex_count;
	int canceled = 0;
	int error;
	PREP_CANCEL_POINT(tib);

	/* statically initialized condvar?  finish the initialization */
	if (!*condp)
		if ((error = pthread_cond_init(condp, NULL)))
			return (error);
	cond = *condp;
	_rthread_debug(5, "%p: cond_wait %p,%p\n", (void *)self,
	    (void *)cond, (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
			return (EPERM);
		else
			abort();
	}

	ENTER_DELAYED_CANCEL_POINT(tib, self);

	_spinlock(&cond->lock);

	/* mark the condvar as being associated with this mutex */
	if (cond->mutex == NULL) {
		cond->mutex = mutex;
		assert(TAILQ_EMPTY(&cond->waiters));
	} else if (cond->mutex != mutex) {
		/* waiters must all use the same mutex with a condvar */
		assert(cond->mutex == mutex);
		_spinunlock(&cond->lock);
		LEAVE_CANCEL_POINT_INNER(tib, 1);
		return (EINVAL);
	} else
		assert(! TAILQ_EMPTY(&cond->waiters));

	/* snag the count in case this is a recursive mutex */
	mutex_count = mutex->count;

	/* transfer from the mutex queue to the condvar queue */
	_spinlock(&mutex->lock);
	self->blocking_cond = cond;
	TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
	_spinunlock(&cond->lock);

	/* wake the next guy blocked on the mutex */
	mutex->count = 0;
	mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
	if (next != NULL) {
		TAILQ_REMOVE(&mutex->lockers, next, waiting);
		__thrwakeup(next, 1);
	}

	/* wait until we're the owner of the mutex again */
	while (mutex->owner != self) {
		/* __thrsleep() atomically drops mutex->lock */
		error = __thrsleep(self, 0, NULL, &mutex->lock,
		    &self->delayed_cancel);

		/*
		 * If we took a normal signal (not from
		 * cancellation) then we should just go back to
		 * sleep without changing state (timeouts, etc).
		 */
		if ((error == EINTR || error == ECANCELED) &&
		    (tib->tib_canceled == 0 ||
		    (tib->tib_cantcancel & CANCEL_DISABLED))) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * The remaining reasons for waking up (normal
		 * wakeup and cancellation) all mean that we won't
		 * be staying in the condvar queue and we'll no
		 * longer be cancelable.
		 */
		LEAVE_CANCEL_POINT_INNER(tib, 0);

		/*
		 * If we're no longer in the condvar's queue then
		 * we're just waiting for mutex ownership.  Need
		 * cond->lock here to prevent race with cond_signal().
		 */
		_spinlock(&cond->lock);
		if (self->blocking_cond == NULL) {
			_spinunlock(&cond->lock);
			_spinlock(&mutex->lock);
			continue;
		}
		assert(self->blocking_cond == cond);

		/* if canceled, make note of that */
		if (error == EINTR)
			canceled = 1;

		/* transfer between the queues */
		TAILQ_REMOVE(&cond->waiters, self, waiting);
		assert(mutex == cond->mutex);
		if (TAILQ_EMPTY(&cond->waiters))
			cond->mutex = NULL;
		self->blocking_cond = NULL;
		_spinunlock(&cond->lock);
		_spinlock(&mutex->lock);

		/* mutex unlocked right now? */
		if (mutex->owner == NULL &&
		    TAILQ_EMPTY(&mutex->lockers)) {
			assert(mutex->count == 0);
			mutex->owner = self;
			break;
		}
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
	}

	/* restore the mutex's count */
	mutex->count = mutex_count;
	_spinunlock(&mutex->lock);

	LEAVE_CANCEL_POINT_INNER(tib, canceled);

	return (0);
}
5777e321ac1Sguenther 
5787e321ac1Sguenther 
/*
 * Wake one thread waiting on 'condp'.  Instead of waking it to
 * recontend for the mutex, the waiter is handed the mutex directly
 * if it is free, or moved onto the mutex's wait queue otherwise,
 * so only a thread that can actually run is ever woken.
 */
int
pthread_cond_signal(pthread_cond_t *condp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex;
	pthread_t thread;
	int wakeup;

	/* uninitialized?  Then there's obviously no one waiting! */
	if (!*condp)
		return 0;

	cond = *condp;
	_rthread_debug(5, "%p: cond_signal %p,%p\n", (void *)pthread_self(),
	    (void *)cond, (void *)cond->mutex);
	_spinlock(&cond->lock);
	thread = TAILQ_FIRST(&cond->waiters);
	if (thread == NULL) {
		/* no waiters implies no associated mutex */
		assert(cond->mutex == NULL);
		_spinunlock(&cond->lock);
		return (0);
	}

	assert(thread->blocking_cond == cond);
	TAILQ_REMOVE(&cond->waiters, thread, waiting);
	thread->blocking_cond = NULL;

	mutex = cond->mutex;
	assert(mutex != NULL);
	/* last waiter gone: disassociate the mutex */
	if (TAILQ_EMPTY(&cond->waiters))
		cond->mutex = NULL;

	/* link locks to prevent race with timedwait */
	_spinlock(&mutex->lock);
	_spinunlock(&cond->lock);

	/* hand off the mutex if it's free, else queue the waiter on it */
	wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers);
	if (wakeup)
		mutex->owner = thread;
	else
		TAILQ_INSERT_TAIL(&mutex->lockers, thread, waiting);
	_spinunlock(&mutex->lock);
	if (wakeup)
		__thrwakeup(thread, 1);

	return (0);
}
6267e321ac1Sguenther 
/*
 * Wake all threads waiting on 'condp'.  The condvar's entire wait
 * queue is spliced onto the associated mutex's wait queue in one
 * operation; at most one thread (the one that can take the mutex
 * right now) is actually made runnable, avoiding a thundering herd.
 */
int
pthread_cond_broadcast(pthread_cond_t *condp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex;
	pthread_t thread;
	pthread_t p;
	int wakeup;

	/* uninitialized?  Then there's obviously no one waiting! */
	if (!*condp)
		return 0;

	cond = *condp;
	_rthread_debug(5, "%p: cond_broadcast %p,%p\n", (void *)pthread_self(),
	    (void *)cond, (void *)cond->mutex);
	_spinlock(&cond->lock);
	thread = TAILQ_FIRST(&cond->waiters);
	if (thread == NULL) {
		/* no waiters implies no associated mutex */
		assert(cond->mutex == NULL);
		_spinunlock(&cond->lock);
		return (0);
	}

	mutex = cond->mutex;
	assert(mutex != NULL);

	/* walk the list, clearing the "blocked on condvar" pointer */
	p = thread;
	do
		p->blocking_cond = NULL;
	while ((p = TAILQ_NEXT(p, waiting)) != NULL);

	/*
	 * We want to transfer all the threads from the condvar's list
	 * to the mutex's list.  The TAILQ_* macros don't let us do that
	 * efficiently, so this is direct list surgery.  Pay attention!
	 */

	/* 1) attach the first thread to the end of the mutex's list */
	_spinlock(&mutex->lock);
	wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers);
	thread->waiting.tqe_prev = mutex->lockers.tqh_last;
	*(mutex->lockers.tqh_last) = thread;

	/* 2) fix up the end pointer for the mutex's list */
	mutex->lockers.tqh_last = cond->waiters.tqh_last;

	if (wakeup) {
		/* mutex was free: hand it to the first waiter and wake it */
		TAILQ_REMOVE(&mutex->lockers, thread, waiting);
		mutex->owner = thread;
		_spinunlock(&mutex->lock);
		__thrwakeup(thread, 1);
	} else
		_spinunlock(&mutex->lock);

	/* 3) reset the condvar's list and mutex pointer */
	TAILQ_INIT(&cond->waiters);
	assert(cond->mutex != NULL);
	cond->mutex = NULL;
	_spinunlock(&cond->lock);

	return (0);
}
691