/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libpthread/thread/thr_rwlock.c,v 1.14 2004/01/08 15:37:09 deischen Exp $
 */

#include "namespace.h"
/*
 * NOTE(review): the system headers below were stripped to bare "#include"
 * directives in this copy (the <...> targets were lost, likely by markup
 * stripping).  Restored from what this file demonstrably uses:
 * tls_get_curthread() -> <machine/tls.h>, errno codes -> <errno.h>,
 * INT_MAX -> <limits.h>, malloc/free -> <stdlib.h>.
 */
#include <machine/tls.h>
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include "un-namespace.h"
#include "thr_private.h"

#ifdef _PTHREADS_DEBUGGING
/* restored: vsnprintf/va_list need <stdio.h>/<stdarg.h>; the other two
 * match the upstream debug-logging header set — TODO confirm. */
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <sys/file.h>
#endif

/* maximum number of times a read lock may be obtained */
#define MAX_READ_LOCKS		(INT_MAX - 1)

/* protects lazy initialization of statically-initialized rwlocks */
umtx_t	_rwlock_static_lock;

#ifdef _PTHREADS_DEBUGGING

/*
 * Format a message and hand it to the thread-library debug log.
 */
static void
rwlock_log(const char *ctl, ...)
{
	char buf[256];
	va_list va;
	size_t len;

	va_start(va, ctl);
	len = vsnprintf(buf, sizeof(buf), ctl, va);
	va_end(va);
	_thr_log(buf, len);
}

#else

/* no-op stub when debugging is compiled out */
static __inline void
rwlock_log(const char *ctl __unused, ...)
{
}

#endif

/*
 * Allocate and initialize a rwlock object: a monitor mutex, a condvar
 * for blocked readers and one for blocked writers.  On success stores
 * the new object through *rwlock and returns 0; on failure releases
 * everything acquired so far and returns an error number (ENOMEM, or
 * whatever the mutex/cond init returned).
 */
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
{
	pthread_rwlock_t prwlock;
	int ret;

	/* allocate rwlock object */
	prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));
	if (prwlock == NULL)
		return (ENOMEM);

	/* initialize the lock */
	if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0) {
		free(prwlock);
	} else {
		/* initialize the read condition signal */
		ret = _pthread_cond_init(&prwlock->read_signal, NULL);
		if (ret != 0) {
			_pthread_mutex_destroy(&prwlock->lock);
			free(prwlock);
		} else {
			/* initialize the write condition signal */
			ret = _pthread_cond_init(&prwlock->write_signal, NULL);
			if (ret != 0) {
				_pthread_cond_destroy(&prwlock->read_signal);
				_pthread_mutex_destroy(&prwlock->lock);
				free(prwlock);
			} else {
				/*
				 * success.  state > 0 counts readers,
				 * state == -1 means write-locked.
				 */
				prwlock->state = 0;
				prwlock->blocked_writers = 0;
				*rwlock = prwlock;
			}
		}
	}

	return (ret);
}

#if 0
void
_rwlock_reinit(pthread_rwlock_t prwlock)
{
	_mutex_reinit(&prwlock->lock);
	_cond_reinit(prwlock->read_signal);
	prwlock->state = 0;
	prwlock->blocked_writers = 0;
}
#endif

/*
 * Destroy a rwlock.  A NULL handle is accepted (returns 0); a NULL
 * pointer argument is EINVAL.  The caller must ensure no thread still
 * holds or waits on the lock.
 */
int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
	int ret;

	if (rwlock == NULL) {
		ret = EINVAL;
	} else if (*rwlock == NULL) {
		ret = 0;
	} else {
		pthread_rwlock_t prwlock;

		prwlock = *rwlock;
		rwlock_log("rwlock_destroy %p\n", prwlock);

		_pthread_mutex_destroy(&prwlock->lock);
		_pthread_cond_destroy(&prwlock->read_signal);
		_pthread_cond_destroy(&prwlock->write_signal);
		free(prwlock);

		*rwlock = NULL;

		ret = 0;
	}
	return (ret);
}

/*
 * Resolve PTHREAD_RWLOCK_INITIALIZER: under the global static-init lock,
 * allocate the real object exactly once even when several threads race
 * to use the same statically-initialized rwlock.
 */
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == NULL)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

int
_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	/* clear first so a failed rwlock_init() never leaves a stale handle */
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

/*
 * Common code for pthread_rwlock_rdlock() and pthread_rwlock_timedrdlock().
 * abstime == NULL means block indefinitely.  Returns 0 on success or an
 * error number (EINVAL, EAGAIN on reader-count overflow, or whatever the
 * mutex/cond operations return, e.g. ETIMEDOUT).
 */
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = tls_get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);
		prwlock = *rwlock;
	}
	rwlock_log("rwlock_rdlock_common %p\n", prwlock);

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0) {
		rwlock_log("rwlock_rdlock_common %p (failedA)\n", prwlock);
		return (ret);
	}

	/* check lock count */
	if (prwlock->state == MAX_READ_LOCKS) {
		_pthread_mutex_unlock(&prwlock->lock);
		rwlock_log("rwlock_rdlock_common %p (failedB)\n", prwlock);
		return (EAGAIN);
	}

	curthread = tls_get_curthread();
	if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		;	/* nothing needed */
	} else {
		/*
		 * Give writers priority over readers
		 *
		 * WARNING: pthread_cond*() temporarily releases the
		 *	    mutex, so the predicate must be rechecked
		 *	    on each wakeup.
		 */
		while (prwlock->blocked_writers || prwlock->state < 0) {
			if (abstime) {
				ret = _pthread_cond_timedwait(
					&prwlock->read_signal,
					&prwlock->lock, abstime);
			} else {
				ret = _pthread_cond_wait(
					&prwlock->read_signal,
					&prwlock->lock);
			}
			if (ret != 0) {
				/* can't do a whole lot if this fails */
				_pthread_mutex_unlock(&prwlock->lock);
				rwlock_log("rwlock_rdlock_common %p "
					   "(failedC)\n", prwlock);
				return (ret);
			}
		}
	}

	curthread->rdlock_count++;
	prwlock->state++; /* indicate we are locked for reading */

	/*
	 * Something is really wrong if this call fails.  Returning
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	_pthread_mutex_unlock(&prwlock->lock);
	rwlock_log("rwlock_rdlock_common %p (return %d)\n", prwlock, ret);

	return (ret);
}

/* Blocking read lock: wait forever. */
int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

/* Read lock with an absolute-time deadline. */
int
_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
	const struct timespec *abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}

/*
 * Non-blocking read lock.  Returns EBUSY instead of waiting when writers
 * hold or are queued for the lock; still succeeds for a thread that
 * already holds a read lock (recursive-rdlock deadlock avoidance, see
 * rwlock_rdlock_common()).
 */
int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = tls_get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);
		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	curthread = tls_get_curthread();
	if (prwlock->state == MAX_READ_LOCKS)
		ret = EAGAIN;
	else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/* see comment for pthread_rwlock_rdlock() */
		curthread->rdlock_count++;
		prwlock->state++;
	}
	/* give writers priority over readers */
	else if (prwlock->blocked_writers || prwlock->state < 0)
		ret = EBUSY;
	else {
		curthread->rdlock_count++;
		prwlock->state++; /* indicate we are locked for reading */
	}

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

/*
 * Non-blocking write lock: succeeds only when the lock is completely
 * free (state == 0), otherwise EBUSY.
 */
int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = tls_get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);
		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	if (prwlock->state != 0)
		ret = EBUSY;
	else
		/* indicate we are locked for writing */
		prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

/*
 * Release one read lock or the write lock.  Last reader out signals a
 * blocked writer; a departing writer prefers a queued writer, otherwise
 * wakes all blocked readers.  EINVAL if the lock is not held at all.
 */
int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	if (prwlock == NULL)
		return (EINVAL);
	rwlock_log("rwlock_unlock %p\n", prwlock);

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	curthread = tls_get_curthread();
	if (prwlock->state > 0) {
		/*
		 * Unlock reader
		 */
		curthread->rdlock_count--;
		prwlock->state--;
		if (prwlock->state == 0 && prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
	} else if (prwlock->state < 0) {
		/*
		 * unlock writer
		 */
		prwlock->state = 0;
		if (prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
		else
			ret = _pthread_cond_broadcast(&prwlock->read_signal);
	} else {
		ret = EINVAL;
	}

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);
	rwlock_log("rwlock_unlock %p (return %d)\n", prwlock, ret);

	return (ret);
}

/*
 * Common code for pthread_rwlock_wrlock() and pthread_rwlock_timedwrlock().
 * abstime == NULL means block indefinitely.  (Body continues below.)
 */
static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = tls_get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
/* (continuation of rwlock_wrlock_common) */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);
		prwlock = *rwlock;
	}
	rwlock_log("rwlock_wrlock_common %p\n", prwlock);

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0) {
		rwlock_log("rwlock_wrlock_common %p (failedA)\n", prwlock);
		return (ret);
	}

	/* wait until neither readers nor another writer hold the lock */
	while (prwlock->state != 0) {
		prwlock->blocked_writers++;

		/*
		 * WARNING: pthread_cond*() temporarily releases the
		 *	    mutex.
		 */
		if (abstime != NULL) {
			ret = _pthread_cond_timedwait(&prwlock->write_signal,
						      &prwlock->lock,
						      abstime);
		} else {
			ret = _pthread_cond_wait(&prwlock->write_signal,
						 &prwlock->lock);
		}

		/*
		 * Undo on failure.  When the blocked_writers count drops
		 * to 0 we may have to wakeup blocked readers.
		 */
		if (ret != 0) {
			prwlock->blocked_writers--;
			if (prwlock->blocked_writers == 0 &&
			    prwlock->state >= 0) {
				_pthread_cond_broadcast(&prwlock->read_signal);
			}
			_pthread_mutex_unlock(&prwlock->lock);
			rwlock_log("rwlock_wrlock_common %p (failedB %d)\n",
				   prwlock, ret);
			return (ret);
		}
		prwlock->blocked_writers--;
	}

	/* indicate we are locked for writing */
	prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);
	rwlock_log("rwlock_wrlock_common %p (returns %d)\n", prwlock, ret);

	return (ret);
}

/* Blocking write lock: wait forever. */
int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common (rwlock, NULL));
}

/* Write lock with an absolute-time deadline. */
int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
	const struct timespec *abstime)
{
	return (rwlock_wrlock_common (rwlock, abstime));
}

/* export the _pthread_* implementations under the public pthread_* names */
__strong_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__strong_reference(_pthread_rwlock_init, pthread_rwlock_init);
__strong_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__strong_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__strong_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__strong_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__strong_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__strong_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__strong_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);