1 /*-
2  * Copyright (c) 1998 Alex Nash
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/lib/libpthread/thread/thr_rwlock.c,v 1.14 2004/01/08 15:37:09 deischen Exp $
27  * $DragonFly: src/lib/libthread_xu/thread/thr_rwlock.c,v 1.7 2006/04/06 13:03:09 davidxu Exp $
28  */
29 
30 #include "namespace.h"
31 #include <machine/tls.h>
32 
33 #include <errno.h>
34 #include <limits.h>
35 #include <stdlib.h>
36 #include <pthread.h>
37 #include "un-namespace.h"
38 
39 #include "thr_private.h"
40 
41 /* maximum number of times a read lock may be obtained */
42 #define	MAX_READ_LOCKS		(INT_MAX - 1)
43 
44 umtx_t	_rwlock_static_lock;
45 
46 static int
47 rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
48 {
49 	pthread_rwlock_t prwlock;
50 	int ret;
51 
52 	/* allocate rwlock object */
53 	prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));
54 
55 	if (prwlock == NULL)
56 		return (ENOMEM);
57 
58 	/* initialize the lock */
59 	if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
60 		free(prwlock);
61 	else {
62 		/* initialize the read condition signal */
63 		ret = _pthread_cond_init(&prwlock->read_signal, NULL);
64 
65 		if (ret != 0) {
66 			_pthread_mutex_destroy(&prwlock->lock);
67 			free(prwlock);
68 		} else {
69 			/* initialize the write condition signal */
70 			ret = _pthread_cond_init(&prwlock->write_signal, NULL);
71 
72 			if (ret != 0) {
73 				_pthread_cond_destroy(&prwlock->read_signal);
74 				_pthread_mutex_destroy(&prwlock->lock);
75 				free(prwlock);
76 			} else {
77 				/* success */
78 				prwlock->state = 0;
79 				prwlock->blocked_writers = 0;
80 				*rwlock = prwlock;
81 			}
82 		}
83 	}
84 
85 	return (ret);
86 }
87 
88 int
89 _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
90 {
91 	int ret;
92 
93 	if (rwlock == NULL)
94 		ret = EINVAL;
95 	else if (*rwlock == NULL)
96 		ret = 0;
97 	else {
98 		pthread_rwlock_t prwlock;
99 
100 		prwlock = *rwlock;
101 
102 		_pthread_mutex_destroy(&prwlock->lock);
103 		_pthread_cond_destroy(&prwlock->read_signal);
104 		_pthread_cond_destroy(&prwlock->write_signal);
105 		free(prwlock);
106 
107 		*rwlock = NULL;
108 
109 		ret = 0;
110 	}
111 	return (ret);
112 }
113 
114 static int
115 init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
116 {
117 	int ret;
118 
119 	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
120 
121 	if (*rwlock == NULL)
122 		ret = rwlock_init(rwlock, NULL);
123 	else
124 		ret = 0;
125 
126 	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
127 
128 	return (ret);
129 }
130 
131 int
132 _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
133 {
134 	*rwlock = NULL;
135 	return (rwlock_init(rwlock, attr));
136 }
137 
138 static int
139 rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
140 {
141 	struct pthread *curthread = tls_get_curthread();
142 	pthread_rwlock_t prwlock;
143 	int ret;
144 
145 	if (rwlock == NULL)
146 		return (EINVAL);
147 
148 	prwlock = *rwlock;
149 
150 	/* check for static initialization */
151 	if (prwlock == NULL) {
152 		if ((ret = init_static(curthread, rwlock)) != 0)
153 			return (ret);
154 
155 		prwlock = *rwlock;
156 	}
157 
158 	/* grab the monitor lock */
159 	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
160 		return (ret);
161 
162 	/* check lock count */
163 	if (prwlock->state == MAX_READ_LOCKS) {
164 		_pthread_mutex_unlock(&prwlock->lock);
165 		return (EAGAIN);
166 	}
167 
168 	curthread = tls_get_curthread();
169 	if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
170 		/*
171 		 * To avoid having to track all the rdlocks held by
172 		 * a thread or all of the threads that hold a rdlock,
173 		 * we keep a simple count of all the rdlocks held by
174 		 * a thread.  If a thread holds any rdlocks it is
175 		 * possible that it is attempting to take a recursive
176 		 * rdlock.  If there are blocked writers and precedence
177 		 * is given to them, then that would result in the thread
178 		 * deadlocking.  So allowing a thread to take the rdlock
179 		 * when it already has one or more rdlocks avoids the
180 		 * deadlock.  I hope the reader can follow that logic ;-)
181 		 */
182 		;	/* nothing needed */
183 	} else {
184 		/* give writers priority over readers */
185 		while (prwlock->blocked_writers || prwlock->state < 0) {
186 			if (abstime)
187 				ret = _pthread_cond_timedwait
188 				    (&prwlock->read_signal,
189 				    &prwlock->lock, abstime);
190 			else
191 				ret = _pthread_cond_wait(&prwlock->read_signal,
192 			    &prwlock->lock);
193 			if (ret != 0) {
194 				/* can't do a whole lot if this fails */
195 				_pthread_mutex_unlock(&prwlock->lock);
196 				return (ret);
197 			}
198 		}
199 	}
200 
201 	curthread->rdlock_count++;
202 	prwlock->state++; /* indicate we are locked for reading */
203 
204 	/*
205 	 * Something is really wrong if this call fails.  Returning
206 	 * error won't do because we've already obtained the read
207 	 * lock.  Decrementing 'state' is no good because we probably
208 	 * don't have the monitor lock.
209 	 */
210 	_pthread_mutex_unlock(&prwlock->lock);
211 
212 	return (ret);
213 }
214 
215 int
216 _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
217 {
218 	return (rwlock_rdlock_common(rwlock, NULL));
219 }
220 
221 int
222 _pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
223 	 const struct timespec *abstime)
224 {
225 	return (rwlock_rdlock_common(rwlock, abstime));
226 }
227 
228 int
229 _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
230 {
231 	struct pthread *curthread = tls_get_curthread();
232 	pthread_rwlock_t prwlock;
233 	int ret;
234 
235 	if (rwlock == NULL)
236 		return (EINVAL);
237 
238 	prwlock = *rwlock;
239 
240 	/* check for static initialization */
241 	if (prwlock == NULL) {
242 		if ((ret = init_static(curthread, rwlock)) != 0)
243 			return (ret);
244 
245 		prwlock = *rwlock;
246 	}
247 
248 	/* grab the monitor lock */
249 	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
250 		return (ret);
251 
252 	curthread = tls_get_curthread();
253 	if (prwlock->state == MAX_READ_LOCKS)
254 		ret = EAGAIN;
255 	else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
256 		/* see comment for pthread_rwlock_rdlock() */
257 		curthread->rdlock_count++;
258 		prwlock->state++;
259 	}
260 	/* give writers priority over readers */
261 	else if (prwlock->blocked_writers || prwlock->state < 0)
262 		ret = EBUSY;
263 	else {
264 		curthread->rdlock_count++;
265 		prwlock->state++; /* indicate we are locked for reading */
266 	}
267 
268 	/* see the comment on this in pthread_rwlock_rdlock */
269 	_pthread_mutex_unlock(&prwlock->lock);
270 
271 	return (ret);
272 }
273 
274 int
275 _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
276 {
277 	struct pthread *curthread = tls_get_curthread();
278 	pthread_rwlock_t prwlock;
279 	int ret;
280 
281 	if (rwlock == NULL)
282 		return (EINVAL);
283 
284 	prwlock = *rwlock;
285 
286 	/* check for static initialization */
287 	if (prwlock == NULL) {
288 		if ((ret = init_static(curthread, rwlock)) != 0)
289 			return (ret);
290 
291 		prwlock = *rwlock;
292 	}
293 
294 	/* grab the monitor lock */
295 	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
296 		return (ret);
297 
298 	if (prwlock->state != 0)
299 		ret = EBUSY;
300 	else
301 		/* indicate we are locked for writing */
302 		prwlock->state = -1;
303 
304 	/* see the comment on this in pthread_rwlock_rdlock */
305 	_pthread_mutex_unlock(&prwlock->lock);
306 
307 	return (ret);
308 }
309 
310 int
311 _pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
312 {
313 	struct pthread *curthread;
314 	pthread_rwlock_t prwlock;
315 	int ret;
316 
317 	if (rwlock == NULL)
318 		return (EINVAL);
319 
320 	prwlock = *rwlock;
321 
322 	if (prwlock == NULL)
323 		return (EINVAL);
324 
325 	/* grab the monitor lock */
326 	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
327 		return (ret);
328 
329 	curthread = tls_get_curthread();
330 	if (prwlock->state > 0) {
331 		curthread->rdlock_count--;
332 		prwlock->state--;
333 		if (prwlock->state == 0 && prwlock->blocked_writers)
334 			ret = _pthread_cond_signal(&prwlock->write_signal);
335 	} else if (prwlock->state < 0) {
336 		prwlock->state = 0;
337 
338 		if (prwlock->blocked_writers)
339 			ret = _pthread_cond_signal(&prwlock->write_signal);
340 		else
341 			ret = _pthread_cond_broadcast(&prwlock->read_signal);
342 	} else
343 		ret = EINVAL;
344 
345 	/* see the comment on this in pthread_rwlock_rdlock */
346 	_pthread_mutex_unlock(&prwlock->lock);
347 
348 	return (ret);
349 }
350 
351 static int
352 rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
353 {
354 	struct pthread *curthread = tls_get_curthread();
355 	pthread_rwlock_t prwlock;
356 	int ret;
357 
358 	if (rwlock == NULL)
359 		return (EINVAL);
360 
361 	prwlock = *rwlock;
362 
363 	/* check for static initialization */
364 	if (prwlock == NULL) {
365 		if ((ret = init_static(curthread, rwlock)) != 0)
366 			return (ret);
367 
368 		prwlock = *rwlock;
369 	}
370 
371 	/* grab the monitor lock */
372 	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
373 		return (ret);
374 
375 	while (prwlock->state != 0) {
376 		prwlock->blocked_writers++;
377 
378 		if (abstime != NULL)
379 			ret = _pthread_cond_timedwait(&prwlock->write_signal,
380 			    &prwlock->lock, abstime);
381 		else
382 			ret = _pthread_cond_wait(&prwlock->write_signal,
383 			    &prwlock->lock);
384 		if (ret != 0) {
385 			prwlock->blocked_writers--;
386 			_pthread_mutex_unlock(&prwlock->lock);
387 			return (ret);
388 		}
389 
390 		prwlock->blocked_writers--;
391 	}
392 
393 	/* indicate we are locked for writing */
394 	prwlock->state = -1;
395 
396 	/* see the comment on this in pthread_rwlock_rdlock */
397 	_pthread_mutex_unlock(&prwlock->lock);
398 
399 	return (ret);
400 }
401 
402 int
403 _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
404 {
405 	return (rwlock_wrlock_common (rwlock, NULL));
406 }
407 
408 int
409 _pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
410     const struct timespec *abstime)
411 {
412 	return (rwlock_wrlock_common (rwlock, abstime));
413 }
414 
/*
 * Export the standard pthread_rwlock_* names as strong aliases of the
 * namespaced _pthread_rwlock_* implementations above.
 */
__strong_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__strong_reference(_pthread_rwlock_init, pthread_rwlock_init);
__strong_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__strong_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__strong_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__strong_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__strong_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__strong_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__strong_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
424 
425