/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libpthread/thread/thr_rwlock.c,v 1.14 2004/01/08 15:37:09 deischen Exp $
 */

#include "namespace.h"
#include <machine/tls.h>
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

#ifdef _PTHREADS_DEBUGGING

#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <sys/file.h>

#endif

/* maximum number of times a read lock may be obtained */
#define	MAX_READ_LOCKS		(INT_MAX - 1)
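
/*
 * State encoding used throughout this file:
 *
 *	state >  0	locked for reading; the value is the number of
 *			read locks currently held
 *	state == 0	unlocked
 *	state <  0	locked for writing (set to -1)
 *
 * blocked_writers counts the writers sleeping on write_signal; readers
 * defer to them so that writers are not starved.
 */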

umtx_t	_rwlock_static_lock;

#ifdef _PTHREADS_DEBUGGING

static
void
rwlock_log(const char *ctl, ...)
{
	char buf[256];
	va_list va;
	size_t len;

	va_start(va, ctl);
	len = vsnprintf(buf, sizeof(buf), ctl, va);
	va_end(va);
	_thr_log(buf, len);
}

#else

static __inline
void
rwlock_log(const char *ctl __unused, ...)
{
}

#endif

static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
{
	pthread_rwlock_t prwlock;
	int ret;

	/* allocate rwlock object */
	prwlock = __malloc(sizeof(struct __pthread_rwlock_s));
	if (prwlock == NULL)
		return (ENOMEM);

	/* initialize the lock */
	if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0) {
		__free(prwlock);
	} else {
		/* initialize the read condition signal */
		ret = _pthread_cond_init(&prwlock->read_signal, NULL);

		if (ret != 0) {
			_pthread_mutex_destroy(&prwlock->lock);
			__free(prwlock);
		} else {
			/* initialize the write condition signal */
			ret = _pthread_cond_init(&prwlock->write_signal, NULL);

			if (ret != 0) {
				_pthread_cond_destroy(&prwlock->read_signal);
				_pthread_mutex_destroy(&prwlock->lock);
				__free(prwlock);
			} else {
				/* success */
				prwlock->state = 0;
				prwlock->blocked_writers = 0;
				*rwlock = prwlock;
			}
		}
	}

	return (ret);
}

#if 0
void
_rwlock_reinit(pthread_rwlock_t prwlock)
{
	_mutex_reinit(&prwlock->lock);
	_cond_reinit(prwlock->read_signal);
	prwlock->state = 0;
	prwlock->blocked_writers = 0;
}
#endif

int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
	int ret;

	if (rwlock == NULL) {
		ret = EINVAL;
	} else if (*rwlock == NULL) {
		ret = 0;
	} else {
		pthread_rwlock_t prwlock;

		prwlock = *rwlock;
		rwlock_log("rwlock_destroy %p\n", prwlock);

		_pthread_mutex_destroy(&prwlock->lock);
		_pthread_cond_destroy(&prwlock->read_signal);
		_pthread_cond_destroy(&prwlock->write_signal);
		__free(prwlock);

		*rwlock = NULL;

		ret = 0;
	}
	return (ret);
}

static int
init_static(pthread_t thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == NULL)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

int
_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}
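
/*
 * Illustrative sketch only (not compiled): how an application exercises
 * the two initialization paths above -- explicit pthread_rwlock_init()
 * versus the lazy initialization done by init_static() the first time a
 * statically initialized lock is used.  Assumes <pthread.h> provides
 * PTHREAD_RWLOCK_INITIALIZER; this implementation treats a NULL lock
 * value as "not yet initialized".
 */
#if 0
static pthread_rwlock_t static_rwlock = PTHREAD_RWLOCK_INITIALIZER;

static void
rwlock_init_sketch(void)
{
	pthread_rwlock_t dynamic_rwlock;

	/* explicit path: rwlock_init() allocates the lock right away */
	pthread_rwlock_init(&dynamic_rwlock, NULL);
	pthread_rwlock_rdlock(&dynamic_rwlock);
	pthread_rwlock_unlock(&dynamic_rwlock);
	pthread_rwlock_destroy(&dynamic_rwlock);

	/* lazy path: init_static() allocates the lock on first use */
	pthread_rwlock_wrlock(&static_rwlock);
	pthread_rwlock_unlock(&static_rwlock);
}
#endif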

static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	pthread_t curthread = tls_get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}
	rwlock_log("rwlock_rdlock_common %p\n", prwlock);

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0) {
		rwlock_log("rwlock_rdlock_common %p (failedA)\n", prwlock);
		return (ret);
	}

	/* check lock count */
	if (prwlock->state == MAX_READ_LOCKS) {
		_pthread_mutex_unlock(&prwlock->lock);
		rwlock_log("rwlock_rdlock_common %p (failedB)\n", prwlock);
		return (EAGAIN);
	}

	curthread = tls_get_curthread();
	if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread, or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 * (See the illustrative sketch following this function.)
		 */
		;	/* nothing needed */
	} else {
		/*
		 * Give writers priority over readers
		 *
		 * WARNING: pthread_cond*() temporarily releases the
		 *	    mutex.
		 */
		while (prwlock->blocked_writers || prwlock->state < 0) {
			if (abstime) {
				ret = _pthread_cond_timedwait(
					    &prwlock->read_signal,
					    &prwlock->lock, abstime);
			} else {
				ret = _pthread_cond_wait(
					    &prwlock->read_signal,
					    &prwlock->lock);
			}
			if (ret != 0) {
				/* can't do a whole lot if this fails */
				_pthread_mutex_unlock(&prwlock->lock);
				rwlock_log("rwlock_rdlock_common %p "
					   "(failedC)\n", prwlock);
				return (ret);
			}
		}
	}

	curthread->rdlock_count++;
	prwlock->state++; /* indicate we are locked for reading */

	/*
	 * Something is really wrong if this call fails.  Returning
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	_pthread_mutex_unlock(&prwlock->lock);
	rwlock_log("rwlock_rdlock_common %p (return %d)\n", prwlock, ret);

	return (ret);
}
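
/*
 * Illustrative sketch only (not compiled): the scenario the recursive
 * rdlock exception above guards against.  Without the rdlock_count
 * check, the second rdlock below would queue behind the blocked writer,
 * while the writer waits for the first read lock to be released -- a
 * deadlock.  (Same PTHREAD_RWLOCK_INITIALIZER assumption as the sketch
 * above.)
 */
#if 0
static pthread_rwlock_t lk = PTHREAD_RWLOCK_INITIALIZER;

static void
recursive_rdlock_sketch(void)
{
	pthread_rwlock_rdlock(&lk);	/* first read lock: state becomes 1 */

	/*
	 * Suppose another thread calls pthread_rwlock_wrlock(&lk) here;
	 * it bumps blocked_writers and sleeps on write_signal.
	 */

	/*
	 * Recursive read lock: because rdlock_count > 0 and state > 0,
	 * rwlock_rdlock_common() grants it immediately instead of
	 * waiting behind the blocked writer.
	 */
	pthread_rwlock_rdlock(&lk);

	pthread_rwlock_unlock(&lk);
	pthread_rwlock_unlock(&lk);	/* last unlock wakes the writer */
}
#endif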

int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock (pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}

int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
	pthread_t curthread = tls_get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	curthread = tls_get_curthread();
	if (prwlock->state == MAX_READ_LOCKS)
		ret = EAGAIN;
	else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/* see comment for pthread_rwlock_rdlock() */
		curthread->rdlock_count++;
		prwlock->state++;
	}
	/* give writers priority over readers */
	else if (prwlock->blocked_writers || prwlock->state < 0)
		ret = EBUSY;
	else {
		curthread->rdlock_count++;
		prwlock->state++; /* indicate we are locked for reading */
	}

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
	pthread_t curthread = tls_get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	if (prwlock->state != 0)
		ret = EBUSY;
	else
		/* indicate we are locked for writing */
		prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}
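
/*
 * Illustrative sketch only (not compiled): the try variants above return
 * EBUSY instead of sleeping.  Note that tryrdlock also fails with EBUSY
 * when writers are merely queued (writer priority), unless the calling
 * thread already holds read locks (the recursive-read exception).
 */
#if 0
static void
trylock_sketch(pthread_rwlock_t *lk)
{
	if (pthread_rwlock_tryrdlock(lk) == 0) {
		/* got the read lock without blocking */
		pthread_rwlock_unlock(lk);
	} else {
		/* EBUSY: write-locked, or writers are waiting */
	}

	if (pthread_rwlock_trywrlock(lk) == 0) {
		/* got the write lock: the lock was completely free */
		pthread_rwlock_unlock(lk);
	}
}
#endif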

int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
	pthread_t curthread;
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	if (prwlock == NULL)
		return (EINVAL);

	rwlock_log("rwlock_unlock %p\n", prwlock);

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	curthread = tls_get_curthread();
	if (prwlock->state > 0) {
		/*
		 * Unlock reader.  When the last reader drops the lock
		 * and writers are queued, wake up one of them.
		 */
		curthread->rdlock_count--;
		prwlock->state--;
		if (prwlock->state == 0 && prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
	} else if (prwlock->state < 0) {
		/*
		 * Unlock writer.  Give precedence to blocked writers;
		 * otherwise wake up all waiting readers.
		 */
		prwlock->state = 0;

		if (prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
		else
			ret = _pthread_cond_broadcast(&prwlock->read_signal);
	} else {
		/* the rwlock was not locked */
		ret = EINVAL;
	}

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);
	rwlock_log("rwlock_unlock %p (return %d)\n", prwlock, ret);

	return (ret);
}

static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	pthread_t curthread = tls_get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}
	rwlock_log("rwlock_wrlock_common %p\n", prwlock);

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0) {
		rwlock_log("rwlock_wrlock_common %p (failedA)\n", prwlock);
		return (ret);
	}

	while (prwlock->state != 0) {
		prwlock->blocked_writers++;

		/*
		 * WARNING: pthread_cond*() temporarily releases the
		 *	    mutex.
		 */
		if (abstime != NULL) {
			ret = _pthread_cond_timedwait(&prwlock->write_signal,
						      &prwlock->lock,
						      abstime);
		} else {
			ret = _pthread_cond_wait(&prwlock->write_signal,
						 &prwlock->lock);
		}

		/*
		 * Undo on failure.  When the blocked_writers count drops
		 * to 0 we may have to wakeup blocked readers.
		 */
		if (ret != 0) {
			prwlock->blocked_writers--;
			if (prwlock->blocked_writers == 0 &&
			    prwlock->state >= 0) {
				_pthread_cond_broadcast(&prwlock->read_signal);
			}
			_pthread_mutex_unlock(&prwlock->lock);
			rwlock_log("rwlock_wrlock_common %p (failedB %d)\n",
				   prwlock, ret);
			return (ret);
		}

		prwlock->blocked_writers--;
	}

	/* indicate we are locked for writing */
	prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);
	rwlock_log("rwlock_wrlock_common %p (returns %d)\n", prwlock, ret);

	return (ret);
}

int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common (rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock (pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	return (rwlock_wrlock_common (rwlock, abstime));
}
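
/*
 * Illustrative sketch only (not compiled): the timed variants take an
 * absolute CLOCK_REALTIME deadline, which is handed unchanged to
 * _pthread_cond_timedwait() on condition variables created with default
 * attributes, and they return ETIMEDOUT when the lock cannot be acquired
 * in time.
 */
#if 0
#include <time.h>

static int
timedwrlock_sketch(pthread_rwlock_t *lk)
{
	struct timespec abstime;

	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 1;	/* give up after roughly one second */

	return (pthread_rwlock_timedwrlock(lk, &abstime));
}
#endif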

__strong_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__strong_reference(_pthread_rwlock_init, pthread_rwlock_init);
__strong_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__strong_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__strong_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__strong_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__strong_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__strong_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__strong_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);