xref: /freebsd/lib/libthr/thread/thr_rwlock.c (revision e28a4053)
/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

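/*
 * Export each _pthread_rwlock_* implementation under its public
 * pthread_rwlock_* name as a weak symbol.
 */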
__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

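/*
 * Resolve a static initializer or detect a destroyed lock before use:
 * allocate the lock on first use of a statically initialized rwlock, and
 * fail with EINVAL on a lock that has already been destroyed.  The single
 * <= comparison catches both sentinel values, which sort below any valid
 * lock pointer.
 */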
#define CHECK_AND_INIT_RWLOCK							\
	if (__predict_false((prwlock = (*rwlock)) <= THR_RWLOCK_DESTROYED)) {	\
		if (prwlock == THR_RWLOCK_INITIALIZER) {			\
			int ret;						\
			ret = init_static(_get_curthread(), rwlock);		\
			if (ret)						\
				return (ret);					\
		} else if (prwlock == THR_RWLOCK_DESTROYED) {			\
			return (EINVAL);					\
		}								\
		prwlock = *rwlock;						\
	}
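/*
 * Allocate a new rwlock, zeroed to its unlocked state.  The attribute
 * argument is currently unused.
 */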
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
{
	pthread_rwlock_t prwlock;

	prwlock = (pthread_rwlock_t)calloc(1, sizeof(struct pthread_rwlock));
	if (prwlock == NULL)
		return (ENOMEM);
	*rwlock = prwlock;
	return (0);
}

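/*
 * Mark the lock destroyed before freeing it, so that later operations on
 * the stale handle return EINVAL instead of touching freed memory.
 */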
int
_pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	prwlock = *rwlock;
	if (prwlock == THR_RWLOCK_INITIALIZER)
		ret = 0;
	else if (prwlock == THR_RWLOCK_DESTROYED)
		ret = EINVAL;
	else {
		*rwlock = THR_RWLOCK_DESTROYED;

		free(prwlock);
		ret = 0;
	}
	return (ret);
}

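/*
 * Initialize a statically allocated rwlock on first use.  The global
 * _rwlock_static_lock serializes racing threads, and the re-check under
 * the lock ensures only the first thread performs the allocation.
 */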
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == THR_RWLOCK_INITIALIZER)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

int
_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	struct timespec ts, ts2, *tsp;
	int flags;
	int ret;

	CHECK_AND_INIT_RWLOCK

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}
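	/*
	 * Concretely: thread A holds a rdlock, thread B blocks waiting
	 * for a wrlock, and A then asks for a second rdlock.  With writer
	 * preference A would queue behind B while B waits for A to unlock,
	 * a deadlock; URWLOCK_PREFER_READER lets A's request succeed.
	 */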

	/*
	 * POSIX says that the validity of the abstime parameter need not
	 * be checked if the lock can be acquired immediately.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

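	/*
	 * Slow path: convert the absolute deadline to the relative
	 * timeout the kernel expects, sleep, and retry the userland
	 * fast path whenever the sleep is interrupted by a signal.
	 */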
	for (;;) {
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			if (ts2.tv_sec < 0 ||
			    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
				return (ETIMEDOUT);
			tsp = &ts2;
		} else
			tsp = NULL;

		/* go into the kernel and lock it */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, tsp);
		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			break;
		}
	}
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}

int
_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	CHECK_AND_INIT_RWLOCK

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

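/*
 * On success, record the owning thread so that unlock() can detect a
 * thread releasing a write lock it does not hold.
 */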
int
_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	CHECK_AND_INIT_RWLOCK

	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = curthread;
	return (ret);
}

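/*
 * Common code for wrlock and timedwrlock: try the userland fast path
 * first, then sleep in the kernel, recomputing the remaining relative
 * timeout on every retry.
 */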
static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	struct timespec ts, ts2, *tsp;
	int ret;

	CHECK_AND_INIT_RWLOCK

	/*
	 * POSIX says that the validity of the abstime parameter need not
	 * be checked if the lock can be acquired immediately.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = curthread;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		if (abstime != NULL) {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			if (ts2.tv_sec < 0 ||
			    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
				return (ETIMEDOUT);
			tsp = &ts2;
		} else
			tsp = NULL;

		/* go into the kernel and lock it */
		ret = __thr_rwlock_wrlock(&prwlock->lock, tsp);
		if (ret == 0) {
			prwlock->owner = curthread;
			break;
		}

		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = curthread;
			break;
		}
	}
	return (ret);
}

int
_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_wrlock_common(rwlock, abstime));
}

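/*
 * A write unlock requires that the caller be the recorded owner (EPERM
 * otherwise); a successful read unlock also drops the per-thread rdlock
 * count used to detect recursive read locking.
 */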
int
_pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	prwlock = *rwlock;

	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != curthread))
			return (EPERM);
		prwlock->owner = NULL;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}