/*	$NetBSD: pthread_rwlock.c,v 1.44 2022/02/12 14:59:32 riastradh Exp $ */

/*-
 * Copyright (c) 2002, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.44 2022/02/12 14:59:32 riastradh Exp $");

/* Need to use libc-private names for atomic operations. */
#include "../../common/lib/libc/atomic/atomic_op_namespace.h"

#include <sys/types.h>
#include <sys/lwpctl.h>

#include <assert.h>
#include <time.h>
#include <errno.h>
#include <stddef.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

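/*
 * Values of pt_rwlocked, used for direct lock handoff: a thread parks
 * itself with _RW_WANT_READ or _RW_WANT_WRITE, and whoever releases
 * the lock stores _RW_LOCKED once ownership has been transferred to
 * the sleeping thread.
 */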
#define	_RW_LOCKED		0
#define	_RW_WANT_WRITE		1
#define	_RW_WANT_READ		2

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(pthread_t, pthread_rwlock_t *,
    pthread_mutex_t *);

int	_pthread_rwlock_held_np(pthread_rwlock_t *);
int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);

#ifndef lint
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np)
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np)
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np)
#endif

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

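/*
 * The owner word is maintained with compare-and-swap and uses the same
 * RW_* encoding as the kernel's rwlocks (see sys/rwlock.h): either a
 * count of readers in RW_READ_INCR units, or the owning thread pointer
 * with RW_WRITE_LOCKED set, with the RW_HAS_WAITERS and RW_WRITE_WANTED
 * flags packed into the low bits (RW_THREAD masks them off).
 */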
static inline uintptr_t
rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
	    (void *)n);
}

int
pthread_rwlock_init(pthread_rwlock_t *ptr,
	    const pthread_rwlockattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_init_stub(ptr, attr);

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr == NULL || attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}


int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_destroy_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	if ((!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}

/* We want function call overhead. */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}

NOINLINE static int
pthread__rwlock_spin(uintptr_t owner)
{
	pthread_t thread;
	unsigned int i;

	if ((owner & ~RW_THREAD) != RW_WRITE_LOCKED)
		return 0;

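	/*
	 * Spin only while the write holder is running on a CPU: if it
	 * has been preempted (lc_curcpu is LWPCTL_CPU_NONE), sleeping
	 * is cheaper than busy-waiting for it to run again.
	 */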
	thread = (pthread_t)(owner & RW_THREAD);
	if (__predict_false(thread == NULL) ||
	    thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
		return 0;

	for (i = 128; i != 0; i--)
		pthread__rwlock_pause();
	return 1;
}

static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		self = pthread__self();
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0);

		if (self->pt_sleepobj != NULL) {
			pthread__rwlock_early(self, ptr, interlock);
		}

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
			membar_enter();
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_tryrdlock_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0);

		if (self->pt_sleepobj != NULL) {
			pthread__rwlock_early(self, ptr, interlock);
		}

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
			membar_enter();
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}

int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_trywrlock_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	self = pthread__self();
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_rdlock_stub(ptr);

	return pthread__rwlock_rdlock(ptr, NULL);
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_rdlock(ptr, abs_timeout);
}

int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_wrlock_stub(ptr);

	return pthread__rwlock_wrlock(ptr, NULL);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_wrlock(ptr, abs_timeout);
}


int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_mutex_t *interlock;
	pthread_t self, thread;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_unlock_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

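	/*
	 * membar_exit() is a release barrier: it orders this thread's
	 * memory accesses inside the critical section before the store
	 * that releases the lock.  PTHREAD__ATOMIC_IS_MEMBAR is defined
	 * on platforms whose atomic ops already imply the barrier.
	 */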
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		self = pthread__self();
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread_mutex_unlock(interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
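		/*
		 * The handoff is direct: pt_rwlocked is set to _RW_LOCKED
		 * on behalf of each thread woken below, so a new owner
		 * never has to re-contend for the lock on wakeup.
		 */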
		self = pthread__self();
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			_DIAGASSERT(((uintptr_t)thread & RW_FLAGMASK) == 0);
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_exit();
#endif

			/* Wake the writer. */
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(&ptr->ptr_wblocked, self,
			    interlock);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				membar_exit();
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(&ptr->ptr_rblocked, self,
			    interlock);
		}
		pthread_mutex_unlock(interlock);

		return 0;
	}
}

/*
 * Called when a timedlock awakens early to adjust the waiter bits.
 * Takes the rwlock's interlock, and removes the caller from the
 * waiter list if it is still queued there.
 */
static void
pthread__rwlock_early(pthread_t self, pthread_rwlock_t *ptr,
    pthread_mutex_t *interlock)
{
	uintptr_t owner, set, newval, next;
	pthread_queue_t *queue;

	pthread_mutex_lock(interlock);
	if ((queue = self->pt_sleepobj) == NULL) {
		pthread_mutex_unlock(interlock);
		return;
	}
	PTQ_REMOVE(queue, self, pt_sleep);
	self->pt_sleepobj = NULL;
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

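	/*
	 * Swap in the recomputed waiter bits, retrying the CAS until it
	 * succeeds against concurrent updates of the owner word.
	 */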
	for (;; owner = next) {
		newval = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, newval);
		if (owner == next)
			break;
	}
	pthread_mutex_unlock(interlock);
}

int
_pthread_rwlock_held_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_WRITE_LOCKED) != 0)
		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
	return (owner & RW_THREAD) != 0;
}

int
_pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
}

int
_pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
}

#ifdef _PTHREAD_PSHARED
int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t * __restrict attr,
    int * __restrict pshared)
{

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	switch(pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{

	if (attr == NULL)
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}