xref: /minix/external/bsd/bind/dist/lib/isc/rwlock.c (revision 00b67f09)
1*00b67f09SDavid van Moolenbroek /*	$NetBSD: rwlock.c,v 1.8 2014/12/10 04:37:59 christos Exp $	*/
2*00b67f09SDavid van Moolenbroek 
3*00b67f09SDavid van Moolenbroek /*
4*00b67f09SDavid van Moolenbroek  * Copyright (C) 2004, 2005, 2007, 2009, 2011, 2012  Internet Systems Consortium, Inc. ("ISC")
5*00b67f09SDavid van Moolenbroek  * Copyright (C) 1998-2001, 2003  Internet Software Consortium.
6*00b67f09SDavid van Moolenbroek  *
7*00b67f09SDavid van Moolenbroek  * Permission to use, copy, modify, and/or distribute this software for any
8*00b67f09SDavid van Moolenbroek  * purpose with or without fee is hereby granted, provided that the above
9*00b67f09SDavid van Moolenbroek  * copyright notice and this permission notice appear in all copies.
10*00b67f09SDavid van Moolenbroek  *
11*00b67f09SDavid van Moolenbroek  * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
12*00b67f09SDavid van Moolenbroek  * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
13*00b67f09SDavid van Moolenbroek  * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
14*00b67f09SDavid van Moolenbroek  * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
15*00b67f09SDavid van Moolenbroek  * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
16*00b67f09SDavid van Moolenbroek  * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*00b67f09SDavid van Moolenbroek  * PERFORMANCE OF THIS SOFTWARE.
18*00b67f09SDavid van Moolenbroek  */
19*00b67f09SDavid van Moolenbroek 
20*00b67f09SDavid van Moolenbroek /* Id */
21*00b67f09SDavid van Moolenbroek 
22*00b67f09SDavid van Moolenbroek /*! \file */
23*00b67f09SDavid van Moolenbroek 
24*00b67f09SDavid van Moolenbroek #include <config.h>
25*00b67f09SDavid van Moolenbroek 
26*00b67f09SDavid van Moolenbroek #include <stddef.h>
27*00b67f09SDavid van Moolenbroek #include <stdlib.h>
28*00b67f09SDavid van Moolenbroek 
29*00b67f09SDavid van Moolenbroek #include <isc/atomic.h>
30*00b67f09SDavid van Moolenbroek #include <isc/magic.h>
31*00b67f09SDavid van Moolenbroek #include <isc/msgs.h>
32*00b67f09SDavid van Moolenbroek #include <isc/platform.h>
33*00b67f09SDavid van Moolenbroek #include <isc/rwlock.h>
34*00b67f09SDavid van Moolenbroek #include <isc/util.h>
35*00b67f09SDavid van Moolenbroek 
36*00b67f09SDavid van Moolenbroek #define RWLOCK_MAGIC		ISC_MAGIC('R', 'W', 'L', 'k')
37*00b67f09SDavid van Moolenbroek #define VALID_RWLOCK(rwl)	ISC_MAGIC_VALID(rwl, RWLOCK_MAGIC)
38*00b67f09SDavid van Moolenbroek 
39*00b67f09SDavid van Moolenbroek #ifdef ISC_PLATFORM_USETHREADS
40*00b67f09SDavid van Moolenbroek #ifdef ISC_PLATFORM_USE_NATIVE_RWLOCKS
41*00b67f09SDavid van Moolenbroek 
42*00b67f09SDavid van Moolenbroek isc_result_t
isc_rwlock_init(isc_rwlock_t * rwl,unsigned int read_quota,unsigned int write_quota)43*00b67f09SDavid van Moolenbroek isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
44*00b67f09SDavid van Moolenbroek                unsigned int write_quota)
45*00b67f09SDavid van Moolenbroek {
46*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl != NULL);
47*00b67f09SDavid van Moolenbroek 
48*00b67f09SDavid van Moolenbroek 	UNUSED(read_quota);
49*00b67f09SDavid van Moolenbroek 	UNUSED(write_quota);
50*00b67f09SDavid van Moolenbroek 
51*00b67f09SDavid van Moolenbroek 	return pthread_rwlock_init(rwl, NULL) == 0 ?
52*00b67f09SDavid van Moolenbroek 	    ISC_R_SUCCESS : ISC_R_FAILURE;
53*00b67f09SDavid van Moolenbroek }
54*00b67f09SDavid van Moolenbroek 
55*00b67f09SDavid van Moolenbroek isc_result_t
isc_rwlock_lock(isc_rwlock_t * rwl,isc_rwlocktype_t type)56*00b67f09SDavid van Moolenbroek isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type)
57*00b67f09SDavid van Moolenbroek {
58*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl != NULL);
59*00b67f09SDavid van Moolenbroek 
60*00b67f09SDavid van Moolenbroek 	switch (type) {
61*00b67f09SDavid van Moolenbroek 	case isc_rwlocktype_none:
62*00b67f09SDavid van Moolenbroek 		return ISC_R_SUCCESS;
63*00b67f09SDavid van Moolenbroek 
64*00b67f09SDavid van Moolenbroek 	case isc_rwlocktype_read:
65*00b67f09SDavid van Moolenbroek 		return pthread_rwlock_rdlock(rwl) == 0 ?
66*00b67f09SDavid van Moolenbroek 		    ISC_R_SUCCESS : ISC_R_LOCKBUSY;
67*00b67f09SDavid van Moolenbroek 
68*00b67f09SDavid van Moolenbroek 	case isc_rwlocktype_write:
69*00b67f09SDavid van Moolenbroek 		return pthread_rwlock_wrlock(rwl) == 0 ?
70*00b67f09SDavid van Moolenbroek 		    ISC_R_SUCCESS : ISC_R_LOCKBUSY;
71*00b67f09SDavid van Moolenbroek 
72*00b67f09SDavid van Moolenbroek 	default:
73*00b67f09SDavid van Moolenbroek 		abort();
74*00b67f09SDavid van Moolenbroek 		return (ISC_R_FAILURE);
75*00b67f09SDavid van Moolenbroek 	}
76*00b67f09SDavid van Moolenbroek }
77*00b67f09SDavid van Moolenbroek 
78*00b67f09SDavid van Moolenbroek isc_result_t
isc_rwlock_trylock(isc_rwlock_t * rwl,isc_rwlocktype_t type)79*00b67f09SDavid van Moolenbroek isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type)
80*00b67f09SDavid van Moolenbroek {
81*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl != NULL);
82*00b67f09SDavid van Moolenbroek 
83*00b67f09SDavid van Moolenbroek 	switch (type) {
84*00b67f09SDavid van Moolenbroek 	case isc_rwlocktype_none:
85*00b67f09SDavid van Moolenbroek 		return ISC_R_SUCCESS;
86*00b67f09SDavid van Moolenbroek 
87*00b67f09SDavid van Moolenbroek 	case isc_rwlocktype_read:
88*00b67f09SDavid van Moolenbroek 		return pthread_rwlock_tryrdlock(rwl) == 0 ?
89*00b67f09SDavid van Moolenbroek 		    ISC_R_SUCCESS : ISC_R_LOCKBUSY;
90*00b67f09SDavid van Moolenbroek 
91*00b67f09SDavid van Moolenbroek 	case isc_rwlocktype_write:
92*00b67f09SDavid van Moolenbroek 		return pthread_rwlock_trywrlock(rwl) == 0 ?
93*00b67f09SDavid van Moolenbroek 		    ISC_R_SUCCESS : ISC_R_LOCKBUSY;
94*00b67f09SDavid van Moolenbroek 
95*00b67f09SDavid van Moolenbroek 	default:
96*00b67f09SDavid van Moolenbroek 		abort();
97*00b67f09SDavid van Moolenbroek 		return (ISC_R_FAILURE);
98*00b67f09SDavid van Moolenbroek 	}
99*00b67f09SDavid van Moolenbroek }
100*00b67f09SDavid van Moolenbroek 
101*00b67f09SDavid van Moolenbroek isc_result_t
isc_rwlock_tryupgrade(isc_rwlock_t * rwl)102*00b67f09SDavid van Moolenbroek isc_rwlock_tryupgrade(isc_rwlock_t *rwl)
103*00b67f09SDavid van Moolenbroek {
104*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl != NULL);
105*00b67f09SDavid van Moolenbroek 
106*00b67f09SDavid van Moolenbroek 	/*
107*00b67f09SDavid van Moolenbroek 	* XXX: we need to make sure we are holding a read lock here
108*00b67f09SDavid van Moolenbroek 	* but how to do it atomically?
109*00b67f09SDavid van Moolenbroek 	*/
110*00b67f09SDavid van Moolenbroek 	return pthread_rwlock_trywrlock(rwl) == 0 ?
111*00b67f09SDavid van Moolenbroek 	    ISC_R_SUCCESS : ISC_R_LOCKBUSY;
112*00b67f09SDavid van Moolenbroek }
113*00b67f09SDavid van Moolenbroek 
114*00b67f09SDavid van Moolenbroek void
isc_rwlock_downgrade(isc_rwlock_t * rwl)115*00b67f09SDavid van Moolenbroek isc_rwlock_downgrade(isc_rwlock_t *rwl)
116*00b67f09SDavid van Moolenbroek {
117*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl != NULL);
118*00b67f09SDavid van Moolenbroek 
119*00b67f09SDavid van Moolenbroek 	/*
120*00b67f09SDavid van Moolenbroek 	* XXX: we need to make sure we are holding a write lock here
121*00b67f09SDavid van Moolenbroek 	* and then give it up and get a read lock but how to do it atomically?
122*00b67f09SDavid van Moolenbroek 	*/
123*00b67f09SDavid van Moolenbroek 	pthread_rwlock_unlock(rwl);
124*00b67f09SDavid van Moolenbroek 	REQUIRE(pthread_rwlock_tryrdlock(rwl) == 0);
125*00b67f09SDavid van Moolenbroek }
126*00b67f09SDavid van Moolenbroek 
127*00b67f09SDavid van Moolenbroek isc_result_t
isc_rwlock_unlock(isc_rwlock_t * rwl,isc_rwlocktype_t type)128*00b67f09SDavid van Moolenbroek isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type)
129*00b67f09SDavid van Moolenbroek {
130*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl != NULL);
131*00b67f09SDavid van Moolenbroek 	UNUSED(type);
132*00b67f09SDavid van Moolenbroek 
133*00b67f09SDavid van Moolenbroek 	pthread_rwlock_unlock(rwl);
134*00b67f09SDavid van Moolenbroek 
135*00b67f09SDavid van Moolenbroek 	return (ISC_R_SUCCESS);
136*00b67f09SDavid van Moolenbroek }
137*00b67f09SDavid van Moolenbroek 
138*00b67f09SDavid van Moolenbroek void
isc_rwlock_destroy(isc_rwlock_t * rwl)139*00b67f09SDavid van Moolenbroek isc_rwlock_destroy(isc_rwlock_t *rwl)
140*00b67f09SDavid van Moolenbroek {
141*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl != NULL);
142*00b67f09SDavid van Moolenbroek }
143*00b67f09SDavid van Moolenbroek 
144*00b67f09SDavid van Moolenbroek #else /* !ISC_PLATFORM_USE_NATIVE_RWLOCKS */
145*00b67f09SDavid van Moolenbroek 
146*00b67f09SDavid van Moolenbroek 
147*00b67f09SDavid van Moolenbroek #ifndef RWLOCK_DEFAULT_READ_QUOTA
148*00b67f09SDavid van Moolenbroek #define RWLOCK_DEFAULT_READ_QUOTA 4
149*00b67f09SDavid van Moolenbroek #endif
150*00b67f09SDavid van Moolenbroek 
151*00b67f09SDavid van Moolenbroek #ifndef RWLOCK_DEFAULT_WRITE_QUOTA
152*00b67f09SDavid van Moolenbroek #define RWLOCK_DEFAULT_WRITE_QUOTA 4
153*00b67f09SDavid van Moolenbroek #endif
154*00b67f09SDavid van Moolenbroek 
155*00b67f09SDavid van Moolenbroek #ifdef ISC_RWLOCK_TRACE
156*00b67f09SDavid van Moolenbroek #include <stdio.h>		/* Required for fprintf/stderr. */
157*00b67f09SDavid van Moolenbroek #include <isc/thread.h>		/* Required for isc_thread_self(). */
158*00b67f09SDavid van Moolenbroek 
/*
 * Dump the state of 'rwl' to stderr, labelled with 'operation' and the
 * lock 'type' the caller is about to use / has just used.  Compiled in
 * only when ISC_RWLOCK_TRACE is defined.
 *
 * NOTE(review): the counters (active, granted, readers_waiting,
 * writers_waiting) and rwl->type are read without holding rwl->lock,
 * so the printed snapshot may be slightly stale — acceptable for a
 * debug trace.
 */
static void
print_lock(const char *operation, isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	fprintf(stderr,
		isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
			       ISC_MSG_PRINTLOCK,
			       "rwlock %p thread %lu %s(%s): %s, %u active, "
			       "%u granted, %u rwaiting, %u wwaiting\n"),
		rwl, isc_thread_self(), operation,
		(type == isc_rwlocktype_read ?
		 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				ISC_MSG_READ, "read") :
		 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				ISC_MSG_WRITE, "write")),
		(rwl->type == isc_rwlocktype_read ?
		 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				ISC_MSG_READING, "reading") :
		 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				ISC_MSG_WRITING, "writing")),
		rwl->active, rwl->granted, rwl->readers_waiting,
		rwl->writers_waiting);
}
180*00b67f09SDavid van Moolenbroek #endif
181*00b67f09SDavid van Moolenbroek 
/*
 * Initialize 'rwl' for the generic (non-native) implementation.
 *
 * read_quota / write_quota bound how many consecutive grants one side
 * may receive before the other side is preferred; 0 selects the
 * compile-time defaults.  The atomic-operation variant does not
 * support a read quota and only warns if one is requested.
 *
 * Returns ISC_R_SUCCESS, or an error from mutex/condition creation
 * (resources acquired so far are released on the error paths).
 */
isc_result_t
isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
		unsigned int write_quota)
{
	isc_result_t result;

	REQUIRE(rwl != NULL);

	/*
	 * In case there's trouble initializing, we zero magic now.  If all
	 * goes well, we'll set it to RWLOCK_MAGIC.
	 */
	rwl->magic = 0;

#if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
	/* Atomic variant: ticket-based writer queue, see comment below. */
	rwl->write_requests = 0;
	rwl->write_completions = 0;
	rwl->cnt_and_flag = 0;
	rwl->readers_waiting = 0;
	rwl->write_granted = 0;
	/* No reader quota in this variant: report it, then proceed. */
	if (read_quota != 0) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "read quota is not supported");
	}
	if (write_quota == 0)
		write_quota = RWLOCK_DEFAULT_WRITE_QUOTA;
	rwl->write_quota = write_quota;
#else
	/* Generic mutex/condition variant. */
	rwl->type = isc_rwlocktype_read;
	rwl->original = isc_rwlocktype_none;
	rwl->active = 0;
	rwl->granted = 0;
	rwl->readers_waiting = 0;
	rwl->writers_waiting = 0;
	if (read_quota == 0)
		read_quota = RWLOCK_DEFAULT_READ_QUOTA;
	rwl->read_quota = read_quota;
	if (write_quota == 0)
		write_quota = RWLOCK_DEFAULT_WRITE_QUOTA;
	rwl->write_quota = write_quota;
#endif

	result = isc_mutex_init(&rwl->lock);
	if (result != ISC_R_SUCCESS)
		return (result);

	result = isc_condition_init(&rwl->readable);
	if (result != ISC_R_SUCCESS) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_condition_init(readable) %s: %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"),
				 isc_result_totext(result));
		result = ISC_R_UNEXPECTED;
		goto destroy_lock;
	}
	result = isc_condition_init(&rwl->writeable);
	if (result != ISC_R_SUCCESS) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_condition_init(writeable) %s: %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"),
				 isc_result_totext(result));
		result = ISC_R_UNEXPECTED;
		goto destroy_rcond;
	}

	rwl->magic = RWLOCK_MAGIC;

	return (ISC_R_SUCCESS);

	/* Unwind partially-acquired resources in reverse order. */
  destroy_rcond:
	(void)isc_condition_destroy(&rwl->readable);
  destroy_lock:
	DESTROYLOCK(&rwl->lock);

	return (result);
}
260*00b67f09SDavid van Moolenbroek 
/*
 * Destroy a rwlock created by isc_rwlock_init().
 *
 * Requires that no thread currently holds the lock and none is waiting
 * for it; both conditions are asserted before the underlying mutex and
 * condition variables are torn down.
 */
void
isc_rwlock_destroy(isc_rwlock_t *rwl) {
	REQUIRE(VALID_RWLOCK(rwl));

#if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
	/* Atomic variant: no queued writers, no active readers/writer. */
	REQUIRE(rwl->write_requests == rwl->write_completions &&
		rwl->cnt_and_flag == 0 && rwl->readers_waiting == 0);
#else
	/* Generic variant: inspect the counters under the mutex. */
	LOCK(&rwl->lock);
	REQUIRE(rwl->active == 0 &&
		rwl->readers_waiting == 0 &&
		rwl->writers_waiting == 0);
	UNLOCK(&rwl->lock);
#endif

	/* Clear the magic so stale pointers fail VALID_RWLOCK(). */
	rwl->magic = 0;
	(void)isc_condition_destroy(&rwl->readable);
	(void)isc_condition_destroy(&rwl->writeable);
	DESTROYLOCK(&rwl->lock);
}
281*00b67f09SDavid van Moolenbroek 
282*00b67f09SDavid van Moolenbroek #if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
283*00b67f09SDavid van Moolenbroek 
284*00b67f09SDavid van Moolenbroek /*
285*00b67f09SDavid van Moolenbroek  * When some architecture-dependent atomic operations are available,
286*00b67f09SDavid van Moolenbroek  * rwlock can be more efficient than the generic algorithm defined below.
287*00b67f09SDavid van Moolenbroek  * The basic algorithm is described in the following URL:
288*00b67f09SDavid van Moolenbroek  *   http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/rw.html
289*00b67f09SDavid van Moolenbroek  *
290*00b67f09SDavid van Moolenbroek  * The key is to use the following integer variables modified atomically:
291*00b67f09SDavid van Moolenbroek  *   write_requests, write_completions, and cnt_and_flag.
292*00b67f09SDavid van Moolenbroek  *
293*00b67f09SDavid van Moolenbroek  * write_requests and write_completions act as a waiting queue for writers
294*00b67f09SDavid van Moolenbroek  * in order to ensure the FIFO order.  Both variables begin with the initial
295*00b67f09SDavid van Moolenbroek  * value of 0.  When a new writer tries to get a write lock, it increments
296*00b67f09SDavid van Moolenbroek  * write_requests and gets the previous value of the variable as a "ticket".
297*00b67f09SDavid van Moolenbroek  * When write_completions reaches the ticket number, the new writer can start
298*00b67f09SDavid van Moolenbroek  * writing.  When the writer completes its work, it increments
299*00b67f09SDavid van Moolenbroek  * write_completions so that another new writer can start working.  If the
300*00b67f09SDavid van Moolenbroek  * write_requests is not equal to write_completions, it means a writer is now
301*00b67f09SDavid van Moolenbroek  * working or waiting.  In this case, new readers cannot start reading, or
302*00b67f09SDavid van Moolenbroek  * in other words, this algorithm basically prefers writers.
303*00b67f09SDavid van Moolenbroek  *
304*00b67f09SDavid van Moolenbroek  * cnt_and_flag is a "lock" shared by all readers and writers.  This integer
305*00b67f09SDavid van Moolenbroek  * variable is a kind of structure with two members: writer_flag (1 bit) and
306*00b67f09SDavid van Moolenbroek  * reader_count (31 bits).  The writer_flag shows whether a writer is working,
307*00b67f09SDavid van Moolenbroek  * and the reader_count shows the number of readers currently working or almost
308*00b67f09SDavid van Moolenbroek  * ready for working.  A writer who has the current "ticket" tries to get the
309*00b67f09SDavid van Moolenbroek  * lock by exclusively setting the writer_flag to 1, provided that the whole
310*00b67f09SDavid van Moolenbroek  * 32-bit is 0 (meaning no readers or writers working).  On the other hand,
311*00b67f09SDavid van Moolenbroek  * a new reader tries to increment the "reader_count" field provided that
312*00b67f09SDavid van Moolenbroek  * the writer_flag is 0 (meaning there is no writer working).
313*00b67f09SDavid van Moolenbroek  *
314*00b67f09SDavid van Moolenbroek  * If some of the above operations fail, the reader or the writer sleeps
315*00b67f09SDavid van Moolenbroek  * until the related condition changes.  When a working reader or writer
316*00b67f09SDavid van Moolenbroek  * completes its work, some readers or writers are sleeping, and the condition
317*00b67f09SDavid van Moolenbroek  * that suspended the reader or writer has changed, it wakes up the sleeping
318*00b67f09SDavid van Moolenbroek  * readers or writers.
319*00b67f09SDavid van Moolenbroek  *
320*00b67f09SDavid van Moolenbroek  * As already noted, this algorithm basically prefers writers.  In order to
321*00b67f09SDavid van Moolenbroek  * prevent readers from starving, however, the algorithm also introduces the
322*00b67f09SDavid van Moolenbroek  * "writer quota" (Q).  When Q consecutive writers have completed their work,
323*00b67f09SDavid van Moolenbroek  * suspending readers, the last writer will wake up the readers, even if a new
324*00b67f09SDavid van Moolenbroek  * writer is waiting.
325*00b67f09SDavid van Moolenbroek  *
326*00b67f09SDavid van Moolenbroek  * Implementation specific note: due to the combination of atomic operations
327*00b67f09SDavid van Moolenbroek  * and a mutex lock, ordering between the atomic operation and locks can be
328*00b67f09SDavid van Moolenbroek  * very sensitive in some cases.  In particular, it is generally very important
329*00b67f09SDavid van Moolenbroek  * to check the atomic variable that requires a reader or writer to sleep after
330*00b67f09SDavid van Moolenbroek  * locking the mutex and before actually sleeping; otherwise, it could be very
331*00b67f09SDavid van Moolenbroek  * likely to cause a deadlock.  For example, assume "var" is a variable
332*00b67f09SDavid van Moolenbroek  * atomically modified, then the corresponding code would be:
333*00b67f09SDavid van Moolenbroek  *	if (var == need_sleep) {
334*00b67f09SDavid van Moolenbroek  *		LOCK(lock);
335*00b67f09SDavid van Moolenbroek  *		if (var == need_sleep)
336*00b67f09SDavid van Moolenbroek  *			WAIT(cond, lock);
337*00b67f09SDavid van Moolenbroek  *		UNLOCK(lock);
338*00b67f09SDavid van Moolenbroek  *	}
339*00b67f09SDavid van Moolenbroek  * The second check is important, since "var" is protected by the atomic
340*00b67f09SDavid van Moolenbroek  * operation, not by the mutex, and can be changed just before sleeping.
341*00b67f09SDavid van Moolenbroek  * (The first "if" could be omitted, but this is also important in order to
342*00b67f09SDavid van Moolenbroek  * make the code efficient by avoiding the use of the mutex unless it is
343*00b67f09SDavid van Moolenbroek  * really necessary.)
344*00b67f09SDavid van Moolenbroek  */
345*00b67f09SDavid van Moolenbroek 
346*00b67f09SDavid van Moolenbroek #define WRITER_ACTIVE	0x1
347*00b67f09SDavid van Moolenbroek #define READER_INCR	0x2
348*00b67f09SDavid van Moolenbroek 
/*
 * Acquire 'rwl' for the given lock type, blocking until it is granted
 * (atomic-operation implementation; see the algorithm description
 * above).  Readers wait out any queued or active writer, then announce
 * themselves via cnt_and_flag; writers take a ticket from
 * write_requests and wait for write_completions to reach it, then
 * contend for cnt_and_flag.
 *
 * Always returns ISC_R_SUCCESS.
 */
isc_result_t
isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	isc_int32_t cntflag;

	REQUIRE(VALID_RWLOCK(rwl));

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_PRELOCK, "prelock"), rwl, type);
#endif

	if (type == isc_rwlocktype_read) {
		if (rwl->write_requests != rwl->write_completions) {
			/* there is a waiting or active writer */
			LOCK(&rwl->lock);
			/* Re-check under the mutex (see the note above). */
			if (rwl->write_requests != rwl->write_completions) {
				rwl->readers_waiting++;
				WAIT(&rwl->readable, &rwl->lock);
				rwl->readers_waiting--;
			}
			UNLOCK(&rwl->lock);
		}

		/* Announce this reader in the shared count/flag word. */
		cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
		POST(cntflag);
		while (1) {
			if ((rwl->cnt_and_flag & WRITER_ACTIVE) == 0)
				break;

			/* A writer is still working */
			LOCK(&rwl->lock);
			rwl->readers_waiting++;
			if ((rwl->cnt_and_flag & WRITER_ACTIVE) != 0)
				WAIT(&rwl->readable, &rwl->lock);
			rwl->readers_waiting--;
			UNLOCK(&rwl->lock);

			/*
			 * Typically, the reader should be able to get a lock
			 * at this stage:
			 *   (1) there should have been no pending writer when
			 *       the reader was trying to increment the
			 *       counter; otherwise, the writer should be in
			 *       the waiting queue, preventing the reader from
			 *       proceeding to this point.
			 *   (2) once the reader increments the counter, no
			 *       more writer can get a lock.
			 * Still, it is possible another writer can work at
			 * this point, e.g. in the following scenario:
			 *   A previous writer unlocks the writer lock.
			 *   This reader proceeds to point (1).
			 *   A new writer appears, and gets a new lock before
			 *   the reader increments the counter.
			 *   The reader then increments the counter.
			 *   The previous writer notices there is a waiting
			 *   reader who is almost ready, and wakes it up.
			 * So, the reader needs to confirm whether it can now
			 * read explicitly (thus we loop).  Note that this is
			 * not an infinite process, since the reader has
			 * incremented the counter at this point.
			 */
		}

		/*
		 * If we are temporarily preferred to writers due to the writer
		 * quota, reset the condition (race among readers doesn't
		 * matter).
		 */
		rwl->write_granted = 0;
	} else {
		isc_int32_t prev_writer;

		/* enter the waiting queue, and wait for our turn */
		prev_writer = isc_atomic_xadd(&rwl->write_requests, 1);
		while (rwl->write_completions != prev_writer) {
			LOCK(&rwl->lock);
			/* Re-check under the mutex before sleeping. */
			if (rwl->write_completions != prev_writer) {
				WAIT(&rwl->writeable, &rwl->lock);
				UNLOCK(&rwl->lock);
				continue;
			}
			UNLOCK(&rwl->lock);
			break;
		}

		/* Our turn: take the lock word when nobody else holds it. */
		while (1) {
			cntflag = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
						     WRITER_ACTIVE);
			if (cntflag == 0)
				break;

			/* Another active reader or writer is working. */
			LOCK(&rwl->lock);
			if (rwl->cnt_and_flag != 0)
				WAIT(&rwl->writeable, &rwl->lock);
			UNLOCK(&rwl->lock);
		}

		INSIST((rwl->cnt_and_flag & WRITER_ACTIVE) != 0);
		rwl->write_granted++;
	}

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_POSTLOCK, "postlock"), rwl, type);
#endif

	return (ISC_R_SUCCESS);
}
458*00b67f09SDavid van Moolenbroek 
/*
 * Try to acquire a read or write lock on 'rwl' without blocking.
 *
 * Returns:
 *	ISC_R_SUCCESS	the lock was acquired.
 *	ISC_R_LOCKBUSY	the lock is held or contended in a conflicting
 *			mode; no lock was taken.
 *
 * Lock-free fast path: 'cnt_and_flag' packs the active-reader count
 * (multiples of READER_INCR) together with the WRITER_ACTIVE bit, and
 * the write_requests/write_completions pair acts as a ticket counter
 * giving waiting writers FIFO order.
 */
isc_result_t
isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	isc_int32_t cntflag;

	REQUIRE(VALID_RWLOCK(rwl));

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_PRELOCK, "prelock"), rwl, type);
#endif

	if (type == isc_rwlocktype_read) {
		/* If a writer is waiting or working, we fail. */
		if (rwl->write_requests != rwl->write_completions)
			return (ISC_R_LOCKBUSY);

		/* Otherwise, be ready for reading. */
		cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
		if ((cntflag & WRITER_ACTIVE) != 0) {
			/*
			 * A writer is working.  We lose, and cancel the read
			 * request.
			 */
			cntflag = isc_atomic_xadd(&rwl->cnt_and_flag,
						  -READER_INCR);
			/*
			 * If no other readers are waiting and we've suspended
			 * new writers in this short period, wake them up.
			 * (cntflag here is the value BEFORE our decrement, so
			 * READER_INCR means we were the only pending reader.)
			 */
			if (cntflag == READER_INCR &&
			    rwl->write_completions != rwl->write_requests) {
				LOCK(&rwl->lock);
				BROADCAST(&rwl->writeable);
				UNLOCK(&rwl->lock);
			}

			return (ISC_R_LOCKBUSY);
		}
	} else {
		/* Try locking without entering the waiting queue. */
		cntflag = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
					     WRITER_ACTIVE);
		if (cntflag != 0)
			return (ISC_R_LOCKBUSY);

		/*
		 * XXXJT: jump into the queue, possibly breaking the writer
		 * order.
		 */
		/*
		 * Decrementing write_completions makes the ticket counters
		 * unequal, which readers interpret as "a writer is pending";
		 * presumably this reserves our slot in the queue — the later
		 * unlock's increment restores the balance.
		 */
		(void)isc_atomic_xadd(&rwl->write_completions, -1);

		/* Safe without atomics: we now hold exclusive access. */
		rwl->write_granted++;
	}

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_POSTLOCK, "postlock"), rwl, type);
#endif

	return (ISC_R_SUCCESS);
}
520*00b67f09SDavid van Moolenbroek 
/*
 * Try to atomically upgrade a held read lock on 'rwl' to a write lock.
 * Succeeds only when the caller is the sole active reader; otherwise
 * ISC_R_LOCKBUSY is returned and the read lock is retained.
 *
 * The caller MUST already hold 'rwl' for reading.
 */
isc_result_t
isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
	isc_int32_t prevcnt;

	/* Try to acquire write access. */
	REQUIRE(VALID_RWLOCK(rwl));

	/*
	 * If cnt_and_flag is exactly one reader and no writer flag,
	 * swap it for WRITER_ACTIVE in a single step.
	 */
	prevcnt = isc_atomic_cmpxchg(&rwl->cnt_and_flag,
				     READER_INCR, WRITER_ACTIVE);
	/*
	 * There must have been no writer, and there must have been at least
	 * one reader.
	 */
	INSIST((prevcnt & WRITER_ACTIVE) == 0 &&
	       (prevcnt & ~WRITER_ACTIVE) != 0);

	if (prevcnt == READER_INCR) {
		/*
		 * We are the only reader and have been upgraded.
		 * Now jump into the head of the writer waiting queue.
		 */
		(void)isc_atomic_xadd(&rwl->write_completions, -1);
	} else
		return (ISC_R_LOCKBUSY);

	return (ISC_R_SUCCESS);

}
549*00b67f09SDavid van Moolenbroek 
/*
 * Convert a held write lock on 'rwl' into a read lock without ever
 * releasing the lock, then wake any readers that were blocked while
 * we held write access.
 *
 * The caller MUST hold 'rwl' for writing.
 */
void
isc_rwlock_downgrade(isc_rwlock_t *rwl) {
	isc_int32_t prev_readers;

	REQUIRE(VALID_RWLOCK(rwl));

	/* Become an active reader. */
	prev_readers = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
	/* We must have been a writer. */
	INSIST((prev_readers & WRITER_ACTIVE) != 0);

	/*
	 * Complete write: clear the writer flag AFTER adding our reader
	 * count, so the lock is never observed as free in between.
	 */
	(void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
	(void)isc_atomic_xadd(&rwl->write_completions, 1);

	/* Resume other readers */
	LOCK(&rwl->lock);
	if (rwl->readers_waiting > 0)
		BROADCAST(&rwl->readable);
	UNLOCK(&rwl->lock);
}
571*00b67f09SDavid van Moolenbroek 
/*
 * Release a lock previously acquired on 'rwl' with the given 'type'.
 *
 * For a read unlock: if we were the last reader and writers are
 * queued, wake them all so the ticket counters preserve FIFO order.
 * For a write unlock: clear WRITER_ACTIVE, complete our ticket, then
 * decide whether to hand over to readers or to the next writer based
 * on the write quota and who is waiting.
 *
 * Always returns ISC_R_SUCCESS.
 */
isc_result_t
isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	isc_int32_t prev_cnt;

	REQUIRE(VALID_RWLOCK(rwl));

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_PREUNLOCK, "preunlock"), rwl, type);
#endif

	if (type == isc_rwlocktype_read) {
		prev_cnt = isc_atomic_xadd(&rwl->cnt_and_flag, -READER_INCR);

		/*
		 * If we're the last reader and any writers are waiting, wake
		 * them up.  We need to wake up all of them to ensure the
		 * FIFO order.
		 */
		if (prev_cnt == READER_INCR &&
		    rwl->write_completions != rwl->write_requests) {
			LOCK(&rwl->lock);
			BROADCAST(&rwl->writeable);
			UNLOCK(&rwl->lock);
		}
	} else {
		isc_boolean_t wakeup_writers = ISC_TRUE;

		/*
		 * Reset the flag, and (implicitly) tell other writers
		 * we are done.
		 */
		(void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
		(void)isc_atomic_xadd(&rwl->write_completions, 1);

		if (rwl->write_granted >= rwl->write_quota ||
		    rwl->write_requests == rwl->write_completions ||
		    (rwl->cnt_and_flag & ~WRITER_ACTIVE) != 0) {
			/*
			 * We have passed the write quota, no writer is
			 * waiting, or some readers are almost ready, pending
			 * possible writers.  Note that the last case can
			 * happen even if write_requests != write_completions
			 * (which means a new writer in the queue), so we need
			 * to catch the case explicitly.
			 */
			LOCK(&rwl->lock);
			if (rwl->readers_waiting > 0) {
				/* Readers take over; skip writer wakeup. */
				wakeup_writers = ISC_FALSE;
				BROADCAST(&rwl->readable);
			}
			UNLOCK(&rwl->lock);
		}

		if (rwl->write_requests != rwl->write_completions &&
		    wakeup_writers) {
			LOCK(&rwl->lock);
			BROADCAST(&rwl->writeable);
			UNLOCK(&rwl->lock);
		}
	}

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_POSTUNLOCK, "postunlock"),
		   rwl, type);
#endif

	return (ISC_R_SUCCESS);
}
642*00b67f09SDavid van Moolenbroek 
643*00b67f09SDavid van Moolenbroek #else /* ISC_PLATFORM_HAVEXADD && ISC_PLATFORM_HAVECMPXCHG */
644*00b67f09SDavid van Moolenbroek 
/*
 * Common lock-acquisition routine for the mutex+condvar fallback build
 * (no atomic operations available).  Acquires 'rwl' for 'type' access;
 * if 'nonblock' is true, returns ISC_R_LOCKBUSY instead of waiting.
 * All rwlock state is protected by rwl->lock.
 *
 * Fairness: if waiters of our own class already exist when we arrive,
 * 'skip' forces us to wait one round rather than barging ahead of them.
 */
static isc_result_t
doit(isc_rwlock_t *rwl, isc_rwlocktype_t type, isc_boolean_t nonblock) {
	isc_boolean_t skip = ISC_FALSE;
	isc_boolean_t done = ISC_FALSE;
	isc_result_t result = ISC_R_SUCCESS;

	REQUIRE(VALID_RWLOCK(rwl));

	LOCK(&rwl->lock);

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_PRELOCK, "prelock"), rwl, type);
#endif

	if (type == isc_rwlocktype_read) {
		if (rwl->readers_waiting != 0)
			skip = ISC_TRUE;
		while (!done) {
			/*
			 * A read grant is allowed when the lock is free, or
			 * it is read-held and either no writer is queued or
			 * the read quota has not yet been consumed.
			 */
			if (!skip &&
			    ((rwl->active == 0 ||
			      (rwl->type == isc_rwlocktype_read &&
			       (rwl->writers_waiting == 0 ||
				rwl->granted < rwl->read_quota)))))
			{
				rwl->type = isc_rwlocktype_read;
				rwl->active++;
				rwl->granted++;
				done = ISC_TRUE;
			} else if (nonblock) {
				result = ISC_R_LOCKBUSY;
				done = ISC_TRUE;
			} else {
				/* Wait once; retry with full priority. */
				skip = ISC_FALSE;
				rwl->readers_waiting++;
				WAIT(&rwl->readable, &rwl->lock);
				rwl->readers_waiting--;
			}
		}
	} else {
		if (rwl->writers_waiting != 0)
			skip = ISC_TRUE;
		while (!done) {
			/* Writers require the lock to be completely free. */
			if (!skip && rwl->active == 0) {
				rwl->type = isc_rwlocktype_write;
				rwl->active = 1;
				rwl->granted++;
				done = ISC_TRUE;
			} else if (nonblock) {
				result = ISC_R_LOCKBUSY;
				done = ISC_TRUE;
			} else {
				skip = ISC_FALSE;
				rwl->writers_waiting++;
				WAIT(&rwl->writeable, &rwl->lock);
				rwl->writers_waiting--;
			}
		}
	}

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_POSTLOCK, "postlock"), rwl, type);
#endif

	UNLOCK(&rwl->lock);

	return (result);
}
714*00b67f09SDavid van Moolenbroek 
715*00b67f09SDavid van Moolenbroek isc_result_t
isc_rwlock_lock(isc_rwlock_t * rwl,isc_rwlocktype_t type)716*00b67f09SDavid van Moolenbroek isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
717*00b67f09SDavid van Moolenbroek 	return (doit(rwl, type, ISC_FALSE));
718*00b67f09SDavid van Moolenbroek }
719*00b67f09SDavid van Moolenbroek 
720*00b67f09SDavid van Moolenbroek isc_result_t
isc_rwlock_trylock(isc_rwlock_t * rwl,isc_rwlocktype_t type)721*00b67f09SDavid van Moolenbroek isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
722*00b67f09SDavid van Moolenbroek 	return (doit(rwl, type, ISC_TRUE));
723*00b67f09SDavid van Moolenbroek }
724*00b67f09SDavid van Moolenbroek 
725*00b67f09SDavid van Moolenbroek isc_result_t
isc_rwlock_tryupgrade(isc_rwlock_t * rwl)726*00b67f09SDavid van Moolenbroek isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
727*00b67f09SDavid van Moolenbroek 	isc_result_t result = ISC_R_SUCCESS;
728*00b67f09SDavid van Moolenbroek 
729*00b67f09SDavid van Moolenbroek 	REQUIRE(VALID_RWLOCK(rwl));
730*00b67f09SDavid van Moolenbroek 	LOCK(&rwl->lock);
731*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl->type == isc_rwlocktype_read);
732*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl->active != 0);
733*00b67f09SDavid van Moolenbroek 
734*00b67f09SDavid van Moolenbroek 	/* If we are the only reader then succeed. */
735*00b67f09SDavid van Moolenbroek 	if (rwl->active == 1) {
736*00b67f09SDavid van Moolenbroek 		rwl->original = (rwl->original == isc_rwlocktype_none) ?
737*00b67f09SDavid van Moolenbroek 				isc_rwlocktype_read : isc_rwlocktype_none;
738*00b67f09SDavid van Moolenbroek 		rwl->type = isc_rwlocktype_write;
739*00b67f09SDavid van Moolenbroek 	} else
740*00b67f09SDavid van Moolenbroek 		result = ISC_R_LOCKBUSY;
741*00b67f09SDavid van Moolenbroek 
742*00b67f09SDavid van Moolenbroek 	UNLOCK(&rwl->lock);
743*00b67f09SDavid van Moolenbroek 	return (result);
744*00b67f09SDavid van Moolenbroek }
745*00b67f09SDavid van Moolenbroek 
/*
 * Convert a held write lock on 'rwl' into a read lock without
 * releasing it, and resume readers where appropriate.
 *
 * The caller MUST hold 'rwl' for writing as the sole holder.
 */
void
isc_rwlock_downgrade(isc_rwlock_t *rwl) {

	REQUIRE(VALID_RWLOCK(rwl));
	LOCK(&rwl->lock);
	REQUIRE(rwl->type == isc_rwlocktype_write);
	REQUIRE(rwl->active == 1);

	rwl->type = isc_rwlocktype_read;
	/*
	 * Toggle the bookkeeping: a downgrade that cancels a previous
	 * tryupgrade resets 'original' to none; a downgrade of a direct
	 * write lock records write as the mode to restore at unlock.
	 */
	rwl->original = (rwl->original == isc_rwlocktype_none) ?
			isc_rwlocktype_write : isc_rwlocktype_none;
	/*
	 * Resume processing any read request that were blocked when
	 * we upgraded.
	 *
	 * Note: this tests 'original' AFTER the toggle above, i.e. only
	 * when this downgrade undoes an earlier upgrade.
	 */
	if (rwl->original == isc_rwlocktype_none &&
	    (rwl->writers_waiting == 0 || rwl->granted < rwl->read_quota) &&
	    rwl->readers_waiting > 0)
		BROADCAST(&rwl->readable);

	UNLOCK(&rwl->lock);
}
768*00b67f09SDavid van Moolenbroek 
/*
 * Release a lock previously acquired on 'rwl' with the given 'type'
 * (mutex+condvar fallback build).  When the last holder leaves, any
 * earlier upgrade/downgrade is unwound via 'original', and the next
 * waiters are chosen according to the read/write quotas.
 *
 * Always returns ISC_R_SUCCESS.
 */
isc_result_t
isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {

	REQUIRE(VALID_RWLOCK(rwl));
	LOCK(&rwl->lock);
	REQUIRE(rwl->type == type);

	UNUSED(type);

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_PREUNLOCK, "preunlock"), rwl, type);
#endif

	INSIST(rwl->active > 0);
	rwl->active--;
	if (rwl->active == 0) {
		/* Restore the mode saved by a pending upgrade/downgrade. */
		if (rwl->original != isc_rwlocktype_none) {
			rwl->type = rwl->original;
			rwl->original = isc_rwlocktype_none;
		}
		if (rwl->type == isc_rwlocktype_read) {
			rwl->granted = 0;
			if (rwl->writers_waiting > 0) {
				/* Hand over to one queued writer. */
				rwl->type = isc_rwlocktype_write;
				SIGNAL(&rwl->writeable);
			} else if (rwl->readers_waiting > 0) {
				/* Does this case ever happen? */
				BROADCAST(&rwl->readable);
			}
		} else {
			if (rwl->readers_waiting > 0) {
				if (rwl->writers_waiting > 0 &&
				    rwl->granted < rwl->write_quota) {
					/* Quota allows another writer. */
					SIGNAL(&rwl->writeable);
				} else {
					/* Quota reached: switch to readers. */
					rwl->granted = 0;
					rwl->type = isc_rwlocktype_read;
					BROADCAST(&rwl->readable);
				}
			} else if (rwl->writers_waiting > 0) {
				rwl->granted = 0;
				SIGNAL(&rwl->writeable);
			} else {
				rwl->granted = 0;
			}
		}
	}
	/*
	 * NOTE(review): this runs even when active > 0.  If readers can
	 * join a lock that was downgraded from a direct write lock
	 * ('original' == write), an early reader unlock would trip this
	 * assertion — verify that usage pattern cannot occur.
	 */
	INSIST(rwl->original == isc_rwlocktype_none);

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_POSTUNLOCK, "postunlock"),
		   rwl, type);
#endif

	UNLOCK(&rwl->lock);

	return (ISC_R_SUCCESS);
}
829*00b67f09SDavid van Moolenbroek 
830*00b67f09SDavid van Moolenbroek #endif /* ISC_PLATFORM_HAVEXADD && ISC_PLATFORM_HAVECMPXCHG */
831*00b67f09SDavid van Moolenbroek #endif /* !ISC_PLATFORM_USE_NATIVE_RWLOCKS */
832*00b67f09SDavid van Moolenbroek #else /* ISC_PLATFORM_USETHREADS */
833*00b67f09SDavid van Moolenbroek 
834*00b67f09SDavid van Moolenbroek isc_result_t
isc_rwlock_init(isc_rwlock_t * rwl,unsigned int read_quota,unsigned int write_quota)835*00b67f09SDavid van Moolenbroek isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
836*00b67f09SDavid van Moolenbroek 		unsigned int write_quota)
837*00b67f09SDavid van Moolenbroek {
838*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl != NULL);
839*00b67f09SDavid van Moolenbroek 
840*00b67f09SDavid van Moolenbroek 	UNUSED(read_quota);
841*00b67f09SDavid van Moolenbroek 	UNUSED(write_quota);
842*00b67f09SDavid van Moolenbroek 
843*00b67f09SDavid van Moolenbroek 	rwl->type = isc_rwlocktype_read;
844*00b67f09SDavid van Moolenbroek 	rwl->active = 0;
845*00b67f09SDavid van Moolenbroek 	rwl->magic = RWLOCK_MAGIC;
846*00b67f09SDavid van Moolenbroek 
847*00b67f09SDavid van Moolenbroek 	return (ISC_R_SUCCESS);
848*00b67f09SDavid van Moolenbroek }
849*00b67f09SDavid van Moolenbroek 
850*00b67f09SDavid van Moolenbroek isc_result_t
isc_rwlock_lock(isc_rwlock_t * rwl,isc_rwlocktype_t type)851*00b67f09SDavid van Moolenbroek isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
852*00b67f09SDavid van Moolenbroek 	REQUIRE(VALID_RWLOCK(rwl));
853*00b67f09SDavid van Moolenbroek 
854*00b67f09SDavid van Moolenbroek 	if (type == isc_rwlocktype_read) {
855*00b67f09SDavid van Moolenbroek 		if (rwl->type != isc_rwlocktype_read && rwl->active != 0)
856*00b67f09SDavid van Moolenbroek 			return (ISC_R_LOCKBUSY);
857*00b67f09SDavid van Moolenbroek 		rwl->type = isc_rwlocktype_read;
858*00b67f09SDavid van Moolenbroek 		rwl->active++;
859*00b67f09SDavid van Moolenbroek 	} else {
860*00b67f09SDavid van Moolenbroek 		if (rwl->active != 0)
861*00b67f09SDavid van Moolenbroek 			return (ISC_R_LOCKBUSY);
862*00b67f09SDavid van Moolenbroek 		rwl->type = isc_rwlocktype_write;
863*00b67f09SDavid van Moolenbroek 		rwl->active = 1;
864*00b67f09SDavid van Moolenbroek 	}
865*00b67f09SDavid van Moolenbroek 	return (ISC_R_SUCCESS);
866*00b67f09SDavid van Moolenbroek }
867*00b67f09SDavid van Moolenbroek 
868*00b67f09SDavid van Moolenbroek isc_result_t
isc_rwlock_trylock(isc_rwlock_t * rwl,isc_rwlocktype_t type)869*00b67f09SDavid van Moolenbroek isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
870*00b67f09SDavid van Moolenbroek 	return (isc_rwlock_lock(rwl, type));
871*00b67f09SDavid van Moolenbroek }
872*00b67f09SDavid van Moolenbroek 
873*00b67f09SDavid van Moolenbroek isc_result_t
isc_rwlock_tryupgrade(isc_rwlock_t * rwl)874*00b67f09SDavid van Moolenbroek isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
875*00b67f09SDavid van Moolenbroek 	isc_result_t result = ISC_R_SUCCESS;
876*00b67f09SDavid van Moolenbroek 
877*00b67f09SDavid van Moolenbroek 	REQUIRE(VALID_RWLOCK(rwl));
878*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl->type == isc_rwlocktype_read);
879*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl->active != 0);
880*00b67f09SDavid van Moolenbroek 
881*00b67f09SDavid van Moolenbroek 	/* If we are the only reader then succeed. */
882*00b67f09SDavid van Moolenbroek 	if (rwl->active == 1)
883*00b67f09SDavid van Moolenbroek 		rwl->type = isc_rwlocktype_write;
884*00b67f09SDavid van Moolenbroek 	else
885*00b67f09SDavid van Moolenbroek 		result = ISC_R_LOCKBUSY;
886*00b67f09SDavid van Moolenbroek 	return (result);
887*00b67f09SDavid van Moolenbroek }
888*00b67f09SDavid van Moolenbroek 
889*00b67f09SDavid van Moolenbroek void
isc_rwlock_downgrade(isc_rwlock_t * rwl)890*00b67f09SDavid van Moolenbroek isc_rwlock_downgrade(isc_rwlock_t *rwl) {
891*00b67f09SDavid van Moolenbroek 
892*00b67f09SDavid van Moolenbroek 	REQUIRE(VALID_RWLOCK(rwl));
893*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl->type == isc_rwlocktype_write);
894*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl->active == 1);
895*00b67f09SDavid van Moolenbroek 
896*00b67f09SDavid van Moolenbroek 	rwl->type = isc_rwlocktype_read;
897*00b67f09SDavid van Moolenbroek }
898*00b67f09SDavid van Moolenbroek 
899*00b67f09SDavid van Moolenbroek isc_result_t
isc_rwlock_unlock(isc_rwlock_t * rwl,isc_rwlocktype_t type)900*00b67f09SDavid van Moolenbroek isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
901*00b67f09SDavid van Moolenbroek 	REQUIRE(VALID_RWLOCK(rwl));
902*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl->type == type);
903*00b67f09SDavid van Moolenbroek 
904*00b67f09SDavid van Moolenbroek 	UNUSED(type);
905*00b67f09SDavid van Moolenbroek 
906*00b67f09SDavid van Moolenbroek 	INSIST(rwl->active > 0);
907*00b67f09SDavid van Moolenbroek 	rwl->active--;
908*00b67f09SDavid van Moolenbroek 
909*00b67f09SDavid van Moolenbroek 	return (ISC_R_SUCCESS);
910*00b67f09SDavid van Moolenbroek }
911*00b67f09SDavid van Moolenbroek 
912*00b67f09SDavid van Moolenbroek void
isc_rwlock_destroy(isc_rwlock_t * rwl)913*00b67f09SDavid van Moolenbroek isc_rwlock_destroy(isc_rwlock_t *rwl) {
914*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl != NULL);
915*00b67f09SDavid van Moolenbroek 	REQUIRE(rwl->active == 0);
916*00b67f09SDavid van Moolenbroek 	rwl->magic = 0;
917*00b67f09SDavid van Moolenbroek }
918*00b67f09SDavid van Moolenbroek 
919*00b67f09SDavid van Moolenbroek #endif /* ISC_PLATFORM_USETHREADS */
920