xref: /freebsd/sys/sys/rwlock.h (revision 0957b409)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #ifndef _SYS_RWLOCK_H_
32 #define _SYS_RWLOCK_H_
33 
34 #include <sys/_lock.h>
35 #include <sys/_rwlock.h>
36 #include <sys/lock_profile.h>
37 #include <sys/lockstat.h>
38 
39 #ifdef _KERNEL
40 #include <sys/pcpu.h>
41 #include <machine/atomic.h>
42 #endif
43 
/*
 * The rw_lock field consists of several fields.  The low bit indicates
 * if the lock is locked with a read (shared) or write (exclusive) lock.
 * A value of 0 indicates a write lock, and a value of 1 indicates a read
 * lock.  Bit 1 is a boolean indicating if there are any threads waiting
 * for a read lock.  Bit 2 is a boolean indicating if there are any threads
 * waiting for a write lock.  The rest of the variable's definition is
 * dependent on the value of the first bit.  For a write lock, it is a
 * pointer to the thread holding the lock, similar to the mtx_lock field of
 * mutexes.  For read locks, it is a count of read locks that are held.
 *
 * When the lock is not locked by any thread, it is encoded as a read lock
 * with zero waiters.
 */

#define	RW_LOCK_READ		0x01	/* shared (read) lock, or unlocked */
#define	RW_LOCK_READ_WAITERS	0x02	/* thread(s) blocked for a read lock */
#define	RW_LOCK_WRITE_WAITERS	0x04	/* thread(s) blocked for a write lock */
#define	RW_LOCK_WRITE_SPINNER	0x08	/* a would-be writer is spinning */
#define	RW_LOCK_WRITER_RECURSED	0x10	/* write lock held recursively */
/* All of the flag bits; the remaining bits are owner pointer/reader count. */
#define	RW_LOCK_FLAGMASK						\
	(RW_LOCK_READ | RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS |	\
	RW_LOCK_WRITE_SPINNER | RW_LOCK_WRITER_RECURSED)
#define	RW_LOCK_WAITERS		(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)

/* Owning thread pointer of a write-locked lock (flag bits stripped). */
#define	RW_OWNER(x)		((x) & ~RW_LOCK_FLAGMASK)
#define	RW_READERS_SHIFT	5	/* reader count sits above the flags */
#define	RW_READERS(x)		(RW_OWNER((x)) >> RW_READERS_SHIFT)
#define	RW_READERS_LOCK(x)	((x) << RW_READERS_SHIFT | RW_LOCK_READ)
#define	RW_ONE_READER		(1 << RW_READERS_SHIFT)

/* Unlocked is encoded as a read lock with zero readers and no waiters. */
#define	RW_UNLOCKED		RW_READERS_LOCK(0)
/* Value stored in a destroyed lock; distinct from any valid lock state. */
#define	RW_DESTROYED		(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)
77 
#ifdef _KERNEL

/* Write-lock recursion count lives in the generic lock_object data word. */
#define	rw_recurse	lock_object.lo_data

/* Plain (non-atomic) read of the current lock word. */
#define	RW_READ_VALUE(x)	((x)->rw_lock)

/* Very simple operations on rw_lock. */

/* Try to obtain a write lock once. */
#define	_rw_write_lock(rw, tid)						\
	atomic_cmpset_acq_ptr(&(rw)->rw_lock, RW_UNLOCKED, (tid))

/*
 * As above, but fcmpset-based: on failure the observed lock value is
 * written back through vp (see atomic(9)), saving a re-read in the caller.
 */
#define	_rw_write_lock_fetch(rw, vp, tid)				\
	atomic_fcmpset_acq_ptr(&(rw)->rw_lock, vp, (tid))

/* Release a write lock quickly if there are no waiters. */
#define	_rw_write_unlock(rw, tid)					\
	atomic_cmpset_rel_ptr(&(rw)->rw_lock, (tid), RW_UNLOCKED)

/*
 * As above, but fcmpset-based; here "tid" is a pointer to the expected
 * owner value, updated with the observed lock value on failure (the
 * callers below pass &_v).
 */
#define	_rw_write_unlock_fetch(rw, tid)					\
	atomic_fcmpset_rel_ptr(&(rw)->rw_lock, (tid), RW_UNLOCKED)
99 
/*
 * Full lock operations that are suitable to be inlined in non-debug
 * kernels.  If the lock cannot be acquired or released trivially then
 * the work is deferred to another function.
 */

/*
 * Acquire a write lock.  The fast path is a single fcmpset of
 * RW_UNLOCKED -> tid.  If that fails, or if lockstat rw__acquire probes
 * are enabled (forcing the slow path so the event can be recorded),
 * fall back to _rw_wlock_hard() with the lock value the failed fcmpset
 * left in _v.
 */
#define	__rw_wlock(rw, tid, file, line) do {				\
	uintptr_t _tid = (uintptr_t)(tid);				\
	uintptr_t _v = RW_UNLOCKED;					\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__acquire) ||	\
	    !_rw_write_lock_fetch((rw), &_v, _tid)))			\
		_rw_wlock_hard((rw), _v, (file), (line));		\
} while (0)

/*
 * Release a write lock.  The fast path is a single fcmpset of
 * tid -> RW_UNLOCKED; on failure (e.g. waiters are present), or with
 * lockstat rw__release probes enabled, defer to _rw_wunlock_hard()
 * with the observed lock value in _v.
 */
#define	__rw_wunlock(rw, tid, file, line) do {				\
	uintptr_t _v = (uintptr_t)(tid);				\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__release) ||	\
	    !_rw_write_unlock_fetch((rw), &_v)))			\
		_rw_wunlock_hard((rw), _v, (file), (line));		\
} while (0)
124 
/*
 * Function prototypes.  Routines that start with _ are not part of the
 * external API and should not be called directly.  Wrapper macros should
 * be used instead.
 *
 * The *_int variants take the struct rwlock directly; the others take a
 * pointer to the lock word (the "cookie") so the wrapper macros below can
 * type-check their argument.  NOTE(review): LOCK_FILE_LINE_ARG_DEF comes
 * from <sys/lock.h> and presumably adds file/line parameters only in
 * LOCK_DEBUG kernels — compare the LOCK_DEBUG dispatch further down.
 */
void	_rw_init_flags(volatile uintptr_t *c, const char *name, int opts);
void	_rw_destroy(volatile uintptr_t *c);
void	rw_sysinit(void *arg);
int	_rw_wowned(const volatile uintptr_t *c);
void	_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line);
int	__rw_try_wlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
int	__rw_try_wlock(volatile uintptr_t *c, const char *file, int line);
void	_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line);
void	__rw_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
void	__rw_rlock(volatile uintptr_t *c, const char *file, int line);
int	__rw_try_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
int	__rw_try_rlock(volatile uintptr_t *c, const char *file, int line);
void	_rw_runlock_cookie_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
void	_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line);
/* Slow paths taken by the inline fast-path macros above; v is the lock
 * value observed by the failed fast-path fcmpset. */
void	__rw_wlock_hard(volatile uintptr_t *c, uintptr_t v
	    LOCK_FILE_LINE_ARG_DEF);
void	__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t v
	    LOCK_FILE_LINE_ARG_DEF);
int	__rw_try_upgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
int	__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line);
void	__rw_downgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
void	__rw_downgrade(volatile uintptr_t *c, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void	__rw_assert(const volatile uintptr_t *c, int what, const char *file,
	    int line);
#endif
156 
/*
 * Top-level macros to provide lock cookie once the actual rwlock is passed.
 * They will also prevent passing a malformed object to the rwlock KPI by
 * failing compilation as the rw_lock reserved member will not be found.
 */
#define	rw_init(rw, n)							\
	_rw_init_flags(&(rw)->rw_lock, n, 0)
#define	rw_init_flags(rw, n, o)						\
	_rw_init_flags(&(rw)->rw_lock, n, o)
#define	rw_destroy(rw)							\
	_rw_destroy(&(rw)->rw_lock)
#define	rw_wowned(rw)							\
	_rw_wowned(&(rw)->rw_lock)
#define	_rw_wlock(rw, f, l)						\
	_rw_wlock_cookie(&(rw)->rw_lock, f, l)
#define	_rw_try_wlock(rw, f, l)						\
	__rw_try_wlock(&(rw)->rw_lock, f, l)
#define	_rw_wunlock(rw, f, l)						\
	_rw_wunlock_cookie(&(rw)->rw_lock, f, l)
#define	_rw_try_rlock(rw, f, l)						\
	__rw_try_rlock(&(rw)->rw_lock, f, l)
/*
 * Read lock/unlock: debug kernels route through the cookie functions so
 * the file/line arguments are passed along; non-debug kernels drop f/l
 * and call the *_int variants with the rwlock itself.
 */
#if LOCK_DEBUG > 0
#define	_rw_rlock(rw, f, l)						\
	__rw_rlock(&(rw)->rw_lock, f, l)
#define	_rw_runlock(rw, f, l)						\
	_rw_runlock_cookie(&(rw)->rw_lock, f, l)
#else
#define	_rw_rlock(rw, f, l)						\
	__rw_rlock_int((struct rwlock *)rw)
#define	_rw_runlock(rw, f, l)						\
	_rw_runlock_cookie_int((struct rwlock *)rw)
#endif
/* Same debug/non-debug split for the write slow paths and up/downgrade. */
#if LOCK_DEBUG > 0
#define	_rw_wlock_hard(rw, v, f, l)					\
	__rw_wlock_hard(&(rw)->rw_lock, v, f, l)
#define	_rw_wunlock_hard(rw, v, f, l)					\
	__rw_wunlock_hard(&(rw)->rw_lock, v, f, l)
#define	_rw_try_upgrade(rw, f, l)					\
	__rw_try_upgrade(&(rw)->rw_lock, f, l)
#define	_rw_downgrade(rw, f, l)						\
	__rw_downgrade(&(rw)->rw_lock, f, l)
#else
#define	_rw_wlock_hard(rw, v, f, l)					\
	__rw_wlock_hard(&(rw)->rw_lock, v)
#define	_rw_wunlock_hard(rw, v, f, l)					\
	__rw_wunlock_hard(&(rw)->rw_lock, v)
#define	_rw_try_upgrade(rw, f, l)					\
	__rw_try_upgrade_int(rw)
#define	_rw_downgrade(rw, f, l)						\
	__rw_downgrade_int(rw)
#endif
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	_rw_assert(rw, w, f, l)						\
	__rw_assert(&(rw)->rw_lock, w, f, l)
#endif
212 
213 
/*
 * Public interface for lock operations.
 */

#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/rwlock.h>
#endif
/*
 * Write lock/unlock: debug or RWLOCK_NOINLINE kernels always call the
 * out-of-line functions; otherwise the inline fast-path macros defined
 * above are used with curthread as the owner tid.
 */
#if LOCK_DEBUG > 0 || defined(RWLOCK_NOINLINE)
#define	rw_wlock(rw)		_rw_wlock((rw), LOCK_FILE, LOCK_LINE)
#define	rw_wunlock(rw)		_rw_wunlock((rw), LOCK_FILE, LOCK_LINE)
#else
#define	rw_wlock(rw)							\
	__rw_wlock((rw), curthread, LOCK_FILE, LOCK_LINE)
#define	rw_wunlock(rw)							\
	__rw_wunlock((rw), curthread, LOCK_FILE, LOCK_LINE)
#endif
#define	rw_rlock(rw)		_rw_rlock((rw), LOCK_FILE, LOCK_LINE)
#define	rw_runlock(rw)		_rw_runlock((rw), LOCK_FILE, LOCK_LINE)
#define	rw_try_rlock(rw)	_rw_try_rlock((rw), LOCK_FILE, LOCK_LINE)
#define	rw_try_upgrade(rw)	_rw_try_upgrade((rw), LOCK_FILE, LOCK_LINE)
#define	rw_try_wlock(rw)	_rw_try_wlock((rw), LOCK_FILE, LOCK_LINE)
#define	rw_downgrade(rw)	_rw_downgrade((rw), LOCK_FILE, LOCK_LINE)
/* Release the lock whichever way the current thread holds it. */
#define	rw_unlock(rw)	do {						\
	if (rw_wowned(rw))						\
		rw_wunlock(rw);						\
	else								\
		rw_runlock(rw);						\
} while (0)
/* Sleep on chan with the rwlock as the interlock; see _sleep()/sleep(9). */
#define	rw_sleep(chan, rw, pri, wmesg, timo)				\
	_sleep((chan), &(rw)->lock_object, (pri), (wmesg),		\
	    tick_sbt * (timo), 0, C_HARDCLOCK)

#define	rw_initialized(rw)	lock_initialized(&(rw)->lock_object)
247 
/*
 * Argument bundle handed to rw_sysinit(); field order matches the
 * initializer in RW_SYSINIT_FLAGS() below.
 */
struct rw_args {
	void		*ra_rw;		/* the rwlock to initialize */
	const char 	*ra_desc;	/* lock name/description */
	int		ra_flags;	/* RW_* option flags */
};

/*
 * Declare SYSINIT/SYSUNINIT hooks (at SI_SUB_LOCK time) that initialize
 * and later destroy a statically declared rwlock.
 */
#define	RW_SYSINIT_FLAGS(name, rw, desc, flags)				\
	static struct rw_args name##_args = {				\
		(rw),							\
		(desc),							\
		(flags),						\
	};								\
	SYSINIT(name##_rw_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    rw_sysinit, &name##_args);					\
	SYSUNINIT(name##_rw_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    _rw_destroy, __DEVOLATILE(void *, &(rw)->rw_lock))

/* Common case: no option flags. */
#define	RW_SYSINIT(name, rw, desc)	RW_SYSINIT_FLAGS(name, rw, desc, 0)
266 
/*
 * Options passed to rw_init_flags(); see rwlock(9) for the full
 * semantics of each flag.
 */
#define	RW_DUPOK	0x01	/* allow duplicate locks of this type */
#define	RW_NOPROFILE	0x02	/* exclude from lock profiling */
#define	RW_NOWITNESS	0x04	/* exclude from witness checking */
#define	RW_QUIET	0x08	/* suppress lock operation logging */
#define	RW_RECURSE	0x10	/* allow recursive write locking */
#define	RW_NEW		0x20	/* skip the double-initialization check */

/*
 * The INVARIANTS-enabled rw_assert() functionality.
 *
 * The constants need to be defined for INVARIANT_SUPPORT infrastructure
 * support as _rw_assert() itself uses them and the latter implies that
 * _rw_assert() must build.
 */
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	RA_LOCKED		LA_LOCKED	/* held shared or exclusive */
#define	RA_RLOCKED		LA_SLOCKED	/* held shared (read) */
#define	RA_WLOCKED		LA_XLOCKED	/* held exclusive (write) */
#define	RA_UNLOCKED		LA_UNLOCKED
#define	RA_RECURSED		LA_RECURSED
#define	RA_NOTRECURSED		LA_NOTRECURSED
#endif

/* rw_assert() compiles away entirely in non-INVARIANTS kernels. */
#ifdef INVARIANTS
#define	rw_assert(rw, what)	_rw_assert((rw), (what), LOCK_FILE, LOCK_LINE)
#else
#define	rw_assert(rw, what)
#endif
298 
299 #endif /* _KERNEL */
300 #endif /* !_SYS_RWLOCK_H_ */
301