xref: /freebsd/sys/sys/rwlock.h (revision 95ee2897)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_RWLOCK_H_
#define _SYS_RWLOCK_H_

#include <sys/_lock.h>
#include <sys/_rwlock.h>
#include <sys/lock_profile.h>
#include <sys/lockstat.h>

#ifdef _KERNEL
#include <sys/pcpu.h>
#include <machine/atomic.h>
#endif

/*
 * The rw_lock member encodes several fields.  The low bit indicates
 * if the lock is locked with a read (shared) or write (exclusive) lock.
 * A value of 0 indicates a write lock, and a value of 1 indicates a read
 * lock.  Bit 1 is a boolean indicating if there are any threads waiting
 * for a read lock.  Bit 2 is a boolean indicating if there are any threads
 * waiting for a write lock.  The rest of the variable's definition depends
 * on the value of the first bit.  For a write lock, it is a pointer to the
 * thread holding the lock, similar to the mtx_lock field of mutexes.  For
 * read locks, it is a count of read locks that are held.
 *
 * When the lock is not locked by any thread, it is encoded as a read lock
 * with zero waiters.
 */

#define	RW_LOCK_READ		0x01
#define	RW_LOCK_READ_WAITERS	0x02
#define	RW_LOCK_WRITE_WAITERS	0x04
#define	RW_LOCK_WRITE_SPINNER	0x08
#define	RW_LOCK_WRITER_RECURSED	0x10
#define	RW_LOCK_FLAGMASK						\
	(RW_LOCK_READ | RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS |	\
	RW_LOCK_WRITE_SPINNER | RW_LOCK_WRITER_RECURSED)
#define	RW_LOCK_WAITERS		(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)

#define	RW_OWNER(x)		((x) & ~RW_LOCK_FLAGMASK)
#define	RW_READERS_SHIFT	5
#define	RW_READERS(x)		(RW_OWNER((x)) >> RW_READERS_SHIFT)
#define	RW_READERS_LOCK(x)	((x) << RW_READERS_SHIFT | RW_LOCK_READ)
#define	RW_ONE_READER		(1 << RW_READERS_SHIFT)

#define	RW_UNLOCKED		RW_READERS_LOCK(0)
#define	RW_DESTROYED		(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)
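
/*
 * Illustrative sketch (not part of the KPI): how the macros above decode a
 * lock word, given the bit layout described in the comment at the top of
 * this section.  The variable names are hypothetical.
 *
 *	uintptr_t v = RW_READ_VALUE(rw);
 *
 *	if (v & RW_LOCK_READ) {
 *		// Read-locked (or unlocked): RW_READERS(v) is the number of
 *		// readers.  RW_UNLOCKED is just a read lock with zero readers
 *		// and no waiter bits set.
 *		int nreaders = RW_READERS(v);
 *	} else {
 *		// Write-locked: the word (minus the flag bits) is the owning
 *		// thread pointer.
 *		struct thread *owner = (struct thread *)RW_OWNER(v);
 *	}
 */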

#ifdef _KERNEL

#define	rw_recurse	lock_object.lo_data

#define	RW_READ_VALUE(x)	((x)->rw_lock)

/* Very simple operations on rw_lock. */

/* Try to obtain a write lock once. */
#define	_rw_write_lock(rw, tid)						\
	atomic_cmpset_acq_ptr(&(rw)->rw_lock, RW_UNLOCKED, (tid))

#define	_rw_write_lock_fetch(rw, vp, tid)				\
	atomic_fcmpset_acq_ptr(&(rw)->rw_lock, vp, (tid))

/* Release a write lock quickly if there are no waiters. */
#define	_rw_write_unlock(rw, tid)					\
	atomic_cmpset_rel_ptr(&(rw)->rw_lock, (tid), RW_UNLOCKED)

#define	_rw_write_unlock_fetch(rw, tid)					\
	atomic_fcmpset_rel_ptr(&(rw)->rw_lock, (tid), RW_UNLOCKED)

/*
 * Full lock operations that are suitable to be inlined in non-debug
 * kernels.  If the lock cannot be acquired or released trivially then
 * the work is deferred to another function.
 */

/* Acquire a write lock. */
#define	__rw_wlock(rw, tid, file, line) __extension__ ({		\
	uintptr_t _tid = (uintptr_t)(tid);				\
	uintptr_t _v = RW_UNLOCKED;					\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__acquire) ||	\
	    !_rw_write_lock_fetch((rw), &_v, _tid)))			\
		_rw_wlock_hard((rw), _v, (file), (line));		\
	(void)0; /* ensure void type for expression */			\
})

/* Release a write lock. */
#define	__rw_wunlock(rw, tid, file, line) __extension__ ({		\
	uintptr_t _v = (uintptr_t)(tid);				\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__release) ||	\
	    !_rw_write_unlock_fetch((rw), &_v)))			\
		_rw_wunlock_hard((rw), _v, (file), (line));		\
	(void)0; /* ensure void type for expression */			\
})
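
/*
 * Illustrative sketch (assumed expansion, hypothetical variable names): in
 * the uncontended case the inline macros above reduce to a single
 * compare-and-swap on rw_lock.
 *
 *	uintptr_t v;
 *
 *	// acquire fast path: swap RW_UNLOCKED for the owning thread pointer
 *	v = RW_UNLOCKED;
 *	atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, (uintptr_t)curthread);
 *
 *	// release fast path: swap the thread pointer back to RW_UNLOCKED
 *	v = (uintptr_t)curthread;
 *	atomic_fcmpset_rel_ptr(&rw->rw_lock, &v, RW_UNLOCKED);
 *
 * If either CAS fails (waiters, recursion, or a read-locked word), the work
 * is deferred to _rw_wlock_hard()/_rw_wunlock_hard().
 */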

/*
 * Function prototypes.  Routines that start with _ are not part of the
 * external API and should not be called directly.  Wrapper macros should
 * be used instead.
 */
void	_rw_init_flags(volatile uintptr_t *c, const char *name, int opts);
void	_rw_destroy(volatile uintptr_t *c);
void	rw_sysinit(void *arg);
int	_rw_wowned(const volatile uintptr_t *c);
void	_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line);
int	__rw_try_wlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
int	__rw_try_wlock(volatile uintptr_t *c, const char *file, int line);
void	_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line);
void	__rw_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
void	__rw_rlock(volatile uintptr_t *c, const char *file, int line);
int	__rw_try_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
int	__rw_try_rlock(volatile uintptr_t *c, const char *file, int line);
void	_rw_runlock_cookie_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
void	_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line);
void	__rw_wlock_hard(volatile uintptr_t *c, uintptr_t v
	    LOCK_FILE_LINE_ARG_DEF);
void	__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t v
	    LOCK_FILE_LINE_ARG_DEF);
int	__rw_try_upgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
int	__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line);
void	__rw_downgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
void	__rw_downgrade(volatile uintptr_t *c, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void	__rw_assert(const volatile uintptr_t *c, int what, const char *file,
	    int line);
#endif

/*
 * Top-level macros that supply the lock cookie once the actual rwlock is
 * passed.  They also prevent passing a malformed object to the rwlock KPI
 * by failing compilation, since the reserved rw_lock member will not be
 * found on such an object.
 */
#define	rw_init(rw, n)							\
	_rw_init_flags(&(rw)->rw_lock, n, 0)
#define	rw_init_flags(rw, n, o)						\
	_rw_init_flags(&(rw)->rw_lock, n, o)
#define	rw_destroy(rw)							\
	_rw_destroy(&(rw)->rw_lock)
#define	rw_wowned(rw)							\
	_rw_wowned(&(rw)->rw_lock)
#define	_rw_wlock(rw, f, l)						\
	_rw_wlock_cookie(&(rw)->rw_lock, f, l)
#define	_rw_try_wlock(rw, f, l)						\
	__rw_try_wlock(&(rw)->rw_lock, f, l)
#define	_rw_wunlock(rw, f, l)						\
	_rw_wunlock_cookie(&(rw)->rw_lock, f, l)
#define	_rw_try_rlock(rw, f, l)						\
	__rw_try_rlock(&(rw)->rw_lock, f, l)
#if LOCK_DEBUG > 0
#define	_rw_rlock(rw, f, l)						\
	__rw_rlock(&(rw)->rw_lock, f, l)
#define	_rw_runlock(rw, f, l)						\
	_rw_runlock_cookie(&(rw)->rw_lock, f, l)
#else
#define	_rw_rlock(rw, f, l)						\
	__rw_rlock_int((struct rwlock *)rw)
#define	_rw_runlock(rw, f, l)						\
	_rw_runlock_cookie_int((struct rwlock *)rw)
#endif
#if LOCK_DEBUG > 0
#define	_rw_wlock_hard(rw, v, f, l)					\
	__rw_wlock_hard(&(rw)->rw_lock, v, f, l)
#define	_rw_wunlock_hard(rw, v, f, l)					\
	__rw_wunlock_hard(&(rw)->rw_lock, v, f, l)
#define	_rw_try_upgrade(rw, f, l)					\
	__rw_try_upgrade(&(rw)->rw_lock, f, l)
#define	_rw_downgrade(rw, f, l)						\
	__rw_downgrade(&(rw)->rw_lock, f, l)
#else
#define	_rw_wlock_hard(rw, v, f, l)					\
	__rw_wlock_hard(&(rw)->rw_lock, v)
#define	_rw_wunlock_hard(rw, v, f, l)					\
	__rw_wunlock_hard(&(rw)->rw_lock, v)
#define	_rw_try_upgrade(rw, f, l)					\
	__rw_try_upgrade_int(rw)
#define	_rw_downgrade(rw, f, l)						\
	__rw_downgrade_int(rw)
#endif
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	_rw_assert(rw, w, f, l)						\
	__rw_assert(&(rw)->rw_lock, w, f, l)
#endif

/*
 * Public interface for lock operations.
 */

#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/rwlock.h>
#endif
#if LOCK_DEBUG > 0 || defined(RWLOCK_NOINLINE)
#define	rw_wlock(rw)		_rw_wlock((rw), LOCK_FILE, LOCK_LINE)
#define	rw_wunlock(rw)		_rw_wunlock((rw), LOCK_FILE, LOCK_LINE)
#else
#define	rw_wlock(rw)							\
	__rw_wlock((rw), curthread, LOCK_FILE, LOCK_LINE)
#define	rw_wunlock(rw)							\
	__rw_wunlock((rw), curthread, LOCK_FILE, LOCK_LINE)
#endif
#define	rw_rlock(rw)		_rw_rlock((rw), LOCK_FILE, LOCK_LINE)
#define	rw_runlock(rw)		_rw_runlock((rw), LOCK_FILE, LOCK_LINE)
#define	rw_try_rlock(rw)	_rw_try_rlock((rw), LOCK_FILE, LOCK_LINE)
#define	rw_try_upgrade(rw)	_rw_try_upgrade((rw), LOCK_FILE, LOCK_LINE)
#define	rw_try_wlock(rw)	_rw_try_wlock((rw), LOCK_FILE, LOCK_LINE)
#define	rw_downgrade(rw)	_rw_downgrade((rw), LOCK_FILE, LOCK_LINE)
#define	rw_unlock(rw)	__extension__ ({				\
	if (rw_wowned(rw))						\
		rw_wunlock(rw);						\
	else								\
		rw_runlock(rw);						\
	(void)0; /* ensure void type for expression */			\
})
#define	rw_sleep(chan, rw, pri, wmesg, timo)				\
	_sleep((chan), &(rw)->lock_object, (pri), (wmesg),		\
	    tick_sbt * (timo), 0, C_HARDCLOCK)

#define	rw_initialized(rw)	lock_initialized(&(rw)->lock_object)

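/*
 * Illustrative usage sketch (not part of this header): the typical lifecycle
 * of an rwlock using the public macros above.  The lock and data names are
 * hypothetical; see rwlock(9) for the full KPI.
 *
 *	static struct rwlock example_lock;
 *	static int example_data;
 *
 *	rw_init(&example_lock, "example lock");
 *
 *	rw_rlock(&example_lock);		// shared: many concurrent readers
 *	(void)example_data;
 *	rw_runlock(&example_lock);
 *
 *	rw_wlock(&example_lock);		// exclusive: single writer
 *	example_data = 1;
 *	rw_wunlock(&example_lock);
 *
 *	rw_rlock(&example_lock);
 *	if (rw_try_upgrade(&example_lock)) {
 *		// now write-locked; downgrade back to a read lock
 *		rw_downgrade(&example_lock);
 *	}
 *	rw_runlock(&example_lock);
 *
 *	rw_destroy(&example_lock);
 */
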
struct rw_args {
	void		*ra_rw;
	const char 	*ra_desc;
	int		ra_flags;
};

#define	RW_SYSINIT_FLAGS(name, rw, desc, flags)				\
	static struct rw_args name##_args = {				\
		(rw),							\
		(desc),							\
		(flags),						\
	};								\
	SYSINIT(name##_rw_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    rw_sysinit, &name##_args);					\
	SYSUNINIT(name##_rw_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    _rw_destroy, __DEVOLATILE(void *, &(rw)->rw_lock))

#define	RW_SYSINIT(name, rw, desc)	RW_SYSINIT_FLAGS(name, rw, desc, 0)
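
/*
 * Illustrative sketch (hypothetical names): declaring a lock that is set up
 * automatically at boot via SYSINIT, equivalent to calling rw_init() from an
 * initialization routine and rw_destroy() at teardown.
 *
 *	static struct rwlock foo_lock;
 *	RW_SYSINIT(foo_lock_init, &foo_lock, "foo lock");
 */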

/*
 * Options passed to rw_init_flags().
 */
#define	RW_DUPOK	0x01
#define	RW_NOPROFILE	0x02
#define	RW_NOWITNESS	0x04
#define	RW_QUIET	0x08
#define	RW_RECURSE	0x10
#define	RW_NEW		0x20
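
/*
 * Illustrative sketch (hypothetical lock name): passing an option at
 * initialization time, e.g. permitting recursive write acquisition.
 *
 *	rw_init_flags(&bar_lock, "bar lock", RW_RECURSE);
 */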

/*
 * The INVARIANTS-enabled rw_assert() functionality.
 *
 * The constants also need to be defined for the INVARIANT_SUPPORT
 * infrastructure, since _rw_assert() itself uses them and INVARIANT_SUPPORT
 * implies that _rw_assert() must build.
 */
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	RA_LOCKED		LA_LOCKED
#define	RA_RLOCKED		LA_SLOCKED
#define	RA_WLOCKED		LA_XLOCKED
#define	RA_UNLOCKED		LA_UNLOCKED
#define	RA_RECURSED		LA_RECURSED
#define	RA_NOTRECURSED		LA_NOTRECURSED
#endif

#ifdef INVARIANTS
#define	rw_assert(rw, what)	_rw_assert((rw), (what), LOCK_FILE, LOCK_LINE)
#else
#define	rw_assert(rw, what)
#endif
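
/*
 * Illustrative sketch (hypothetical lock name): asserting lock state in
 * INVARIANTS kernels; the assertion compiles away otherwise.
 *
 *	rw_assert(&foo_lock, RA_WLOCKED);	// caller must hold the write lock
 *	rw_assert(&foo_lock, RA_LOCKED);	// either read or write lock is fine
 */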

#endif /* _KERNEL */
#endif /* !_SYS_RWLOCK_H_ */