1 /* $OpenBSD: rwlock.h,v 1.28 2021/01/11 18:49:38 mpi Exp $ */
2 /*
3 * Copyright (c) 2002 Artur Grabowski <art@openbsd.org>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 /*
19 * Multiple readers, single writer lock.
20 *
21 * Simplistic implementation modelled after rw locks in Solaris.
22 *
23 * The rwl_owner has the following layout:
24 * [ owner or count of readers | wrlock | wrwant | wait ]
25 *
26 * When the WAIT bit is set (bit 0), the lock has waiters sleeping on it.
27 * When the WRWANT bit is set (bit 1), at least one waiter wants a write lock.
28 * When the WRLOCK bit is set (bit 2) the lock is currently write-locked.
29 *
30 * When write locked, the upper bits contain the struct proc * pointer to
31 * the writer, otherwise they count the number of readers.
32 *
33 * We provide a simple machine independent implementation:
34 *
35 * void rw_enter_read(struct rwlock *)
36 * atomically test for RWLOCK_WRLOCK and if not set, increment the lock
37 * by RWLOCK_READ_INCR. While RWLOCK_WRLOCK is set, loop into rw_enter_wait.
38 *
39 * void rw_enter_write(struct rwlock *);
40 * atomically test for the lock being 0 (it's not possible to have
41 * owner/read count unset and waiter bits set) and if 0 set the owner to
42 * the proc and RWLOCK_WRLOCK. While not zero, loop into rw_enter_wait.
43 *
44 * void rw_exit_read(struct rwlock *);
45 * atomically decrement lock by RWLOCK_READ_INCR and unset RWLOCK_WAIT and
46 * RWLOCK_WRWANT remembering the old value of lock and if RWLOCK_WAIT was set,
47 * call rw_exit_waiters with the old contents of the lock.
48 *
49 * void rw_exit_write(struct rwlock *);
50 * atomically swap the contents of the lock with 0 and if RWLOCK_WAIT was
51 * set, call rw_exit_waiters with the old contents of the lock.
52 */
53
54 #ifndef _SYS_RWLOCK_H
55 #define _SYS_RWLOCK_H
56
57 #include <sys/_lock.h>
58
59 struct proc;
60
struct rwlock {
	volatile unsigned long rwl_owner;	/* lock word: owner proc / reader count in the
						 * upper bits, WAIT/WRWANT/WRLOCK in bits 0-2
						 * (see layout comment at top of file) */
	const char *rwl_name;			/* lock name, for diagnostics */
#ifdef WITNESS
	struct lock_object rwl_lock_obj;	/* witness(4) lock-order tracking state */
#endif
};
68
/* Translate RWL_* init flags into lock_object LO_* flags (plain rwlocks). */
#define RWLOCK_LO_FLAGS(flags) \
	((ISSET(flags, RWL_DUPOK) ? LO_DUPOK : 0) |			\
	 (ISSET(flags, RWL_NOWITNESS) ? 0 : LO_WITNESS) |		\
	 (ISSET(flags, RWL_IS_VNODE) ? LO_IS_VNODE : 0) |		\
	 LO_INITIALIZED | LO_SLEEPABLE | LO_UPGRADABLE |		\
	 (LO_CLASS_RWLOCK << LO_CLASSSHIFT))

/* Same, for recursive rwlocks: additionally marks the lock LO_RECURSABLE. */
#define RRWLOCK_LO_FLAGS(flags) \
	((ISSET(flags, RWL_DUPOK) ? LO_DUPOK : 0) |			\
	 (ISSET(flags, RWL_NOWITNESS) ? 0 : LO_WITNESS) |		\
	 (ISSET(flags, RWL_IS_VNODE) ? LO_IS_VNODE : 0) |		\
	 LO_INITIALIZED | LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE | \
	 (LO_CLASS_RRWLOCK << LO_CLASSSHIFT))

/* Static initializer for the embedded lock_object (WITNESS builds). */
#define RWLOCK_LO_INITIALIZER(name, flags) \
	{ .lo_type = &(const struct lock_type){ .lt_name = name }, \
	  .lo_name = (name), \
	  .lo_flags = RWLOCK_LO_FLAGS(flags) }

/* RWL_* flags accepted by rw_init_flags()/rrw_init_flags(). */
#define RWL_DUPOK	0x01	/* allow duplicate (same-type) lock acquisition */
#define RWL_NOWITNESS	0x02	/* exclude this lock from witness(4) checking */
#define RWL_IS_VNODE	0x04	/* lock protects a vnode */

/* Static initializer for a struct rwlock; lock starts unowned (0). */
#ifdef WITNESS
#define RWLOCK_INITIALIZER(name) \
	{ 0, name, .rwl_lock_obj = RWLOCK_LO_INITIALIZER(name, 0) }
#else
#define RWLOCK_INITIALIZER(name) \
	{ 0, name }
#endif
99
/* Flag bits stored in the low bits of rwl_owner (see layout comment above). */
#define RWLOCK_WAIT		0x01UL	/* bit 0: sleepers are waiting on the lock */
#define RWLOCK_WRWANT		0x02UL	/* bit 1: a waiter wants a write lock */
#define RWLOCK_WRLOCK		0x04UL	/* bit 2: lock is currently write-locked */
#define RWLOCK_MASK		0x07UL	/* all flag bits */

/* Owning proc when write-locked: upper bits of rwl_owner, flags masked off. */
#define RWLOCK_OWNER(rwl)	((struct proc *)((rwl)->rwl_owner & ~RWLOCK_MASK))

/* Reader count occupies the bits above the three flag bits. */
#define RWLOCK_READER_SHIFT	3UL
#define RWLOCK_READ_INCR	(1UL << RWLOCK_READER_SHIFT)

/* Operation and behavior flags for rw_enter()/rrw_enter(). */
#define RW_WRITE		0x0001UL /* exclusive lock */
#define RW_READ			0x0002UL /* shared lock */
#define RW_DOWNGRADE		0x0004UL /* downgrade exclusive to shared */
#define RW_OPMASK		0x0007UL

#define RW_INTR			0x0010UL /* interruptible sleep */
#define RW_SLEEPFAIL		0x0020UL /* fail if we slept for the lock */
#define RW_NOSLEEP		0x0040UL /* don't wait for the lock */
#define RW_RECURSEFAIL		0x0080UL /* Fail on recursion for RRW locks. */
#define RW_DUPOK		0x0100UL /* Permit duplicate lock */

/*
 * for rw_status() and rrw_status() only: exclusive lock held by
 * some other thread
 *
 * NOTE(review): value overlaps RW_DUPOK; presumably harmless because this
 * is only a status return value, never combined with the enter flags above
 * -- confirm against kern_rwlock.c before reusing the bit.
 */
#define RW_WRITE_OTHER		0x0100UL
126
/* recursive rwlocks; a writer may re-acquire the lock it already holds */
struct rrwlock {
	struct rwlock	rrwl_lock;	/* underlying plain rwlock */
	uint32_t	rrwl_wcnt;	/* # writers (recursion depth). */
};
132
133 #ifdef _KERNEL
134
/* Initialize a rwlock; "name" is used for diagnostics and witness(4). */
void	_rw_init_flags(struct rwlock *, const char *, int,
	    const struct lock_type *);

#ifdef WITNESS
/*
 * Under WITNESS each rw_init_flags() expansion site gets its own static
 * lock_type, named after the lock expression via #rwl stringification.
 */
#define rw_init_flags(rwl, name, flags) do {				\
	static const struct lock_type __lock_type = { .lt_name = #rwl };\
	_rw_init_flags(rwl, name, flags, &__lock_type);			\
} while (0)
#define rw_init(rwl, name)	rw_init_flags(rwl, name, 0)
#else /* WITNESS */
/* Without WITNESS no lock_type is needed; pass NULL. */
#define rw_init_flags(rwl, name, flags) \
	_rw_init_flags(rwl, name, flags, NULL)
#define rw_init(rwl, name)	_rw_init_flags(rwl, name, 0, NULL)
#endif /* WITNESS */
149
/* Simple enter/exit primitives; see the algorithm comment at top of file. */
void	rw_enter_read(struct rwlock *);
void	rw_enter_write(struct rwlock *);
void	rw_exit_read(struct rwlock *);
void	rw_exit_write(struct rwlock *);

#ifdef DIAGNOSTIC
/* Lock-state assertions; only checked when DIAGNOSTIC is defined. */
void	rw_assert_wrlock(struct rwlock *);
void	rw_assert_rdlock(struct rwlock *);
void	rw_assert_anylock(struct rwlock *);
void	rw_assert_unlocked(struct rwlock *);
#else
/* Without DIAGNOSTIC the assertions compile away to nothing. */
#define rw_assert_wrlock(rwl)	((void)0)
#define rw_assert_rdlock(rwl)	((void)0)
#define rw_assert_anylock(rwl)	((void)0)
#define rw_assert_unlocked(rwl)	((void)0)
#endif

/* Generic interface: operation and behavior selected by RW_* flags. */
int	rw_enter(struct rwlock *, int);
void	rw_exit(struct rwlock *);
int	rw_status(struct rwlock *);
170
171 static inline int
rw_read_held(struct rwlock * rwl)172 rw_read_held(struct rwlock *rwl)
173 {
174 return (rw_status(rwl) == RW_READ);
175 }
176
177 static inline int
rw_write_held(struct rwlock * rwl)178 rw_write_held(struct rwlock *rwl)
179 {
180 return (rw_status(rwl) == RW_WRITE);
181 }
182
183 static inline int
rw_lock_held(struct rwlock * rwl)184 rw_lock_held(struct rwlock *rwl)
185 {
186 int status;
187
188 status = rw_status(rwl);
189
190 return (status == RW_READ || status == RW_WRITE);
191 }
192
193
/* Recursive rwlock interface (recursion failure selectable: RW_RECURSEFAIL). */
void	_rrw_init_flags(struct rrwlock *, const char *, int,
	    const struct lock_type *);
int	rrw_enter(struct rrwlock *, int);
void	rrw_exit(struct rrwlock *);
int	rrw_status(struct rrwlock *);
199
#ifdef WITNESS
/*
 * As with rw_init_flags(): each expansion site gets its own static
 * lock_type, named after the lock expression via #rrwl stringification.
 */
#define rrw_init_flags(rrwl, name, flags) do {				\
	static const struct lock_type __lock_type = { .lt_name = #rrwl };\
	_rrw_init_flags(rrwl, name, flags, &__lock_type);		\
} while (0)
#define rrw_init(rrwl, name)	rrw_init_flags(rrwl, name, 0)
#else /* WITNESS */
/*
 * Forward the caller's flags (previously dropped as literal 0 here),
 * matching the non-WITNESS rw_init_flags() above.  The flags only
 * influence witness(4) bookkeeping, so this is behavior-neutral in
 * non-WITNESS kernels but keeps the two macro families consistent.
 */
#define rrw_init_flags(rrwl, name, flags) \
	_rrw_init_flags(rrwl, name, flags, NULL)
#define rrw_init(rrwl, name)	_rrw_init_flags(rrwl, name, 0, NULL)
#endif /* WITNESS */
211
212
/*
 * Allocated, reference-counted rwlocks
 */

#ifdef WITNESS
/* Like rw_init_flags(): one static lock_type per expansion site. */
#define rw_obj_alloc_flags(rwl, name, flags) do {			\
	static struct lock_type __lock_type = { .lt_name = #rwl };	\
	_rw_obj_alloc_flags(rwl, name, flags, &__lock_type);		\
} while (0)
#else
#define rw_obj_alloc_flags(rwl, name, flags) \
	_rw_obj_alloc_flags(rwl, name, flags, NULL)
#endif
#define rw_obj_alloc(rwl, name)	rw_obj_alloc_flags(rwl, name, 0)

void	rw_obj_init(void);
void	_rw_obj_alloc_flags(struct rwlock **, const char *, int,
	    struct lock_type *);
/* Reference counting: hold takes a reference, free drops one.
 * NOTE(review): presumably rw_obj_free() returns nonzero when the last
 * reference was released -- confirm against kern_rwlock.c. */
void	rw_obj_hold(struct rwlock *);
int	rw_obj_free(struct rwlock *);
233
234 #endif /* _KERNEL */
235
236 #endif /* _SYS_RWLOCK_H */
237