/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013-2017
 *	The DragonFly Project.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_SYS_LOCK_H_
#define	_SYS_LOCK_H_

/*
 * A number of third party programs #include <sys/lock.h> for no good
 * reason.  Don't actually include anything unless we are the kernel.
 */
#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

#include <machine/lock.h>
#ifndef _SYS_THREAD_H_
#include <sys/thread.h>		/* lwkt_token */
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif

/*
 * The general lock structure.  Provides for multiple shared locks,
 * upgrading from shared to exclusive, and sleeping until the lock
 * can be gained.
 *
 * NOTE: We don't __cachealign struct lock, it would be too much bloat.
 *	 Users of struct lock may be able to arrange it within greater
 *	 structures in more SMP-friendly ways.
 */
struct thread;

struct lock {
	u_int	lk_flags;		/* see below */
	int	lk_timo;		/* maximum sleep time (for tsleep) */
	uint64_t lk_count;		/* see LKC_* bits */
	const char *lk_wmesg;		/* resource sleeping (for tsleep) */
	struct thread *lk_lockholder;	/* thread of excl lock holder */
};

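/*
 * Hypothetical layout sketch (names are illustrative, not part of this
 * header): since struct lock is not cache-aligned by itself, it can be
 * embedded in a larger, cache-aligned structure next to the data it
 * protects, e.g.:
 *
 *	struct foo_pcpu {
 *		struct lock	fp_lock;	- protects fp_count
 *		long		fp_count;
 *	} __cachealign;
 */
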
/*
 * Lock request types:
 *
 *   LK_SHARED
 *	Get one of many possible shared locks. If a process holding an
 *	exclusive lock requests a shared lock, the exclusive lock(s) will
 *	be downgraded to shared locks.
 *
 *   LK_EXCLUSIVE
 *	Stop further shared locks; when they are cleared, grant a pending
 *	upgrade if it exists, then grant an exclusive lock. Only one exclusive
 *	lock may exist at a time, except that a process holding an exclusive
 *	lock may get additional exclusive locks if it explicitly sets the
 *	LK_CANRECURSE flag in the lock request, or if the LK_CANRECURSE flag
 *	was set when the lock was initialized.
 *
 *   LK_UPGRADE
 *	The process must hold a shared lock that it wants to have upgraded
 *	to an exclusive lock. Other processes may get exclusive access to
 *	the resource between the time that the upgrade is requested and the
 *	time that it is granted.
 *
 *   LK_EXCLUPGRADE
 *	The process must hold a shared lock that it wants to have upgraded
 *	to an exclusive lock. If the request succeeds, no other processes
 *	will have gotten exclusive access to the resource between the time
 *	that the upgrade is requested and the time that it is granted.
 *	However, if another process has already requested an upgrade, the
 *	request will fail (see error returns below).
 *
 *   LK_DOWNGRADE
 *	The process must hold an exclusive lock that it wants to have
 *	downgraded to a shared lock. If the process holds multiple (recursive)
 *	exclusive locks, they will all be downgraded to shared locks.
 *
 *   LK_RELEASE
 *	Release one instance of a lock.
 *
 *   LK_CANCEL_BEG
 *	The current exclusive lock holder can cancel any blocked lock requests,
 *	or any new requests, whose callers specified LK_CANCELABLE.  They will
 *	receive an ENOLCK error code.  Cancel beg/end does not stack.
 *
 *	The cancel command stays in effect until the exclusive lock holder
 *	releases the last count on the lock or issues a LK_CANCEL_END command.
 *
 *   LK_CANCEL_END
 *	The current exclusive lock holder can stop canceling new requests
 *	whose callers specify LK_CANCELABLE.  The exclusive lock is maintained.
 *
 *	Note that the last release of the exclusive lock will also
 *	automatically end cancel mode.
 *
 *
 * ---
 *
 *   LK_EXCLOTHER - return for lockstatus().  Used when another process
 *	holds the lock exclusively.
 *
 * These are flags that are passed to the lockmgr routine.
 */
#define LK_TYPE_MASK	0x0000000f	/* type of lock sought */
#define LK_SHARED	0x00000001	/* shared lock */
#define LK_EXCLUSIVE	0x00000002	/* exclusive lock */
#define LK_UPGRADE	0x00000003	/* shared-to-exclusive upgrade */
#define LK_EXCLUPGRADE	0x00000004	/* first shared-to-exclusive upgrade */
#define LK_DOWNGRADE	0x00000005	/* exclusive-to-shared downgrade */
#define LK_RELEASE	0x00000006	/* release any type of lock */
#define LK_WAITUPGRADE	0x00000007
#define LK_EXCLOTHER	0x00000008	/* other process holds lock */
#define LK_CANCEL_BEG	0x00000009	/* cancel other requests */
#define LK_CANCEL_END	0x0000000a	/* stop canceling other requests */

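/*
 * Illustrative usage sketch (the lock and variable names are hypothetical,
 * not part of this header).  A typical shared reader, an exclusive writer,
 * and a holder that cancels LK_CANCELABLE waiters before tearing the
 * object down:
 *
 *	lockmgr(&foo_lock, LK_SHARED);		- read-only access
 *	...
 *	lockmgr(&foo_lock, LK_RELEASE);
 *
 *	lockmgr(&foo_lock, LK_EXCLUSIVE);	- modify the object
 *	lockmgr(&foo_lock, LK_CANCEL_BEG);	- ENOLCK cancelable waiters
 *	...
 *	lockmgr(&foo_lock, LK_RELEASE);		- also ends cancel mode
 */
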
148 /*
149  * lk_count bit fields.
150  *
151  * Positive count is exclusive, negative count is shared.  The count field
152  * must be large enough to accomodate all possible threads.
153  */
154 #define LKC_RESERVED8	0x0000000080000000LU	/* (DNU, insn optimization) */
155 #define LKC_EXREQ	0x0000000040000000LU	/* waiting for excl lock */
156 #define LKC_SHARED	0x0000000020000000LU	/* shared lock(s) granted */
157 #define LKC_UPREQ	0x0000000010000000LU	/* waiting for upgrade */
158 #define LKC_EXREQ2	0x0000000008000000LU	/* multi-wait for EXREQ */
159 #define LKC_CANCEL	0x0000000004000000LU	/* cancel in effect */
160 #define LKC_XMASK	0x0000000003FFFFFFLU
161 #define LKC_SMASK	0xFFFFFFFF00000000LU
162 #define LKC_SCOUNT	0x0000000100000000LU
163 #define LKC_SSHIFT	32
164 
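/*
 * Sketch of how the counts can be decoded from lk_count, assuming the
 * exclusive count lives in LKC_XMASK and the shared count in LKC_SMASK
 * (see LKC_SCOUNT/LKC_SSHIFT above); illustrative only:
 *
 *	uint64_t count = lkp->lk_count;
 *	u_int excl_count   = count & LKC_XMASK;
 *	u_int shared_count = (count & LKC_SMASK) >> LKC_SSHIFT;
 */
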
/*
 * External lock flags.
 *
 * The first three flags may be set in lockinit() to set their mode
 * permanently, or passed in as arguments to the lock manager.
 */
#define LK_EXTFLG_MASK	0x070000F0	/* mask of external flags */
#define LK_NOWAIT	0x00000010	/* do not sleep to await lock */
#define LK_SLEEPFAIL	0x00000020	/* sleep, then return failure */
#define LK_CANRECURSE	0x00000040	/* allow recursive exclusive lock */
#define LK_NOCOLLSTATS	0x00000080	/* v_lock_coll not applicable */
#define	LK_CANCELABLE	0x01000000	/* blocked caller can be canceled */
#define LK_TIMELOCK	0x02000000
#define LK_PCATCH	0x04000000	/* timelocked with signal catching */

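/*
 * Illustrative sketch (hypothetical lock): LK_CANRECURSE may be made
 * permanent at init time, while LK_NOWAIT/LK_SLEEPFAIL are typically
 * passed per-request:
 *
 *	lockinit(&foo_lock, "foolk", 0, LK_CANRECURSE);
 *	if (lockmgr(&foo_lock, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
 *		...
 *		lockmgr(&foo_lock, LK_RELEASE);
 *	}
 */
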
/*
 * Control flags
 *
 * Non-persistent external flags.
 */
#define LK_FAILRECLAIM	0x00010000 /* vn_lock: allowed to fail on reclaim */
#define LK_RETRY	0x00020000 /* vn_lock: retry until locked */
#define	LK_UNUSED40000	0x00040000
#define	LK_UNUSED80000	0x00080000

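/*
 * These flags are consumed by vn_lock() rather than lockmgr() itself.
 * A common (illustrative) pattern:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	...
 *	vn_unlock(vp);
 */
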
/*
 * Lock return status.
 *
 * Successfully obtained locks return 0. Locks will always succeed
 * unless one of the following is true:
 *	LK_EXCLUPGRADE is requested and some other process has already
 *	    requested a lock upgrade (returns EBUSY).
 *	LK_NOWAIT is set and a sleep would be required (returns EBUSY).
 *	LK_SLEEPFAIL is set and a sleep was done (returns ENOLCK).
 *	LK_PCATCH is set and a signal arrives (returns either EINTR or
 *	    ERESTART if the system call is to be restarted).
 *	A non-zero lock timeout is set and the timeout expires (returns
 *	    EWOULDBLOCK).
 * A failed lock attempt always returns a non-zero error value. No lock
 * is held after an error return (in particular, a failed LK_UPGRADE
 * or LK_EXCLUPGRADE will have released its shared access lock).
 */

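/*
 * Illustrative error-handling sketch (hypothetical lock), matching the
 * return values described above:
 *
 *	error = lockmgr(&foo_lock, LK_EXCLUSIVE);
 *	if (error == 0) {
 *		...
 *		lockmgr(&foo_lock, LK_RELEASE);
 *	} else {
 *		- EBUSY: would have slept (LK_NOWAIT)
 *		- ENOLCK: slept (LK_SLEEPFAIL) or request canceled
 *		- EINTR/ERESTART: signal arrived (LK_PCATCH)
 *		- EWOULDBLOCK: lk_timo expired (LK_TIMELOCK)
 *	}
 */
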
/*
 * Indicator that no process holds exclusive lock
 */
#define LK_KERNTHREAD ((struct thread *)-2)

#ifdef _KERNEL

void dumplockinfo(struct lock *lkp);
struct proc;

struct lock_args {
	struct lock	*la_lock;
	const char	*la_desc;
	int		la_flags;
};

#define LOCK_INITIALIZER(wmesg, timo, flags)	\
{						\
	.lk_flags = ((flags) & LK_EXTFLG_MASK),	\
	.lk_timo = (timo),			\
	.lk_count = 0,				\
	.lk_wmesg = wmesg,			\
	.lk_lockholder = NULL			\
}

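/*
 * Illustrative static initialization sketch (hypothetical lock name):
 *
 *	static struct lock foo_lock =
 *		LOCK_INITIALIZER("foolk", 0, LK_CANRECURSE);
 */
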
void	lockinit (struct lock *, const char *wmesg, int timo, int flags);
void	lockreinit (struct lock *, const char *wmesg, int timo, int flags);
void	lockuninit(struct lock *);
void	lock_sysinit(struct lock_args *);
int	lockmgr_shared (struct lock *, u_int flags);
int	lockmgr_exclusive (struct lock *, u_int flags);
int	lockmgr_downgrade (struct lock *, u_int flags);
int	lockmgr_upgrade (struct lock *, u_int flags);
int	lockmgr_release (struct lock *, u_int flags);
int	lockmgr_cancel_beg (struct lock *, u_int flags);
int	lockmgr_cancel_end (struct lock *, u_int flags);
void	lockmgr_kernproc (struct lock *);
void	lockmgr_printinfo (struct lock *);
int	lockstatus (struct lock *, struct thread *);
int	lockowned (struct lock *);

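/*
 * Illustrative ownership-assertion sketch (hypothetical lock); lockstatus()
 * reports how the lock is held relative to the given thread:
 *
 *	KKASSERT(lockstatus(&foo_lock, curthread) == LK_EXCLUSIVE);
 *	KKASSERT(lockowned(&foo_lock));
 */
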
#define	LOCK_SYSINIT(name, lock, desc, flags)				\
	static struct lock_args name##_args = {				\
		(lock),							\
		(desc),							\
		(flags)							\
	};								\
	SYSINIT(name##_lock_sysinit, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,	\
	    lock_sysinit, &name##_args);				\
	SYSUNINIT(name##_lock_sysuninit, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, \
	    lockuninit, (lock))

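/*
 * Illustrative SYSINIT-time initialization sketch (hypothetical names):
 *
 *	static struct lock foo_lock;
 *	LOCK_SYSINIT(foolock, &foo_lock, "foolk", 0);
 */
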
/*
 * Most lockmgr() calls pass a constant flags parameter which
 * we can optimize-out with an inline.
 */
static __inline
int
lockmgr(struct lock *lkp, u_int flags)
{
	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		return lockmgr_shared(lkp, flags);
	case LK_EXCLUSIVE:
		return lockmgr_exclusive(lkp, flags);
	case LK_DOWNGRADE:
		return lockmgr_downgrade(lkp, flags);
	case LK_EXCLUPGRADE:
	case LK_UPGRADE:
		return lockmgr_upgrade(lkp, flags);
	case LK_RELEASE:
		return lockmgr_release(lkp, flags);
	case LK_CANCEL_BEG:
		return lockmgr_cancel_beg(lkp, flags);
	case LK_CANCEL_END:
		return lockmgr_cancel_end(lkp, flags);
	default:
		panic("lockmgr: unknown locktype request %d",
		      flags & LK_TYPE_MASK);
		return EINVAL;	/* NOT REACHED */
	}
}

/*
 * Returns non-zero if the lock is in-use.  Cannot be used to count
 * refs on a lock (refs cannot be safely counted due to the use of
 * atomic_fetchadd_int() for shared locks).
 */
static __inline
int
lockinuse(struct lock *lkp)
{
	return ((lkp->lk_count & (LKC_SMASK | LKC_XMASK)) != 0);
}

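/*
 * Illustrative teardown sketch (hypothetical lock): assert that the lock
 * is idle before destroying it:
 *
 *	KKASSERT(lockinuse(&foo_lock) == 0);
 *	lockuninit(&foo_lock);
 */
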
/*
 * Returns true if the lock was acquired. Can be used to port
 * FreeBSD's mtx_trylock() and similar functions.
 */
static __inline
boolean_t
lockmgr_try(struct lock *lkp, u_int flags)
{
	return (lockmgr(lkp, flags | LK_NOWAIT) == 0);
}

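/*
 * Illustrative trylock sketch (hypothetical lock):
 *
 *	if (lockmgr_try(&foo_lock, LK_EXCLUSIVE)) {
 *		...
 *		lockmgr(&foo_lock, LK_RELEASE);
 *	}
 */
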
/*
 * Returns true if the lock is exclusively held by anyone.
 */
static __inline
boolean_t
lockmgr_anyexcl(struct lock *lkp)
{
	return ((lkp->lk_count & LKC_XMASK) != 0);
}

/*
 * Returns true if the lock is held exclusively with an exclusive count
 * of exactly one (i.e. not recursed).
 */
static __inline
boolean_t
lockmgr_oneexcl(struct lock *lkp)
{
	return ((lkp->lk_count & LKC_XMASK) == 1);
}

/*
 * Returns true if one or more threads are waiting for an exclusive lock.
 */
static __inline
boolean_t
lockmgr_exclpending(struct lock *lkp)
{
	return ((lkp->lk_count & (LKC_EXREQ | LKC_EXREQ2)) != 0);
}

#endif /* _KERNEL */
#endif /* _KERNEL || _KERNEL_STRUCTURES */
#endif /* _SYS_LOCK_H_ */