/*	$OpenBSD: kern_lock.c,v 1.35 2010/04/26 05:48:17 deraadt Exp $	*/

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/sched.h>

#include <machine/cpu.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, drain, wanted)			\
do {									\
	for (error = 0; wanted; ) {					\
		if ((drain))						\
			(lkp)->lk_flags |= LK_WAITDRAIN;		\
		else							\
			(lkp)->lk_waitcount++;				\
		/* XXX Cast away volatile. */				\
		error = tsleep((drain) ?				\
		    (void *)&(lkp)->lk_flags : (void *)(lkp),		\
		    (lkp)->lk_prio, (lkp)->lk_wmesg, (lkp)->lk_timo);	\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount--;				\
		if (error)						\
			break;						\
	}								\
} while (0)
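
/*
 * For illustration only (a sketch, not part of the original code): a
 * non-draining shared-lock wait such as
 *
 *	ACQUIRE(lkp, error, extflags, 0,
 *	    lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL));
 *
 * expands to roughly the following loop, re-testing the wanted
 * condition after every wakeup on the lock address:
 *
 *	for (error = 0;
 *	    lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL); ) {
 *		lkp->lk_waitcount++;
 *		error = tsleep((void *)lkp, lkp->lk_prio,
 *		    lkp->lk_wmesg, lkp->lk_timo);
 *		lkp->lk_waitcount--;
 *		if (error)
 *			break;
 *	}
 */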

#define	SETHOLDER(lkp, pid, cpu_id)					\
	(lkp)->lk_lockholder = (pid)

#define	WEHOLDIT(lkp, pid, cpu_id)					\
	((lkp)->lk_lockholder == (pid))

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, char *wmesg, int timo, int flags)
{

	bzero(lkp, sizeof(struct lock));
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
}
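
/*
 * Typical usage, shown as a sketch (the structure and field names are
 * hypothetical, not taken from this file; PLOCK is used only as an
 * illustrative sleep priority): a lock is embedded in the object it
 * protects, initialized once, and every access is bracketed by
 * lockmgr() requests.
 *
 *	struct foo {
 *		struct lock	f_lock;
 *		...
 *	};
 *
 *	lockinit(&foo->f_lock, PLOCK, "foolck", 0, 0);
 *
 *	Writer:
 *		lockmgr(&foo->f_lock, LK_EXCLUSIVE, NULL);
 *		...
 *		lockmgr(&foo->f_lock, LK_RELEASE, NULL);
 *
 *	Reader:
 *		lockmgr(&foo->f_lock, LK_SHARED, NULL);
 *		...
 *		lockmgr(&foo->f_lock, LK_RELEASE, NULL);
 */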

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int lock_type = 0;

	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	return (lock_type);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(__volatile struct lock *lkp, u_int flags, void *notused)
{
	int error;
	pid_t pid;
	int extflags;
	cpuid_t cpu_id;
	struct proc *p = curproc;

	error = 0;
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC
	if (p == NULL)
		panic("lockmgr: process context required");
#endif
	/* Process context required. */
	pid = p->p_pid;
	cpu_id = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */
		lkp->lk_flags &= ~LK_DRAINING;
		lkp->lk_flags |= LK_DRAINED;
	}

	/*
	 * Check if the caller is asking us to be schizophrenic.
	 */
	if ((lkp->lk_flags & (LK_CANRECURSE|LK_RECURSEFAIL)) ==
	    (LK_CANRECURSE|LK_RECURSEFAIL))
		panic("lockmgr: make up your mind");

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL));
			if (error)
				break;
			lkp->lk_sharecount++;
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;

		if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
		if (lkp->lk_waitcount)
			wakeup((void *)(lkp));
		break;

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
		     lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
				panic("lockmgr: pid %d, not exclusive lock "
				    "holder %d unlocking",
				    pid, lkp->lk_lockholder);
			}
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		if (lkp->lk_waitcount)
			wakeup((void *)(lkp));
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
		     lkp->lk_sharecount != 0 ||
		     lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
		lkp->lk_exclusivecount = 1;
		break;

	default:
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) != 0 &&
	    ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	return (error);
}
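
/*
 * Sketch of the optional request modifiers handled above (hypothetical
 * caller, not from this file): LK_NOWAIT turns a contended request
 * into an immediate EBUSY instead of a sleep, and LK_CANRECURSE lets
 * the current exclusive holder lock again, with one LK_RELEASE owed
 * per successful acquisition.
 *
 *	if (lockmgr(&foo->f_lock, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
 *		...	back off, somebody else holds the lock
 *	}
 *
 *	lockmgr(&foo->f_lock, LK_EXCLUSIVE | LK_CANRECURSE, NULL);
 *	...
 *	lockmgr(&foo->f_lock, LK_RELEASE, NULL);
 */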

#ifdef DIAGNOSTIC
/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		printf("pid %d", lkp->lk_lockholder);
	} else
		printf(" not locked");
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
#endif /* DIAGNOSTIC */

#if defined(MULTIPROCESSOR)
/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

struct __mp_lock kernel_lock;

void
_kernel_lock_init(void)
{
	__mp_lock_init(&kernel_lock);
}

/*
 * Acquire/release the kernel lock.  Intended for use in the scheduler
 * and the lower half of the kernel.
 */

void
_kernel_lock(void)
{
	SCHED_ASSERT_UNLOCKED();
	__mp_lock(&kernel_lock);
}

void
_kernel_unlock(void)
{
	__mp_unlock(&kernel_lock);
}
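
/*
 * Sketch (hypothetical interrupt handler, not from this file): the
 * lower half wraps work that still assumes the single-processor
 * kernel in the big lock.  Most code reaches these functions through
 * wrapper macros rather than calling them directly.
 *
 *	int
 *	foo_intr(void *arg)
 *	{
 *		_kernel_lock();
 *		...	big-lock-protected interrupt work
 *		_kernel_unlock();
 *		return (1);
 *	}
 */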

/*
 * Acquire/release the kernel_lock on behalf of a process.  Intended for
 * use in the top half of the kernel.
 */
void
_kernel_proc_lock(struct proc *p)
{
	SCHED_ASSERT_UNLOCKED();
	__mp_lock(&kernel_lock);
	atomic_setbits_int(&p->p_flag, P_BIGLOCK);
}

void
_kernel_proc_unlock(struct proc *p)
{
	atomic_clearbits_int(&p->p_flag, P_BIGLOCK);
	__mp_unlock(&kernel_lock);
}
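
/*
 * Sketch (hypothetical process-context path, not from this file): the
 * top half takes the big lock on behalf of the current process;
 * P_BIGLOCK marks the process as holding it so the lock state can be
 * handled correctly around sleeps and context switches.
 *
 *	_kernel_proc_lock(curproc);
 *	...	process-context work under the big lock
 *	_kernel_proc_unlock(curproc);
 */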

#ifdef MP_LOCKDEBUG
/* CPU-dependent timing, needs this to be settable from ddb. */
int __mp_lock_spinout = 200000000;
#endif

#endif /* MULTIPROCESSOR */
397