xref: /original-bsd/sys/kern/kern_lock.c (revision 714b3ab9)
1 /*
2  * Copyright (c) 1995
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code contains ideas from software contributed to Berkeley by
6  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
7  * System project at Carnegie-Mellon University.
8  *
9  * %sccs.include.redist.c%
10  *
11  *	@(#)kern_lock.c	8.12 (Berkeley) 05/17/95
12  */
13 
14 #include <sys/param.h>
15 #include <sys/proc.h>
16 #include <sys/lock.h>
17 
18 /*
19  * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
21  */
22 
#ifdef DEBUG
/*
 * COUNT(p, x): adjust the per-process count of locks held (p_locks) by x
 * for debugging.  p may be NULL (e.g. kernel-context requests), hence the
 * guard.  Note this expands to a bare if-statement, so it is only safe in
 * statement position; do not use it where an expression is expected or
 * follow it with an unbraced else.
 */
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
/* Non-DEBUG kernels compile the bookkeeping away entirely. */
#define COUNT(p, x)
#endif
28 
#if NCPUS > 1

/*
 * For multiprocessor system, try spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */
/* Maximum number of busy-wait iterations before falling back to sleeping. */
int lock_wait_time = 100;
/*
 * PAUSE(lkp, wanted): busy-wait for up to lock_wait_time iterations for the
 * "wanted" condition to clear, dropping the interlock while spinning and
 * reacquiring it before re-testing.
 *
 * NOTE(review): the trailing "break" binds to whatever loop or switch
 * encloses the expansion point.  In ACQUIRE below PAUSE is expanded
 * *before* the for-loop, so on the spin-success path the break appears to
 * exit the lockmgr switch case directly, skipping the post-acquire
 * bookkeeping — confirm intent for the NCPUS > 1 configuration.
 */
#define PAUSE(lkp, wanted)						\
		if (lock_wait_time > 0) {				\
			int i;						\
									\
			simple_unlock(&lkp->lk_interlock);		\
			for (i = lock_wait_time; i > 0; i--)		\
				if (!(wanted))				\
					break;				\
			simple_lock(&lkp->lk_interlock);		\
		}							\
		if (!(wanted))						\
			break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */
60 
/*
 * Acquire a resource.
 *
 * ACQUIRE(lkp, error, extflags, wanted): sleep until the "wanted"
 * condition clears or tsleep() fails.  The lock's interlock must be held
 * on entry; it is dropped across each tsleep() and retaken afterwards, so
 * "wanted" is always re-evaluated under the interlock.  lk_waitcount
 * tracks sleepers so releasers know to wakeup().  With LK_SLEEPFAIL set,
 * any sleep at all fails the request with ENOLCK rather than retrying.
 * Sets "error" to 0 on success, or a tsleep()/ENOLCK error code.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		simple_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		simple_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}
80 
81 /*
82  * Initialize a lock; required before use.
83  */
84 void
85 lockinit(lkp, prio, wmesg, timo, flags)
86 	struct lock *lkp;
87 	int prio;
88 	char *wmesg;
89 	int timo;
90 	int flags;
91 {
92 
93 	bzero(lkp, sizeof(struct lock));
94 	simple_lock_init(&lkp->lk_interlock);
95 	lkp->lk_flags = flags & LK_EXTFLG_MASK;
96 	lkp->lk_prio = prio;
97 	lkp->lk_timo = timo;
98 	lkp->lk_wmesg = wmesg;
99 	lkp->lk_lockholder = LK_NOPROC;
100 }
101 
102 /*
103  * Determine the status of a lock.
104  */
105 int
106 lockstatus(lkp)
107 	struct lock *lkp;
108 {
109 	int lock_type = 0;
110 
111 	simple_lock(&lkp->lk_interlock);
112 	if (lkp->lk_exclusivecount != 0)
113 		lock_type = LK_EXCLUSIVE;
114 	else if (lkp->lk_sharecount != 0)
115 		lock_type = LK_SHARED;
116 	simple_unlock(&lkp->lk_interlock);
117 	return (lock_type);
118 }
119 
120 /*
121  * Set, change, or release a lock.
122  *
123  * Shared requests increment the shared count. Exclusive requests set the
124  * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
125  * accepted shared locks and shared-to-exclusive upgrades to go away.
126  */
127 int
128 lockmgr(lkp, flags, interlkp, p)
129 	__volatile struct lock *lkp;
130 	u_int flags;
131 	struct simplelock *interlkp;
132 	struct proc *p;
133 {
134 	int error;
135 	pid_t pid;
136 	int extflags;
137 
138 	error = 0;
139 	if (p)
140 		pid = p->p_pid;
141 	else
142 		pid = LK_KERNPROC;
143 	simple_lock(&lkp->lk_interlock);
144 	if (flags & LK_INTERLOCK)
145 		simple_unlock(interlkp);
146 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
147 #ifdef DIAGNOSTIC
148 	/*
149 	 * Once a lock has drained, the LK_DRAINING flag is set and an
150 	 * exclusive lock is returned. The only valid operation thereafter
151 	 * is a single release of that exclusive lock. This final release
152 	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
153 	 * further requests of any sort will result in a panic. The bits
154 	 * selected for these two flags are chosen so that they will be set
155 	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
156 	 */
157 	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
158 		if (lkp->lk_flags & LK_DRAINED)
159 			panic("lockmgr: using decommissioned lock");
160 		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
161 		    lkp->lk_lockholder != pid)
162 			panic("lockmgr: non-release on draining lock: %d\n",
163 			    flags & LK_TYPE_MASK);
164 		lkp->lk_flags &= ~LK_DRAINING;
165 		lkp->lk_flags |= LK_DRAINED;
166 	}
167 #endif DIAGNOSTIC
168 
169 	switch (flags & LK_TYPE_MASK) {
170 
171 	case LK_SHARED:
172 		if (lkp->lk_lockholder != pid) {
173 			/*
174 			 * If just polling, check to see if we will block.
175 			 */
176 			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
177 			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
178 				error = EBUSY;
179 				break;
180 			}
181 			/*
182 			 * Wait for exclusive locks and upgrades to clear.
183 			 */
184 			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
185 			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
186 			if (error)
187 				break;
188 			lkp->lk_sharecount++;
189 			COUNT(p, 1);
190 			break;
191 		}
192 		/*
193 		 * We hold an exclusive lock, so downgrade it to shared.
194 		 * An alternative would be to fail with EDEADLK.
195 		 */
196 		lkp->lk_sharecount++;
197 		COUNT(p, 1);
198 		/* fall into downgrade */
199 
200 	case LK_DOWNGRADE:
201 		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
202 			panic("lockmgr: not holding exclusive lock");
203 		lkp->lk_sharecount += lkp->lk_exclusivecount;
204 		lkp->lk_exclusivecount = 0;
205 		lkp->lk_flags &= ~LK_HAVE_EXCL;
206 		lkp->lk_lockholder = LK_NOPROC;
207 		if (lkp->lk_waitcount)
208 			wakeup((void *)lkp);
209 		break;
210 
211 	case LK_EXCLUPGRADE:
212 		/*
213 		 * If another process is ahead of us to get an upgrade,
214 		 * then we want to fail rather than have an intervening
215 		 * exclusive access.
216 		 */
217 		if (lkp->lk_flags & LK_WANT_UPGRADE) {
218 			lkp->lk_sharecount--;
219 			COUNT(p, -1);
220 			error = EBUSY;
221 			break;
222 		}
223 		/* fall into normal upgrade */
224 
225 	case LK_UPGRADE:
226 		/*
227 		 * Upgrade a shared lock to an exclusive one. If another
228 		 * shared lock has already requested an upgrade to an
229 		 * exclusive lock, our shared lock is released and an
230 		 * exclusive lock is requested (which will be granted
231 		 * after the upgrade). If we return an error, the file
232 		 * will always be unlocked.
233 		 */
234 		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
235 			panic("lockmgr: upgrade exclusive lock");
236 		lkp->lk_sharecount--;
237 		COUNT(p, -1);
238 		/*
239 		 * If we are just polling, check to see if we will block.
240 		 */
241 		if ((extflags & LK_NOWAIT) &&
242 		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
243 		     lkp->lk_sharecount > 1)) {
244 			error = EBUSY;
245 			break;
246 		}
247 		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
248 			/*
249 			 * We are first shared lock to request an upgrade, so
250 			 * request upgrade and wait for the shared count to
251 			 * drop to zero, then take exclusive lock.
252 			 */
253 			lkp->lk_flags |= LK_WANT_UPGRADE;
254 			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
255 			lkp->lk_flags &= ~LK_WANT_UPGRADE;
256 			if (error)
257 				break;
258 			lkp->lk_flags |= LK_HAVE_EXCL;
259 			lkp->lk_lockholder = pid;
260 			if (lkp->lk_exclusivecount != 0)
261 				panic("lockmgr: non-zero exclusive count");
262 			lkp->lk_exclusivecount = 1;
263 			COUNT(p, 1);
264 			break;
265 		}
266 		/*
267 		 * Someone else has requested upgrade. Release our shared
268 		 * lock, awaken upgrade requestor if we are the last shared
269 		 * lock, then request an exclusive lock.
270 		 */
271 		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
272 			wakeup((void *)lkp);
273 		/* fall into exclusive request */
274 
275 	case LK_EXCLUSIVE:
276 		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
277 			/*
278 			 *	Recursive lock.
279 			 */
280 			if ((extflags & LK_CANRECURSE) == 0)
281 				panic("lockmgr: locking against myself");
282 			lkp->lk_exclusivecount++;
283 			COUNT(p, 1);
284 			break;
285 		}
286 		/*
287 		 * If we are just polling, check to see if we will sleep.
288 		 */
289 		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
290 		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
291 		     lkp->lk_sharecount != 0)) {
292 			error = EBUSY;
293 			break;
294 		}
295 		/*
296 		 * Try to acquire the want_exclusive flag.
297 		 */
298 		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
299 		    (LK_HAVE_EXCL | LK_WANT_EXCL));
300 		if (error)
301 			break;
302 		lkp->lk_flags |= LK_WANT_EXCL;
303 		/*
304 		 * Wait for shared locks and upgrades to finish.
305 		 */
306 		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
307 		       (lkp->lk_flags & LK_WANT_UPGRADE));
308 		lkp->lk_flags &= ~LK_WANT_EXCL;
309 		if (error)
310 			break;
311 		lkp->lk_flags |= LK_HAVE_EXCL;
312 		lkp->lk_lockholder = pid;
313 		if (lkp->lk_exclusivecount != 0)
314 			panic("lockmgr: non-zero exclusive count");
315 		lkp->lk_exclusivecount = 1;
316 		COUNT(p, 1);
317 		break;
318 
319 	case LK_RELEASE:
320 		if (lkp->lk_exclusivecount != 0) {
321 			if (pid != lkp->lk_lockholder)
322 				panic("lockmgr: pid %d, not %s %d unlocking",
323 				    pid, "exclusive lock holder",
324 				    lkp->lk_lockholder);
325 			lkp->lk_exclusivecount--;
326 			COUNT(p, -1);
327 			if (lkp->lk_exclusivecount == 0) {
328 				lkp->lk_flags &= ~LK_HAVE_EXCL;
329 				lkp->lk_lockholder = LK_NOPROC;
330 			}
331 		} else if (lkp->lk_sharecount != 0) {
332 			lkp->lk_sharecount--;
333 			COUNT(p, -1);
334 		}
335 		if (lkp->lk_waitcount)
336 			wakeup((void *)lkp);
337 		break;
338 
339 	case LK_DRAIN:
340 		/*
341 		 * Check that we do not already hold the lock, as it can
342 		 * never drain if we do. Unfortunately, we have no way to
343 		 * check for holding a shared lock, but at least we can
344 		 * check for an exclusive one.
345 		 */
346 		if (lkp->lk_lockholder == pid)
347 			panic("lockmgr: draining against myself");
348 		/*
349 		 * If we are just polling, check to see if we will sleep.
350 		 */
351 		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
352 		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
353 		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
354 			error = EBUSY;
355 			break;
356 		}
357 		PAUSE(lkp, ((lkp->lk_flags &
358 		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
359 		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
360 		for (error = 0; ((lkp->lk_flags &
361 		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
362 		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
363 			lkp->lk_flags |= LK_WAITDRAIN;
364 			simple_unlock(&lkp->lk_interlock);
365 			if (error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
366 			    lkp->lk_wmesg, lkp->lk_timo))
367 				return (error);
368 			if ((extflags) & LK_SLEEPFAIL)
369 				return (ENOLCK);
370 			simple_lock(&lkp->lk_interlock);
371 		}
372 		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
373 		lkp->lk_lockholder = pid;
374 		lkp->lk_exclusivecount = 1;
375 		COUNT(p, 1);
376 		break;
377 
378 	default:
379 		simple_unlock(&lkp->lk_interlock);
380 		panic("lockmgr: unknown locktype request %d",
381 		    flags & LK_TYPE_MASK);
382 		/* NOTREACHED */
383 	}
384 	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
385 	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
386 	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
387 		lkp->lk_flags &= ~LK_WAITDRAIN;
388 		wakeup((void *)&lkp->lk_flags);
389 	}
390 	simple_unlock(&lkp->lk_interlock);
391 	return (error);
392 }
393 
394 /*
395  * Print out information about state of a lock. Used by VOP_PRINT
396  * routines to display ststus about contained locks.
397  */
398 lockmgr_printinfo(lkp)
399 	struct lock *lkp;
400 {
401 
402 	if (lkp->lk_sharecount)
403 		printf(" lock type %s: SHARED", lkp->lk_wmesg);
404 	else if (lkp->lk_flags & LK_HAVE_EXCL)
405 		printf(" lock type %s: EXCL by pid %d", lkp->lk_wmesg,
406 		    lkp->lk_lockholder);
407 	if (lkp->lk_waitcount > 0)
408 		printf(" with %d pending", lkp->lk_waitcount);
409 }
410 
#if defined(DEBUG) && NCPUS == 1
#include <sys/kernel.h>
#include <vm/vm.h>
#include <sys/sysctl.h>
/*
 * Behavior when a simple-lock error is detected:
 *   -1 = panic, 0 = print a message only, >0 = print and pause that many
 *   seconds (via tsleep) before continuing.
 */
int lockpausetime = 1;
/* Exposes lockpausetime as a debug.* sysctl knob — presumably debug slot 2;
 * verify against the sysctl debug table.  NOTE(review). */
struct ctldebug debug2 = { "lockpausetime", &lockpausetime };
/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
421 void
422 simple_lock_init(alp)
423 	struct simplelock *alp;
424 {
425 
426 	alp->lock_data = 0;
427 }
428 
429 void
430 _simple_lock(alp, id, l)
431 	__volatile struct simplelock *alp;
432 	const char *id;
433 	int l;
434 {
435 
436 	if (alp->lock_data == 1) {
437 		if (lockpausetime == -1)
438 			panic("%s:%d: simple_lock: lock held", id, l);
439 		if (lockpausetime == 0) {
440 			printf("%s:%d: simple_lock: lock held\n", id, l);
441 		} else if (lockpausetime > 0) {
442 			printf("%s:%d: simple_lock: lock held...", id, l);
443 			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
444 			    lockpausetime * hz);
445 			printf(" continuing\n");
446 		}
447 	}
448 	alp->lock_data = 1;
449 }
450 
451 int
452 _simple_lock_try(alp, id, l)
453 	__volatile struct simplelock *alp;
454 	const char *id;
455 	int l;
456 {
457 
458 	/*
459 	if (alp->lock_data == 1) {
460 		if (lockpausetime == -1)
461 			panic("%s:%d: simple_lock_try: lock held", id, l);
462 		if (lockpausetime == 0) {
463 			printf("%s:%d: simple_lock_try: lock held\n", id, l);
464 		} else if (lockpausetime > 0) {
465 			printf("%s:%d: simple_lock_try: lock held...", id, l);
466 			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
467 			    lockpausetime * hz);
468 			printf(" continuing\n");
469 		}
470 	}
471 	*/
472 	if (alp->lock_data)
473 		return (0);
474 
475 	alp->lock_data = 1;
476 	return (1);
477 }
478 
479 void
480 _simple_unlock(alp, id, l)
481 	__volatile struct simplelock *alp;
482 	const char *id;
483 	int l;
484 {
485 
486 	if (alp->lock_data == 0) {
487 		if (lockpausetime == -1)
488 			panic("%s:%d: simple_unlock: lock not held", id, l);
489 		if (lockpausetime == 0) {
490 			printf("%s:%d: simple_unlock: lock not held\n", id, l);
491 		} else if (lockpausetime > 0) {
492 			printf("%s:%d: simple_unlock: lock not held...", id, l);
493 			tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
494 			    lockpausetime * hz);
495 			printf(" continuing\n");
496 		}
497 	}
498 	alp->lock_data = 0;
499 }
500 #endif /* DEBUG && NCPUS == 1 */
501