1 /*
2 * Copyright (c) 1995
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code contains ideas from software contributed to Berkeley by
6 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
7 * System project at Carnegie-Mellon University.
8 *
9 * %sccs.include.redist.c%
10 *
11 * @(#)kern_lock.c 8.18 (Berkeley) 05/21/95
12 */
13
14 #include <sys/param.h>
15 #include <sys/proc.h>
16 #include <sys/lock.h>
17 #include <machine/cpu.h>
18
19 /*
20 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
22 */
23
#ifdef DEBUG
/*
 * Bookkeeping for debug kernels: adjust the locking process's lock
 * count by x (+1 on acquire, -1 on release).  A null process pointer
 * (kernel context) is silently ignored.  Compiles away when !DEBUG.
 */
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif
29
#if NCPUS > 1

/*
 * For multiprocessor system, try spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */
int lock_wait_time = 100;
/*
 * Spin up to lock_wait_time iterations waiting for `wanted' to clear,
 * dropping the interlock while spinning so the other CPU can make
 * progress.  If the resource came free, the trailing `break' skips the
 * sleep that normally follows.
 *
 * NOTE(review): the trailing `break' binds to whatever enclosing
 * statement the macro is expanded in.  When ACQUIRE/PAUSE is expanded
 * directly inside a switch case in lockmgr(), that break appears to
 * exit the switch rather than skip only the sleep loop -- confirm
 * the intended behavior before relying on the NCPUS > 1 path.
 */
#define PAUSE(lkp, wanted)						\
	if (lock_wait_time > 0) {					\
		int i;							\
									\
		simple_unlock(&lkp->lk_interlock);			\
		for (i = lock_wait_time; i > 0; i--)			\
			if (!(wanted))					\
				break;					\
		simple_lock(&lkp->lk_interlock);			\
	}								\
	if (!(wanted))							\
		break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */
61
62 /*
63 * Acquire a resource.
64 */
65 #define ACQUIRE(lkp, error, extflags, wanted) \
66 PAUSE(lkp, wanted); \
67 for (error = 0; wanted; ) { \
68 (lkp)->lk_waitcount++; \
69 simple_unlock(&(lkp)->lk_interlock); \
70 error = tsleep((void *)lkp, (lkp)->lk_prio, \
71 (lkp)->lk_wmesg, (lkp)->lk_timo); \
72 simple_lock(&(lkp)->lk_interlock); \
73 (lkp)->lk_waitcount--; \
74 if (error) \
75 break; \
76 if ((extflags) & LK_SLEEPFAIL) { \
77 error = ENOLCK; \
78 break; \
79 } \
80 }
81
82 /*
83 * Initialize a lock; required before use.
84 */
85 void
lockinit(lkp,prio,wmesg,timo,flags)86 lockinit(lkp, prio, wmesg, timo, flags)
87 struct lock *lkp;
88 int prio;
89 char *wmesg;
90 int timo;
91 int flags;
92 {
93
94 bzero(lkp, sizeof(struct lock));
95 simple_lock_init(&lkp->lk_interlock);
96 lkp->lk_flags = flags & LK_EXTFLG_MASK;
97 lkp->lk_prio = prio;
98 lkp->lk_timo = timo;
99 lkp->lk_wmesg = wmesg;
100 lkp->lk_lockholder = LK_NOPROC;
101 }
102
103 /*
104 * Determine the status of a lock.
105 */
106 int
lockstatus(lkp)107 lockstatus(lkp)
108 struct lock *lkp;
109 {
110 int lock_type = 0;
111
112 simple_lock(&lkp->lk_interlock);
113 if (lkp->lk_exclusivecount != 0)
114 lock_type = LK_EXCLUSIVE;
115 else if (lkp->lk_sharecount != 0)
116 lock_type = LK_SHARED;
117 simple_unlock(&lkp->lk_interlock);
118 return (lock_type);
119 }
120
121 /*
122 * Set, change, or release a lock.
123 *
124 * Shared requests increment the shared count. Exclusive requests set the
125 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
126 * accepted shared locks and shared-to-exclusive upgrades to go away.
127 */
128 int
lockmgr(lkp,flags,interlkp,p)129 lockmgr(lkp, flags, interlkp, p)
130 __volatile struct lock *lkp;
131 u_int flags;
132 struct simplelock *interlkp;
133 struct proc *p;
134 {
135 int error;
136 pid_t pid;
137 int extflags;
138
139 error = 0;
140 if (p)
141 pid = p->p_pid;
142 else
143 pid = LK_KERNPROC;
144 simple_lock(&lkp->lk_interlock);
145 if (flags & LK_INTERLOCK)
146 simple_unlock(interlkp);
147 extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
148 #ifdef DIAGNOSTIC
149 /*
150 * Once a lock has drained, the LK_DRAINING flag is set and an
151 * exclusive lock is returned. The only valid operation thereafter
152 * is a single release of that exclusive lock. This final release
153 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
154 * further requests of any sort will result in a panic. The bits
155 * selected for these two flags are chosen so that they will be set
156 * in memory that is freed (freed memory is filled with 0xdeadbeef).
157 * The final release is permitted to give a new lease on life to
158 * the lock by specifying LK_REENABLE.
159 */
160 if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
161 if (lkp->lk_flags & LK_DRAINED)
162 panic("lockmgr: using decommissioned lock");
163 if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
164 lkp->lk_lockholder != pid)
165 panic("lockmgr: non-release on draining lock: %d\n",
166 flags & LK_TYPE_MASK);
167 lkp->lk_flags &= ~LK_DRAINING;
168 if ((flags & LK_REENABLE) == 0)
169 lkp->lk_flags |= LK_DRAINED;
170 }
171 #endif DIAGNOSTIC
172
173 switch (flags & LK_TYPE_MASK) {
174
175 case LK_SHARED:
176 if (lkp->lk_lockholder != pid) {
177 /*
178 * If just polling, check to see if we will block.
179 */
180 if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
181 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
182 error = EBUSY;
183 break;
184 }
185 /*
186 * Wait for exclusive locks and upgrades to clear.
187 */
188 ACQUIRE(lkp, error, extflags, lkp->lk_flags &
189 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
190 if (error)
191 break;
192 lkp->lk_sharecount++;
193 COUNT(p, 1);
194 break;
195 }
196 /*
197 * We hold an exclusive lock, so downgrade it to shared.
198 * An alternative would be to fail with EDEADLK.
199 */
200 lkp->lk_sharecount++;
201 COUNT(p, 1);
202 /* fall into downgrade */
203
204 case LK_DOWNGRADE:
205 if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
206 panic("lockmgr: not holding exclusive lock");
207 lkp->lk_sharecount += lkp->lk_exclusivecount;
208 lkp->lk_exclusivecount = 0;
209 lkp->lk_flags &= ~LK_HAVE_EXCL;
210 lkp->lk_lockholder = LK_NOPROC;
211 if (lkp->lk_waitcount)
212 wakeup((void *)lkp);
213 break;
214
215 case LK_EXCLUPGRADE:
216 /*
217 * If another process is ahead of us to get an upgrade,
218 * then we want to fail rather than have an intervening
219 * exclusive access.
220 */
221 if (lkp->lk_flags & LK_WANT_UPGRADE) {
222 lkp->lk_sharecount--;
223 COUNT(p, -1);
224 error = EBUSY;
225 break;
226 }
227 /* fall into normal upgrade */
228
229 case LK_UPGRADE:
230 /*
231 * Upgrade a shared lock to an exclusive one. If another
232 * shared lock has already requested an upgrade to an
233 * exclusive lock, our shared lock is released and an
234 * exclusive lock is requested (which will be granted
235 * after the upgrade). If we return an error, the file
236 * will always be unlocked.
237 */
238 if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
239 panic("lockmgr: upgrade exclusive lock");
240 lkp->lk_sharecount--;
241 COUNT(p, -1);
242 /*
243 * If we are just polling, check to see if we will block.
244 */
245 if ((extflags & LK_NOWAIT) &&
246 ((lkp->lk_flags & LK_WANT_UPGRADE) ||
247 lkp->lk_sharecount > 1)) {
248 error = EBUSY;
249 break;
250 }
251 if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
252 /*
253 * We are first shared lock to request an upgrade, so
254 * request upgrade and wait for the shared count to
255 * drop to zero, then take exclusive lock.
256 */
257 lkp->lk_flags |= LK_WANT_UPGRADE;
258 ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
259 lkp->lk_flags &= ~LK_WANT_UPGRADE;
260 if (error)
261 break;
262 lkp->lk_flags |= LK_HAVE_EXCL;
263 lkp->lk_lockholder = pid;
264 if (lkp->lk_exclusivecount != 0)
265 panic("lockmgr: non-zero exclusive count");
266 lkp->lk_exclusivecount = 1;
267 COUNT(p, 1);
268 break;
269 }
270 /*
271 * Someone else has requested upgrade. Release our shared
272 * lock, awaken upgrade requestor if we are the last shared
273 * lock, then request an exclusive lock.
274 */
275 if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
276 wakeup((void *)lkp);
277 /* fall into exclusive request */
278
279 case LK_EXCLUSIVE:
280 if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
281 /*
282 * Recursive lock.
283 */
284 if ((extflags & LK_CANRECURSE) == 0)
285 panic("lockmgr: locking against myself");
286 lkp->lk_exclusivecount++;
287 COUNT(p, 1);
288 break;
289 }
290 /*
291 * If we are just polling, check to see if we will sleep.
292 */
293 if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
294 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
295 lkp->lk_sharecount != 0)) {
296 error = EBUSY;
297 break;
298 }
299 /*
300 * Try to acquire the want_exclusive flag.
301 */
302 ACQUIRE(lkp, error, extflags, lkp->lk_flags &
303 (LK_HAVE_EXCL | LK_WANT_EXCL));
304 if (error)
305 break;
306 lkp->lk_flags |= LK_WANT_EXCL;
307 /*
308 * Wait for shared locks and upgrades to finish.
309 */
310 ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
311 (lkp->lk_flags & LK_WANT_UPGRADE));
312 lkp->lk_flags &= ~LK_WANT_EXCL;
313 if (error)
314 break;
315 lkp->lk_flags |= LK_HAVE_EXCL;
316 lkp->lk_lockholder = pid;
317 if (lkp->lk_exclusivecount != 0)
318 panic("lockmgr: non-zero exclusive count");
319 lkp->lk_exclusivecount = 1;
320 COUNT(p, 1);
321 break;
322
323 case LK_RELEASE:
324 if (lkp->lk_exclusivecount != 0) {
325 if (pid != lkp->lk_lockholder)
326 panic("lockmgr: pid %d, not %s %d unlocking",
327 pid, "exclusive lock holder",
328 lkp->lk_lockholder);
329 lkp->lk_exclusivecount--;
330 COUNT(p, -1);
331 if (lkp->lk_exclusivecount == 0) {
332 lkp->lk_flags &= ~LK_HAVE_EXCL;
333 lkp->lk_lockholder = LK_NOPROC;
334 }
335 } else if (lkp->lk_sharecount != 0) {
336 lkp->lk_sharecount--;
337 COUNT(p, -1);
338 }
339 if (lkp->lk_waitcount)
340 wakeup((void *)lkp);
341 break;
342
343 case LK_DRAIN:
344 /*
345 * Check that we do not already hold the lock, as it can
346 * never drain if we do. Unfortunately, we have no way to
347 * check for holding a shared lock, but at least we can
348 * check for an exclusive one.
349 */
350 if (lkp->lk_lockholder == pid)
351 panic("lockmgr: draining against myself");
352 /*
353 * If we are just polling, check to see if we will sleep.
354 */
355 if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
356 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
357 lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
358 error = EBUSY;
359 break;
360 }
361 PAUSE(lkp, ((lkp->lk_flags &
362 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
363 lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
364 for (error = 0; ((lkp->lk_flags &
365 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
366 lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
367 lkp->lk_flags |= LK_WAITDRAIN;
368 simple_unlock(&lkp->lk_interlock);
369 if (error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
370 lkp->lk_wmesg, lkp->lk_timo))
371 return (error);
372 if ((extflags) & LK_SLEEPFAIL)
373 return (ENOLCK);
374 simple_lock(&lkp->lk_interlock);
375 }
376 lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
377 lkp->lk_lockholder = pid;
378 lkp->lk_exclusivecount = 1;
379 COUNT(p, 1);
380 break;
381
382 default:
383 simple_unlock(&lkp->lk_interlock);
384 panic("lockmgr: unknown locktype request %d",
385 flags & LK_TYPE_MASK);
386 /* NOTREACHED */
387 }
388 if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
389 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
390 lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
391 lkp->lk_flags &= ~LK_WAITDRAIN;
392 wakeup((void *)&lkp->lk_flags);
393 }
394 simple_unlock(&lkp->lk_interlock);
395 return (error);
396 }
397
398 /*
399 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
401 */
402 lockmgr_printinfo(lkp)
403 struct lock *lkp;
404 {
405
406 if (lkp->lk_sharecount)
407 printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
408 lkp->lk_sharecount);
409 else if (lkp->lk_flags & LK_HAVE_EXCL)
410 printf(" lock type %s: EXCL (count %d) by pid %d",
411 lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
412 if (lkp->lk_waitcount > 0)
413 printf(" with %d pending", lkp->lk_waitcount);
414 }
415
#if defined(DEBUG) && NCPUS == 1
#include <sys/kernel.h>
#include <vm/vm.h>
#include <sys/sysctl.h>
/*
 * Reaction to a simple-lock usage error, exported as sysctl debug2:
 * -1 = panic, 0 = just print, 1 = print and backtrace, >1 = print and
 * pause that many seconds (see _simple_lock/_simple_unlock below).
 */
int lockpausetime = 0;
struct ctldebug debug2 = { "lockpausetime", &lockpausetime };
/*
 * When nonzero, the checking lock routines below become no-ops.
 * NOTE(review): not set anywhere in this file as shown -- presumably
 * toggled elsewhere to permit deliberate recursion; confirm.
 */
int simplelockrecurse;
/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(alp)
	struct simplelock *alp;
{

	/* Mark the lock released; required before first use. */
	alp->lock_data = 0;
}
434
435 void
_simple_lock(alp,id,l)436 _simple_lock(alp, id, l)
437 __volatile struct simplelock *alp;
438 const char *id;
439 int l;
440 {
441
442 if (simplelockrecurse)
443 return;
444 if (alp->lock_data == 1) {
445 if (lockpausetime == -1)
446 panic("%s:%d: simple_lock: lock held", id, l);
447 printf("%s:%d: simple_lock: lock held\n", id, l);
448 if (lockpausetime == 1) {
449 BACKTRACE(curproc);
450 } else if (lockpausetime > 1) {
451 printf("%s:%d: simple_lock: lock held...", id, l);
452 tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
453 lockpausetime * hz);
454 printf(" continuing\n");
455 }
456 }
457 alp->lock_data = 1;
458 if (curproc)
459 curproc->p_simple_locks++;
460 }
461
462 int
_simple_lock_try(alp,id,l)463 _simple_lock_try(alp, id, l)
464 __volatile struct simplelock *alp;
465 const char *id;
466 int l;
467 {
468
469 if (alp->lock_data)
470 return (0);
471 if (simplelockrecurse)
472 return (1);
473 alp->lock_data = 1;
474 if (curproc)
475 curproc->p_simple_locks++;
476 return (1);
477 }
478
479 void
_simple_unlock(alp,id,l)480 _simple_unlock(alp, id, l)
481 __volatile struct simplelock *alp;
482 const char *id;
483 int l;
484 {
485
486 if (simplelockrecurse)
487 return;
488 if (alp->lock_data == 0) {
489 if (lockpausetime == -1)
490 panic("%s:%d: simple_unlock: lock not held", id, l);
491 printf("%s:%d: simple_unlock: lock not held\n", id, l);
492 if (lockpausetime == 1) {
493 BACKTRACE(curproc);
494 } else if (lockpausetime > 1) {
495 printf("%s:%d: simple_unlock: lock not held...", id, l);
496 tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
497 lockpausetime * hz);
498 printf(" continuing\n");
499 }
500 }
501 alp->lock_data = 0;
502 if (curproc)
503 curproc->p_simple_locks--;
504 }
505 #endif /* DEBUG && NCPUS == 1 */
506