/*	$OpenBSD: vfs_lockf.c,v 1.50 2022/08/14 01:58:28 jsg Exp $	*/
/*	$NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/rwlock.h>
#include <sys/unistd.h>

/*
 * The lockf structure is a kernel structure which contains the information
 * associated with a byte range lock.  The lockf structures are linked into
 * the lock state attached to the inode.  Locks are sorted by the starting
 * byte of the lock for efficiency.
 */
TAILQ_HEAD(locklist, lockf);

struct lockf {
	short	lf_flags;	   /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */
	short	lf_type;	   /* Lock type: F_RDLCK, F_WRLCK */
	off_t	lf_start;	   /* The byte # of the start of the lock */
	off_t	lf_end;		   /* The byte # of the end of the lock (-1=EOF)*/
	caddr_t	lf_id;		   /* The id of the resource holding the lock */
	struct	lockf_state *lf_state;	/* State associated with the lock */
	TAILQ_ENTRY(lockf) lf_entry;
	struct	lockf *lf_blk;	   /* The lock that blocks us */
	struct	locklist lf_blkhd; /* The list of blocked locks */
	TAILQ_ENTRY(lockf) lf_block; /* A request waiting for a lock */
	uid_t	lf_uid;		   /* User ID responsible */
	pid_t	lf_pid;		   /* POSIX - owner pid */
};

struct lockf_state {
	TAILQ_HEAD(, lockf)	  ls_locks;	/* list of active locks */
	TAILQ_HEAD(, lockf)	  ls_pending;	/* list of pending locks */
	struct lockf_state	**ls_owner;	/* owner */
	int			  ls_refs;	/* reference counter */
};

struct pool lockf_state_pool;
struct pool lockf_pool;

#define SELF	0x1
#define OTHERS	0x2

#ifdef LOCKF_DEBUG

#define	DEBUG_SETLOCK		0x01
#define	DEBUG_CLEARLOCK		0x02
#define	DEBUG_GETLOCK		0x04
#define	DEBUG_FINDOVR		0x08
#define	DEBUG_SPLIT		0x10
#define	DEBUG_WAKELOCK		0x20
#define	DEBUG_LINK		0x40

int	lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK;

void	lf_print(const char *, struct lockf *);
void	lf_printlist(const char *, struct lockf *);

#define	DPRINTF(args, level)	if (lockf_debug & (level)) printf args
#define	LFPRINT(args, level)	if (lockf_debug & (level)) lf_print args
#else
#define	DPRINTF(args, level)
#define	LFPRINT(args, level)
#endif

struct lockf *lf_alloc(uid_t, int);
void	lf_free(struct lockf *);
int	lf_clearlock(struct lockf *);
int	lf_findoverlap(struct lockf *, struct lockf *, int, struct lockf **);
struct lockf *lf_getblock(struct lockf *, struct lockf *);
int	lf_getlock(struct lockf *, struct flock *);
int	lf_setlock(struct lockf *);
void	lf_split(struct lockf *, struct lockf *);
void	lf_wakelock(struct lockf *, int);
int	lf_deadlock(struct lockf *);
void	ls_ref(struct lockf_state *);
void	ls_rele(struct lockf_state *);

/*
 * Serializes access to each instance of struct lockf and struct lockf_state
 * and each pointer from a vnode to struct lockf_state.
 */
struct rwlock lockf_lock = RWLOCK_INITIALIZER("lockflk");

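/*
 * Initialize the pools backing struct lockf_state and struct lockf
 * allocations.
 */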
void
lf_init(void)
{
	pool_init(&lockf_state_pool, sizeof(struct lockf_state), 0, IPL_NONE,
	    PR_WAITOK | PR_RWLOCK, "lockfspl", NULL);
	pool_init(&lockf_pool, sizeof(struct lockf), 0, IPL_NONE,
	    PR_WAITOK | PR_RWLOCK, "lockfpl", NULL);
}

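/*
 * Lock state reference counting: each struct lockf holds a reference on
 * its lf_state (taken in lf_advlock()/lf_split(), dropped in lf_free()),
 * and lf_purgelocks() takes a transient one.  When the last reference is
 * dropped, ls_rele() clears the owner's pointer and frees the state.
 */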
void
ls_ref(struct lockf_state *ls)
{
	rw_assert_wrlock(&lockf_lock);

	ls->ls_refs++;
}

void
ls_rele(struct lockf_state *ls)
{
	rw_assert_wrlock(&lockf_lock);

	if (--ls->ls_refs > 0)
		return;

	KASSERT(TAILQ_EMPTY(&ls->ls_locks));
	KASSERT(TAILQ_EMPTY(&ls->ls_pending));

	*ls->ls_owner = NULL;
	pool_put(&lockf_state_pool, ls);
}

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're totally cut off.
 */
int maxlocksperuid = 1024;

/*
 * 3 options for allowfail.
 * 0 - always allocate.  1 - cutoff at limit.  2 - cutoff at double limit.
 */
struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;

	uip = uid_find(uid);
	if (uid && allowfail && uip->ui_lockcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
		uid_release(uip);
		return (NULL);
	}
	uip->ui_lockcnt++;
	uid_release(uip);
	lock = pool_get(&lockf_pool, PR_WAITOK);
	lock->lf_uid = uid;
	return (lock);
}

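/*
 * Release a lock: return the per-uid accounting taken in lf_alloc(),
 * drop the lock state reference and put the lock back into the pool.
 */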
void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_free", lock), DEBUG_LINK);

	KASSERT(TAILQ_EMPTY(&lock->lf_blkhd));

	ls_rele(lock->lf_state);

	uip = uid_find(lock->lf_uid);
	uip->ui_lockcnt--;
	uid_release(uip);
	pool_put(&lockf_pool, lock);
}


/*
 * Do an advisory lock operation.
 */
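/*
 * A filesystem's VOP_ADVLOCK() implementation typically just forwards
 * here; sketched roughly (field names vary per filesystem):
 *
 *	struct inode *ip = VTOI(ap->a_vp);
 *
 *	return (lf_advlock(&ip->i_lockf, DIP(ip, size), ap->a_id,
 *	    ap->a_op, ap->a_fl, ap->a_flags));
 */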
int
lf_advlock(struct lockf_state **state, off_t size, caddr_t id, int op,
    struct flock *fl, int flags)
{
	struct proc *p = curproc;
	struct lockf_state *ls;
	struct lockf *lock;
	off_t start, end;
	int error = 0;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;
	case SEEK_END:
		start = size + fl->l_start;
		break;
	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len > 0) {
		if (fl->l_len - 1 > LLONG_MAX - start)
			return (EOVERFLOW);
		end = start + (fl->l_len - 1);
		/* Avoid ambiguity at the end of the range. */
		if (end == LLONG_MAX)
			end = -1;
	} else if (fl->l_len < 0) {
		if (start + fl->l_len < 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
	} else {
		end = -1;
	}
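	/*
	 * For example, with start = 100: l_len = 10 covers bytes [100, 109],
	 * l_len = -10 covers [90, 99], and l_len = 0 covers from byte 100 to
	 * EOF (lf_end == -1).
	 */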

	rw_enter_write(&lockf_lock);
	ls = *state;

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (ls == NULL && op != F_SETLK) {
		fl->l_type = F_UNLCK;
		goto out;
	}

	if (ls == NULL) {
		ls = pool_get(&lockf_state_pool, PR_WAITOK | PR_ZERO);
		ls->ls_owner = state;
		TAILQ_INIT(&ls->ls_locks);
		TAILQ_INIT(&ls->ls_pending);
		*state = ls;
	}
	ls_ref(ls);

	lock = lf_alloc(p->p_ucred->cr_uid, op == F_SETLK ? 1 : 2);
	if (!lock) {
		ls_rele(ls);
		error = ENOLCK;
		goto out;
	}
	lock->lf_flags = flags;
	lock->lf_type = fl->l_type;
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_state = ls;
	lock->lf_blk = NULL;
	lock->lf_pid = (flags & F_POSIX) ? p->p_p->ps_pid : -1;
	TAILQ_INIT(&lock->lf_blkhd);

	switch (op) {
	case F_SETLK:
		error = lf_setlock(lock);
		break;
	case F_UNLCK:
		error = lf_clearlock(lock);
		lf_free(lock);
		break;
	case F_GETLK:
		error = lf_getlock(lock, fl);
		lf_free(lock);
		break;
	default:
		lf_free(lock);
		error = EINVAL;
		break;
	}

out:
	rw_exit_write(&lockf_lock);
	return (error);
}

/*
 * Set a byte-range lock.
 */
int
lf_setlock(struct lockf *lock)
{
	struct lockf *block;
	struct lockf *overlap, *ltmp;
	int ovcase, priority, needtolink, error;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK);

	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	for (;;) {
		block = lf_getblock(TAILQ_FIRST(&lock->lf_state->ls_locks),
		    lock);
		if (block == NULL)
			break;

		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return (EAGAIN);
		}

		/*
		 * Lock is blocked, check for deadlock before proceeding.
		 * Note: flock style locks cover the whole file; there is no
		 * chance of deadlock.
		 */
		if ((lock->lf_flags & F_POSIX) && lf_deadlock(lock)) {
			lf_free(lock);
			return (EDEADLK);
		}

		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) && lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void)lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_blk = block;
		LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK);
		LFPRINT(("lf_setlock: blocking on", block), DEBUG_SETLOCK);
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
		TAILQ_INSERT_TAIL(&lock->lf_state->ls_pending, lock, lf_entry);
		error = rwsleep_nsec(lock, &lockf_lock, priority, "lockf",
		    INFSLP);
		TAILQ_REMOVE(&lock->lf_state->ls_pending, lock, lf_entry);
		wakeup_one(lock->lf_state);
		if (lock->lf_blk != NULL) {
			TAILQ_REMOVE(&lock->lf_blk->lf_blkhd, lock, lf_block);
			lock->lf_blk = NULL;
		}
		if (error) {
			lf_free(lock);
			return (error);
		}
		if (lock->lf_flags & F_INTR) {
			lf_free(lock);
			return (EINTR);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	block = TAILQ_FIRST(&lock->lf_state->ls_locks);
	overlap = NULL;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &overlap);
		if (ovcase)
			block = TAILQ_NEXT(overlap, lf_entry);
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				if (overlap)	/* insert before overlap */
					TAILQ_INSERT_BEFORE(overlap, lock,
					    lf_entry);
				else		/* first or last lock in list */
					TAILQ_INSERT_TAIL(&lock->lf_state->ls_locks,
					    lock, lf_entry);
			}
			break;
		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap, 0);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;
		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				if (!needtolink)
					TAILQ_REMOVE(&lock->lf_state->ls_locks,
					    lock, lf_entry);
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				if (!needtolink)
					TAILQ_REMOVE(&lock->lf_state->ls_locks,
					    lock, lf_entry);
				TAILQ_INSERT_BEFORE(overlap, lock, lf_entry);
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap, 0);
			break;
		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap, 0);
			} else {
				while ((ltmp =
				    TAILQ_FIRST(&overlap->lf_blkhd))) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_blk = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				TAILQ_INSERT_BEFORE(overlap, lock, lf_entry);
				needtolink = 0;
			}
			TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap, lf_entry);
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			if (!needtolink)
				TAILQ_REMOVE(&lock->lf_state->ls_locks, lock,
				    lf_entry);
			TAILQ_INSERT_AFTER(&lock->lf_state->ls_locks, overlap,
			    lock, lf_entry);
			overlap->lf_end = lock->lf_start - 1;
			lf_wakelock(overlap, 0);
			needtolink = 0;
			continue;
		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink)
				TAILQ_INSERT_BEFORE(overlap, lock, lf_entry);
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap, 0);
			break;
		}
		break;
	}
	LFPRINT(("lf_setlock: got the lock", lock), DEBUG_SETLOCK);
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
lf_clearlock(struct lockf *lock)
{
	struct lockf *lf, *overlap;
	int ovcase;

	rw_assert_wrlock(&lockf_lock);

	lf = TAILQ_FIRST(&lock->lf_state->ls_locks);
	if (lf == NULL)
		return (0);

	LFPRINT(("lf_clearlock", lock), DEBUG_CLEARLOCK);
	while ((ovcase = lf_findoverlap(lf, lock, SELF, &overlap))) {
		lf_wakelock(overlap, 0);

		switch (ovcase) {
		case 1: /* overlap == lock */
			TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap,
			    lf_entry);
			lf_free(overlap);
			break;
		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == lock->lf_start) {
				overlap->lf_start = lock->lf_end + 1;
				break;
			}
			lf_split(overlap, lock);
			/*
			 * lf_split() has linked the clearing lock into the
			 * list; lf_clearlock() must leave it detached, so
			 * take it back out.
			 */
			TAILQ_REMOVE(&lock->lf_state->ls_locks, lock, lf_entry);
			break;
		case 3: /* lock contains overlap */
			lf = TAILQ_NEXT(overlap, lf_entry);
			TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap,
			    lf_entry);
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			overlap->lf_end = lock->lf_start - 1;
			lf = TAILQ_NEXT(overlap, lf_entry);
			continue;
		case 5: /* overlap ends after lock */
			overlap->lf_start = lock->lf_end + 1;
			break;
		}
		break;
	}
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block, *lf;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_getlock", lock), DEBUG_CLEARLOCK);

	lf = TAILQ_FIRST(&lock->lf_state->ls_locks);
	if ((block = lf_getblock(lf, lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		fl->l_pid = block->lf_pid;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(struct lockf *lf, struct lockf *lock)
{
	struct lockf *overlap;

	rw_assert_wrlock(&lockf_lock);

	while (lf_findoverlap(lf, lock, OTHERS, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = TAILQ_NEXT(overlap, lf_entry);
	}
	return (NULL);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf **overlap)
{
	off_t start, end;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_findoverlap: looking for overlap in", lock), DEBUG_FINDOVR);

	*overlap = lf;
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NULL) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*overlap = lf = TAILQ_NEXT(lf, lf_entry);
			continue;
		}
		LFPRINT(("\tchecking", lf), DEBUG_FINDOVR);
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
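		/*
		 * Pictorially (a sketch; "lock" is the incoming request and
		 * "lf" the existing lock being examined):
		 *
		 *	      lock:     |--------|
		 *	case 0:  lf: |-|            or         |-|
		 *	case 1:  lf:    |--------|
		 *	case 2:  lf:  |------------|
		 *	case 3:  lf:       |---|
		 *	case 4:  lf:  |------|
		 *	case 5:  lf:        |----------|
		 */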

		/* Case 0 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			DPRINTF(("no overlap\n"), DEBUG_FINDOVR);
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*overlap = lf = TAILQ_NEXT(lf, lf_entry);
			continue;
		}
		/* Case 1 */
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR);
			return (1);
		}
		/* Case 2 */
		if ((lf->lf_start <= start) &&
		    (lf->lf_end == -1 || (end != -1 && lf->lf_end >= end))) {
			DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR);
			return (2);
		}
		/* Case 3 */
		if (start <= lf->lf_start &&
		    (end == -1 || (lf->lf_end != -1 && end >= lf->lf_end))) {
			DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR);
			return (3);
		}
		/* Case 4 */
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			DPRINTF(("overlap starts before lock\n"),
			    DEBUG_FINDOVR);
			return (4);
		}
		/* Case 5 */
		if ((lf->lf_start > start) && (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR);
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Purge all locks associated with the given lock state.
 */
void
lf_purgelocks(struct lockf_state **state)
{
	struct lockf_state *ls;
	struct lockf *lock;

	rw_enter_write(&lockf_lock);

	ls = *state;
	if (ls == NULL)
		goto out;

	ls_ref(ls);

	/* Interrupt blocked locks and wait for all of them to finish. */
	TAILQ_FOREACH(lock, &ls->ls_locks, lf_entry) {
		LFPRINT(("lf_purgelocks: wakeup", lock), DEBUG_SETLOCK);
		lf_wakelock(lock, F_INTR);
	}
	while (!TAILQ_EMPTY(&ls->ls_pending))
		rwsleep_nsec(ls, &lockf_lock, PLOCK, "lockfp", INFSLP);

	/*
	 * Any remaining locks cannot block other locks at this point and can
	 * safely be removed.
	 */
	while ((lock = TAILQ_FIRST(&ls->ls_locks))) {
		TAILQ_REMOVE(&ls->ls_locks, lock, lf_entry);
		lf_free(lock);
	}

	/* This is the last expected thread to hold a lock state reference. */
	KASSERT(ls->ls_refs == 1);
	ls_rele(ls);

out:
	rw_exit_write(&lockf_lock);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
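/*
 * For example, splitting the range [40, 49] out of an existing lock
 * covering [0, 99] shrinks lock1 to [0, 39] and allocates a new lock
 * for [50, 99], with lock2 linked in between.
 */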
void
lf_split(struct lockf *lock1, struct lockf *lock2)
{
	struct lockf *splitlock;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_split", lock1), DEBUG_SPLIT);
	LFPRINT(("splitting from", lock2), DEBUG_SPLIT);

	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		TAILQ_INSERT_BEFORE(lock1, lock2, lf_entry);
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock1, lock2,
		    lf_entry);
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	splitlock = lf_alloc(lock1->lf_uid, 0);
	splitlock->lf_flags = lock1->lf_flags;
	splitlock->lf_type = lock1->lf_type;
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_end = lock1->lf_end;
	splitlock->lf_id = lock1->lf_id;
	splitlock->lf_state = lock1->lf_state;
	splitlock->lf_blk = NULL;
	splitlock->lf_pid = lock1->lf_pid;
	TAILQ_INIT(&splitlock->lf_blkhd);
	ls_ref(splitlock->lf_state);
	lock1->lf_end = lock2->lf_start - 1;

	TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock1, lock2, lf_entry);
	TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock2, splitlock,
	    lf_entry);
}

/*
 * Wakeup a blocklist
 */
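/*
 * The given flags (e.g. F_INTR from lf_purgelocks()) are OR'ed into each
 * woken lock so that lf_setlock() can tell why it was woken up.
 */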
void
lf_wakelock(struct lockf *lock, int flags)
{
	struct lockf *wakelock;

	rw_assert_wrlock(&lockf_lock);

	while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) {
		TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block);
		wakelock->lf_blk = NULL;
		wakelock->lf_flags |= flags;
		wakeup_one(wakelock);
	}
}

/*
 * Returns non-zero if the given lock would cause a deadlock.
 */
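/*
 * Only the simple two-party cycle is detected: for each posix-style lock
 * that blocks the request, check whether its owner is itself waiting (on
 * the ls_pending list) behind a lock owned by the requesting process.
 * Longer cycles are not detected.
 */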
int
lf_deadlock(struct lockf *lock)
{
	struct lockf *block, *lf, *pending;

	lf = TAILQ_FIRST(&lock->lf_state->ls_locks);
	for (; (block = lf_getblock(lf, lock)) != NULL;
	    lf = TAILQ_NEXT(block, lf_entry)) {
		if ((block->lf_flags & F_POSIX) == 0)
			continue;

		TAILQ_FOREACH(pending, &lock->lf_state->ls_pending, lf_entry) {
			if (pending->lf_blk == NULL)
				continue;	/* lock already unblocked */

			if (pending->lf_pid == block->lf_pid &&
			    pending->lf_blk->lf_pid == lock->lf_pid)
				return (1);
		}
	}

	return (0);
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(const char *tag, struct lockf *lock)
{
	struct lockf *block;

	if (tag)
		printf("%s: ", tag);
	printf("lock %p", lock);
	if (lock == NULL) {
		printf("\n");
		return;
	}
	printf(", %s %p %s, start %lld, end %lld",
	    lock->lf_flags & F_POSIX ? "posix" : "flock",
	    lock->lf_id,
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	printf(", next %p, state %p",
	    TAILQ_NEXT(lock, lf_entry), lock->lf_state);
	block = TAILQ_FIRST(&lock->lf_blkhd);
	if (block)
		printf(", block");
	TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block)
		printf(" %p,", block);
	printf("\n");
}

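/*
 * Print out the list of locks attached to the given lock's state,
 * marking the given lock with an asterisk.
 */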
void
lf_printlist(const char *tag, struct lockf *lock)
{
	struct lockf *lf;

	printf("%s: Lock list:\n", tag);
	TAILQ_FOREACH(lf, &lock->lf_state->ls_locks, lf_entry) {
		if (lock == lf)
			printf(" * ");
		else
			printf("   ");
		lf_print(NULL, lf);
	}
}
#endif /* LOCKF_DEBUG */
