/*	$OpenBSD: vfs_lockf.c,v 1.48 2022/06/01 14:18:43 visa Exp $	*/
/*	$NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/rwlock.h>
#include <sys/unistd.h>

/*
 * The lockf structure is a kernel structure which contains the information
 * associated with a byte range lock.  The lockf structures are linked into
 * the per-file lockf_state structure.  Locks are sorted by the starting
 * byte of the lock for efficiency.
 */
TAILQ_HEAD(locklist, lockf);

struct lockf {
	short	lf_flags;	 /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */
	short	lf_type;	 /* Lock type: F_RDLCK, F_WRLCK */
	off_t	lf_start;	 /* The byte # of the start of the lock */
	off_t	lf_end;		 /* The byte # of the end of the lock (-1=EOF)*/
	caddr_t	lf_id;		 /* The id of the resource holding the lock */
	struct	lockf_state *lf_state;	/* State associated with the lock */
	TAILQ_ENTRY(lockf) lf_entry;
	struct	lockf *lf_blk;	 /* The lock that blocks us */
	struct	locklist lf_blkhd;	/* The list of blocked locks */
	TAILQ_ENTRY(lockf) lf_block; /* A request waiting for a lock */
	uid_t	lf_uid;		/* User ID responsible */
	pid_t	lf_pid;		/* POSIX - owner pid */
};

struct lockf_state {
	TAILQ_HEAD(, lockf)	  ls_locks;	/* list of active locks */
	TAILQ_HEAD(, lockf)	  ls_pending;	/* list of pending locks */
	struct lockf_state	**ls_owner;	/* owner */
	int			  ls_refs;	/* reference counter */
};
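
/*
 * List membership, as implemented below: each lockf is linked through
 * lf_entry onto exactly one of ls_locks (granted locks) or ls_pending
 * (requests sleeping in lf_setlock()).  A blocked request additionally
 * hangs off its blocker's lf_blkhd list through lf_block, with lf_blk
 * pointing back at the blocker.
 */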

struct pool lockf_state_pool;
struct pool lockf_pool;

#define SELF	0x1
#define OTHERS	0x2

#ifdef LOCKF_DEBUG

#define	DEBUG_SETLOCK		0x01
#define	DEBUG_CLEARLOCK		0x02
#define	DEBUG_GETLOCK		0x04
#define	DEBUG_FINDOVR		0x08
#define	DEBUG_SPLIT		0x10
#define	DEBUG_WAKELOCK		0x20
#define	DEBUG_LINK		0x40

int	lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK;

void	lf_print(const char *, struct lockf *);
void	lf_printlist(const char *, struct lockf *);

#define	DPRINTF(args, level)	if (lockf_debug & (level)) printf args
#define	LFPRINT(args, level)	if (lockf_debug & (level)) lf_print args
#else
#define	DPRINTF(args, level)
#define	LFPRINT(args, level)
#endif

struct lockf *lf_alloc(uid_t, int);
void lf_free(struct lockf *);
int lf_clearlock(struct lockf *);
int lf_findoverlap(struct lockf *, struct lockf *, int, struct lockf **);
struct lockf *lf_getblock(struct lockf *, struct lockf *);
int lf_getlock(struct lockf *, struct flock *);
int lf_setlock(struct lockf *);
void lf_split(struct lockf *, struct lockf *);
void lf_wakelock(struct lockf *, int);
int lf_deadlock(struct lockf *);
void ls_ref(struct lockf_state *);
void ls_rele(struct lockf_state *);

/*
 * Serializes access to each instance of struct lockf and struct lockf_state
 * and each pointer from a vnode to struct lockf_state.
 */
struct rwlock lockf_lock = RWLOCK_INITIALIZER("lockflk");

void
lf_init(void)
{
	pool_init(&lockf_state_pool, sizeof(struct lockf_state), 0, IPL_NONE,
	    PR_WAITOK | PR_RWLOCK, "lockfspl", NULL);
	pool_init(&lockf_pool, sizeof(struct lockf), 0, IPL_NONE,
	    PR_WAITOK | PR_RWLOCK, "lockfpl", NULL);
}

void
ls_ref(struct lockf_state *ls)
{
	rw_assert_wrlock(&lockf_lock);

	ls->ls_refs++;
}

void
ls_rele(struct lockf_state *ls)
{
	rw_assert_wrlock(&lockf_lock);

	if (--ls->ls_refs > 0)
		return;

#ifdef LOCKF_DIAGNOSTIC
	KASSERT(TAILQ_EMPTY(&ls->ls_locks));
	KASSERT(TAILQ_EMPTY(&ls->ls_pending));
#endif

	*ls->ls_owner = NULL;
	pool_put(&lockf_state_pool, ls);
}

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're totally cut off.
 */
int maxlocksperuid = 1024;

/*
 * Three options for allowfail:
 * 0 - always allocate.  1 - cut off at limit.  2 - cut off at double limit.
 */
struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;

	uip = uid_find(uid);
	if (uid && allowfail && uip->ui_lockcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
		uid_release(uip);
		return (NULL);
	}
	uip->ui_lockcnt++;
	uid_release(uip);
	lock = pool_get(&lockf_pool, PR_WAITOK);
	lock->lf_uid = uid;
	return (lock);
}

void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_free", lock), DEBUG_LINK);

#ifdef LOCKF_DIAGNOSTIC
	KASSERT(TAILQ_EMPTY(&lock->lf_blkhd));
#endif /* LOCKF_DIAGNOSTIC */

	ls_rele(lock->lf_state);

	uip = uid_find(lock->lf_uid);
	uip->ui_lockcnt--;
	uid_release(uip);
	pool_put(&lockf_pool, lock);
}

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct lockf_state **state, off_t size, caddr_t id, int op,
    struct flock *fl, int flags)
{
	struct proc *p = curproc;
	struct lockf_state *ls;
	struct lockf *lock;
	off_t start, end;
	int error = 0;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;
	case SEEK_END:
		start = size + fl->l_start;
		break;
	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len > 0) {
		if (fl->l_len - 1 > LLONG_MAX - start)
			return (EOVERFLOW);
		end = start + (fl->l_len - 1);
		/* Avoid ambiguity at the end of the range. */
		if (end == LLONG_MAX)
			end = -1;
	} else if (fl->l_len < 0) {
		if (start + fl->l_len < 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
	} else {
		end = -1;
	}
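	/*
	 * Worked examples of the conversion above: with l_whence = SEEK_SET,
	 * l_start = 100 and l_len = 50, the lock covers [100, 149].  With
	 * l_len = 0 the lock runs to EOF, encoded as end = -1.  With
	 * l_len = -50 the range is the 50 bytes before l_start, i.e.
	 * [50, 99].
	 */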

	rw_enter_write(&lockf_lock);
	ls = *state;

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (ls == NULL && op != F_SETLK) {
		fl->l_type = F_UNLCK;
		goto out;
	}

	if (ls == NULL) {
		ls = pool_get(&lockf_state_pool, PR_WAITOK | PR_ZERO);
		ls->ls_owner = state;
		TAILQ_INIT(&ls->ls_locks);
		TAILQ_INIT(&ls->ls_pending);
		*state = ls;
	}
	ls_ref(ls);

	lock = lf_alloc(p->p_ucred->cr_uid, op == F_SETLK ? 1 : 2);
	if (!lock) {
		ls_rele(ls);
		error = ENOLCK;
		goto out;
	}
	lock->lf_flags = flags;
	lock->lf_type = fl->l_type;
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_state = ls;
	lock->lf_blk = NULL;
	lock->lf_pid = (flags & F_POSIX) ? p->p_p->ps_pid : -1;
	TAILQ_INIT(&lock->lf_blkhd);

	switch (op) {
	case F_SETLK:
		error = lf_setlock(lock);
		break;
	case F_UNLCK:
		error = lf_clearlock(lock);
		lf_free(lock);
		break;
	case F_GETLK:
		error = lf_getlock(lock, fl);
		lf_free(lock);
		break;
	default:
		lf_free(lock);
		error = EINVAL;
		break;
	}

out:
	rw_exit_write(&lockf_lock);
	return (error);
}
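
/*
 * Example caller, as a sketch: file systems reach this code through their
 * VOP_ADVLOCK() implementation, passing a pointer to their per-inode lock
 * state.  Modeled on ufs_advlock(); exact field names vary per file system:
 *
 *	return (lf_advlock(&ip->i_lockf, DIP(ip, size), ip,
 *	    ap->a_op, ap->a_fl, ap->a_flags));
 */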

/*
 * Set a byte-range lock.
 */
int
lf_setlock(struct lockf *lock)
{
	struct lockf *block;
	struct lockf *overlap, *ltmp;
	int ovcase, priority, needtolink, error;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK);

	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	for (;;) {
		block = lf_getblock(TAILQ_FIRST(&lock->lf_state->ls_locks),
		    lock);
		if (block == NULL)
			break;

		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return (EAGAIN);
		}

		/*
		 * The lock is blocked; check for deadlock before proceeding.
		 * Note: flock-style locks cover the whole file, so there is
		 * no chance for deadlock.
		 */
		if ((lock->lf_flags & F_POSIX) && lf_deadlock(lock)) {
			lf_free(lock);
			return (EDEADLK);
		}

		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) && lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void)lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_blk = block;
		LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK);
		LFPRINT(("lf_setlock: blocking on", block), DEBUG_SETLOCK);
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
		TAILQ_INSERT_TAIL(&lock->lf_state->ls_pending, lock, lf_entry);
		error = rwsleep_nsec(lock, &lockf_lock, priority, "lockf",
		    INFSLP);
		TAILQ_REMOVE(&lock->lf_state->ls_pending, lock, lf_entry);
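		/*
		 * Wake up a possible lf_purgelocks() sleeper: the pending
		 * list has just shrunk by one entry.
		 */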
		wakeup_one(lock->lf_state);
		if (lock->lf_blk != NULL) {
			TAILQ_REMOVE(&lock->lf_blk->lf_blkhd, lock, lf_block);
			lock->lf_blk = NULL;
		}
		if (error) {
			lf_free(lock);
			return (error);
		}
		if (lock->lf_flags & F_INTR) {
			lf_free(lock);
			return (EINTR);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	block = TAILQ_FIRST(&lock->lf_state->ls_locks);
	overlap = NULL;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &overlap);
		if (ovcase)
			block = TAILQ_NEXT(overlap, lf_entry);
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				if (overlap)	/* insert before overlap */
					TAILQ_INSERT_BEFORE(overlap, lock,
					    lf_entry);
				else		/* first or last lock in list */
					TAILQ_INSERT_TAIL(&lock->lf_state->ls_locks,
					    lock, lf_entry);
			}
			break;
		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap, 0);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;
		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				if (!needtolink)
					TAILQ_REMOVE(&lock->lf_state->ls_locks,
					    lock, lf_entry);
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				if (!needtolink)
					TAILQ_REMOVE(&lock->lf_state->ls_locks,
					    lock, lf_entry);
				TAILQ_INSERT_BEFORE(overlap, lock, lf_entry);
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap, 0);
			break;
		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap, 0);
			} else {
				while ((ltmp =
				    TAILQ_FIRST(&overlap->lf_blkhd))) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_blk = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				TAILQ_INSERT_BEFORE(overlap, lock, lf_entry);
				needtolink = 0;
			}
			TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap, lf_entry);
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			if (!needtolink)
				TAILQ_REMOVE(&lock->lf_state->ls_locks, lock,
				    lf_entry);
			TAILQ_INSERT_AFTER(&lock->lf_state->ls_locks, overlap,
			    lock, lf_entry);
			overlap->lf_end = lock->lf_start - 1;
			lf_wakelock(overlap, 0);
			needtolink = 0;
			continue;
		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink)
				TAILQ_INSERT_BEFORE(overlap, lock, lf_entry);
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap, 0);
			break;
		}
		break;
	}
	LFPRINT(("lf_setlock: got the lock", lock), DEBUG_SETLOCK);
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wake up anyone we can.
 */
int
lf_clearlock(struct lockf *lock)
{
	struct lockf *lf, *overlap;
	int ovcase;

	rw_assert_wrlock(&lockf_lock);

	lf = TAILQ_FIRST(&lock->lf_state->ls_locks);
	if (lf == NULL)
		return (0);

	LFPRINT(("lf_clearlock", lock), DEBUG_CLEARLOCK);
	while ((ovcase = lf_findoverlap(lf, lock, SELF, &overlap))) {
		lf_wakelock(overlap, 0);

		switch (ovcase) {
		case 1: /* overlap == lock */
			TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap,
			    lf_entry);
			lf_free(overlap);
			break;
		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == lock->lf_start) {
				overlap->lf_start = lock->lf_end + 1;
				break;
			}
			lf_split(overlap, lock);
			/*
			 * lf_split() has inserted the lock into the list.
			 * Remove it again, as lf_clearlock() must leave
			 * the lock detached from the list; its callers
			 * continue to use or free it.
			 */
			TAILQ_REMOVE(&lock->lf_state->ls_locks, lock, lf_entry);
			break;
		case 3: /* lock contains overlap */
			lf = TAILQ_NEXT(overlap, lf_entry);
			TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap,
			    lf_entry);
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			overlap->lf_end = lock->lf_start - 1;
			lf = TAILQ_NEXT(overlap, lf_entry);
			continue;
		case 5: /* overlap ends after lock */
			overlap->lf_start = lock->lf_end + 1;
			break;
		}
		break;
	}
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block, *lf;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_getlock", lock), DEBUG_CLEARLOCK);

	lf = TAILQ_FIRST(&lock->lf_state->ls_locks);
	if ((block = lf_getblock(lf, lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		fl->l_pid = block->lf_pid;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(struct lockf *lf, struct lockf *lock)
{
	struct lockf *overlap;

	rw_assert_wrlock(&lockf_lock);

	while (lf_findoverlap(lf, lock, OTHERS, &overlap) != 0) {
		/*
		 * We've found an overlap; see if it blocks us.
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope.  Point to the next one on the list and
		 * see if it blocks us.
		 */
		lf = TAILQ_NEXT(overlap, lf_entry);
	}
	return (NULL);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf **overlap)
{
	off_t start, end;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_findoverlap: looking for overlap in", lock), DEBUG_FINDOVR);

	*overlap = lf;
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NULL) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*overlap = lf = TAILQ_NEXT(lf, lf_entry);
			continue;
		}
		LFPRINT(("\tchecking", lf), DEBUG_FINDOVR);
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
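		/*
		 * A sketch of the non-trivial cases, with "lock" being the
		 * incoming range and "lf" the existing lock under inspection:
		 *
		 *	case 1:  lock: |=======|    case 2:  lock:   |===|
		 *	         lf:   |-------|             lf:   |-------|
		 *
		 *	case 3:  lock: |=======|    case 4:  lock:    |====|
		 *	         lf:     |---|               lf:   |----|
		 *
		 *	case 5:  lock: |====|
		 *	         lf:      |----|
		 */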

		/* Case 0 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			DPRINTF(("no overlap\n"), DEBUG_FINDOVR);
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*overlap = lf = TAILQ_NEXT(lf, lf_entry);
			continue;
		}
		/* Case 1 */
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR);
			return (1);
		}
		/* Case 2 */
		if ((lf->lf_start <= start) &&
		    (lf->lf_end == -1 || (end != -1 && lf->lf_end >= end))) {
			DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR);
			return (2);
		}
		/* Case 3 */
		if (start <= lf->lf_start &&
		    (end == -1 || (lf->lf_end != -1 && end >= lf->lf_end))) {
			DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR);
			return (3);
		}
		/* Case 4 */
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			DPRINTF(("overlap starts before lock\n"),
			    DEBUG_FINDOVR);
			return (4);
		}
		/* Case 5 */
		if ((lf->lf_start > start) && (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR);
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Purge all locks associated with the given lock state.
 */
void
lf_purgelocks(struct lockf_state **state)
{
	struct lockf_state *ls;
	struct lockf *lock;

	rw_enter_write(&lockf_lock);

	ls = *state;
	if (ls == NULL)
		goto out;

	ls_ref(ls);

	/* Interrupt blocked locks and wait for all of them to finish. */
	TAILQ_FOREACH(lock, &ls->ls_locks, lf_entry) {
		LFPRINT(("lf_purgelocks: wakeup", lock), DEBUG_SETLOCK);
		lf_wakelock(lock, F_INTR);
	}
	while (!TAILQ_EMPTY(&ls->ls_pending))
		rwsleep_nsec(ls, &lockf_lock, PLOCK, "lockfp", INFSLP);

	/*
	 * Any remaining locks cannot block other locks at this point and can
	 * safely be removed.
	 */
	while ((lock = TAILQ_FIRST(&ls->ls_locks))) {
		TAILQ_REMOVE(&ls->ls_locks, lock, lf_entry);
		lf_free(lock);
	}

	/* This is the last expected thread to hold a lock state reference. */
#ifdef LOCKF_DIAGNOSTIC
	KASSERT(ls->ls_refs == 1);
#endif
	ls_rele(ls);

out:
	rw_exit_write(&lockf_lock);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
void
lf_split(struct lockf *lock1, struct lockf *lock2)
{
	struct lockf *splitlock;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_split", lock1), DEBUG_SPLIT);
	LFPRINT(("splitting from", lock2), DEBUG_SPLIT);

	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		TAILQ_INSERT_BEFORE(lock1, lock2, lf_entry);
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock1, lock2,
		    lf_entry);
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock.
	 */
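	/*
	 * For example, splitting lock1 [0,99] around lock2 [40,59] shrinks
	 * lock1 to [0,39], inserts lock2 [40,59] after it and adds a new
	 * splitlock [60,99] covering the tail.
	 */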
	splitlock = lf_alloc(lock1->lf_uid, 0);
	splitlock->lf_flags = lock1->lf_flags;
	splitlock->lf_type = lock1->lf_type;
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_end = lock1->lf_end;
	splitlock->lf_id = lock1->lf_id;
	splitlock->lf_state = lock1->lf_state;
	splitlock->lf_blk = NULL;
	splitlock->lf_pid = lock1->lf_pid;
	TAILQ_INIT(&splitlock->lf_blkhd);
	ls_ref(splitlock->lf_state);
	lock1->lf_end = lock2->lf_start - 1;

	TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock1, lock2, lf_entry);
	TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock2, splitlock,
	    lf_entry);
}

/*
 * Wake up the list of locks blocked on the given lock, OR'ing the given
 * flags (e.g. F_INTR from lf_purgelocks()) into each woken lock.
 */
void
lf_wakelock(struct lockf *lock, int flags)
{
	struct lockf *wakelock;

	rw_assert_wrlock(&lockf_lock);

	while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) {
		TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block);
		wakelock->lf_blk = NULL;
		wakelock->lf_flags |= flags;
		wakeup_one(wakelock);
	}
}

/*
 * Returns non-zero if the given lock would cause a deadlock.  For each
 * POSIX lock blocking the request, scan the pending requests: if the
 * blocker's owner is itself waiting on a lock owned by the requesting
 * process, the two processes would wait on each other forever.
 */
int
lf_deadlock(struct lockf *lock)
{
	struct lockf *block, *lf, *pending;

	lf = TAILQ_FIRST(&lock->lf_state->ls_locks);
	for (; (block = lf_getblock(lf, lock)) != NULL;
	    lf = TAILQ_NEXT(block, lf_entry)) {
		if ((block->lf_flags & F_POSIX) == 0)
			continue;

		TAILQ_FOREACH(pending, &lock->lf_state->ls_pending, lf_entry) {
			if (pending->lf_blk == NULL)
				continue; /* lock already unblocked */

			if (pending->lf_pid == block->lf_pid &&
			    pending->lf_blk->lf_pid == lock->lf_pid)
				return (1);
		}
	}

	return (0);
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(const char *tag, struct lockf *lock)
{
	struct lockf	*block;

	if (tag)
		printf("%s: ", tag);
	printf("lock %p", lock);
	if (lock == NULL) {
		printf("\n");
		return;
	}
	printf(", %s %p %s, start %lld, end %lld",
		lock->lf_flags & F_POSIX ? "posix" : "flock",
		lock->lf_id,
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	printf(", next %p, state %p",
	    TAILQ_NEXT(lock, lf_entry), lock->lf_state);
	block = TAILQ_FIRST(&lock->lf_blkhd);
	if (block)
		printf(", block");
	TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block)
		printf(" %p,", block);
	printf("\n");
}

void
lf_printlist(const char *tag, struct lockf *lock)
{
	struct lockf *lf;

	printf("%s: Lock list:\n", tag);
	TAILQ_FOREACH(lf, &lock->lf_state->ls_locks, lf_entry) {
		if (lock == lf)
			printf(" * ");
		else
			printf("   ");
		lf_print(NULL, lf);
	}
}
#endif /* LOCKF_DEBUG */