/*	$OpenBSD: vfs_lockf.c,v 1.15 2009/03/24 09:04:30 otto Exp $	*/
/*	$NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

struct pool lockfpool;

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#define SELF	0x1
#define OTHERS	0x2

#ifdef LOCKF_DEBUG

#define	DEBUG_SETLOCK		0x01
#define	DEBUG_CLEARLOCK		0x02
#define	DEBUG_GETLOCK		0x04
#define	DEBUG_FINDOVR		0x08
#define	DEBUG_SPLIT		0x10
#define	DEBUG_WAKELOCK		0x20

int	lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK;

#define	DPRINTF(args, level)	if (lockf_debug & (level)) printf args
#else
#define	DPRINTF(args, level)
#endif

void
lf_init(void)
{
	pool_init(&lockfpool, sizeof(struct lockf), 0, 0, 0,
	    "lockfpl", &pool_allocator_nointr);
}

struct lockf *lf_alloc(uid_t, int);
void lf_free(struct lockf *);
/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you are cut off entirely.
 */
int maxlocksperuid = 1024;

/*
 * Three options for allowfail:
 * 0 - always allocate.  1 - cut off at limit.  2 - cut off at double limit.
 */
struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;

	uip = uid_find(uid);
	if (uid && allowfail && uip->ui_lockcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2)))
		return (NULL);
	uip->ui_lockcnt++;
	lock = pool_get(&lockfpool, PR_WAITOK);
	lock->lf_uid = uid;
	return (lock);
}
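
/*
 * Worked example of the limit above (illustrative numbers, assuming the
 * default maxlocksperuid of 1024): for a non-root uid with
 * ui_lockcnt == 1500, lf_alloc(uid, 1) fails (1500 > 1024), so a new
 * F_SETLK request is refused with ENOLCK, while lf_alloc(uid, 2) still
 * succeeds (1500 <= 2048), so the unlock path can allocate the lock it
 * needs to perform the clear.  Only past double the limit
 * (ui_lockcnt > 2048) does allowfail == 2 fail as well.
 */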

void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;

	uip = uid_find(lock->lf_uid);
	uip->ui_lockcnt--;
	pool_put(&lockfpool, lock);
}


/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct lockf **head, off_t size, caddr_t id, int op,
    struct flock *fl, int flags)
{
	struct proc *p = curproc;
	struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0)
		end = -1;
	else {
		end = start + fl->l_len - 1;
		if (end < start)
			return (EINVAL);
	}

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (*head == NULL) {
		if (op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	/*
	 * Create the lockf structure.
	 */
	lock = lf_alloc(p->p_ucred->cr_uid, op == F_SETLK ? 1 : 2);
	if (!lock)
		return (ENOLCK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = NULL;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = flags;
	lock->lf_pid = (flags & F_POSIX) ? curproc->p_pid : -1;
	/*
	 * Do the requested operation.
	 */
	switch (op) {

	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		lf_free(lock);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		lf_free(lock);
		return (error);

	default:
		lf_free(lock);
		return (EINVAL);
	}
	/* NOTREACHED */
}
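
/*
 * Illustrative sketch (userland, not compiled as part of this file): how
 * a typical fcntl() request reaches the conversion above.  The names are
 * standard POSIX; the byte values are arbitrary.  With SEEK_SET, l_start
 * 100 and l_len 50 become the inclusive kernel range [100,149]; l_len 0
 * means "to end of file" and is represented internally as end == -1.
 */
#if 0
#include <fcntl.h>

int
lock_region(int fd)
{
	struct flock fl;

	fl.l_type = F_WRLCK;	/* exclusive byte-range lock */
	fl.l_whence = SEEK_SET;
	fl.l_start = 100;	/* lf_advlock computes start = 100 */
	fl.l_len = 50;		/* ... and end = 100 + 50 - 1 = 149 */
	return (fcntl(fd, F_SETLK, &fl));
}
#endif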

/*
 * Set a byte-range lock.
 */
int
lf_setlock(struct lockf *lock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SETLOCK)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct proc *wproc;
			struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			    (wproc->p_wmesg == lockstr) &&
			    (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					lf_free(lock);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
#ifdef LOCKF_DEBUG
		if (lockf_debug & DEBUG_SETLOCK) {
			lf_print("lf_setlock", lock);
			lf_print("lf_setlock: blocking on", block);
		}
#endif /* LOCKF_DEBUG */
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
		error = tsleep(lock, priority, lockstr, 0);
#if 0
		if (error) {
			/*
			 * Delete ourselves from the waiting to lock list.
			 */
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lf_free(lock);
			return (error);
		}
#else
		if (lock->lf_next != NULL) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NULL;
		}
		if (error) {
			lf_free(lock);
			return (error);
		}
#endif
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp =
				    TAILQ_FIRST(&overlap->lf_blkhd))) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SETLOCK) {
		lf_print("lf_setlock: got the lock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}
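
/*
 * Concrete deadlock scenario for the check in lf_setlock above
 * (illustrative; the processes and ranges are made up): process A holds a
 * POSIX lock on [0,9] and is asleep waiting for [10,19], which process B
 * holds; B now requests [0,9].  B's blocker is A's lock, so the walk
 * starts at A; A is sleeping on the "lockf" wait channel, and the lock
 * blocking A (waitblock->lf_next) is owned by B itself, so the request
 * fails with EDEADLK instead of sleeping forever.  maxlockdepth bounds
 * how many links of such a chain are followed.
 */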

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
lf_clearlock(struct lockf *lock)
{
	struct lockf **head = lock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NULL)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_CLEARLOCK)
		lf_print("lf_clearlock", lock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, lock, SELF,
					&prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			lf_free(overlap);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == lock->lf_start) {
				overlap->lf_start = lock->lf_end + 1;
				break;
			}
			lf_split(overlap, lock);
			overlap->lf_next = lock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = lock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = lock->lf_end + 1;
			break;
		}
		break;
	}
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_GETLOCK)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		fl->l_pid = block->lf_pid;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}
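
/*
 * Illustrative sketch (userland, not compiled as part of this file):
 * querying a conflicting lock with F_GETLK.  On return, if another
 * process holds a blocking lock, fl describes it (type, range, and owner
 * pid, as filled in by lf_getlock above); otherwise l_type is set to
 * F_UNLCK.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>

void
who_blocks(int fd)
{
	struct flock fl;

	fl.l_type = F_WRLCK;	/* would a write lock succeed? */
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;		/* 0 means "to end of file" */
	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
		printf("blocked by pid %d\n", (int)fl.l_pid);
}
#endif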

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf;

	prev = lock->lf_head;
	lf = *prev;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap; see if it blocks us.
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us.
		 */
		lf = overlap->lf_next;
	}
	return (NULL);
}
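
/*
 * The blocking rule used above, spelled out with sample ranges
 * (illustrative): two shared (F_RDLCK) locks on overlapping ranges, say
 * [0,9] held and [5,14] requested, never block each other; an overlap
 * blocks only when at least one side is exclusive, e.g. a requested
 * F_WRLCK on [5,14] against a held F_RDLCK on [0,9], or any request
 * overlapping a held F_WRLCK.
 */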

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

#ifdef LOCKF_DEBUG
	if (lf && lockf_debug & DEBUG_FINDOVR)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */

	*overlap = lf;
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NULL) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & DEBUG_FINDOVR)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */

		/* Case 0 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			DPRINTF(("no overlap\n"), DEBUG_FINDOVR);
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		/* Case 1 */
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR);
			return (1);
		}
		/* Case 2 */
		if ((lf->lf_start <= start) &&
		    (lf->lf_end == -1 ||
		    (end != -1 && lf->lf_end >= end))) {
			DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR);
			return (2);
		}
		/* Case 3 */
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR);
			return (3);
		}
		/* Case 4 */
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			DPRINTF(("overlap starts before lock\n"),
			    DEBUG_FINDOVR);
			return (4);
		}
		/* Case 5 */
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR);
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}
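
/*
 * The six overlap cases above with concrete ranges (illustrative; take
 * the new lock to be [10,19], with -1 meaning "to EOF"):
 *	0) lf = [30,39]	- disjoint, no overlap
 *	1) lf = [10,19]	- exact match
 *	2) lf = [5,25]	- lf contains the lock
 *	3) lf = [12,15]	- the lock contains lf
 *	4) lf = [5,14]	- lf starts before and ends inside the lock
 *	5) lf = [15,25]	- lf starts inside and ends after the lock
 * The tests are ordered, so e.g. [5,25] matches case 2 before case 4
 * can be reached.
 */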

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
void
lf_split(struct lockf *lock1, struct lockf *lock2)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SPLIT) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock.
	 */
	splitlock = lf_alloc(lock1->lf_uid, 0);
	memcpy(splitlock, lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block.tqe_next = NULL;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in.
	 */
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
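
/*
 * Worked example of the three-piece case (illustrative ranges): splitting
 * lock1 = [0,99] around a contained lock2 = [40,59] leaves lock1 = [0,39],
 * inserts lock2 = [40,59], and allocates splitlock = [60,99], linked in
 * list order lock1 -> lock2 -> splitlock.  The allocation passes
 * allowfail == 0 so that, as noted near lf_alloc, a split can never fail.
 */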

/*
 * Wakeup a blocklist
 */
void
lf_wakelock(struct lockf *lock)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) {
		TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NULL;
		wakeup_one(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(char *tag, struct lockf *lock)
{
	struct lockf	*block;

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id %p", lock->lf_id);
	printf(" %s, start %llx, end %llx",
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	block = TAILQ_FIRST(&lock->lf_blkhd);
	if (block)
		printf(" block");
	TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block)
		printf(" %p,", block);
	printf("\n");
}

void
lf_printlist(char *tag, struct lockf *lock)
{
	struct lockf *lf;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id %p", lf->lf_id);
		printf(" %s, start %llx, end %llx",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */