/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $FreeBSD: src/sys/kern/kern_lockf.c,v 1.25 1999/11/16 16:28:56 phk Exp $
 * $DragonFly: src/sys/kern/kern_lockf.c,v 1.6 2003/08/26 21:09:02 rob Exp $
 */

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>

#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked when doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
#include <sys/sysctl.h>

#include <vfs/ufs/quota.h>
#include <vfs/ufs/inode.h>

static int	lockf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
static int	 lf_clearlock (struct lockf *);
static int	 lf_findoverlap (struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *
	 lf_getblock (struct lockf *);
static int	 lf_getlock (struct lockf *, struct flock *);
static int	 lf_setlock (struct lockf *);
static void	 lf_split (struct lockf *, struct lockf *);
static void	 lf_wakelock (struct lockf *);

/*
 * Advisory record locking support
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t  a_id;
		int  a_op;
		struct flock *a_fl;
		int  a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	struct flock *fl = ap->a_fl;
	struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0)
		end = -1;
	else {
		end = start + fl->l_len - 1;
		if (end < start)
			return (EINVAL);
	}
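	/*
	 * Illustrative sketch of the conversion above (hypothetical
	 * values, not tied to any caller): a request with
	 * l_whence = SEEK_SET, l_start = 100 and l_len = 50 yields the
	 * inclusive byte range [100, 149]; the same request with
	 * l_len = 0 yields [100, -1], where an end of -1 means
	 * "through end of file".
	 */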
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
/*	lock->lf_inode = ip; */	/* XXX JH */
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {
	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		free(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}
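
/*
 * For orientation, lf_advlock() is reached through VOP_ADVLOCK() when a
 * process takes a byte-range lock with fcntl().  A minimal userland
 * sketch (illustrative only; the file name and byte range are made-up
 * values):
 *
 *	struct flock fl;
 *	int fd = open("/tmp/somefile", O_RDWR);
 *
 *	fl.l_whence = SEEK_SET;		// range is absolute
 *	fl.l_start = 0;			// first byte of the range
 *	fl.l_len = 512;			// lock 512 bytes
 *	fl.l_type = F_WRLCK;		// exclusive (POSIX) lock
 *	if (fcntl(fd, F_SETLK, &fl) == -1)
 *		err(1, "range is locked by someone else");
 */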

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
	struct lockf *lock;
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct proc *wproc;
			struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
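		/*
		 * A sketch of the cycle the walk above detects
		 * (hypothetical processes and ranges): P1 holds a lock
		 * on [0, 9] and sleeps waiting for [10, 19]; P2 holds
		 * [10, 19] and now requests [0, 9].  Following P2's
		 * blocker (P1) to the lock P1 is itself waiting on
		 * leads back to P2, so the request fails with EDEADLK
		 * instead of sleeping forever.
		 */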
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, PCATCH, lockstr, 0);
		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which cases we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing a lock (in which case we have
		 * already been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
					ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
					ltmp->lf_next = lock;
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}
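
/*
 * Worked example of the merge loop above (hypothetical ranges): if this
 * process holds a shared lock on [0, 99] and now requests an exclusive
 * lock on the same range, lf_findoverlap() reports case 1 and the
 * existing entry is simply retyped to F_WRLCK.  Requesting F_WRLCK on
 * [40, 59] instead hits case 2: the shared lock is split around the
 * middle and only [40, 59] becomes exclusive.
 */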

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(unlock)
	struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}
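
/*
 * Worked example (hypothetical ranges): unlocking [40, 59] out of a held
 * lock on [0, 99] is case 2 above.  lf_split() turns the single entry
 * into [0, 39] and [60, 99], and any waiters queued on the old entry
 * have already been woken so they can retry against the shorter ranges.
 */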

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(lock, fl)
	struct lockf *lock;
	struct flock *fl;
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if (lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
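		/*
		 * Illustrative instances of the six cases, using
		 * hypothetical byte ranges with the request "lock"
		 * fixed at [40, 59]:
		 *
		 *	0) lf = [70, 79]	disjoint from lock
		 *	1) lf = [40, 59]	identical range
		 *	2) lf = [30, 69]	lf contains lock
		 *	3) lf = [45, 54]	lock contains lf
		 *	4) lf = [30, 49]	lf starts before lock
		 *	5) lf = [50, 69]	lf ends after lock
		 */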
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(lock1, lock2)
	struct lockf *lock1;
	struct lockf *lock2;
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if we are splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
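
/*
 * Worked example (hypothetical ranges): splitting lock1 = [0, 99] around
 * lock2 = [40, 59] takes the three-piece path above.  lock1 is trimmed
 * to [0, 39], a new "splitlock" covers [60, 99], and the list ends up
 * ordered lock1 -> lock2 -> splitlock.
 */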

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	struct lockf *wakelock;

	while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
		wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	struct lockf *lock;
{

	printf("%s: lock %p for ", tag, (void *)lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
	else
		printf("id %p", (void *)lock->lf_id);
	/* XXX no %qd in kernel.  Truncate. */
	printf(" in ino %lu on dev <%d, %d>, %s, start %ld, end %ld",
	    (u_long)lock->lf_inode->i_number,
	    major(lock->lf_inode->i_dev),
	    minor(lock->lf_inode->i_dev),
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", (long)lock->lf_start, (long)lock->lf_end);
	if (!TAILQ_EMPTY(&lock->lf_blkhd))
		printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	struct lockf *lf, *blk;

	printf("%s: Lock list for ino %lu on dev <%d, %d>:\n",
	    tag, (u_long)lock->lf_inode->i_number,
	    major(lock->lf_inode->i_dev),
	    minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", (void *)lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %ld",
			    (long)((struct proc *)lf->lf_id)->p_pid);
		else
			printf("id %p", (void *)lf->lf_id);
		/* XXX no %qd in kernel.  Truncate. */
		printf(", %s, start %ld, end %ld",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (long)lf->lf_start, (long)lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			printf("\n\t\tlock request %p for ", (void *)blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %ld",
				    (long)((struct proc *)blk->lf_id)->p_pid);
			else
				printf("id %p", (void *)blk->lf_id);
			/* XXX no %qd in kernel.  Truncate. */
			printf(", %s, start %ld, end %ld",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", (long)blk->lf_start,
			    (long)blk->lf_end);
			if (!TAILQ_EMPTY(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */
805