/*	$NetBSD: lfs_balloc.c,v 1.87 2015/09/01 06:08:37 dholland Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_balloc.c	8.4 (Berkeley) 5/8/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_balloc.c,v 1.87 2015/09/01 06:08:37 dholland Exp $");

#if defined(_KERNEL_OPT)
#include "opt_quota.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/tree.h>
#include <sys/trace.h>
#include <sys/kauth.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/lfs/ulfs_quotacommon.h>
#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/lfs_extern.h>
#include <ufs/lfs/lfs_kernel.h>

#include <uvm/uvm.h>

int lfs_fragextend(struct vnode *, int, int, daddr_t, struct buf **, kauth_cred_t);

u_int64_t locked_fakequeue_count;

/*
 * Allocate a block, and do inode and filesystem block accounting for it
 * and for any indirect blocks that may need to be created in order for
 * this block to be created.
 *
 * Blocks which have never been accounted for (i.e., which "do not exist")
 * have disk address 0, which is translated by ulfs_bmap to the special value
 * UNASSIGNED == -1, as in the historical ULFS.
 *
 * Blocks which have been accounted for but which have not yet been written
 * to disk are given the new special disk address UNWRITTEN == -2, so that
 * they can be differentiated from completely new blocks.
 */
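/*
 * Illustrative sketch only (not part of the original source): one way a
 * reader might summarize the three disk-address states described above.
 * The constants UNASSIGNED and UNWRITTEN are the real ones used in this
 * file; the helper name below is hypothetical.
 */
#if 0
static const char *
lfs_daddr_state(daddr_t daddr)
{

	if (daddr == UNASSIGNED)	/* never accounted for */
		return "new";
	if (daddr == UNWRITTEN)		/* accounted for, not yet on disk */
		return "unwritten";
	return "on disk";		/* a real disk address */
}
#endif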
/* VOP_BWRITE ULFS_NIADDR+2 times */
int
lfs_balloc(struct vnode *vp, off_t startoffset, int iosize, kauth_cred_t cred,
    int flags, struct buf **bpp)
{
	int offset;
	daddr_t daddr, idaddr;
	struct buf *ibp, *bp;
	struct inode *ip;
	struct lfs *fs;
	struct indir indirs[ULFS_NIADDR+2], *idp;
	daddr_t	lbn, lastblock;
	int bcount;
	int error, frags, i, nsize, osize, num;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	offset = lfs_blkoff(fs, startoffset);
	KASSERT(iosize <= lfs_sb_getbsize(fs));
	lbn = lfs_lblkno(fs, startoffset);
	/* (void)lfs_check(vp, lbn, 0); */

	ASSERT_MAYBE_SEGLOCK(fs);

	/*
	 * Three cases: it's a block beyond the end of file, it's a block in
	 * the file that may or may not have been assigned a disk address, or
	 * we're writing an entire block.
	 *
	 * Note, if the daddr is UNWRITTEN, the block already exists in
	 * the cache (it was read or written earlier).  If so, make sure
	 * we don't count it as a new block or zero out its contents.  If
	 * it does not, make sure we allocate any necessary indirect
	 * blocks.
	 *
	 * If we are writing a block beyond the end of the file, we need to
	 * check if the old last block was a fragment.  If it was, we need
	 * to rewrite it.
	 */

	if (bpp)
		*bpp = NULL;

	/* Check for block beyond end of file and fragment extension needed. */
	lastblock = lfs_lblkno(fs, ip->i_size);
	if (lastblock < ULFS_NDADDR && lastblock < lbn) {
		osize = lfs_blksize(fs, ip, lastblock);
		if (osize < lfs_sb_getbsize(fs) && osize > 0) {
			if ((error = lfs_fragextend(vp, osize, lfs_sb_getbsize(fs),
						    lastblock,
						    (bpp ? &bp : NULL), cred)))
				return (error);
			ip->i_size = (lastblock + 1) * lfs_sb_getbsize(fs);
			lfs_dino_setsize(fs, ip->i_din, ip->i_size);
			uvm_vnp_setsize(vp, ip->i_size);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (bpp)
				(void) VOP_BWRITE(bp->b_vp, bp);
		}
	}

	/*
	 * If the block we are writing is a direct block, it's the last
	 * block in the file, and offset + iosize is less than a full
	 * block, we can write one or more fragments.  There are two cases:
	 * either the block is brand new and we should allocate it the
	 * correct size, or it already exists and contains some fragments
	 * and we may need to extend it.
	 */
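	/*
	 * Worked example (illustrative only, assuming 8KB blocks and 1KB
	 * fragments): writing iosize bytes so that offset + iosize == 3000
	 * gives nsize = lfs_fragroundup(fs, 3000) == 3072, i.e. three
	 * fragments.  If the block is brand new, exactly those three
	 * fragments are accounted for below; if it already exists at
	 * osize == 2048, lfs_fragextend() grows it from two fragments
	 * to three.
	 */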
	if (lbn < ULFS_NDADDR && lfs_lblkno(fs, ip->i_size) <= lbn) {
		osize = lfs_blksize(fs, ip, lbn);
		nsize = lfs_fragroundup(fs, offset + iosize);
		if (lfs_lblktosize(fs, lbn) >= ip->i_size) {
			/* Brand new block or fragment */
			frags = lfs_numfrags(fs, nsize);
			if (!ISSPACE(fs, frags, cred))
				return ENOSPC;
			if (bpp) {
				*bpp = bp = getblk(vp, lbn, nsize, 0, 0);
				bp->b_blkno = UNWRITTEN;
				if (flags & B_CLRBUF)
					clrbuf(bp);
			}
			ip->i_lfs_effnblks += frags;
			mutex_enter(&lfs_lock);
			lfs_sb_subbfree(fs, frags);
			mutex_exit(&lfs_lock);
			lfs_dino_setdb(fs, ip->i_din, lbn, UNWRITTEN);
		} else {
			if (nsize <= osize) {
				/* No need to extend */
				if (bpp && (error = bread(vp, lbn, osize,
				    0, &bp)))
					return error;
			} else {
				/* Extend existing block */
				if ((error =
				     lfs_fragextend(vp, osize, nsize, lbn,
						    (bpp ? &bp : NULL), cred)))
					return error;
			}
			if (bpp)
				*bpp = bp;
		}
		return 0;
	}

	error = ulfs_bmaparray(vp, lbn, &daddr, &indirs[0], &num, NULL, NULL);
	if (error)
		return (error);

	KASSERT(daddr <= LFS_MAX_DADDR(fs));

	/*
	 * Do byte accounting all at once, so we can gracefully fail *before*
	 * we start assigning blocks.
	 */
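	/*
	 * Illustrative example (not from the original source): if daddr is
	 * UNASSIGNED and two of the indirect blocks on the path do not yet
	 * exist, bcount below ends up as 3 * fs->um_seqinc frags, all of
	 * which are reserved here so that the assignment loop further down
	 * cannot fail for lack of space.
	 */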
	frags = fs->um_seqinc;
	bcount = 0;
	if (daddr == UNASSIGNED) {
		bcount = frags;
	}
	for (i = 1; i < num; ++i) {
		if (!indirs[i].in_exists) {
			bcount += frags;
		}
	}
	if (ISSPACE(fs, bcount, cred)) {
		mutex_enter(&lfs_lock);
		lfs_sb_subbfree(fs, bcount);
		mutex_exit(&lfs_lock);
		ip->i_lfs_effnblks += bcount;
	} else {
		return ENOSPC;
	}

	if (daddr == UNASSIGNED) {
		if (num > 0 && lfs_dino_getib(fs, ip->i_din, indirs[0].in_off) == 0) {
			lfs_dino_setib(fs, ip->i_din, indirs[0].in_off, UNWRITTEN);
		}

		/*
		 * Create new indirect blocks if necessary
		 */
		if (num > 1) {
			idaddr = lfs_dino_getib(fs, ip->i_din, indirs[0].in_off);
			for (i = 1; i < num; ++i) {
				ibp = getblk(vp, indirs[i].in_lbn,
				    lfs_sb_getbsize(fs), 0, 0);
				if (!indirs[i].in_exists) {
					clrbuf(ibp);
					ibp->b_blkno = UNWRITTEN;
				} else if (!(ibp->b_oflags & (BO_DELWRI | BO_DONE))) {
					ibp->b_blkno = LFS_FSBTODB(fs, idaddr);
					ibp->b_flags |= B_READ;
					VOP_STRATEGY(vp, ibp);
					biowait(ibp);
				}
				/*
				 * This block exists, but the next one may not.
				 * If that is the case mark it UNWRITTEN to keep
				 * the accounting straight.
				 */
				/* XXX ondisk32 */
				if (((int32_t *)ibp->b_data)[indirs[i].in_off] == 0)
					((int32_t *)ibp->b_data)[indirs[i].in_off] =
						UNWRITTEN;
				/* XXX ondisk32 */
				idaddr = ((int32_t *)ibp->b_data)[indirs[i].in_off];
#ifdef DEBUG
				if (vp == fs->lfs_ivnode) {
					LFS_ENTER_LOG("balloc", __FILE__,
						__LINE__, indirs[i].in_lbn,
						ibp->b_flags, curproc->p_pid);
				}
#endif
				if ((error = VOP_BWRITE(ibp->b_vp, ibp)))
					return error;
			}
		}
	}


	/*
	 * Get the existing block from the cache, if requested.
	 */
	if (bpp)
		*bpp = bp = getblk(vp, lbn, lfs_blksize(fs, ip, lbn), 0, 0);

	/*
	 * Do accounting on blocks that represent pages.
	 */
	if (!bpp)
		lfs_register_block(vp, lbn);

	/*
	 * The block we are writing may be a brand new block
	 * in which case we need to do accounting.
	 *
	 * We can tell a truly new block because ulfs_bmaparray will say
	 * it is UNASSIGNED.  Once we allocate it we will assign it the
	 * disk address UNWRITTEN.
	 */
	if (daddr == UNASSIGNED) {
		if (bpp) {
			if (flags & B_CLRBUF)
				clrbuf(bp);

			/* Note the new address */
			bp->b_blkno = UNWRITTEN;
		}

		switch (num) {
		    case 0:
			lfs_dino_setdb(fs, ip->i_din, lbn, UNWRITTEN);
			break;
		    case 1:
			lfs_dino_setib(fs, ip->i_din, indirs[0].in_off, UNWRITTEN);
			break;
		    default:
			idp = &indirs[num - 1];
			if (bread(vp, idp->in_lbn, lfs_sb_getbsize(fs),
				  B_MODIFY, &ibp))
				panic("lfs_balloc: bread bno %lld",
				    (long long)idp->in_lbn);
			/* XXX ondisk32 */
			((int32_t *)ibp->b_data)[idp->in_off] = UNWRITTEN;
#ifdef DEBUG
			if (vp == fs->lfs_ivnode) {
				LFS_ENTER_LOG("balloc", __FILE__,
					__LINE__, idp->in_lbn,
					ibp->b_flags, curproc->p_pid);
			}
#endif
			VOP_BWRITE(ibp->b_vp, ibp);
		}
	} else if (bpp && !(bp->b_oflags & (BO_DONE|BO_DELWRI))) {
		/*
		 * Not a brand new block, also not in the cache;
		 * read it in from disk.
		 */
		if (iosize == lfs_sb_getbsize(fs))
			/* Optimization: I/O is unnecessary. */
			bp->b_blkno = daddr;
		else {
			/*
			 * We need to read the block to preserve the
			 * existing bytes.
			 */
			bp->b_blkno = daddr;
			bp->b_flags |= B_READ;
			VOP_STRATEGY(vp, bp);
			return (biowait(bp));
		}
	}

	return (0);
}

/* VOP_BWRITE 1 time */
int
lfs_fragextend(struct vnode *vp, int osize, int nsize, daddr_t lbn, struct buf **bpp,
    kauth_cred_t cred)
{
	struct inode *ip;
	struct lfs *fs;
	long frags;
	int error;
	extern long locked_queue_bytes;
	size_t obufsize;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	frags = (long)lfs_numfrags(fs, nsize - osize);
	error = 0;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * Get the seglock so we don't enlarge blocks while a segment
	 * is being written.  If we're called with bpp==NULL, though,
	 * we are only pretending to change a buffer, so we don't have to
	 * lock.
	 */
    top:
	if (bpp) {
		rw_enter(&fs->lfs_fraglock, RW_READER);
		LFS_DEBUG_COUNTLOCKED("frag");
	}

	if (!ISSPACE(fs, frags, cred)) {
		error = ENOSPC;
		goto out;
	}

	/*
	 * If we are not asked to actually return the block, all we need
	 * to do is allocate space for it.  UBC will handle dirtying the
	 * appropriate things and making sure it all goes to disk.
	 * Don't bother to read in that case.
	 */
	if (bpp && (error = bread(vp, lbn, osize, 0, bpp))) {
		goto out;
	}
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	if ((error = lfs_chkdq(ip, frags, cred, 0))) {
		if (bpp)
			brelse(*bpp, 0);
		goto out;
	}
#endif
	/*
	 * Adjust accounting for lfs_avail.  If there's not enough room,
	 * we will have to wait for the cleaner, which we can't do while
	 * holding a block busy or while holding the seglock.  In that case,
	 * release both and start over after waiting.
	 */
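	/*
	 * Illustrative note (not from the original source): e.g. growing a
	 * dirty 1KB fragment to a full 8KB block charges seven more 1KB
	 * frags against lfs_avail; if the cleaner has not yet freed that
	 * much, we drop the buffer and lfs_fraglock, sleep in
	 * lfs_availwait(), and retry from "top" above.
	 */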

	if (bpp && ((*bpp)->b_oflags & BO_DELWRI)) {
		if (!lfs_fits(fs, frags)) {
			if (bpp)
				brelse(*bpp, 0);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
			lfs_chkdq(ip, -frags, cred, 0);
#endif
			rw_exit(&fs->lfs_fraglock);
			lfs_availwait(fs, frags);
			goto top;
		}
		lfs_sb_subavail(fs, frags);
	}

	mutex_enter(&lfs_lock);
	lfs_sb_subbfree(fs, frags);
	mutex_exit(&lfs_lock);
	ip->i_lfs_effnblks += frags;
	ip->i_flag |= IN_CHANGE | IN_UPDATE;

	if (bpp) {
		obufsize = (*bpp)->b_bufsize;
		allocbuf(*bpp, nsize, 1);

		/* Adjust locked-list accounting */
		if (((*bpp)->b_flags & B_LOCKED) != 0 &&
		    (*bpp)->b_iodone == NULL) {
			mutex_enter(&lfs_lock);
			locked_queue_bytes += (*bpp)->b_bufsize - obufsize;
			mutex_exit(&lfs_lock);
		}

		memset((char *)((*bpp)->b_data) + osize, 0, (u_int)(nsize - osize));
	}

    out:
	if (bpp) {
		rw_exit(&fs->lfs_fraglock);
	}
	return (error);
}

static inline int
lge(struct lbnentry *a, struct lbnentry *b)
{
	return a->lbn - b->lbn;
}

SPLAY_PROTOTYPE(lfs_splay, lbnentry, entry, lge);

SPLAY_GENERATE(lfs_splay, lbnentry, entry, lge);

/*
 * Record this lbn as being "write pending".  We used to have this information
 * on the buffer headers, but since pages don't have buffer headers we
 * record it here instead.
 */
void
lfs_register_block(struct vnode *vp, daddr_t lbn)
{
	struct lfs *fs;
	struct inode *ip;
	struct lbnentry *lbp;

	ip = VTOI(vp);

	/* Don't count metadata */
	if (lbn < 0 || vp->v_type != VREG || ip->i_number == LFS_IFILE_INUM)
		return;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/* If no space, wait for the cleaner */
	lfs_availwait(fs, lfs_btofsb(fs, 1 << lfs_sb_getbshift(fs)));

	lbp = (struct lbnentry *)pool_get(&lfs_lbnentry_pool, PR_WAITOK);
	lbp->lbn = lbn;
	mutex_enter(&lfs_lock);
	if (SPLAY_INSERT(lfs_splay, &ip->i_lfs_lbtree, lbp) != NULL) {
		mutex_exit(&lfs_lock);
		/* Already there */
		pool_put(&lfs_lbnentry_pool, lbp);
		return;
	}

	++ip->i_lfs_nbtree;
	fs->lfs_favail += lfs_btofsb(fs, (1 << lfs_sb_getbshift(fs)));
	fs->lfs_pages += lfs_sb_getbsize(fs) >> PAGE_SHIFT;
	++locked_fakequeue_count;
	lfs_subsys_pages += lfs_sb_getbsize(fs) >> PAGE_SHIFT;
	mutex_exit(&lfs_lock);
}

static void
lfs_do_deregister(struct lfs *fs, struct inode *ip, struct lbnentry *lbp)
{
	ASSERT_MAYBE_SEGLOCK(fs);

	mutex_enter(&lfs_lock);
	--ip->i_lfs_nbtree;
	SPLAY_REMOVE(lfs_splay, &ip->i_lfs_lbtree, lbp);
	if (fs->lfs_favail > lfs_btofsb(fs, (1 << lfs_sb_getbshift(fs))))
		fs->lfs_favail -= lfs_btofsb(fs, (1 << lfs_sb_getbshift(fs)));
	fs->lfs_pages -= lfs_sb_getbsize(fs) >> PAGE_SHIFT;
	if (locked_fakequeue_count > 0)
		--locked_fakequeue_count;
	lfs_subsys_pages -= lfs_sb_getbsize(fs) >> PAGE_SHIFT;
	mutex_exit(&lfs_lock);

	pool_put(&lfs_lbnentry_pool, lbp);
}

void
lfs_deregister_block(struct vnode *vp, daddr_t lbn)
{
	struct lfs *fs;
	struct inode *ip;
	struct lbnentry *lbp;
	struct lbnentry tmp;

	ip = VTOI(vp);

	/* Don't count metadata */
	if (lbn < 0 || vp->v_type != VREG || ip->i_number == LFS_IFILE_INUM)
		return;

	fs = ip->i_lfs;
	tmp.lbn = lbn;
	lbp = SPLAY_FIND(lfs_splay, &ip->i_lfs_lbtree, &tmp);
	if (lbp == NULL)
		return;

	lfs_do_deregister(fs, ip, lbp);
}

void
lfs_deregister_all(struct vnode *vp)
{
	struct lbnentry *lbp, *nlbp;
	struct lfs_splay *hd;
	struct lfs *fs;
	struct inode *ip;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	hd = &ip->i_lfs_lbtree;

	for (lbp = SPLAY_MIN(lfs_splay, hd); lbp != NULL; lbp = nlbp) {
		nlbp = SPLAY_NEXT(lfs_splay, hd, lbp);
		lfs_do_deregister(fs, ip, lbp);
	}
}
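
/*
 * Illustrative sketch only (not part of the original source): how the
 * registration interface above is intended to pair up.  The function
 * names are the real ones defined in this file; the surrounding caller
 * is hypothetical.
 */
#if 0
	/* A page-based write path records the block as write-pending. */
	lfs_register_block(vp, lbn);

	/* Once the block has been written or discarded, it is deregistered. */
	lfs_deregister_block(vp, lbn);

	/* When the whole vnode goes away, drop everything at once. */
	lfs_deregister_all(vp);
#endif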
583