xref: /original-bsd/sys/kern/vfs_cluster.c (revision 7f8f2e51)
/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_cluster.c	7.32 (Berkeley) 01/28/91
 */

#include "param.h"
#include "user.h"
#include "buf.h"
#include "vnode.h"
#include "specdev.h"
#include "mount.h"
#include "trace.h"
#include "ucred.h"

/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	u.u_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}
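
/*
 * Typical usage (illustrative sketch only; "lbn" and "bsize" stand in
 * for a filesystem's logical block number and block size):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (error = bread(vp, lbn, bsize, NOCRED, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...examine bp->b_un.b_addr...
 *	brelse(bp);
 *
 * The buffer must eventually be released with brelse() (or rewritten
 * with bwrite/bdwrite/bawrite) so that it returns to a free list.
 */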

/*
 * Operates like bread, but also starts I/O on the specified
 * read-ahead block.
 */
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block is not memory resident,
	 * allocate a buffer and start I/O.
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size);
		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breada");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			u.u_ru.ru_inblock++;		/* pay for read */
		} else
			trace(TR_BREADHIT, pack(vp, size), blkno);
	}

	/*
	 * If there is a read-ahead block, start I/O on it too.
	 */
	if (!incore(vp, rablkno)) {
		rabp = getblk(vp, rablkno, rabsize);
		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize), rablkno);
		} else {
			rabp->b_flags |= B_ASYNC | B_READ;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize), rablkno);
			u.u_ru.ru_inblock++;		/* pay in advance */
		}
	}

	/*
	 * If block was memory resident, let bread get it.
	 * If block was not memory resident, the read was
	 * started above, so just wait for the read to complete.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}
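
/*
 * Typical usage (illustrative sketch only): a filesystem read routine
 * might ask for the next logical block as read-ahead, e.g.
 *
 *	error = breada(vp, lbn, bsize, lbn + 1, bsize, NOCRED, &bp);
 *
 * where lbn and bsize are hypothetical stand-ins for the logical block
 * number and block size.  The read-ahead block is started asynchronously;
 * only the named block is waited for.
 */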

/*
 * Synchronous write.
 * Release buffer on completion.
 */
bwrite(bp)
	register struct buf *bp;
{
	register int flag;
	int s, error;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if ((flag & B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one has paid yet */
	else
		reassignbuf(bp, bp->b_vp);
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await I/O completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting I/O completion status.
	 * Otherwise the write is asynchronous and no status is
	 * available yet, so report success.
	 */
	if ((flag & B_ASYNC) == 0) {
		error = biowait(bp);
		brelse(bp);
	} else if (flag & B_DELWRI) {
		bp->b_flags |= B_AGE;
		error = 0;
	} else
		error = 0;
	return (error);
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
bdwrite(bp)
	register struct buf *bp;
{

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		u.u_ru.ru_oublock++;		/* no one has paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, B_TAPE, 0, NOCRED) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= (B_DONE | B_DELWRI);
		brelse(bp);
	}
}

/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
bawrite(bp)
	register struct buf *bp;
{

	/*
	 * Setting the ASYNC flag causes bwrite to return
	 * after starting the I/O.
	 */
	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}
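
/*
 * Summary of the three write disciplines above:
 *
 *	bwrite()  - start the write and wait for it; caller sees the error.
 *	bawrite() - start the write but do not wait; the buffer is
 *		    released by biodone() when the I/O finishes.
 *	bdwrite() - do not start the write at all; just mark the buffer
 *		    B_DELWRI so that it is written later (by bawrite()
 *		    from getnewbuf(), or by a sync/flush), unless the
 *		    underlying device is a tape.
 */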

/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	int s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags & B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags & B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;
	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE | B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	s = splbio();
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
	splx(s);
}
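
/*
 * Free list selection above, in summary:
 *
 *	BQ_EMPTY  - headers with no memory attached (b_bufsize == 0),
 *		    inserted at the head.
 *	BQ_AGE    - buffers whose contents are invalid, in error, or
 *		    marked B_AGE; getnewbuf() prefers this list.
 *	BQ_LRU    - buffers with valid contents, inserted at the tail
 *		    to keep the list in least-recently-used order.
 *	BQ_LOCKED - buffers marked B_LOCKED, which the getnewbuf()
 *		    scan does not reclaim.
 */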

/*
 * Check to see if a block is currently memory resident.
 */
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(vp, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Check to see if a block is currently memory resident.
 * If it is resident, return it. If it is not resident,
 * allocate a new buffer and assign it to the block.
 */
struct buf *
getblk(vp, blkno, size)
	register struct vnode *vp;
	daddr_t blkno;
	int size;
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * Search the cache for the block. If the buffer is found,
	 * but it is currently locked, then we must wait for it to
	 * become available.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
		    (bp->b_flags & B_INVAL))
			continue;
		s = splbio();
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO + 1);
			splx(s);
			goto loop;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size\n");
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	bp = getnewbuf();
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_bcount = 0;
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	allocbuf(bp, size);
	return (bp);
}
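
/*
 * Typical write-path usage (illustrative sketch only; lbn, bsize and
 * the whole_block_written test are hypothetical stand-ins):
 *
 *	bp = getblk(vp, lbn, bsize);
 *	...copy new data into bp->b_un.b_addr...
 *	if (whole_block_written)
 *		bawrite(bp);
 *	else
 *		bdwrite(bp);
 *
 * Unlike bread(), getblk() does not fill the buffer from disk; if the
 * block was not already cached, the caller is responsible for its
 * contents before writing it.
 */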

/*
 * Allocate a buffer.
 * The caller will assign it to a block.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	bp->b_bcount = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, flist);
	allocbuf(bp, size);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 * If no memory is available, release buffer and take error exit.
 */
allocbuf(tp, size)
	register struct buf *tp;
	int size;
{
	register struct buf *bp, *ep;
	int sizealloc, take, s;

	sizealloc = roundup(size, CLBYTES);
	/*
	 * Buffer size does not change
	 */
	if (sizealloc == tp->b_bufsize)
		goto out;
	/*
	 * Buffer size is shrinking.
	 * Place excess space in a buffer header taken from the
	 * BQ_EMPTY buffer list and placed on the "most free" list.
	 * If no extra buffer headers are available, leave the
	 * extra space in the present buffer.
	 */
	if (sizealloc < tp->b_bufsize) {
		ep = bfreelist[BQ_EMPTY].av_forw;
		if (ep == &bfreelist[BQ_EMPTY])
			goto out;
		s = splbio();
		bremfree(ep);
		ep->b_flags |= B_BUSY;
		splx(s);
		pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
		    (int)tp->b_bufsize - sizealloc);
		ep->b_bufsize = tp->b_bufsize - sizealloc;
		tp->b_bufsize = sizealloc;
		ep->b_flags |= B_INVAL;
		ep->b_bcount = 0;
		brelse(ep);
		goto out;
	}
	/*
	 * More buffer space is needed. Get it out of buffers on
	 * the "most free" list, placing the empty headers on the
	 * BQ_EMPTY buffer header list.
	 */
	while (tp->b_bufsize < sizealloc) {
		take = sizealloc - tp->b_bufsize;
		bp = getnewbuf();
		if (take >= bp->b_bufsize)
			take = bp->b_bufsize;
		pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
		    &tp->b_un.b_addr[tp->b_bufsize], take);
		tp->b_bufsize += take;
		bp->b_bufsize = bp->b_bufsize - take;
		if (bp->b_bcount > bp->b_bufsize)
			bp->b_bcount = bp->b_bufsize;
		if (bp->b_bufsize <= 0) {
			bremhash(bp);
			binshash(bp, &bfreelist[BQ_EMPTY]);
			bp->b_dev = NODEV;
			bp->b_error = 0;
			bp->b_flags |= B_INVAL;
		}
		brelse(bp);
	}
out:
	tp->b_bcount = size;
	return (1);
}
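
/*
 * The allocation above is always in whole click multiples: for example,
 * with CLBYTES of 1024 (the value is machine-dependent), a request of
 * 1536 bytes yields sizealloc = roundup(1536, 1024) = 2048, so the
 * buffer keeps 2048 bytes of memory while b_bcount records the 1536
 * bytes actually requested.
 */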

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO + 1);
		splx(s);
		goto loop;
	}
	bp = dp->av_forw;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	return (bp);
}

/*
 * Wait for I/O to complete.
 *
 * Extract and return any errors associated with the I/O.
 * If the error flag is set, but no specific error is
 * given, return EIO.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 */
biodone(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0) {
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		if (vp = bp->b_vp) {
			vp->v_numoutput--;
			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
				if (vp->v_numoutput < 0)
					panic("biodone: neg numoutput");
				vp->v_flag &= ~VBWAIT;
				wakeup((caddr_t)&vp->v_numoutput);
			}
		}
	}
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}
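
/*
 * Illustrative sketch of the B_CALL completion-callback path handled
 * above ("mydone" is a hypothetical handler, not part of this file):
 *
 *	bp->b_flags |= B_ASYNC | B_CALL;
 *	bp->b_iodone = mydone;
 *	VOP_STRATEGY(bp);
 *	...
 *	mydone(bp)
 *		struct buf *bp;
 *	{
 *		...note completion, then dispose of the buffer...
 *		brelse(bp);
 *	}
 *
 * When the driver calls biodone(), the handler runs instead of the
 * normal brelse()/wakeup() processing, so it must release the buffer
 * itself.
 */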

/*
 * Make sure all write-behind blocks associated
 * with mount point are flushed out (from sync).
 */
mntflushbuf(mountp, flags)
	struct mount *mountp;
	int flags;
{
	register struct vnode *vp;

	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
		panic("mntflushbuf: not busy");
loop:
	for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
		if (vget(vp))
			goto loop;
		vflushbuf(vp, flags);
		vput(vp);
		if (vp->v_mount != mountp)
			goto loop;
	}
}

/*
 * Flush all dirty buffers associated with a vnode.
 */
vflushbuf(vp, flags)
	register struct vnode *vp;
	int flags;
{
	register struct buf *bp;
	struct buf *nbp;
	int s;

loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd; bp; bp = nbp) {
		nbp = bp->b_blockf;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 * NB: This is really specific to ufs, but is done here
		 * as it is easier and quicker.
		 */
		if (bp->b_vp == vp || (flags & B_SYNC) == 0) {
			(void) bawrite(bp);
			s = splbio();
		} else {
			(void) bwrite(bp);
			goto loop;
		}
	}
	splx(s);
	if ((flags & B_SYNC) == 0)
		return;
	s = splbio();
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		sleep((caddr_t)&vp->v_numoutput, PRIBIO + 1);
	}
	splx(s);
	if (vp->v_dirtyblkhd) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}

/*
 * Invalidate in core blocks belonging to closed or unmounted filesystem
 *
 * Go through the list of vnodes associated with the file system;
 * for each vnode invalidate any buffers that it holds. Normally
 * this routine is preceded by a bflush call, so that on a quiescent
 * filesystem there will be no dirty buffers when we are done.
 * Mntinvalbuf returns the count of dirty buffers when it is finished.
 */
mntinvalbuf(mountp)
	struct mount *mountp;
{
	register struct vnode *vp;
	int dirty = 0;

	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
		panic("mntinvalbuf: not busy");
loop:
	for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
		if (vget(vp))
			goto loop;
		dirty += vinvalbuf(vp, 1);
		vput(vp);
		if (vp->v_mount != mountp)
			goto loop;
	}
	return (dirty);
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
vinvalbuf(vp, save)
	register struct vnode *vp;
	int save;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, dirty = 0;

	for (;;) {
		if (blist = vp->v_dirtyblkhd)
			/* void */;
		else if (blist = vp->v_cleanblkhd)
			/* void */;
		else
			break;
		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_blockf;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO + 1);
				splx(s);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			if (save && (bp->b_flags & B_DELWRI)) {
				dirty++;
				(void) bwrite(bp);
				break;
			}
			if (bp->b_vp != vp)
				reassignbuf(bp, bp->b_vp);
			else
				bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
		panic("vinvalbuf: flush failed");
	return (dirty);
}

/*
 * Associate a buffer with a vnode.
 */
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	if (vp->v_cleanblkhd) {
		bp->b_blockf = vp->v_cleanblkhd;
		bp->b_blockb = &vp->v_cleanblkhd;
		vp->v_cleanblkhd->b_blockb = &bp->b_blockf;
		vp->v_cleanblkhd = bp;
	} else {
		vp->v_cleanblkhd = bp;
		bp->b_blockb = &vp->v_cleanblkhd;
		bp->b_blockf = NULL;
	}
}
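
/*
 * Note on the list linkage used above and in brelvp()/reassignbuf():
 * b_blockf points to the next buffer on the vnode's clean or dirty
 * list, while b_blockb points back at the b_blockf field (or list
 * head pointer) that references this buffer.  Unlinking a buffer is
 * therefore simply
 *
 *	if (bq = bp->b_blockf)
 *		bq->b_blockb = bp->b_blockb;
 *	*bp->b_blockb = bq;
 *
 * with no special case needed for the head of the list.
 */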

/*
 * Disassociate a buffer from a vnode.
 */
brelvp(bp)
	register struct buf *bp;
{
	struct buf *bq;
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
		bp->b_blockf = NULL;
		bp->b_blockb = NULL;
	}
	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buf *bq, **listheadp;

	if (newvp == NULL)
		panic("reassignbuf: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
	}
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI)
		listheadp = &newvp->v_dirtyblkhd;
	else
		listheadp = &newvp->v_cleanblkhd;
	if (*listheadp) {
		bp->b_blockf = *listheadp;
		bp->b_blockb = listheadp;
		bp->b_blockf->b_blockb = &bp->b_blockf;
		*listheadp = bp;
	} else {
		*listheadp = bp;
		bp->b_blockb = listheadp;
		bp->b_blockf = NULL;
	}
}
798