xref: /original-bsd/sys/kern/vfs_bio.c (revision 56b48dd2)
1 /*
2  * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)vfs_bio.c	7.37 (Berkeley) 04/19/91
8  */
9 
10 #include "param.h"
11 #include "proc.h"
12 #include "buf.h"
13 #include "vnode.h"
14 #include "specdev.h"
15 #include "mount.h"
16 #include "trace.h"
17 #include "resourcevar.h"
18 
19 /*
20  * Find the block in the buffer pool.
21  * If the buffer is not present, allocate a new buffer and load
22  * its contents according to the filesystem fill routine.
23  */
24 bread(vp, blkno, size, cred, bpp)
25 	struct vnode *vp;
26 	daddr_t blkno;
27 	int size;
28 	struct ucred *cred;
29 	struct buf **bpp;
30 {
31 	struct proc *p = curproc;		/* XXX */
32 	register struct buf *bp;
33 
34 	if (size == 0)
35 		panic("bread: size 0");
36 	*bpp = bp = getblk(vp, blkno, size);
37 	if (bp->b_flags & (B_DONE | B_DELWRI)) {
38 		trace(TR_BREADHIT, pack(vp, size), blkno);
39 		return (0);
40 	}
41 	bp->b_flags |= B_READ;
42 	if (bp->b_bcount > bp->b_bufsize)
43 		panic("bread");
44 	if (bp->b_rcred == NOCRED && cred != NOCRED) {
45 		crhold(cred);
46 		bp->b_rcred = cred;
47 	}
48 	VOP_STRATEGY(bp);
49 	trace(TR_BREADMISS, pack(vp, size), blkno);
50 	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
51 	return (biowait(bp));
52 }
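
/*
 * Illustrative sketch (not part of the original source): a typical
 * caller reads a logical block with bread() and releases the buffer
 * with brelse() once the data has been used.  The names "lbn",
 * "fs_bsize", "cred" and "dest" are hypothetical caller state.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (error = bread(vp, lbn, fs_bsize, cred, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	bcopy(bp->b_un.b_addr, dest, fs_bsize);
 *	brelse(bp);
 */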
53 
54 /*
55  * Operates like bread, but also starts I/O on the specified
56  * read-ahead block.
57  */
58 breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
59 	struct vnode *vp;
60 	daddr_t blkno; int size;
61 	daddr_t rablkno; int rabsize;
62 	struct ucred *cred;
63 	struct buf **bpp;
64 {
65 	struct proc *p = curproc;		/* XXX */
66 	register struct buf *bp, *rabp;
67 
68 	bp = NULL;
69 	/*
70 	 * If the block is not memory resident,
71 	 * allocate a buffer and start I/O.
72 	 */
73 	if (!incore(vp, blkno)) {
74 		*bpp = bp = getblk(vp, blkno, size);
75 		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
76 			bp->b_flags |= B_READ;
77 			if (bp->b_bcount > bp->b_bufsize)
78 				panic("breada");
79 			if (bp->b_rcred == NOCRED && cred != NOCRED) {
80 				crhold(cred);
81 				bp->b_rcred = cred;
82 			}
83 			VOP_STRATEGY(bp);
84 			trace(TR_BREADMISS, pack(vp, size), blkno);
85 			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
86 		} else
87 			trace(TR_BREADHIT, pack(vp, size), blkno);
88 	}
89 
90 	/*
91 	 * If there is a read-ahead block, start I/O on it too.
92 	 */
93 	if (!incore(vp, rablkno)) {
94 		rabp = getblk(vp, rablkno, rabsize);
95 		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
96 			brelse(rabp);
97 			trace(TR_BREADHITRA, pack(vp, rabsize), rablkno);
98 		} else {
99 			rabp->b_flags |= B_ASYNC | B_READ;
100 			if (rabp->b_bcount > rabp->b_bufsize)
101 				panic("breadrabp");
102 			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
103 				crhold(cred);
104 				rabp->b_rcred = cred;
105 			}
106 			VOP_STRATEGY(rabp);
107 			trace(TR_BREADMISSRA, pack(vp, rabsize), rablkno);
108 			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
109 		}
110 	}
111 
112 	/*
113 	 * If block was memory resident, let bread get it.
114 	 * If block was not memory resident, the read was
115 	 * started above, so just wait for the read to complete.
116 	 */
117 	if (bp == NULL)
118 		return (bread(vp, blkno, size, cred, bpp));
119 	return (biowait(bp));
120 }
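
/*
 * Illustrative sketch (not part of the original source): a caller that
 * detects sequential access asks for the next logical block to be read
 * ahead; otherwise plain bread() is used.  "sequential", "lbn",
 * "fs_bsize" and "cred" are hypothetical caller state.
 *
 *	if (sequential)
 *		error = breada(vp, lbn, fs_bsize, lbn + 1, fs_bsize,
 *		    cred, &bp);
 *	else
 *		error = bread(vp, lbn, fs_bsize, cred, &bp);
 */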
121 
122 /*
123  * Synchronous write.
124  * Release buffer on completion.
125  */
126 bwrite(bp)
127 	register struct buf *bp;
128 {
129 	struct proc *p = curproc;		/* XXX */
130 	register int flag;
131 	int s, error = 0;	/* async, non-delayed writes report no error */
132 
133 	flag = bp->b_flags;
134 	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
135 	if ((flag & B_DELWRI) == 0)
136 		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
137 	else
138 		reassignbuf(bp, bp->b_vp);
139 	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
140 	if (bp->b_bcount > bp->b_bufsize)
141 		panic("bwrite");
142 	s = splbio();
143 	bp->b_vp->v_numoutput++;
144 	splx(s);
145 	VOP_STRATEGY(bp);
146 
147 	/*
148 	 * If the write was synchronous, then await I/O completion.
149 	 * If the write was "delayed", then we put the buffer on
150 	 * the queue of blocks awaiting I/O completion status.
151 	 */
152 	if ((flag & B_ASYNC) == 0) {
153 		error = biowait(bp);
154 		brelse(bp);
155 	} else if (flag & B_DELWRI) {
156 		bp->b_flags |= B_AGE;
157 		error = 0;
158 	}
159 	return (error);
160 }
161 
162 /*
163  * Delayed write.
164  *
165  * The buffer is marked dirty, but is not queued for I/O.
166  * This routine should be used when the buffer is expected
167  * to be modified again soon, typically a small write that
168  * partially fills a buffer.
169  *
170  * NB: magnetic tapes cannot be delayed; they must be
171  * written in the order that the writes are requested.
172  */
173 bdwrite(bp)
174 	register struct buf *bp;
175 {
176 	struct proc *p = curproc;		/* XXX */
177 
178 	if ((bp->b_flags & B_DELWRI) == 0) {
179 		bp->b_flags |= B_DELWRI;
180 		reassignbuf(bp, bp->b_vp);
181 		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
182 	}
183 	/*
184 	 * If this is a tape drive, the write must be initiated.
185 	 */
186 	if (VOP_IOCTL(bp->b_vp, 0, (caddr_t)B_TAPE, 0, NOCRED, p) == 0) {
187 		bawrite(bp);
188 	} else {
189 		bp->b_flags |= (B_DONE | B_DELWRI);
190 		brelse(bp);
191 	}
192 }
193 
194 /*
195  * Asynchronous write.
196  * Start I/O on a buffer, but do not wait for it to complete.
197  * The buffer is released when the I/O completes.
198  */
199 bawrite(bp)
200 	register struct buf *bp;
201 {
202 
203 	/*
204 	 * Setting the ASYNC flag causes bwrite to return
205 	 * after starting the I/O.
206 	 */
207 	bp->b_flags |= B_ASYNC;
208 	(void) bwrite(bp);
209 }
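
/*
 * Illustrative sketch (not part of the original source): the three
 * write interfaces above trade latency against safety.  A caller that
 * must know the data is on disk uses bwrite(); one that expects to
 * modify the same buffer again soon uses bdwrite(); one that wants the
 * write started without waiting uses bawrite().  "lbn", "fs_bsize"
 * and the condition names are hypothetical.
 *
 *	bp = getblk(vp, lbn, fs_bsize);
 *	... modify the data at bp->b_un.b_addr ...
 *	if (must_be_on_disk)
 *		error = bwrite(bp);
 *	else if (will_modify_again_soon)
 *		bdwrite(bp);
 *	else
 *		bawrite(bp);
 *
 * When only part of the block is being changed, the caller would
 * bread() it first rather than use getblk() directly.
 */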
210 
211 /*
212  * Release a buffer.
213  * Even if the buffer is dirty, no I/O is started.
214  */
215 brelse(bp)
216 	register struct buf *bp;
217 {
218 	register struct buf *flist;
219 	int s;
220 
221 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
222 	/*
223 	 * If a process is waiting for the buffer, or
224 	 * is waiting for a free buffer, awaken it.
225 	 */
226 	if (bp->b_flags & B_WANTED)
227 		wakeup((caddr_t)bp);
228 	if (bfreelist[0].b_flags & B_WANTED) {
229 		bfreelist[0].b_flags &= ~B_WANTED;
230 		wakeup((caddr_t)bfreelist);
231 	}
232 	/*
233 	 * Retry I/O for locked buffers rather than invalidating them.
234 	 */
235 	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
236 		bp->b_flags &= ~B_ERROR;
237 	/*
238 	 * Disassociate buffers that are no longer valid.
239 	 */
240 	if (bp->b_flags & (B_NOCACHE | B_ERROR))
241 		bp->b_flags |= B_INVAL;
242 	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
243 		if (bp->b_vp)
244 			brelvp(bp);
245 		bp->b_flags &= ~B_DELWRI;
246 	}
247 	/*
248 	 * Stick the buffer back on a free list.
249 	 */
250 	s = splbio();
251 	if (bp->b_bufsize <= 0) {
252 		/* block has no buffer ... put at front of unused buffer list */
253 		flist = &bfreelist[BQ_EMPTY];
254 		binsheadfree(bp, flist);
255 	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
256 		/* block has no info ... put at front of most free list */
257 		flist = &bfreelist[BQ_AGE];
258 		binsheadfree(bp, flist);
259 	} else {
260 		if (bp->b_flags & B_LOCKED)
261 			flist = &bfreelist[BQ_LOCKED];
262 		else if (bp->b_flags & B_AGE)
263 			flist = &bfreelist[BQ_AGE];
264 		else
265 			flist = &bfreelist[BQ_LRU];
266 		binstailfree(bp, flist);
267 	}
268 	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
269 	splx(s);
270 }
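
/*
 * Illustrative sketch (not part of the original source): a caller can
 * hint where brelse() queues the buffer.  Setting B_AGE before release
 * asks that the buffer be reused ahead of other cached data; setting
 * B_NOCACHE discards the cached copy entirely.
 *
 *	bp->b_flags |= B_AGE;		(data unlikely to be needed again)
 *	brelse(bp);
 *
 *	bp->b_flags |= B_NOCACHE;	(do not keep the data at all)
 *	brelse(bp);
 */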
271 
272 /*
273  * Check to see if a block is currently memory resident.
274  */
275 incore(vp, blkno)
276 	struct vnode *vp;
277 	daddr_t blkno;
278 {
279 	register struct buf *bp;
280 	register struct buf *dp;
281 
282 	dp = BUFHASH(vp, blkno);
283 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
284 		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
285 		    (bp->b_flags & B_INVAL) == 0)
286 			return (1);
287 	return (0);
288 }
289 
290 /*
291  * Check to see if a block is currently memory resident.
292  * If it is resident, return it. If it is not resident,
293  * allocate a new buffer and assign it to the block.
294  */
295 struct buf *
296 getblk(vp, blkno, size)
297 	register struct vnode *vp;
298 	daddr_t blkno;
299 	int size;
300 {
301 	register struct buf *bp, *dp;
302 	int s;
303 
304 	if (size > MAXBSIZE)
305 		panic("getblk: size too big");
306 	/*
307 	 * Search the cache for the block. If the buffer is found,
308 	 * but it is currently locked, then we must wait for it to
309 	 * become available.
310 	 */
311 	dp = BUFHASH(vp, blkno);
312 loop:
313 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
314 		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
315 		    (bp->b_flags & B_INVAL))
316 			continue;
317 		s = splbio();
318 		if (bp->b_flags & B_BUSY) {
319 			bp->b_flags |= B_WANTED;
320 			sleep((caddr_t)bp, PRIBIO + 1);
321 			splx(s);
322 			goto loop;
323 		}
324 		bremfree(bp);
325 		bp->b_flags |= B_BUSY;
326 		splx(s);
327 		if (bp->b_bcount != size) {
328 			printf("getblk: stray size\n");
329 			bp->b_flags |= B_INVAL;
330 			bwrite(bp);
331 			goto loop;
332 		}
333 		bp->b_flags |= B_CACHE;
334 		return (bp);
335 	}
336 	bp = getnewbuf();
337 	bremhash(bp);
338 	bgetvp(vp, bp);
339 	bp->b_bcount = 0;
340 	bp->b_lblkno = blkno;
341 	bp->b_blkno = blkno;
342 	bp->b_error = 0;
343 	bp->b_resid = 0;
344 	binshash(bp, dp);
345 	allocbuf(bp, size);
346 	return (bp);
347 }
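
/*
 * Illustrative sketch (not part of the original source): when a block
 * is about to be completely overwritten there is no need to read its
 * old contents, so getblk() is used in place of bread().  "lbn",
 * "fs_bsize" and "src" are hypothetical; a real filesystem would also
 * fill in any block mapping it needs before the write.
 *
 *	bp = getblk(vp, lbn, fs_bsize);
 *	bcopy(src, bp->b_un.b_addr, fs_bsize);
 *	bwrite(bp);
 */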
348 
349 /*
350  * Allocate a buffer.
351  * The caller will assign it to a block.
352  */
353 struct buf *
354 geteblk(size)
355 	int size;
356 {
357 	register struct buf *bp, *flist;
358 
359 	if (size > MAXBSIZE)
360 		panic("geteblk: size too big");
361 	bp = getnewbuf();
362 	bp->b_flags |= B_INVAL;
363 	bremhash(bp);
364 	flist = &bfreelist[BQ_AGE];
365 	bp->b_bcount = 0;
366 	bp->b_error = 0;
367 	bp->b_resid = 0;
368 	binshash(bp, flist);
369 	allocbuf(bp, size);
370 	return (bp);
371 }
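
/*
 * Illustrative sketch (not part of the original source): geteblk()
 * hands back an anonymous buffer with no vnode attached, useful as
 * scratch space for a one-shot transfer.  brelse() returns the memory
 * to the pool; the B_INVAL flag set above keeps the contents from
 * being treated as cached file data.  "xfer_size" is hypothetical.
 *
 *	bp = geteblk(xfer_size);
 *	... fill in bp and hand it to a driver ...
 *	brelse(bp);
 */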
372 
373 /*
374  * Expand or contract the actual memory allocated to a buffer.
375  * If no memory is available, release buffer and take error exit.
376  */
377 allocbuf(tp, size)
378 	register struct buf *tp;
379 	int size;
380 {
381 	register struct buf *bp, *ep;
382 	int sizealloc, take, s;
383 
384 	sizealloc = roundup(size, CLBYTES);
385 	/*
386 	 * Buffer size does not change
387 	 */
388 	if (sizealloc == tp->b_bufsize)
389 		goto out;
390 	/*
391 	 * Buffer size is shrinking.
392 	 * Place the excess space in a buffer header taken from the BQ_EMPTY
393 	 * buffer list; the header is then placed on the "most free" list.
394 	 * If no extra buffer headers are available, leave the
395 	 * extra space in the present buffer.
396 	 */
397 	if (sizealloc < tp->b_bufsize) {
398 		ep = bfreelist[BQ_EMPTY].av_forw;
399 		if (ep == &bfreelist[BQ_EMPTY])
400 			goto out;
401 		s = splbio();
402 		bremfree(ep);
403 		ep->b_flags |= B_BUSY;
404 		splx(s);
405 		pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
406 		    (int)tp->b_bufsize - sizealloc);
407 		ep->b_bufsize = tp->b_bufsize - sizealloc;
408 		tp->b_bufsize = sizealloc;
409 		ep->b_flags |= B_INVAL;
410 		ep->b_bcount = 0;
411 		brelse(ep);
412 		goto out;
413 	}
414 	/*
415 	 * More buffer space is needed. Get it out of buffers on
416 	 * the "most free" list, placing the empty headers on the
417 	 * BQ_EMPTY buffer header list.
418 	 */
419 	while (tp->b_bufsize < sizealloc) {
420 		take = sizealloc - tp->b_bufsize;
421 		bp = getnewbuf();
422 		if (take >= bp->b_bufsize)
423 			take = bp->b_bufsize;
424 		pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
425 		    &tp->b_un.b_addr[tp->b_bufsize], take);
426 		tp->b_bufsize += take;
427 		bp->b_bufsize = bp->b_bufsize - take;
428 		if (bp->b_bcount > bp->b_bufsize)
429 			bp->b_bcount = bp->b_bufsize;
430 		if (bp->b_bufsize <= 0) {
431 			bremhash(bp);
432 			binshash(bp, &bfreelist[BQ_EMPTY]);
433 			bp->b_dev = NODEV;
434 			bp->b_error = 0;
435 			bp->b_flags |= B_INVAL;
436 		}
437 		brelse(bp);
438 	}
439 out:
440 	tp->b_bcount = size;
441 	return (1);
442 }
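
/*
 * Worked example (not part of the original source): buffer memory is
 * managed in CLBYTES units, so the requested size is rounded up before
 * being compared with the current allocation.  On a machine where
 * CLBYTES is 1024, a request for 3000 bytes allocates
 * roundup(3000, 1024) = 3072 bytes of buffer memory (b_bufsize), while
 * b_bcount is set to the requested 3000.
 */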
443 
444 /*
445  * Find a buffer which is available for use.
446  * Select something from a free list.
447  * Preference is to AGE list, then LRU list.
448  */
449 struct buf *
450 getnewbuf()
451 {
452 	register struct buf *bp, *dp;
453 	register struct ucred *cred;
454 	int s;
455 
456 loop:
457 	s = splbio();
458 	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
459 		if (dp->av_forw != dp)
460 			break;
461 	if (dp == bfreelist) {		/* no free blocks */
462 		dp->b_flags |= B_WANTED;
463 		sleep((caddr_t)dp, PRIBIO + 1);
464 		splx(s);
465 		goto loop;
466 	}
467 	bp = dp->av_forw;
468 	bremfree(bp);
469 	bp->b_flags |= B_BUSY;
470 	splx(s);
471 	if (bp->b_flags & B_DELWRI) {
472 		(void) bawrite(bp);
473 		goto loop;
474 	}
475 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
476 	if (bp->b_vp)
477 		brelvp(bp);
478 	if (bp->b_rcred != NOCRED) {
479 		cred = bp->b_rcred;
480 		bp->b_rcred = NOCRED;
481 		crfree(cred);
482 	}
483 	if (bp->b_wcred != NOCRED) {
484 		cred = bp->b_wcred;
485 		bp->b_wcred = NOCRED;
486 		crfree(cred);
487 	}
488 	bp->b_flags = B_BUSY;
489 	bp->b_dirtyoff = bp->b_dirtyend = 0;
490 	return (bp);
491 }
492 
493 /*
494  * Wait for I/O to complete.
495  *
496  * Extract and return any errors associated with the I/O.
497  * If the error flag is set, but no specific error is
498  * given, return EIO.
499  */
500 biowait(bp)
501 	register struct buf *bp;
502 {
503 	int s;
504 
505 	s = splbio();
506 	while ((bp->b_flags & B_DONE) == 0)
507 		sleep((caddr_t)bp, PRIBIO);
508 	splx(s);
509 	if ((bp->b_flags & B_ERROR) == 0)
510 		return (0);
511 	if (bp->b_error)
512 		return (bp->b_error);
513 	return (EIO);
514 }
515 
516 /*
517  * Mark I/O complete on a buffer.
518  *
519  * If a callback has been requested, e.g. the pageout
520  * If a callback has been requested, e.g. by the pageout
521  * daemon, call it. Otherwise, awaken any waiting processes.
522 biodone(bp)
523 	register struct buf *bp;
524 {
525 	register struct vnode *vp;
526 
527 	if (bp->b_flags & B_DONE)
528 		panic("dup biodone");
529 	bp->b_flags |= B_DONE;
530 	if ((bp->b_flags & B_READ) == 0) {
531 		bp->b_dirtyoff = bp->b_dirtyend = 0;
532 		if (vp = bp->b_vp) {
533 			vp->v_numoutput--;
534 			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
535 				if (vp->v_numoutput < 0)
536 					panic("biodone: neg numoutput");
537 				vp->v_flag &= ~VBWAIT;
538 				wakeup((caddr_t)&vp->v_numoutput);
539 			}
540 		}
541 	}
542 	if (bp->b_flags & B_CALL) {
543 		bp->b_flags &= ~B_CALL;
544 		(*bp->b_iodone)(bp);
545 		return;
546 	}
547 	if (bp->b_flags & B_ASYNC)
548 		brelse(bp);
549 	else {
550 		bp->b_flags &= ~B_WANTED;
551 		wakeup((caddr_t)bp);
552 	}
553 }
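
/*
 * Illustrative sketch (not part of the original source): a device
 * driver calls biodone() when the transfer finishes, typically from
 * its interrupt routine.  A caller that does not want to sleep in
 * biowait() can instead request a callback by setting B_CALL and
 * b_iodone before starting the I/O; "mydone" is a hypothetical
 * completion routine, which is then responsible for releasing the
 * buffer itself.
 *
 *	bp->b_flags |= B_CALL;
 *	bp->b_iodone = mydone;
 *	VOP_STRATEGY(bp);
 */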
554 
555 /*
556  * Make sure all write-behind blocks associated
557  * with the mount point are flushed out (called from sync).
558  */
559 mntflushbuf(mountp, flags)
560 	struct mount *mountp;
561 	int flags;
562 {
563 	register struct vnode *vp;
564 
565 	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
566 		panic("mntflushbuf: not busy");
567 loop:
568 	for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
569 		if (VOP_ISLOCKED(vp))
570 			continue;
571 		if (vget(vp))
572 			goto loop;
573 		vflushbuf(vp, flags);
574 		vput(vp);
575 		if (vp->v_mount != mountp)
576 			goto loop;
577 	}
578 }
579 
580 /*
581  * Flush all dirty buffers associated with a vnode.
582  */
583 vflushbuf(vp, flags)
584 	register struct vnode *vp;
585 	int flags;
586 {
587 	register struct buf *bp;
588 	struct buf *nbp;
589 	int s;
590 
591 loop:
592 	s = splbio();
593 	for (bp = vp->v_dirtyblkhd; bp; bp = nbp) {
594 		nbp = bp->b_blockf;
595 		if ((bp->b_flags & B_BUSY))
596 			continue;
597 		if ((bp->b_flags & B_DELWRI) == 0)
598 			panic("vflushbuf: not dirty");
599 		bremfree(bp);
600 		bp->b_flags |= B_BUSY;
601 		splx(s);
602 		/*
603 		 * Wait for I/O associated with indirect blocks to complete,
604 		 * since there is no way to quickly wait for them below.
605 		 * NB: This is really specific to ufs, but is done here
606 		 * as it is easier and quicker.
607 		 */
608 		if (bp->b_vp == vp || (flags & B_SYNC) == 0) {
609 			(void) bawrite(bp);
610 			s = splbio();
611 		} else {
612 			(void) bwrite(bp);
613 			goto loop;
614 		}
615 	}
616 	splx(s);
617 	if ((flags & B_SYNC) == 0)
618 		return;
619 	s = splbio();
620 	while (vp->v_numoutput) {
621 		vp->v_flag |= VBWAIT;
622 		sleep((caddr_t)&vp->v_numoutput, PRIBIO + 1);
623 	}
624 	splx(s);
625 	if (vp->v_dirtyblkhd) {
626 		vprint("vflushbuf: dirty", vp);
627 		goto loop;
628 	}
629 }
630 
631 /*
632  * Invalidate in-core blocks belonging to a closed or unmounted filesystem.
633  *
634  * Go through the list of vnodes associated with the file system;
635  * for each vnode, invalidate any buffers that it holds. Normally
636  * this routine is preceded by a flush call, so that on a quiescent
637  * filesystem there will be no dirty buffers when we are done. The
638  * count of dirty buffers encountered is returned.
639  */
640 mntinvalbuf(mountp)
641 	struct mount *mountp;
642 {
643 	register struct vnode *vp;
644 	int dirty = 0;
645 
646 	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
647 		panic("mntinvalbuf: not busy");
648 loop:
649 	for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
650 		if (vget(vp))
651 			goto loop;
652 		dirty += vinvalbuf(vp, 1);
653 		vput(vp);
654 		if (vp->v_mount != mountp)
655 			goto loop;
656 	}
657 	return (dirty);
658 }
659 
660 /*
661  * Flush out and invalidate all buffers associated with a vnode.
662  * Called with the underlying object locked.
663  */
664 vinvalbuf(vp, save)
665 	register struct vnode *vp;
666 	int save;
667 {
668 	register struct buf *bp;
669 	struct buf *nbp, *blist;
670 	int s, dirty = 0;
671 
672 	for (;;) {
673 		if (blist = vp->v_dirtyblkhd)
674 			/* void */;
675 		else if (blist = vp->v_cleanblkhd)
676 			/* void */;
677 		else
678 			break;
679 		for (bp = blist; bp; bp = nbp) {
680 			nbp = bp->b_blockf;
681 			s = splbio();
682 			if (bp->b_flags & B_BUSY) {
683 				bp->b_flags |= B_WANTED;
684 				sleep((caddr_t)bp, PRIBIO + 1);
685 				splx(s);
686 				break;
687 			}
688 			bremfree(bp);
689 			bp->b_flags |= B_BUSY;
690 			splx(s);
691 			if (save && (bp->b_flags & B_DELWRI)) {
692 				dirty++;
693 				(void) bwrite(bp);
694 				break;
695 			}
696 			if (bp->b_vp != vp)
697 				reassignbuf(bp, bp->b_vp);
698 			else
699 				bp->b_flags |= B_INVAL;
700 			brelse(bp);
701 		}
702 	}
703 	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
704 		panic("vinvalbuf: flush failed");
705 	return (dirty);
706 }
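
/*
 * Illustrative sketch (not part of the original source): at unmount or
 * last close a filesystem flushes what it wants to keep and then
 * discards every buffer cached for the vnode.  A nonzero "save"
 * argument asks vinvalbuf() to write dirty buffers rather than throw
 * them away; the vnode is assumed to be locked, as noted above.
 *
 *	vflushbuf(vp, B_SYNC);
 *	(void) vinvalbuf(vp, 1);
 */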
707 
708 /*
709  * Associate a buffer with a vnode.
710  */
711 bgetvp(vp, bp)
712 	register struct vnode *vp;
713 	register struct buf *bp;
714 {
715 
716 	if (bp->b_vp)
717 		panic("bgetvp: not free");
718 	VHOLD(vp);
719 	bp->b_vp = vp;
720 	if (vp->v_type == VBLK || vp->v_type == VCHR)
721 		bp->b_dev = vp->v_rdev;
722 	else
723 		bp->b_dev = NODEV;
724 	/*
725 	 * Insert onto list for new vnode.
726 	 */
727 	if (vp->v_cleanblkhd) {
728 		bp->b_blockf = vp->v_cleanblkhd;
729 		bp->b_blockb = &vp->v_cleanblkhd;
730 		vp->v_cleanblkhd->b_blockb = &bp->b_blockf;
731 		vp->v_cleanblkhd = bp;
732 	} else {
733 		vp->v_cleanblkhd = bp;
734 		bp->b_blockb = &vp->v_cleanblkhd;
735 		bp->b_blockf = NULL;
736 	}
737 }
738 
739 /*
740  * Disassociate a buffer from a vnode.
741  */
742 brelvp(bp)
743 	register struct buf *bp;
744 {
745 	struct buf *bq;
746 	struct vnode *vp;
747 
748 	if (bp->b_vp == (struct vnode *) 0)
749 		panic("brelvp: NULL");
750 	/*
751 	 * Delete from old vnode list, if on one.
752 	 */
753 	if (bp->b_blockb) {
754 		if (bq = bp->b_blockf)
755 			bq->b_blockb = bp->b_blockb;
756 		*bp->b_blockb = bq;
757 		bp->b_blockf = NULL;
758 		bp->b_blockb = NULL;
759 	}
760 	vp = bp->b_vp;
761 	bp->b_vp = (struct vnode *) 0;
762 	HOLDRELE(vp);
763 }
764 
765 /*
766  * Reassign a buffer from one vnode to another.
767  * Used to assign file specific control information
768  * (indirect blocks) to the vnode to which they belong.
769  */
770 reassignbuf(bp, newvp)
771 	register struct buf *bp;
772 	register struct vnode *newvp;
773 {
774 	register struct buf *bq, **listheadp;
775 
776 	if (newvp == NULL)
777 		panic("reassignbuf: NULL");
778 	/*
779 	 * Delete from old vnode list, if on one.
780 	 */
781 	if (bp->b_blockb) {
782 		if (bq = bp->b_blockf)
783 			bq->b_blockb = bp->b_blockb;
784 		*bp->b_blockb = bq;
785 	}
786 	/*
787 	 * If dirty, put on list of dirty buffers;
788 	 * otherwise insert onto list of clean buffers.
789 	 */
790 	if (bp->b_flags & B_DELWRI)
791 		listheadp = &newvp->v_dirtyblkhd;
792 	else
793 		listheadp = &newvp->v_cleanblkhd;
794 	if (*listheadp) {
795 		bp->b_blockf = *listheadp;
796 		bp->b_blockb = listheadp;
797 		bp->b_blockf->b_blockb = &bp->b_blockf;
798 		*listheadp = bp;
799 	} else {
800 		*listheadp = bp;
801 		bp->b_blockb = listheadp;
802 		bp->b_blockf = NULL;
803 	}
804 }
805