xref: /original-bsd/sys/kern/vfs_bio.c (revision a94793f7)
1 /*
2  * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)vfs_bio.c	7.35 (Berkeley) 03/19/91
8  */
9 
10 #include "param.h"
11 #include "proc.h"
12 #include "buf.h"
13 #include "vnode.h"
14 #include "specdev.h"
15 #include "mount.h"
16 #include "trace.h"
17 #include "resourcevar.h"
18 
19 /*
20  * Find the block in the buffer pool.
21  * If the buffer is not present, allocate a new buffer and load
22  * its contents according to the filesystem fill routine.
23  */
24 bread(vp, blkno, size, cred, bpp)
25 	struct vnode *vp;
26 	daddr_t blkno;
27 	int size;
28 	struct ucred *cred;
29 	struct buf **bpp;
30 {
31 	struct proc *p = curproc;		/* XXX */
32 	register struct buf *bp;
33 
34 	if (size == 0)
35 		panic("bread: size 0");
36 	*bpp = bp = getblk(vp, blkno, size);
37 	if (bp->b_flags & (B_DONE | B_DELWRI)) {
38 		trace(TR_BREADHIT, pack(vp, size), blkno);
39 		return (0);
40 	}
41 	bp->b_flags |= B_READ;
42 	if (bp->b_bcount > bp->b_bufsize)
43 		panic("bread");
44 	if (bp->b_rcred == NOCRED && cred != NOCRED) {
45 		crhold(cred);
46 		bp->b_rcred = cred;
47 	}
48 	VOP_STRATEGY(bp);
49 	trace(TR_BREADMISS, pack(vp, size), blkno);
50 	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
51 	return (biowait(bp));
52 }
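
/*
 * Typical use of bread (a sketch; "lbn" and "bsize" stand for whatever
 * logical block number and filesystem block size the caller supplies):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (error = bread(vp, lbn, bsize, NOCRED, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine or copy bp->b_un.b_addr ...
 *	brelse(bp);
 */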
53 
54 /*
55  * Operates like bread, but also starts I/O on the specified
56  * read-ahead block.
57  */
58 breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
59 	struct vnode *vp;
60 	daddr_t blkno; int size;
61 	daddr_t rablkno; int rabsize;
62 	struct ucred *cred;
63 	struct buf **bpp;
64 {
65 	struct proc *p = curproc;		/* XXX */
66 	register struct buf *bp, *rabp;
67 
68 	bp = NULL;
69 	/*
70 	 * If the block is not memory resident,
71 	 * allocate a buffer and start I/O.
72 	 */
73 	if (!incore(vp, blkno)) {
74 		*bpp = bp = getblk(vp, blkno, size);
75 		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
76 			bp->b_flags |= B_READ;
77 			if (bp->b_bcount > bp->b_bufsize)
78 				panic("breada");
79 			if (bp->b_rcred == NOCRED && cred != NOCRED) {
80 				crhold(cred);
81 				bp->b_rcred = cred;
82 			}
83 			VOP_STRATEGY(bp);
84 			trace(TR_BREADMISS, pack(vp, size), blkno);
85 			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
86 		} else
87 			trace(TR_BREADHIT, pack(vp, size), blkno);
88 	}
89 
90 	/*
91 	 * If there is a read-ahead block, start I/O on it too.
92 	 */
93 	if (!incore(vp, rablkno)) {
94 		rabp = getblk(vp, rablkno, rabsize);
95 		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
96 			brelse(rabp);
97 			trace(TR_BREADHITRA, pack(vp, rabsize), rablkno);
98 		} else {
99 			rabp->b_flags |= B_ASYNC | B_READ;
100 			if (rabp->b_bcount > rabp->b_bufsize)
101 				panic("breadrabp");
102 			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
103 				crhold(cred);
104 				rabp->b_rcred = cred;
105 			}
106 			VOP_STRATEGY(rabp);
107 			trace(TR_BREADMISSRA, pack(vp, rabsize), rablkno);
108 			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
109 		}
110 	}
111 
112 	/*
113 	 * If block was memory resident, let bread get it.
114 	 * If block was not memory resident, the read was
115 	 * started above, so just wait for the read to complete.
116 	 */
117 	if (bp == NULL)
118 		return (bread(vp, blkno, size, cred, bpp));
119 	return (biowait(bp));
120 }
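
/*
 * Typical use of breada (a sketch; here the block following "lbn" is
 * read ahead, with "bsize" again the filesystem block size):
 *
 *	error = breada(vp, lbn, bsize, lbn + 1, bsize, NOCRED, &bp);
 */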
121 
122 /*
123  * Synchronous write.
124  * Release buffer on completion.
125  */
126 bwrite(bp)
127 	register struct buf *bp;
128 {
129 	struct proc *p = curproc;		/* XXX */
130 	register int flag;
131 	int s, error = 0;
132 
133 	flag = bp->b_flags;
134 	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
135 	if ((flag & B_DELWRI) == 0)
136 		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
137 	else
138 		reassignbuf(bp, bp->b_vp);
139 	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
140 	if (bp->b_bcount > bp->b_bufsize)
141 		panic("bwrite");
142 	s = splbio();
143 	bp->b_vp->v_numoutput++;
144 	splx(s);
145 	VOP_STRATEGY(bp);
146 
147 	/*
148 	 * If the write was synchronous, then await I/O completion.
149 	 * If the write was "delayed", then we put the buffer on
150 	 * the queue of blocks awaiting I/O completion status.
151 	 */
152 	if ((flag & B_ASYNC) == 0) {
153 		error = biowait(bp);
154 		brelse(bp);
155 	} else if (flag & B_DELWRI) {
156 		bp->b_flags |= B_AGE;
157 		error = 0;
158 	}
159 	return (error);
160 }
161 
162 /*
163  * Delayed write.
164  *
165  * The buffer is marked dirty, but is not queued for I/O.
166  * This routine should be used when the buffer is expected
167  * to be modified again soon, typically a small write that
168  * partially fills a buffer.
169  *
170  * NB: magnetic tapes cannot be delayed; they must be
171  * written in the order that the writes are requested.
172  */
173 bdwrite(bp)
174 	register struct buf *bp;
175 {
176 	struct proc *p = curproc;		/* XXX */
177 
178 	if ((bp->b_flags & B_DELWRI) == 0) {
179 		bp->b_flags |= B_DELWRI;
180 		reassignbuf(bp, bp->b_vp);
181 		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
182 	}
183 	/*
184 	 * If this is a tape drive, the write must be initiated.
185 	 */
186 	if (VOP_IOCTL(bp->b_vp, 0, B_TAPE, 0, NOCRED) == 0) {
187 		bawrite(bp);
188 	} else {
189 		bp->b_flags |= (B_DONE | B_DELWRI);
190 		brelse(bp);
191 	}
192 }
193 
194 /*
195  * Asynchronous write.
196  * Start I/O on a buffer, but do not wait for it to complete.
197  * The buffer is released when the I/O completes.
198  */
199 bawrite(bp)
200 	register struct buf *bp;
201 {
202 
203 	/*
204 	 * Setting the ASYNC flag causes bwrite to return
205 	 * after starting the I/O.
206 	 */
207 	bp->b_flags |= B_ASYNC;
208 	(void) bwrite(bp);
209 }
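
/*
 * Choosing among the three write routines above (a sketch; the
 * conditions are placeholders for whatever policy the caller applies):
 *
 *	if (write_must_be_stable_before_proceeding)
 *		error = bwrite(bp);
 *	else if (buffer_will_likely_be_modified_again_soon)
 *		bdwrite(bp);
 *	else
 *		bawrite(bp);
 */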
210 
211 /*
212  * Release a buffer.
213  * Even if the buffer is dirty, no I/O is started.
214  */
215 brelse(bp)
216 	register struct buf *bp;
217 {
218 	register struct buf *flist;
219 	int s;
220 
221 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
222 	/*
223 	 * If a process is waiting for the buffer, or
224 	 * is waiting for a free buffer, awaken it.
225 	 */
226 	if (bp->b_flags & B_WANTED)
227 		wakeup((caddr_t)bp);
228 	if (bfreelist[0].b_flags & B_WANTED) {
229 		bfreelist[0].b_flags &= ~B_WANTED;
230 		wakeup((caddr_t)bfreelist);
231 	}
232 	/*
233 	 * Retry I/O for locked buffers rather than invalidating them.
234 	 */
235 	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
236 		bp->b_flags &= ~B_ERROR;
237 	/*
238 	 * Disassociate buffers that are no longer valid.
239 	 */
240 	if (bp->b_flags & (B_NOCACHE | B_ERROR))
241 		bp->b_flags |= B_INVAL;
242 	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
243 		if (bp->b_vp)
244 			brelvp(bp);
245 		bp->b_flags &= ~B_DELWRI;
246 	}
247 	/*
248 	 * Stick the buffer back on a free list.
249 	 */
250 	s = splbio();
251 	if (bp->b_bufsize <= 0) {
252 		/* block has no buffer ... put at front of unused buffer list */
253 		flist = &bfreelist[BQ_EMPTY];
254 		binsheadfree(bp, flist);
255 	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
256 		/* block has no info ... put at front of most free list */
257 		flist = &bfreelist[BQ_AGE];
258 		binsheadfree(bp, flist);
259 	} else {
260 		if (bp->b_flags & B_LOCKED)
261 			flist = &bfreelist[BQ_LOCKED];
262 		else if (bp->b_flags & B_AGE)
263 			flist = &bfreelist[BQ_AGE];
264 		else
265 			flist = &bfreelist[BQ_LRU];
266 		binstailfree(bp, flist);
267 	}
268 	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
269 	splx(s);
270 }
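
/*
 * To summarize the queueing policy above: buffers with no memory go on
 * BQ_EMPTY, invalid or errored buffers go at the front of BQ_AGE,
 * locked buffers go on BQ_LOCKED, aged buffers on BQ_AGE, and all
 * remaining buffers at the tail of BQ_LRU.
 */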
271 
272 /*
273  * Check to see if a block is currently memory resident.
274  */
275 incore(vp, blkno)
276 	struct vnode *vp;
277 	daddr_t blkno;
278 {
279 	register struct buf *bp;
280 	register struct buf *dp;
281 
282 	dp = BUFHASH(vp, blkno);
283 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
284 		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
285 		    (bp->b_flags & B_INVAL) == 0)
286 			return (1);
287 	return (0);
288 }
289 
290 /*
291  * Check to see if a block is currently memory resident.
292  * If it is resident, return it. If it is not resident,
293  * allocate a new buffer and assign it to the block.
294  */
295 struct buf *
296 getblk(vp, blkno, size)
297 	register struct vnode *vp;
298 	daddr_t blkno;
299 	int size;
300 {
301 	register struct buf *bp, *dp;
302 	int s;
303 
304 	if (size > MAXBSIZE)
305 		panic("getblk: size too big");
306 	/*
307 	 * Search the cache for the block. If the buffer is found,
308  * but it is currently locked, then we must wait for it to
309 	 * become available.
310 	 */
311 	dp = BUFHASH(vp, blkno);
312 loop:
313 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
314 		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
315 		    (bp->b_flags & B_INVAL))
316 			continue;
317 		s = splbio();
318 		if (bp->b_flags & B_BUSY) {
319 			bp->b_flags |= B_WANTED;
320 			sleep((caddr_t)bp, PRIBIO + 1);
321 			splx(s);
322 			goto loop;
323 		}
324 		bremfree(bp);
325 		bp->b_flags |= B_BUSY;
326 		splx(s);
327 		if (bp->b_bcount != size) {
328 			printf("getblk: stray size\n");
329 			bp->b_flags |= B_INVAL;
330 			bwrite(bp);
331 			goto loop;
332 		}
333 		bp->b_flags |= B_CACHE;
334 		return (bp);
335 	}
336 	bp = getnewbuf();
337 	bremhash(bp);
338 	bgetvp(vp, bp);
339 	bp->b_bcount = 0;
340 	bp->b_lblkno = blkno;
341 	bp->b_blkno = blkno;
342 	bp->b_error = 0;
343 	bp->b_resid = 0;
344 	binshash(bp, dp);
345 	allocbuf(bp, size);
346 	return (bp);
347 }
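
/*
 * Typical use of getblk when a block is about to be overwritten in its
 * entirety, so its previous contents need not be read (a sketch; "lbn"
 * and "bsize" are supplied by the calling filesystem):
 *
 *	bp = getblk(vp, lbn, bsize);
 *	... fill bp->b_un.b_addr ...
 *	bwrite(bp);
 */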
348 
349 /*
350  * Allocate a buffer.
351  * The caller will assign it to a block.
352  */
353 struct buf *
354 geteblk(size)
355 	int size;
356 {
357 	register struct buf *bp, *flist;
358 
359 	if (size > MAXBSIZE)
360 		panic("geteblk: size too big");
361 	bp = getnewbuf();
362 	bp->b_flags |= B_INVAL;
363 	bremhash(bp);
364 	flist = &bfreelist[BQ_AGE];
365 	bp->b_bcount = 0;
366 	bp->b_error = 0;
367 	bp->b_resid = 0;
368 	binshash(bp, flist);
369 	allocbuf(bp, size);
370 	return (bp);
371 }
372 
373 /*
374  * Expand or contract the actual memory allocated to a buffer.
375  * Space is taken from, or returned to, other buffers on the free lists.
376  */
377 allocbuf(tp, size)
378 	register struct buf *tp;
379 	int size;
380 {
381 	register struct buf *bp, *ep;
382 	int sizealloc, take, s;
383 
384 	sizealloc = roundup(size, CLBYTES);
385 	/*
386 	 * Buffer size does not change
387 	 */
388 	if (sizealloc == tp->b_bufsize)
389 		goto out;
390 	/*
391 	 * Buffer size is shrinking.
392  * Place the excess space in a buffer header taken from the
393  * BQ_EMPTY buffer list, then release that header onto the "most free" list.
394 	 * If no extra buffer headers are available, leave the
395 	 * extra space in the present buffer.
396 	 */
397 	if (sizealloc < tp->b_bufsize) {
398 		ep = bfreelist[BQ_EMPTY].av_forw;
399 		if (ep == &bfreelist[BQ_EMPTY])
400 			goto out;
401 		s = splbio();
402 		bremfree(ep);
403 		ep->b_flags |= B_BUSY;
404 		splx(s);
405 		pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
406 		    (int)tp->b_bufsize - sizealloc);
407 		ep->b_bufsize = tp->b_bufsize - sizealloc;
408 		tp->b_bufsize = sizealloc;
409 		ep->b_flags |= B_INVAL;
410 		ep->b_bcount = 0;
411 		brelse(ep);
412 		goto out;
413 	}
414 	/*
415 	 * More buffer space is needed. Get it out of buffers on
416 	 * the "most free" list, placing the empty headers on the
417 	 * BQ_EMPTY buffer header list.
418 	 */
419 	while (tp->b_bufsize < sizealloc) {
420 		take = sizealloc - tp->b_bufsize;
421 		bp = getnewbuf();
422 		if (take >= bp->b_bufsize)
423 			take = bp->b_bufsize;
424 		pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
425 		    &tp->b_un.b_addr[tp->b_bufsize], take);
426 		tp->b_bufsize += take;
427 		bp->b_bufsize = bp->b_bufsize - take;
428 		if (bp->b_bcount > bp->b_bufsize)
429 			bp->b_bcount = bp->b_bufsize;
430 		if (bp->b_bufsize <= 0) {
431 			bremhash(bp);
432 			binshash(bp, &bfreelist[BQ_EMPTY]);
433 			bp->b_dev = NODEV;
434 			bp->b_error = 0;
435 			bp->b_flags |= B_INVAL;
436 		}
437 		brelse(bp);
438 	}
439 out:
440 	tp->b_bcount = size;
441 	return (1);
442 }
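
/*
 * Note on allocbuf: b_bufsize tracks the memory actually attached to
 * the buffer, rounded up to a multiple of CLBYTES, while b_bcount is
 * set to the exact size the caller requested; memory is shuffled
 * between buffers with pagemove rather than allocated afresh.
 */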
443 
444 /*
445  * Find a buffer which is available for use.
446  * Select something from a free list.
447  * Preference is to AGE list, then LRU list.
448  */
449 struct buf *
450 getnewbuf()
451 {
452 	register struct buf *bp, *dp;
453 	register struct ucred *cred;
454 	int s;
455 
456 loop:
457 	s = splbio();
458 	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
459 		if (dp->av_forw != dp)
460 			break;
461 	if (dp == bfreelist) {		/* no free blocks */
462 		dp->b_flags |= B_WANTED;
463 		sleep((caddr_t)dp, PRIBIO + 1);
464 		splx(s);
465 		goto loop;
466 	}
467 	bp = dp->av_forw;
468 	bremfree(bp);
469 	bp->b_flags |= B_BUSY;
470 	splx(s);
471 	if (bp->b_flags & B_DELWRI) {
472 		(void) bawrite(bp);
473 		goto loop;
474 	}
475 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
476 	if (bp->b_vp)
477 		brelvp(bp);
478 	if (bp->b_rcred != NOCRED) {
479 		cred = bp->b_rcred;
480 		bp->b_rcred = NOCRED;
481 		crfree(cred);
482 	}
483 	if (bp->b_wcred != NOCRED) {
484 		cred = bp->b_wcred;
485 		bp->b_wcred = NOCRED;
486 		crfree(cred);
487 	}
488 	bp->b_flags = B_BUSY;
489 	bp->b_dirtyoff = bp->b_dirtyend = 0;
490 	return (bp);
491 }
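
/*
 * Note on getnewbuf: a delayed-write buffer found at the head of a
 * free list is pushed out asynchronously and the scan is retried, so
 * the routine always returns a clean, busy buffer with no vnode or
 * credentials still attached.
 */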
492 
493 /*
494  * Wait for I/O to complete.
495  *
496  * Extract and return any errors associated with the I/O.
497  * If the error flag is set, but no specific error is
498  * given, return EIO.
499  */
500 biowait(bp)
501 	register struct buf *bp;
502 {
503 	int s;
504 
505 	s = splbio();
506 	while ((bp->b_flags & B_DONE) == 0)
507 		sleep((caddr_t)bp, PRIBIO);
508 	splx(s);
509 	if ((bp->b_flags & B_ERROR) == 0)
510 		return (0);
511 	if (bp->b_error)
512 		return (bp->b_error);
513 	return (EIO);
514 }
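
/*
 * Typical use of biowait by a caller that starts its own I/O (a
 * sketch, following the pattern used by bread above):
 *
 *	bp->b_flags |= B_READ;
 *	VOP_STRATEGY(bp);
 *	error = biowait(bp);
 */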
515 
516 /*
517  * Mark I/O complete on a buffer.
518  *
519  * If a callback has been requested, e.g. by the pageout
520  * daemon, invoke it. Otherwise, awaken waiting processes.
521  */
522 biodone(bp)
523 	register struct buf *bp;
524 {
525 	register struct vnode *vp;
526 
527 	if (bp->b_flags & B_DONE)
528 		panic("dup biodone");
529 	bp->b_flags |= B_DONE;
530 	if ((bp->b_flags & B_READ) == 0) {
531 		bp->b_dirtyoff = bp->b_dirtyend = 0;
532 		if (vp = bp->b_vp) {
533 			vp->v_numoutput--;
534 			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
535 				if (vp->v_numoutput < 0)
536 					panic("biodone: neg numoutput");
537 				vp->v_flag &= ~VBWAIT;
538 				wakeup((caddr_t)&vp->v_numoutput);
539 			}
540 		}
541 	}
542 	if (bp->b_flags & B_CALL) {
543 		bp->b_flags &= ~B_CALL;
544 		(*bp->b_iodone)(bp);
545 		return;
546 	}
547 	if (bp->b_flags & B_ASYNC)
548 		brelse(bp);
549 	else {
550 		bp->b_flags &= ~B_WANTED;
551 		wakeup((caddr_t)bp);
552 	}
553 }
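
/*
 * Sketch of the B_CALL convention handled by biodone: a caller such as
 * the pageout daemon arranges a completion callback instead of
 * sleeping ("mydone" is a placeholder for the caller's routine):
 *
 *	bp->b_flags |= B_CALL | B_ASYNC;
 *	bp->b_iodone = mydone;
 *	VOP_STRATEGY(bp);
 */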
554 
555 /*
556  * Make sure all write-behind blocks associated
557  * with mount point are flushed out (from sync).
558  */
559 mntflushbuf(mountp, flags)
560 	struct mount *mountp;
561 	int flags;
562 {
563 	register struct vnode *vp;
564 
565 	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
566 		panic("mntflushbuf: not busy");
567 loop:
568 	for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
569 		if (vget(vp))
570 			goto loop;
571 		vflushbuf(vp, flags);
572 		vput(vp);
573 		if (vp->v_mount != mountp)
574 			goto loop;
575 	}
576 }
577 
578 /*
579  * Flush all dirty buffers associated with a vnode.
580  */
581 vflushbuf(vp, flags)
582 	register struct vnode *vp;
583 	int flags;
584 {
585 	register struct buf *bp;
586 	struct buf *nbp;
587 	int s;
588 
589 loop:
590 	s = splbio();
591 	for (bp = vp->v_dirtyblkhd; bp; bp = nbp) {
592 		nbp = bp->b_blockf;
593 		if ((bp->b_flags & B_BUSY))
594 			continue;
595 		if ((bp->b_flags & B_DELWRI) == 0)
596 			panic("vflushbuf: not dirty");
597 		bremfree(bp);
598 		bp->b_flags |= B_BUSY;
599 		splx(s);
600 		/*
601 		 * Wait for I/O associated with indirect blocks to complete,
602 		 * since there is no way to quickly wait for them below.
603 		 * NB: This is really specific to ufs, but is done here
604 		 * as it is easier and quicker.
605 		 */
606 		if (bp->b_vp == vp || (flags & B_SYNC) == 0) {
607 			(void) bawrite(bp);
608 			s = splbio();
609 		} else {
610 			(void) bwrite(bp);
611 			goto loop;
612 		}
613 	}
614 	splx(s);
615 	if ((flags & B_SYNC) == 0)
616 		return;
617 	s = splbio();
618 	while (vp->v_numoutput) {
619 		vp->v_flag |= VBWAIT;
620 		sleep((caddr_t)&vp->v_numoutput, PRIBIO + 1);
621 	}
622 	splx(s);
623 	if (vp->v_dirtyblkhd) {
624 		vprint("vflushbuf: dirty", vp);
625 		goto loop;
626 	}
627 }
628 
629 /*
630  * Invalidate in-core blocks belonging to a closed or unmounted filesystem.
631  *
632  * Go through the list of vnodes associated with the file system;
633  * for each vnode invalidate any buffers that it holds. Normally
634  * this routine is preceded by a call to mntflushbuf, so that on a
635  * quiescent filesystem there will be no dirty buffers when we are done.
636  * This routine returns the count of dirty buffers when it is finished.
637  */
638 mntinvalbuf(mountp)
639 	struct mount *mountp;
640 {
641 	register struct vnode *vp;
642 	int dirty = 0;
643 
644 	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
645 		panic("mntinvalbuf: not busy");
646 loop:
647 	for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
648 		if (vget(vp))
649 			goto loop;
650 		dirty += vinvalbuf(vp, 1);
651 		vput(vp);
652 		if (vp->v_mount != mountp)
653 			goto loop;
654 	}
655 	return (dirty);
656 }
657 
658 /*
659  * Flush out and invalidate all buffers associated with a vnode.
660  * Called with the underlying object locked.
661  */
662 vinvalbuf(vp, save)
663 	register struct vnode *vp;
664 	int save;
665 {
666 	register struct buf *bp;
667 	struct buf *nbp, *blist;
668 	int s, dirty = 0;
669 
670 	for (;;) {
671 		if (blist = vp->v_dirtyblkhd)
672 			/* void */;
673 		else if (blist = vp->v_cleanblkhd)
674 			/* void */;
675 		else
676 			break;
677 		for (bp = blist; bp; bp = nbp) {
678 			nbp = bp->b_blockf;
679 			s = splbio();
680 			if (bp->b_flags & B_BUSY) {
681 				bp->b_flags |= B_WANTED;
682 				sleep((caddr_t)bp, PRIBIO + 1);
683 				splx(s);
684 				break;
685 			}
686 			bremfree(bp);
687 			bp->b_flags |= B_BUSY;
688 			splx(s);
689 			if (save && (bp->b_flags & B_DELWRI)) {
690 				dirty++;
691 				(void) bwrite(bp);
692 				break;
693 			}
694 			if (bp->b_vp != vp)
695 				reassignbuf(bp, bp->b_vp);
696 			else
697 				bp->b_flags |= B_INVAL;
698 			brelse(bp);
699 		}
700 	}
701 	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
702 		panic("vinvalbuf: flush failed");
703 	return (dirty);
704 }
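
/*
 * Note on vinvalbuf: with "save" nonzero, dirty buffers are written
 * synchronously before being discarded and are counted in the value
 * returned; with "save" zero, dirty buffers are simply invalidated.
 */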
705 
706 /*
707  * Associate a buffer with a vnode.
708  */
709 bgetvp(vp, bp)
710 	register struct vnode *vp;
711 	register struct buf *bp;
712 {
713 
714 	if (bp->b_vp)
715 		panic("bgetvp: not free");
716 	VHOLD(vp);
717 	bp->b_vp = vp;
718 	if (vp->v_type == VBLK || vp->v_type == VCHR)
719 		bp->b_dev = vp->v_rdev;
720 	else
721 		bp->b_dev = NODEV;
722 	/*
723 	 * Insert onto list for new vnode.
724 	 */
725 	if (vp->v_cleanblkhd) {
726 		bp->b_blockf = vp->v_cleanblkhd;
727 		bp->b_blockb = &vp->v_cleanblkhd;
728 		vp->v_cleanblkhd->b_blockb = &bp->b_blockf;
729 		vp->v_cleanblkhd = bp;
730 	} else {
731 		vp->v_cleanblkhd = bp;
732 		bp->b_blockb = &vp->v_cleanblkhd;
733 		bp->b_blockf = NULL;
734 	}
735 }
736 
737 /*
738  * Disassociate a buffer from a vnode.
739  */
740 brelvp(bp)
741 	register struct buf *bp;
742 {
743 	struct buf *bq;
744 	struct vnode *vp;
745 
746 	if (bp->b_vp == (struct vnode *) 0)
747 		panic("brelvp: NULL");
748 	/*
749 	 * Delete from old vnode list, if on one.
750 	 */
751 	if (bp->b_blockb) {
752 		if (bq = bp->b_blockf)
753 			bq->b_blockb = bp->b_blockb;
754 		*bp->b_blockb = bq;
755 		bp->b_blockf = NULL;
756 		bp->b_blockb = NULL;
757 	}
758 	vp = bp->b_vp;
759 	bp->b_vp = (struct vnode *) 0;
760 	HOLDRELE(vp);
761 }
762 
763 /*
764  * Reassign a buffer from one vnode to another.
765  * Used to assign file specific control information
766  * (indirect blocks) to the vnode to which they belong.
767  */
768 reassignbuf(bp, newvp)
769 	register struct buf *bp;
770 	register struct vnode *newvp;
771 {
772 	register struct buf *bq, **listheadp;
773 
774 	if (newvp == NULL)
775 		panic("reassignbuf: NULL");
776 	/*
777 	 * Delete from old vnode list, if on one.
778 	 */
779 	if (bp->b_blockb) {
780 		if (bq = bp->b_blockf)
781 			bq->b_blockb = bp->b_blockb;
782 		*bp->b_blockb = bq;
783 	}
784 	/*
785 	 * If dirty, put on list of dirty buffers;
786 	 * otherwise insert onto list of clean buffers.
787 	 */
788 	if (bp->b_flags & B_DELWRI)
789 		listheadp = &newvp->v_dirtyblkhd;
790 	else
791 		listheadp = &newvp->v_cleanblkhd;
792 	if (*listheadp) {
793 		bp->b_blockf = *listheadp;
794 		bp->b_blockb = listheadp;
795 		bp->b_blockf->b_blockb = &bp->b_blockf;
796 		*listheadp = bp;
797 	} else {
798 		*listheadp = bp;
799 		bp->b_blockb = listheadp;
800 		bp->b_blockf = NULL;
801 	}
802 }
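
/*
 * Note on the vnode buffer lists maintained by bgetvp, brelvp and
 * reassignbuf: b_blockf points to the next buffer on the list, while
 * b_blockb points back at whichever pointer (the list head or the
 * previous buffer's b_blockf) references this buffer, so a buffer can
 * be unlinked from the middle of the list without a search.
 */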
803