xref: /original-bsd/sys/kern/vfs_bio.c (revision f0fd5f8a)
1 /*	vfs_bio.c	4.40	82/12/17	*/
2 
3 #include "../machine/pte.h"
4 
5 #include "../h/param.h"
6 #include "../h/systm.h"
7 #include "../h/dir.h"
8 #include "../h/user.h"
9 #include "../h/buf.h"
10 #include "../h/conf.h"
11 #include "../h/proc.h"
12 #include "../h/seg.h"
13 #include "../h/vm.h"
14 #include "../h/trace.h"
15 
16 /*
17  * Read in (if necessary) the block and return a buffer pointer.
18  */
19 struct buf *
20 bread(dev, blkno, size)
21 	dev_t dev;
22 	daddr_t blkno;
23 	int size;
24 {
25 	register struct buf *bp;
26 
27 	if (size == 0)
28 		panic("bread: size 0");
29 	bp = getblk(dev, blkno, size);
30 	if (bp->b_flags&B_DONE) {
31 		trace(TR_BREADHIT, dev, blkno);
32 		return(bp);
33 	}
34 	bp->b_flags |= B_READ;
35 	if (bp->b_bcount > bp->b_bufsize)
36 		panic("bread");
37 	(*bdevsw[major(dev)].d_strategy)(bp);
38 	trace(TR_BREADMISS, dev, blkno);
39 	u.u_ru.ru_inblock++;		/* pay for read */
40 	biowait(bp);
41 	return(bp);
42 }
43 
44 /*
45  * Read in the block, like bread, but also start I/O on the
46  * read-ahead block (which is not allocated to the caller)
47  */
48 struct buf *
49 breada(dev, blkno, size, rablkno, rabsize)
50 	dev_t dev;
51 	daddr_t blkno; int size;
52 	daddr_t rablkno; int rabsize;
53 {
54 	register struct buf *bp, *rabp;
55 
56 	bp = NULL;
57 	/*
58 	 * If the block isn't in core, then allocate
59 	 * a buffer and initiate i/o (getblk checks
60 	 * for a cache hit).
61 	 */
62 	if (!incore(dev, blkno)) {
63 		bp = getblk(dev, blkno, size);
64 		if ((bp->b_flags&B_DONE) == 0) {
65 			bp->b_flags |= B_READ;
66 			if (bp->b_bcount > bp->b_bufsize)
67 				panic("breada");
68 			(*bdevsw[major(dev)].d_strategy)(bp);
69 			trace(TR_BREADMISS, dev, blkno);
70 			u.u_ru.ru_inblock++;		/* pay for read */
71 		} else
72 			trace(TR_BREADHIT, dev, blkno);
73 	}
74 
75 	/*
76 	 * If there's a read-ahead block, start i/o
77 	 * on it also (as above).
78 	 */
79 	if (rablkno && !incore(dev, rablkno)) {
80 		rabp = getblk(dev, rablkno, rabsize);
81 		if (rabp->b_flags & B_DONE) {
82 			brelse(rabp);
83 			trace(TR_BREADHITRA, dev, rablkno);
84 		} else {
85 			rabp->b_flags |= B_READ|B_ASYNC;
86 			if (rabp->b_bcount > rabp->b_bufsize)
87 				panic("breadrabp");
88 			(*bdevsw[major(dev)].d_strategy)(rabp);
89 			trace(TR_BREADMISSRA, dev, rablkno);
90 			u.u_ru.ru_inblock++;		/* pay in advance */
91 		}
92 	}
93 
94 	/*
95 	 * If block was in core, let bread get it.
96 	 * If block wasn't in core, then the read was started
97 	 * above, so just wait for it.
98 	 */
99 	if (bp == NULL)
100 		return (bread(dev, blkno, size));
101 	biowait(bp);
102 	return (bp);
103 }
104 
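/*
 * Editor's sketch (not part of the original 4.2BSD source): the
 * canonical caller pattern for bread()/breada() above.  The caller
 * owns the returned buffer, checks for an error posted to u.u_error
 * by biowait(), and must brelse() the buffer when finished so the
 * block stays cached.  The function name and its arguments here are
 * hypothetical.
 */
#ifdef notdef
example_read(dev, bn, rabn, size)
	dev_t dev;
	daddr_t bn, rabn;
	int size;
{
	register struct buf *bp;

	bp = breada(dev, bn, size, rabn, size);	/* read bn, read-ahead rabn */
	if (u.u_error) {		/* set from geterror() in biowait() */
		brelse(bp);
		return;
	}
	/* ... consume up to bp->b_bcount bytes at bp->b_un.b_addr ... */
	brelse(bp);			/* release; block remains cached */
}
#endif
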
105 /*
106  * Write the buffer, waiting for completion.
107  * Then release the buffer.
108  */
109 bwrite(bp)
110 	register struct buf *bp;
111 {
112 	register flag;
113 
114 	flag = bp->b_flags;
115 	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI | B_AGE);
116 	if ((flag&B_DELWRI) == 0)
117 		u.u_ru.ru_oublock++;		/* no one paid yet */
118 	trace(TR_BWRITE, bp->b_dev, bp->b_blkno);
119 	if (bp->b_bcount > bp->b_bufsize)
120 		panic("bwrite");
121 	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
122 
123 	/*
124 	 * If the write was synchronous, then await i/o completion.
125 	 * If the write was "delayed", then we put the buffer on
126 	 * the q of blocks awaiting i/o completion status.
127 	 * Otherwise, the i/o must be finished and we check for
128 	 * an error.
129 	 */
130 	if ((flag&B_ASYNC) == 0) {
131 		biowait(bp);
132 		brelse(bp);
133 	} else if (flag & B_DELWRI)
134 		bp->b_flags |= B_AGE;
135 	else
136 		u.u_error = geterror(bp);
137 }
138 
139 /*
140  * Release the buffer, marking it so that if it is grabbed
141  * for another purpose it will be written out before being
142  * given up (e.g. when writing a partial block where it is
143  * assumed that another write for the same block will soon follow).
144  * This can't be done for magtape, since writes must be done
145  * in the same order as requested.
146  */
147 bdwrite(bp)
148 	register struct buf *bp;
149 {
150 	register int flags;
151 
152 	if ((bp->b_flags&B_DELWRI) == 0)
153 		u.u_ru.ru_oublock++;		/* no one paid yet */
154 	flags = bdevsw[major(bp->b_dev)].d_flags;
155 	if(flags & B_TAPE)
156 		bawrite(bp);
157 	else {
158 		bp->b_flags |= B_DELWRI | B_DONE;
159 		brelse(bp);
160 	}
161 }
162 
163 /*
164  * Release the buffer, start I/O on it, but don't wait for completion.
165  */
166 bawrite(bp)
167 	register struct buf *bp;
168 {
169 
170 	bp->b_flags |= B_ASYNC;
171 	bwrite(bp);
172 }
173 
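/*
 * Editor's sketch (not part of the original 4.2BSD source): how a
 * caller typically chooses among the three write entry points above.
 * bwrite() writes synchronously and waits; bawrite() starts the write
 * but does not wait; bdwrite() only marks the buffer dirty (B_DELWRI)
 * so the write happens later, when the buffer is reclaimed or flushed.
 * The function and parameter names are hypothetical.
 */
#ifdef notdef
example_write(bp, partial, mustwait)
	register struct buf *bp;
	int partial, mustwait;
{

	if (mustwait)
		bwrite(bp);	/* synchronous: wait for completion, then release */
	else if (partial)
		bdwrite(bp);	/* partial block likely to be rewritten soon */
	else
		bawrite(bp);	/* full block: start i/o and return at once */
}
#endif
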
174 /*
175  * Release the buffer, with no I/O implied.
176  */
177 brelse(bp)
178 	register struct buf *bp;
179 {
180 	register struct buf *flist;
181 	register s;
182 
183 	/*
184 	 * If anyone is waiting for this buffer, or is
185 	 * waiting for any free buffer, wake 'em up.
186 	 */
187 	if (bp->b_flags&B_WANTED)
188 		wakeup((caddr_t)bp);
189 	if (bfreelist[0].b_flags&B_WANTED) {
190 		bfreelist[0].b_flags &= ~B_WANTED;
191 		wakeup((caddr_t)bfreelist);
192 	}
193 	if (bp->b_flags&B_ERROR)
194 		if (bp->b_flags & B_LOCKED)
195 			bp->b_flags &= ~B_ERROR;	/* try again later */
196 		else
197 			bp->b_dev = NODEV;  		/* no assoc */
198 
199 	/*
200 	 * Stick the buffer back on a free list.
201 	 */
202 	s = spl6();
203 	if (bp->b_bufsize <= 0) {
204 		/* block has no buffer ... put at front of unused buffer list */
205 		flist = &bfreelist[BQ_EMPTY];
206 		binsheadfree(bp, flist);
207 	} else if (bp->b_flags & (B_ERROR|B_INVAL)) {
208 		/* block has no info ... put at front of most free list */
209 		flist = &bfreelist[BQ_AGE];
210 		binsheadfree(bp, flist);
211 	} else {
212 		if (bp->b_flags & B_LOCKED)
213 			flist = &bfreelist[BQ_LOCKED];
214 		else if (bp->b_flags & B_AGE)
215 			flist = &bfreelist[BQ_AGE];
216 		else
217 			flist = &bfreelist[BQ_LRU];
218 		binstailfree(bp, flist);
219 	}
220 	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE);
221 	splx(s);
222 }
223 
224 /*
225  * See if the block is associated with some buffer
226  * (mainly to avoid getting hung up on a wait in breada)
227  */
228 incore(dev, blkno)
229 	dev_t dev;
230 	daddr_t blkno;
231 {
232 	register struct buf *bp;
233 	register struct buf *dp;
234 
235 	dp = BUFHASH(dev, blkno);
236 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
237 		if (bp->b_blkno == blkno && bp->b_dev == dev &&
238 		    (bp->b_flags & B_INVAL) == 0)
239 			return (1);
240 	return (0);
241 }
242 
243 struct buf *
244 baddr(dev, blkno, size)
245 	dev_t dev;
246 	daddr_t blkno;
247 	int size;
248 {
249 
250 	if (incore(dev, blkno))
251 		return (bread(dev, blkno, size));
252 	return (0);
253 }
254 
255 /*
256  * Assign a buffer for the given block.  If the appropriate
257  * block is already associated, return it; otherwise search
258  * for the oldest non-busy buffer and reassign it.
259  *
260  * We use splx here because this routine may be called
261  * on the interrupt stack during a dump, and we don't
262  * want to lower the ipl back to 0.
263  */
264 struct buf *
265 getblk(dev, blkno, size)
266 	dev_t dev;
267 	daddr_t blkno;
268 	int size;
269 {
270 	register struct buf *bp, *dp;
271 	int s;
272 
273 	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-PGSHIFT))	/* XXX */
274 		blkno = 1 << ((sizeof(int)*NBBY-PGSHIFT) + 1);
275 	/*
276 	 * Search the cache for the block.  If we hit, but
277 	 * the buffer is in use for i/o, then we wait until
278 	 * the i/o has completed.
279 	 */
280 	dp = BUFHASH(dev, blkno);
281 loop:
282 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
283 		if (bp->b_blkno != blkno || bp->b_dev != dev ||
284 		    bp->b_flags&B_INVAL)
285 			continue;
286 		s = spl6();
287 		if (bp->b_flags&B_BUSY) {
288 			bp->b_flags |= B_WANTED;
289 			sleep((caddr_t)bp, PRIBIO+1);
290 			splx(s);
291 			goto loop;
292 		}
293 		splx(s);
294 		notavail(bp);
295 		if (brealloc(bp, size) == 0)
296 			goto loop;
297 		bp->b_flags |= B_CACHE;
298 		return(bp);
299 	}
300 	if (major(dev) >= nblkdev)
301 		panic("blkdev");
302 	bp = getnewbuf();
303 	bfree(bp);
304 	bremhash(bp);
305 	binshash(bp, dp);
306 	bp->b_dev = dev;
307 	bp->b_blkno = blkno;
308 	bp->b_error = 0;
309 	if (brealloc(bp, size) == 0)
310 		goto loop;
311 	return(bp);
312 }
313 
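/*
 * Editor's sketch (not part of the original 4.2BSD source): using
 * getblk() directly when the caller will overwrite the entire block,
 * so no read from the device is needed (the usual pattern for a
 * freshly allocated block).  clrbuf() is assumed to be available
 * from buf.h; the function name and arguments are hypothetical.
 */
#ifdef notdef
example_newblock(dev, bn, size)
	dev_t dev;
	daddr_t bn;
	int size;
{
	register struct buf *bp;

	bp = getblk(dev, bn, size);	/* associate a buffer; no i/o started */
	clrbuf(bp);			/* zero the new block's contents */
	/* ... fill in bp->b_un.b_addr ... */
	bdwrite(bp);			/* schedule a delayed write */
}
#endif
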
314 /*
315  * Get an empty block,
316  * not assigned to any particular device.
317  */
318 struct buf *
319 geteblk(size)
320 	int size;
321 {
322 	register struct buf *bp, *flist;
323 
324 loop:
325 	bp = getnewbuf();
326 	bp->b_flags |= B_INVAL;
327 	bfree(bp);
328 	bremhash(bp);
329 	flist = &bfreelist[BQ_AGE];
330 	binshash(bp, flist);
331 	bp->b_dev = (dev_t)NODEV;
332 	bp->b_error = 0;
333 	if (brealloc(bp, size) == 0)
334 		goto loop;
335 	return(bp);
336 }
337 
338 /*
339  * Allocate space associated with a buffer.
340  * If can't get space, buffer is released
341  * If the space cannot be obtained, the buffer is released.
342 brealloc(bp, size)
343 	register struct buf *bp;
344 	int size;
345 {
346 	daddr_t start, last;
347 	register struct buf *ep;
348 	struct buf *dp;
349 	int s;
350 
351 	/*
352 	 * First we need to make sure that any overlapping previous I/O
353 	 * has been disposed of.
354 	 */
355 	if (size == bp->b_bcount)
356 		return (1);
357 	if (size < bp->b_bcount) {
358 		if (bp->b_flags & B_DELWRI) {
359 			bwrite(bp);
360 			return (0);
361 		}
362 		if (bp->b_flags & B_LOCKED)
363 			panic("brealloc");
364 		return (allocbuf(bp, size));
365 	}
366 	bp->b_flags &= ~B_DONE;
367 	if (bp->b_dev == NODEV)
368 		return (allocbuf(bp, size));
369 
370 	/*
371 	 * Search cache for any buffers that overlap the one that we
372 	 * are trying to allocate. Overlapping buffers must be marked
373 	 * invalid, after being written out if they are dirty (as indicated
374 	 * by B_DELWRI). A disk block must be mapped by at most one buffer
375 	 * at any point in time. Care must be taken to avoid deadlocking
376 	 * when two buffers are trying to get the same set of disk blocks.
377 	 */
378 	start = bp->b_blkno;
379 	last = start + (size / DEV_BSIZE) - 1;
380 	dp = BUFHASH(bp->b_dev, bp->b_blkno);
381 loop:
382 	for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
383 		if (ep == bp || ep->b_dev != bp->b_dev || (ep->b_flags&B_INVAL))
384 			continue;
385 		/* look for overlap */
386 		if (ep->b_bcount == 0 || ep->b_blkno > last ||
387 		    ep->b_blkno + (ep->b_bcount / DEV_BSIZE) <= start)
388 			continue;
389 		s = spl6();
390 		if (ep->b_flags&B_BUSY) {
391 			ep->b_flags |= B_WANTED;
392 			sleep((caddr_t)ep, PRIBIO+1);
393 			splx(s);
394 			goto loop;
395 		}
396 		splx(s);
397 		notavail(ep);
398 		if (ep->b_flags & B_DELWRI) {
399 			bwrite(ep);
400 			goto loop;
401 		}
402 		ep->b_flags |= B_INVAL;
403 		brelse(ep);
404 	}
405 	return (allocbuf(bp, size));
406 }
407 
408 /*
409  * Expand or contract the actual memory allocated to a buffer.
410  * If no memory is available, release the buffer and take the error exit.
411  */
412 allocbuf(tp, size)
413 	register struct buf *tp;
414 	int size;
415 {
416 	register struct buf *bp, *ep;
417 	int sizealloc, take;
418 #ifdef sun
419 	register char *a;
420 	int osize;
421 #endif
422 
423 #ifndef sun
424 	sizealloc = roundup(size, CLBYTES);
425 #else
426 	sizealloc = roundup(size, BUFALLOCSIZE);
427 #endif
428 	/*
429 	 * Buffer size does not change
430 	 */
431 	if (sizealloc == tp->b_bufsize)
432 		goto out;
433 #ifndef sun
434 	/*
435 	 * Buffer size is shrinking.
436 	 * Place excess space in a buffer header taken from the
437 	 * BQ_EMPTY buffer list and placed on the "most free" list.
438 	 * If no extra buffer headers are available, leave the
439 	 * extra space in the present buffer.
440 	 */
441 	if (sizealloc < tp->b_bufsize) {
442 		ep = bfreelist[BQ_EMPTY].av_forw;
443 		if (ep == &bfreelist[BQ_EMPTY])
444 			goto out;
445 		notavail(ep);
446 		pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
447 		    (int)tp->b_bufsize - sizealloc);
448 		ep->b_bufsize = tp->b_bufsize - sizealloc;
449 		tp->b_bufsize = sizealloc;
450 		ep->b_flags |= B_INVAL;
451 		ep->b_bcount = 0;
452 		brelse(ep);
453 		goto out;
454 	}
455 	/*
456 	 * More buffer space is needed. Get it out of buffers on
457 	 * the "most free" list, placing the empty headers on the
458 	 * BQ_EMPTY buffer header list.
459 	 */
460 	while (tp->b_bufsize < sizealloc) {
461 		take = sizealloc - tp->b_bufsize;
462 		bp = getnewbuf();
463 		if (take >= bp->b_bufsize)
464 			take = bp->b_bufsize;
465 		pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
466 		    &tp->b_un.b_addr[tp->b_bufsize], take);
467 		tp->b_bufsize += take;
468 		bp->b_bufsize = bp->b_bufsize - take;
469 		if (bp->b_bcount > bp->b_bufsize)
470 			bp->b_bcount = bp->b_bufsize;
471 		if (bp->b_bufsize <= 0) {
472 			bremhash(bp);
473 			binshash(bp, &bfreelist[BQ_EMPTY]);
474 			bp->b_dev = (dev_t)NODEV;
475 			bp->b_error = 0;
476 			bp->b_flags |= B_INVAL;
477 		}
478 		brelse(bp);
479 	}
480 #else
481 	/*
482 	 * Buffer size is shrinking
483 	 * Just put the tail end back in the map
484 	 */
485 	if (sizealloc < tp->b_bufsize) {
486 		rmfree(buffermap, (long)(tp->b_bufsize - sizealloc),
487 			(long)(tp->b_un.b_addr + sizealloc));
488 		tp->b_bufsize = sizealloc;
489 		goto out;
490 	}
491 	/*
492 	 * Buffer is being expanded or created
493 	 * If being expanded, attempt to get contiguous
494 	 * section, otherwise get a new chunk and copy.
495 	 * If no space, free up a buffer on the AGE list
496 	 * and try again.
497 	 */
498 	do {
499 		if ((osize = tp->b_bufsize)) {
500 			a = (char *)rmget(buffermap, (long)(sizealloc-osize),
501 				(long)(tp->b_un.b_addr + osize));
502 			if (a == 0) {
503 				a = (char *)rmalloc(buffermap, (long)sizealloc);
504 				if (a != 0) {
505 					bcopy(tp->b_un.b_addr, a, osize);
506 					rmfree(buffermap, (long)osize,
507 						(long)tp->b_un.b_addr);
508 					tp->b_un.b_addr = a;
509 				}
510 			}
511 		} else {
512 			a = (char *)rmalloc(buffermap, (long)sizealloc);
513 			if (a != 0)
514 				tp->b_un.b_addr = a;
515 		}
516 	} while (a == 0 && bfreemem());
517 	if (a == 0) {
518 		brelse(tp);
519 		return (0);
520 	}
521 	tp->b_bufsize = sizealloc;
522 #endif
523 out:
524 	tp->b_bcount = size;
525 	return (1);
526 }
527 
528 /*
529  * Release space associated with a buffer.
530  */
531 bfree(bp)
532 	struct buf *bp;
533 {
534 #ifdef sun
535 	if (bp->b_bufsize) {
536 		rmfree(buffermap, (long)bp->b_bufsize, (long)bp->b_un.b_addr);
537 		bp->b_bufsize = 0;
538 	}
539 #endif
540 	bp->b_bcount = 0;
541 }
542 
543 #ifdef sun
544 /*
545  * Attempt to free up buffer space by flushing
546  * something in the free list.
547  * Don't wait for anything; that could cause deadlocks.
548  * We start with BQ_AGE because we know BQ_EMPTY buffers take no memory.
549  */
550 bfreemem()
551 {
552 	register struct buf *bp, *dp;
553 	int s;
554 
555 loop:
556 	s = spl6();
557 	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
558 		if (dp->av_forw != dp)
559 			break;
560 	splx(s);
561 	if (dp == bfreelist) {		/* no free blocks */
562 		return (0);
563 	}
564 	bp = dp->av_forw;
565 	notavail(bp);
566 	if (bp->b_flags & B_DELWRI) {
567 		bp->b_flags |= B_ASYNC;
568 		bwrite(bp);
569 		goto loop;
570 	}
571 	trace(TR_BRELSE, bp->b_dev, bp->b_blkno);
572 	bp->b_flags = B_BUSY | B_INVAL;
573 	bfree(bp);
574 	bremhash(bp);
575 	binshash(bp, &bfreelist[BQ_EMPTY]);
576 	bp->b_dev = (dev_t)NODEV;
577 	bp->b_error = 0;
578 	brelse(bp);
579 	return (1);
580 }
581 #endif
582 
583 /*
584  * Find a buffer which is available for use.
585  * Select something from a free list.
586  * Preference is to AGE list, then LRU list.
587  */
588 struct buf *
589 getnewbuf()
590 {
591 	register struct buf *bp, *dp;
592 	int s;
593 
594 loop:
595 	s = spl6();
596 #ifndef sun
597 	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
598 #else
599 	for (dp = &bfreelist[BQ_EMPTY]; dp > bfreelist; dp--)
600 #endif
601 		if (dp->av_forw != dp)
602 			break;
603 	if (dp == bfreelist) {		/* no free blocks */
604 		dp->b_flags |= B_WANTED;
605 		sleep((caddr_t)dp, PRIBIO+1);
606 		goto loop;
607 	}
608 	splx(s);
609 	bp = dp->av_forw;
610 	notavail(bp);
611 	if (bp->b_flags & B_DELWRI) {
612 		bp->b_flags |= B_ASYNC;
613 		bwrite(bp);
614 		goto loop;
615 	}
616 	trace(TR_BRELSE, bp->b_dev, bp->b_blkno);
617 	bp->b_flags = B_BUSY;
618 	return (bp);
619 }
620 
621 /*
622  * Wait for I/O completion on the buffer; return errors
623  * to the user.
624  */
625 biowait(bp)
626 	register struct buf *bp;
627 {
628 	int s;
629 
630 	s = spl6();
631 	while ((bp->b_flags&B_DONE)==0)
632 		sleep((caddr_t)bp, PRIBIO);
633 	splx(s);
634 	u.u_error = geterror(bp);
635 }
636 
637 /*
638  * Mark I/O complete on a buffer. If the header
639  * indicates a dirty page push completion, the
640  * header is inserted into the ``cleaned'' list
641  * to be processed by the pageout daemon. Otherwise
642  * release it if I/O is asynchronous, and wake
643  * up anyone waiting for it.
644  */
645 biodone(bp)
646 	register struct buf *bp;
647 {
648 	register int s;
649 
650 	if (bp->b_flags & B_DONE)
651 		panic("dup biodone");
652 	bp->b_flags |= B_DONE;
653 	if (bp->b_flags & B_DIRTY) {
654 		if (bp->b_flags & B_ERROR)
655 			panic("IO err in push");
656 		s = spl6();
657 		bp->av_forw = bclnlist;
658 		bp->b_bcount = swsize[bp - swbuf];
659 		bp->b_pfcent = swpf[bp - swbuf];
660 		cnt.v_pgout++;
661 		cnt.v_pgpgout += bp->b_bcount / NBPG;
662 		bclnlist = bp;
663 		if (bswlist.b_flags & B_WANTED)
664 			wakeup((caddr_t)&proc[2]);
665 		splx(s);
666 		return;
667 	}
668 	if (bp->b_flags & B_CALL) {
669 		bp->b_flags &= ~B_CALL;
670 		(*bp->b_iodone)(bp);
671 		return;
672 	}
673 	if (bp->b_flags&B_ASYNC)
674 		brelse(bp);
675 	else {
676 		bp->b_flags &= ~B_WANTED;
677 		wakeup((caddr_t)bp);
678 	}
679 }
680 
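/*
 * Editor's sketch (not part of the original 4.2BSD source): how an
 * asynchronous caller can use the B_CALL flag and b_iodone field
 * tested by biodone() above to get a callback at i/o completion
 * instead of sleeping in biowait().  The buffer is assumed to be
 * already set up (b_dev, b_blkno, b_bcount); the names example_start
 * and example_done are hypothetical.
 */
#ifdef notdef
int	example_done();

example_start(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_READ|B_ASYNC|B_CALL;
	bp->b_iodone = example_done;	/* biodone() will call this */
	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	/* no biowait(): example_done(bp) runs when the transfer completes */
}
#endif
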
681 /*
682  * Ensure that no part of a specified block is in an incore buffer.
683  */
684 blkflush(dev, blkno, size)
685 	dev_t dev;
686 	daddr_t blkno;
687 	long size;
688 {
689 	register struct buf *ep;
690 	struct buf *dp;
691 	daddr_t start, last;
692 	int s;
693 
694 	start = blkno;
695 	last = start + (size / DEV_BSIZE) - 1;
696 	dp = BUFHASH(dev, blkno);
697 loop:
698 	for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
699 		if (ep->b_dev != dev || (ep->b_flags&B_INVAL))
700 			continue;
701 		/* look for overlap */
702 		if (ep->b_bcount == 0 || ep->b_blkno > last ||
703 		    ep->b_blkno + (ep->b_bcount / DEV_BSIZE) <= start)
704 			continue;
705 		s = spl6();
706 		if (ep->b_flags&B_BUSY) {
707 			ep->b_flags |= B_WANTED;
708 			sleep((caddr_t)ep, PRIBIO+1);
709 			splx(s);
710 			goto loop;
711 		}
712 		if (ep->b_flags & B_DELWRI) {
713 			splx(s);
714 			notavail(ep);
715 			bwrite(ep);
716 			goto loop;
717 		}
718 		splx(s);
719 	}
720 }
721 
722 /*
723  * Make sure all write-behind (B_DELWRI) blocks
724  * on dev (or on all devices if dev is NODEV)
725  * are flushed out.
726  * (Called from umount and update,
727  * and temporarily from pagein.)
728  */
729 bflush(dev)
730 	dev_t dev;
731 {
732 	register struct buf *bp;
733 	register struct buf *flist;
734 	int s;
735 
736 loop:
737 	s = spl6();
738 	for (flist = bfreelist; flist < &bfreelist[BQ_EMPTY]; flist++)
739 		for (bp = flist->av_forw; bp != flist; bp = bp->av_forw) {
740 			if ((bp->b_flags & B_DELWRI) == 0)
741 				continue;
742 			if (dev == NODEV || dev == bp->b_dev) {
743 				bp->b_flags |= B_ASYNC;
744 				notavail(bp);
745 				bwrite(bp);
746 				goto loop;
747 			}
748 		}
749 	splx(s);
750 }
751 
752 /*
753  * Pick up the device's error number and pass it to the user;
754  * if there is an error but the number is 0 set a generalized
755  * code.  Actually the latter is always true because devices
756  * don't yet return specific errors.
757  */
758 geterror(bp)
759 	register struct buf *bp;
760 {
761 	int error = 0;
762 
763 	if (bp->b_flags&B_ERROR)
764 		if ((error = bp->b_error)==0)
765 			return (EIO);
766 	return (error);
767 }
768 
769 /*
770  * Invalidate in-core blocks belonging to a closed or unmounted filesystem.
771  *
772  * This is not nicely done at all - the buffer ought to be removed from the
773  * hash chains & have its dev/blkno fields clobbered, but unfortunately we
774  * can't do that here, as it is quite possible that the block is still
775  * being used for i/o. Eventually, all disc drivers should be forced to
776  * have a close routine, which ought to ensure that the queue is empty, then
777  * properly flush the queues. Until that happy day, this suffices for
778  * correctness.						... kre
779  */
780 binval(dev)
781 	dev_t dev;
782 {
783 	register struct buf *bp;
784 	register struct bufhd *hp;
785 #define dp ((struct buf *)hp)
786 
787 	for (hp = bufhash; hp < &bufhash[BUFHSZ]; hp++)
788 		for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
789 			if (bp->b_dev == dev)
790 				bp->b_flags |= B_INVAL;
791 }
792