/*	vfs_cluster.c	4.21	81/05/08	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/buf.h"
#include "../h/conf.h"
#include "../h/proc.h"
#include "../h/seg.h"
#include "../h/pte.h"
#include "../h/vm.h"
#include "../h/trace.h"

/*
 * The following several routines allocate and free
 * buffers with various side effects.  In general the
 * arguments to an allocate routine are a device and
 * a block number, and the value is a pointer to
 * the buffer header; the buffer is marked "busy"
 * so that no one else can touch it.  If the block was
 * already in core, no I/O need be done; if it is
 * already busy, the process waits until it becomes free.
 * The following routines allocate a buffer:
 *	getblk
 *	bread
 *	breada
 *	baddr	(if it is incore)
 * Eventually the buffer must be released, possibly with the
 * side effect of writing it out, by using one of
 *	bwrite
 *	bdwrite
 *	bawrite
 *	brelse
 */

struct	buf bfreelist[BQUEUES];
struct	buf bswlist, *bclnlist;

#define	BUFHSZ	63
struct	bufhd bufhash[BUFHSZ];
#define	BUFHASH(dev, dblkno)	\
	((struct buf *)&bufhash[((int)(dev)+(int)(dblkno)) % BUFHSZ])

/*
 * Initialize hash links for buffers.
 */
bhinit()
{
	register int i;
	register struct bufhd *bp;

	for (bp = bufhash, i = 0; i < BUFHSZ; i++, bp++)
		bp->b_forw = bp->b_back = (struct buf *)bp;
}

/* #define	DISKMON	1 */

#ifdef	DISKMON
struct {
	int	nbuf;
	long	nread;
	long	nreada;
	long	ncache;
	long	nwrite;
	long	bufcount[64];
} io_info;
#endif

/*
 * Swap IO headers -
 * They contain the necessary information for the swap I/O.
 * At any given time, a swap header can be in one of three
 * different lists.  When free it is in the free list,
 * when allocated and the I/O queued, it is on the swap
 * device list, and finally, if the operation was a dirty
 * page push, when the I/O completes, it is inserted
 * in a list of cleaned pages to be processed by the pageout daemon.
 */
struct	buf *swbuf;
short	*swsize;		/* CAN WE JUST USE B_BCOUNT? */
int	*swpf;


#ifndef	UNFAST
#define	notavail(bp) \
{ \
	int s = spl6(); \
	(bp)->av_back->av_forw = (bp)->av_forw; \
	(bp)->av_forw->av_back = (bp)->av_back; \
	(bp)->b_flags |= B_BUSY; \
	splx(s); \
}
#endif
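/*
 * Illustrative note (not part of the original source): each bufhash
 * slot heads a doubly linked ring of buffers, so a cache lookup is a
 * walk of the chain selected by BUFHASH, as incore() and getblk()
 * below both do:
 *
 *	dp = BUFHASH(dev, dblkno);
 *	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
 *		if (bp->b_blkno == dblkno && bp->b_dev == dev)
 *			...  cache hit  ...
 */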
/*
 * Read in (if necessary) the block and return a buffer pointer.
 */
struct buf *
bread(dev, blkno)
	dev_t dev;
	daddr_t blkno;
{
	register struct buf *bp;

	bp = getblk(dev, blkno);
	if (bp->b_flags&B_DONE) {
#ifdef	TRACE
		trace(TR_BREADHIT, dev, blkno);
#endif
#ifdef	DISKMON
		io_info.ncache++;
#endif
		return(bp);
	}
	bp->b_flags |= B_READ;
	bp->b_bcount = BSIZE;
	(*bdevsw[major(dev)].d_strategy)(bp);
#ifdef	TRACE
	trace(TR_BREADMISS, dev, blkno);
#endif
#ifdef	DISKMON
	io_info.nread++;
#endif
	u.u_vm.vm_inblk++;		/* pay for read */
	iowait(bp);
	return(bp);
}

/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller).
 */
struct buf *
breada(dev, blkno, rablkno)
	dev_t dev;
	daddr_t blkno, rablkno;
{
	register struct buf *bp, *rabp;

	bp = NULL;
	if (!incore(dev, blkno)) {
		bp = getblk(dev, blkno);
		if ((bp->b_flags&B_DONE) == 0) {
			bp->b_flags |= B_READ;
			bp->b_bcount = BSIZE;
			(*bdevsw[major(dev)].d_strategy)(bp);
#ifdef	TRACE
			trace(TR_BREADMISS, dev, blkno);
#endif
#ifdef	DISKMON
			io_info.nread++;
#endif
			u.u_vm.vm_inblk++;	/* pay for read */
		}
#ifdef	TRACE
		else
			trace(TR_BREADHIT, dev, blkno);
#endif
	}
	if (rablkno && !incore(dev, rablkno)) {
		rabp = getblk(dev, rablkno);
		if (rabp->b_flags & B_DONE) {
			brelse(rabp);
#ifdef	TRACE
			trace(TR_BREADHITRA, dev, rablkno);
#endif
		} else {
			rabp->b_flags |= B_READ|B_ASYNC;
			rabp->b_bcount = BSIZE;
			(*bdevsw[major(dev)].d_strategy)(rabp);
#ifdef	TRACE
			trace(TR_BREADMISSRA, dev, rablkno);
#endif
#ifdef	DISKMON
			io_info.nreada++;
#endif
			u.u_vm.vm_inblk++;	/* pay in advance */
		}
	}
	if (bp == NULL)
		return (bread(dev, blkno));
	iowait(bp);
	return(bp);
}

/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
	register struct buf *bp;
{
	register flag;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI | B_AGE);
	bp->b_bcount = BSIZE;
#ifdef	DISKMON
	io_info.nwrite++;
#endif
	if ((flag&B_DELWRI) == 0)
		u.u_vm.vm_oublk++;	/* no one paid yet */
#ifdef	TRACE
	trace(TR_BWRITE, bp->b_dev, dbtofsb(bp->b_blkno));
#endif
	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	if ((flag&B_ASYNC) == 0) {
		iowait(bp);
		brelse(bp);
	} else if (flag & B_DELWRI)
		bp->b_flags |= B_AGE;
	else
		geterror(bp);
}

/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * This can't be done for magtape, since writes must be done
 * in the same order as requested.
 */
bdwrite(bp)
	register struct buf *bp;
{
	register int flags;

	if ((bp->b_flags&B_DELWRI) == 0)
		u.u_vm.vm_oublk++;	/* no one paid yet */
	flags = bdevsw[major(bp->b_dev)].d_flags;
	if (flags & B_TAPE)
		bawrite(bp);
	else {
		bp->b_flags |= B_DELWRI | B_DONE;
		brelse(bp);
	}
}
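/*
 * Illustrative sketch (assumed caller, not from this file): a file
 * system writing part of a block rereads it, modifies it in core, and
 * uses bdwrite so that several small writes to the same block cost at
 * most one disk write when the buffer is finally reclaimed or flushed:
 *
 *	bp = bread(dev, bn);
 *	...  modify part of bp->b_un.b_addr  ...
 *	bdwrite(bp);
 */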
/*
 * Release the buffer, start I/O on it, but don't wait for completion.
 */
bawrite(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_ASYNC;
	bwrite(bp);
}

/*
 * Release the buffer, with no I/O implied.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	register s;

	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags&B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	if (bp->b_flags&B_ERROR)
		if (bp->b_flags & B_LOCKED)
			bp->b_flags &= ~B_ERROR;	/* try again later */
		else
			bp->b_dev = NODEV;		/* no assoc */
	s = spl6();
	if (bp->b_flags & (B_ERROR|B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQUEUES-1];
		flist->av_forw->av_back = bp;
		bp->av_forw = flist->av_forw;
		flist->av_forw = bp;
		bp->av_back = flist;
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		flist->av_back->av_forw = bp;
		bp->av_back = flist->av_back;
		flist->av_back = bp;
		bp->av_forw = flist;
	}
	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE);
	splx(s);
}

/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada).
 */
incore(dev, blkno)
	dev_t dev;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;
	register int dblkno = fsbtodb(blkno);

	dp = BUFHASH(dev, dblkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_blkno == dblkno && bp->b_dev == dev &&
		    !(bp->b_flags & B_INVAL))
			return (1);
	return (0);
}

/*
 * Return the block's buffer if it is already in core, else 0;
 * no I/O is started if the block is not cached.
 */
struct buf *
baddr(dev, blkno)
	dev_t dev;
	daddr_t blkno;
{

	if (incore(dev, blkno))
		return (bread(dev, blkno));
	return (0);
}
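/*
 * Illustrative note (not part of the original source): brelse above
 * files reusable buffers on one of three queues (BQ_LOCKED, BQ_AGE,
 * BQ_LRU), and the allocators below reclaim from the highest-numbered
 * nonempty queue, never scanning bfreelist[0] itself;
 * bfreelist[0].b_flags&B_WANTED doubles as the "any free buffer at
 * all" sleep channel that brelse wakes up.
 */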
/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 */
struct buf *
getblk(dev, blkno)
	dev_t dev;
	daddr_t blkno;
{
	register struct buf *bp, *dp, *ep;
	register int dblkno = fsbtodb(blkno);
#ifdef	DISKMON
	register int i;
#endif

	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-PGSHIFT))
		blkno = 1 << ((sizeof(int)*NBBY-PGSHIFT) + 1);
	dblkno = fsbtodb(blkno);
	dp = BUFHASH(dev, dblkno);
loop:
	(void) spl0();
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_blkno != dblkno || bp->b_dev != dev ||
		    bp->b_flags&B_INVAL)
			continue;
		(void) spl6();
		if (bp->b_flags&B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO+1);
			goto loop;
		}
		(void) spl0();
#ifdef	DISKMON
		i = 0;
		dp = bp->av_forw;
		while ((dp->b_flags & B_HEAD) == 0) {
			i++;
			dp = dp->av_forw;
		}
		if (i<64)
			io_info.bufcount[i]++;
#endif
		notavail(bp);
		bp->b_flags |= B_CACHE;
		return(bp);
	}
	if (major(dev) >= nblkdev)
		panic("blkdev");
	(void) spl6();
	for (ep = &bfreelist[BQUEUES-1]; ep > bfreelist; ep--)
		if (ep->av_forw != ep)
			break;
	if (ep == bfreelist) {		/* no free blocks at all */
		ep->b_flags |= B_WANTED;
		sleep((caddr_t)ep, PRIBIO+1);
		goto loop;
	}
	(void) spl0();
	bp = ep->av_forw;
	notavail(bp);
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags |= B_ASYNC;
		bwrite(bp);
		goto loop;
	}
#ifdef	TRACE
	trace(TR_BRELSE, bp->b_dev, dbtofsb(bp->b_blkno));
#endif
	bp->b_flags = B_BUSY;
	bp->b_back->b_forw = bp->b_forw;
	bp->b_forw->b_back = bp->b_back;
	bp->b_forw = dp->b_forw;
	bp->b_back = dp;
	dp->b_forw->b_back = bp;
	dp->b_forw = bp;
	bp->b_dev = dev;
	bp->b_blkno = dblkno;
	return(bp);
}

/*
 * get an empty block,
 * not assigned to any particular device
 */
struct buf *
geteblk()
{
	register struct buf *bp, *dp;

loop:
	(void) spl6();
	for (dp = &bfreelist[BQUEUES-1]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO+1);
		goto loop;
	}
	(void) spl0();
	bp = dp->av_forw;
	notavail(bp);
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags |= B_ASYNC;
		bwrite(bp);
		goto loop;
	}
#ifdef	TRACE
	trace(TR_BRELSE, bp->b_dev, dbtofsb(bp->b_blkno));
#endif
	bp->b_flags = B_BUSY|B_INVAL;
	bp->b_back->b_forw = bp->b_forw;
	bp->b_forw->b_back = bp->b_back;
	bp->b_forw = dp->b_forw;
	bp->b_back = dp;
	dp->b_forw->b_back = bp;
	dp->b_forw = bp;
	bp->b_dev = (dev_t)NODEV;
	return(bp);
}

/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
iowait(bp)
	register struct buf *bp;
{

	(void) spl6();
	while ((bp->b_flags&B_DONE)==0)
		sleep((caddr_t)bp, PRIBIO);
	(void) spl0();
	geterror(bp);
}

#ifdef	UNFAST
/*
 * Unlink a buffer from the available list and mark it busy.
 * (internal interface)
 */
notavail(bp)
	register struct buf *bp;
{
	register s;

	s = spl6();
	bp->av_back->av_forw = bp->av_forw;
	bp->av_forw->av_back = bp->av_back;
	bp->b_flags |= B_BUSY;
	splx(s);
}
#endif
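/*
 * Illustrative sketch (assumed caller, not from this file): geteblk
 * is the way to obtain scratch core with no disk identity; a caller
 * wanting a known-zero block might do
 *
 *	bp = geteblk();
 *	clrbuf(bp);
 *	...  use bp->b_un.b_addr as a BSIZE-byte scratch area  ...
 *	brelse(bp);
 *
 * Since geteblk marks the buffer B_INVAL, brelse puts it back at the
 * front of the most free list.
 */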
/*
 * Mark I/O complete on a buffer.  If the header
 * indicates a dirty page push completion, the
 * header is inserted into the ``cleaned'' list
 * to be processed by the pageout daemon.  Otherwise
 * release it if I/O is asynchronous, and wake
 * up anyone waiting for it.
 */
iodone(bp)
	register struct buf *bp;
{
	register int s;

	if (bp->b_flags & B_DONE)
		panic("dup iodone");
	bp->b_flags |= B_DONE;
	if (bp->b_flags & B_DIRTY) {
		if (bp->b_flags & B_ERROR)
			panic("IO err in push");
		s = spl6();
		bp->av_forw = bclnlist;
		bp->b_bcount = swsize[bp - swbuf];
		bp->b_pfcent = swpf[bp - swbuf];
		cnt.v_pgout++;
		cnt.v_pgpgout += bp->b_bcount / NBPG;
		bclnlist = bp;
		if (bswlist.b_flags & B_WANTED)
			wakeup((caddr_t)&proc[2]);
		splx(s);
		return;
	}
	if (bp->b_flags&B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}

/*
 * Zero the core associated with a buffer.
 */
clrbuf(bp)
	struct buf *bp;
{
	register *p;
	register c;

	p = bp->b_un.b_words;
	c = BSIZE/sizeof(int);
	do
		*p++ = 0;
	while (--c);
	bp->b_resid = 0;
}
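/*
 * Illustrative note (not part of the original source): clrbuf clears
 * a word at a time, so it assumes BSIZE is a multiple of sizeof(int);
 * with the 1024-byte BSIZE and 4-byte ints of the VAX this system ran
 * on, that is 1024/4 = 256 iterations of the do-while loop.
 */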
/*
 * swap I/O -
 *
 * If the flag indicates a dirty page push initiated
 * by the pageout daemon, we map the page into the i'th
 * virtual page of process 2 (the daemon itself), where i is
 * the index of the swap header that has been allocated.
 * We simply initialize the header and queue the I/O but
 * do not wait for completion.  When the I/O completes,
 * iodone() will link the header to a list of cleaned
 * pages to be processed by the pageout daemon.
 */
swap(p, dblkno, addr, nbytes, rdflg, flag, dev, pfcent)
	struct proc *p;
	swblk_t dblkno;
	caddr_t addr;
	int rdflg, flag, nbytes;
	dev_t dev;
	unsigned pfcent;
{
	register struct buf *bp;
	register int c;
	int p2dp;
	register struct pte *dpte, *vpte;

	(void) spl6();
	while (bswlist.av_forw == NULL) {
		bswlist.b_flags |= B_WANTED;
		sleep((caddr_t)&bswlist, PSWP+1);
	}
	bp = bswlist.av_forw;
	bswlist.av_forw = bp->av_forw;
	(void) spl0();

	bp->b_flags = B_BUSY | B_PHYS | rdflg | flag;
	if ((bp->b_flags & (B_DIRTY|B_PGIN)) == 0)
		if (rdflg == B_READ)
			sum.v_pswpin += btoc(nbytes);
		else
			sum.v_pswpout += btoc(nbytes);
	bp->b_proc = p;
	if (flag & B_DIRTY) {
		p2dp = ((bp - swbuf) * CLSIZE) * KLMAX;
		dpte = dptopte(&proc[2], p2dp);
		vpte = vtopte(p, btop(addr));
		for (c = 0; c < nbytes; c += NBPG) {
			if (vpte->pg_pfnum == 0 || vpte->pg_fod)
				panic("swap bad pte");
			*dpte++ = *vpte++;
		}
		bp->b_un.b_addr = (caddr_t)ctob(p2dp);
	} else
		bp->b_un.b_addr = addr;
	while (nbytes > 0) {
		c = imin(ctob(120), nbytes);
		bp->b_bcount = c;
		bp->b_blkno = dblkno;
		bp->b_dev = dev;
		if (flag & B_DIRTY) {
			swpf[bp - swbuf] = pfcent;
			swsize[bp - swbuf] = nbytes;
		}
		(*bdevsw[major(dev)].d_strategy)(bp);
		if (flag & B_DIRTY) {
			if (c < nbytes)
				panic("big push");
			return;
		}
		(void) spl6();
		while ((bp->b_flags&B_DONE)==0)
			sleep((caddr_t)bp, PSWP);
		(void) spl0();
		bp->b_un.b_addr += c;
		bp->b_flags &= ~B_DONE;
		if (bp->b_flags & B_ERROR) {
			if ((flag & (B_UAREA|B_PAGET)) || rdflg == B_WRITE)
				panic("hard IO err in swap");
			swkill(p, (char *)0);
		}
		nbytes -= c;
		dblkno += btoc(c);
	}
	(void) spl6();
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY);
	bp->av_forw = bswlist.av_forw;
	bswlist.av_forw = bp;
	if (bswlist.b_flags & B_WANTED) {
		bswlist.b_flags &= ~B_WANTED;
		wakeup((caddr_t)&bswlist);
		wakeup((caddr_t)&proc[2]);
	}
	(void) spl0();
}

/*
 * If rout == 0 then killed on swap error, else
 * rout is the name of the routine where we ran out of
 * swap space.
 */
swkill(p, rout)
	struct proc *p;
	char *rout;
{
	char *mesg;

	printf("pid %d: ", p->p_pid);
	if (rout)
		printf(mesg = "killed due to no swap space\n");
	else
		printf(mesg = "killed on swap error\n");
	uprintf("sorry, pid %d was %s", p->p_pid, mesg);
	/*
	 * To be sure no looping (e.g. in vmsched trying to
	 * swap out) mark process locked in core (as though
	 * done by user) after killing it so no one will try
	 * to swap it out.
	 */
	psignal(p, SIGKILL);
	p->p_flag |= SULOCK;
}

/*
 * Make sure all write-behind blocks
 * on dev (or NODEV for all)
 * are flushed out.
 * (from umount and update)
 */
bflush(dev)
	dev_t dev;
{
	register struct buf *bp;
	register struct buf *flist;

loop:
	(void) spl6();
	for (flist = bfreelist; flist < &bfreelist[BQUEUES]; flist++)
	for (bp = flist->av_forw; bp != flist; bp = bp->av_forw) {
		if (bp->b_flags&B_DELWRI && (dev == NODEV||dev==bp->b_dev)) {
			bp->b_flags |= B_ASYNC;
			notavail(bp);
			bwrite(bp);
			goto loop;
		}
	}
	(void) spl0();
}

/*
 * Raw I/O.  The arguments are
 *	The strategy routine for the device
 *	A buffer, which will always be a special buffer
 *	  header owned exclusively by the device for this purpose
 *	The device number
 *	Read/write flag
 *	A routine which bounds the transfer count (e.g. minphys)
 * Essentially all the work is computing physical addresses and
 * validating them.
 * If the user has the proper access privileges, the process is
 * marked 'delayed unlock' and the pages involved in the I/O are
 * faulted and locked.  After the completion of the I/O, the above pages
 * are unlocked.
 */
physio(strat, bp, dev, rw, mincnt)
	int (*strat)();
	register struct buf *bp;
	dev_t dev;
	int rw;
	unsigned (*mincnt)();
{
	register int c;
	char *a;

	if (useracc(u.u_base,u.u_count,rw==B_READ?B_WRITE:B_READ) == NULL) {
		u.u_error = EFAULT;
		return;
	}
	(void) spl6();
	while (bp->b_flags&B_BUSY) {
		bp->b_flags |= B_WANTED;
		sleep((caddr_t)bp, PRIBIO+1);
	}
	bp->b_error = 0;
	bp->b_proc = u.u_procp;
	bp->b_un.b_addr = u.u_base;
	while (u.u_count != 0) {
		bp->b_flags = B_BUSY | B_PHYS | rw;
		bp->b_dev = dev;
		bp->b_blkno = u.u_offset >> PGSHIFT;
		bp->b_bcount = u.u_count;
		(*mincnt)(bp);
		c = bp->b_bcount;
		u.u_procp->p_flag |= SPHYSIO;
		vslock(a = bp->b_un.b_addr, c);
		(*strat)(bp);
		(void) spl6();
		while ((bp->b_flags&B_DONE) == 0)
			sleep((caddr_t)bp, PRIBIO);
		vsunlock(a, c, rw);
		u.u_procp->p_flag &= ~SPHYSIO;
		if (bp->b_flags&B_WANTED)
			wakeup((caddr_t)bp);
		(void) spl0();
		bp->b_un.b_addr += c;
		u.u_count -= c;
		u.u_offset += c;
		if (bp->b_flags&B_ERROR)
			break;
	}
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS);
	u.u_count = bp->b_resid;
	geterror(bp);
}

/*ARGSUSED*/
unsigned
minphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > 60 * 1024)
		bp->b_bcount = 60 * 1024;
}
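/*
 * Illustrative sketch (hypothetical driver, not from this file): a
 * character-device read entry passes its strategy routine, a private
 * buffer header, and minphys to physio; the names xxread, xxstrategy
 * and rxxbuf are made up for the example:
 *
 *	xxread(dev)
 *		dev_t dev;
 *	{
 *
 *		physio(xxstrategy, &rxxbuf, dev, B_READ, minphys);
 *	}
 */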
/*
 * Pick up the device's error number and pass it to the user;
 * if there is an error but the number is 0 set a generalized
 * code.  Actually the latter is always true because devices
 * don't yet return specific errors.
 */
geterror(bp)
	register struct buf *bp;
{

	if (bp->b_flags&B_ERROR)
		if ((u.u_error = bp->b_error)==0)
			u.u_error = EIO;
}

/*
 * Invalidate in core blocks belonging to closed or umounted filesystem
 *
 * This is not nicely done at all - the buffer ought to be removed from the
 * hash chains & have its dev/blkno fields clobbered, but unfortunately we
 * can't do that here, as it is quite possible that the block is still
 * being used for i/o.  Eventually, all disc drivers should be forced to
 * have a close routine, which ought to ensure that the queue is empty, then
 * properly flush the queues.  Until that happy day, this suffices for
 * correctness.		... kre
 */
binval(dev)
	dev_t dev;
{
	register struct buf *bp;
	register struct bufhd *hp;
#define	dp ((struct buf *)hp)

	for (hp = bufhash; hp < &bufhash[BUFHSZ]; hp++)
		for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
			if (bp->b_dev == dev)
				bp->b_flags |= B_INVAL;
}
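/*
 * Illustrative sketch (assumed caller, not from this file): at umount
 * time bflush() and binval() are used together, first pushing out the
 * delayed writes and then invalidating whatever is left in the cache:
 *
 *	bflush(dev);
 *	binval(dev);
 */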