/*	vfs_cluster.c	4.17	81/03/09	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/buf.h"
#include "../h/conf.h"
#include "../h/proc.h"
#include "../h/seg.h"
#include "../h/pte.h"
#include "../h/vm.h"
#include "../h/trace.h"

/*
 * The following several routines allocate and free
 * buffers with various side effects.  In general the
 * arguments to an allocate routine are a device and
 * a block number, and the value is a pointer to
 * the buffer header; the buffer is marked "busy"
 * so that no one else can touch it.  If the block was
 * already in core, no I/O need be done; if it is
 * already busy, the process waits until it becomes free.
 * The following routines allocate a buffer:
 *	getblk
 *	bread
 *	breada
 *	baddr	(if it is incore)
 * Eventually the buffer must be released, possibly with the
 * side effect of writing it out, by using one of
 *	bwrite
 *	bdwrite
 *	bawrite
 *	brelse
 */

struct buf bfreelist[BQUEUES];
struct buf bswlist, *bclnlist;

#define	BUFHSZ	63
struct bufhd bufhash[BUFHSZ];
#define	BUFHASH(dev, dblkno)	\
	((struct buf *)&bufhash[((int)(dev)+(int)(dblkno)) % BUFHSZ])

/*
 * Initialize hash links for buffers.
 */
bhinit()
{
	register int i;
	register struct bufhd *bp;

	for (bp = bufhash, i = 0; i < BUFHSZ; i++, bp++)
		bp->b_forw = bp->b_back = (struct buf *)bp;
}

/* #define	DISKMON	1 */

#ifdef	DISKMON
struct {
	int	nbuf;
	long	nread;
	long	nreada;
	long	ncache;
	long	nwrite;
	long	bufcount[64];
} io_info;
#endif

/*
 * Swap IO headers -
 * They contain the necessary information for the swap I/O.
 * At any given time, a swap header can be on one of three
 * different lists.  When free it is on the free list; when
 * allocated and the I/O queued, it is on the swap device list;
 * and finally, if the operation was a dirty page push, when
 * the I/O completes it is inserted on a list of cleaned pages
 * to be processed by the pageout daemon.
 */
struct buf *swbuf;
short *swsize;		/* CAN WE JUST USE B_BCOUNT? */
int *swpf;


#ifndef	UNFAST
#define	notavail(bp) \
{ \
	int s = spl6(); \
	(bp)->av_back->av_forw = (bp)->av_forw; \
	(bp)->av_forw->av_back = (bp)->av_back; \
	(bp)->b_flags |= B_BUSY; \
	splx(s); \
}
#endif

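/*
 * Illustrative sketch, kept under "#ifdef notdef" so it is never
 * compiled: the allocate/release protocol described above pairs
 * each allocation with exactly one release.  The routine name
 * "examineblk" and the consumer "process" are hypothetical.
 */
#ifdef notdef
examineblk(dev, blkno)
	dev_t dev;
	daddr_t blkno;
{
	register struct buf *bp;

	bp = bread(dev, blkno);			/* buffer comes back busy */
	if ((bp->b_flags&B_ERROR) == 0)
		process(bp->b_un.b_addr);	/* hypothetical consumer */
	brelse(bp);				/* hand buffer back to a free list */
}
#endif
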
/*
 * Read in (if necessary) the block and return a buffer pointer.
 */
struct buf *
bread(dev, blkno)
	dev_t dev;
	daddr_t blkno;
{
	register struct buf *bp;

	bp = getblk(dev, blkno);
	if (bp->b_flags&B_DONE) {
#ifdef	EPAWNJ
		trace(TR_BREAD|TR_HIT, dev, blkno);
#endif
#ifdef	DISKMON
		io_info.ncache++;
#endif
		return (bp);
	}
	bp->b_flags |= B_READ;
	bp->b_bcount = BSIZE;
	(*bdevsw[major(dev)].d_strategy)(bp);
#ifdef	EPAWNJ
	trace(TR_BREAD|TR_MISS, dev, blkno);
#endif
#ifdef	DISKMON
	io_info.nread++;
#endif
	u.u_vm.vm_inblk++;		/* pay for read */
	iowait(bp);
	return (bp);
}

/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller).
 */
struct buf *
breada(dev, blkno, rablkno)
	dev_t dev;
	daddr_t blkno, rablkno;
{
	register struct buf *bp, *rabp;

	bp = NULL;
	if (!incore(dev, blkno)) {
		bp = getblk(dev, blkno);
		if ((bp->b_flags&B_DONE) == 0) {
			bp->b_flags |= B_READ;
			bp->b_bcount = BSIZE;
			(*bdevsw[major(dev)].d_strategy)(bp);
#ifdef	EPAWNJ
			trace(TR_BREAD|TR_MISS, dev, blkno);
#endif
#ifdef	DISKMON
			io_info.nread++;
#endif
			u.u_vm.vm_inblk++;	/* pay for read */
		}
#ifdef	EPAWNJ
		else
			trace(TR_BREAD|TR_HIT, dev, blkno);
#endif
	}
	if (rablkno && !incore(dev, rablkno)) {
		rabp = getblk(dev, rablkno);
		if (rabp->b_flags & B_DONE) {
			brelse(rabp);
#ifdef	EPAWNJ
			trace(TR_BREAD|TR_HIT|TR_RA, dev, blkno);
#endif
		} else {
			rabp->b_flags |= B_READ|B_ASYNC;
			rabp->b_bcount = BSIZE;
			(*bdevsw[major(dev)].d_strategy)(rabp);
#ifdef	EPAWNJ
			trace(TR_BREAD|TR_MISS|TR_RA, dev, rablkno);
#endif
#ifdef	DISKMON
			io_info.nreada++;
#endif
			u.u_vm.vm_inblk++;	/* pay in advance */
		}
	}
	if (bp == NULL)
		return (bread(dev, blkno));
	iowait(bp);
	return (bp);
}

/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
	register struct buf *bp;
{
	register flag;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI | B_AGE);
	bp->b_bcount = BSIZE;
#ifdef	DISKMON
	io_info.nwrite++;
#endif
	if ((flag&B_DELWRI) == 0)
		u.u_vm.vm_oublk++;		/* no one paid yet */
#ifdef	EPAWNJ
	trace(TR_BWRITE, bp->b_dev, dbtofsb(bp->b_blkno));
#endif
	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	if ((flag&B_ASYNC) == 0) {
		iowait(bp);
		brelse(bp);
	} else if (flag & B_DELWRI)
		bp->b_flags |= B_AGE;
	else
		geterror(bp);
}

/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * This can't be done for magtape, since writes must be done
 * in the same order as requested.
 */
bdwrite(bp)
	register struct buf *bp;
{
	register int flags;

	if ((bp->b_flags&B_DELWRI) == 0)
		u.u_vm.vm_oublk++;		/* no one paid yet */
	flags = bdevsw[major(bp->b_dev)].d_flags;
	if (flags & B_TAPE)
		bawrite(bp);
	else {
		bp->b_flags |= B_DELWRI | B_DONE;
		brelse(bp);
	}
}

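/*
 * Illustrative sketch, never compiled: a sequential reader can
 * overlap I/O with computation by asking breada for the next
 * block each time around the loop.  "NBLK" and "consume" are
 * hypothetical.
 */
#ifdef notdef
readfile(dev)
	dev_t dev;
{
	register struct buf *bp;
	register daddr_t b;

	for (b = 0; b < NBLK; b++) {
		bp = breada(dev, b, b + 1);	/* start read-ahead on b+1 */
		consume(bp->b_un.b_addr);	/* hypothetical consumer */
		brelse(bp);
	}
}
#endif
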
/*
 * Release the buffer, start I/O on it, but don't wait for completion.
 */
bawrite(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_ASYNC;
	bwrite(bp);
}

/*
 * Release the buffer, with no I/O implied.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	register s;

	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags&B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	if (bp->b_flags&B_ERROR)
		if (bp->b_flags & B_LOCKED)
			bp->b_flags &= ~B_ERROR;	/* try again later */
		else
			bp->b_dev = NODEV;		/* no assoc */
	s = spl6();
	if (bp->b_flags & (B_ERROR|B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQUEUES-1];
		flist->av_forw->av_back = bp;
		bp->av_forw = flist->av_forw;
		flist->av_forw = bp;
		bp->av_back = flist;
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		flist->av_back->av_forw = bp;
		bp->av_back = flist->av_back;
		flist->av_back = bp;
		bp->av_forw = flist;
	}
	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE);
	splx(s);
}

/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada).
 */
incore(dev, blkno)
	dev_t dev;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;
	register int dblkno = fsbtodb(blkno);

	dp = BUFHASH(dev, dblkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_blkno == dblkno && bp->b_dev == dev &&
		    !(bp->b_flags & B_INVAL))
			return (1);
	return (0);
}

/*
 * Return the block only if it is already in core (the bread
 * will then be a cache hit); otherwise return 0 without
 * doing any I/O.
 */
struct buf *
baddr(dev, blkno)
	dev_t dev;
	daddr_t blkno;
{

	if (incore(dev, blkno))
		return (bread(dev, blkno));
	return (0);
}

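/*
 * Illustrative sketch, never compiled: a partial-block write
 * reads the old block, overlays part of it, and uses bdwrite
 * so that the follow-up write the comment above anticipates
 * can be absorbed in core.  "cp", "off", and "n" are hypothetical.
 */
#ifdef notdef
partialwrite(dev, blkno, cp, off, n)
	dev_t dev;
	daddr_t blkno;
	caddr_t cp;
	int off, n;
{
	register struct buf *bp;

	bp = bread(dev, blkno);			/* old contents needed */
	bcopy(cp, bp->b_un.b_addr + off, n);	/* overlay the new bytes */
	bdwrite(bp);				/* write-behind release */
}
#endif
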
/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 */
struct buf *
getblk(dev, blkno)
	dev_t dev;
	daddr_t blkno;
{
	register struct buf *bp, *dp, *ep;
	register int dblkno = fsbtodb(blkno);
#ifdef	DISKMON
	register int i;
#endif

	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-PGSHIFT))
		blkno = 1 << ((sizeof(int)*NBBY-PGSHIFT) + 1);
	dblkno = fsbtodb(blkno);
	dp = BUFHASH(dev, dblkno);
loop:
	(void) spl0();
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_blkno != dblkno || bp->b_dev != dev ||
		    bp->b_flags&B_INVAL)
			continue;
		(void) spl6();
		if (bp->b_flags&B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO+1);
			goto loop;
		}
		(void) spl0();
#ifdef	DISKMON
		i = 0;
		dp = bp->av_forw;
		while ((dp->b_flags & B_HEAD) == 0) {
			i++;
			dp = dp->av_forw;
		}
		if (i < 64)
			io_info.bufcount[i]++;
#endif
		notavail(bp);
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	if (major(dev) >= nblkdev)
		panic("blkdev");
	(void) spl6();
	for (ep = &bfreelist[BQUEUES-1]; ep > bfreelist; ep--)
		if (ep->av_forw != ep)
			break;
	if (ep == bfreelist) {		/* no free blocks at all */
		ep->b_flags |= B_WANTED;
		sleep((caddr_t)ep, PRIBIO+1);
		goto loop;
	}
	(void) spl0();
	bp = ep->av_forw;
	notavail(bp);
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags |= B_ASYNC;
		bwrite(bp);
		goto loop;
	}
#ifdef	EPAWNJ
	trace(TR_BRELSE, bp->b_dev, dbtofsb(bp->b_blkno));
#endif
	bp->b_flags = B_BUSY;
	bp->b_back->b_forw = bp->b_forw;
	bp->b_forw->b_back = bp->b_back;
	bp->b_forw = dp->b_forw;
	bp->b_back = dp;
	dp->b_forw->b_back = bp;
	dp->b_forw = bp;
	bp->b_dev = dev;
	bp->b_blkno = dblkno;
	return (bp);
}

/*
 * Get an empty block,
 * not assigned to any particular device.
 */
struct buf *
geteblk()
{
	register struct buf *bp, *dp;

loop:
	(void) spl6();
	for (dp = &bfreelist[BQUEUES-1]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO+1);
		goto loop;
	}
	(void) spl0();
	bp = dp->av_forw;
	notavail(bp);
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags |= B_ASYNC;
		bwrite(bp);
		goto loop;
	}
#ifdef	EPAWNJ
	trace(TR_BRELSE, bp->b_dev, dbtofsb(bp->b_blkno));
#endif
	bp->b_flags = B_BUSY|B_INVAL;
	bp->b_back->b_forw = bp->b_forw;
	bp->b_forw->b_back = bp->b_back;
	bp->b_forw = dp->b_forw;
	bp->b_back = dp;
	dp->b_forw->b_back = bp;
	dp->b_forw = bp;
	bp->b_dev = (dev_t)NODEV;
	return (bp);
}

/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
iowait(bp)
	register struct buf *bp;
{

	(void) spl6();
	while ((bp->b_flags&B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	(void) spl0();
	geterror(bp);
}

#ifdef	UNFAST
/*
 * Unlink a buffer from the available list and mark it busy.
 * (internal interface)
 */
notavail(bp)
	register struct buf *bp;
{
	register s;

	s = spl6();
	bp->av_back->av_forw = bp->av_forw;
	bp->av_forw->av_back = bp->av_back;
	bp->b_flags |= B_BUSY;
	splx(s);
}
#endif

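/*
 * Illustrative sketch, never compiled: geteblk yields a scratch
 * buffer bound to no device, which clrbuf (below) can zero.
 * "buildblk" is hypothetical.
 */
#ifdef notdef
struct buf *
scratchblk()
{
	register struct buf *bp;

	bp = geteblk();			/* busy, B_INVAL, dev == NODEV */
	clrbuf(bp);			/* zero the BSIZE data area */
	buildblk(bp->b_un.b_addr);	/* hypothetical filler */
	return (bp);			/* caller must eventually brelse */
}
#endif
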
/*
 * Mark I/O complete on a buffer.  If the header
 * indicates a dirty page push completion, the
 * header is inserted into the ``cleaned'' list
 * to be processed by the pageout daemon.  Otherwise
 * release it if I/O is asynchronous, and wake
 * up anyone waiting for it.
 */
iodone(bp)
	register struct buf *bp;
{
	register int s;

	if (bp->b_flags & B_DONE)
		panic("dup iodone");
	bp->b_flags |= B_DONE;
	if (bp->b_flags & B_DIRTY) {
		if (bp->b_flags & B_ERROR)
			panic("IO err in push");
		s = spl6();
		cnt.v_pgout++;
		bp->av_forw = bclnlist;
		bp->b_bcount = swsize[bp - swbuf];
		bp->b_pfcent = swpf[bp - swbuf];
		bclnlist = bp;
		if (bswlist.b_flags & B_WANTED)
			wakeup((caddr_t)&proc[2]);
		splx(s);
		return;
	}
	if (bp->b_flags&B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}

/*
 * Zero the core associated with a buffer.
 */
clrbuf(bp)
	struct buf *bp;
{
	register *p;
	register c;

	p = bp->b_un.b_words;
	c = BSIZE/sizeof(int);
	do
		*p++ = 0;
	while (--c);
	bp->b_resid = 0;
}

/*
 * swap I/O -
 *
 * If the flag indicates a dirty page push initiated
 * by the pageout daemon, we map the page into the i'th
 * virtual page of process 2 (the daemon itself), where i is
 * the index of the swap header that has been allocated.
 * We simply initialize the header and queue the I/O but
 * do not wait for completion.  When the I/O completes,
 * iodone() will link the header to a list of cleaned
 * pages to be processed by the pageout daemon.
 */
swap(p, dblkno, addr, nbytes, rdflg, flag, dev, pfcent)
	struct proc *p;
	swblk_t dblkno;
	caddr_t addr;
	int flag, nbytes;
	dev_t dev;
	unsigned pfcent;
{
	register struct buf *bp;
	register int c;
	int p2dp;
	register struct pte *dpte, *vpte;

	(void) spl6();
	while (bswlist.av_forw == NULL) {
		bswlist.b_flags |= B_WANTED;
		sleep((caddr_t)&bswlist, PSWP+1);
	}
	bp = bswlist.av_forw;
	bswlist.av_forw = bp->av_forw;
	(void) spl0();

	bp->b_flags = B_BUSY | B_PHYS | rdflg | flag;
	if ((bp->b_flags & (B_DIRTY|B_PGIN)) == 0)
		if (rdflg == B_READ)
			sum.v_pswpin += btoc(nbytes);
		else
			sum.v_pswpout += btoc(nbytes);
	bp->b_proc = p;
	if (flag & B_DIRTY) {
		p2dp = ((bp - swbuf) * CLSIZE) * KLMAX;
		dpte = dptopte(&proc[2], p2dp);
		vpte = vtopte(p, btop(addr));
		for (c = 0; c < nbytes; c += NBPG) {
			if (vpte->pg_pfnum == 0 || vpte->pg_fod)
				panic("swap bad pte");
			*dpte++ = *vpte++;
		}
		bp->b_un.b_addr = (caddr_t)ctob(p2dp);
	} else
		bp->b_un.b_addr = addr;
	while (nbytes > 0) {
		c = imin(ctob(120), nbytes);
		bp->b_bcount = c;
		bp->b_blkno = dblkno;
		bp->b_dev = dev;
		if (flag & B_DIRTY) {
			swpf[bp - swbuf] = pfcent;
			swsize[bp - swbuf] = nbytes;
		}
		(*bdevsw[major(dev)].d_strategy)(bp);
		if (flag & B_DIRTY) {
			if (c < nbytes)
				panic("big push");
			return;
		}
		(void) spl6();
		while ((bp->b_flags&B_DONE) == 0)
			sleep((caddr_t)bp, PSWP);
		(void) spl0();
		bp->b_un.b_addr += c;
		bp->b_flags &= ~B_DONE;
		if (bp->b_flags & B_ERROR) {
			if ((flag & (B_UAREA|B_PAGET)) || rdflg == B_WRITE)
				panic("hard IO err in swap");
			swkill(p, (char *)0);
		}
		nbytes -= c;
		dblkno += btoc(c);
	}
	(void) spl6();
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY);
	bp->av_forw = bswlist.av_forw;
	bswlist.av_forw = bp;
	if (bswlist.b_flags & B_WANTED) {
		bswlist.b_flags &= ~B_WANTED;
		wakeup((caddr_t)&bswlist);
		wakeup((caddr_t)&proc[2]);
	}
	(void) spl0();
}

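/*
 * Illustrative sketch, never compiled: a block driver's interrupt
 * routine typically finishes the buffer at the head of its queue
 * by setting any error state and calling iodone.  "xxtab" and
 * "hard_error" are hypothetical.
 */
#ifdef notdef
xxintr()
{
	register struct buf *bp;

	bp = xxtab.av_forw;		/* hypothetical transfer queue */
	xxtab.av_forw = bp->av_forw;	/* dequeue before completing */
	if (hard_error()) {		/* hypothetical status check */
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
	}
	bp->b_resid = 0;
	iodone(bp);			/* wake sleeper, or brelse if async */
}
#endif
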
/*
 * If rout == 0 then killed on swap error, else
 * rout is the name of the routine where we ran out of
 * swap space.
 */
swkill(p, rout)
	struct proc *p;
	char *rout;
{
	char *mesg;

	printf("pid %d: ", p->p_pid);
	if (rout)
		printf(mesg = "killed due to no swap space\n");
	else
		printf(mesg = "killed on swap error\n");
	uprintf("sorry, pid %d was %s", p->p_pid, mesg);
	/*
	 * To be sure no looping (e.g. in vmsched trying to
	 * swap out) mark process locked in core (as though
	 * done by user) after killing it so no one will try
	 * to swap it out.
	 */
	psignal(p, SIGKILL);
	p->p_flag |= SULOCK;
}

/*
 * Make sure all write-behind blocks
 * on dev (or NODEV for all)
 * are flushed out.
 * (from umount and update)
 */
bflush(dev)
	dev_t dev;
{
	register struct buf *bp;
	register struct buf *flist;

loop:
	(void) spl6();
	for (flist = bfreelist; flist < &bfreelist[BQUEUES]; flist++)
		for (bp = flist->av_forw; bp != flist; bp = bp->av_forw) {
			if (bp->b_flags&B_DELWRI &&
			    (dev == NODEV || dev == bp->b_dev)) {
				bp->b_flags |= B_ASYNC;
				notavail(bp);
				bwrite(bp);
				goto loop;
			}
		}
	(void) spl0();
}

/*
 * Raw I/O.  The arguments are
 *	The strategy routine for the device
 *	A buffer, which will always be a special buffer
 *	  header owned exclusively by the device for this purpose
 *	The device number
 *	Read/write flag
 * Essentially all the work is computing physical addresses and
 * validating them.
 * If the user has the proper access privileges, the process is
 * marked 'delayed unlock' and the pages involved in the I/O are
 * faulted and locked.  After the completion of the I/O, the
 * pages are unlocked.
 */
physio(strat, bp, dev, rw, mincnt)
	int (*strat)();
	register struct buf *bp;
	unsigned (*mincnt)();
{
	register int c;
	char *a;

	if (useracc(u.u_base, u.u_count, rw==B_READ ? B_WRITE : B_READ) == NULL) {
		u.u_error = EFAULT;
		return;
	}
	(void) spl6();
	while (bp->b_flags&B_BUSY) {
		bp->b_flags |= B_WANTED;
		sleep((caddr_t)bp, PRIBIO+1);
	}
	bp->b_error = 0;
	bp->b_proc = u.u_procp;
	bp->b_un.b_addr = u.u_base;
	while (u.u_count != 0 && bp->b_error == 0) {
		bp->b_flags = B_BUSY | B_PHYS | rw;
		bp->b_dev = dev;
		bp->b_blkno = u.u_offset >> PGSHIFT;
		bp->b_bcount = u.u_count;
		(*mincnt)(bp);
		c = bp->b_bcount;
		u.u_procp->p_flag |= SPHYSIO;
		vslock(a = bp->b_un.b_addr, c);
		(*strat)(bp);
		(void) spl6();
		while ((bp->b_flags&B_DONE) == 0)
			sleep((caddr_t)bp, PRIBIO);
		vsunlock(a, c, rw);
		u.u_procp->p_flag &= ~SPHYSIO;
		if (bp->b_flags&B_WANTED)
			wakeup((caddr_t)bp);
		(void) spl0();
		bp->b_un.b_addr += c;
		u.u_count -= c;
		u.u_offset += c;
	}
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS);
	u.u_count = bp->b_resid;
	geterror(bp);
}

/*ARGSUSED*/
unsigned
minphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > 60 * 1024)
		bp->b_bcount = 60 * 1024;
}

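/*
 * Illustrative sketch, never compiled: a character driver's raw
 * read entry conventionally passes its strategy routine, a private
 * buffer header, and minphys to physio.  "xxread", "xxstrategy",
 * and "rxxbuf" are hypothetical.
 */
#ifdef notdef
struct buf rxxbuf;		/* private header for raw transfers */

xxread(dev)
	dev_t dev;
{

	physio(xxstrategy, &rxxbuf, dev, B_READ, minphys);
}
#endif
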
/*
 * Pick up the device's error number and pass it to the user;
 * if there is an error but the number is 0, set a generalized
 * code.  Actually the latter is always true, because devices
 * don't yet return specific errors.
 */
geterror(bp)
	register struct buf *bp;
{

	if (bp->b_flags&B_ERROR)
		if ((u.u_error = bp->b_error) == 0)
			u.u_error = EIO;
}

/*
 * Invalidate in-core blocks belonging to a closed or unmounted filesystem.
 *
 * This is not nicely done at all - the buffer ought to be removed from the
 * hash chains & have its dev/blkno fields clobbered, but unfortunately we
 * can't do that here, as it is quite possible that the block is still
 * being used for i/o.  Eventually, all disc drivers should be forced to
 * have a close routine, which ought to ensure that the queue is empty, then
 * properly flush the queues.  Until that happy day, this suffices for
 * correctness. ... kre
 */
binval(dev)
	dev_t dev;
{
	register struct buf *bp;
	register struct bufhd *hp;
#define	dp ((struct buf *)hp)

	for (hp = bufhash; hp < &bufhash[BUFHSZ]; hp++)
		for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
			if (bp->b_dev == dev)
				bp->b_flags |= B_INVAL;
}
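
/*
 * Illustrative sketch, never compiled: at unmount time a caller
 * would first force out any write-behind blocks and then
 * invalidate what remains in the cache.  "cacheflush" is
 * hypothetical.
 */
#ifdef notdef
cacheflush(dev)
	dev_t dev;
{

	bflush(dev);		/* push delayed writes for this device */
	binval(dev);		/* then mark stale blocks B_INVAL */
}
#endif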