/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)nfs_bio.c	8.5 (Berkeley) 01/04/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/trace.h>
#include <sys/mount.h>
#include <sys/kernel.h>

#include <vm/vm.h>

#include <nfs/nfsnode.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsv2.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nqnfs.h>

/* K&R forward declarations for buffer-cache helpers used below. */
struct buf *incore(), *nfs_getcacheblk();
extern struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
extern int nfs_numasync;

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 *
 * Satisfies a read request from the buffer cache, issuing read RPCs
 * (synchronously via nfs_doio(), or asynchronously via nfs_asyncio()
 * for readahead) to fill cache blocks as needed.  Handles VREG, VLNK
 * and VDIR vnodes; returns 0 or an errno.  EINTR is returned when an
 * interruptible mount is signalled while waiting for a buffer.
 */
nfs_bioread(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	register struct nfsnode *np = VTONFS(vp);
	register int biosize, diff;
	struct buf *bp, *rabp;
	struct vattr vattr;
	struct proc *p;
	struct nfsmount *nmp;
	daddr_t lbn, bn, rabn;
	caddr_t baddr;
	int got_buf, nra, error = 0, n, on, not_readin;

#ifdef lint
	ioflag = ioflag;
#endif /* lint */
#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0 && vp->v_type != VDIR)
		return (EINVAL);
	nmp = VFSTONFS(vp->v_mount);
	/* Cache blocks are sized by the read size so all blocks match. */
	biosize = nmp->nm_rsize;
	p = uio->uio_procp;
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nqnfs, full cache consistency is maintained within the loop.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * The mount flag NFSMNT_MYWRITE says "Assume that my writes are
	 * the ones changing the modify time.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */
	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && vp->v_type != VLNK) {
		if (np->n_flag & NMODIFIED) {
			/* We wrote the file; unless MYWRITE on a regular
			 * file says our writes caused the mtime change,
			 * dump the cache before re-reading. */
			if ((nmp->nm_flag & NFSMNT_MYWRITE) == 0 ||
			    vp->v_type != VREG) {
				if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
					return (error);
			}
			np->n_attrstamp = 0;
			np->n_direofoffset = 0;
			if (error = VOP_GETATTR(vp, &vattr, cred, p))
				return (error);
			np->n_mtime = vattr.va_mtime.ts_sec;
		} else {
			/* Unmodified locally: flush only if the server's
			 * mtime no longer matches the cached one. */
			if (error = VOP_GETATTR(vp, &vattr, cred, p))
				return (error);
			if (np->n_mtime != vattr.va_mtime.ts_sec) {
				np->n_direofoffset = 0;
				if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
					return (error);
				np->n_mtime = vattr.va_mtime.ts_sec;
			}
		}
	}
	do {

	    /*
	     * Get a valid lease. If cached data is stale, flush it.
	     */
	    if (nmp->nm_flag & NFSMNT_NQNFS) {
		if (NQNFS_CKINVALID(vp, np, NQL_READ)) {
		    do {
			error = nqnfs_getlease(vp, NQL_READ, cred, p);
		    } while (error == NQNFS_EXPIRED);
		    if (error)
			return (error);
		    /* Lease revision changed, caching disabled, or a
		     * locally modified directory: toss cached data. */
		    if (np->n_lrev != np->n_brev ||
			(np->n_flag & NQNFSNONCACHE) ||
			((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
			if (vp->v_type == VDIR) {
			    np->n_direofoffset = 0;
			    cache_purge(vp);
			}
			if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
			    return (error);
			np->n_brev = np->n_lrev;
		    }
		} else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
		    np->n_direofoffset = 0;
		    cache_purge(vp);
		    if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
			return (error);
		}
	    }
	    /*
	     * Non-cachable lease: bypass the buffer cache entirely and
	     * do the read RPC straight into the caller's uio.
	     */
	    if (np->n_flag & NQNFSNONCACHE) {
		switch (vp->v_type) {
		case VREG:
			error = nfs_readrpc(vp, uio, cred);
			break;
		case VLNK:
			error = nfs_readlinkrpc(vp, uio, cred);
			break;
		case VDIR:
			error = nfs_readdirrpc(vp, uio, cred);
			break;
		};
		return (error);
	    }
	    baddr = (caddr_t)0;
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
		/* lbn: logical block; on: offset within it; bn: device
		 * block address handed to the buffer cache. */
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		bn = lbn * (biosize / DEV_BSIZE);
		not_readin = 1;

		/*
		 * Start the read ahead(s), as required.
		 * Only when reading sequentially (this block follows the
		 * last one read) and nfsiods are available.
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
		    lbn == vp->v_lastr + 1) {
		    for (nra = 0; nra < nmp->nm_readahead &&
			(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rabn = (lbn + 1 + nra) * (biosize / DEV_BSIZE);
			if (!incore(vp, rabn)) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, p);
			    if (!rabp)
				return (EINTR);
			    if ((rabp->b_flags & (B_DELWRI | B_DONE)) == 0) {
				rabp->b_flags |= (B_READ | B_ASYNC);
				/* No nfsiod free: drop the readahead. */
				if (nfs_asyncio(rabp, cred)) {
				    rabp->b_flags |= B_INVAL;
				    brelse(rabp);
				}
			    }
			}
		    }
		}

		/*
		 * If the block is in the cache and has the required data
		 * in a valid region, just copy it out.
		 * Otherwise, get the block and write back/read in,
		 * as required.
		 */
		if ((bp = incore(vp, bn)) &&
		    (bp->b_flags & (B_BUSY | B_WRITEINPROG)) ==
		    (B_BUSY | B_WRITEINPROG))
			/* Busy being written: peek at it without holding. */
			got_buf = 0;
		else {
again:
			bp = nfs_getcacheblk(vp, bn, biosize, p);
			if (!bp)
				return (EINTR);
			got_buf = 1;
			if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
				bp->b_flags |= B_READ;
				not_readin = 0;
				if (error = nfs_doio(bp, cred, p)) {
					brelse(bp);
					return (error);
				}
			}
		}
		n = min((unsigned)(biosize - on), uio->uio_resid);
		diff = np->n_size - uio->uio_offset;
		if (diff < n)
			n = diff;
		/* Block was cached, not freshly read: if the requested
		 * range lies outside its valid window, push any dirty
		 * region and re-read it from scratch. */
		if (not_readin && n > 0) {
			if (on < bp->b_validoff || (on + n) > bp->b_validend) {
				if (!got_buf) {
					bp = nfs_getcacheblk(vp, bn, biosize, p);
					if (!bp)
						return (EINTR);
					got_buf = 1;
				}
				bp->b_flags |= B_INVAL;
				if (bp->b_dirtyend > 0) {
					if ((bp->b_flags & B_DELWRI) == 0)
						panic("nfsbioread");
					if (VOP_BWRITE(bp) == EINTR)
						return (EINTR);
				} else
					brelse(bp);
				goto again;
			}
		}
		vp->v_lastr = lbn;
		/* Clamp n to the valid portion of the buffer. */
		diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on);
		if (diff < n)
			n = diff;
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		/* Symlink target cached in a single block at bn 0. */
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_DONE) == 0) {
			bp->b_flags |= B_READ;
			if (error = nfs_doio(bp, cred, p)) {
				brelse(bp);
				return (error);
			}
		}
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		got_buf = 1;
		on = 0;
		break;
	    case VDIR:
		nfsstats.biocache_readdirs++;
		/* For directories uio_offset is the server's cookie,
		 * used directly as the cache block number. */
		bn = (daddr_t)uio->uio_offset;
		bp = nfs_getcacheblk(vp, bn, NFS_DIRBLKSIZ, p);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_DONE) == 0) {
			bp->b_flags |= B_READ;
			if (error = nfs_doio(bp, cred, p)) {
				brelse(bp);
				return (error);
			}
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 * directory offset cookie of the next block.
		 */
		rabn = bp->b_blkno;
		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
		    rabn != 0 && rabn != np->n_direofoffset &&
		    !incore(vp, rabn)) {
			rabp = nfs_getcacheblk(vp, rabn, NFS_DIRBLKSIZ, p);
			if (rabp) {
			    if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
				rabp->b_flags |= (B_READ | B_ASYNC);
				if (nfs_asyncio(rabp, cred)) {
				    rabp->b_flags |= B_INVAL;
				    brelse(rabp);
				}
			    }
			}
		}
		on = 0;
		n = min(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid);
		got_buf = 1;
		break;
	    };

	    if (n > 0) {
		if (!baddr)
			baddr = bp->b_data;
		error = uiomove(baddr + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		/* Consumed the whole block or hit EOF: age it out. */
		if (n + on == biosize || uio->uio_offset == np->n_size)
			bp->b_flags |= B_AGE;
		break;
	    case VLNK:
		/* Symlinks are read in one shot; force loop exit. */
		n = 0;
		break;
	    case VDIR:
		/* Advance to the next directory cookie (saved by
		 * nfs_doio() in b_blkno). */
		uio->uio_offset = bp->b_blkno;
		break;
	    };
	    if (got_buf)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 *
 * Copies the caller's data into buffer-cache blocks, tracking the dirty
 * and valid sub-ranges of each block, and pushes them with bwrite (sync
 * or non-cachable), bawrite (full block), or bdwrite (delayed write).
 * Returns 0 or an errno; EINTR when an interruptible mount is signalled.
 */
nfs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register int biosize;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	register struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	register struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp;
	daddr_t lbn, bn;
	int n, on, error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	/* Report a deferred write error from an earlier async write. */
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_attrstamp = 0;
			if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
				return (error);
		}
		if (ioflag & IO_APPEND) {
			/* Refetch attributes so n_size (and hence the
			 * append offset) is current. */
			np->n_attrstamp = 0;
			if (error = VOP_GETATTR(vp, &vattr, cred, p))
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	nmp = VFSTONFS(vp->v_mount);
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, i don't think it matters
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}
	/*
	 * I use nm_rsize, not nm_wsize so that all buffer cache blocks
	 * will be the same size within a filesystem. nfs_writerpc will
	 * still use nm_wsize when sizing the rpc's.
	 */
	biosize = nmp->nm_rsize;
	do {

	    /*
	     * Check for a valid write lease.
	     * If non-cachable, just do the rpc
	     */
	    if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		NQNFS_CKINVALID(vp, np, NQL_WRITE)) {
		do {
			error = nqnfs_getlease(vp, NQL_WRITE, cred, p);
		} while (error == NQNFS_EXPIRED);
		if (error)
			return (error);
		if (np->n_lrev != np->n_brev ||
		    (np->n_flag & NQNFSNONCACHE)) {
			if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
				return (error);
			np->n_brev = np->n_lrev;
		}
	    }
	    if (np->n_flag & NQNFSNONCACHE)
		return (nfs_writerpc(vp, uio, cred, ioflag));
	    nfsstats.biocache_writes++;
	    lbn = uio->uio_offset / biosize;
	    on = uio->uio_offset & (biosize-1);
	    n = min((unsigned)(biosize - on), uio->uio_resid);
	    bn = lbn * (biosize / DEV_BSIZE);
again:
	    bp = nfs_getcacheblk(vp, bn, biosize, p);
	    if (!bp)
		return (EINTR);
	    /* Attach write credentials to the buffer for later push. */
	    if (bp->b_wcred == NOCRED) {
		crhold(cred);
		bp->b_wcred = cred;
	    }
	    np->n_flag |= NMODIFIED;
	    if (uio->uio_offset + n > np->n_size) {
		np->n_size = uio->uio_offset + n;
		vnode_pager_setsize(vp, (u_long)np->n_size);
	    }

	    /*
	     * If the new write will leave a contiguous dirty
	     * area, just update the b_dirtyoff and b_dirtyend,
	     * otherwise force a write rpc of the old dirty area.
	     */
	    if (bp->b_dirtyend > 0 &&
		(on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
		bp->b_proc = p;
		if (VOP_BWRITE(bp) == EINTR)
			return (EINTR);
		goto again;
	    }

	    /*
	     * Check for valid write lease and get one as required.
	     * In case getblk() and/or bwrite() delayed us.
	     */
	    if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		NQNFS_CKINVALID(vp, np, NQL_WRITE)) {
		do {
			error = nqnfs_getlease(vp, NQL_WRITE, cred, p);
		} while (error == NQNFS_EXPIRED);
		if (error) {
			brelse(bp);
			return (error);
		}
		if (np->n_lrev != np->n_brev ||
		    (np->n_flag & NQNFSNONCACHE)) {
			brelse(bp);
			if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
				return (error);
			np->n_brev = np->n_lrev;
			goto again;
		}
	    }
	    if (error = uiomove((char *)bp->b_data + on, n, uio)) {
		bp->b_flags |= B_ERROR;
		brelse(bp);
		return (error);
	    }
	    /* Extend (or start) the dirty region to cover this write. */
	    if (bp->b_dirtyend > 0) {
		bp->b_dirtyoff = min(on, bp->b_dirtyoff);
		bp->b_dirtyend = max((on + n), bp->b_dirtyend);
	    } else {
		bp->b_dirtyoff = on;
		bp->b_dirtyend = on + n;
	    }
#ifndef notdef
	    /* Keep the valid region consistent with the dirty region:
	     * if they are disjoint, the valid window is reset to the
	     * dirty one; otherwise merge them. */
	    if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
		bp->b_validoff > bp->b_dirtyend) {
		bp->b_validoff = bp->b_dirtyoff;
		bp->b_validend = bp->b_dirtyend;
	    } else {
		bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
		bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
	    }
#else
	    bp->b_validoff = bp->b_dirtyoff;
	    bp->b_validend = bp->b_dirtyend;
#endif
	    if (ioflag & IO_APPEND)
		bp->b_flags |= B_APPENDWRITE;

	    /*
	     * If the lease is non-cachable or IO_SYNC do bwrite().
	     */
	    if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
		bp->b_proc = p;
		if (error = VOP_BWRITE(bp))
			return (error);
	    } else if ((n + on) == biosize &&
		(nmp->nm_flag & NFSMNT_NQNFS) == 0) {
		/* Full block, plain NFS: push it asynchronously. */
		bp->b_proc = (struct proc *)0;
		bawrite(bp);
	    } else
		/* Partial block: delay the write, more may arrive. */
		bdwrite(bp);
	} while (uio->uio_resid > 0 && n > 0);
	return (0);
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(vp, bn, size, p)
	struct vnode *vp;
	daddr_t bn;
	int size;
	struct proc *p;
{
	register struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (nmp->nm_flag & NFSMNT_INT) {
		/* Interruptible mount: sleep with PCATCH, and on each
		 * timed-out getblk() check for a pending fatal signal. */
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == (struct buf *)0) {
			if (nfs_sigintr(nmp, (struct nfsreq *)0, p))
				return ((struct buf *)0);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 *
 * Serialized via the NFLUSHINPROG/NFLUSHWANT flags in n_flag; clears
 * NMODIFIED on success.  Returns 0 or EINTR (only possible on
 * interruptible mounts, where a signal aborts the wait/flush).
 */
nfs_vinvalbuf(vp, flags, cred, p, intrflg)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int intrflg;
{
	register struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	/* Only interruptible mounts honor intrflg. */
	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
			slptimeo);
		if (error && intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 * First attempt uses slpflag (catch signals); retries sleep
	 * with a timeout instead, re-checking for signals each pass.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
	while (error) {
		if (intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
			/* Aborted: release the flush lock and wake any
			 * process waiting to do its own flush. */
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * On success the buffer is queued on nfs_bufq and an idle nfsiod is
 * woken to service it; credentials are attached to the buffer first so
 * the daemon can perform the RPC.  Returns 0 or EIO.
 */
nfs_asyncio(bp, cred)
	register struct buf *bp;
	struct ucred *cred;
{
	register int i;

	if (nfs_numasync == 0)
		return (EIO);
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
	    if (nfs_iodwant[i]) {
		if (bp->b_flags & B_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
		} else {
			if (bp->b_wcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			}
		}

		TAILQ_INSERT_TAIL(&nfs_bufq, bp, b_freelist);
		nfs_iodwant[i] = (struct proc *)0;
		wakeup((caddr_t)&nfs_iodwant[i]);
		return (0);
	    }
	return (EIO);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
636 */ 637 int 638 nfs_doio(bp, cr, p) 639 register struct buf *bp; 640 struct cred *cr; 641 struct proc *p; 642 { 643 register struct uio *uiop; 644 register struct vnode *vp; 645 struct nfsnode *np; 646 struct nfsmount *nmp; 647 int error, diff, len; 648 struct uio uio; 649 struct iovec io; 650 651 vp = bp->b_vp; 652 np = VTONFS(vp); 653 nmp = VFSTONFS(vp->v_mount); 654 uiop = &uio; 655 uiop->uio_iov = &io; 656 uiop->uio_iovcnt = 1; 657 uiop->uio_segflg = UIO_SYSSPACE; 658 uiop->uio_procp = p; 659 660 /* 661 * Historically, paging was done with physio, but no more. 662 */ 663 if (bp->b_flags & B_PHYS) 664 panic("doio phys"); 665 if (bp->b_flags & B_READ) { 666 io.iov_len = uiop->uio_resid = bp->b_bcount; 667 io.iov_base = bp->b_data; 668 uiop->uio_rw = UIO_READ; 669 switch (vp->v_type) { 670 case VREG: 671 uiop->uio_offset = bp->b_blkno * DEV_BSIZE; 672 nfsstats.read_bios++; 673 error = nfs_readrpc(vp, uiop, cr); 674 if (!error) { 675 bp->b_validoff = 0; 676 if (uiop->uio_resid) { 677 /* 678 * If len > 0, there is a hole in the file and 679 * no writes after the hole have been pushed to 680 * the server yet. 681 * Just zero fill the rest of the valid area. 
682 */ 683 diff = bp->b_bcount - uiop->uio_resid; 684 len = np->n_size - (bp->b_blkno * DEV_BSIZE 685 + diff); 686 if (len > 0) { 687 len = min(len, uiop->uio_resid); 688 bzero((char *)bp->b_data + diff, len); 689 bp->b_validend = diff + len; 690 } else 691 bp->b_validend = diff; 692 } else 693 bp->b_validend = bp->b_bcount; 694 } 695 if (p && (vp->v_flag & VTEXT) && 696 (((nmp->nm_flag & NFSMNT_NQNFS) && 697 np->n_lrev != np->n_brev) || 698 (!(nmp->nm_flag & NFSMNT_NQNFS) && 699 np->n_mtime != np->n_vattr.va_mtime.ts_sec))) { 700 uprintf("Process killed due to text file modification\n"); 701 psignal(p, SIGKILL); 702 p->p_flag |= P_NOSWAP; 703 } 704 break; 705 case VLNK: 706 uiop->uio_offset = 0; 707 nfsstats.readlink_bios++; 708 error = nfs_readlinkrpc(vp, uiop, cr); 709 break; 710 case VDIR: 711 uiop->uio_offset = bp->b_lblkno; 712 nfsstats.readdir_bios++; 713 if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) 714 error = nfs_readdirlookrpc(vp, uiop, cr); 715 else 716 error = nfs_readdirrpc(vp, uiop, cr); 717 /* 718 * Save offset cookie in b_blkno. 719 */ 720 bp->b_blkno = uiop->uio_offset; 721 break; 722 }; 723 if (error) { 724 bp->b_flags |= B_ERROR; 725 bp->b_error = error; 726 } 727 } else { 728 io.iov_len = uiop->uio_resid = bp->b_dirtyend 729 - bp->b_dirtyoff; 730 uiop->uio_offset = (bp->b_blkno * DEV_BSIZE) 731 + bp->b_dirtyoff; 732 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff; 733 uiop->uio_rw = UIO_WRITE; 734 nfsstats.write_bios++; 735 if (bp->b_flags & B_APPENDWRITE) 736 error = nfs_writerpc(vp, uiop, cr, IO_APPEND); 737 else 738 error = nfs_writerpc(vp, uiop, cr, 0); 739 bp->b_flags &= ~(B_WRITEINPROG | B_APPENDWRITE); 740 741 /* 742 * For an interrupted write, the buffer is still valid and the 743 * write hasn't been pushed to the server yet, so we can't set 744 * B_ERROR and report the interruption by setting B_EINTR. For 745 * the B_ASYNC case, B_EINTR is not relevant, so the rpc attempt 746 * is essentially a noop. 
747 */ 748 if (error == EINTR) { 749 bp->b_flags &= ~B_INVAL; 750 bp->b_flags |= B_DELWRI; 751 752 /* 753 * Since for the B_ASYNC case, nfs_bwrite() has reassigned the 754 * buffer to the clean list, we have to reassign it back to the 755 * dirty one. Ugh. 756 */ 757 if (bp->b_flags & B_ASYNC) 758 reassignbuf(bp, vp); 759 else 760 bp->b_flags |= B_EINTR; 761 } else { 762 if (error) { 763 bp->b_flags |= B_ERROR; 764 bp->b_error = np->n_error = error; 765 np->n_flag |= NWRITEERR; 766 } 767 bp->b_dirtyoff = bp->b_dirtyend = 0; 768 } 769 } 770 bp->b_resid = uiop->uio_resid; 771 biodone(bp); 772 return (error); 773 } 774