/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 * $FreeBSD: /repoman/r/ncvs/src/sys/nfsclient/nfs_bio.c,v 1.130 2004/04/14 23:23:55 peadar Exp $
 * $DragonFly: src/sys/vfs/nfs/nfs_bio.c,v 1.24 2005/08/27 20:23:06 joerg Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/buf2.h>
#include <sys/msfbuf.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/thread2.h>

#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "nfsmount.h"
#include "nqnfs.h"
#include "nfsnode.h"

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
				   struct thread *td);

extern int nfs_numasync;
extern int nfs_pbuf_freecnt;
extern struct nfsstats nfsstats;
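/*
 * All of the RPC helpers used in this file (nfs_readrpc, nfs_writerpc,
 * etc.) take a struct uio describing the transfer.  The setup pattern is
 * the same everywhere; a minimal sketch, with illustrative variable names
 * only:
 *
 *	struct uio uio;
 *	struct iovec iov;
 *
 *	iov.iov_base = kva;		(kernel address of the data)
 *	iov.iov_len = count;
 *	uio.uio_iov = &iov;
 *	uio.uio_iovcnt = 1;
 *	uio.uio_offset = offset;	(byte offset into the file)
 *	uio.uio_resid = count;
 *	uio.uio_segflg = UIO_SYSSPACE;
 *	uio.uio_rw = UIO_READ;		(or UIO_WRITE)
 *	uio.uio_td = td;
 *
 * On return, uio_resid holds the number of bytes NOT transferred, which
 * the callers below use to detect short reads and short writes.
 */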
/*
 * Vnode op for VM getpages.
 *
 * nfs_getpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		int a_reqpage, vm_ooffset_t a_offset)
 */
int
nfs_getpages(struct vop_getpages_args *ap)
{
	struct thread *td = curthread;		/* XXX */
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	char *kva;
	struct vnode *vp;
	struct nfsmount *nmp;
	vm_page_t *pages;
	vm_page_t m;
	struct msf_buf *msf;

	vp = ap->a_vp;
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if (vp->v_object == NULL) {
		printf("nfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);

	npages = btoc(count);

	/*
	 * NOTE that partially valid pages may occur in cases other
	 * than file EOF, such as when a file is partially written and
	 * ftruncate()-extended to a larger size.  It is also possible
	 * for the valid bits to be set on garbage beyond the file EOF and
	 * clear in the area before EOF (e.g. m->valid == 0xfc), which can
	 * occur due to vtruncbuf() and the buffer cache's handling of
	 * pages which 'straddle' buffers or when b_bufsize is not a
	 * multiple of PAGE_SIZE.... the buffer cache cannot normally
	 * clear the extra bits.  This kind of situation occurs when you
	 * make a small write() (m->valid == 0x03) and then mmap() and
	 * fault in the buffer (m->valid == 0xFF).  When NFS flushes the
	 * buffer (vinvalbuf() m->valid == 0xFC) we are left with a mess.
	 *
	 * This is combined with the possibility that the pages are partially
	 * dirty or that there is a buffer backing the pages that is dirty
	 * (even if m->dirty is 0).
	 *
	 * To solve this problem several hacks have been made:  (1) NFS
	 * guarantees that the IO block size is a multiple of PAGE_SIZE and
	 * (2) the buffer cache, when invalidating an NFS buffer, will
	 * disregard the buffer's fragmentary b_bufsize and invalidate
	 * the whole page rather than just the piece the buffer owns.
	 *
	 * This allows us to assume that a partially valid page found here
	 * is fully valid (vm_fault will zero out areas of the page not
	 * marked as valid).
	 */
	m = pages[ap->a_reqpage];
	if (m->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vnode_pager_freepage(pages[i]);
		}
		return(0);
	}

	/*
	 * Use an MSF_BUF as a medium to retrieve data from the pages.
	 */
	msf_map_pagelist(&msf, pages, npages, 0);
	KKASSERT(msf);
	kva = msf_buf_kva(msf);

	iov.iov_base = kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = nfs_readrpc(vp, &uio);
	msf_buf_free(msf);

	if (error && (uio.uio_resid == count)) {
		printf("nfs_getpages: error %d\n", error);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vnode_pager_freepage(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */
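	/*
	 * Worked example (illustrative numbers): with PAGE_SIZE 4096 and a
	 * read that returns 6000 bytes (size == 6000), the loop below marks
	 * page 0 fully valid (nextoff 4096 <= size) and partially validates
	 * page 1 via vm_page_set_validclean(m, 0, 6000 - 4096).  Any further
	 * pages are left with valid == 0.
	 */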
	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_validclean(m, 0, size - toff);
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(m, TRUE); */
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results suggest that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vnode_pager_freepage(m);
			}
		}
	}
	return 0;
}

/*
 * Vnode op for VM putpages.
 *
 * nfs_putpages(struct vnode *a_vp, vm_page_t *a_m, int a_count, int a_sync,
 *		int *a_rtvals, vm_ooffset_t a_offset)
 */
int
nfs_putpages(struct vop_putpages_args *ap)
{
	struct thread *td = curthread;
	struct uio uio;
	struct iovec iov;
	char *kva;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;
	struct msf_buf *msf;

	vp = ap->a_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
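	/*
	 * For example (illustrative numbers): with np->n_size == 10000,
	 * offset == 8192 and count == 4096, the clamp above reduces count
	 * to 1808 so the write rpc stops exactly at EOF.
	 */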
	/*
	 * Use an MSF_BUF as a medium to retrieve data from the pages.
	 */
	msf_map_pagelist(&msf, pages, npages, 0);
	KKASSERT(msf);
	kva = msf_buf_kva(msf);

	iov.iov_base = kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSV3WRITE_UNSTABLE;
	else
		iomode = NFSV3WRITE_FILESYNC;

	error = nfs_writerpc(vp, &uio, &iomode, &must_commit);

	msf_buf_free(msf);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;

		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
		if (must_commit)
			nfs_clearcommit(vp->v_mount);
	}
	return rtvals[0];
}

/*
 * Vnode op for read using bio
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp = 0, *rabp;
	struct vattr vattr;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	biosize = vp->v_mount->mnt_stat.f_iosize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);

	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 *
	 * NQNFS:	Full cache coherency is maintained within the loop.
	 *
	 * NFS:		If local changes have been made and this is a
	 *		directory, the directory must be invalidated and
	 *		the attribute cache must be cleared.
	 *
	 *		GETATTR is called to synchronize the file size.
	 *
	 *		If remote changes are detected local data is flushed
	 *		and the cache is invalidated.
	 *
	 *		NOTE: In the normal case the attribute cache is not
	 *		cleared which means GETATTR may use cached data and
	 *		not immediately detect changes made on the server.
	 */
	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0) {
		if ((np->n_flag & NLMODIFIED) && vp->v_type == VDIR) {
			nfs_invaldir(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
			np->n_attrstamp = 0;
		}
		error = VOP_GETATTR(vp, &vattr, td);
		if (error)
			return (error);
		if (np->n_flag & NRMODIFIED) {
			if (vp->v_type == VDIR)
				nfs_invaldir(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
			np->n_flag &= ~NRMODIFIED;
		}
	}
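	/*
	 * Each pass through the loop below maps uio_offset onto a logical
	 * block number and an offset within that block.  For example
	 * (illustrative numbers): with biosize == 8192 and
	 * uio_offset == 10000, lbn = 10000 / 8192 = 1 and
	 * on = 10000 & 8191 = 1808.
	 */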
	do {
		/*
		 * Get a valid lease.  If cached data is stale, flush it.
		 */
		if (nmp->nm_flag & NFSMNT_NQNFS) {
			if (NQNFS_CKINVALID(vp, np, ND_READ)) {
				do {
					error = nqnfs_getlease(vp, ND_READ, td);
				} while (error == NQNFS_EXPIRED);
				if (error)
					return (error);
				if (np->n_lrev != np->n_brev ||
				    (np->n_flag & NQNFSNONCACHE) ||
				    ((np->n_flag & NLMODIFIED) && vp->v_type == VDIR)) {
					if (vp->v_type == VDIR)
						nfs_invaldir(vp);
					error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
					if (error)
						return (error);
					np->n_brev = np->n_lrev;
				}
			} else if (vp->v_type == VDIR && (np->n_flag & NLMODIFIED)) {
				nfs_invaldir(vp);
				error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
				if (error)
					return (error);
			}
		}
		if (np->n_flag & NQNFSNONCACHE) {
			switch (vp->v_type) {
			case VREG:
				return (nfs_readrpc(vp, uio));
			case VLNK:
				return (nfs_readlinkrpc(vp, uio));
			case VDIR:
				break;
			default:
				printf(" NQNFSNONCACHE: type %x unexpected\n",
					vp->v_type);
			}
		}
		switch (vp->v_type) {
		case VREG:
			nfsstats.biocache_reads++;
			lbn = uio->uio_offset / biosize;
			on = uio->uio_offset & (biosize - 1);

			/*
			 * Start the read ahead(s), as required.
			 */
			if (nfs_numasync > 0 && nmp->nm_readahead > 0) {
				for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
				    (off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
					rabn = lbn + 1 + nra;
					if (!incore(vp, rabn)) {
						rabp = nfs_getcacheblk(vp, rabn, biosize, td);
						if (!rabp)
							return (EINTR);
						if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
							rabp->b_flags |= (B_READ | B_ASYNC);
							vfs_busy_pages(rabp, 0);
							if (nfs_asyncio(rabp, td)) {
								rabp->b_flags |= B_INVAL|B_ERROR;
								vfs_unbusy_pages(rabp);
								brelse(rabp);
								break;
							}
						} else {
							brelse(rabp);
						}
					}
				}
			}

			/*
			 * Obtain the buffer cache block.  Figure out the buffer size
			 * when we are at EOF.  If we are modifying the size of the
			 * buffer based on an EOF condition we need to hold
			 * nfs_rslock() through obtaining the buffer to prevent
			 * a potential writer-appender from messing with n_size.
			 * Otherwise we may accidentally truncate the buffer and
			 * lose dirty data.
			 *
			 * Note that bcount is *not* DEV_BSIZE aligned.
			 */
again:
			bcount = biosize;
			if ((off_t)lbn * biosize >= np->n_size) {
				bcount = 0;
			} else if ((off_t)(lbn + 1) * biosize > np->n_size) {
				bcount = np->n_size - (off_t)lbn * biosize;
			}
			if (bcount != biosize) {
				switch (nfs_rslock(np, td)) {
				case ENOLCK:
					goto again;
					/* not reached */
				case EINTR:
				case ERESTART:
					return(EINTR);
					/* not reached */
				default:
					break;
				}
			}

			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bcount != biosize)
				nfs_rsunlock(np, td);
			if (!bp)
				return (EINTR);

			/*
			 * If B_CACHE is not set, we must issue the read.  If this
			 * fails, we return an error.
			 */
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_flags |= B_READ;
				vfs_busy_pages(bp, 0);
				error = nfs_doio(bp, td);
				if (error) {
					brelse(bp);
					return (error);
				}
			}

			/*
			 * on is the offset into the current bp.  Figure out how many
			 * bytes we can copy out of the bp.  Note that bcount is
			 * NOT DEV_BSIZE aligned.
			 *
			 * Then figure out how many bytes we can copy into the uio.
			 */
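			/*
			 * For example (illustrative numbers): with
			 * bcount == 8192, on == 1808 and a large uio_resid,
			 * n = min(8192 - 1808, resid) = 6384 bytes are
			 * copied out of this buffer before the loop
			 * advances to the next block.
			 */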
			n = 0;
			if (on < bcount)
				n = min((unsigned)(bcount - on), uio->uio_resid);
			break;
		case VLNK:
			nfsstats.biocache_readlinks++;
			bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
			if (!bp)
				return (EINTR);
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_flags |= B_READ;
				vfs_busy_pages(bp, 0);
				error = nfs_doio(bp, td);
				if (error) {
					bp->b_flags |= B_ERROR;
					brelse(bp);
					return (error);
				}
			}
			n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			on = 0;
			break;
		case VDIR:
			nfsstats.biocache_readdirs++;
			if (np->n_direofoffset &&
			    uio->uio_offset >= np->n_direofoffset) {
				return (0);
			}
			lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
			on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
			bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
			if (!bp)
				return (EINTR);
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_flags |= B_READ;
				vfs_busy_pages(bp, 0);
				error = nfs_doio(bp, td);
				if (error) {
					brelse(bp);
				}
				while (error == NFSERR_BAD_COOKIE) {
					printf("got bad cookie vp %p bp %p\n", vp, bp);
					nfs_invaldir(vp);
					error = nfs_vinvalbuf(vp, 0, td, 1);
					/*
					 * Yuck! The directory has been modified on the
					 * server.  The only way to get the block is by
					 * reading from the beginning to get all the
					 * offset cookies.
					 *
					 * Leave the last bp intact unless there is an error.
					 * Loop back up to the while if the error is another
					 * NFSERR_BAD_COOKIE (double yuck!).
					 */
					for (i = 0; i <= lbn && !error; i++) {
						if (np->n_direofoffset &&
						    (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
							return (0);
						bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
						if (!bp)
							return (EINTR);
						if ((bp->b_flags & B_CACHE) == 0) {
							bp->b_flags |= B_READ;
							vfs_busy_pages(bp, 0);
							error = nfs_doio(bp, td);
							/*
							 * no error + B_INVAL == directory EOF,
							 * use the block.
							 */
							if (error == 0 && (bp->b_flags & B_INVAL))
								break;
						}
						/*
						 * An error will throw away the block and the
						 * for loop will break out.  If no error and this
						 * is not the block we want, we throw away the
						 * block and go for the next one via the for loop.
						 */
						if (error || i < lbn)
							brelse(bp);
					}
				}
				/*
				 * The above while is repeated if we hit another cookie
				 * error.  If we hit an error and it wasn't a cookie error,
				 * we give up.
				 */
				if (error)
					return (error);
			}

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have the
			 *  directory offset cookie of the next block.)
			 */
			if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
			    (bp->b_flags & B_INVAL) == 0 &&
			    (np->n_direofoffset == 0 ||
			    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
			    !(np->n_flag & NQNFSNONCACHE) &&
			    !incore(vp, lbn + 1)) {
				rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
				if (rabp) {
					if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
						rabp->b_flags |= (B_READ | B_ASYNC);
						vfs_busy_pages(rabp, 0);
						if (nfs_asyncio(rabp, td)) {
							rabp->b_flags |= B_INVAL|B_ERROR;
							vfs_unbusy_pages(rabp);
							brelse(rabp);
						}
					} else {
						brelse(rabp);
					}
				}
			}
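			/*
			 * Directory offsets are linear here: the lbn and on
			 * computed above address NFS_DIRBLKSIZ-sized cache
			 * blocks (e.g. a directory offset of
			 * NFS_DIRBLKSIZ + 300 maps to lbn 1, on 300;
			 * illustrative numbers).  The server's opaque
			 * per-block cookies are captured as the blocks are
			 * read in order, which is why the bad-cookie
			 * recovery above must rescan from block 0.
			 */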
			/*
			 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
			 * chopped for the EOF condition, we cannot tell how large
			 * NFS directories are going to be until we hit EOF.  So
			 * an NFS directory buffer is *not* chopped to its EOF.  Now,
			 * it just so happens that b_resid will effectively chop it
			 * to EOF.  *BUT* this information is lost if the buffer goes
			 * away and is reconstituted into a B_CACHE state ( due to
			 * being VMIO ) later.  So we keep track of the directory eof
			 * in np->n_direofoffset and chop it off as an extra step
			 * right here.
			 */
			n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
			if (np->n_direofoffset &&
			    n > np->n_direofoffset - uio->uio_offset)
				n = np->n_direofoffset - uio->uio_offset;
			break;
		default:
			printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
			break;
		}

		switch (vp->v_type) {
		case VREG:
			if (n > 0)
				error = uiomove(bp->b_data + on, (int)n, uio);
			break;
		case VLNK:
			if (n > 0)
				error = uiomove(bp->b_data + on, (int)n, uio);
			n = 0;
			break;
		case VDIR:
			if (n > 0) {
				off_t old_off = uio->uio_offset;
				caddr_t cpos, epos;
				struct nfs_dirent *dp;

				cpos = bp->b_data + on;
				epos = bp->b_data + on + n;
				while (cpos < epos && error == 0 && uio->uio_resid > 0) {
					dp = (struct nfs_dirent *)cpos;
					if (vop_write_dirent(&error, uio, dp->nfs_ino,
					    dp->nfs_type, dp->nfs_namlen, dp->nfs_name))
						break;
					cpos += dp->nfs_reclen;
				}
				n = 0;
				if (error == 0)
					uio->uio_offset = old_off + cpos - bp->b_data - on;
			}
			/*
			 * Invalidate buffer if caching is disabled, forcing a
			 * re-read from the remote later.
			 */
			if (np->n_flag & NQNFSNONCACHE)
				bp->b_flags |= B_INVAL;
			break;
		default:
			printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
		}
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 *
 * nfs_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	     struct ucred *a_cred)
 */
int
nfs_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int n, on, error = 0, iomode, must_commit;
	int haverslock = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NLMODIFIED) {
			np->n_attrstamp = 0;
			error = nfs_flush(vp, MNT_WAIT, td, 0);
			/* error = nfs_vinvalbuf(vp, V_SAVE, td, 1); */
			if (error)
				return (error);
		}
	}
	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
restart:
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, td);
		if (error)
			return (error);
		uio->uio_offset = np->n_size;
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	/*
	 * We need to obtain the rslock if we intend to modify np->n_size
	 * in order to guarantee the append point with multiple contending
	 * writers, to guarantee that no other appenders modify n_size
	 * while we are trying to obtain a truncated buffer (i.e. to avoid
	 * accidentally truncating data written by another appender due to
	 * the race), and to ensure that the buffer is populated prior to
	 * our extending of the file.  We hold rslock through the entire
	 * operation.
	 *
	 * Note that we do not synchronize the case where someone truncates
	 * the file while we are appending to it because attempting to lock
	 * this case may deadlock other parts of the system unexpectedly.
	 */
	if ((ioflag & IO_APPEND) ||
	    uio->uio_offset + uio->uio_resid > np->n_size) {
		switch (nfs_rslock(np, td)) {
		case ENOLCK:
			goto restart;
			/* not reached */
		case EINTR:
		case ERESTART:
			return(EINTR);
			/* not reached */
		default:
			break;
		}
		haverslock = 1;
	}

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (td->td_proc && uio->uio_offset + uio->uio_resid >
	      td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(td->td_proc, SIGXFSZ);
		if (haverslock)
			nfs_rsunlock(np, td);
		return (EFBIG);
	}

	biosize = vp->v_mount->mnt_stat.f_iosize;

	do {
		/*
		 * Check for a valid write lease.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
			do {
				error = nqnfs_getlease(vp, ND_WRITE, td);
			} while (error == NQNFS_EXPIRED);
			if (error)
				break;
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
				if (error)
					break;
				np->n_brev = np->n_lrev;
			}
		}
		if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) {
			iomode = NFSV3WRITE_FILESYNC;
			error = nfs_writerpc(vp, uio, &iomode, &must_commit);
			if (must_commit)
				nfs_clearcommit(vp->v_mount);
			break;
		}
		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		if (uio->uio_offset == np->n_size && n) {
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bp != NULL) {
				long save;

				np->n_size = uio->uio_offset + n;
				np->n_flag |= NLMODIFIED;
				vnode_pager_setsize(vp, np->n_size);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
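			/*
			 * For example (illustrative numbers): writing
			 * n == 100 bytes at on == 0 of block lbn == 0 in a
			 * 20000 byte file with biosize == 8192 starts with
			 * bcount = 100, but since the block lies entirely
			 * inside the file the code below rounds bcount back
			 * up to the full biosize of 8192.
			 */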
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NLMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
		}

		if (!bp) {
			error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thus,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */
		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~(B_ERROR | B_INVAL);
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_flags |= B_READ;
			vfs_busy_pages(bp, 0);
			error = nfs_doio(bp, td);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (!bp) {
			error = EINTR;
			break;
		}
		np->n_flag |= NLMODIFIED;

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */
		if (bp->b_dirtyend > bcount) {
			printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer (and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (VOP_BWRITE(bp->b_vp, bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}
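		/*
		 * For example (illustrative numbers): if the buffer already
		 * has dirty range [1000, 2000) and the new write is at
		 * on == 2500 with n == 100, the ranges are discontiguous
		 * (2500 > 2000), so the check above pushes the old dirty
		 * area with VOP_BWRITE() and loops back to 'again' before
		 * recording the new range.
		 */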
		/*
		 * Check for a valid write lease and get one as required,
		 * in case getblk() and/or bwrite() delayed us.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
			do {
				error = nqnfs_getlease(vp, ND_WRITE, td);
			} while (error == NQNFS_EXPIRED);
			if (error) {
				brelse(bp);
				break;
			}
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				brelse(bp);
				error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
				if (error)
					break;
				np->n_brev = np->n_lrev;
				goto again;
			}
		}

		error = uiomove((char *)bp->b_data + on, n, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (error) {
			bp->b_flags |= B_ERROR;
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_validclean(bp, on, n);
		}

		/*
		 * If IO_NOWDRAIN then set B_NOWDRAIN (e.g. nfs-backed VN
		 * filesystem).  XXX also use for loopback NFS mounts.
		 */
		if (ioflag & IO_NOWDRAIN)
			bp->b_flags |= B_NOWDRAIN;

		/*
		 * If the lease is non-cacheable or IO_SYNC, do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = VOP_BWRITE(bp->b_vp, bp);
			if (error)
				break;
			if (np->n_flag & NQNFSNONCACHE) {
				error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
				if (error)
					break;
			}
		} else if ((n + on) == biosize &&
		    (nmp->nm_flag & NFSMNT_NQNFS) == 0) {
			bp->b_flags |= B_ASYNC;
			(void)nfs_writebp(bp, 0, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

	if (haverslock)
		nfs_rsunlock(np, td);

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  nfs_doio() clears B_INVAL (and nfs_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
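/*
 * Typical call pattern, as used by the read and write paths above
 * (a sketch, not additional code):
 *
 *	bp = nfs_getcacheblk(vp, lbn, bcount, td);
 *	if (bp == NULL)
 *		return (EINTR);
 *
 * i.e. a NULL return is reported to the caller as an interrupted
 * system call.
 */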
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0);
	}

	if (vp->v_type == VREG) {
		int biosize;

		biosize = mp->mnt_stat.f_iosize;
		bp->b_blkno = bn * (biosize / DEV_BSIZE);
	}
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	if (vp->v_flag & VRECLAIMED)
		return (0);

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, 0, "nfsvinval", slptimeo);
		if (error && intrflg && nfs_sigintr(nmp, NULL, td))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, td, slpflag, 0);
	while (error) {
		if (intrflg && nfs_sigintr(nmp, NULL, td)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, td, 0, slptimeo);
	}
	np->n_flag &= ~(NLMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O.  Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: nfs_asyncio() does not clear (B_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, nfs_doio() *will*.
 */
int
nfs_asyncio(struct buf *bp, struct thread *td)
{
	struct nfsmount *nmp;
	int i;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error;

	/*
	 * If no async daemons then return EIO to force caller to run the rpc
	 * synchronously.
	 */
	if (nfs_numasync == 0)
		return (EIO);

	nmp = VFSTONFS(bp->b_vp->v_mount);

	/*
	 * Commits are usually short and sweet so let's save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 */
	if ((bp->b_flags & (B_READ|B_NEEDCOMMIT)) == B_NEEDCOMMIT &&
	    (nmp->nm_bufqiods > nfs_numasync / 2)) {
		return(EIO);
	}

again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
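	/*
	 * An idle nfsiod parks itself in a slot of nfs_iodwant[] and sleeps
	 * on that slot; clearing the slot, recording the mount in
	 * nfs_iodmount[i] and calling wakeup() hands it this mount's queue
	 * (this describes the daemon side of the handshake, assumed to be
	 * implemented in nfs_syscalls.c).
	 */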
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
		if (nfs_iodwant[i]) {
			/*
			 * Found one, so wake it up and tell it which
			 * mount to process.
			 */
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: waking iod %d for mount %p\n",
				 i, nmp));
			nfs_iodwant[i] = NULL;
			nfs_iodmount[i] = nmp;
			nmp->nm_bufqiods++;
			wakeup((caddr_t)&nfs_iodwant[i]);
			gotiod = TRUE;
			break;
		}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: %d iods are already processing mount %p\n",
				 nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2 * nfs_numasync) {
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = tsleep(&nmp->nm_bufq, slpflag,
				       "nfsaio", slptimeo);
			if (error) {
				if (nfs_sigintr(nmp, NULL, td))
					return (EINTR);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			if (nmp->nm_bufqiods == 0) {
				NFS_DPF(ASYNCIO,
					("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
				goto again;
			}
		}
		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		return (0);
	}

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

/*
 * Do an I/O operation to/from a cache block.  This may be called
 * synchronously or from an nfsiod.
 *
 * NOTE! TD MIGHT BE NULL
 */
int
nfs_doio(struct buf *bp, struct thread *td)
{
	struct uio *uiop;
	struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * Clear B_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~(B_ERROR | B_INVAL);

	KASSERT(!(bp->b_flags & B_DONE),
	    ("nfs_doio: bp %p already marked done", bp));
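	/*
	 * From here nfs_doio() dispatches three ways: B_PHYS buffers take
	 * the physio-style path immediately below, B_READ buffers issue a
	 * read rpc keyed on the vnode type, and everything else is treated
	 * as a write (an attempted commit first if B_NEEDCOMMIT is set,
	 * otherwise a write rpc of the buffer's dirty range).
	 */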
	/*
	 * Historically, paging was done with physio, but no more...
	 */
	if (bp->b_flags & B_PHYS) {
		/*
		 * ...though reading /dev/drum still gets us here.
		 */
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		/* mapping was done by vmapbuf() */
		io.iov_base = bp->b_data;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		if (bp->b_flags & B_READ) {
			uiop->uio_rw = UIO_READ;
			nfsstats.read_physios++;
			error = nfs_readrpc(vp, uiop);
		} else {
			int com;

			iomode = NFSV3WRITE_DATASYNC;
			uiop->uio_rw = UIO_WRITE;
			nfsstats.write_physios++;
			error = nfs_writerpc(vp, uiop, &iomode, &com);
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
	} else if (bp->b_flags & B_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;

		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			nfsstats.read_bios++;
			error = nfs_readrpc(vp, uiop);

			if (!error) {
				if (uiop->uio_resid) {
					/*
					 * If we had a short read with no error, we must have
					 * hit a file hole.  We should zero-fill the remainder.
					 * This can also occur if the server hits the file EOF.
					 *
					 * Holes used to be able to occur due to pending
					 * writes, but that is not possible any longer.
					 */
					int nread = bp->b_bcount - uiop->uio_resid;
					int left = uiop->uio_resid;

					if (left > 0)
						bzero((char *)bp->b_data + nread, left);
					uiop->uio_resid = 0;
				}
			}
			if (td && td->td_proc && (vp->v_flag & VTEXT) &&
			    (((nmp->nm_flag & NFSMNT_NQNFS) &&
			      NQNFS_CKINVALID(vp, np, ND_READ) &&
			      np->n_lrev != np->n_brev) ||
			     (!(nmp->nm_flag & NFSMNT_NQNFS) &&
			      np->n_mtime != np->n_vattr.va_mtime.tv_sec))) {
				uprintf("Process killed due to text file modification\n");
				psignal(td->td_proc, SIGKILL);
				PHOLD(td->td_proc);
			}
			break;
		case VLNK:
			uiop->uio_offset = (off_t)0;
			nfsstats.readlink_bios++;
			error = nfs_readlinkrpc(vp, uiop);
			break;
		case VDIR:
			nfsstats.readdir_bios++;
			uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
			if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
				error = nfs_readdirplusrpc(vp, uiop);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = nfs_readdirrpc(vp, uiop);
			/*
			 * end-of-directory sets B_INVAL but does not generate an
			 * error.
			 */
			if (error == 0 && uiop->uio_resid == bp->b_bcount)
				bp->b_flags |= B_INVAL;
			break;
		default:
			printf("nfs_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
	} else {
		/*
		 * If we only need to commit, try to commit.
		 */
		if (bp->b_flags & B_NEEDCOMMIT) {
			int retv;
			off_t off;

			off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
			retv = nfs_commit(bp->b_vp, off,
				bp->b_dirtyend - bp->b_dirtyoff, td);
			if (retv == 0) {
				bp->b_dirtyoff = bp->b_dirtyend = 0;
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
				bp->b_resid = 0;
				biodone(bp);
				return (0);
			}
			if (retv == NFSERR_STALEWRITEVERF) {
				nfs_clearcommit(bp->b_vp->v_mount);
			}
		}

		/*
		 * Setup for actual write
		 */
		if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
			bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend
				- bp->b_dirtyoff;
			uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
				+ bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			nfsstats.write_bios++;

			if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
				iomode = NFSV3WRITE_UNSTABLE;
			else
				iomode = NFSV3WRITE_FILESYNC;

			error = nfs_writerpc(vp, uiop, &iomode, &must_commit);

			/*
			 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
			 * to cluster the buffers needing commit.  This will allow
			 * the system to submit a single commit rpc for the whole
			 * cluster.  We can do this even if the buffer is not 100%
			 * dirty (relative to the NFS blocksize), so we optimize the
			 * append-to-file case.
			 *
			 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
			 * cleared because write clustering only works for commit
			 * rpc's, not for the data portion of the write).
			 */
			if (!error && iomode == NFSV3WRITE_UNSTABLE) {
				bp->b_flags |= B_NEEDCOMMIT;
				if (bp->b_dirtyoff == 0 &&
				    bp->b_dirtyend == bp->b_bcount)
					bp->b_flags |= B_CLUSTEROK;
			} else {
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			}

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set B_ERROR and report the interruption
			 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused.  This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 *
			 * If the buffer is marked B_PAGING, it does not reside on
			 * the vp's paging queues so we cannot call bdirty().  The
			 * bp in this case is not an NFS cache block so we should
			 * be safe.  XXX
			 */
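			/*
			 * Illustration (assumed scenario): a successful
			 * UNSTABLE write of the whole buffer leaves
			 * B_NEEDCOMMIT (and B_CLUSTEROK) set, so the branch
			 * below re-dirties the buffer via bdirty() and it
			 * remains queued until a later commit rpc, or a
			 * FILESYNC rewrite, finally cleans it.
			 */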
			if (error == EINTR ||
			    (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				crit_enter();
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				if (error && (bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				crit_exit();
			} else {
				if (error) {
					bp->b_flags |= B_ERROR;
					bp->b_error = np->n_error = error;
					np->n_flag |= NWRITEERR;
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			biodone(bp);
			return (0);
		}
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		nfs_clearcommit(vp->v_mount);
	biodone(bp);
	return (error);
}

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */
int
nfs_meta_setsize(struct vnode *vp, struct thread *td, u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize = np->n_size;
	int biosize = vp->v_mount->mnt_stat.f_iosize;
	int error = 0;

	np->n_size = nsize;

	if (np->n_size < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
		error = vtruncbuf(vp, td, nsize, biosize);
		lbn = nsize / biosize;
		bufsize = nsize & (biosize - 1);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;	/* don't leave garbage around */
		brelse(bp);
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return (error);
}