/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 * $FreeBSD: /repoman/r/ncvs/src/sys/nfsclient/nfs_bio.c,v 1.130 2004/04/14 23:23:55 peadar Exp $
 * $DragonFly: src/sys/vfs/nfs/nfs_bio.c,v 1.45 2008/07/18 00:09:39 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/msfbuf.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <vm/vm_page2.h>

#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "nfsmount.h"
#include "nfsnode.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"

static struct buf *nfs_getcacheblk(struct vnode *vp, off_t loffset,
				   int size, struct thread *td);
static int nfs_check_dirent(struct nfs_dirent *dp, int maxlen);
static void nfsiodone_sync(struct bio *bio);
static void nfs_readrpc_bio_done(nfsm_info_t info);
static void nfs_writerpc_bio_done(nfsm_info_t info);
static void nfs_commitrpc_bio_done(nfsm_info_t info);

/*
 * Vnode op for VM getpages.
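 *
 * Called by the VM system to fault pages of this vnode in from the
 * server.  The page list is mapped into kernel memory via an MSF_BUF
 * and filled with a single read RPC (see below).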
84 * 85 * nfs_getpages(struct vnode *a_vp, vm_page_t *a_m, int a_count, 86 * int a_reqpage, vm_ooffset_t a_offset) 87 */ 88 int 89 nfs_getpages(struct vop_getpages_args *ap) 90 { 91 struct thread *td = curthread; /* XXX */ 92 int i, error, nextoff, size, toff, count, npages; 93 struct uio uio; 94 struct iovec iov; 95 char *kva; 96 struct vnode *vp; 97 struct nfsmount *nmp; 98 vm_page_t *pages; 99 vm_page_t m; 100 struct msf_buf *msf; 101 102 vp = ap->a_vp; 103 nmp = VFSTONFS(vp->v_mount); 104 pages = ap->a_m; 105 count = ap->a_count; 106 107 if (vp->v_object == NULL) { 108 kprintf("nfs_getpages: called with non-merged cache vnode??\n"); 109 return VM_PAGER_ERROR; 110 } 111 112 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && 113 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) 114 (void)nfs_fsinfo(nmp, vp, td); 115 116 npages = btoc(count); 117 118 /* 119 * NOTE that partially valid pages may occur in cases other 120 * then file EOF, such as when a file is partially written and 121 * ftruncate()-extended to a larger size. It is also possible 122 * for the valid bits to be set on garbage beyond the file EOF and 123 * clear in the area before EOF (e.g. m->valid == 0xfc), which can 124 * occur due to vtruncbuf() and the buffer cache's handling of 125 * pages which 'straddle' buffers or when b_bufsize is not a 126 * multiple of PAGE_SIZE.... the buffer cache cannot normally 127 * clear the extra bits. This kind of situation occurs when you 128 * make a small write() (m->valid == 0x03) and then mmap() and 129 * fault in the buffer(m->valid = 0xFF). When NFS flushes the 130 * buffer (vinvalbuf() m->valid = 0xFC) we are left with a mess. 131 * 132 * This is combined with the possibility that the pages are partially 133 * dirty or that there is a buffer backing the pages that is dirty 134 * (even if m->dirty is 0). 135 * 136 * To solve this problem several hacks have been made: (1) NFS 137 * guarentees that the IO block size is a multiple of PAGE_SIZE and 138 * (2) The buffer cache, when invalidating an NFS buffer, will 139 * disregard the buffer's fragmentory b_bufsize and invalidate 140 * the whole page rather then just the piece the buffer owns. 141 * 142 * This allows us to assume that a partially valid page found here 143 * is fully valid (vm_fault will zero'd out areas of the page not 144 * marked as valid). 145 */ 146 m = pages[ap->a_reqpage]; 147 if (m->valid != 0) { 148 for (i = 0; i < npages; ++i) { 149 if (i != ap->a_reqpage) 150 vnode_pager_freepage(pages[i]); 151 } 152 return(0); 153 } 154 155 /* 156 * Use an MSF_BUF as a medium to retrieve data from the pages. 157 */ 158 msf_map_pagelist(&msf, pages, npages, 0); 159 KKASSERT(msf); 160 kva = msf_buf_kva(msf); 161 162 iov.iov_base = kva; 163 iov.iov_len = count; 164 uio.uio_iov = &iov; 165 uio.uio_iovcnt = 1; 166 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex); 167 uio.uio_resid = count; 168 uio.uio_segflg = UIO_SYSSPACE; 169 uio.uio_rw = UIO_READ; 170 uio.uio_td = td; 171 172 error = nfs_readrpc_uio(vp, &uio); 173 msf_buf_free(msf); 174 175 if (error && ((int)uio.uio_resid == count)) { 176 kprintf("nfs_getpages: error %d\n", error); 177 for (i = 0; i < npages; ++i) { 178 if (i != ap->a_reqpage) 179 vnode_pager_freepage(pages[i]); 180 } 181 return VM_PAGER_ERROR; 182 } 183 184 /* 185 * Calculate the number of bytes read and validate only that number 186 * of bytes. Note that due to pending writes, size may be 0. This 187 * does not mean that the remaining data is invalid! 
	 */
	size = count - (int)uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: vm_page_undirty/clear_dirty etc do not clear the
		 *	 pmap modified bit.
		 */
		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid(m, 0, size - toff);
			vm_page_clear_dirty_end_nonincl(m, 0, size - toff);
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(m, TRUE); */
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results appear to show that deactivating pages is
			 * best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vnode_pager_freepage(m);
			}
		}
	}
	return 0;
}

/*
 * Vnode op for VM putpages.
 *
 * The pmap modified bit was cleared prior to the putpages and probably
 * couldn't get set again until after our I/O completed, since the page
 * should not be mapped.  But don't count on it.  The m->dirty bits must
 * be completely cleared when we finish even if the count is truncated.
 *
 * nfs_putpages(struct vnode *a_vp, vm_page_t *a_m, int a_count, int a_sync,
 *		int *a_rtvals, vm_ooffset_t a_offset)
 */
int
nfs_putpages(struct vop_putpages_args *ap)
{
	struct thread *td = curthread;
	struct uio uio;
	struct iovec iov;
	char *kva;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;
	struct msf_buf *msf;

	vp = ap->a_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}

	/*
	 * Use an MSF_BUF as a medium to retrieve data from the pages.
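	 *
	 * (msf_map_pagelist() gives us a contiguous kernel virtual
	 * mapping of the page list, so the write RPC below can treat
	 * the pages as ordinary memory.)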
308 */ 309 msf_map_pagelist(&msf, pages, npages, 0); 310 KKASSERT(msf); 311 kva = msf_buf_kva(msf); 312 313 iov.iov_base = kva; 314 iov.iov_len = count; 315 uio.uio_iov = &iov; 316 uio.uio_iovcnt = 1; 317 uio.uio_offset = offset; 318 uio.uio_resid = (size_t)count; 319 uio.uio_segflg = UIO_SYSSPACE; 320 uio.uio_rw = UIO_WRITE; 321 uio.uio_td = td; 322 323 if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0) 324 iomode = NFSV3WRITE_UNSTABLE; 325 else 326 iomode = NFSV3WRITE_FILESYNC; 327 328 error = nfs_writerpc_uio(vp, &uio, &iomode, &must_commit); 329 330 msf_buf_free(msf); 331 332 if (error == 0) { 333 int nwritten; 334 335 nwritten = round_page(count - (int)uio.uio_resid) / PAGE_SIZE; 336 for (i = 0; i < nwritten; i++) { 337 rtvals[i] = VM_PAGER_OK; 338 vm_page_undirty(pages[i]); 339 } 340 if (must_commit) 341 nfs_clearcommit(vp->v_mount); 342 } 343 return rtvals[0]; 344 } 345 346 /* 347 * Vnode op for read using bio 348 */ 349 int 350 nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag) 351 { 352 struct nfsnode *np = VTONFS(vp); 353 int biosize, i; 354 struct buf *bp, *rabp; 355 struct vattr vattr; 356 struct thread *td; 357 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 358 off_t lbn, rabn; 359 off_t raoffset; 360 off_t loffset; 361 int seqcount; 362 int nra, error = 0; 363 int boff = 0; 364 size_t n; 365 366 #ifdef DIAGNOSTIC 367 if (uio->uio_rw != UIO_READ) 368 panic("nfs_read mode"); 369 #endif 370 if (uio->uio_resid == 0) 371 return (0); 372 if (uio->uio_offset < 0) /* XXX VDIR cookies can be negative */ 373 return (EINVAL); 374 td = uio->uio_td; 375 376 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && 377 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) 378 (void)nfs_fsinfo(nmp, vp, td); 379 if (vp->v_type != VDIR && 380 (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize) 381 return (EFBIG); 382 biosize = vp->v_mount->mnt_stat.f_iosize; 383 seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE); 384 385 /* 386 * For nfs, cache consistency can only be maintained approximately. 387 * Although RFC1094 does not specify the criteria, the following is 388 * believed to be compatible with the reference port. 389 * 390 * NFS: If local changes have been made and this is a 391 * directory, the directory must be invalidated and 392 * the attribute cache must be cleared. 393 * 394 * GETATTR is called to synchronize the file size. 395 * 396 * If remote changes are detected local data is flushed 397 * and the cache is invalidated. 398 * 399 * NOTE: In the normal case the attribute cache is not 400 * cleared which means GETATTR may use cached data and 401 * not immediately detect changes made on the server. 402 */ 403 if ((np->n_flag & NLMODIFIED) && vp->v_type == VDIR) { 404 nfs_invaldir(vp); 405 error = nfs_vinvalbuf(vp, V_SAVE, 1); 406 if (error) 407 return (error); 408 np->n_attrstamp = 0; 409 } 410 error = VOP_GETATTR(vp, &vattr); 411 if (error) 412 return (error); 413 if (np->n_flag & NRMODIFIED) { 414 if (vp->v_type == VDIR) 415 nfs_invaldir(vp); 416 error = nfs_vinvalbuf(vp, V_SAVE, 1); 417 if (error) 418 return (error); 419 np->n_flag &= ~NRMODIFIED; 420 } 421 422 /* 423 * Loop until uio exhausted or we hit EOF 424 */ 425 do { 426 bp = NULL; 427 428 switch (vp->v_type) { 429 case VREG: 430 nfsstats.biocache_reads++; 431 lbn = uio->uio_offset / biosize; 432 boff = uio->uio_offset & (biosize - 1); 433 loffset = (off_t)lbn * biosize; 434 435 /* 436 * Start the read ahead(s), as required. 
437 */ 438 if (nmp->nm_readahead > 0 && nfs_asyncok(nmp)) { 439 for (nra = 0; nra < nmp->nm_readahead && nra < seqcount && 440 (off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) { 441 rabn = lbn + 1 + nra; 442 raoffset = (off_t)rabn * biosize; 443 if (findblk(vp, raoffset, FINDBLK_TEST) == NULL) { 444 rabp = nfs_getcacheblk(vp, raoffset, biosize, td); 445 if (!rabp) 446 return (EINTR); 447 if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) { 448 rabp->b_cmd = BUF_CMD_READ; 449 vfs_busy_pages(vp, rabp); 450 nfs_asyncio(vp, &rabp->b_bio2); 451 } else { 452 brelse(rabp); 453 } 454 } 455 } 456 } 457 458 /* 459 * Obtain the buffer cache block. Figure out the buffer size 460 * when we are at EOF. If we are modifying the size of the 461 * buffer based on an EOF condition we need to hold 462 * nfs_rslock() through obtaining the buffer to prevent 463 * a potential writer-appender from messing with n_size. 464 * Otherwise we may accidently truncate the buffer and 465 * lose dirty data. 466 * 467 * Note that bcount is *not* DEV_BSIZE aligned. 468 */ 469 if (loffset + boff >= np->n_size) { 470 n = 0; 471 break; 472 } 473 bp = nfs_getcacheblk(vp, loffset, biosize, td); 474 475 if (bp == NULL) 476 return (EINTR); 477 478 /* 479 * If B_CACHE is not set, we must issue the read. If this 480 * fails, we return an error. 481 */ 482 if ((bp->b_flags & B_CACHE) == 0) { 483 bp->b_cmd = BUF_CMD_READ; 484 bp->b_bio2.bio_done = nfsiodone_sync; 485 bp->b_bio2.bio_flags |= BIO_SYNC; 486 vfs_busy_pages(vp, bp); 487 error = nfs_doio(vp, &bp->b_bio2, td); 488 if (error) { 489 brelse(bp); 490 return (error); 491 } 492 } 493 494 /* 495 * on is the offset into the current bp. Figure out how many 496 * bytes we can copy out of the bp. Note that bcount is 497 * NOT DEV_BSIZE aligned. 498 * 499 * Then figure out how many bytes we can copy into the uio. 500 */ 501 n = biosize - boff; 502 if (n > uio->uio_resid) 503 n = uio->uio_resid; 504 if (loffset + boff + n > np->n_size) 505 n = np->n_size - loffset - boff; 506 break; 507 case VLNK: 508 biosize = min(NFS_MAXPATHLEN, np->n_size); 509 nfsstats.biocache_readlinks++; 510 bp = nfs_getcacheblk(vp, (off_t)0, biosize, td); 511 if (bp == NULL) 512 return (EINTR); 513 if ((bp->b_flags & B_CACHE) == 0) { 514 bp->b_cmd = BUF_CMD_READ; 515 bp->b_bio2.bio_done = nfsiodone_sync; 516 bp->b_bio2.bio_flags |= BIO_SYNC; 517 vfs_busy_pages(vp, bp); 518 error = nfs_doio(vp, &bp->b_bio2, td); 519 if (error) { 520 bp->b_flags |= B_ERROR | B_INVAL; 521 brelse(bp); 522 return (error); 523 } 524 } 525 n = szmin(uio->uio_resid, (size_t)bp->b_bcount - bp->b_resid); 526 boff = 0; 527 break; 528 case VDIR: 529 nfsstats.biocache_readdirs++; 530 if (np->n_direofoffset && 531 uio->uio_offset >= np->n_direofoffset 532 ) { 533 return (0); 534 } 535 lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ; 536 boff = uio->uio_offset & (NFS_DIRBLKSIZ - 1); 537 loffset = uio->uio_offset - boff; 538 bp = nfs_getcacheblk(vp, loffset, NFS_DIRBLKSIZ, td); 539 if (bp == NULL) 540 return (EINTR); 541 542 if ((bp->b_flags & B_CACHE) == 0) { 543 bp->b_cmd = BUF_CMD_READ; 544 bp->b_bio2.bio_done = nfsiodone_sync; 545 bp->b_bio2.bio_flags |= BIO_SYNC; 546 vfs_busy_pages(vp, bp); 547 error = nfs_doio(vp, &bp->b_bio2, td); 548 if (error) 549 brelse(bp); 550 while (error == NFSERR_BAD_COOKIE) { 551 kprintf("got bad cookie vp %p bp %p\n", vp, bp); 552 nfs_invaldir(vp); 553 error = nfs_vinvalbuf(vp, 0, 1); 554 /* 555 * Yuck! The directory has been modified on the 556 * server. 
  The only way to get the block is by
					 * reading from the beginning to get all the
					 * offset cookies.
					 *
					 * Leave the last bp intact unless there is an error.
					 * Loop back up to the while if the error is another
					 * NFSERR_BAD_COOKIE (double yuck!).
					 */
					for (i = 0; i <= lbn && !error; i++) {
						if (np->n_direofoffset &&
						    (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
							return (0);
						bp = nfs_getcacheblk(vp, (off_t)i * NFS_DIRBLKSIZ,
								     NFS_DIRBLKSIZ, td);
						if (!bp)
							return (EINTR);
						if ((bp->b_flags & B_CACHE) == 0) {
							bp->b_cmd = BUF_CMD_READ;
							bp->b_bio2.bio_done = nfsiodone_sync;
							bp->b_bio2.bio_flags |= BIO_SYNC;
							vfs_busy_pages(vp, bp);
							error = nfs_doio(vp, &bp->b_bio2, td);
							/*
							 * no error + B_INVAL == directory EOF,
							 * use the block.
							 */
							if (error == 0 && (bp->b_flags & B_INVAL))
								break;
						}
						/*
						 * An error will throw away the block and the
						 * for loop will break out.  If no error and this
						 * is not the block we want, we throw away the
						 * block and go for the next one via the for loop.
						 */
						if (error || i < lbn)
							brelse(bp);
					}
				}
				/*
				 * The above while is repeated if we hit another cookie
				 * error.  If we hit an error and it wasn't a cookie error,
				 * we give up.
				 */
				if (error)
					return (error);
			}

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have the
			 *  directory offset cookie of the next block.)
			 */
			if (nmp->nm_readahead > 0 && nfs_asyncok(nmp) &&
			    (bp->b_flags & B_INVAL) == 0 &&
			    (np->n_direofoffset == 0 ||
			     loffset + NFS_DIRBLKSIZ < np->n_direofoffset) &&
			    findblk(vp, loffset + NFS_DIRBLKSIZ, FINDBLK_TEST) == NULL) {
				rabp = nfs_getcacheblk(vp, loffset + NFS_DIRBLKSIZ,
						       NFS_DIRBLKSIZ, td);
				if (rabp) {
					if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
						rabp->b_cmd = BUF_CMD_READ;
						vfs_busy_pages(vp, rabp);
						nfs_asyncio(vp, &rabp->b_bio2);
					} else {
						brelse(rabp);
					}
				}
			}
			/*
			 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
			 * chopped for the EOF condition, we cannot tell how large
			 * NFS directories are going to be until we hit EOF.  So
			 * an NFS directory buffer is *not* chopped to its EOF.  Now,
			 * it just so happens that b_resid will effectively chop it
			 * to EOF.  *BUT* this information is lost if the buffer goes
			 * away and is reconstituted into a B_CACHE state ( due to
			 * being VMIO ) later.  So we keep track of the directory eof
			 * in np->n_direofoffset and chop it off as an extra step
			 * right here.
			 *
			 * NOTE: boff could already be beyond EOF.
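			 *
			 * E.g. if b_resid is 100, only the first
			 * NFS_DIRBLKSIZ - 100 bytes of the block contain
			 * entries; a boff at or beyond that point yields
			 * n = 0 below.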
			 */
			if ((size_t)boff > NFS_DIRBLKSIZ - bp->b_resid) {
				n = 0;
			} else {
				n = szmin(uio->uio_resid,
					  NFS_DIRBLKSIZ - bp->b_resid - (size_t)boff);
			}
			if (np->n_direofoffset &&
			    n > (size_t)(np->n_direofoffset - uio->uio_offset)) {
				n = (size_t)(np->n_direofoffset - uio->uio_offset);
			}
			break;
		default:
			kprintf(" nfs_bioread: type %x unexpected\n", vp->v_type);
			n = 0;
			break;
		}

		switch (vp->v_type) {
		case VREG:
			if (n > 0)
				error = uiomove(bp->b_data + boff, n, uio);
			break;
		case VLNK:
			if (n > 0)
				error = uiomove(bp->b_data + boff, n, uio);
			n = 0;
			break;
		case VDIR:
			if (n > 0) {
				off_t old_off = uio->uio_offset;
				caddr_t cpos, epos;
				struct nfs_dirent *dp;

				/*
				 * We are casting cpos to nfs_dirent, it must be
				 * int-aligned.
				 */
				if (boff & 3) {
					error = EINVAL;
					break;
				}

				cpos = bp->b_data + boff;
				epos = bp->b_data + boff + n;
				while (cpos < epos && error == 0 && uio->uio_resid > 0) {
					dp = (struct nfs_dirent *)cpos;
					error = nfs_check_dirent(dp, (int)(epos - cpos));
					if (error)
						break;
					if (vop_write_dirent(&error, uio, dp->nfs_ino,
					    dp->nfs_type, dp->nfs_namlen, dp->nfs_name)) {
						break;
					}
					cpos += dp->nfs_reclen;
				}
				n = 0;
				if (error == 0) {
					uio->uio_offset = old_off + cpos -
							  bp->b_data - boff;
				}
			}
			break;
		default:
			kprintf(" nfs_bioread: type %x unexpected\n", vp->v_type);
		}
		if (bp)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Userland can supply any 'seek' offset when reading an NFS directory.
 * Validate the structure so we don't panic the kernel.  Note that
 * the element name is nul terminated and the nul is not included
 * in nfs_namlen.
 */
static
int
nfs_check_dirent(struct nfs_dirent *dp, int maxlen)
{
	int nfs_name_off = offsetof(struct nfs_dirent, nfs_name[0]);

	if (nfs_name_off >= maxlen)
		return (EINVAL);
	if (dp->nfs_reclen < nfs_name_off || dp->nfs_reclen > maxlen)
		return (EINVAL);
	if (nfs_name_off + dp->nfs_namlen >= dp->nfs_reclen)
		return (EINVAL);
	if (dp->nfs_reclen & 3)
		return (EINVAL);
	return (0);
}

/*
 * Vnode op for write using bio
 *
 * nfs_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	     struct ucred *a_cred)
 */
int
nfs_write(struct vop_write_args *ap)
{
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	off_t loffset;
	int boff, bytes;
	int error = 0;
	int haverslock = 0;
	int bcount;
	int biosize;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
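	 *
	 * For IO_APPEND the append offset comes from a fresh GETATTR
	 * below, so locally dirty buffers must be flushed to the server
	 * first or the server's idea of the file size may be stale.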
778 */ 779 if (ioflag & (IO_APPEND | IO_SYNC)) { 780 if (np->n_flag & NLMODIFIED) { 781 np->n_attrstamp = 0; 782 error = nfs_flush(vp, MNT_WAIT, td, 0); 783 /* error = nfs_vinvalbuf(vp, V_SAVE, 1); */ 784 if (error) 785 return (error); 786 } 787 } 788 789 /* 790 * If IO_APPEND then load uio_offset. We restart here if we cannot 791 * get the append lock. 792 */ 793 restart: 794 if (ioflag & IO_APPEND) { 795 np->n_attrstamp = 0; 796 error = VOP_GETATTR(vp, &vattr); 797 if (error) 798 return (error); 799 uio->uio_offset = np->n_size; 800 } 801 802 if (uio->uio_offset < 0) 803 return (EINVAL); 804 if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize) 805 return (EFBIG); 806 if (uio->uio_resid == 0) 807 return (0); 808 809 /* 810 * We need to obtain the rslock if we intend to modify np->n_size 811 * in order to guarentee the append point with multiple contending 812 * writers, to guarentee that no other appenders modify n_size 813 * while we are trying to obtain a truncated buffer (i.e. to avoid 814 * accidently truncating data written by another appender due to 815 * the race), and to ensure that the buffer is populated prior to 816 * our extending of the file. We hold rslock through the entire 817 * operation. 818 * 819 * Note that we do not synchronize the case where someone truncates 820 * the file while we are appending to it because attempting to lock 821 * this case may deadlock other parts of the system unexpectedly. 822 */ 823 if ((ioflag & IO_APPEND) || 824 uio->uio_offset + uio->uio_resid > np->n_size) { 825 switch(nfs_rslock(np)) { 826 case ENOLCK: 827 goto restart; 828 /* not reached */ 829 case EINTR: 830 case ERESTART: 831 return(EINTR); 832 /* not reached */ 833 default: 834 break; 835 } 836 haverslock = 1; 837 } 838 839 /* 840 * Maybe this should be above the vnode op call, but so long as 841 * file servers have no limits, i don't think it matters 842 */ 843 if (td->td_proc && uio->uio_offset + uio->uio_resid > 844 td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) { 845 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ); 846 if (haverslock) 847 nfs_rsunlock(np); 848 return (EFBIG); 849 } 850 851 biosize = vp->v_mount->mnt_stat.f_iosize; 852 853 do { 854 nfsstats.biocache_writes++; 855 boff = uio->uio_offset & (biosize-1); 856 loffset = uio->uio_offset - boff; 857 bytes = (int)szmin((unsigned)(biosize - boff), uio->uio_resid); 858 again: 859 /* 860 * Handle direct append and file extension cases, calculate 861 * unaligned buffer size. When extending B_CACHE will be 862 * set if possible. See UIO_NOCOPY note below. 863 */ 864 if (uio->uio_offset + bytes > np->n_size) { 865 np->n_flag |= NLMODIFIED; 866 bp = nfs_meta_setsize(vp, td, loffset, boff, bytes); 867 } else { 868 bp = nfs_getcacheblk(vp, loffset, biosize, td); 869 } 870 if (bp == NULL) { 871 error = EINTR; 872 break; 873 } 874 875 /* 876 * Actual bytes in buffer which we care about 877 */ 878 if (loffset + biosize < np->n_size) 879 bcount = biosize; 880 else 881 bcount = (int)(np->n_size - loffset); 882 883 /* 884 * Avoid a read by setting B_CACHE where the data we 885 * intend to write covers the entire buffer. Note 886 * that the buffer may have been set to B_CACHE by 887 * nfs_meta_setsize() above or otherwise inherited the 888 * flag, but if B_CACHE isn't set the buffer may be 889 * uninitialized and must be zero'd to accomodate 890 * future seek+write's. 891 * 892 * See the comments in kern/vfs_bio.c's getblk() for 893 * more information. 
894 * 895 * When doing a UIO_NOCOPY write the buffer is not 896 * overwritten and we cannot just set B_CACHE unconditionally 897 * for full-block writes. 898 */ 899 if (boff == 0 && bytes == biosize && 900 uio->uio_segflg != UIO_NOCOPY) { 901 bp->b_flags |= B_CACHE; 902 bp->b_flags &= ~(B_ERROR | B_INVAL); 903 } 904 905 /* 906 * b_resid may be set due to file EOF if we extended out. 907 * The NFS bio code will zero the difference anyway so 908 * just acknowledged the fact and set b_resid to 0. 909 */ 910 if ((bp->b_flags & B_CACHE) == 0) { 911 bp->b_cmd = BUF_CMD_READ; 912 bp->b_bio2.bio_done = nfsiodone_sync; 913 bp->b_bio2.bio_flags |= BIO_SYNC; 914 vfs_busy_pages(vp, bp); 915 error = nfs_doio(vp, &bp->b_bio2, td); 916 if (error) { 917 brelse(bp); 918 break; 919 } 920 bp->b_resid = 0; 921 } 922 np->n_flag |= NLMODIFIED; 923 924 /* 925 * If dirtyend exceeds file size, chop it down. This should 926 * not normally occur but there is an append race where it 927 * might occur XXX, so we log it. 928 * 929 * If the chopping creates a reverse-indexed or degenerate 930 * situation with dirtyoff/end, we 0 both of them. 931 */ 932 if (bp->b_dirtyend > bcount) { 933 kprintf("NFS append race @%08llx:%d\n", 934 (long long)bp->b_bio2.bio_offset, 935 bp->b_dirtyend - bcount); 936 bp->b_dirtyend = bcount; 937 } 938 939 if (bp->b_dirtyoff >= bp->b_dirtyend) 940 bp->b_dirtyoff = bp->b_dirtyend = 0; 941 942 /* 943 * If the new write will leave a contiguous dirty 944 * area, just update the b_dirtyoff and b_dirtyend, 945 * otherwise force a write rpc of the old dirty area. 946 * 947 * While it is possible to merge discontiguous writes due to 948 * our having a B_CACHE buffer ( and thus valid read data 949 * for the hole), we don't because it could lead to 950 * significant cache coherency problems with multiple clients, 951 * especially if locking is implemented later on. 952 * 953 * as an optimization we could theoretically maintain 954 * a linked list of discontinuous areas, but we would still 955 * have to commit them separately so there isn't much 956 * advantage to it except perhaps a bit of asynchronization. 957 */ 958 if (bp->b_dirtyend > 0 && 959 (boff > bp->b_dirtyend || 960 (boff + bytes) < bp->b_dirtyoff) 961 ) { 962 if (bwrite(bp) == EINTR) { 963 error = EINTR; 964 break; 965 } 966 goto again; 967 } 968 969 error = uiomove(bp->b_data + boff, bytes, uio); 970 971 /* 972 * Since this block is being modified, it must be written 973 * again and not just committed. Since write clustering does 974 * not work for the stage 1 data write, only the stage 2 975 * commit rpc, we have to clear B_CLUSTEROK as well. 976 */ 977 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 978 979 if (error) { 980 brelse(bp); 981 break; 982 } 983 984 /* 985 * Only update dirtyoff/dirtyend if not a degenerate 986 * condition. 987 * 988 * The underlying VM pages have been marked valid by 989 * virtue of acquiring the bp. Because the entire buffer 990 * is marked dirty we do not have to worry about cleaning 991 * out the related dirty bits (and wouldn't really know 992 * how to deal with byte ranges anyway) 993 */ 994 if (bytes) { 995 if (bp->b_dirtyend > 0) { 996 bp->b_dirtyoff = imin(boff, bp->b_dirtyoff); 997 bp->b_dirtyend = imax(boff + bytes, 998 bp->b_dirtyend); 999 } else { 1000 bp->b_dirtyoff = boff; 1001 bp->b_dirtyend = boff + bytes; 1002 } 1003 } 1004 1005 /* 1006 * If the lease is non-cachable or IO_SYNC do bwrite(). 1007 * 1008 * IO_INVAL appears to be unused. 
		 * The idea appears to be to turn off caching in this
		 * case.  Very odd.  XXX
		 *
		 * If nfs_async is set bawrite() will use an unstable write
		 * (build dirty bufs on the server), so we might as well
		 * push it out with bawrite().  If nfs_async is not set we
		 * use bdwrite() to cache dirty bufs on the client.
		 */
		if (ioflag & IO_SYNC) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = bwrite(bp);
			if (error)
				break;
		} else if (boff + bytes == biosize && nfs_async) {
			bawrite(bp);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && bytes > 0);

	if (haverslock)
		nfs_rsunlock(np);

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  nfs_startio() clears B_INVAL (and nfs_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, off_t loffset, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, loffset, size, GETBLK_PCATCH, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, td))
				return (NULL);
			bp = getblk(vp, loffset, size, 0, 2 * hz);
		}
	} else {
		bp = getblk(vp, loffset, size, 0, 0);
	}

	/*
	 * bio2, the 'device' layer.  Since BIOs use 64 bit byte offsets
	 * now, no translation is necessary.
	 */
	bp->b_bio2.bio_offset = loffset;
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	thread_t td = curthread;

	if (vp->v_flag & VRECLAIMED)
		return (0);

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, 0, "nfsvinval", slptimeo);
		if (error && intrflg && nfs_sigintr(nmp, NULL, td))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
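	 *
	 * NFLUSHINPROG/NFLUSHWANT form a simple interlock: the flusher
	 * sets NFLUSHINPROG, would-be flushers set NFLUSHWANT and sleep
	 * on n_flag, and the active flusher wakes them up when done.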
1115 */ 1116 np->n_flag |= NFLUSHINPROG; 1117 error = vinvalbuf(vp, flags, slpflag, 0); 1118 while (error) { 1119 if (intrflg && nfs_sigintr(nmp, NULL, td)) { 1120 np->n_flag &= ~NFLUSHINPROG; 1121 if (np->n_flag & NFLUSHWANT) { 1122 np->n_flag &= ~NFLUSHWANT; 1123 wakeup((caddr_t)&np->n_flag); 1124 } 1125 return (EINTR); 1126 } 1127 error = vinvalbuf(vp, flags, 0, slptimeo); 1128 } 1129 np->n_flag &= ~(NLMODIFIED | NFLUSHINPROG); 1130 if (np->n_flag & NFLUSHWANT) { 1131 np->n_flag &= ~NFLUSHWANT; 1132 wakeup((caddr_t)&np->n_flag); 1133 } 1134 return (0); 1135 } 1136 1137 /* 1138 * Return true (non-zero) if the txthread and rxthread are operational 1139 * and we do not already have too many not-yet-started BIO's built up. 1140 */ 1141 int 1142 nfs_asyncok(struct nfsmount *nmp) 1143 { 1144 return (nmp->nm_bioqlen < nfs_maxasyncbio && 1145 nmp->nm_bioqlen < nmp->nm_maxasync_scaled / NFS_ASYSCALE && 1146 nmp->nm_rxstate <= NFSSVC_PENDING && 1147 nmp->nm_txstate <= NFSSVC_PENDING); 1148 } 1149 1150 /* 1151 * The read-ahead code calls this to queue a bio to the txthread. 1152 * 1153 * We don't touch the bio otherwise... that is, we do not even 1154 * construct or send the initial rpc. The txthread will do it 1155 * for us. 1156 * 1157 * NOTE! nm_bioqlen is not decremented until the request completes, 1158 * so it does not reflect the number of bio's on bioq. 1159 */ 1160 void 1161 nfs_asyncio(struct vnode *vp, struct bio *bio) 1162 { 1163 struct buf *bp = bio->bio_buf; 1164 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1165 1166 KKASSERT(vp->v_tag == VT_NFS); 1167 BUF_KERNPROC(bp); 1168 bio->bio_driver_info = vp; 1169 crit_enter(); 1170 TAILQ_INSERT_TAIL(&nmp->nm_bioq, bio, bio_act); 1171 atomic_add_int(&nmp->nm_bioqlen, 1); 1172 crit_exit(); 1173 nfssvc_iod_writer_wakeup(nmp); 1174 } 1175 1176 /* 1177 * nfs_dio() - Execute a BIO operation synchronously. The BIO will be 1178 * completed and its error returned. The caller is responsible 1179 * for brelse()ing it. ONLY USE FOR BIO_SYNC IOs! Otherwise 1180 * our error probe will be against an invalid pointer. 1181 * 1182 * nfs_startio()- Execute a BIO operation assynchronously. 1183 * 1184 * NOTE: nfs_asyncio() is used to initiate an asynchronous BIO operation, 1185 * which basically just queues it to the txthread. nfs_startio() 1186 * actually initiates the I/O AFTER it has gotten to the txthread. 1187 * 1188 * NOTE: td might be NULL. 1189 * 1190 * NOTE: Caller has already busied the I/O. 1191 */ 1192 void 1193 nfs_startio(struct vnode *vp, struct bio *bio, struct thread *td) 1194 { 1195 struct buf *bp = bio->bio_buf; 1196 struct nfsnode *np; 1197 struct nfsmount *nmp; 1198 1199 KKASSERT(vp->v_tag == VT_NFS); 1200 np = VTONFS(vp); 1201 nmp = VFSTONFS(vp->v_mount); 1202 1203 /* 1204 * clear B_ERROR and B_INVAL state prior to initiating the I/O. We 1205 * do this here so we do not have to do it in all the code that 1206 * calls us. 
1207 */ 1208 bp->b_flags &= ~(B_ERROR | B_INVAL); 1209 1210 KASSERT(bp->b_cmd != BUF_CMD_DONE, 1211 ("nfs_doio: bp %p already marked done!", bp)); 1212 1213 if (bp->b_cmd == BUF_CMD_READ) { 1214 switch (vp->v_type) { 1215 case VREG: 1216 nfsstats.read_bios++; 1217 nfs_readrpc_bio(vp, bio); 1218 break; 1219 case VLNK: 1220 #if 0 1221 bio->bio_offset = 0; 1222 nfsstats.readlink_bios++; 1223 nfs_readlinkrpc_bio(vp, bio); 1224 #else 1225 nfs_doio(vp, bio, td); 1226 #endif 1227 break; 1228 case VDIR: 1229 /* 1230 * NOTE: If nfs_readdirplusrpc_bio() is requested but 1231 * not supported, it will chain to 1232 * nfs_readdirrpc_bio(). 1233 */ 1234 #if 0 1235 nfsstats.readdir_bios++; 1236 uiop->uio_offset = bio->bio_offset; 1237 if (nmp->nm_flag & NFSMNT_RDIRPLUS) 1238 nfs_readdirplusrpc_bio(vp, bio); 1239 else 1240 nfs_readdirrpc_bio(vp, bio); 1241 #else 1242 nfs_doio(vp, bio, td); 1243 #endif 1244 break; 1245 default: 1246 kprintf("nfs_doio: type %x unexpected\n",vp->v_type); 1247 bp->b_flags |= B_ERROR; 1248 bp->b_error = EINVAL; 1249 biodone(bio); 1250 break; 1251 } 1252 } else { 1253 /* 1254 * If we only need to commit, try to commit. If this fails 1255 * it will chain through to the write. Basically all the logic 1256 * in nfs_doio() is replicated. 1257 */ 1258 KKASSERT(bp->b_cmd == BUF_CMD_WRITE); 1259 if (bp->b_flags & B_NEEDCOMMIT) 1260 nfs_commitrpc_bio(vp, bio); 1261 else 1262 nfs_writerpc_bio(vp, bio); 1263 } 1264 } 1265 1266 int 1267 nfs_doio(struct vnode *vp, struct bio *bio, struct thread *td) 1268 { 1269 struct buf *bp = bio->bio_buf; 1270 struct uio *uiop; 1271 struct nfsnode *np; 1272 struct nfsmount *nmp; 1273 int error = 0; 1274 int iomode, must_commit; 1275 size_t n; 1276 struct uio uio; 1277 struct iovec io; 1278 1279 KKASSERT(vp->v_tag == VT_NFS); 1280 np = VTONFS(vp); 1281 nmp = VFSTONFS(vp->v_mount); 1282 uiop = &uio; 1283 uiop->uio_iov = &io; 1284 uiop->uio_iovcnt = 1; 1285 uiop->uio_segflg = UIO_SYSSPACE; 1286 uiop->uio_td = td; 1287 1288 /* 1289 * clear B_ERROR and B_INVAL state prior to initiating the I/O. We 1290 * do this here so we do not have to do it in all the code that 1291 * calls us. 1292 */ 1293 bp->b_flags &= ~(B_ERROR | B_INVAL); 1294 1295 KASSERT(bp->b_cmd != BUF_CMD_DONE, 1296 ("nfs_doio: bp %p already marked done!", bp)); 1297 1298 if (bp->b_cmd == BUF_CMD_READ) { 1299 io.iov_len = uiop->uio_resid = (size_t)bp->b_bcount; 1300 io.iov_base = bp->b_data; 1301 uiop->uio_rw = UIO_READ; 1302 1303 switch (vp->v_type) { 1304 case VREG: 1305 /* 1306 * When reading from a regular file zero-fill any residual. 1307 * Note that this residual has nothing to do with NFS short 1308 * reads, which nfs_readrpc_uio() will handle for us. 1309 * 1310 * We have to do this because when we are write extending 1311 * a file the server may not have the same notion of 1312 * filesize as we do. Our BIOs should already be sized 1313 * (b_bcount) to account for the file EOF. 
1314 */ 1315 nfsstats.read_bios++; 1316 uiop->uio_offset = bio->bio_offset; 1317 error = nfs_readrpc_uio(vp, uiop); 1318 if (error == 0 && uiop->uio_resid) { 1319 n = (size_t)bp->b_bcount - uiop->uio_resid; 1320 bzero(bp->b_data + n, bp->b_bcount - n); 1321 uiop->uio_resid = 0; 1322 } 1323 if (td && td->td_proc && (vp->v_flag & VTEXT) && 1324 np->n_mtime != np->n_vattr.va_mtime.tv_sec) { 1325 uprintf("Process killed due to text file modification\n"); 1326 ksignal(td->td_proc, SIGKILL); 1327 } 1328 break; 1329 case VLNK: 1330 uiop->uio_offset = 0; 1331 nfsstats.readlink_bios++; 1332 error = nfs_readlinkrpc_uio(vp, uiop); 1333 break; 1334 case VDIR: 1335 nfsstats.readdir_bios++; 1336 uiop->uio_offset = bio->bio_offset; 1337 if (nmp->nm_flag & NFSMNT_RDIRPLUS) { 1338 error = nfs_readdirplusrpc_uio(vp, uiop); 1339 if (error == NFSERR_NOTSUPP) 1340 nmp->nm_flag &= ~NFSMNT_RDIRPLUS; 1341 } 1342 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0) 1343 error = nfs_readdirrpc_uio(vp, uiop); 1344 /* 1345 * end-of-directory sets B_INVAL but does not generate an 1346 * error. 1347 */ 1348 if (error == 0 && uiop->uio_resid == bp->b_bcount) 1349 bp->b_flags |= B_INVAL; 1350 break; 1351 default: 1352 kprintf("nfs_doio: type %x unexpected\n",vp->v_type); 1353 break; 1354 }; 1355 if (error) { 1356 bp->b_flags |= B_ERROR; 1357 bp->b_error = error; 1358 } 1359 bp->b_resid = uiop->uio_resid; 1360 } else { 1361 /* 1362 * If we only need to commit, try to commit. 1363 * 1364 * NOTE: The I/O has already been staged for the write and 1365 * its pages busied, so b_dirtyoff/end is valid. 1366 */ 1367 KKASSERT(bp->b_cmd == BUF_CMD_WRITE); 1368 if (bp->b_flags & B_NEEDCOMMIT) { 1369 int retv; 1370 off_t off; 1371 1372 off = bio->bio_offset + bp->b_dirtyoff; 1373 retv = nfs_commitrpc_uio(vp, off, 1374 bp->b_dirtyend - bp->b_dirtyoff, 1375 td); 1376 if (retv == 0) { 1377 bp->b_dirtyoff = bp->b_dirtyend = 0; 1378 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 1379 bp->b_resid = 0; 1380 biodone(bio); 1381 return(0); 1382 } 1383 if (retv == NFSERR_STALEWRITEVERF) { 1384 nfs_clearcommit(vp->v_mount); 1385 } 1386 } 1387 1388 /* 1389 * Setup for actual write 1390 */ 1391 if (bio->bio_offset + bp->b_dirtyend > np->n_size) 1392 bp->b_dirtyend = np->n_size - bio->bio_offset; 1393 1394 if (bp->b_dirtyend > bp->b_dirtyoff) { 1395 io.iov_len = uiop->uio_resid = bp->b_dirtyend 1396 - bp->b_dirtyoff; 1397 uiop->uio_offset = bio->bio_offset + bp->b_dirtyoff; 1398 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff; 1399 uiop->uio_rw = UIO_WRITE; 1400 nfsstats.write_bios++; 1401 1402 if ((bp->b_flags & (B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == 0) 1403 iomode = NFSV3WRITE_UNSTABLE; 1404 else 1405 iomode = NFSV3WRITE_FILESYNC; 1406 1407 must_commit = 0; 1408 error = nfs_writerpc_uio(vp, uiop, &iomode, &must_commit); 1409 1410 /* 1411 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try 1412 * to cluster the buffers needing commit. This will allow 1413 * the system to submit a single commit rpc for the whole 1414 * cluster. We can do this even if the buffer is not 100% 1415 * dirty (relative to the NFS blocksize), so we optimize the 1416 * append-to-file-case. 1417 * 1418 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be 1419 * cleared because write clustering only works for commit 1420 * rpc's, not for the data portion of the write). 
1421 */ 1422 1423 if (!error && iomode == NFSV3WRITE_UNSTABLE) { 1424 bp->b_flags |= B_NEEDCOMMIT; 1425 if (bp->b_dirtyoff == 0 1426 && bp->b_dirtyend == bp->b_bcount) 1427 bp->b_flags |= B_CLUSTEROK; 1428 } else { 1429 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 1430 } 1431 1432 /* 1433 * For an interrupted write, the buffer is still valid 1434 * and the write hasn't been pushed to the server yet, 1435 * so we can't set B_ERROR and report the interruption 1436 * by setting B_EINTR. For the async case, B_EINTR 1437 * is not relevant, so the rpc attempt is essentially 1438 * a noop. For the case of a V3 write rpc not being 1439 * committed to stable storage, the block is still 1440 * dirty and requires either a commit rpc or another 1441 * write rpc with iomode == NFSV3WRITE_FILESYNC before 1442 * the block is reused. This is indicated by setting 1443 * the B_DELWRI and B_NEEDCOMMIT flags. 1444 * 1445 * If the buffer is marked B_PAGING, it does not reside on 1446 * the vp's paging queues so we cannot call bdirty(). The 1447 * bp in this case is not an NFS cache block so we should 1448 * be safe. XXX 1449 */ 1450 if (error == EINTR 1451 || (!error && (bp->b_flags & B_NEEDCOMMIT))) { 1452 crit_enter(); 1453 bp->b_flags &= ~(B_INVAL|B_NOCACHE); 1454 if ((bp->b_flags & B_PAGING) == 0) 1455 bdirty(bp); 1456 if (error) 1457 bp->b_flags |= B_EINTR; 1458 crit_exit(); 1459 } else { 1460 if (error) { 1461 bp->b_flags |= B_ERROR; 1462 bp->b_error = np->n_error = error; 1463 np->n_flag |= NWRITEERR; 1464 } 1465 bp->b_dirtyoff = bp->b_dirtyend = 0; 1466 } 1467 if (must_commit) 1468 nfs_clearcommit(vp->v_mount); 1469 bp->b_resid = uiop->uio_resid; 1470 } else { 1471 bp->b_resid = 0; 1472 } 1473 } 1474 1475 /* 1476 * I/O was run synchronously, biodone() it and calculate the 1477 * error to return. 1478 */ 1479 biodone(bio); 1480 KKASSERT(bp->b_cmd == BUF_CMD_DONE); 1481 if (bp->b_flags & B_EINTR) 1482 return (EINTR); 1483 if (bp->b_flags & B_ERROR) 1484 return (bp->b_error ? bp->b_error : EIO); 1485 return (0); 1486 } 1487 1488 /* 1489 * Used to aid in handling ftruncate() and non-trivial write-extend 1490 * operations on the NFS client side. Note that trivial write-extend 1491 * operations (appending with no write hole) are handled by nfs_write() 1492 * directly to avoid silly flushes. 1493 * 1494 * Truncation creates a number of special problems for NFS. We have to 1495 * throw away VM pages and buffer cache buffers that are beyond EOF, and 1496 * we have to properly handle VM pages or (potentially dirty) buffers 1497 * that straddle the truncation point. 1498 * 1499 * File extension no longer has an issue now that the buffer size is 1500 * fixed. When extending the intended overwrite area is specified 1501 * by (boff, bytes). This function uses the parameters to determine 1502 * what areas must be zerod. If there are no gaps we set B_CACHE. 1503 */ 1504 struct buf * 1505 nfs_meta_setsize(struct vnode *vp, struct thread *td, off_t nbase, 1506 int boff, int bytes) 1507 { 1508 1509 struct nfsnode *np = VTONFS(vp); 1510 off_t osize = np->n_size; 1511 off_t nsize; 1512 int biosize = vp->v_mount->mnt_stat.f_iosize; 1513 int error = 0; 1514 struct buf *bp; 1515 1516 nsize = nbase + boff + bytes; 1517 np->n_size = nsize; 1518 1519 if (nsize < osize) { 1520 /* 1521 * vtruncbuf() doesn't get the buffer overlapping the 1522 * truncation point, but it will invalidate pages in 1523 * that buffer and zero the appropriate byte range in 1524 * the page straddling EOF. 
1525 */ 1526 error = vtruncbuf(vp, nsize, biosize); 1527 1528 /* 1529 * NFS doesn't do a good job tracking changes in the EOF 1530 * so it may not revisit the buffer if the file is extended. 1531 * 1532 * After truncating just clear B_CACHE on the buffer 1533 * straddling EOF. If the buffer is dirty then clean 1534 * out the portion beyond the file EOF. 1535 */ 1536 if (error) { 1537 bp = NULL; 1538 } else { 1539 bp = nfs_getcacheblk(vp, nbase, biosize, td); 1540 if (bp->b_flags & B_DELWRI) { 1541 if (bp->b_dirtyoff > bp->b_bcount) 1542 bp->b_dirtyoff = bp->b_bcount; 1543 if (bp->b_dirtyend > bp->b_bcount) 1544 bp->b_dirtyend = bp->b_bcount; 1545 boff = (int)nsize & (biosize - 1); 1546 bzero(bp->b_data + boff, biosize - boff); 1547 } else if (nsize != nbase) { 1548 boff = (int)nsize & (biosize - 1); 1549 bzero(bp->b_data + boff, biosize - boff); 1550 } 1551 } 1552 } else { 1553 /* 1554 * The newly expanded portions of the buffer should already 1555 * be zero'd out if B_CACHE is set. If B_CACHE is not 1556 * set and the buffer is beyond osize we can safely zero it 1557 * and set B_CACHE to avoid issuing unnecessary degenerate 1558 * read rpcs. 1559 * 1560 * Don't do this if the caller is going to overwrite the 1561 * entire buffer anyway (and also don't set B_CACHE!). 1562 * This allows the caller to optimize the operation. 1563 */ 1564 KKASSERT(nsize >= 0); 1565 vnode_pager_setsize(vp, (vm_ooffset_t)nsize); 1566 1567 bp = nfs_getcacheblk(vp, nbase, biosize, td); 1568 if ((bp->b_flags & B_CACHE) == 0 && nbase >= osize && 1569 !(boff == 0 && bytes == biosize) 1570 ) { 1571 bzero(bp->b_data, biosize); 1572 bp->b_flags |= B_CACHE; 1573 bp->b_flags &= ~(B_ERROR | B_INVAL); 1574 } 1575 } 1576 return(bp); 1577 } 1578 1579 /* 1580 * Synchronous completion for nfs_doio. Call bpdone() with elseit=FALSE. 1581 * Caller is responsible for brelse()'ing the bp. 
1582 */ 1583 static void 1584 nfsiodone_sync(struct bio *bio) 1585 { 1586 bio->bio_flags = 0; 1587 bpdone(bio->bio_buf, 0); 1588 } 1589 1590 /* 1591 * nfs read rpc - BIO version 1592 */ 1593 void 1594 nfs_readrpc_bio(struct vnode *vp, struct bio *bio) 1595 { 1596 struct buf *bp = bio->bio_buf; 1597 u_int32_t *tl; 1598 struct nfsmount *nmp; 1599 int error = 0, len, tsiz; 1600 struct nfsm_info *info; 1601 1602 info = kmalloc(sizeof(*info), M_NFSREQ, M_WAITOK); 1603 info->mrep = NULL; 1604 info->v3 = NFS_ISV3(vp); 1605 1606 nmp = VFSTONFS(vp->v_mount); 1607 tsiz = bp->b_bcount; 1608 KKASSERT(tsiz <= nmp->nm_rsize); 1609 if (bio->bio_offset + tsiz > nmp->nm_maxfilesize) { 1610 error = EFBIG; 1611 goto nfsmout; 1612 } 1613 nfsstats.rpccnt[NFSPROC_READ]++; 1614 len = tsiz; 1615 nfsm_reqhead(info, vp, NFSPROC_READ, 1616 NFSX_FH(info->v3) + NFSX_UNSIGNED * 3); 1617 ERROROUT(nfsm_fhtom(info, vp)); 1618 tl = nfsm_build(info, NFSX_UNSIGNED * 3); 1619 if (info->v3) { 1620 txdr_hyper(bio->bio_offset, tl); 1621 *(tl + 2) = txdr_unsigned(len); 1622 } else { 1623 *tl++ = txdr_unsigned(bio->bio_offset); 1624 *tl++ = txdr_unsigned(len); 1625 *tl = 0; 1626 } 1627 info->bio = bio; 1628 info->done = nfs_readrpc_bio_done; 1629 nfsm_request_bio(info, vp, NFSPROC_READ, NULL, 1630 nfs_vpcred(vp, ND_READ)); 1631 return; 1632 nfsmout: 1633 kfree(info, M_NFSREQ); 1634 bp->b_error = error; 1635 bp->b_flags |= B_ERROR; 1636 biodone(bio); 1637 } 1638 1639 static void 1640 nfs_readrpc_bio_done(nfsm_info_t info) 1641 { 1642 struct nfsmount *nmp = VFSTONFS(info->vp->v_mount); 1643 struct bio *bio = info->bio; 1644 struct buf *bp = bio->bio_buf; 1645 u_int32_t *tl; 1646 int attrflag; 1647 int retlen; 1648 int eof; 1649 int error = 0; 1650 1651 KKASSERT(info->state == NFSM_STATE_DONE); 1652 1653 if (info->v3) { 1654 ERROROUT(nfsm_postop_attr(info, info->vp, &attrflag, 1655 NFS_LATTR_NOSHRINK)); 1656 NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED)); 1657 eof = fxdr_unsigned(int, *(tl + 1)); 1658 } else { 1659 ERROROUT(nfsm_loadattr(info, info->vp, NULL)); 1660 eof = 0; 1661 } 1662 NEGATIVEOUT(retlen = nfsm_strsiz(info, nmp->nm_rsize)); 1663 ERROROUT(nfsm_mtobio(info, bio, retlen)); 1664 m_freem(info->mrep); 1665 info->mrep = NULL; 1666 1667 /* 1668 * No error occured, if retlen is less then bcount and no EOF 1669 * and NFSv3 a zero-fill short read occured. 1670 * 1671 * For NFSv2 a short-read indicates EOF. 1672 */ 1673 if (retlen < bp->b_bcount && info->v3 && eof == 0) { 1674 bzero(bp->b_data + retlen, bp->b_bcount - retlen); 1675 retlen = bp->b_bcount; 1676 } 1677 1678 /* 1679 * If we hit an EOF we still zero-fill, but return the expected 1680 * b_resid anyway. This should normally not occur since async 1681 * BIOs are not used for read-before-write case. Races against 1682 * the server can cause it though and we don't want to leave 1683 * garbage in the buffer. 1684 */ 1685 if (retlen < bp->b_bcount) { 1686 bzero(bp->b_data + retlen, bp->b_bcount - retlen); 1687 } 1688 bp->b_resid = 0; 1689 /* bp->b_resid = bp->b_bcount - retlen; */ 1690 nfsmout: 1691 kfree(info, M_NFSREQ); 1692 if (error) { 1693 bp->b_error = error; 1694 bp->b_flags |= B_ERROR; 1695 } 1696 biodone(bio); 1697 } 1698 1699 /* 1700 * nfs write call - BIO version 1701 * 1702 * NOTE: Caller has already busied the I/O. 
1703 */ 1704 void 1705 nfs_writerpc_bio(struct vnode *vp, struct bio *bio) 1706 { 1707 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1708 struct nfsnode *np = VTONFS(vp); 1709 struct buf *bp = bio->bio_buf; 1710 u_int32_t *tl; 1711 int len; 1712 int iomode; 1713 int error = 0; 1714 struct nfsm_info *info; 1715 off_t offset; 1716 1717 /* 1718 * Setup for actual write. Just clean up the bio if there 1719 * is nothing to do. b_dirtyoff/end have already been staged 1720 * by the bp's pages getting busied. 1721 */ 1722 if (bio->bio_offset + bp->b_dirtyend > np->n_size) 1723 bp->b_dirtyend = np->n_size - bio->bio_offset; 1724 1725 if (bp->b_dirtyend <= bp->b_dirtyoff) { 1726 bp->b_resid = 0; 1727 biodone(bio); 1728 return; 1729 } 1730 len = bp->b_dirtyend - bp->b_dirtyoff; 1731 offset = bio->bio_offset + bp->b_dirtyoff; 1732 if (offset + len > nmp->nm_maxfilesize) { 1733 bp->b_flags |= B_ERROR; 1734 bp->b_error = EFBIG; 1735 biodone(bio); 1736 return; 1737 } 1738 bp->b_resid = len; 1739 nfsstats.write_bios++; 1740 1741 info = kmalloc(sizeof(*info), M_NFSREQ, M_WAITOK); 1742 info->mrep = NULL; 1743 info->v3 = NFS_ISV3(vp); 1744 info->info_writerpc.must_commit = 0; 1745 if ((bp->b_flags & (B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == 0) 1746 iomode = NFSV3WRITE_UNSTABLE; 1747 else 1748 iomode = NFSV3WRITE_FILESYNC; 1749 1750 KKASSERT(len <= nmp->nm_wsize); 1751 1752 nfsstats.rpccnt[NFSPROC_WRITE]++; 1753 nfsm_reqhead(info, vp, NFSPROC_WRITE, 1754 NFSX_FH(info->v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len)); 1755 ERROROUT(nfsm_fhtom(info, vp)); 1756 if (info->v3) { 1757 tl = nfsm_build(info, 5 * NFSX_UNSIGNED); 1758 txdr_hyper(offset, tl); 1759 tl += 2; 1760 *tl++ = txdr_unsigned(len); 1761 *tl++ = txdr_unsigned(iomode); 1762 *tl = txdr_unsigned(len); 1763 } else { 1764 u_int32_t x; 1765 1766 tl = nfsm_build(info, 4 * NFSX_UNSIGNED); 1767 /* Set both "begin" and "current" to non-garbage. */ 1768 x = txdr_unsigned((u_int32_t)offset); 1769 *tl++ = x; /* "begin offset" */ 1770 *tl++ = x; /* "current offset" */ 1771 x = txdr_unsigned(len); 1772 *tl++ = x; /* total to this offset */ 1773 *tl = x; /* size of this write */ 1774 } 1775 ERROROUT(nfsm_biotom(info, bio, bp->b_dirtyoff, len)); 1776 info->bio = bio; 1777 info->done = nfs_writerpc_bio_done; 1778 nfsm_request_bio(info, vp, NFSPROC_WRITE, NULL, 1779 nfs_vpcred(vp, ND_WRITE)); 1780 return; 1781 nfsmout: 1782 kfree(info, M_NFSREQ); 1783 bp->b_error = error; 1784 bp->b_flags |= B_ERROR; 1785 biodone(bio); 1786 } 1787 1788 static void 1789 nfs_writerpc_bio_done(nfsm_info_t info) 1790 { 1791 struct nfsmount *nmp = VFSTONFS(info->vp->v_mount); 1792 struct nfsnode *np = VTONFS(info->vp); 1793 struct bio *bio = info->bio; 1794 struct buf *bp = bio->bio_buf; 1795 int wccflag = NFSV3_WCCRATTR; 1796 int iomode = NFSV3WRITE_FILESYNC; 1797 int commit; 1798 int rlen; 1799 int error; 1800 int len = bp->b_resid; /* b_resid was set to shortened length */ 1801 u_int32_t *tl; 1802 1803 if (info->v3) { 1804 /* 1805 * The write RPC returns a before and after mtime. The 1806 * nfsm_wcc_data() macro checks the before n_mtime 1807 * against the before time and stores the after time 1808 * in the nfsnode's cached vattr and n_mtime field. 1809 * The NRMODIFIED bit will be set if the before 1810 * time did not match the original mtime. 
1811 */ 1812 wccflag = NFSV3_WCCCHK; 1813 ERROROUT(nfsm_wcc_data(info, info->vp, &wccflag)); 1814 if (error == 0) { 1815 NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED + NFSX_V3WRITEVERF)); 1816 rlen = fxdr_unsigned(int, *tl++); 1817 if (rlen == 0) { 1818 error = NFSERR_IO; 1819 m_freem(info->mrep); 1820 info->mrep = NULL; 1821 goto nfsmout; 1822 } else if (rlen < len) { 1823 #if 0 1824 /* 1825 * XXX what do we do here? 1826 */ 1827 backup = len - rlen; 1828 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base - backup; 1829 uiop->uio_iov->iov_len += backup; 1830 uiop->uio_offset -= backup; 1831 uiop->uio_resid += backup; 1832 len = rlen; 1833 #endif 1834 } 1835 commit = fxdr_unsigned(int, *tl++); 1836 1837 /* 1838 * Return the lowest committment level 1839 * obtained by any of the RPCs. 1840 */ 1841 if (iomode == NFSV3WRITE_FILESYNC) 1842 iomode = commit; 1843 else if (iomode == NFSV3WRITE_DATASYNC && 1844 commit == NFSV3WRITE_UNSTABLE) 1845 iomode = commit; 1846 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){ 1847 bcopy(tl, (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF); 1848 nmp->nm_state |= NFSSTA_HASWRITEVERF; 1849 } else if (bcmp(tl, nmp->nm_verf, NFSX_V3WRITEVERF)) { 1850 info->info_writerpc.must_commit = 1; 1851 bcopy(tl, (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF); 1852 } 1853 } 1854 } else { 1855 ERROROUT(nfsm_loadattr(info, info->vp, NULL)); 1856 } 1857 m_freem(info->mrep); 1858 info->mrep = NULL; 1859 len = 0; 1860 nfsmout: 1861 if (info->vp->v_mount->mnt_flag & MNT_ASYNC) 1862 iomode = NFSV3WRITE_FILESYNC; 1863 bp->b_resid = len; 1864 1865 /* 1866 * End of RPC. Now clean up the bp. 1867 * 1868 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try 1869 * to cluster the buffers needing commit. This will allow 1870 * the system to submit a single commit rpc for the whole 1871 * cluster. We can do this even if the buffer is not 100% 1872 * dirty (relative to the NFS blocksize), so we optimize the 1873 * append-to-file-case. 1874 * 1875 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be 1876 * cleared because write clustering only works for commit 1877 * rpc's, not for the data portion of the write). 1878 */ 1879 if (!error && iomode == NFSV3WRITE_UNSTABLE) { 1880 bp->b_flags |= B_NEEDCOMMIT; 1881 if (bp->b_dirtyoff == 0 && bp->b_dirtyend == bp->b_bcount) 1882 bp->b_flags |= B_CLUSTEROK; 1883 } else { 1884 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 1885 } 1886 1887 /* 1888 * For an interrupted write, the buffer is still valid 1889 * and the write hasn't been pushed to the server yet, 1890 * so we can't set B_ERROR and report the interruption 1891 * by setting B_EINTR. For the async case, B_EINTR 1892 * is not relevant, so the rpc attempt is essentially 1893 * a noop. For the case of a V3 write rpc not being 1894 * committed to stable storage, the block is still 1895 * dirty and requires either a commit rpc or another 1896 * write rpc with iomode == NFSV3WRITE_FILESYNC before 1897 * the block is reused. This is indicated by setting 1898 * the B_DELWRI and B_NEEDCOMMIT flags. 1899 * 1900 * If the buffer is marked B_PAGING, it does not reside on 1901 * the vp's paging queues so we cannot call bdirty(). The 1902 * bp in this case is not an NFS cache block so we should 1903 * be safe. 
XXX 1904 */ 1905 if (error == EINTR || (!error && (bp->b_flags & B_NEEDCOMMIT))) { 1906 crit_enter(); 1907 bp->b_flags &= ~(B_INVAL|B_NOCACHE); 1908 if ((bp->b_flags & B_PAGING) == 0) 1909 bdirty(bp); 1910 if (error) 1911 bp->b_flags |= B_EINTR; 1912 crit_exit(); 1913 } else { 1914 if (error) { 1915 bp->b_flags |= B_ERROR; 1916 bp->b_error = np->n_error = error; 1917 np->n_flag |= NWRITEERR; 1918 } 1919 bp->b_dirtyoff = bp->b_dirtyend = 0; 1920 } 1921 if (info->info_writerpc.must_commit) 1922 nfs_clearcommit(info->vp->v_mount); 1923 kfree(info, M_NFSREQ); 1924 if (error) { 1925 bp->b_flags |= B_ERROR; 1926 bp->b_error = error; 1927 } 1928 biodone(bio); 1929 } 1930 1931 /* 1932 * Nfs Version 3 commit rpc - BIO version 1933 * 1934 * This function issues the commit rpc and will chain to a write 1935 * rpc if necessary. 1936 */ 1937 void 1938 nfs_commitrpc_bio(struct vnode *vp, struct bio *bio) 1939 { 1940 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1941 struct buf *bp = bio->bio_buf; 1942 struct nfsm_info *info; 1943 int error = 0; 1944 u_int32_t *tl; 1945 1946 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) { 1947 bp->b_dirtyoff = bp->b_dirtyend = 0; 1948 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 1949 bp->b_resid = 0; 1950 biodone(bio); 1951 return; 1952 } 1953 1954 info = kmalloc(sizeof(*info), M_NFSREQ, M_WAITOK); 1955 info->mrep = NULL; 1956 info->v3 = 1; 1957 1958 nfsstats.rpccnt[NFSPROC_COMMIT]++; 1959 nfsm_reqhead(info, vp, NFSPROC_COMMIT, NFSX_FH(1)); 1960 ERROROUT(nfsm_fhtom(info, vp)); 1961 tl = nfsm_build(info, 3 * NFSX_UNSIGNED); 1962 txdr_hyper(bio->bio_offset + bp->b_dirtyoff, tl); 1963 tl += 2; 1964 *tl = txdr_unsigned(bp->b_dirtyend - bp->b_dirtyoff); 1965 info->bio = bio; 1966 info->done = nfs_commitrpc_bio_done; 1967 nfsm_request_bio(info, vp, NFSPROC_COMMIT, NULL, 1968 nfs_vpcred(vp, ND_WRITE)); 1969 return; 1970 nfsmout: 1971 /* 1972 * Chain to write RPC on (early) error 1973 */ 1974 kfree(info, M_NFSREQ); 1975 nfs_writerpc_bio(vp, bio); 1976 } 1977 1978 static void 1979 nfs_commitrpc_bio_done(nfsm_info_t info) 1980 { 1981 struct nfsmount *nmp = VFSTONFS(info->vp->v_mount); 1982 struct bio *bio = info->bio; 1983 struct buf *bp = bio->bio_buf; 1984 u_int32_t *tl; 1985 int wccflag = NFSV3_WCCRATTR; 1986 int error = 0; 1987 1988 ERROROUT(nfsm_wcc_data(info, info->vp, &wccflag)); 1989 if (error == 0) { 1990 NULLOUT(tl = nfsm_dissect(info, NFSX_V3WRITEVERF)); 1991 if (bcmp(nmp->nm_verf, tl, NFSX_V3WRITEVERF)) { 1992 bcopy(tl, nmp->nm_verf, NFSX_V3WRITEVERF); 1993 error = NFSERR_STALEWRITEVERF; 1994 } 1995 } 1996 m_freem(info->mrep); 1997 info->mrep = NULL; 1998 1999 /* 2000 * On completion we must chain to a write bio if an 2001 * error occurred. 2002 */ 2003 nfsmout: 2004 kfree(info, M_NFSREQ); 2005 if (error == 0) { 2006 bp->b_dirtyoff = bp->b_dirtyend = 0; 2007 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 2008 bp->b_resid = 0; 2009 biodone(bio); 2010 } else { 2011 nfs_writerpc_bio(info->vp, bio); 2012 } 2013 } 2014 2015