/*	$OpenBSD: nfs_vnops.c,v 1.138 2011/07/09 00:24:44 beck Exp $	*/
/*	$NetBSD: nfs_vnops.c,v 1.62.4.1 1996/07/08 20:26:52 jtc Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_vnops.c	8.16 (Berkeley) 5/27/95
 */


/*
 * vnode op calls for Sun NFS version 2 and 3
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/poll.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/conf.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/hash.h>
#include <sys/queue.h>
#include <sys/specdev.h>

#include <uvm/uvm_extern.h>

#include <miscfs/fifofs/fifo.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsmount.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfs_var.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>

#include <dev/rndvar.h>

void nfs_cache_enter(struct vnode *, struct vnode *, struct componentname *);

/* Global vfs data structures for nfs.
*/ 87 struct vops nfs_vops = { 88 .vop_lookup = nfs_lookup, 89 .vop_create = nfs_create, 90 .vop_mknod = nfs_mknod, 91 .vop_open = nfs_open, 92 .vop_close = nfs_close, 93 .vop_access = nfs_access, 94 .vop_getattr = nfs_getattr, 95 .vop_setattr = nfs_setattr, 96 .vop_read = nfs_read, 97 .vop_write = nfs_write, 98 .vop_ioctl = nfs_ioctl, 99 .vop_poll = nfs_poll, 100 .vop_kqfilter = nfs_kqfilter, 101 .vop_revoke = vop_generic_revoke, 102 .vop_fsync = nfs_fsync, 103 .vop_remove = nfs_remove, 104 .vop_link = nfs_link, 105 .vop_rename = nfs_rename, 106 .vop_mkdir = nfs_mkdir, 107 .vop_rmdir = nfs_rmdir, 108 .vop_symlink = nfs_symlink, 109 .vop_readdir = nfs_readdir, 110 .vop_readlink = nfs_readlink, 111 .vop_abortop = vop_generic_abortop, 112 .vop_inactive = nfs_inactive, 113 .vop_reclaim = nfs_reclaim, 114 .vop_lock = vop_generic_lock, /* XXX: beck@ must fix this. */ 115 .vop_unlock = vop_generic_unlock, 116 .vop_bmap = nfs_bmap, 117 .vop_strategy = nfs_strategy, 118 .vop_print = nfs_print, 119 .vop_islocked = vop_generic_islocked, 120 .vop_pathconf = nfs_pathconf, 121 .vop_advlock = nfs_advlock, 122 .vop_bwrite = nfs_bwrite 123 }; 124 125 /* Special device vnode ops. */ 126 struct vops nfs_specvops = { 127 .vop_close = nfsspec_close, 128 .vop_access = nfsspec_access, 129 .vop_getattr = nfs_getattr, 130 .vop_setattr = nfs_setattr, 131 .vop_read = nfsspec_read, 132 .vop_write = nfsspec_write, 133 .vop_fsync = nfs_fsync, 134 .vop_inactive = nfs_inactive, 135 .vop_reclaim = nfs_reclaim, 136 .vop_lock = vop_generic_lock, 137 .vop_unlock = vop_generic_unlock, 138 .vop_print = nfs_print, 139 .vop_islocked = vop_generic_islocked, 140 141 /* XXX: Keep in sync with spec_vops. */ 142 .vop_lookup = vop_generic_lookup, 143 .vop_create = spec_badop, 144 .vop_mknod = spec_badop, 145 .vop_open = spec_open, 146 .vop_ioctl = spec_ioctl, 147 .vop_poll = spec_poll, 148 .vop_kqfilter = spec_kqfilter, 149 .vop_revoke = vop_generic_revoke, 150 .vop_remove = spec_badop, 151 .vop_link = spec_badop, 152 .vop_rename = spec_badop, 153 .vop_mkdir = spec_badop, 154 .vop_rmdir = spec_badop, 155 .vop_symlink = spec_badop, 156 .vop_readdir = spec_badop, 157 .vop_readlink = spec_badop, 158 .vop_abortop = spec_badop, 159 .vop_bmap = vop_generic_bmap, 160 .vop_strategy = spec_strategy, 161 .vop_pathconf = spec_pathconf, 162 .vop_advlock = spec_advlock, 163 .vop_bwrite = vop_generic_bwrite, 164 }; 165 166 #ifdef FIFO 167 struct vops nfs_fifovops = { 168 .vop_close = nfsfifo_close, 169 .vop_access = nfsspec_access, 170 .vop_getattr = nfs_getattr, 171 .vop_setattr = nfs_setattr, 172 .vop_read = nfsfifo_read, 173 .vop_write = nfsfifo_write, 174 .vop_fsync = nfs_fsync, 175 .vop_inactive = nfs_inactive, 176 .vop_reclaim = nfsfifo_reclaim, 177 .vop_lock = vop_generic_lock, 178 .vop_unlock = vop_generic_unlock, 179 .vop_print = nfs_print, 180 .vop_islocked = vop_generic_islocked, 181 .vop_bwrite = vop_generic_bwrite, 182 183 /* XXX: Keep in sync with fifo_vops. 
*/ 184 .vop_lookup = vop_generic_lookup, 185 .vop_create = fifo_badop, 186 .vop_mknod = fifo_badop, 187 .vop_open = fifo_open, 188 .vop_ioctl = fifo_ioctl, 189 .vop_poll = fifo_poll, 190 .vop_kqfilter = fifo_kqfilter, 191 .vop_revoke = vop_generic_revoke, 192 .vop_remove = fifo_badop, 193 .vop_link = fifo_badop, 194 .vop_rename = fifo_badop, 195 .vop_mkdir = fifo_badop, 196 .vop_rmdir = fifo_badop, 197 .vop_symlink = fifo_badop, 198 .vop_readdir = fifo_badop, 199 .vop_readlink = fifo_badop, 200 .vop_abortop = fifo_badop, 201 .vop_bmap = vop_generic_bmap, 202 .vop_strategy = fifo_badop, 203 .vop_pathconf = fifo_pathconf, 204 .vop_advlock = fifo_advlock, 205 }; 206 #endif /* FIFO */ 207 208 /* 209 * Global variables 210 */ 211 extern u_int32_t nfs_true, nfs_false; 212 extern u_int32_t nfs_xdrneg1; 213 extern struct nfsstats nfsstats; 214 extern nfstype nfsv3_type[9]; 215 int nfs_numasync = 0; 216 217 void 218 nfs_cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 219 { 220 struct nfsnode *np; 221 222 if (vp != NULL) { 223 np = VTONFS(vp); 224 np->n_ctime = np->n_vattr.va_ctime.tv_sec; 225 } else { 226 np = VTONFS(dvp); 227 if (!np->n_ctime) 228 np->n_ctime = np->n_vattr.va_mtime.tv_sec; 229 } 230 231 cache_enter(dvp, vp, cnp); 232 } 233 234 /* 235 * nfs null call from vfs. 236 */ 237 int 238 nfs_null(struct vnode *vp, struct ucred *cred, struct proc *procp) 239 { 240 struct nfsm_info info; 241 int error = 0; 242 243 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(0); 244 error = nfs_request(vp, NFSPROC_NULL, &info); 245 m_freem(info.nmi_mrep); 246 return (error); 247 } 248 249 /* 250 * nfs access vnode op. 251 * For nfs version 2, just return ok. File accesses may fail later. 252 * For nfs version 3, use the access rpc to check accessibility. If file modes 253 * are changed on the server, accesses might still fail later. 254 */ 255 int 256 nfs_access(void *v) 257 { 258 struct vop_access_args *ap = v; 259 struct vnode *vp = ap->a_vp; 260 u_int32_t *tl; 261 int32_t t1; 262 caddr_t cp2; 263 int error = 0, attrflag; 264 u_int32_t mode, rmode; 265 int v3 = NFS_ISV3(vp); 266 int cachevalid; 267 struct nfsm_info info; 268 269 struct nfsnode *np = VTONFS(vp); 270 271 /* 272 * Disallow write attempts on filesystems mounted read-only; 273 * unless the file is a socket, fifo, or a block or character 274 * device resident on the filesystem. 275 */ 276 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 277 switch (vp->v_type) { 278 case VREG: 279 case VDIR: 280 case VLNK: 281 return (EROFS); 282 default: 283 break; 284 } 285 } 286 287 /* 288 * Check access cache first. If a request has been made for this uid 289 * shortly before, use the cached result. 290 */ 291 cachevalid = (np->n_accstamp != -1 && 292 (time_second - np->n_accstamp) < nfs_attrtimeo(np) && 293 np->n_accuid == ap->a_cred->cr_uid); 294 295 if (cachevalid) { 296 if (!np->n_accerror) { 297 if ((np->n_accmode & ap->a_mode) == ap->a_mode) 298 return (np->n_accerror); 299 } else if ((np->n_accmode & ap->a_mode) == np->n_accmode) 300 return (np->n_accerror); 301 } 302 303 /* 304 * For nfs v3, do an access rpc, otherwise you are stuck emulating 305 * ufs_access() locally using the vattr. This may not be correct, 306 * since the server may apply other access criteria such as 307 * client uid-->server uid mapping that we do not know about, but 308 * this is better than just returning anything that is lying about 309 * in the cache. 
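 *
 * An illustrative sketch (an editorial note, not normative protocol
 * text): the v3 path below packs the requested rights into a single
 * XDR word and insists that the reply cover every bit that was asked
 * for.  For a regular file opened with VREAD|VWRITE this amounts to
 *
 *	mode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND;
 *	...
 *	if ((rmode & mode) != mode)
 *		error = EACCES;
 *
 * so a server that grants only a subset of the requested access is
 * treated as denying it outright.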
310 */ 311 if (v3) { 312 nfsstats.rpccnt[NFSPROC_ACCESS]++; 313 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_UNSIGNED); 314 nfsm_fhtom(&info, vp, v3); 315 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 316 if (ap->a_mode & VREAD) 317 mode = NFSV3ACCESS_READ; 318 else 319 mode = 0; 320 if (vp->v_type == VDIR) { 321 if (ap->a_mode & VWRITE) 322 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND | 323 NFSV3ACCESS_DELETE); 324 if (ap->a_mode & VEXEC) 325 mode |= NFSV3ACCESS_LOOKUP; 326 } else { 327 if (ap->a_mode & VWRITE) 328 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND); 329 if (ap->a_mode & VEXEC) 330 mode |= NFSV3ACCESS_EXECUTE; 331 } 332 *tl = txdr_unsigned(mode); 333 334 info.nmi_procp = ap->a_p; 335 info.nmi_cred = ap->a_cred; 336 error = nfs_request(vp, NFSPROC_ACCESS, &info); 337 338 nfsm_postop_attr(vp, attrflag); 339 if (error) { 340 m_freem(info.nmi_mrep); 341 goto nfsmout; 342 } 343 344 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 345 rmode = fxdr_unsigned(u_int32_t, *tl); 346 /* 347 * The NFS V3 spec does not clarify whether or not 348 * the returned access bits can be a superset of 349 * the ones requested, so... 350 */ 351 if ((rmode & mode) != mode) 352 error = EACCES; 353 354 m_freem(info.nmi_mrep); 355 } else 356 return (nfsspec_access(ap)); 357 358 359 /* 360 * If we got the same result as for a previous, different request, OR 361 * it in. Don't update the timestamp in that case. 362 */ 363 if (!error || error == EACCES) { 364 if (cachevalid && np->n_accstamp != -1 && 365 error == np->n_accerror) { 366 if (!error) 367 np->n_accmode |= ap->a_mode; 368 else { 369 if ((np->n_accmode & ap->a_mode) == ap->a_mode) 370 np->n_accmode = ap->a_mode; 371 } 372 } else { 373 np->n_accstamp = time_second; 374 np->n_accuid = ap->a_cred->cr_uid; 375 np->n_accmode = ap->a_mode; 376 np->n_accerror = error; 377 } 378 } 379 nfsmout: 380 return (error); 381 } 382 383 /* 384 * nfs open vnode op 385 * Check to see if the type is ok 386 * and that deletion is not in progress. 387 * For paged in text files, you will need to flush the page cache 388 * if consistency is lost. 389 */ 390 int 391 nfs_open(void *v) 392 { 393 struct vop_open_args *ap = v; 394 struct vnode *vp = ap->a_vp; 395 struct nfsnode *np = VTONFS(vp); 396 struct vattr vattr; 397 int error; 398 399 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) { 400 #ifdef DIAGNOSTIC 401 printf("open eacces vtyp=%d\n",vp->v_type); 402 #endif 403 return (EACCES); 404 } 405 406 /* 407 * Initialize read and write creds here, for swapfiles 408 * and other paths that don't set the creds themselves. 
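 *
 * The old credential, if any, is dropped with crfree() and the new one
 * pinned with crhold(), so that later asynchronous I/O issued without a
 * credential in hand (nfs_readrpc() and nfs_writerpc() use n_rcred and
 * n_wcred) still has something valid to present to the server.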
409 */ 410 411 if (ap->a_mode & FREAD) { 412 if (np->n_rcred) { 413 crfree(np->n_rcred); 414 } 415 np->n_rcred = ap->a_cred; 416 crhold(np->n_rcred); 417 } 418 if (ap->a_mode & FWRITE) { 419 if (np->n_wcred) { 420 crfree(np->n_wcred); 421 } 422 np->n_wcred = ap->a_cred; 423 crhold(np->n_wcred); 424 } 425 426 if (np->n_flag & NMODIFIED) { 427 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p); 428 if (error == EINTR) 429 return (error); 430 uvm_vnp_uncache(vp); 431 NFS_INVALIDATE_ATTRCACHE(np); 432 if (vp->v_type == VDIR) 433 np->n_direofoffset = 0; 434 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); 435 if (error) 436 return (error); 437 np->n_mtime = vattr.va_mtime; 438 } else { 439 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); 440 if (error) 441 return (error); 442 if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) { 443 if (vp->v_type == VDIR) 444 np->n_direofoffset = 0; 445 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p); 446 if (error == EINTR) 447 return (error); 448 uvm_vnp_uncache(vp); 449 np->n_mtime = vattr.va_mtime; 450 } 451 } 452 /* For open/close consistency. */ 453 NFS_INVALIDATE_ATTRCACHE(np); 454 return (0); 455 } 456 457 /* 458 * nfs close vnode op 459 * What an NFS client should do upon close after writing is a debatable issue. 460 * Most NFS clients push delayed writes to the server upon close, basically for 461 * two reasons: 462 * 1 - So that any write errors may be reported back to the client process 463 * doing the close system call. By far the two most likely errors are 464 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure. 465 * 2 - To put a worst case upper bound on cache inconsistency between 466 * multiple clients for the file. 467 * There is also a consistency problem for Version 2 of the protocol w.r.t. 468 * not being able to tell if other clients are writing a file concurrently, 469 * since there is no way of knowing if the changed modify time in the reply 470 * is only due to the write for this client. 471 * (NFS Version 3 provides weak cache consistency data in the reply that 472 * should be sufficient to detect and handle this case.) 473 * 474 * The current code does the following: 475 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers 476 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate 477 * or commit them (this satisfies 1 and 2 except for the 478 * case where the server crashes after this close but 479 * before the commit RPC, which is felt to be "good 480 * enough". Changing the last argument to nfs_flush() to 481 * a 1 would force a commit operation, if it is felt a 482 * commit is necessary now. 483 */ 484 int 485 nfs_close(void *v) 486 { 487 struct vop_close_args *ap = v; 488 struct vnode *vp = ap->a_vp; 489 struct nfsnode *np = VTONFS(vp); 490 int error = 0; 491 492 if (vp->v_type == VREG) { 493 if (np->n_flag & NMODIFIED) { 494 if (NFS_ISV3(vp)) { 495 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 0); 496 np->n_flag &= ~NMODIFIED; 497 } else 498 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p); 499 NFS_INVALIDATE_ATTRCACHE(np); 500 } 501 if (np->n_flag & NWRITEERR) { 502 np->n_flag &= ~NWRITEERR; 503 error = np->n_error; 504 } 505 } 506 return (error); 507 } 508 509 /* 510 * nfs getattr call from vfs. 
511 */ 512 int 513 nfs_getattr(void *v) 514 { 515 struct vop_getattr_args *ap = v; 516 struct vnode *vp = ap->a_vp; 517 struct nfsnode *np = VTONFS(vp); 518 struct nfsm_info info; 519 int32_t t1; 520 int error = 0; 521 522 info.nmi_v3 = NFS_ISV3(vp); 523 524 /* 525 * Update local times for special files. 526 */ 527 if (np->n_flag & (NACC | NUPD)) 528 np->n_flag |= NCHG; 529 /* 530 * First look in the cache. 531 */ 532 if (nfs_getattrcache(vp, ap->a_vap) == 0) 533 return (0); 534 535 nfsstats.rpccnt[NFSPROC_GETATTR]++; 536 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)); 537 nfsm_fhtom(&info, vp, info.nmi_v3); 538 info.nmi_procp = ap->a_p; 539 info.nmi_cred = ap->a_cred; 540 error = nfs_request(vp, NFSPROC_GETATTR, &info); 541 if (!error) 542 nfsm_loadattr(vp, ap->a_vap); 543 m_freem(info.nmi_mrep); 544 nfsmout: 545 return (error); 546 } 547 548 /* 549 * nfs setattr call. 550 */ 551 int 552 nfs_setattr(void *v) 553 { 554 struct vop_setattr_args *ap = v; 555 struct vnode *vp = ap->a_vp; 556 struct nfsnode *np = VTONFS(vp); 557 struct vattr *vap = ap->a_vap; 558 int hint = NOTE_ATTRIB; 559 int error = 0; 560 u_quad_t tsize = 0; 561 562 /* 563 * Setting of flags is not supported. 564 */ 565 if (vap->va_flags != VNOVAL) 566 return (EOPNOTSUPP); 567 568 /* 569 * Disallow write attempts if the filesystem is mounted read-only. 570 */ 571 if ((vap->va_uid != (uid_t)VNOVAL || 572 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || 573 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) && 574 (vp->v_mount->mnt_flag & MNT_RDONLY)) 575 return (EROFS); 576 if (vap->va_size != VNOVAL) { 577 switch (vp->v_type) { 578 case VDIR: 579 return (EISDIR); 580 case VCHR: 581 case VBLK: 582 case VSOCK: 583 case VFIFO: 584 if (vap->va_mtime.tv_sec == VNOVAL && 585 vap->va_atime.tv_sec == VNOVAL && 586 vap->va_mode == (mode_t)VNOVAL && 587 vap->va_uid == (uid_t)VNOVAL && 588 vap->va_gid == (gid_t)VNOVAL) 589 return (0); 590 vap->va_size = VNOVAL; 591 break; 592 default: 593 /* 594 * Disallow write attempts if the filesystem is 595 * mounted read-only. 596 */ 597 if (vp->v_mount->mnt_flag & MNT_RDONLY) 598 return (EROFS); 599 if (vap->va_size == 0) 600 error = nfs_vinvalbuf(vp, 0, 601 ap->a_cred, ap->a_p); 602 else 603 error = nfs_vinvalbuf(vp, V_SAVE, 604 ap->a_cred, ap->a_p); 605 if (error) 606 return (error); 607 tsize = np->n_size; 608 np->n_size = np->n_vattr.va_size = vap->va_size; 609 uvm_vnp_setsize(vp, np->n_size); 610 }; 611 } else if ((vap->va_mtime.tv_sec != VNOVAL || 612 vap->va_atime.tv_sec != VNOVAL) && 613 vp->v_type == VREG && 614 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, 615 ap->a_p)) == EINTR) 616 return (error); 617 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p); 618 if (error && vap->va_size != VNOVAL) { 619 np->n_size = np->n_vattr.va_size = tsize; 620 uvm_vnp_setsize(vp, np->n_size); 621 } 622 623 if (vap->va_size != VNOVAL && vap->va_size < tsize) 624 hint |= NOTE_TRUNCATE; 625 626 VN_KNOTE(vp, hint); /* XXX setattrrpc? */ 627 628 return (error); 629 } 630 631 /* 632 * Do an nfs setattr rpc. 
633 */ 634 int 635 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred, 636 struct proc *procp) 637 { 638 struct nfsv2_sattr *sp; 639 struct nfsm_info info; 640 int32_t t1; 641 caddr_t cp2; 642 u_int32_t *tl; 643 int error = 0, wccflag = NFSV3_WCCRATTR; 644 int v3 = NFS_ISV3(vp); 645 646 info.nmi_v3 = NFS_ISV3(vp); 647 648 nfsstats.rpccnt[NFSPROC_SETATTR]++; 649 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_SATTR(v3)); 650 nfsm_fhtom(&info, vp, v3); 651 652 if (info.nmi_v3) { 653 nfsm_v3attrbuild(&info.nmi_mb, vap, 1); 654 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 655 *tl = nfs_false; 656 } else { 657 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 658 if (vap->va_mode == (mode_t)VNOVAL) 659 sp->sa_mode = nfs_xdrneg1; 660 else 661 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode); 662 if (vap->va_uid == (uid_t)VNOVAL) 663 sp->sa_uid = nfs_xdrneg1; 664 else 665 sp->sa_uid = txdr_unsigned(vap->va_uid); 666 if (vap->va_gid == (gid_t)VNOVAL) 667 sp->sa_gid = nfs_xdrneg1; 668 else 669 sp->sa_gid = txdr_unsigned(vap->va_gid); 670 sp->sa_size = txdr_unsigned(vap->va_size); 671 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 672 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 673 } 674 675 info.nmi_procp = procp; 676 info.nmi_cred = cred; 677 error = nfs_request(vp, NFSPROC_SETATTR, &info); 678 679 if (info.nmi_v3) 680 nfsm_wcc_data(vp, wccflag); 681 else if (error == 0) 682 nfsm_loadattr(vp, NULL); 683 684 m_freem(info.nmi_mrep); 685 nfsmout: 686 return (error); 687 } 688 689 /* 690 * nfs lookup call, one step at a time... 691 * First look in cache 692 * If not found, unlock the directory nfsnode and do the rpc 693 */ 694 int 695 nfs_lookup(void *v) 696 { 697 struct vop_lookup_args *ap = v; 698 struct componentname *cnp = ap->a_cnp; 699 struct vnode *dvp = ap->a_dvp; 700 struct vnode **vpp = ap->a_vpp; 701 struct proc *p = cnp->cn_proc; 702 struct nfsm_info info; 703 int flags; 704 struct vnode *newvp; 705 u_int32_t *tl; 706 int32_t t1; 707 struct nfsmount *nmp; 708 caddr_t cp2; 709 long len; 710 nfsfh_t *fhp; 711 struct nfsnode *np; 712 int lockparent, wantparent, error = 0, attrflag, fhsize; 713 714 info.nmi_v3 = NFS_ISV3(dvp); 715 716 cnp->cn_flags &= ~PDIRUNLOCK; 717 flags = cnp->cn_flags; 718 719 *vpp = NULLVP; 720 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 721 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 722 return (EROFS); 723 if (dvp->v_type != VDIR) 724 return (ENOTDIR); 725 lockparent = flags & LOCKPARENT; 726 wantparent = flags & (LOCKPARENT|WANTPARENT); 727 nmp = VFSTONFS(dvp->v_mount); 728 np = VTONFS(dvp); 729 730 /* 731 * Before tediously performing a linear scan of the directory, 732 * check the name cache to see if the directory/name pair 733 * we are looking for is known already. 734 * If the directory/name pair is found in the name cache, 735 * we have to ensure the directory has not changed from 736 * the time the cache entry has been created. If it has, 737 * the cache entry has to be ignored. 
738 */ 739 if ((error = cache_lookup(dvp, vpp, cnp)) >= 0) { 740 struct vattr vattr; 741 int err2; 742 743 if (error && error != ENOENT) { 744 *vpp = NULLVP; 745 return (error); 746 } 747 748 if (cnp->cn_flags & PDIRUNLOCK) { 749 err2 = vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p); 750 if (err2 != 0) { 751 *vpp = NULLVP; 752 return (err2); 753 } 754 cnp->cn_flags &= ~PDIRUNLOCK; 755 } 756 757 err2 = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_proc); 758 if (err2 != 0) { 759 if (error == 0) { 760 if (*vpp != dvp) 761 vput(*vpp); 762 else 763 vrele(*vpp); 764 } 765 *vpp = NULLVP; 766 return (err2); 767 } 768 769 if (error == ENOENT) { 770 if (!VOP_GETATTR(dvp, &vattr, cnp->cn_cred, 771 cnp->cn_proc) && vattr.va_mtime.tv_sec == 772 VTONFS(dvp)->n_ctime) 773 return (ENOENT); 774 cache_purge(dvp); 775 np->n_ctime = 0; 776 goto dorpc; 777 } 778 779 newvp = *vpp; 780 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_proc) 781 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) 782 { 783 nfsstats.lookupcache_hits++; 784 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 785 cnp->cn_flags |= SAVENAME; 786 if ((!lockparent || !(flags & ISLASTCN)) && 787 newvp != dvp) 788 VOP_UNLOCK(dvp, 0, p); 789 return (0); 790 } 791 cache_purge(newvp); 792 if (newvp != dvp) 793 vput(newvp); 794 else 795 vrele(newvp); 796 *vpp = NULLVP; 797 } 798 dorpc: 799 error = 0; 800 newvp = NULLVP; 801 nfsstats.lookupcache_misses++; 802 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 803 len = cnp->cn_namelen; 804 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 805 NFSX_UNSIGNED + nfsm_rndup(len)); 806 nfsm_fhtom(&info, dvp, info.nmi_v3); 807 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 808 809 info.nmi_procp = cnp->cn_proc; 810 info.nmi_cred = cnp->cn_cred; 811 error = nfs_request(dvp, NFSPROC_LOOKUP, &info); 812 813 if (error) { 814 if (info.nmi_v3) 815 nfsm_postop_attr(dvp, attrflag); 816 m_freem(info.nmi_mrep); 817 goto nfsmout; 818 } 819 820 nfsm_getfh(fhp, fhsize, info.nmi_v3); 821 822 /* 823 * Handle RENAME case... 824 */ 825 if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) { 826 if (NFS_CMPFH(np, fhp, fhsize)) { 827 m_freem(info.nmi_mrep); 828 return (EISDIR); 829 } 830 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 831 if (error) { 832 m_freem(info.nmi_mrep); 833 return (error); 834 } 835 newvp = NFSTOV(np); 836 if (info.nmi_v3) { 837 nfsm_postop_attr(newvp, attrflag); 838 nfsm_postop_attr(dvp, attrflag); 839 } else 840 nfsm_loadattr(newvp, NULL); 841 *vpp = newvp; 842 m_freem(info.nmi_mrep); 843 cnp->cn_flags |= SAVENAME; 844 if (!lockparent) { 845 VOP_UNLOCK(dvp, 0, p); 846 cnp->cn_flags |= PDIRUNLOCK; 847 } 848 return (0); 849 } 850 851 /* 852 * The postop attr handling is duplicated for each if case, 853 * because it should be done while dvp is locked (unlocking 854 * dvp is different for each case). 
855 */ 856 857 if (NFS_CMPFH(np, fhp, fhsize)) { 858 vref(dvp); 859 newvp = dvp; 860 if (info.nmi_v3) { 861 nfsm_postop_attr(newvp, attrflag); 862 nfsm_postop_attr(dvp, attrflag); 863 } else 864 nfsm_loadattr(newvp, NULL); 865 } else if (flags & ISDOTDOT) { 866 VOP_UNLOCK(dvp, 0, p); 867 cnp->cn_flags |= PDIRUNLOCK; 868 869 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 870 if (error) { 871 if (vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p) == 0) 872 cnp->cn_flags &= ~PDIRUNLOCK; 873 m_freem(info.nmi_mrep); 874 return (error); 875 } 876 newvp = NFSTOV(np); 877 878 if (info.nmi_v3) { 879 nfsm_postop_attr(newvp, attrflag); 880 nfsm_postop_attr(dvp, attrflag); 881 } else 882 nfsm_loadattr(newvp, NULL); 883 884 if (lockparent && (flags & ISLASTCN)) { 885 if ((error = vn_lock(dvp, LK_EXCLUSIVE, p))) { 886 m_freem(info.nmi_mrep); 887 vput(newvp); 888 return error; 889 } 890 cnp->cn_flags &= ~PDIRUNLOCK; 891 } 892 893 } else { 894 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 895 if (error) { 896 m_freem(info.nmi_mrep); 897 return error; 898 } 899 newvp = NFSTOV(np); 900 if (info.nmi_v3) { 901 nfsm_postop_attr(newvp, attrflag); 902 nfsm_postop_attr(dvp, attrflag); 903 } else 904 nfsm_loadattr(newvp, NULL); 905 if (!lockparent || !(flags & ISLASTCN)) { 906 VOP_UNLOCK(dvp, 0, p); 907 cnp->cn_flags |= PDIRUNLOCK; 908 } 909 } 910 911 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 912 cnp->cn_flags |= SAVENAME; 913 if ((cnp->cn_flags & MAKEENTRY) && 914 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) { 915 nfs_cache_enter(dvp, newvp, cnp); 916 } 917 918 *vpp = newvp; 919 m_freem(info.nmi_mrep); 920 921 nfsmout: 922 if (error) { 923 /* 924 * We get here only because of errors returned by 925 * the RPC. Otherwise we'll have returned above 926 * (the nfsm_* macros will jump to nfsmout 927 * on error). 928 */ 929 if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) && 930 cnp->cn_nameiop != CREATE) { 931 nfs_cache_enter(dvp, NULL, cnp); 932 } 933 if (newvp != NULLVP) { 934 vrele(newvp); 935 if (newvp != dvp) 936 VOP_UNLOCK(newvp, 0, p); 937 } 938 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) && 939 (flags & ISLASTCN) && error == ENOENT) { 940 if (dvp->v_mount->mnt_flag & MNT_RDONLY) 941 error = EROFS; 942 else 943 error = EJUSTRETURN; 944 } 945 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 946 cnp->cn_flags |= SAVENAME; 947 *vpp = NULL; 948 } 949 return (error); 950 } 951 952 /* 953 * nfs read call. 954 * Just call nfs_bioread() to do the work. 955 */ 956 int 957 nfs_read(void *v) 958 { 959 struct vop_read_args *ap = v; 960 struct vnode *vp = ap->a_vp; 961 962 if (vp->v_type != VREG) 963 return (EPERM); 964 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred)); 965 } 966 967 /* 968 * nfs readlink call 969 */ 970 int 971 nfs_readlink(void *v) 972 { 973 struct vop_readlink_args *ap = v; 974 struct vnode *vp = ap->a_vp; 975 976 if (vp->v_type != VLNK) 977 return (EPERM); 978 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred)); 979 } 980 981 /* 982 * Do a readlink rpc. 983 * Called by nfs_doio() from below the buffer cache. 
984 */ 985 int 986 nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred) 987 { 988 struct nfsm_info info; 989 u_int32_t *tl; 990 int32_t t1; 991 caddr_t cp2; 992 int error = 0, len, attrflag; 993 994 info.nmi_v3 = NFS_ISV3(vp); 995 996 nfsstats.rpccnt[NFSPROC_READLINK]++; 997 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)); 998 nfsm_fhtom(&info, vp, info.nmi_v3); 999 1000 info.nmi_procp = curproc; 1001 info.nmi_cred = cred; 1002 error = nfs_request(vp, NFSPROC_READLINK, &info); 1003 1004 if (info.nmi_v3) 1005 nfsm_postop_attr(vp, attrflag); 1006 if (!error) { 1007 nfsm_strsiz(len, NFS_MAXPATHLEN); 1008 nfsm_mtouio(uiop, len); 1009 } 1010 1011 m_freem(info.nmi_mrep); 1012 1013 nfsmout: 1014 return (error); 1015 } 1016 1017 /* 1018 * nfs read rpc call 1019 * Ditto above 1020 */ 1021 int 1022 nfs_readrpc(struct vnode *vp, struct uio *uiop) 1023 { 1024 struct nfsm_info info; 1025 u_int32_t *tl; 1026 int32_t t1; 1027 caddr_t cp2; 1028 struct nfsmount *nmp; 1029 int error = 0, len, retlen, tsiz, eof, attrflag; 1030 1031 info.nmi_v3 = NFS_ISV3(vp); 1032 1033 eof = 0; 1034 1035 nmp = VFSTONFS(vp->v_mount); 1036 tsiz = uiop->uio_resid; 1037 if (uiop->uio_offset + tsiz > 0xffffffff && !info.nmi_v3) 1038 return (EFBIG); 1039 while (tsiz > 0) { 1040 nfsstats.rpccnt[NFSPROC_READ]++; 1041 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz; 1042 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1043 NFSX_UNSIGNED * 3); 1044 nfsm_fhtom(&info, vp, info.nmi_v3); 1045 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED * 3); 1046 if (info.nmi_v3) { 1047 txdr_hyper(uiop->uio_offset, tl); 1048 *(tl + 2) = txdr_unsigned(len); 1049 } else { 1050 *tl++ = txdr_unsigned(uiop->uio_offset); 1051 *tl++ = txdr_unsigned(len); 1052 *tl = 0; 1053 } 1054 1055 info.nmi_procp = curproc; 1056 info.nmi_cred = VTONFS(vp)->n_rcred; 1057 error = nfs_request(vp, NFSPROC_READ, &info); 1058 if (info.nmi_v3) 1059 nfsm_postop_attr(vp, attrflag); 1060 if (error) { 1061 m_freem(info.nmi_mrep); 1062 goto nfsmout; 1063 } 1064 1065 if (info.nmi_v3) { 1066 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 1067 eof = fxdr_unsigned(int, *(tl + 1)); 1068 } else { 1069 nfsm_loadattr(vp, NULL); 1070 } 1071 1072 nfsm_strsiz(retlen, nmp->nm_rsize); 1073 nfsm_mtouio(uiop, retlen); 1074 m_freem(info.nmi_mrep); 1075 tsiz -= retlen; 1076 if (info.nmi_v3) { 1077 if (eof || retlen == 0) 1078 tsiz = 0; 1079 } else if (retlen < len) 1080 tsiz = 0; 1081 } 1082 1083 nfsmout: 1084 return (error); 1085 } 1086 1087 /* 1088 * nfs write call 1089 */ 1090 int 1091 nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, int *must_commit) 1092 { 1093 struct nfsm_info info; 1094 u_int32_t *tl; 1095 int32_t t1, backup; 1096 caddr_t cp2; 1097 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1098 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit; 1099 int committed = NFSV3WRITE_FILESYNC; 1100 1101 info.nmi_v3 = NFS_ISV3(vp); 1102 1103 #ifdef DIAGNOSTIC 1104 if (uiop->uio_iovcnt != 1) 1105 panic("nfs: writerpc iovcnt > 1"); 1106 #endif 1107 *must_commit = 0; 1108 tsiz = uiop->uio_resid; 1109 if (uiop->uio_offset + tsiz > 0xffffffff && !info.nmi_v3) 1110 return (EFBIG); 1111 while (tsiz > 0) { 1112 nfsstats.rpccnt[NFSPROC_WRITE]++; 1113 len = (tsiz > nmp->nm_wsize) ? 
nmp->nm_wsize : tsiz;
		info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)
		    + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
		nfsm_fhtom(&info, vp, info.nmi_v3);
		if (info.nmi_v3) {
			tl = nfsm_build(&info.nmi_mb, 5 * NFSX_UNSIGNED);
			txdr_hyper(uiop->uio_offset, tl);
			tl += 2;
			*tl++ = txdr_unsigned(len);
			*tl++ = txdr_unsigned(*iomode);
			*tl = txdr_unsigned(len);
		} else {
			u_int32_t x;

			tl = nfsm_build(&info.nmi_mb, 4 * NFSX_UNSIGNED);
			/* Set both "begin" and "current" to non-garbage. */
			x = txdr_unsigned((u_int32_t)uiop->uio_offset);
			*tl++ = x;	/* "begin offset" */
			*tl++ = x;	/* "current offset" */
			x = txdr_unsigned(len);
			*tl++ = x;	/* total to this offset */
			*tl = x;	/* size of this write */

		}
		nfsm_uiotombuf(&info.nmi_mb, uiop, len);

		info.nmi_procp = curproc;
		info.nmi_cred = VTONFS(vp)->n_wcred;
		error = nfs_request(vp, NFSPROC_WRITE, &info);
		if (info.nmi_v3) {
			wccflag = NFSV3_WCCCHK;
			nfsm_wcc_data(vp, wccflag);
		}

		if (error) {
			m_freem(info.nmi_mrep);
			goto nfsmout;
		}

		if (info.nmi_v3) {
			wccflag = NFSV3_WCCCHK;
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
			    + NFSX_V3WRITEVERF);
			rlen = fxdr_unsigned(int, *tl++);
			if (rlen == 0) {
				error = NFSERR_IO;
				break;
			} else if (rlen < len) {
				backup = len - rlen;
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base -
				    backup;
				uiop->uio_iov->iov_len += backup;
				uiop->uio_offset -= backup;
				uiop->uio_resid += backup;
				len = rlen;
			}
			commit = fxdr_unsigned(int, *tl++);

			/*
			 * Return the lowest commitment level
			 * obtained by any of the RPCs.
			 */
			if (committed == NFSV3WRITE_FILESYNC)
				committed = commit;
			else if (committed == NFSV3WRITE_DATASYNC &&
				commit == NFSV3WRITE_UNSTABLE)
				committed = commit;
			if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0) {
				bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
				    NFSX_V3WRITEVERF);
				nmp->nm_flag |= NFSMNT_HASWRITEVERF;
			} else if (bcmp((caddr_t)tl,
			    (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
				*must_commit = 1;
				bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
				    NFSX_V3WRITEVERF);
			}
		} else {
			nfsm_loadattr(vp, NULL);
		}
		if (wccflag)
			VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime;
		m_freem(info.nmi_mrep);
		tsiz -= len;
	}
nfsmout:
	*iomode = committed;
	if (error)
		uiop->uio_resid = tsiz;
	return (error);
}

/*
 * nfs mknod rpc
 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
 * mode set to specify the file type and the size field for rdev.
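 *
 * For illustration only (a sketch of the v2 encoding used below, with a
 * character device as the example): the request goes out as
 *
 *	sp->sa_mode = vtonfsv2_mode(VCHR, vap->va_mode);
 *	sp->sa_size = txdr_unsigned(vap->va_rdev);
 *
 * whereas the v3 path uses a real MKNOD procedure that carries the file
 * type and explicit major/minor words.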
1210 */ 1211 int 1212 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 1213 struct vattr *vap) 1214 { 1215 struct nfsv2_sattr *sp; 1216 struct nfsm_info info; 1217 u_int32_t *tl; 1218 int32_t t1; 1219 struct vnode *newvp = NULL; 1220 struct nfsnode *np = NULL; 1221 char *cp2; 1222 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0; 1223 u_int32_t rdev; 1224 1225 info.nmi_v3 = NFS_ISV3(dvp); 1226 1227 if (vap->va_type == VCHR || vap->va_type == VBLK) 1228 rdev = txdr_unsigned(vap->va_rdev); 1229 else if (vap->va_type == VFIFO || vap->va_type == VSOCK) 1230 rdev = nfs_xdrneg1; 1231 else { 1232 VOP_ABORTOP(dvp, cnp); 1233 vput(dvp); 1234 return (EOPNOTSUPP); 1235 } 1236 nfsstats.rpccnt[NFSPROC_MKNOD]++; 1237 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1238 4 * NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen) + 1239 NFSX_SATTR(info.nmi_v3)); 1240 nfsm_fhtom(&info, dvp, info.nmi_v3); 1241 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1242 1243 if (info.nmi_v3) { 1244 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 1245 *tl++ = vtonfsv3_type(vap->va_type); 1246 nfsm_v3attrbuild(&info.nmi_mb, vap, 0); 1247 if (vap->va_type == VCHR || vap->va_type == VBLK) { 1248 tl = nfsm_build(&info.nmi_mb, 2 * NFSX_UNSIGNED); 1249 *tl++ = txdr_unsigned(major(vap->va_rdev)); 1250 *tl = txdr_unsigned(minor(vap->va_rdev)); 1251 } 1252 } else { 1253 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 1254 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1255 sp->sa_uid = nfs_xdrneg1; 1256 sp->sa_gid = nfs_xdrneg1; 1257 sp->sa_size = rdev; 1258 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1259 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1260 } 1261 1262 KASSERT(cnp->cn_proc == curproc); 1263 info.nmi_procp = cnp->cn_proc; 1264 info.nmi_cred = cnp->cn_cred; 1265 error = nfs_request(dvp, NFSPROC_MKNOD, &info); 1266 if (!error) { 1267 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp); 1268 if (!gotvp) { 1269 if (newvp) { 1270 vrele(newvp); 1271 newvp = NULL; 1272 } 1273 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1274 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np); 1275 if (!error) 1276 newvp = NFSTOV(np); 1277 } 1278 } 1279 if (info.nmi_v3) 1280 nfsm_wcc_data(dvp, wccflag); 1281 m_freem(info.nmi_mrep); 1282 1283 nfsmout: 1284 if (error) { 1285 if (newvp) 1286 vrele(newvp); 1287 } else { 1288 if (cnp->cn_flags & MAKEENTRY) 1289 nfs_cache_enter(dvp, newvp, cnp); 1290 *vpp = newvp; 1291 } 1292 pool_put(&namei_pool, cnp->cn_pnbuf); 1293 VTONFS(dvp)->n_flag |= NMODIFIED; 1294 if (!wccflag) 1295 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1296 vrele(dvp); 1297 return (error); 1298 } 1299 1300 /* 1301 * nfs mknod vop 1302 * just call nfs_mknodrpc() to do the work. 
1303 */ 1304 int 1305 nfs_mknod(void *v) 1306 { 1307 struct vop_mknod_args *ap = v; 1308 struct vnode *newvp; 1309 int error; 1310 1311 error = nfs_mknodrpc(ap->a_dvp, &newvp, ap->a_cnp, ap->a_vap); 1312 if (!error) 1313 vrele(newvp); 1314 1315 VN_KNOTE(ap->a_dvp, NOTE_WRITE); 1316 1317 return (error); 1318 } 1319 1320 int 1321 nfs_create(void *v) 1322 { 1323 struct vop_create_args *ap = v; 1324 struct vnode *dvp = ap->a_dvp; 1325 struct vattr *vap = ap->a_vap; 1326 struct componentname *cnp = ap->a_cnp; 1327 struct nfsv2_sattr *sp; 1328 struct nfsm_info info; 1329 u_int32_t *tl; 1330 int32_t t1; 1331 struct nfsnode *np = NULL; 1332 struct vnode *newvp = NULL; 1333 caddr_t cp2; 1334 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0; 1335 1336 info.nmi_v3 = NFS_ISV3(dvp); 1337 1338 /* 1339 * Oops, not for me.. 1340 */ 1341 if (vap->va_type == VSOCK) 1342 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap)); 1343 1344 if (vap->va_vaflags & VA_EXCLUSIVE) 1345 fmode |= O_EXCL; 1346 1347 again: 1348 nfsstats.rpccnt[NFSPROC_CREATE]++; 1349 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1350 2 * NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen) + 1351 NFSX_SATTR(info.nmi_v3)); 1352 nfsm_fhtom(&info, dvp, info.nmi_v3); 1353 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1354 if (info.nmi_v3) { 1355 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 1356 if (fmode & O_EXCL) { 1357 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE); 1358 tl = nfsm_build(&info.nmi_mb, NFSX_V3CREATEVERF); 1359 *tl++ = arc4random(); 1360 *tl = arc4random(); 1361 } else { 1362 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED); 1363 nfsm_v3attrbuild(&info.nmi_mb, vap, 0); 1364 } 1365 } else { 1366 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 1367 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1368 sp->sa_uid = nfs_xdrneg1; 1369 sp->sa_gid = nfs_xdrneg1; 1370 sp->sa_size = 0; 1371 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1372 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1373 } 1374 1375 KASSERT(cnp->cn_proc == curproc); 1376 info.nmi_procp = cnp->cn_proc; 1377 info.nmi_cred = cnp->cn_cred; 1378 error = nfs_request(dvp, NFSPROC_CREATE, &info); 1379 if (!error) { 1380 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp); 1381 if (!gotvp) { 1382 if (newvp) { 1383 vrele(newvp); 1384 newvp = NULL; 1385 } 1386 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1387 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np); 1388 if (!error) 1389 newvp = NFSTOV(np); 1390 } 1391 } 1392 if (info.nmi_v3) 1393 nfsm_wcc_data(dvp, wccflag); 1394 m_freem(info.nmi_mrep); 1395 1396 nfsmout: 1397 if (error) { 1398 if (info.nmi_v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) { 1399 fmode &= ~O_EXCL; 1400 goto again; 1401 } 1402 if (newvp) 1403 vrele(newvp); 1404 } else if (info.nmi_v3 && (fmode & O_EXCL)) 1405 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc); 1406 if (!error) { 1407 if (cnp->cn_flags & MAKEENTRY) 1408 nfs_cache_enter(dvp, newvp, cnp); 1409 *ap->a_vpp = newvp; 1410 } 1411 pool_put(&namei_pool, cnp->cn_pnbuf); 1412 VTONFS(dvp)->n_flag |= NMODIFIED; 1413 if (!wccflag) 1414 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1415 VN_KNOTE(ap->a_dvp, NOTE_WRITE); 1416 vrele(dvp); 1417 return (error); 1418 } 1419 1420 /* 1421 * nfs file remove call 1422 * To try and make nfs semantics closer to ufs semantics, a file that has 1423 * other processes using the vnode is renamed instead of removed and then 1424 * removed later on the last close. 
1425 * - If v_usecount > 1 1426 * If a rename is not already in the works 1427 * call nfs_sillyrename() to set it up 1428 * else 1429 * do the remove rpc 1430 */ 1431 int 1432 nfs_remove(void *v) 1433 { 1434 struct vop_remove_args *ap = v; 1435 struct vnode *vp = ap->a_vp; 1436 struct vnode *dvp = ap->a_dvp; 1437 struct componentname *cnp = ap->a_cnp; 1438 struct nfsnode *np = VTONFS(vp); 1439 int error = 0; 1440 struct vattr vattr; 1441 1442 #ifdef DIAGNOSTIC 1443 if ((cnp->cn_flags & HASBUF) == 0) 1444 panic("nfs_remove: no name"); 1445 if (vp->v_usecount < 1) 1446 panic("nfs_remove: bad v_usecount"); 1447 #endif 1448 if (vp->v_type == VDIR) 1449 error = EPERM; 1450 else if (vp->v_usecount == 1 || (np->n_sillyrename && 1451 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 && 1452 vattr.va_nlink > 1)) { 1453 /* 1454 * Purge the name cache so that the chance of a lookup for 1455 * the name succeeding while the remove is in progress is 1456 * minimized. Without node locking it can still happen, such 1457 * that an I/O op returns ESTALE, but since you get this if 1458 * another host removes the file.. 1459 */ 1460 cache_purge(vp); 1461 /* 1462 * throw away biocache buffers, mainly to avoid 1463 * unnecessary delayed writes later. 1464 */ 1465 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc); 1466 /* Do the rpc */ 1467 if (error != EINTR) 1468 error = nfs_removerpc(dvp, cnp->cn_nameptr, 1469 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc); 1470 /* 1471 * Kludge City: If the first reply to the remove rpc is lost.. 1472 * the reply to the retransmitted request will be ENOENT 1473 * since the file was in fact removed 1474 * Therefore, we cheat and return success. 1475 */ 1476 if (error == ENOENT) 1477 error = 0; 1478 } else if (!np->n_sillyrename) 1479 error = nfs_sillyrename(dvp, vp, cnp); 1480 pool_put(&namei_pool, cnp->cn_pnbuf); 1481 NFS_INVALIDATE_ATTRCACHE(np); 1482 vrele(dvp); 1483 vrele(vp); 1484 1485 VN_KNOTE(vp, NOTE_DELETE); 1486 VN_KNOTE(dvp, NOTE_WRITE); 1487 1488 return (error); 1489 } 1490 1491 /* 1492 * nfs file remove rpc called from nfs_inactive 1493 */ 1494 int 1495 nfs_removeit(struct sillyrename *sp) 1496 { 1497 /* 1498 * Make sure that the directory vnode is still valid. 1499 * XXX we should lock sp->s_dvp here. 1500 * 1501 * NFS can potentially try to nuke a silly *after* the directory 1502 * has already been pushed out on a forced unmount. Since the silly 1503 * is going to go away anyway, this is fine. 1504 */ 1505 if (sp->s_dvp->v_type == VBAD) 1506 return (0); 1507 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred, 1508 NULL)); 1509 } 1510 1511 /* 1512 * Nfs remove rpc, called from nfs_remove() and nfs_removeit(). 
1513 */ 1514 int 1515 nfs_removerpc(struct vnode *dvp, char *name, int namelen, struct ucred *cred, 1516 struct proc *proc) 1517 { 1518 struct nfsm_info info; 1519 u_int32_t *tl; 1520 int32_t t1; 1521 caddr_t cp2; 1522 int error = 0, wccflag = NFSV3_WCCRATTR; 1523 1524 info.nmi_v3 = NFS_ISV3(dvp); 1525 1526 nfsstats.rpccnt[NFSPROC_REMOVE]++; 1527 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1528 NFSX_UNSIGNED + nfsm_rndup(namelen)); 1529 nfsm_fhtom(&info, dvp, info.nmi_v3); 1530 nfsm_strtom(name, namelen, NFS_MAXNAMLEN); 1531 1532 info.nmi_procp = proc; 1533 info.nmi_cred = cred; 1534 error = nfs_request(dvp, NFSPROC_REMOVE, &info); 1535 if (info.nmi_v3) 1536 nfsm_wcc_data(dvp, wccflag); 1537 m_freem(info.nmi_mrep); 1538 1539 nfsmout: 1540 VTONFS(dvp)->n_flag |= NMODIFIED; 1541 if (!wccflag) 1542 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1543 return (error); 1544 } 1545 1546 /* 1547 * nfs file rename call 1548 */ 1549 int 1550 nfs_rename(void *v) 1551 { 1552 struct vop_rename_args *ap = v; 1553 struct vnode *fvp = ap->a_fvp; 1554 struct vnode *tvp = ap->a_tvp; 1555 struct vnode *fdvp = ap->a_fdvp; 1556 struct vnode *tdvp = ap->a_tdvp; 1557 struct componentname *tcnp = ap->a_tcnp; 1558 struct componentname *fcnp = ap->a_fcnp; 1559 int error; 1560 1561 #ifdef DIAGNOSTIC 1562 if ((tcnp->cn_flags & HASBUF) == 0 || 1563 (fcnp->cn_flags & HASBUF) == 0) 1564 panic("nfs_rename: no name"); 1565 #endif 1566 /* Check for cross-device rename */ 1567 if ((fvp->v_mount != tdvp->v_mount) || 1568 (tvp && (fvp->v_mount != tvp->v_mount))) { 1569 error = EXDEV; 1570 goto out; 1571 } 1572 1573 /* 1574 * If the tvp exists and is in use, sillyrename it before doing the 1575 * rename of the new file over it. 1576 */ 1577 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename && 1578 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) { 1579 VN_KNOTE(tvp, NOTE_DELETE); 1580 vrele(tvp); 1581 tvp = NULL; 1582 } 1583 1584 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen, 1585 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred, 1586 tcnp->cn_proc); 1587 1588 VN_KNOTE(fdvp, NOTE_WRITE); 1589 VN_KNOTE(tdvp, NOTE_WRITE); 1590 1591 if (fvp->v_type == VDIR) { 1592 if (tvp != NULL && tvp->v_type == VDIR) 1593 cache_purge(tdvp); 1594 cache_purge(fdvp); 1595 } 1596 out: 1597 if (tdvp == tvp) 1598 vrele(tdvp); 1599 else 1600 vput(tdvp); 1601 if (tvp) 1602 vput(tvp); 1603 vrele(fdvp); 1604 vrele(fvp); 1605 /* 1606 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. 1607 */ 1608 if (error == ENOENT) 1609 error = 0; 1610 return (error); 1611 } 1612 1613 /* 1614 * nfs file rename rpc called from nfs_remove() above 1615 */ 1616 int 1617 nfs_renameit(struct vnode *sdvp, struct componentname *scnp, 1618 struct sillyrename *sp) 1619 { 1620 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen, 1621 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curproc)); 1622 } 1623 1624 /* 1625 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit(). 
1626 */ 1627 int 1628 nfs_renamerpc(struct vnode *fdvp, char *fnameptr, int fnamelen, 1629 struct vnode *tdvp, char *tnameptr, int tnamelen, struct ucred *cred, 1630 struct proc *proc) 1631 { 1632 struct nfsm_info info; 1633 u_int32_t *tl; 1634 int32_t t1; 1635 caddr_t cp2; 1636 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR; 1637 1638 info.nmi_v3 = NFS_ISV3(fdvp); 1639 1640 nfsstats.rpccnt[NFSPROC_RENAME]++; 1641 info.nmi_mb = info.nmi_mreq = nfsm_reqhead((NFSX_FH(info.nmi_v3) + 1642 NFSX_UNSIGNED) * 2 + nfsm_rndup(fnamelen) + nfsm_rndup(tnamelen)); 1643 nfsm_fhtom(&info, fdvp, info.nmi_v3); 1644 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN); 1645 nfsm_fhtom(&info, tdvp, info.nmi_v3); 1646 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN); 1647 1648 info.nmi_procp = proc; 1649 info.nmi_cred = cred; 1650 error = nfs_request(fdvp, NFSPROC_RENAME, &info); 1651 if (info.nmi_v3) { 1652 nfsm_wcc_data(fdvp, fwccflag); 1653 nfsm_wcc_data(tdvp, twccflag); 1654 } 1655 m_freem(info.nmi_mrep); 1656 1657 nfsmout: 1658 VTONFS(fdvp)->n_flag |= NMODIFIED; 1659 VTONFS(tdvp)->n_flag |= NMODIFIED; 1660 if (!fwccflag) 1661 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp)); 1662 if (!twccflag) 1663 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp)); 1664 return (error); 1665 } 1666 1667 /* 1668 * nfs hard link create call 1669 */ 1670 int 1671 nfs_link(void *v) 1672 { 1673 struct vop_link_args *ap = v; 1674 struct vnode *vp = ap->a_vp; 1675 struct vnode *dvp = ap->a_dvp; 1676 struct componentname *cnp = ap->a_cnp; 1677 struct nfsm_info info; 1678 u_int32_t *tl; 1679 int32_t t1; 1680 caddr_t cp2; 1681 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0; 1682 1683 info.nmi_v3 = NFS_ISV3(vp); 1684 1685 if (dvp->v_mount != vp->v_mount) { 1686 pool_put(&namei_pool, cnp->cn_pnbuf); 1687 if (vp == dvp) 1688 vrele(dvp); 1689 else 1690 vput(dvp); 1691 return (EXDEV); 1692 } 1693 1694 /* 1695 * Push all writes to the server, so that the attribute cache 1696 * doesn't get "out of sync" with the server. 1697 * XXX There should be a better way! 
1698 */ 1699 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc); 1700 1701 nfsstats.rpccnt[NFSPROC_LINK]++; 1702 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(2 * NFSX_FH(info.nmi_v3) + 1703 NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); 1704 nfsm_fhtom(&info, vp, info.nmi_v3); 1705 nfsm_fhtom(&info, dvp, info.nmi_v3); 1706 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1707 1708 info.nmi_procp = cnp->cn_proc; 1709 info.nmi_cred = cnp->cn_cred; 1710 error = nfs_request(vp, NFSPROC_LINK, &info); 1711 if (info.nmi_v3) { 1712 nfsm_postop_attr(vp, attrflag); 1713 nfsm_wcc_data(dvp, wccflag); 1714 } 1715 m_freem(info.nmi_mrep); 1716 nfsmout: 1717 pool_put(&namei_pool, cnp->cn_pnbuf); 1718 VTONFS(dvp)->n_flag |= NMODIFIED; 1719 if (!attrflag) 1720 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp)); 1721 if (!wccflag) 1722 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1723 1724 VN_KNOTE(vp, NOTE_LINK); 1725 VN_KNOTE(dvp, NOTE_WRITE); 1726 vput(dvp); 1727 return (error); 1728 } 1729 1730 /* 1731 * nfs symbolic link create call 1732 */ 1733 int 1734 nfs_symlink(void *v) 1735 { 1736 struct vop_symlink_args *ap = v; 1737 struct vnode *dvp = ap->a_dvp; 1738 struct vattr *vap = ap->a_vap; 1739 struct componentname *cnp = ap->a_cnp; 1740 struct nfsv2_sattr *sp; 1741 struct nfsm_info info; 1742 u_int32_t *tl; 1743 int32_t t1; 1744 caddr_t cp2; 1745 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp; 1746 struct vnode *newvp = NULL; 1747 1748 info.nmi_v3 = NFS_ISV3(dvp); 1749 1750 nfsstats.rpccnt[NFSPROC_SYMLINK]++; 1751 slen = strlen(ap->a_target); 1752 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1753 2 * NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + 1754 NFSX_SATTR(info.nmi_v3)); 1755 nfsm_fhtom(&info, dvp, info.nmi_v3); 1756 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1757 if (info.nmi_v3) 1758 nfsm_v3attrbuild(&info.nmi_mb, vap, 0); 1759 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN); 1760 if (!info.nmi_v3) { 1761 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 1762 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode); 1763 sp->sa_uid = nfs_xdrneg1; 1764 sp->sa_gid = nfs_xdrneg1; 1765 sp->sa_size = nfs_xdrneg1; 1766 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1767 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1768 } 1769 1770 info.nmi_procp = cnp->cn_proc; 1771 info.nmi_cred = cnp->cn_cred; 1772 error = nfs_request(dvp, NFSPROC_SYMLINK, &info); 1773 if (info.nmi_v3) { 1774 if (!error) 1775 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp); 1776 nfsm_wcc_data(dvp, wccflag); 1777 } 1778 m_freem(info.nmi_mrep); 1779 1780 nfsmout: 1781 if (newvp) 1782 vrele(newvp); 1783 pool_put(&namei_pool, cnp->cn_pnbuf); 1784 VTONFS(dvp)->n_flag |= NMODIFIED; 1785 if (!wccflag) 1786 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1787 VN_KNOTE(dvp, NOTE_WRITE); 1788 vrele(dvp); 1789 return (error); 1790 } 1791 1792 /* 1793 * nfs make dir call 1794 */ 1795 int 1796 nfs_mkdir(void *v) 1797 { 1798 struct vop_mkdir_args *ap = v; 1799 struct vnode *dvp = ap->a_dvp; 1800 struct vattr *vap = ap->a_vap; 1801 struct componentname *cnp = ap->a_cnp; 1802 struct nfsv2_sattr *sp; 1803 struct nfsm_info info; 1804 u_int32_t *tl; 1805 int32_t t1; 1806 int len; 1807 struct nfsnode *np = NULL; 1808 struct vnode *newvp = NULL; 1809 caddr_t cp2; 1810 int error = 0, wccflag = NFSV3_WCCRATTR; 1811 int gotvp = 0; 1812 1813 info.nmi_v3 = NFS_ISV3(dvp); 1814 1815 len = cnp->cn_namelen; 1816 nfsstats.rpccnt[NFSPROC_MKDIR]++; 1817 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1818 
NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(info.nmi_v3));
	nfsm_fhtom(&info, dvp, info.nmi_v3);
	nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);

	if (info.nmi_v3) {
		nfsm_v3attrbuild(&info.nmi_mb, vap, 0);
	} else {
		sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
		sp->sa_uid = nfs_xdrneg1;
		sp->sa_gid = nfs_xdrneg1;
		sp->sa_size = nfs_xdrneg1;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}

	info.nmi_procp = cnp->cn_proc;
	info.nmi_cred = cnp->cn_cred;
	error = nfs_request(dvp, NFSPROC_MKDIR, &info);
	if (!error)
		nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp);
	if (info.nmi_v3)
		nfsm_wcc_data(dvp, wccflag);
	m_freem(info.nmi_mrep);

nfsmout:
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));

	if (error == 0 && newvp == NULL) {
		error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
		    cnp->cn_proc, &np);
		if (!error) {
			newvp = NFSTOV(np);
			if (newvp->v_type != VDIR)
				error = EEXIST;
		}
	}
	if (error) {
		if (newvp)
			vrele(newvp);
	} else {
		VN_KNOTE(dvp, NOTE_WRITE|NOTE_LINK);
		if (cnp->cn_flags & MAKEENTRY)
			nfs_cache_enter(dvp, newvp, cnp);
		*ap->a_vpp = newvp;
	}
	pool_put(&namei_pool, cnp->cn_pnbuf);
	vrele(dvp);
	return (error);
}

/*
 * nfs remove directory call
 */
int
nfs_rmdir(void *v)
{
	struct vop_rmdir_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	struct nfsm_info info;
	u_int32_t *tl;
	int32_t t1;
	caddr_t cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR;

	info.nmi_v3 = NFS_ISV3(dvp);

	if (dvp == vp) {
		vrele(dvp);
		vrele(dvp);
		pool_put(&namei_pool, cnp->cn_pnbuf);
		return (EINVAL);
	}

	nfsstats.rpccnt[NFSPROC_RMDIR]++;
	info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) +
	    NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
	nfsm_fhtom(&info, dvp, info.nmi_v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);

	info.nmi_procp = cnp->cn_proc;
	info.nmi_cred = cnp->cn_cred;
	error = nfs_request(dvp, NFSPROC_RMDIR, &info);
	if (info.nmi_v3)
		nfsm_wcc_data(dvp, wccflag);
	m_freem(info.nmi_mrep);

nfsmout:
	pool_put(&namei_pool, cnp->cn_pnbuf);
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));

	VN_KNOTE(dvp, NOTE_WRITE|NOTE_LINK);
	VN_KNOTE(vp, NOTE_DELETE);

	cache_purge(vp);
	vrele(vp);
	vrele(dvp);
	/*
	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
	 */
	if (error == ENOENT)
		error = 0;
	return (error);
}


/*
 * The readdir logic below has a big design bug. It stores the NFS cookie in
 * the returned uio->uio_offset but does not store the verifier (it cannot).
 * Instead, the code stores the verifier in the nfsnode and applies that
 * verifier to all cookies, no matter what verifier was originally with
 * the cookie.
 *
 * From a practical standpoint, this is not a problem since almost all
 * NFS servers do not change the validity of cookies across deletes
 * and inserts.
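 *
 * Concretely (see struct nfs_dirent below): each entry returned by the
 * readdir RPCs carries its 64-bit NFS cookie just in front of the
 * dirent; nfs_readdir() copies the last cookie it consumed back into
 * uio->uio_offset, while the verifier only ever lives in the nfsnode's
 * n_cookieverf.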
1940 */ 1941 1942 struct nfs_dirent { 1943 u_int32_t cookie[2]; 1944 struct dirent dirent; 1945 }; 1946 1947 #define NFS_DIRHDSIZ (sizeof (struct nfs_dirent) - (MAXNAMLEN + 1)) 1948 #define NFS_DIRENT_OVERHEAD offsetof(struct nfs_dirent, dirent) 1949 1950 /* 1951 * nfs readdir call 1952 */ 1953 int 1954 nfs_readdir(void *v) 1955 { 1956 struct vop_readdir_args *ap = v; 1957 struct vnode *vp = ap->a_vp; 1958 struct nfsnode *np = VTONFS(vp); 1959 struct uio *uio = ap->a_uio; 1960 int tresid, error = 0; 1961 struct vattr vattr; 1962 u_long *cookies = NULL; 1963 int ncookies = 0, cnt; 1964 u_int64_t newoff = uio->uio_offset; 1965 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1966 struct uio readdir_uio; 1967 struct iovec readdir_iovec; 1968 struct proc * p = uio->uio_procp; 1969 int done = 0, eof = 0; 1970 struct ucred *cred = ap->a_cred; 1971 void *data; 1972 1973 if (vp->v_type != VDIR) 1974 return (EPERM); 1975 /* 1976 * First, check for hit on the EOF offset cache 1977 */ 1978 if (np->n_direofoffset != 0 && 1979 uio->uio_offset == np->n_direofoffset) { 1980 if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 && 1981 timespeccmp(&np->n_mtime, &vattr.va_mtime, ==)) { 1982 nfsstats.direofcache_hits++; 1983 *ap->a_eofflag = 1; 1984 return (0); 1985 } 1986 } 1987 1988 if (uio->uio_resid < NFS_FABLKSIZE) 1989 return (EINVAL); 1990 1991 tresid = uio->uio_resid; 1992 1993 if (uio->uio_rw != UIO_READ) 1994 return (EINVAL); 1995 1996 if (ap->a_cookies) { 1997 ncookies = uio->uio_resid / 20; 1998 1999 cookies = malloc(sizeof(*cookies) * ncookies, M_TEMP, 2000 M_WAITOK); 2001 *ap->a_ncookies = ncookies; 2002 *ap->a_cookies = cookies; 2003 } 2004 2005 if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3) 2006 (void)nfs_fsinfo(nmp, vp, cred, p); 2007 2008 cnt = 5; 2009 2010 data = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK); 2011 do { 2012 struct nfs_dirent *ndp = data; 2013 2014 readdir_iovec.iov_len = NFS_DIRBLKSIZ; 2015 readdir_iovec.iov_base = data; 2016 readdir_uio.uio_offset = newoff; 2017 readdir_uio.uio_iov = &readdir_iovec; 2018 readdir_uio.uio_iovcnt = 1; 2019 readdir_uio.uio_segflg = UIO_SYSSPACE; 2020 readdir_uio.uio_rw = UIO_READ; 2021 readdir_uio.uio_resid = NFS_DIRBLKSIZ; 2022 readdir_uio.uio_procp = curproc; 2023 2024 if (nmp->nm_flag & NFSMNT_RDIRPLUS) { 2025 error = nfs_readdirplusrpc(vp, &readdir_uio, cred, 2026 &eof); 2027 if (error == NFSERR_NOTSUPP) 2028 nmp->nm_flag &= ~NFSMNT_RDIRPLUS; 2029 } 2030 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0) 2031 error = nfs_readdirrpc(vp, &readdir_uio, cred, &eof); 2032 2033 if (error == NFSERR_BAD_COOKIE) 2034 error = EINVAL; 2035 2036 while (error == 0 && 2037 (ap->a_cookies == NULL || ncookies != 0) && 2038 ndp < (struct nfs_dirent *)readdir_iovec.iov_base) { 2039 struct dirent *dp = &ndp->dirent; 2040 int reclen = dp->d_reclen; 2041 2042 dp->d_reclen -= NFS_DIRENT_OVERHEAD; 2043 2044 if (uio->uio_resid < dp->d_reclen) { 2045 eof = 0; 2046 done = 1; 2047 break; 2048 } 2049 2050 error = uiomove((caddr_t)dp, dp->d_reclen, uio); 2051 if (error) 2052 break; 2053 2054 newoff = fxdr_hyper(&ndp->cookie[0]); 2055 2056 if (ap->a_cookies != NULL) { 2057 *cookies = newoff; 2058 cookies++; 2059 ncookies--; 2060 } 2061 2062 ndp = (struct nfs_dirent *)((u_int8_t *)ndp + reclen); 2063 } 2064 } while (!error && !done && !eof && cnt--); 2065 2066 free(data, M_TEMP); 2067 data = NULL; 2068 2069 if (ap->a_cookies) { 2070 if (error) { 2071 free(*ap->a_cookies, M_TEMP); 2072 *ap->a_cookies = NULL; 2073 *ap->a_ncookies = 0; 2074 } else { 2075 
*ap->a_ncookies -= ncookies; 2076 } 2077 } 2078 2079 if (!error) 2080 uio->uio_offset = newoff; 2081 2082 if (!error && (eof || uio->uio_resid == tresid)) { 2083 nfsstats.direofcache_misses++; 2084 *ap->a_eofflag = 1; 2085 return (0); 2086 } 2087 2088 *ap->a_eofflag = 0; 2089 return (error); 2090 } 2091 2092 2093 /* 2094 * The function below stuff the cookies in after the name 2095 */ 2096 2097 /* 2098 * Readdir rpc call. 2099 */ 2100 int 2101 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 2102 int *end_of_directory) 2103 { 2104 int len, left; 2105 struct nfs_dirent *ndp = NULL; 2106 struct dirent *dp = NULL; 2107 struct nfsm_info info; 2108 u_int32_t *tl; 2109 caddr_t cp; 2110 int32_t t1; 2111 caddr_t cp2; 2112 nfsuint64 cookie; 2113 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2114 struct nfsnode *dnp = VTONFS(vp); 2115 u_quad_t fileno; 2116 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1; 2117 int attrflag; 2118 2119 info.nmi_v3 = NFS_ISV3(vp); 2120 2121 #ifdef DIAGNOSTIC 2122 if (uiop->uio_iovcnt != 1 || 2123 (uiop->uio_resid & (NFS_DIRBLKSIZ - 1))) 2124 panic("nfs readdirrpc bad uio"); 2125 #endif 2126 2127 txdr_hyper(uiop->uio_offset, &cookie.nfsuquad[0]); 2128 2129 /* 2130 * Loop around doing readdir rpc's of size nm_readdirsize 2131 * truncated to a multiple of NFS_READDIRBLKSIZ. 2132 * The stopping criteria is EOF or buffer full. 2133 */ 2134 while (more_dirs && bigenough) { 2135 nfsstats.rpccnt[NFSPROC_READDIR]++; 2136 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) 2137 + NFSX_READDIR(info.nmi_v3)); 2138 nfsm_fhtom(&info, vp, info.nmi_v3); 2139 if (info.nmi_v3) { 2140 tl = nfsm_build(&info.nmi_mb, 5 * NFSX_UNSIGNED); 2141 *tl++ = cookie.nfsuquad[0]; 2142 *tl++ = cookie.nfsuquad[1]; 2143 if (cookie.nfsuquad[0] == 0 && 2144 cookie.nfsuquad[1] == 0) { 2145 *tl++ = 0; 2146 *tl++ = 0; 2147 } else { 2148 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2149 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2150 } 2151 } else { 2152 tl = nfsm_build(&info.nmi_mb, 2 * NFSX_UNSIGNED); 2153 *tl++ = cookie.nfsuquad[1]; 2154 } 2155 *tl = txdr_unsigned(nmp->nm_readdirsize); 2156 2157 info.nmi_procp = uiop->uio_procp; 2158 info.nmi_cred = cred; 2159 error = nfs_request(vp, NFSPROC_READDIR, &info); 2160 if (info.nmi_v3) 2161 nfsm_postop_attr(vp, attrflag); 2162 2163 if (error) { 2164 m_freem(info.nmi_mrep); 2165 goto nfsmout; 2166 } 2167 2168 if (info.nmi_v3) { 2169 nfsm_dissect(tl, u_int32_t *, 2170 2 * NFSX_UNSIGNED); 2171 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2172 dnp->n_cookieverf.nfsuquad[1] = *tl; 2173 } 2174 2175 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2176 more_dirs = fxdr_unsigned(int, *tl); 2177 2178 /* loop thru the dir entries, doctoring them to 4bsd form */ 2179 while (more_dirs && bigenough) { 2180 if (info.nmi_v3) { 2181 nfsm_dissect(tl, u_int32_t *, 2182 3 * NFSX_UNSIGNED); 2183 fileno = fxdr_hyper(tl); 2184 len = fxdr_unsigned(int, *(tl + 2)); 2185 } else { 2186 nfsm_dissect(tl, u_int32_t *, 2187 2 * NFSX_UNSIGNED); 2188 fileno = fxdr_unsigned(u_quad_t, *tl++); 2189 len = fxdr_unsigned(int, *tl); 2190 } 2191 if (len <= 0 || len > NFS_MAXNAMLEN) { 2192 error = EBADRPC; 2193 m_freem(info.nmi_mrep); 2194 goto nfsmout; 2195 } 2196 tlen = nfsm_rndup(len + 1); 2197 left = NFS_READDIRBLKSIZ - blksiz; 2198 if ((tlen + NFS_DIRHDSIZ) > left) { 2199 dp->d_reclen += left; 2200 uiop->uio_iov->iov_base += left; 2201 uiop->uio_iov->iov_len -= left; 2202 uiop->uio_resid -= left; 2203 blksiz = 0; 2204 } 2205 if ((tlen + NFS_DIRHDSIZ) > uiop->uio_resid) 
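				/* No room left in the caller's buffer. */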
2206 bigenough = 0; 2207 if (bigenough) { 2208 ndp = (struct nfs_dirent *) 2209 uiop->uio_iov->iov_base; 2210 dp = &ndp->dirent; 2211 dp->d_fileno = (int)fileno; 2212 dp->d_namlen = len; 2213 dp->d_reclen = tlen + NFS_DIRHDSIZ; 2214 dp->d_type = DT_UNKNOWN; 2215 blksiz += dp->d_reclen; 2216 if (blksiz == NFS_READDIRBLKSIZ) 2217 blksiz = 0; 2218 uiop->uio_resid -= NFS_DIRHDSIZ; 2219 uiop->uio_iov->iov_base = 2220 (char *)uiop->uio_iov->iov_base + 2221 NFS_DIRHDSIZ; 2222 uiop->uio_iov->iov_len -= NFS_DIRHDSIZ; 2223 nfsm_mtouio(uiop, len); 2224 cp = uiop->uio_iov->iov_base; 2225 tlen -= len; 2226 *cp = '\0'; /* null terminate */ 2227 uiop->uio_iov->iov_base += tlen; 2228 uiop->uio_iov->iov_len -= tlen; 2229 uiop->uio_resid -= tlen; 2230 } else 2231 nfsm_adv(nfsm_rndup(len)); 2232 if (info.nmi_v3) { 2233 nfsm_dissect(tl, u_int32_t *, 2234 3 * NFSX_UNSIGNED); 2235 } else { 2236 nfsm_dissect(tl, u_int32_t *, 2237 2 * NFSX_UNSIGNED); 2238 } 2239 if (bigenough) { 2240 if (info.nmi_v3) { 2241 ndp->cookie[0] = cookie.nfsuquad[0] = 2242 *tl++; 2243 } else 2244 ndp->cookie[0] = 0; 2245 2246 ndp->cookie[1] = cookie.nfsuquad[1] = *tl++; 2247 } else if (info.nmi_v3) 2248 tl += 2; 2249 else 2250 tl++; 2251 more_dirs = fxdr_unsigned(int, *tl); 2252 } 2253 /* 2254 * If at end of rpc data, get the eof boolean 2255 */ 2256 if (!more_dirs) { 2257 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2258 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2259 } 2260 m_freem(info.nmi_mrep); 2261 } 2262 /* 2263 * Fill last record, iff any, out to a multiple of NFS_READDIRBLKSIZ 2264 * by increasing d_reclen for the last record. 2265 */ 2266 if (blksiz > 0) { 2267 left = NFS_READDIRBLKSIZ - blksiz; 2268 dp->d_reclen += left; 2269 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + 2270 left; 2271 uiop->uio_iov->iov_len -= left; 2272 uiop->uio_resid -= left; 2273 } 2274 2275 /* 2276 * We are now either at the end of the directory or have filled the 2277 * block. 2278 */ 2279 if (bigenough) { 2280 dnp->n_direofoffset = fxdr_hyper(&cookie.nfsuquad[0]); 2281 if (end_of_directory) *end_of_directory = 1; 2282 } else { 2283 if (uiop->uio_resid > 0) 2284 printf("EEK! readdirrpc resid > 0\n"); 2285 } 2286 2287 nfsmout: 2288 return (error); 2289 } 2290 2291 /* 2292 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc(). 2293 */ 2294 int 2295 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 2296 int *end_of_directory) 2297 { 2298 int len, left; 2299 struct nfs_dirent *ndirp = NULL; 2300 struct dirent *dp = NULL; 2301 struct nfsm_info info; 2302 u_int32_t *tl; 2303 caddr_t cp; 2304 int32_t t1; 2305 struct vnode *newvp; 2306 caddr_t cp2, dpossav1, dpossav2; 2307 struct mbuf *mdsav1, *mdsav2; 2308 struct nameidata nami, *ndp = &nami; 2309 struct componentname *cnp = &ndp->ni_cnd; 2310 nfsuint64 cookie; 2311 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2312 struct nfsnode *dnp = VTONFS(vp), *np; 2313 nfsfh_t *fhp; 2314 u_quad_t fileno; 2315 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i; 2316 int attrflag, fhsize; 2317 2318 #ifdef DIAGNOSTIC 2319 if (uiop->uio_iovcnt != 1 || 2320 (uiop->uio_resid & (NFS_DIRBLKSIZ - 1))) 2321 panic("nfs readdirplusrpc bad uio"); 2322 #endif 2323 ndp->ni_dvp = vp; 2324 newvp = NULLVP; 2325 2326 txdr_hyper(uiop->uio_offset, &cookie.nfsuquad[0]); 2327 2328 /* 2329 * Loop around doing readdir rpc's of size nm_readdirsize 2330 * truncated to a multiple of NFS_READDIRBLKSIZ. 2331 * The stopping criteria is EOF or buffer full. 
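 *
 * As a rough sketch of the request built just below (described with the
 * v3 READDIRPLUS argument names, which are not spelled out in the code),
 * the six words following the file handle are:
 *
 *	cookie      two words, the resume cookie, zero on the first call
 *	cookieverf  two words, n_cookieverf, or zeros while the cookie is zero
 *	dircount    nm_readdirsize, bytes of directory information wanted
 *	maxcount    nm_rsize, upper bound on the size of the whole reply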
2332 */ 2333 while (more_dirs && bigenough) { 2334 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++; 2335 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(1) + 6 * NFSX_UNSIGNED); 2336 nfsm_fhtom(&info, vp, 1); 2337 tl = nfsm_build(&info.nmi_mb, 6 * NFSX_UNSIGNED); 2338 *tl++ = cookie.nfsuquad[0]; 2339 *tl++ = cookie.nfsuquad[1]; 2340 if (cookie.nfsuquad[0] == 0 && 2341 cookie.nfsuquad[1] == 0) { 2342 *tl++ = 0; 2343 *tl++ = 0; 2344 } else { 2345 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2346 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2347 } 2348 *tl++ = txdr_unsigned(nmp->nm_readdirsize); 2349 *tl = txdr_unsigned(nmp->nm_rsize); 2350 2351 info.nmi_procp = uiop->uio_procp; 2352 info.nmi_cred = cred; 2353 error = nfs_request(vp, NFSPROC_READDIRPLUS, &info); 2354 nfsm_postop_attr(vp, attrflag); 2355 if (error) { 2356 m_freem(info.nmi_mrep); 2357 goto nfsmout; 2358 } 2359 2360 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2361 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2362 dnp->n_cookieverf.nfsuquad[1] = *tl++; 2363 more_dirs = fxdr_unsigned(int, *tl); 2364 2365 /* loop thru the dir entries, doctoring them to 4bsd form */ 2366 while (more_dirs && bigenough) { 2367 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2368 fileno = fxdr_hyper(tl); 2369 len = fxdr_unsigned(int, *(tl + 2)); 2370 if (len <= 0 || len > NFS_MAXNAMLEN) { 2371 error = EBADRPC; 2372 m_freem(info.nmi_mrep); 2373 goto nfsmout; 2374 } 2375 tlen = nfsm_rndup(len + 1); 2376 left = NFS_READDIRBLKSIZ - blksiz; 2377 if ((tlen + NFS_DIRHDSIZ) > left) { 2378 dp->d_reclen += left; 2379 uiop->uio_iov->iov_base = 2380 (char *)uiop->uio_iov->iov_base + left; 2381 uiop->uio_iov->iov_len -= left; 2382 uiop->uio_resid -= left; 2383 blksiz = 0; 2384 } 2385 if ((tlen + NFS_DIRHDSIZ) > uiop->uio_resid) 2386 bigenough = 0; 2387 if (bigenough) { 2388 ndirp = (struct nfs_dirent *) 2389 uiop->uio_iov->iov_base; 2390 dp = &ndirp->dirent; 2391 dp->d_fileno = (int)fileno; 2392 dp->d_namlen = len; 2393 dp->d_reclen = tlen + NFS_DIRHDSIZ; 2394 dp->d_type = DT_UNKNOWN; 2395 blksiz += dp->d_reclen; 2396 if (blksiz == NFS_READDIRBLKSIZ) 2397 blksiz = 0; 2398 uiop->uio_resid -= NFS_DIRHDSIZ; 2399 uiop->uio_iov->iov_base = 2400 (char *)uiop->uio_iov->iov_base + 2401 NFS_DIRHDSIZ; 2402 uiop->uio_iov->iov_len -= NFS_DIRHDSIZ; 2403 cnp->cn_nameptr = uiop->uio_iov->iov_base; 2404 cnp->cn_namelen = len; 2405 nfsm_mtouio(uiop, len); 2406 cp = uiop->uio_iov->iov_base; 2407 tlen -= len; 2408 *cp = '\0'; 2409 uiop->uio_iov->iov_base += tlen; 2410 uiop->uio_iov->iov_len -= tlen; 2411 uiop->uio_resid -= tlen; 2412 } else 2413 nfsm_adv(nfsm_rndup(len)); 2414 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2415 if (bigenough) { 2416 ndirp->cookie[0] = cookie.nfsuquad[0] = *tl++; 2417 ndirp->cookie[1] = cookie.nfsuquad[1] = *tl++; 2418 } else 2419 tl += 2; 2420 2421 /* 2422 * Since the attributes are before the file handle 2423 * (sigh), we must skip over the attributes and then 2424 * come back and get them. 
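 *
 * In outline (just a sketch of the code that follows, using its own
 * variable names):
 *
 *	dpossav1 = info.nmi_dpos; mdsav1 = info.nmi_md;   remember the attrs
 *	nfsm_adv(NFSX_V3FATTR);                           step over them
 *	parse the file handle and get the vnode (nfs_nget, or the directory
 *	itself when the handles match), then swap the saved position back in
 *	and nfsm_loadattr() the new vnode, restore the post-handle position
 *	and carry on with the rest of the entry.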
2425 */ 2426 attrflag = fxdr_unsigned(int, *tl); 2427 if (attrflag) { 2428 dpossav1 = info.nmi_dpos; 2429 mdsav1 = info.nmi_md; 2430 nfsm_adv(NFSX_V3FATTR); 2431 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2432 doit = fxdr_unsigned(int, *tl); 2433 if (doit) { 2434 nfsm_getfh(fhp, fhsize, 1); 2435 if (NFS_CMPFH(dnp, fhp, fhsize)) { 2436 vref(vp); 2437 newvp = vp; 2438 np = dnp; 2439 } else { 2440 error = nfs_nget(vp->v_mount, 2441 fhp, fhsize, &np); 2442 if (error) 2443 doit = 0; 2444 else 2445 newvp = NFSTOV(np); 2446 } 2447 } 2448 if (doit && bigenough) { 2449 dpossav2 = info.nmi_dpos; 2450 info.nmi_dpos = dpossav1; 2451 mdsav2 = info.nmi_md; 2452 info.nmi_md = mdsav1; 2453 nfsm_loadattr(newvp, NULL); 2454 info.nmi_dpos = dpossav2; 2455 info.nmi_md = mdsav2; 2456 dp->d_type = IFTODT( 2457 VTTOIF(np->n_vattr.va_type)); 2458 if (cnp->cn_namelen <= NCHNAMLEN) { 2459 ndp->ni_vp = newvp; 2460 cache_purge(ndp->ni_dvp); 2461 nfs_cache_enter(ndp->ni_dvp, 2462 ndp->ni_vp, cnp); 2463 } 2464 } 2465 } else { 2466 /* Just skip over the file handle */ 2467 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2468 i = fxdr_unsigned(int, *tl); 2469 nfsm_adv(nfsm_rndup(i)); 2470 } 2471 if (newvp != NULLVP) { 2472 vrele(newvp); 2473 newvp = NULLVP; 2474 } 2475 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2476 more_dirs = fxdr_unsigned(int, *tl); 2477 } 2478 /* 2479 * If at end of rpc data, get the eof boolean 2480 */ 2481 if (!more_dirs) { 2482 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2483 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2484 } 2485 m_freem(info.nmi_mrep); 2486 } 2487 /* 2488 * Fill last record, iff any, out to a multiple of NFS_READDIRBLKSIZ 2489 * by increasing d_reclen for the last record. 2490 */ 2491 if (blksiz > 0) { 2492 left = NFS_READDIRBLKSIZ - blksiz; 2493 dp->d_reclen += left; 2494 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + 2495 left; 2496 uiop->uio_iov->iov_len -= left; 2497 uiop->uio_resid -= left; 2498 } 2499 2500 /* 2501 * We are now either at the end of the directory or have filled the 2502 * block. 2503 */ 2504 if (bigenough) { 2505 dnp->n_direofoffset = fxdr_hyper(&cookie.nfsuquad[0]); 2506 if (end_of_directory) *end_of_directory = 1; 2507 } else { 2508 if (uiop->uio_resid > 0) 2509 printf("EEK! readdirplusrpc resid > 0\n"); 2510 } 2511 2512 nfsmout: 2513 if (newvp != NULLVP) 2514 vrele(newvp); 2515 return (error); 2516 } 2517 2518 /* 2519 * Silly rename. To make the NFS filesystem that is stateless look a little 2520 * more like the "ufs" a remove of an active vnode is translated to a rename 2521 * to a funny looking filename that is removed by nfs_inactive on the 2522 * nfsnode. There is the potential for another process on a different client 2523 * to create the same funny name between the nfs_lookitup() fails and the 2524 * nfs_rename() completes, but... 
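 *
 * The "funny looking filename" is generated below as ".nfs" followed by
 * sixteen hex digits from arc4random(), for example (made-up value)
 * ".nfs3A94F2C70B113D58", and nfs_lookitup() is retried until a name is
 * found that the server does not already know about.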
2525 */ 2526 int 2527 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 2528 { 2529 struct sillyrename *sp; 2530 struct nfsnode *np; 2531 int error; 2532 2533 cache_purge(dvp); 2534 np = VTONFS(vp); 2535 sp = malloc(sizeof(struct sillyrename), M_NFSREQ, M_WAITOK); 2536 sp->s_cred = crdup(cnp->cn_cred); 2537 sp->s_dvp = dvp; 2538 vref(dvp); 2539 2540 if (vp->v_type == VDIR) { 2541 #ifdef DIAGNOSTIC 2542 printf("nfs: sillyrename dir\n"); 2543 #endif 2544 error = EINVAL; 2545 goto bad; 2546 } 2547 2548 /* Try lookitups until we get one that isn't there */ 2549 while (1) { 2550 /* Fudge together a funny name */ 2551 sp->s_namlen = snprintf(sp->s_name, sizeof sp->s_name, 2552 ".nfs%08X%08X", arc4random(), arc4random()); 2553 if (sp->s_namlen > sizeof sp->s_name) 2554 sp->s_namlen = strlen(sp->s_name); 2555 2556 if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2557 cnp->cn_proc, NULL)) 2558 break; 2559 } 2560 2561 error = nfs_renameit(dvp, cnp, sp); 2562 if (error) 2563 goto bad; 2564 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2565 cnp->cn_proc, &np); 2566 np->n_sillyrename = sp; 2567 return (0); 2568 bad: 2569 vrele(sp->s_dvp); 2570 crfree(sp->s_cred); 2571 free(sp, M_NFSREQ); 2572 return (error); 2573 } 2574 2575 /* 2576 * Look up a file name and optionally either update the file handle or 2577 * allocate an nfsnode, depending on the value of npp. 2578 * npp == NULL --> just do the lookup 2579 * *npp == NULL --> allocate a new nfsnode and make sure attributes are 2580 * handled too 2581 * *npp != NULL --> update the file handle in the vnode 2582 */ 2583 int 2584 nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred, 2585 struct proc *procp, struct nfsnode **npp) 2586 { 2587 struct nfsm_info info; 2588 u_int32_t *tl; 2589 int32_t t1; 2590 struct vnode *newvp = NULL; 2591 struct nfsnode *np, *dnp = VTONFS(dvp); 2592 caddr_t cp2; 2593 int error = 0, fhlen, attrflag; 2594 nfsfh_t *nfhp; 2595 2596 info.nmi_v3 = NFS_ISV3(dvp); 2597 2598 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 2599 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + NFSX_UNSIGNED + 2600 nfsm_rndup(len)); 2601 nfsm_fhtom(&info, dvp, info.nmi_v3); 2602 nfsm_strtom(name, len, NFS_MAXNAMLEN); 2603 2604 info.nmi_procp = procp; 2605 info.nmi_cred = cred; 2606 error = nfs_request(dvp, NFSPROC_LOOKUP, &info); 2607 if (error && !info.nmi_v3) { 2608 m_freem(info.nmi_mrep); 2609 goto nfsmout; 2610 } 2611 2612 if (npp && !error) { 2613 nfsm_getfh(nfhp, fhlen, info.nmi_v3); 2614 if (*npp) { 2615 np = *npp; 2616 np->n_fhp = &np->n_fh; 2617 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen); 2618 np->n_fhsize = fhlen; 2619 newvp = NFSTOV(np); 2620 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) { 2621 vref(dvp); 2622 newvp = dvp; 2623 np = dnp; 2624 } else { 2625 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np); 2626 if (error) { 2627 m_freem(info.nmi_mrep); 2628 return (error); 2629 } 2630 newvp = NFSTOV(np); 2631 } 2632 if (info.nmi_v3) { 2633 nfsm_postop_attr(newvp, attrflag); 2634 if (!attrflag && *npp == NULL) { 2635 m_freem(info.nmi_mrep); 2636 vrele(newvp); 2637 return (ENOENT); 2638 } 2639 } else 2640 nfsm_loadattr(newvp, NULL); 2641 } 2642 m_freem(info.nmi_mrep); 2643 nfsmout: 2644 if (npp && *npp == NULL) { 2645 if (error) { 2646 if (newvp) 2647 vrele(newvp); 2648 } else 2649 *npp = np; 2650 } 2651 return (error); 2652 } 2653 2654 /* 2655 * Nfs Version 3 commit rpc 2656 */ 2657 int 2658 nfs_commit(struct vnode *vp, u_quad_t offset, int cnt, struct proc *procp) 
2659 { 2660 struct nfsm_info info; 2661 u_int32_t *tl; 2662 int32_t t1; 2663 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2664 caddr_t cp2; 2665 int error = 0, wccflag = NFSV3_WCCRATTR; 2666 2667 if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0) 2668 return (0); 2669 nfsstats.rpccnt[NFSPROC_COMMIT]++; 2670 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(1)); 2671 nfsm_fhtom(&info, vp, 1); 2672 2673 tl = nfsm_build(&info.nmi_mb, 3 * NFSX_UNSIGNED); 2674 txdr_hyper(offset, tl); 2675 tl += 2; 2676 *tl = txdr_unsigned(cnt); 2677 2678 info.nmi_procp = procp; 2679 info.nmi_cred = VTONFS(vp)->n_wcred; 2680 error = nfs_request(vp, NFSPROC_COMMIT, &info); 2681 nfsm_wcc_data(vp, wccflag); 2682 2683 if (!error) { 2684 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF); 2685 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl, 2686 NFSX_V3WRITEVERF)) { 2687 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf, 2688 NFSX_V3WRITEVERF); 2689 error = NFSERR_STALEWRITEVERF; 2690 } 2691 } 2692 m_freem(info.nmi_mrep); 2693 2694 nfsmout: 2695 return (error); 2696 } 2697 2698 /* 2699 * Kludge City.. 2700 * - make nfs_bmap() essentially a no-op that does no translation 2701 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc 2702 * (Maybe I could use the process's page mapping, but I was concerned that 2703 * Kernel Write might not be enabled and also figured copyout() would do 2704 * a lot more work than bcopy() and also it currently happens in the 2705 * context of the swapper process (2). 2706 */ 2707 int 2708 nfs_bmap(void *v) 2709 { 2710 struct vop_bmap_args *ap = v; 2711 struct vnode *vp = ap->a_vp; 2712 2713 if (ap->a_vpp != NULL) 2714 *ap->a_vpp = vp; 2715 if (ap->a_bnp != NULL) 2716 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize); 2717 return (0); 2718 } 2719 2720 /* 2721 * Strategy routine. 2722 * For async requests when nfsiod(s) are running, queue the request by 2723 * calling nfs_asyncio(), otherwise just all nfs_doio() to do the 2724 * request. 2725 */ 2726 int 2727 nfs_strategy(void *v) 2728 { 2729 struct vop_strategy_args *ap = v; 2730 struct buf *bp = ap->a_bp; 2731 struct proc *p; 2732 int error = 0; 2733 2734 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC)) 2735 panic("nfs physio/async"); 2736 if (bp->b_flags & B_ASYNC) 2737 p = NULL; 2738 else 2739 p = curproc; /* XXX */ 2740 /* 2741 * If the op is asynchronous and an i/o daemon is waiting 2742 * queue the request, wake it up and wait for completion 2743 * otherwise just do it ourselves. 2744 */ 2745 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp, 0)) 2746 error = nfs_doio(bp, p); 2747 return (error); 2748 } 2749 2750 /* 2751 * fsync vnode op. Just call nfs_flush() with commit == 1. 2752 */ 2753 int 2754 nfs_fsync(void *v) 2755 { 2756 struct vop_fsync_args *ap = v; 2757 2758 return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1)); 2759 } 2760 2761 /* 2762 * Flush all the blocks associated with a vnode. 2763 * Walk through the buffer pool and push any dirty pages 2764 * associated with the vnode. 
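 *
 * Roughly, and only as a sketch of the passes implemented below:
 *
 *	pass one (NFSv3 with commit requested): gather up to
 *	NFS_COMMITBVECSIZ buffers that are (B_DELWRI | B_NEEDCOMMIT),
 *	work out the byte range they cover and send one COMMIT rpc;
 *	buffers the commit succeeds for are completed without a write,
 *	while a stale write verifier throws the commit state away instead.
 *
 *	pass two: walk v_dirtyblkhd and push whatever is still dirty with
 *	an asynchronous VOP_BWRITE(), then, for MNT_WAIT, wait for the
 *	outstanding I/O to drain; any asynchronous write error recorded in
 *	the nfsnode is returned at the end.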
2765 */ 2766 int 2767 nfs_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct proc *p, 2768 int commit) 2769 { 2770 struct nfsnode *np = VTONFS(vp); 2771 struct buf *bp; 2772 int i; 2773 struct buf *nbp; 2774 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2775 int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos; 2776 int passone = 1; 2777 u_quad_t off = (u_quad_t)-1, endoff = 0, toff; 2778 #ifndef NFS_COMMITBVECSIZ 2779 #define NFS_COMMITBVECSIZ 20 2780 #endif 2781 struct buf *bvec[NFS_COMMITBVECSIZ]; 2782 2783 if (nmp->nm_flag & NFSMNT_INT) 2784 slpflag = PCATCH; 2785 if (!commit) 2786 passone = 0; 2787 /* 2788 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the 2789 * server, but nas not been committed to stable storage on the server 2790 * yet. On the first pass, the byte range is worked out and the commit 2791 * rpc is done. On the second pass, nfs_writebp() is called to do the 2792 * job. 2793 */ 2794 again: 2795 bvecpos = 0; 2796 if (NFS_ISV3(vp) && commit) { 2797 s = splbio(); 2798 for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) { 2799 nbp = LIST_NEXT(bp, b_vnbufs); 2800 if (bvecpos >= NFS_COMMITBVECSIZ) 2801 break; 2802 if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT)) 2803 != (B_DELWRI | B_NEEDCOMMIT)) 2804 continue; 2805 bremfree(bp); 2806 bp->b_flags |= B_WRITEINPROG; 2807 buf_acquire(bp); 2808 /* 2809 * A list of these buffers is kept so that the 2810 * second loop knows which buffers have actually 2811 * been committed. This is necessary, since there 2812 * may be a race between the commit rpc and new 2813 * uncommitted writes on the file. 2814 */ 2815 bvec[bvecpos++] = bp; 2816 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + 2817 bp->b_dirtyoff; 2818 if (toff < off) 2819 off = toff; 2820 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff); 2821 if (toff > endoff) 2822 endoff = toff; 2823 } 2824 splx(s); 2825 } 2826 if (bvecpos > 0) { 2827 /* 2828 * Commit data on the server, as required. 2829 */ 2830 bcstats.pendingwrites++; 2831 bcstats.numwrites++; 2832 retv = nfs_commit(vp, off, (int)(endoff - off), p); 2833 if (retv == NFSERR_STALEWRITEVERF) 2834 nfs_clearcommit(vp->v_mount); 2835 /* 2836 * Now, either mark the blocks I/O done or mark the 2837 * blocks dirty, depending on whether the commit 2838 * succeeded. 2839 */ 2840 for (i = 0; i < bvecpos; i++) { 2841 bp = bvec[i]; 2842 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG); 2843 if (retv) { 2844 if (i == 0) 2845 bcstats.pendingwrites--; 2846 brelse(bp); 2847 } else { 2848 if (i > 0) 2849 bcstats.pendingwrites++; 2850 s = splbio(); 2851 buf_undirty(bp); 2852 vp->v_numoutput++; 2853 bp->b_flags |= B_ASYNC; 2854 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR); 2855 bp->b_dirtyoff = bp->b_dirtyend = 0; 2856 biodone(bp); 2857 splx(s); 2858 } 2859 } 2860 } 2861 2862 /* 2863 * Start/do any write(s) that are required. 
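 *
 * (Busy buffers are only slept on when this is the final pass of a
 * MNT_WAIT sync; buffers still marked B_NEEDCOMMIT are left alone on the
 * first pass and whenever no commit is wanted, because the commit code
 * above handles them; everything else is pushed with an asynchronous
 * VOP_BWRITE.)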
2864 */ 2865 loop: 2866 s = splbio(); 2867 for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) { 2868 nbp = LIST_NEXT(bp, b_vnbufs); 2869 if (bp->b_flags & B_BUSY) { 2870 if (waitfor != MNT_WAIT || passone) 2871 continue; 2872 bp->b_flags |= B_WANTED; 2873 error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1), 2874 "nfsfsync", slptimeo); 2875 splx(s); 2876 if (error) { 2877 if (nfs_sigintr(nmp, NULL, p)) 2878 return (EINTR); 2879 if (slpflag == PCATCH) { 2880 slpflag = 0; 2881 slptimeo = 2 * hz; 2882 } 2883 } 2884 goto loop; 2885 } 2886 if ((bp->b_flags & B_DELWRI) == 0) 2887 panic("nfs_fsync: not dirty"); 2888 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) 2889 continue; 2890 bremfree(bp); 2891 if (passone || !commit) { 2892 bp->b_flags |= B_ASYNC; 2893 } else { 2894 bp->b_flags |= (B_ASYNC|B_WRITEINPROG|B_NEEDCOMMIT); 2895 } 2896 buf_acquire(bp); 2897 splx(s); 2898 VOP_BWRITE(bp); 2899 goto loop; 2900 } 2901 splx(s); 2902 if (passone) { 2903 passone = 0; 2904 goto again; 2905 } 2906 if (waitfor == MNT_WAIT) { 2907 loop2: 2908 s = splbio(); 2909 error = vwaitforio(vp, slpflag, "nfs_fsync", slptimeo); 2910 splx(s); 2911 if (error) { 2912 if (nfs_sigintr(nmp, NULL, p)) 2913 return (EINTR); 2914 if (slpflag == PCATCH) { 2915 slpflag = 0; 2916 slptimeo = 2 * hz; 2917 } 2918 goto loop2; 2919 } 2920 2921 if (LIST_FIRST(&vp->v_dirtyblkhd) && commit) { 2922 #if 0 2923 vprint("nfs_fsync: dirty", vp); 2924 #endif 2925 goto loop; 2926 } 2927 } 2928 if (np->n_flag & NWRITEERR) { 2929 error = np->n_error; 2930 np->n_flag &= ~NWRITEERR; 2931 } 2932 return (error); 2933 } 2934 2935 /* 2936 * Return POSIX pathconf information applicable to nfs. 2937 * 2938 * The NFS V2 protocol doesn't support this, so just return EINVAL 2939 * for V2. 2940 */ 2941 /* ARGSUSED */ 2942 int 2943 nfs_pathconf(void *v) 2944 { 2945 #if 0 2946 struct vop_pathconf_args *ap = v; 2947 #endif 2948 2949 return (EINVAL); 2950 } 2951 2952 /* 2953 * NFS advisory byte-level locks. 2954 */ 2955 int 2956 nfs_advlock(void *v) 2957 { 2958 struct vop_advlock_args *ap = v; 2959 struct nfsnode *np = VTONFS(ap->a_vp); 2960 2961 return (lf_advlock(&np->n_lockf, np->n_size, ap->a_id, ap->a_op, 2962 ap->a_fl, ap->a_flags)); 2963 } 2964 2965 /* 2966 * Print out the contents of an nfsnode. 2967 */ 2968 int 2969 nfs_print(void *v) 2970 { 2971 struct vop_print_args *ap = v; 2972 struct vnode *vp = ap->a_vp; 2973 struct nfsnode *np = VTONFS(vp); 2974 2975 printf("tag VT_NFS, fileid %ld fsid 0x%lx", 2976 np->n_vattr.va_fileid, np->n_vattr.va_fsid); 2977 #ifdef FIFO 2978 if (vp->v_type == VFIFO) 2979 fifo_printinfo(vp); 2980 #endif 2981 printf("\n"); 2982 return (0); 2983 } 2984 2985 /* 2986 * Just call nfs_writebp() with the force argument set to 1. 2987 */ 2988 int 2989 nfs_bwrite(void *v) 2990 { 2991 struct vop_bwrite_args *ap = v; 2992 2993 return (nfs_writebp(ap->a_bp, 1)); 2994 } 2995 2996 /* 2997 * This is a clone of vop_generic_bwrite(), except that B_WRITEINPROG isn't set unless 2998 * the force flag is one and it also handles the B_NEEDCOMMIT flag. 
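 *
 * In outline, a sketch of the cases handled below:
 *
 *	B_NEEDCOMMIT set and B_WRITEINPROG clear: try a COMMIT rpc for the
 *	dirty range (widened to the whole n_pushlo..n_pushhi range when the
 *	buffer lies inside it); if the commit works, or the range has
 *	already been committed, the buffer is finished off with biodone()
 *	and nothing is written.
 *
 *	otherwise, or when the commit does not help: the write is pushed
 *	with VOP_STRATEGY(), and a stale write verifier from the server
 *	clears the commit state for the whole mount.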
2999 */ 3000 int 3001 nfs_writebp(struct buf *bp, int force) 3002 { 3003 int oldflags = bp->b_flags, retv = 1; 3004 struct proc *p = curproc; /* XXX */ 3005 off_t off; 3006 size_t cnt; 3007 int s; 3008 struct vnode *vp; 3009 struct nfsnode *np; 3010 3011 if(!(bp->b_flags & B_BUSY)) 3012 panic("bwrite: buffer is not busy???"); 3013 3014 vp = bp->b_vp; 3015 np = VTONFS(vp); 3016 3017 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR); 3018 3019 s = splbio(); 3020 buf_undirty(bp); 3021 3022 if ((oldflags & B_ASYNC) && !(oldflags & B_DELWRI) && p) 3023 ++p->p_stats->p_ru.ru_oublock; 3024 3025 bp->b_vp->v_numoutput++; 3026 splx(s); 3027 3028 /* 3029 * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not 3030 * an actual write will have to be scheduled via. VOP_STRATEGY(). 3031 * If B_WRITEINPROG is already set, then push it with a write anyhow. 3032 */ 3033 if ((oldflags & (B_NEEDCOMMIT | B_WRITEINPROG)) == B_NEEDCOMMIT) { 3034 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff; 3035 cnt = bp->b_dirtyend - bp->b_dirtyoff; 3036 3037 rw_enter_write(&np->n_commitlock); 3038 if (!(bp->b_flags & B_NEEDCOMMIT)) { 3039 rw_exit_write(&np->n_commitlock); 3040 return (0); 3041 } 3042 3043 /* 3044 * If it's already been commited by somebody else, 3045 * bail. 3046 */ 3047 if (!nfs_in_committed_range(vp, bp)) { 3048 int pushedrange = 0; 3049 /* 3050 * Since we're going to do this, push as much 3051 * as we can. 3052 */ 3053 3054 if (nfs_in_tobecommitted_range(vp, bp)) { 3055 pushedrange = 1; 3056 off = np->n_pushlo; 3057 cnt = np->n_pushhi - np->n_pushlo; 3058 } 3059 3060 bp->b_flags |= B_WRITEINPROG; 3061 bcstats.pendingwrites++; 3062 bcstats.numwrites++; 3063 retv = nfs_commit(bp->b_vp, off, cnt, curproc); 3064 bp->b_flags &= ~B_WRITEINPROG; 3065 3066 if (retv == 0) { 3067 if (pushedrange) 3068 nfs_merge_commit_ranges(vp); 3069 else 3070 nfs_add_committed_range(vp, bp); 3071 } else 3072 bcstats.pendingwrites--; 3073 } else 3074 retv = 0; /* It has already been commited. */ 3075 3076 rw_exit_write(&np->n_commitlock); 3077 if (!retv) { 3078 bp->b_dirtyoff = bp->b_dirtyend = 0; 3079 bp->b_flags &= ~B_NEEDCOMMIT; 3080 s = splbio(); 3081 biodone(bp); 3082 splx(s); 3083 } else if (retv == NFSERR_STALEWRITEVERF) 3084 nfs_clearcommit(bp->b_vp->v_mount); 3085 } 3086 if (retv) { 3087 if (force) 3088 bp->b_flags |= B_WRITEINPROG; 3089 VOP_STRATEGY(bp); 3090 } 3091 3092 if( (oldflags & B_ASYNC) == 0) { 3093 int rtval; 3094 3095 bp->b_flags |= B_RAW; 3096 rtval = biowait(bp); 3097 if (!(oldflags & B_DELWRI) && p) { 3098 ++p->p_stats->p_ru.ru_oublock; 3099 } 3100 brelse(bp); 3101 return (rtval); 3102 } 3103 3104 return (0); 3105 } 3106 3107 /* 3108 * nfs special file access vnode op. 3109 * Essentially just get vattr and then imitate iaccess() since the device is 3110 * local to the client. 3111 */ 3112 int 3113 nfsspec_access(void *v) 3114 { 3115 struct vop_access_args *ap = v; 3116 struct vattr va; 3117 struct vnode *vp = ap->a_vp; 3118 int error; 3119 3120 /* 3121 * Disallow write attempts on filesystems mounted read-only; 3122 * unless the file is a socket, fifo, or a block or character 3123 * device resident on the filesystem. 
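 *
 * (Concretely, only VREG, VDIR and VLNK vnodes are refused with EROFS in
 * the switch below; writes to device, fifo and socket nodes do not modify
 * the filesystem itself, so they fall through to the usual vaccess()
 * check against the attributes fetched from the server.)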
3124 */ 3125 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 3126 switch (vp->v_type) { 3127 case VREG: 3128 case VDIR: 3129 case VLNK: 3130 return (EROFS); 3131 default: 3132 break; 3133 } 3134 } 3135 3136 error = VOP_GETATTR(vp, &va, ap->a_cred, ap->a_p); 3137 if (error) 3138 return (error); 3139 3140 return (vaccess(vp->v_type, va.va_mode, va.va_uid, va.va_gid, 3141 ap->a_mode, ap->a_cred)); 3142 } 3143 3144 int 3145 nfs_poll(void *v) 3146 { 3147 struct vop_poll_args *ap = v; 3148 3149 /* 3150 * We should really check to see if I/O is possible. 3151 */ 3152 return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 3153 } 3154 3155 /* 3156 * Read wrapper for special devices. 3157 */ 3158 int 3159 nfsspec_read(void *v) 3160 { 3161 struct vop_read_args *ap = v; 3162 struct nfsnode *np = VTONFS(ap->a_vp); 3163 3164 /* 3165 * Set access flag. 3166 */ 3167 np->n_flag |= NACC; 3168 getnanotime(&np->n_atim); 3169 return (spec_read(ap)); 3170 } 3171 3172 /* 3173 * Write wrapper for special devices. 3174 */ 3175 int 3176 nfsspec_write(void *v) 3177 { 3178 struct vop_write_args *ap = v; 3179 struct nfsnode *np = VTONFS(ap->a_vp); 3180 3181 /* 3182 * Set update flag. 3183 */ 3184 np->n_flag |= NUPD; 3185 getnanotime(&np->n_mtim); 3186 return (spec_write(ap)); 3187 } 3188 3189 /* 3190 * Close wrapper for special devices. 3191 * 3192 * Update the times on the nfsnode then do device close. 3193 */ 3194 int 3195 nfsspec_close(void *v) 3196 { 3197 struct vop_close_args *ap = v; 3198 struct vnode *vp = ap->a_vp; 3199 struct nfsnode *np = VTONFS(vp); 3200 struct vattr vattr; 3201 3202 if (np->n_flag & (NACC | NUPD)) { 3203 np->n_flag |= NCHG; 3204 if (vp->v_usecount == 1 && 3205 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3206 VATTR_NULL(&vattr); 3207 if (np->n_flag & NACC) 3208 vattr.va_atime = np->n_atim; 3209 if (np->n_flag & NUPD) 3210 vattr.va_mtime = np->n_mtim; 3211 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p); 3212 } 3213 } 3214 return (spec_close(ap)); 3215 } 3216 3217 #ifdef FIFO 3218 /* 3219 * Read wrapper for fifos. 3220 */ 3221 int 3222 nfsfifo_read(void *v) 3223 { 3224 struct vop_read_args *ap = v; 3225 struct nfsnode *np = VTONFS(ap->a_vp); 3226 3227 /* 3228 * Set access flag. 3229 */ 3230 np->n_flag |= NACC; 3231 getnanotime(&np->n_atim); 3232 return (fifo_read(ap)); 3233 } 3234 3235 /* 3236 * Write wrapper for fifos. 3237 */ 3238 int 3239 nfsfifo_write(void *v) 3240 { 3241 struct vop_write_args *ap = v; 3242 struct nfsnode *np = VTONFS(ap->a_vp); 3243 3244 /* 3245 * Set update flag. 3246 */ 3247 np->n_flag |= NUPD; 3248 getnanotime(&np->n_mtim); 3249 return (fifo_write(ap)); 3250 } 3251 3252 /* 3253 * Close wrapper for fifos. 3254 * 3255 * Update the times on the nfsnode then do fifo close. 
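 *
 * (As in nfsspec_close() above: any NACC or NUPD state left behind by the
 * read and write wrappers is folded into a VOP_SETATTR() of va_atime and
 * va_mtime on the last reference, provided the mount is not read-only,
 * before the real fifo_close() is called.)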
3256 */ 3257 int 3258 nfsfifo_close(void *v) 3259 { 3260 struct vop_close_args *ap = v; 3261 struct vnode *vp = ap->a_vp; 3262 struct nfsnode *np = VTONFS(vp); 3263 struct vattr vattr; 3264 3265 if (np->n_flag & (NACC | NUPD)) { 3266 if (np->n_flag & NACC) { 3267 getnanotime(&np->n_atim); 3268 } 3269 if (np->n_flag & NUPD) { 3270 getnanotime(&np->n_mtim); 3271 } 3272 np->n_flag |= NCHG; 3273 if (vp->v_usecount == 1 && 3274 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3275 VATTR_NULL(&vattr); 3276 if (np->n_flag & NACC) 3277 vattr.va_atime = np->n_atim; 3278 if (np->n_flag & NUPD) 3279 vattr.va_mtime = np->n_mtim; 3280 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p); 3281 } 3282 } 3283 return (fifo_close(ap)); 3284 } 3285 3286 int 3287 nfsfifo_reclaim(void *v) 3288 { 3289 fifo_reclaim(v); 3290 return (nfs_reclaim(v)); 3291 } 3292 #endif /* FIFO */ 3293