/*	$OpenBSD: nfs_subs.c,v 1.133 2016/09/15 02:00:18 dlg Exp $	*/
/*	$NetBSD: nfs_subs.c,v 1.27.4.3 1996/07/08 20:34:24 jtc Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_subs.c	8.8 (Berkeley) 5/22/95
 */


/*
 * These functions support the macros and help fiddle mbuf chains for
 * the nfs op functions.  They do things like create the rpc header and
 * copy data between mbuf chains and uio lists.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/pool.h>
#include <sys/time.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_var.h>

#include <uvm/uvm_extern.h>

#include <netinet/in.h>

#include <crypto/idgen.h>

int	nfs_attrtimeo(struct nfsnode *np);

/*
 * Data items converted to xdr at startup, since they are constant
 * This is kinda hokey, but may save a little time doing byte swaps
 */
u_int32_t nfs_xdrneg1;
u_int32_t rpc_call, rpc_vers, rpc_reply, rpc_msgdenied, rpc_autherr,
	rpc_mismatch, rpc_auth_unix, rpc_msgaccepted;
u_int32_t nfs_prog, nfs_true, nfs_false;

/* And other global data */
nfstype nfsv2_type[9] = { NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFNON,
    NFCHR, NFNON };
nfstype nfsv3_type[9] = { NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFSOCK,
    NFFIFO, NFNON };
enum vtype nv2tov_type[8] = { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON, VNON };
enum vtype nv3tov_type[8] = { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO };
int nfs_ticks;
struct nfsstats nfsstats;

/*
 * Mapping of old NFS Version 2 RPC numbers to generic numbers.
 */
int nfsv3_procid[NFS_NPROCS] = {
	NFSPROC_NULL,
	NFSPROC_GETATTR,
	NFSPROC_SETATTR,
	NFSPROC_NOOP,
	NFSPROC_LOOKUP,
	NFSPROC_READLINK,
	NFSPROC_READ,
	NFSPROC_NOOP,
	NFSPROC_WRITE,
	NFSPROC_CREATE,
	NFSPROC_REMOVE,
	NFSPROC_RENAME,
	NFSPROC_LINK,
	NFSPROC_SYMLINK,
	NFSPROC_MKDIR,
	NFSPROC_RMDIR,
	NFSPROC_READDIR,
	NFSPROC_FSSTAT,
	NFSPROC_NOOP,
	NFSPROC_NOOP,
	NFSPROC_NOOP,
	NFSPROC_NOOP,
	NFSPROC_NOOP
};

/*
 * and the reverse mapping from generic to Version 2 procedure numbers
 */
int nfsv2_procid[NFS_NPROCS] = {
	NFSV2PROC_NULL,
	NFSV2PROC_GETATTR,
	NFSV2PROC_SETATTR,
	NFSV2PROC_LOOKUP,
	NFSV2PROC_NOOP,
	NFSV2PROC_READLINK,
	NFSV2PROC_READ,
	NFSV2PROC_WRITE,
	NFSV2PROC_CREATE,
	NFSV2PROC_MKDIR,
	NFSV2PROC_SYMLINK,
	NFSV2PROC_CREATE,
	NFSV2PROC_REMOVE,
	NFSV2PROC_RMDIR,
	NFSV2PROC_RENAME,
	NFSV2PROC_LINK,
	NFSV2PROC_READDIR,
	NFSV2PROC_NOOP,
	NFSV2PROC_STATFS,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP
};
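/*
 * Roughly: an incoming NFSv2 call with procedure number NFSV2PROC_WRITE
 * (8 on the wire) is dispatched as the generic NFSPROC_WRITE via
 * nfsv3_procid[], and nfsv2_procid[NFSPROC_WRITE] maps it back when a
 * Version 2 request is built.  Procedures with no counterpart in the
 * other protocol revision map to the corresponding NOOP entry.
 */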
/*
 * Maps errno values to nfs error numbers.
 * Use NFSERR_IO as the catch all for ones not specifically defined in
 * RFC 1094.
 */
static u_char nfsrv_v2errmap[] = {
  NFSERR_PERM, NFSERR_NOENT, NFSERR_IO, NFSERR_IO, NFSERR_IO,
  NFSERR_NXIO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
  NFSERR_IO, NFSERR_IO, NFSERR_ACCES, NFSERR_IO, NFSERR_IO,
  NFSERR_IO, NFSERR_EXIST, NFSERR_IO, NFSERR_NODEV, NFSERR_NOTDIR,
  NFSERR_ISDIR, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
  NFSERR_IO, NFSERR_FBIG, NFSERR_NOSPC, NFSERR_IO, NFSERR_ROFS,
  NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
  NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
  NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
  NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
  NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
  NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
  NFSERR_IO, NFSERR_IO, NFSERR_NAMETOL, NFSERR_IO, NFSERR_IO,
  NFSERR_NOTEMPTY, NFSERR_IO, NFSERR_IO, NFSERR_DQUOT, NFSERR_STALE
  /* Everything after this maps to NFSERR_IO, so far */
};

/*
 * Maps errno values to nfs error numbers.
 * Although it is not obvious whether or not NFS clients really care if
 * a returned error value is in the specified list for the procedure, the
 * safest thing to do is filter them appropriately. For Version 2, the
 * X/Open XNFS document is the only specification that defines error values
 * for each RPC (The RFC simply lists all possible error values for all RPCs),
 * so I have decided to not do this for Version 2.
 * The first entry is the default error return and the rest are the valid
 * errors for that RPC in increasing numeric order.
 */
static short nfsv3err_null[] = {
	0,
	0,
};

static short nfsv3err_getattr[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_setattr[] = {
	NFSERR_IO,
	NFSERR_PERM,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_INVAL,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOT_SYNC,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_lookup[] = {
	NFSERR_IO,
	NFSERR_NOENT,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_NOTDIR,
	NFSERR_NAMETOL,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_access[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_readlink[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_INVAL,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_read[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_NXIO,
	NFSERR_ACCES,
	NFSERR_INVAL,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_write[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_INVAL,
	NFSERR_FBIG,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_create[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_EXIST,
	NFSERR_NOTDIR,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_NAMETOL,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_mkdir[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_EXIST,
	NFSERR_NOTDIR,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_NAMETOL,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_symlink[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_EXIST,
	NFSERR_NOTDIR,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_NAMETOL,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_mknod[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_EXIST,
	NFSERR_NOTDIR,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_NAMETOL,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	NFSERR_BADTYPE,
	0,
};

static short nfsv3err_remove[] = {
	NFSERR_IO,
	NFSERR_NOENT,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_NOTDIR,
	NFSERR_ROFS,
	NFSERR_NAMETOL,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_rmdir[] = {
	NFSERR_IO,
	NFSERR_NOENT,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_EXIST,
	NFSERR_NOTDIR,
	NFSERR_INVAL,
	NFSERR_ROFS,
	NFSERR_NAMETOL,
	NFSERR_NOTEMPTY,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_rename[] = {
	NFSERR_IO,
	NFSERR_NOENT,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_EXIST,
	NFSERR_XDEV,
	NFSERR_NOTDIR,
	NFSERR_ISDIR,
	NFSERR_INVAL,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_MLINK,
	NFSERR_NAMETOL,
	NFSERR_NOTEMPTY,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_link[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_EXIST,
	NFSERR_XDEV,
	NFSERR_NOTDIR,
	NFSERR_INVAL,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_MLINK,
	NFSERR_NAMETOL,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_readdir[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_NOTDIR,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_BAD_COOKIE,
	NFSERR_TOOSMALL,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_readdirplus[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_NOTDIR,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_BAD_COOKIE,
	NFSERR_NOTSUPP,
	NFSERR_TOOSMALL,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_fsstat[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_fsinfo[] = {
	NFSERR_STALE,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_pathconf[] = {
	NFSERR_STALE,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfsv3err_commit[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static short *nfsrv_v3errmap[] = {
	nfsv3err_null,
	nfsv3err_getattr,
	nfsv3err_setattr,
	nfsv3err_lookup,
	nfsv3err_access,
	nfsv3err_readlink,
	nfsv3err_read,
	nfsv3err_write,
	nfsv3err_create,
	nfsv3err_mkdir,
	nfsv3err_symlink,
	nfsv3err_mknod,
	nfsv3err_remove,
	nfsv3err_rmdir,
	nfsv3err_rename,
	nfsv3err_link,
	nfsv3err_readdir,
	nfsv3err_readdirplus,
	nfsv3err_fsstat,
	nfsv3err_fsinfo,
	nfsv3err_pathconf,
	nfsv3err_commit,
};

struct pool nfsreqpl;

/*
 * Create the header for an rpc request packet
 * The hsiz is the size of the rest of the nfs request header.
 * (just used to decide if a cluster is a good idea)
 */
struct mbuf *
nfsm_reqhead(int hsiz)
{
	struct mbuf *mb;

	MGET(mb, M_WAIT, MT_DATA);
	if (hsiz > MLEN)
		MCLGET(mb, M_WAIT);
	mb->m_len = 0;

	/* Finally, return values */
	return (mb);
}

/*
 * Return an unpredictable XID in XDR form.
 */
u_int32_t
nfs_get_xid(void)
{
	static struct idgen32_ctx nfs_xid_ctx;
	static int called = 0;

	if (!called) {
		called = 1;
		idgen32_init(&nfs_xid_ctx);
	}
	return (txdr_unsigned(idgen32(&nfs_xid_ctx)));
}
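/*
 * Rough sketch of the call header nfsm_rpchead() below builds, as it
 * appears on the wire (each field is one 32-bit XDR word unless noted):
 *
 *	xid | CALL | rpcvers (2) | prog (NFS, 100003) | vers (2 or 3) | proc
 *	AUTH_UNIX | cred length | stamp | hostname (empty) | uid | gid |
 *	    ngroups | gid[0 .. ngroups-1]
 *	AUTH_NULL | 0			(the empty verifier)
 *
 * which is where the 10 * NFSX_UNSIGNED static part and the
 * nfsm_rndup(5 * NFSX_UNSIGNED + 4 * ngroups) credential size passed to
 * MH_ALIGN() come from.
 */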
/*
 * Build the RPC header and fill in the authorization info.
 * Right now we are fairly centered around RPCAUTH_UNIX; in the future
 * this function will need some love to be able to handle other
 * authorization methods, such as Kerberos.
 */
void
nfsm_rpchead(struct nfsreq *req, struct ucred *cr, int auth_type)
{
	struct mbuf *mb;
	u_int32_t *tl;
	int i, authsiz, auth_len, ngroups;

	KASSERT(auth_type == RPCAUTH_UNIX);

	/*
	 * RPCAUTH_UNIX fits in an hdr mbuf, in the future other
	 * authorization methods need to figure out their own sizes
	 * and allocate and chain mbufs accordingly.
	 */
	mb = req->r_mreq;

	/*
	 * We need to start out by finding how big the authorization cred
	 * and verifier are for the auth_type, to be able to correctly
	 * align the mbuf header/chain.
	 */
	switch (auth_type) {
	case RPCAUTH_UNIX:
		/*
		 * In the RPCAUTH_UNIX case, the size is the static
		 * part as shown in RFC1831 + the number of groups;
		 * RPCAUTH_UNIX has a zero verifier.
		 */
		if (cr->cr_ngroups > req->r_nmp->nm_numgrps)
			ngroups = req->r_nmp->nm_numgrps;
		else
			ngroups = cr->cr_ngroups;

		auth_len = (ngroups << 2) + 5 * NFSX_UNSIGNED;
		authsiz = nfsm_rndup(auth_len);
		/* The authorization size + the size of the static part */
		MH_ALIGN(mb, authsiz + 10 * NFSX_UNSIGNED);
		break;
	}

	mb->m_len = 0;

	/* First the RPC header. */
	tl = nfsm_build(&mb, 6 * NFSX_UNSIGNED);

	/* Get a new (non-zero) xid */
	*tl++ = req->r_xid = nfs_get_xid();
	*tl++ = rpc_call;
	*tl++ = rpc_vers;
	*tl++ = nfs_prog;
	if (ISSET(req->r_nmp->nm_flag, NFSMNT_NFSV3)) {
		*tl++ = txdr_unsigned(NFS_VER3);
		*tl = txdr_unsigned(req->r_procnum);
	} else {
		*tl++ = txdr_unsigned(NFS_VER2);
		*tl = txdr_unsigned(nfsv2_procid[req->r_procnum]);
	}

	/* The Authorization cred and its verifier */
	switch (auth_type) {
	case RPCAUTH_UNIX:
		tl = nfsm_build(&mb, auth_len + 4 * NFSX_UNSIGNED);
		*tl++ = txdr_unsigned(RPCAUTH_UNIX);
		*tl++ = txdr_unsigned(authsiz);

		/* The authorization cred */
		*tl++ = 0;		/* stamp */
		*tl++ = 0;		/* NULL hostname */
		*tl++ = txdr_unsigned(cr->cr_uid);
		*tl++ = txdr_unsigned(cr->cr_gid);
		*tl++ = txdr_unsigned(ngroups);
		for (i = 0; i < ngroups; i++)
			*tl++ = txdr_unsigned(cr->cr_groups[i]);
		/* The authorization verifier */
		*tl++ = txdr_unsigned(RPCAUTH_NULL);
		*tl = 0;
		break;
	}

	mb->m_pkthdr.len += authsiz + 10 * NFSX_UNSIGNED;
	mb->m_pkthdr.ph_ifidx = 0;
}

/*
 * copies mbuf chain to the uio scatter/gather list
 */
int
nfsm_mbuftouio(struct mbuf **mrep, struct uio *uiop, int siz, caddr_t *dpos)
{
	char *mbufcp, *uiocp;
	int xfer, left, len;
	struct mbuf *mp;
	long uiosiz, rem;
	int error = 0;

	mp = *mrep;
	mbufcp = *dpos;
	len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
	rem = nfsm_padlen(siz);
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
			return (EFBIG);
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			while (len == 0) {
				mp = mp->m_next;
				if (mp == NULL)
					return (EBADRPC);
				mbufcp = mtod(mp, caddr_t);
				len = mp->m_len;
			}
			xfer = (left > len) ? len : left;
			if (uiop->uio_segflg == UIO_SYSSPACE)
				memcpy(uiocp, mbufcp, xfer);
			else
				copyout(mbufcp, uiocp, xfer);
			left -= xfer;
			len -= xfer;
			mbufcp += xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base =
			    (char *)uiop->uio_iov->iov_base + uiosiz;
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	*dpos = mbufcp;
	*mrep = mp;
	if (rem > 0) {
		if (len < rem)
			error = nfs_adv(mrep, dpos, rem, len);
		else
			*dpos += rem;
	}
	return (error);
}
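/*
 * Note on XDR padding: opaque data in a request or reply is rounded up
 * to a multiple of NFSX_UNSIGNED (4) bytes, and nfsm_padlen() yields the
 * number of fill bytes needed, e.g. 2 for a 10 byte chunk.  The reader
 * above (nfsm_mbuftouio) skips that many bytes after the data; the
 * writer below (nfsm_uiotombuf) appends that many zero bytes.
 */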
/*
 * Copy a uio scatter/gather list to an mbuf chain.
 */
void
nfsm_uiotombuf(struct mbuf **mp, struct uio *uiop, size_t len)
{
	struct mbuf *mb, *mb2;
	size_t xfer, pad;

	mb = *mp;

	pad = nfsm_padlen(len);

	/* XXX -- the following should be done by the caller */
	uiop->uio_resid = len;
	uiop->uio_rw = UIO_WRITE;

	while (len) {
		xfer = ulmin(len, M_TRAILINGSPACE(mb));
		uiomove(mb_offset(mb), xfer, uiop);
		mb->m_len += xfer;
		len -= xfer;
		if (len > 0) {
			MGET(mb2, M_WAIT, MT_DATA);
			if (len > MLEN)
				MCLGET(mb2, M_WAIT);
			mb2->m_len = 0;
			mb->m_next = mb2;
			mb = mb2;
		}
	}

	if (pad > 0) {
		if (pad > M_TRAILINGSPACE(mb)) {
			MGET(mb2, M_WAIT, MT_DATA);
			mb2->m_len = 0;
			mb->m_next = mb2;
			mb = mb2;
		}
		memset(mb_offset(mb), 0, pad);
		mb->m_len += pad;
	}

	*mp = mb;
}

/*
 * Copy a buffer to an mbuf chain
 */
void
nfsm_buftombuf(struct mbuf **mp, void *buf, size_t len)
{
	struct iovec iov;
	struct uio io;

	iov.iov_base = buf;
	iov.iov_len = len;

	io.uio_iov = &iov;
	io.uio_iovcnt = 1;
	io.uio_resid = len;
	io.uio_segflg = UIO_SYSSPACE;
	io.uio_rw = UIO_WRITE;

	nfsm_uiotombuf(mp, &io, len);
}

/*
 * Copy a string to an mbuf chain
 */
void
nfsm_strtombuf(struct mbuf **mp, void *str, size_t len)
{
	struct iovec iov[2];
	struct uio io;
	uint32_t strlen;

	strlen = txdr_unsigned(len);

	iov[0].iov_base = &strlen;
	iov[0].iov_len = sizeof(uint32_t);
	iov[1].iov_base = str;
	iov[1].iov_len = len;

	io.uio_iov = iov;
	io.uio_iovcnt = 2;
	io.uio_resid = sizeof(uint32_t) + len;
	io.uio_segflg = UIO_SYSSPACE;
	io.uio_rw = UIO_WRITE;

	nfsm_uiotombuf(mp, &io, io.uio_resid);
}

/*
 * Help break down an mbuf chain by making the first siz bytes contiguous
 * and returning a pointer to them via *cp2.
 * This is used by the nfsm_dissect macros for tough cases
 * (the macros use the variables dpos and dpos2).
 */
int
nfsm_disct(struct mbuf **mdp, caddr_t *dposp, int siz, int left, caddr_t *cp2)
{
	struct mbuf *mp, *mp2;
	int siz2, xfer;
	caddr_t p;

	mp = *mdp;
	while (left == 0) {
		*mdp = mp = mp->m_next;
		if (mp == NULL)
			return (EBADRPC);
		left = mp->m_len;
		*dposp = mtod(mp, caddr_t);
	}
	if (left >= siz) {
		*cp2 = *dposp;
		*dposp += siz;
	} else if (mp->m_next == NULL) {
		return (EBADRPC);
	} else if (siz > MHLEN) {
		panic("nfs S too big");
	} else {
		MGET(mp2, M_WAIT, MT_DATA);
		mp2->m_next = mp->m_next;
		mp->m_next = mp2;
		mp->m_len -= left;
		mp = mp2;
		*cp2 = p = mtod(mp, caddr_t);
		bcopy(*dposp, p, left);		/* Copy what was left */
		siz2 = siz-left;
		p += left;
		mp2 = mp->m_next;
		/* Loop around copying up the siz2 bytes */
		while (siz2 > 0) {
			if (mp2 == NULL)
				return (EBADRPC);
			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
			if (xfer > 0) {
				bcopy(mtod(mp2, caddr_t), p, xfer);
				mp2->m_data += xfer;
				mp2->m_len -= xfer;
				p += xfer;
				siz2 -= xfer;
			}
			if (siz2 > 0)
				mp2 = mp2->m_next;
		}
		mp->m_len = siz;
		*mdp = mp2;
		*dposp = mtod(mp2, caddr_t);
	}
	return (0);
}
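/*
 * nfsm_disct() only has to copy when the requested item straddles an
 * mbuf boundary; in the common case it just advances dpos.  A typical
 * tough case is nfs_loadattrcache() below, which needs all
 * NFSX_FATTR(v3) bytes of a file attribute block contiguous before it
 * can cast them to a struct nfs_fattr.
 */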
/*
 * Advance the position in the mbuf chain.
 */
int
nfs_adv(struct mbuf **mdp, caddr_t *dposp, int offs, int left)
{
	struct mbuf *m;
	int s;

	m = *mdp;
	s = left;
	while (s < offs) {
		offs -= s;
		m = m->m_next;
		if (m == NULL)
			return (EBADRPC);
		s = m->m_len;
	}
	*mdp = m;
	*dposp = mtod(m, caddr_t)+offs;
	return (0);
}

/*
 * Called once to initialize data structures...
 */
void
nfs_init(void)
{
	rpc_vers = txdr_unsigned(RPC_VER2);
	rpc_call = txdr_unsigned(RPC_CALL);
	rpc_reply = txdr_unsigned(RPC_REPLY);
	rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
	rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
	rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
	rpc_autherr = txdr_unsigned(RPC_AUTHERR);
	rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
	nfs_prog = txdr_unsigned(NFS_PROG);
	nfs_true = txdr_unsigned(1);
	nfs_false = txdr_unsigned(0);
	nfs_xdrneg1 = txdr_unsigned(-1);
	nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000;
	if (nfs_ticks < 1)
		nfs_ticks = 1;
#ifdef NFSSERVER
	nfsrv_init(0);			/* Init server data structures */
	nfsrv_initcache();		/* Init the server request cache */
#endif /* NFSSERVER */

	pool_init(&nfsreqpl, sizeof(struct nfsreq), 0, IPL_NONE, PR_WAITOK,
	    "nfsreqpl", NULL);
}

#ifdef NFSCLIENT
int
nfs_vfs_init(struct vfsconf *vfsp)
{
	extern struct pool nfs_node_pool;

	TAILQ_INIT(&nfs_bufq);

	pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, IPL_NONE,
	    PR_WAITOK, "nfsnodepl", NULL);

	return (0);
}

/*
 * Attribute cache routines.
 * nfs_loadattrcache() - loads or updates the cache contents from attributes
 *	that are on the mbuf list
 * nfs_getattrcache() - returns valid attributes if found in cache, returns
 *	error otherwise
 */

/*
 * Load the attribute cache (that lives in the nfsnode entry) with
 * the values on the mbuf list and, iff vaper is not NULL,
 * copy the attributes to *vaper.
 */
int
nfs_loadattrcache(struct vnode **vpp, struct mbuf **mdp, caddr_t *dposp,
    struct vattr *vaper)
{
	struct vnode *vp = *vpp;
	struct vattr *vap;
	struct nfs_fattr *fp;
	extern struct vops nfs_specvops;
	struct nfsnode *np;
	int32_t t1;
	caddr_t cp2;
	int error = 0;
	int32_t rdev;
	struct mbuf *md;
	enum vtype vtyp;
	mode_t vmode;
	struct timespec mtime;
	struct vnode *nvp;
	int v3 = NFS_ISV3(vp);
	uid_t uid;
	gid_t gid;

	md = *mdp;
	t1 = (mtod(md, caddr_t) + md->m_len) - *dposp;
	error = nfsm_disct(mdp, dposp, NFSX_FATTR(v3), t1, &cp2);
	if (error)
		return (error);
	fp = (struct nfs_fattr *)cp2;
	if (v3) {
		vtyp = nfsv3tov_type(fp->fa_type);
		vmode = fxdr_unsigned(mode_t, fp->fa_mode);
		rdev = makedev(fxdr_unsigned(u_int32_t, fp->fa3_rdev.specdata1),
		    fxdr_unsigned(u_int32_t, fp->fa3_rdev.specdata2));
		fxdr_nfsv3time(&fp->fa3_mtime, &mtime);
	} else {
		vtyp = nfsv2tov_type(fp->fa_type);
		vmode = fxdr_unsigned(mode_t, fp->fa_mode);
		if (vtyp == VNON || vtyp == VREG)
			vtyp = IFTOVT(vmode);
		rdev = fxdr_unsigned(int32_t, fp->fa2_rdev);
		fxdr_nfsv2time(&fp->fa2_mtime, &mtime);

		/*
		 * Really ugly NFSv2 kludge.
		 */
		if (vtyp == VCHR && rdev == 0xffffffff)
			vtyp = VFIFO;
	}

	/*
	 * If v_type == VNON it is a new node, so fill in the v_type and
	 * n_mtime fields.
	 * Check to see if it represents a special
	 * device, and if so, check for a possible alias.  Once the
	 * correct vnode has been obtained, fill in the rest of the
	 * information.
	 */
	np = VTONFS(vp);
	if (vp->v_type != vtyp) {
		cache_purge(vp);
		vp->v_type = vtyp;
		if (vp->v_type == VFIFO) {
#ifndef FIFO
			return (EOPNOTSUPP);
#else
			extern struct vops nfs_fifovops;
			vp->v_op = &nfs_fifovops;
#endif /* FIFO */
		}
		if (vp->v_type == VCHR || vp->v_type == VBLK) {
			vp->v_op = &nfs_specvops;
			nvp = checkalias(vp, (dev_t)rdev, vp->v_mount);
			if (nvp) {
				/*
				 * Discard unneeded vnode, but save its nfsnode.
				 * Since the nfsnode does not have a lock, its
				 * vnode lock has to be carried over.
				 */

				nvp->v_data = vp->v_data;
				vp->v_data = NULL;
				vp->v_op = &spec_vops;
				vrele(vp);
				vgone(vp);
				/*
				 * Reinitialize aliased node.
				 */
				np->n_vnode = nvp;
				*vpp = vp = nvp;
			}
		}
		np->n_mtime = mtime;
	}
	vap = &np->n_vattr;
	vap->va_type = vtyp;
	vap->va_rdev = (dev_t)rdev;
	vap->va_mtime = mtime;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];

	uid = fxdr_unsigned(uid_t, fp->fa_uid);
	gid = fxdr_unsigned(gid_t, fp->fa_gid);
	/* Invalidate access cache if uid, gid or mode changed. */
	if (np->n_accstamp != -1 &&
	    (gid != vap->va_gid || uid != vap->va_uid ||
	    (vmode & 07777) != vap->va_mode))
		np->n_accstamp = -1;

	vap->va_mode = (vmode & 07777);

	switch (vtyp) {
	case VBLK:
		vap->va_blocksize = BLKDEV_IOSIZE;
		break;
	case VCHR:
		vap->va_blocksize = MAXBSIZE;
		break;
	default:
		vap->va_blocksize = v3 ? vp->v_mount->mnt_stat.f_iosize :
		    fxdr_unsigned(int32_t, fp->fa2_blocksize);
		break;
	}
	if (v3) {
		vap->va_nlink = fxdr_unsigned(nlink_t, fp->fa_nlink);
		vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
		vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
		vap->va_size = fxdr_hyper(&fp->fa3_size);
		vap->va_bytes = fxdr_hyper(&fp->fa3_used);
		vap->va_fileid = fxdr_hyper(&fp->fa3_fileid);
		fxdr_nfsv3time(&fp->fa3_atime, &vap->va_atime);
		fxdr_nfsv3time(&fp->fa3_ctime, &vap->va_ctime);
		vap->va_flags = 0;
		vap->va_filerev = 0;
	} else {
		vap->va_nlink = fxdr_unsigned(nlink_t, fp->fa_nlink);
		vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
		vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
		vap->va_size = fxdr_unsigned(u_int32_t, fp->fa2_size);
		vap->va_bytes =
		    (u_quad_t)fxdr_unsigned(int32_t, fp->fa2_blocks) *
		    NFS_FABLKSIZE;
		vap->va_fileid = fxdr_unsigned(int32_t, fp->fa2_fileid);
		fxdr_nfsv2time(&fp->fa2_atime, &vap->va_atime);
		vap->va_flags = 0;
		vap->va_ctime.tv_sec = fxdr_unsigned(u_int32_t,
		    fp->fa2_ctime.nfsv2_sec);
		vap->va_ctime.tv_nsec = 0;
		vap->va_gen = fxdr_unsigned(u_int32_t, fp->fa2_ctime.nfsv2_usec);
		vap->va_filerev = 0;
	}

	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (np->n_flag & NMODIFIED) {
				if (vap->va_size < np->n_size)
					vap->va_size = np->n_size;
				else
					np->n_size = vap->va_size;
			} else
				np->n_size = vap->va_size;
			uvm_vnp_setsize(vp, np->n_size);
		} else
			np->n_size = vap->va_size;
	}
	np->n_attrstamp = time_second;
	if (vaper != NULL) {
		bcopy(vap, vaper, sizeof(*vap));
		if (np->n_flag & NCHG) {
			if (np->n_flag & NACC)
				vaper->va_atime = np->n_atim;
			if (np->n_flag & NUPD)
				vaper->va_mtime = np->n_mtim;
		}
	}
	return (0);
}

int
nfs_attrtimeo(struct nfsnode *np)
{
	struct vnode *vp = np->n_vnode;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int tenthage = (time_second - np->n_mtime.tv_sec) / 10;
	int minto, maxto;

	if (vp->v_type == VDIR) {
		maxto = nmp->nm_acdirmax;
		minto = nmp->nm_acdirmin;
	} else {
		maxto = nmp->nm_acregmax;
		minto = nmp->nm_acregmin;
	}

	if (np->n_flag & NMODIFIED || tenthage < minto)
		return minto;
	else if (tenthage < maxto)
		return tenthage;
	else
		return maxto;
}
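/*
 * The result is an adaptive timeout: attributes of files that have not
 * changed for a while are trusted longer.  For example, with
 * acregmin/acregmax of (say) 5 and 60 seconds, a regular file last
 * modified 300 seconds ago gets a 30 second attribute timeout
 * (300 / 10, clamped to [5, 60]), while one modified 10 seconds ago,
 * or one with locally modified data (NMODIFIED), gets the 5 second
 * minimum.
 */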
/*
 * Check the time stamp.
 * If the cache is valid, copy the contents to *vaper and return 0,
 * otherwise return an error.
 */
int
nfs_getattrcache(struct vnode *vp, struct vattr *vaper)
{
	struct nfsnode *np = VTONFS(vp);
	struct vattr *vap;

	if (np->n_attrstamp == 0 ||
	    (time_second - np->n_attrstamp) >= nfs_attrtimeo(np)) {
		nfsstats.attrcache_misses++;
		return (ENOENT);
	}
	nfsstats.attrcache_hits++;
	vap = &np->n_vattr;
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (np->n_flag & NMODIFIED) {
				if (vap->va_size < np->n_size)
					vap->va_size = np->n_size;
				else
					np->n_size = vap->va_size;
			} else
				np->n_size = vap->va_size;
			uvm_vnp_setsize(vp, np->n_size);
		} else
			np->n_size = vap->va_size;
	}
	bcopy(vap, vaper, sizeof(struct vattr));
	if (np->n_flag & NCHG) {
		if (np->n_flag & NACC)
			vaper->va_atime = np->n_atim;
		if (np->n_flag & NUPD)
			vaper->va_mtime = np->n_mtim;
	}
	return (0);
}
#endif /* NFSCLIENT */

/*
 * Set up nameidata for a lookup() call and do it.
 */
int
nfs_namei(struct nameidata *ndp, fhandle_t *fhp, int len,
    struct nfssvc_sock *slp, struct mbuf *nam, struct mbuf **mdp,
    caddr_t *dposp, struct vnode **retdirp, struct proc *p)
{
	int i, rem;
	struct mbuf *md;
	char *fromcp, *tocp;
	struct vnode *dp;
	int error, rdonly;
	struct componentname *cnp = &ndp->ni_cnd;

	*retdirp = NULL;
	cnp->cn_pnbuf = pool_get(&namei_pool, PR_WAITOK);
	/*
	 * Copy the name from the mbuf list to ndp->ni_pnbuf
	 * and set the various ndp fields appropriately.
	 */
	fromcp = *dposp;
	tocp = cnp->cn_pnbuf;
	md = *mdp;
	rem = mtod(md, caddr_t) + md->m_len - fromcp;
	for (i = 0; i < len; i++) {
		while (rem == 0) {
			md = md->m_next;
			if (md == NULL) {
				error = EBADRPC;
				goto out;
			}
			fromcp = mtod(md, caddr_t);
			rem = md->m_len;
		}
		if (*fromcp == '\0' || *fromcp == '/') {
			error = EACCES;
			goto out;
		}
		*tocp++ = *fromcp++;
		rem--;
	}
	*tocp = '\0';
	*mdp = md;
	*dposp = fromcp;
	len = nfsm_padlen(len);
	if (len > 0) {
		if (rem >= len)
			*dposp += len;
		else if ((error = nfs_adv(mdp, dposp, len, rem)) != 0)
			goto out;
	}
	ndp->ni_pathlen = tocp - cnp->cn_pnbuf;
	cnp->cn_nameptr = cnp->cn_pnbuf;
	/*
	 * Extract and set starting directory.
	 */
	error = nfsrv_fhtovp(fhp, 0, &dp, ndp->ni_cnd.cn_cred, slp,
	    nam, &rdonly);
	if (error)
		goto out;
	if (dp->v_type != VDIR) {
		vrele(dp);
		error = ENOTDIR;
		goto out;
	}
	vref(dp);
	*retdirp = dp;
	ndp->ni_startdir = dp;
	if (rdonly)
		cnp->cn_flags |= (NOCROSSMOUNT | RDONLY);
	else
		cnp->cn_flags |= NOCROSSMOUNT;

	/*
	 * Should be 0; if not, someone didn't init ndp with NDINIT,
	 * so go find and murder the offender messily.
	 */
	KASSERT(ndp->ni_p_path == NULL && ndp->ni_p_size == 0);

	/*
	 * And call lookup() to do the real work
	 */
	cnp->cn_proc = p;
	error = vfs_lookup(ndp);
	if (error)
		goto out;
	/*
	 * Check for encountering a symbolic link
	 */
	if (cnp->cn_flags & ISSYMLINK) {
		if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1)
			vput(ndp->ni_dvp);
		else
			vrele(ndp->ni_dvp);
		vput(ndp->ni_vp);
		ndp->ni_vp = NULL;
		error = EINVAL;
		goto out;
	}
	/*
	 * Check for saved name request
	 */
	if (cnp->cn_flags & (SAVENAME | SAVESTART)) {
		cnp->cn_flags |= HASBUF;
		return (0);
	}
out:
	pool_put(&namei_pool, cnp->cn_pnbuf);
	return (error);
}

/*
 * A fiddled version of m_adj() that ensures null fill to a long
 * boundary and only trims off the back end.
 */
void
nfsm_adj(struct mbuf *mp, int len, int nul)
{
	struct mbuf *m;
	int count, i;
	char *cp;

	/*
	 * Trim from tail.  Scan the mbuf chain,
	 * calculating its length and finding the last mbuf.
	 * If the adjustment only affects this mbuf, then just
	 * adjust and return.  Otherwise, rescan and truncate
	 * after the remaining size.
	 */
	count = 0;
	m = mp;
	for (;;) {
		count += m->m_len;
		if (m->m_next == NULL)
			break;
		m = m->m_next;
	}
	if (m->m_len > len) {
		m->m_len -= len;
		if (nul > 0) {
			cp = mtod(m, caddr_t)+m->m_len-nul;
			for (i = 0; i < nul; i++)
				*cp++ = '\0';
		}
		return;
	}
	count -= len;
	if (count < 0)
		count = 0;
	/*
	 * Correct length for chain is "count".
	 * Find the mbuf with last data, adjust its length,
	 * and toss data from remaining mbufs on chain.
	 */
	for (m = mp; m; m = m->m_next) {
		if (m->m_len >= count) {
			m->m_len = count;
			if (nul > 0) {
				cp = mtod(m, caddr_t)+m->m_len-nul;
				for (i = 0; i < nul; i++)
					*cp++ = '\0';
			}
			break;
		}
		count -= m->m_len;
	}
	for (m = m->m_next; m; m = m->m_next)
		m->m_len = 0;
}
/*
 * Make these functions instead of macros, so that the kernel text size
 * doesn't get too big...
 */
void
nfsm_srvwcc(struct nfsrv_descript *nfsd, int before_ret,
    struct vattr *before_vap, int after_ret, struct vattr *after_vap,
    struct nfsm_info *info)
{
	u_int32_t *tl;

	if (before_ret) {
		tl = nfsm_build(&info->nmi_mb, NFSX_UNSIGNED);
		*tl = nfs_false;
	} else {
		tl = nfsm_build(&info->nmi_mb, 7 * NFSX_UNSIGNED);
		*tl++ = nfs_true;
		txdr_hyper(before_vap->va_size, tl);
		tl += 2;
		txdr_nfsv3time(&(before_vap->va_mtime), tl);
		tl += 2;
		txdr_nfsv3time(&(before_vap->va_ctime), tl);
	}
	nfsm_srvpostop_attr(nfsd, after_ret, after_vap, info);
}

void
nfsm_srvpostop_attr(struct nfsrv_descript *nfsd, int after_ret,
    struct vattr *after_vap, struct nfsm_info *info)
{
	u_int32_t *tl;
	struct nfs_fattr *fp;

	if (after_ret) {
		tl = nfsm_build(&info->nmi_mb, NFSX_UNSIGNED);
		*tl = nfs_false;
	} else {
		tl = nfsm_build(&info->nmi_mb, NFSX_UNSIGNED + NFSX_V3FATTR);
		*tl++ = nfs_true;
		fp = (struct nfs_fattr *)tl;
		nfsm_srvfattr(nfsd, after_vap, fp);
	}
}

void
nfsm_srvfattr(struct nfsrv_descript *nfsd, struct vattr *vap,
    struct nfs_fattr *fp)
{

	fp->fa_nlink = txdr_unsigned(vap->va_nlink);
	fp->fa_uid = txdr_unsigned(vap->va_uid);
	fp->fa_gid = txdr_unsigned(vap->va_gid);
	if (nfsd->nd_flag & ND_NFSV3) {
		fp->fa_type = vtonfsv3_type(vap->va_type);
		fp->fa_mode = vtonfsv3_mode(vap->va_mode);
		txdr_hyper(vap->va_size, &fp->fa3_size);
		txdr_hyper(vap->va_bytes, &fp->fa3_used);
		fp->fa3_rdev.specdata1 = txdr_unsigned(major(vap->va_rdev));
		fp->fa3_rdev.specdata2 = txdr_unsigned(minor(vap->va_rdev));
		fp->fa3_fsid.nfsuquad[0] = 0;
		fp->fa3_fsid.nfsuquad[1] = txdr_unsigned(vap->va_fsid);
		txdr_hyper(vap->va_fileid, &fp->fa3_fileid);
		txdr_nfsv3time(&vap->va_atime, &fp->fa3_atime);
		txdr_nfsv3time(&vap->va_mtime, &fp->fa3_mtime);
		txdr_nfsv3time(&vap->va_ctime, &fp->fa3_ctime);
	} else {
		fp->fa_type = vtonfsv2_type(vap->va_type);
		fp->fa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
		fp->fa2_size = txdr_unsigned(vap->va_size);
		fp->fa2_blocksize = txdr_unsigned(vap->va_blocksize);
		if (vap->va_type == VFIFO)
			fp->fa2_rdev = 0xffffffff;
		else
			fp->fa2_rdev = txdr_unsigned(vap->va_rdev);
		fp->fa2_blocks = txdr_unsigned(vap->va_bytes / NFS_FABLKSIZE);
		fp->fa2_fsid = txdr_unsigned(vap->va_fsid);
		fp->fa2_fileid = txdr_unsigned((u_int32_t)vap->va_fileid);
		txdr_nfsv2time(&vap->va_atime, &fp->fa2_atime);
		txdr_nfsv2time(&vap->va_mtime, &fp->fa2_mtime);
		txdr_nfsv2time(&vap->va_ctime, &fp->fa2_ctime);
	}
}

/*
 * nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
 *	- look up fsid in mount list (if not found ret error)
 *	- get vp and export rights by calling VFS_FHTOVP() and VFS_CHECKEXP()
 *	- if cred->cr_uid == 0 or MNT_EXPORTANON set it to credanon
 *	- if not lockflag unlock it with VOP_UNLOCK()
 */
int
nfsrv_fhtovp(fhandle_t *fhp, int lockflag, struct vnode **vpp,
    struct ucred *cred, struct nfssvc_sock *slp, struct mbuf *nam,
    int *rdonlyp)
{
	struct proc *p = curproc;	/* XXX */
	struct mount *mp;
	int i;
	struct ucred *credanon;
	int error, exflags;
	struct sockaddr_in *saddr;

	*vpp = NULL;
	mp = vfs_getvfs(&fhp->fh_fsid);

	if (!mp)
		return (ESTALE);
	error = VFS_CHECKEXP(mp, nam, &exflags, &credanon);
	if (error)
		return (error);
	error = VFS_FHTOVP(mp, &fhp->fh_fid, vpp);
	if (error)
		return (error);

	saddr = mtod(nam, struct sockaddr_in *);
	if (saddr->sin_family == AF_INET &&
	    (ntohs(saddr->sin_port) >= IPPORT_RESERVED ||
	    (slp->ns_so->so_type == SOCK_STREAM && ntohs(saddr->sin_port) == 20))) {
		vput(*vpp);
		return (NFSERR_AUTHERR | AUTH_TOOWEAK);
	}

	/* Check/setup credentials. */
	if (cred->cr_uid == 0 || (exflags & MNT_EXPORTANON)) {
		cred->cr_uid = credanon->cr_uid;
		cred->cr_gid = credanon->cr_gid;
		for (i = 0; i < credanon->cr_ngroups && i < NGROUPS_MAX; i++)
			cred->cr_groups[i] = credanon->cr_groups[i];
		cred->cr_ngroups = i;
	}
	if (exflags & MNT_EXRDONLY)
		*rdonlyp = 1;
	else
		*rdonlyp = 0;
	if (!lockflag)
		VOP_UNLOCK(*vpp, p);

	return (0);
}

/*
 * This function compares two net addresses by family and returns non-zero
 * if they are the same host, or zero if there is any doubt.
 * The AF_INET family is handled as a special case so that address mbufs
 * don't need to be saved to store "struct in_addr", which is only 4 bytes.
 */
int
netaddr_match(int family, union nethostaddr *haddr, struct mbuf *nam)
{
	struct sockaddr_in *inetaddr;

	switch (family) {
	case AF_INET:
		inetaddr = mtod(nam, struct sockaddr_in *);
		if (inetaddr->sin_family == AF_INET &&
		    inetaddr->sin_addr.s_addr == haddr->had_inetaddr)
			return (1);
		break;
	default:
		break;
	}
	return (0);
}
/*
 * The write verifier has changed (probably due to a server reboot), so all
 * B_NEEDCOMMIT blocks will have to be written again.  Since they are on the
 * dirty block list as B_DELWRI, all this takes is clearing the B_NEEDCOMMIT
 * flag.  Once done the new write verifier can be set for the mount point.
 */
void
nfs_clearcommit(struct mount *mp)
{
	struct vnode *vp, *nvp;
	struct buf *bp, *nbp;
	int s;

	s = splbio();
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp)	/* Paranoia */
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);
		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
			    == (B_DELWRI | B_NEEDCOMMIT))
				bp->b_flags &= ~B_NEEDCOMMIT;
		}
	}
	splx(s);
}

void
nfs_merge_commit_ranges(struct vnode *vp)
{
	struct nfsnode *np = VTONFS(vp);

	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID)) {
		np->n_pushedlo = np->n_pushlo;
		np->n_pushedhi = np->n_pushhi;
		np->n_commitflags |= NFS_COMMIT_PUSHED_VALID;
	} else {
		if (np->n_pushlo < np->n_pushedlo)
			np->n_pushedlo = np->n_pushlo;
		if (np->n_pushhi > np->n_pushedhi)
			np->n_pushedhi = np->n_pushhi;
	}

	np->n_pushlo = np->n_pushhi = 0;
	np->n_commitflags &= ~NFS_COMMIT_PUSH_VALID;
}

int
nfs_in_committed_range(struct vnode *vp, struct buf *bp)
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID))
		return 0;
	lo = (off_t)bp->b_blkno * DEV_BSIZE;
	hi = lo + bp->b_dirtyend;

	return (lo >= np->n_pushedlo && hi <= np->n_pushedhi);
}

int
nfs_in_tobecommitted_range(struct vnode *vp, struct buf *bp)
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	if (!(np->n_commitflags & NFS_COMMIT_PUSH_VALID))
		return 0;
	lo = (off_t)bp->b_blkno * DEV_BSIZE;
	hi = lo + bp->b_dirtyend;

	return (lo >= np->n_pushlo && hi <= np->n_pushhi);
}

void
nfs_add_committed_range(struct vnode *vp, struct buf *bp)
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	lo = (off_t)bp->b_blkno * DEV_BSIZE;
	hi = lo + bp->b_dirtyend;

	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID)) {
		np->n_pushedlo = lo;
		np->n_pushedhi = hi;
		np->n_commitflags |= NFS_COMMIT_PUSHED_VALID;
	} else {
		if (hi > np->n_pushedhi)
			np->n_pushedhi = hi;
		if (lo < np->n_pushedlo)
			np->n_pushedlo = lo;
	}
}

void
nfs_del_committed_range(struct vnode *vp, struct buf *bp)
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID))
		return;

	lo = (off_t)bp->b_blkno * DEV_BSIZE;
	hi = lo + bp->b_dirtyend;

	if (lo > np->n_pushedhi || hi < np->n_pushedlo)
		return;
	if (lo <= np->n_pushedlo)
		np->n_pushedlo = hi;
	else if (hi >= np->n_pushedhi)
		np->n_pushedhi = lo;
	else {
		/*
		 * XXX There's only one range.  If the deleted range
		 * is in the middle, pick the largest of the
		 * contiguous ranges that it leaves.
		 */
		if ((np->n_pushedlo - lo) > (hi - np->n_pushedhi))
			np->n_pushedhi = lo;
		else
			np->n_pushedlo = hi;
	}
}

void
nfs_add_tobecommitted_range(struct vnode *vp, struct buf *bp)
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	lo = (off_t)bp->b_blkno * DEV_BSIZE;
	hi = lo + bp->b_dirtyend;

	if (!(np->n_commitflags & NFS_COMMIT_PUSH_VALID)) {
		np->n_pushlo = lo;
		np->n_pushhi = hi;
		np->n_commitflags |= NFS_COMMIT_PUSH_VALID;
	} else {
		if (lo < np->n_pushlo)
			np->n_pushlo = lo;
		if (hi > np->n_pushhi)
			np->n_pushhi = hi;
	}
}

void
nfs_del_tobecommitted_range(struct vnode *vp, struct buf *bp)
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	if (!(np->n_commitflags & NFS_COMMIT_PUSH_VALID))
		return;

	lo = (off_t)bp->b_blkno * DEV_BSIZE;
	hi = lo + bp->b_dirtyend;

	if (lo > np->n_pushhi || hi < np->n_pushlo)
		return;

	if (lo <= np->n_pushlo)
		np->n_pushlo = hi;
	else if (hi >= np->n_pushhi)
		np->n_pushhi = lo;
	else {
		/*
		 * XXX There's only one range.  If the deleted range
		 * is in the middle, pick the largest of the
		 * contiguous ranges that it leaves.
		 */
		if ((np->n_pushlo - lo) > (hi - np->n_pushhi))
			np->n_pushhi = lo;
		else
			np->n_pushlo = hi;
	}
}

/*
 * Map errnos to NFS error numbers.  For Version 3 also filter out error
 * numbers not specified for the associated procedure.
 */
int
nfsrv_errmap(struct nfsrv_descript *nd, int err)
{
	short *defaulterrp, *errp;

	if (nd->nd_flag & ND_NFSV3) {
		if (nd->nd_procnum <= NFSPROC_COMMIT) {
			errp = defaulterrp = nfsrv_v3errmap[nd->nd_procnum];
			while (*++errp) {
				if (*errp == err)
					return (err);
				else if (*errp > err)
					break;
			}
			return ((int)*defaulterrp);
		} else
			return (err & 0xffff);
	}
	if (err <= nitems(nfsrv_v2errmap))
		return ((int)nfsrv_v2errmap[err - 1]);
	return (NFSERR_IO);
}
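/*
 * For example, a Version 3 READ that fails with EROFS maps to the
 * default (first) entry of nfsv3err_read[], NFSERR_IO, since EROFS is
 * not among the valid errors listed for READ; for Version 2, ENOTDIR
 * simply indexes nfsrv_v2errmap[] and comes back as NFSERR_NOTDIR.
 */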
/*
 * If full is non-zero set all fields, otherwise just set mode and time fields.
 */
void
nfsm_v3attrbuild(struct mbuf **mp, struct vattr *a, int full)
{
	struct mbuf *mb;
	u_int32_t *tl;

	mb = *mp;

	if (a->va_mode != (mode_t)VNOVAL) {
		tl = nfsm_build(&mb, 2 * NFSX_UNSIGNED);
		*tl++ = nfs_true;
		*tl = txdr_unsigned(a->va_mode);
	} else {
		tl = nfsm_build(&mb, NFSX_UNSIGNED);
		*tl = nfs_false;
	}
	if (full && a->va_uid != (uid_t)VNOVAL) {
		tl = nfsm_build(&mb, 2 * NFSX_UNSIGNED);
		*tl++ = nfs_true;
		*tl = txdr_unsigned(a->va_uid);
	} else {
		tl = nfsm_build(&mb, NFSX_UNSIGNED);
		*tl = nfs_false;
	}
	if (full && a->va_gid != (gid_t)VNOVAL) {
		tl = nfsm_build(&mb, 2 * NFSX_UNSIGNED);
		*tl++ = nfs_true;
		*tl = txdr_unsigned(a->va_gid);
	} else {
		tl = nfsm_build(&mb, NFSX_UNSIGNED);
		*tl = nfs_false;
	}
	if (full && a->va_size != VNOVAL) {
		tl = nfsm_build(&mb, 3 * NFSX_UNSIGNED);
		*tl++ = nfs_true;
		txdr_hyper(a->va_size, tl);
	} else {
		tl = nfsm_build(&mb, NFSX_UNSIGNED);
		*tl = nfs_false;
	}
	if (a->va_atime.tv_nsec != VNOVAL) {
		if (a->va_atime.tv_sec != time_second) {
			tl = nfsm_build(&mb, 3 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
			txdr_nfsv3time(&a->va_atime, tl);
		} else {
			tl = nfsm_build(&mb, NFSX_UNSIGNED);
			*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
		}
	} else {
		tl = nfsm_build(&mb, NFSX_UNSIGNED);
		*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
	}
	if (a->va_mtime.tv_nsec != VNOVAL) {
		if (a->va_mtime.tv_sec != time_second) {
			tl = nfsm_build(&mb, 3 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
			txdr_nfsv3time(&a->va_mtime, tl);
		} else {
			tl = nfsm_build(&mb, NFSX_UNSIGNED);
			*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
		}
	} else {
		tl = nfsm_build(&mb, NFSX_UNSIGNED);
		*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
	}

	*mp = mb;
}

/*
 * Ensure a contiguous buffer len bytes long
 */
void *
nfsm_build(struct mbuf **mp, u_int len)
{
	struct mbuf *mb, *mb2;
	caddr_t bpos;

	mb = *mp;
	bpos = mb_offset(mb);

	if (len > M_TRAILINGSPACE(mb)) {
		MGET(mb2, M_WAIT, MT_DATA);
		if (len > MLEN)
			panic("build > MLEN");
		mb->m_next = mb2;
		mb = mb2;
		mb->m_len = 0;
		bpos = mtod(mb, caddr_t);
	}
	mb->m_len += len;

	*mp = mb;

	return (bpos);
}

void
nfsm_fhtom(struct nfsm_info *info, struct vnode *v, int v3)
{
	struct nfsnode *n = VTONFS(v);

	if (v3) {
		nfsm_strtombuf(&info->nmi_mb, n->n_fhp, n->n_fhsize);
	} else {
		nfsm_buftombuf(&info->nmi_mb, n->n_fhp, NFSX_V2FH);
	}
}

void
nfsm_srvfhtom(struct mbuf **mp, fhandle_t *f, int v3)
{
	if (v3) {
		nfsm_strtombuf(mp, f, NFSX_V3FH);
	} else {
		nfsm_buftombuf(mp, f, NFSX_V2FH);
	}
}
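/*
 * nfsm_srvsattr() below is the server-side counterpart of
 * nfsm_v3attrbuild() above: every settable attribute is preceded by an
 * XDR boolean saying whether a value follows, and the two times use a
 * three-way discriminant (DONTCHANGE, TOSERVER or TOCLIENT).  A SETATTR
 * that changes only the mode is thus encoded as TRUE + mode, FALSE for
 * uid, gid and size, and DONTCHANGE for both times.
 */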
int
nfsm_srvsattr(struct mbuf **mp, struct vattr *va, struct mbuf *mrep,
    caddr_t *dposp)
{
	struct nfsm_info info;
	uint32_t *tl, t1;
	caddr_t cp2;
	int error = 0;

	info.nmi_md = *mp;
	info.nmi_dpos = *dposp;

	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
	if (*tl == nfs_true) {
		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
		va->va_mode = nfstov_mode(*tl);
	}

	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
	if (*tl == nfs_true) {
		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
		va->va_uid = fxdr_unsigned(uid_t, *tl);
	}

	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
	if (*tl == nfs_true) {
		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
		va->va_gid = fxdr_unsigned(gid_t, *tl);
	}

	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
	if (*tl == nfs_true) {
		nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
		va->va_size = fxdr_hyper(tl);
	}

	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
	switch (fxdr_unsigned(int, *tl)) {
	case NFSV3SATTRTIME_TOCLIENT:
		va->va_vaflags |= VA_UTIMES_CHANGE;
		va->va_vaflags &= ~VA_UTIMES_NULL;
		nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
		fxdr_nfsv3time(tl, &va->va_atime);
		break;
	case NFSV3SATTRTIME_TOSERVER:
		va->va_vaflags |= VA_UTIMES_CHANGE;
		getnanotime(&va->va_atime);
		break;
	}

	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
	switch (fxdr_unsigned(int, *tl)) {
	case NFSV3SATTRTIME_TOCLIENT:
		va->va_vaflags |= VA_UTIMES_CHANGE;
		va->va_vaflags &= ~VA_UTIMES_NULL;
		nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
		fxdr_nfsv3time(tl, &va->va_mtime);
		break;
	case NFSV3SATTRTIME_TOSERVER:
		va->va_vaflags |= VA_UTIMES_CHANGE;
		getnanotime(&va->va_mtime);
		break;
	}

	*dposp = info.nmi_dpos;
	*mp = info.nmi_md;
nfsmout:
	return (error);
}

void
txdr_nfsv2time(const struct timespec *from, struct nfsv2_time *to)
{
	if (from->tv_nsec == VNOVAL) {
		to->nfsv2_sec = nfs_xdrneg1;
		to->nfsv2_usec = nfs_xdrneg1;
	} else if (from->tv_sec == -1) {
		/*
		 * can't request a time of -1; send
		 * -1.000001 == {-2,999999} instead
		 */
		to->nfsv2_sec = htonl(-2);
		to->nfsv2_usec = htonl(999999);
	} else {
		to->nfsv2_sec = htonl(from->tv_sec);
		to->nfsv2_usec = htonl(from->tv_nsec / 1000);
	}
}