1 /* 2 * Copyright (c) 1989 The Regents of the University of California. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Rick Macklem at The University of Guelph. 7 * 8 * %sccs.include.redist.c% 9 * 10 * @(#)nfs_subs.c 7.68 (Berkeley) 02/02/93 11 */ 12 13 /* 14 * These functions support the macros and help fiddle mbuf chains for 15 * the nfs op functions. They do things like create the rpc header and 16 * copy data between mbuf chains and uio lists. 17 */ 18 #include <sys/param.h> 19 #include <sys/proc.h> 20 #include <sys/systm.h> 21 #include <sys/kernel.h> 22 #include <sys/mount.h> 23 #include <sys/vnode.h> 24 #include <sys/namei.h> 25 #include <sys/mbuf.h> 26 #include <sys/socket.h> 27 #include <sys/stat.h> 28 29 #include <nfs/rpcv2.h> 30 #include <nfs/nfsv2.h> 31 #include <nfs/nfsnode.h> 32 #include <nfs/nfs.h> 33 #include <nfs/xdr_subs.h> 34 #include <nfs/nfsm_subs.h> 35 #include <nfs/nfsmount.h> 36 #include <nfs/nqnfs.h> 37 #include <nfs/nfsrtt.h> 38 39 #include <miscfs/specfs/specdev.h> 40 41 #include <netinet/in.h> 42 #ifdef ISO 43 #include <netiso/iso.h> 44 #endif 45 46 #define TRUE 1 47 #define FALSE 0 48 49 /* 50 * Data items converted to xdr at startup, since they are constant 51 * This is kinda hokey, but may save a little time doing byte swaps 52 */ 53 u_long nfs_procids[NFS_NPROCS]; 54 u_long nfs_xdrneg1; 55 u_long rpc_call, rpc_vers, rpc_reply, rpc_msgdenied, rpc_autherr, 56 rpc_mismatch, rpc_auth_unix, rpc_msgaccepted, rpc_rejectedcred, 57 rpc_auth_kerb; 58 u_long nfs_vers, nfs_prog, nfs_true, nfs_false; 59 60 /* And other global data */ 61 static u_long nfs_xid = 0; 62 enum vtype ntov_type[7] = { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON }; 63 extern struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON]; 64 extern struct queue_entry nfs_bufq; 65 extern struct nfsreq nfsreqh; 66 extern int nqnfs_piggy[NFS_NPROCS]; 67 extern struct nfsrtt nfsrtt; 68 extern time_t nqnfsstarttime; 69 extern u_long nqnfs_prog, 
nqnfs_vers;
extern int nqsrv_clockskew;
extern int nqsrv_writeslack;
extern int nqsrv_maxlease;

/*
 * Create the header for an rpc request packet
 * The hsiz is the size of the rest of the nfs request header.
 * (just used to decide if a cluster is a good idea)
 *
 * vp may be NULL; when non-NULL and the mount is NQNFS, a lease request
 * (or an explicit zero word) is prepended for the nqnfs protocol.
 * On return *bposp points at the first free byte in the returned mbuf.
 *
 * NB: nfsm_build (nfsm_subs.h) apparently expands in terms of the local
 * variable names mb, bpos, mb2 and tl — the declarations below are required
 * by the macro even where they look unused.  TODO confirm against nfsm_subs.h.
 */
struct mbuf *
nfsm_reqh(vp, procid, hsiz, bposp)
	struct vnode *vp;
	u_long procid;
	int hsiz;
	caddr_t *bposp;
{
	register struct mbuf *mb;
	register u_long *tl;
	register caddr_t bpos;
	struct mbuf *mb2;		/* used implicitly by nfsm_build */
	struct nfsmount *nmp;
	int nqflag;

	MGET(mb, M_WAIT, MT_DATA);
	/* Use a cluster when the rest of the header would overflow an mbuf. */
	if (hsiz >= MINCLSIZE)
		MCLGET(mb, M_WAIT);
	mb->m_len = 0;
	bpos = mtod(mb, caddr_t);

	/*
	 * For NQNFS, add lease request.
	 */
	if (vp) {
		nmp = VFSTONFS(vp->v_mount);
		if (nmp->nm_flag & NFSMNT_NQNFS) {
			nqflag = NQNFS_NEEDLEASE(vp, procid);
			if (nqflag) {
				/* lease type followed by requested term */
				nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
				*tl++ = txdr_unsigned(nqflag);
				*tl = txdr_unsigned(nmp->nm_leaseterm);
			} else {
				/* zero word == "no lease wanted" */
				nfsm_build(tl, u_long *, NFSX_UNSIGNED);
				*tl = 0;
			}
		}
	}
	/* Finally, return values */
	*bposp = bpos;
	return (mb);
}

/*
 * Build the RPC header and fill in the authorization info.
 * The authorization string argument is only used when the credentials
 * come from outside of the kernel (auth_str for RPCAUTH_NQNFS/Kerberos).
 * Returns the head of the mbuf list; *mbp is set to the last mbuf of the
 * header chain (with mrest already linked on) and *xidp to the XDR'd xid.
 * mrest/mrest_len: the already-built request body and its length, used to
 * fill in the packet header length.
 */
struct mbuf *
nfsm_rpchead(cr, nqnfs, procid, auth_type, auth_len, auth_str, mrest,
	mrest_len, mbp, xidp)
	register struct ucred *cr;
	int nqnfs;
	int procid;
	int auth_type;
	int auth_len;
	char *auth_str;
	struct mbuf *mrest;
	int mrest_len;
	struct mbuf **mbp;
	u_long *xidp;
{
	register struct mbuf *mb;
	register u_long *tl;
	register caddr_t bpos;
	register int i;
	struct mbuf *mreq, *mb2;	/* mb2 used implicitly by nfsm_build */
	int siz, grpsiz, authsiz;

	/* XDR-rounded credential size; NQNFS carries two extra words. */
	authsiz = nfsm_rndup(auth_len);
	if (auth_type == RPCAUTH_NQNFS)
		authsiz += 2 * NFSX_UNSIGNED;
	MGETHDR(mb, M_WAIT, MT_DATA);
	if ((authsiz + 10*NFSX_UNSIGNED) >= MINCLSIZE) {
		MCLGET(mb, M_WAIT);
	} else if ((authsiz + 10*NFSX_UNSIGNED) < MHLEN) {
		MH_ALIGN(mb, authsiz + 10*NFSX_UNSIGNED);
	} else {
		MH_ALIGN(mb, 8*NFSX_UNSIGNED);
	}
	mb->m_len = 0;
	mreq = mb;
	bpos = mtod(mb, caddr_t);

	/*
	 * First the RPC header.
	 */
	nfsm_build(tl, u_long *, 8*NFSX_UNSIGNED);
	/* xid 0 is skipped so a zero xid never goes out on the wire */
	if (++nfs_xid == 0)
		nfs_xid++;
	*tl++ = *xidp = txdr_unsigned(nfs_xid);
	*tl++ = rpc_call;
	*tl++ = rpc_vers;
	if (nqnfs) {
		*tl++ = txdr_unsigned(NQNFS_PROG);
		*tl++ = txdr_unsigned(NQNFS_VER1);
	} else {
		*tl++ = txdr_unsigned(NFS_PROG);
		*tl++ = txdr_unsigned(NFS_VER2);
	}
	*tl++ = txdr_unsigned(procid);

	/*
	 * And then the authorization cred.
	 */
	*tl++ = txdr_unsigned(auth_type);
	*tl = txdr_unsigned(authsiz);
	switch (auth_type) {
	case RPCAUTH_UNIX:
		/* AUTH_UNIX body: stamp, machinename, uid, gid, gid list */
		nfsm_build(tl, u_long *, auth_len);
		*tl++ = 0;		/* stamp ?? */
		*tl++ = 0;		/* NULL hostname */
		*tl++ = txdr_unsigned(cr->cr_uid);
		*tl++ = txdr_unsigned(cr->cr_groups[0]);
		/* 5 fixed words already emitted; rest is the group list */
		grpsiz = (auth_len >> 2) - 5;
		*tl++ = txdr_unsigned(grpsiz);
		for (i = 1; i <= grpsiz; i++)
			*tl++ = txdr_unsigned(cr->cr_groups[i]);
		break;
	case RPCAUTH_NQNFS:
		/* uid + opaque auth string, copied across mbufs as needed */
		nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
		*tl++ = txdr_unsigned(cr->cr_uid);
		*tl = txdr_unsigned(auth_len);
		siz = auth_len;
		while (siz > 0) {
			if (M_TRAILINGSPACE(mb) == 0) {
				MGET(mb2, M_WAIT, MT_DATA);
				if (siz >= MINCLSIZE)
					MCLGET(mb2, M_WAIT);
				mb->m_next = mb2;
				mb = mb2;
				mb->m_len = 0;
				bpos = mtod(mb, caddr_t);
			}
			i = min(siz, M_TRAILINGSPACE(mb));
			bcopy(auth_str, bpos, i);
			mb->m_len += i;
			auth_str += i;
			bpos += i;
			siz -= i;
		}
		/* zero-pad the string out to the XDR 4-byte boundary */
		if ((siz = (nfsm_rndup(auth_len) - auth_len)) > 0) {
			for (i = 0; i < siz; i++)
				*bpos++ = '\0';
			mb->m_len += siz;
		}
		break;
	};
	/* Verifier: AUTH_NULL with zero length */
	nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
	*tl++ = txdr_unsigned(RPCAUTH_NULL);
	*tl = 0;
	mb->m_next = mrest;
	mreq->m_pkthdr.len = authsiz + 10*NFSX_UNSIGNED + mrest_len;
	mreq->m_pkthdr.rcvif = (struct ifnet *)0;
	*mbp = mb;
	return (mreq);
}

/*
 * copies mbuf chain to the uio scatter/gather list
 *
 * *mrep/*dpos form the read cursor into the reply chain and are advanced
 * past the siz data bytes plus XDR padding.  Returns 0, EFBIG if the uio
 * runs out of iovecs, or EBADRPC if the mbuf chain is too short.
 */
nfsm_mbuftouio(mrep, uiop, siz, dpos)
	struct mbuf **mrep;
	register struct uio *uiop;
	int siz;
	caddr_t *dpos;
{
	register char *mbufcp, *uiocp;
	register int xfer, left, len;
	register struct mbuf *mp;
	long uiosiz, rem;
	int error = 0;

	mp = *mrep;
	mbufcp = *dpos;
	/* bytes remaining in the current mbuf past the cursor */
	len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
	/* XDR pad bytes to skip after the data proper */
	rem = nfsm_rndup(siz)-siz;
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
			return (EFBIG);
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			while (len == 0) {
				mp = mp->m_next;
				if (mp == NULL)
					return (EBADRPC);
				mbufcp = mtod(mp, caddr_t);
				len = mp->m_len;
			}
			xfer = (left > len) ? len : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				(mbufcp, uiocp, xfer);
			else
#endif
			/* kernel destination: straight bcopy; else copyout */
			if (uiop->uio_segflg == UIO_SYSSPACE)
				bcopy(mbufcp, uiocp, xfer);
			else
				copyout(mbufcp, uiocp, xfer);
			left -= xfer;
			len -= xfer;
			mbufcp += xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		/* consumed the whole iovec vs. partially consumed it */
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base += uiosiz;
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	*dpos = mbufcp;
	*mrep = mp;
	/* step the cursor over the XDR padding, if any */
	if (rem > 0) {
		if (len < rem)
			error = nfs_adv(mrep, dpos, rem, len);
		else
			*dpos += rem;
	}
	return (error);
}

/*
 * copies a uio scatter/gather list to an mbuf chain...
 *
 * Appends siz bytes (plus zeroed XDR padding) after *mq; on return *mq is
 * the new tail mbuf and *bpos the first free byte within it.
 * Returns 0 or EINVAL if the uio runs out of iovecs.
 */
nfsm_uiotombuf(uiop, mq, siz, bpos)
	register struct uio *uiop;
	struct mbuf **mq;
	int siz;
	caddr_t *bpos;
{
	register char *uiocp;
	register struct mbuf *mp, *mp2;
	register int xfer, left, mlen;
	int uiosiz, clflg, rem;
	char *cp;

	if (siz > MLEN)		/* or should it >= MCLBYTES ?? */
		clflg = 1;
	else
		clflg = 0;
	rem = nfsm_rndup(siz)-siz;
	mp = mp2 = *mq;
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
			return (EINVAL);
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			mlen = M_TRAILINGSPACE(mp);
			if (mlen == 0) {
				/* current mbuf full: append a fresh one */
				MGET(mp, M_WAIT, MT_DATA);
				if (clflg)
					MCLGET(mp, M_WAIT);
				mp->m_len = 0;
				mp2->m_next = mp;
				mp2 = mp;
				mlen = M_TRAILINGSPACE(mp);
			}
			xfer = (left > mlen) ? mlen : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
			else
#endif
			/* kernel source: straight bcopy; else copyin */
			if (uiop->uio_segflg == UIO_SYSSPACE)
				bcopy(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
			else
				copyin(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
			mp->m_len += xfer;
			left -= xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base += uiosiz;
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	/* zero-fill out to the XDR 4-byte boundary */
	if (rem > 0) {
		if (rem > M_TRAILINGSPACE(mp)) {
			MGET(mp, M_WAIT, MT_DATA);
			mp->m_len = 0;
			mp2->m_next = mp;
		}
		cp = mtod(mp, caddr_t)+mp->m_len;
		for (left = 0; left < rem; left++)
			*cp++ = '\0';
		mp->m_len += rem;
		*bpos = cp;
	} else
		*bpos = mtod(mp, caddr_t)+mp->m_len;
	*mq = mp;
	return (0);
}

/*
 * Help break down an mbuf chain by setting the first siz bytes contiguous
 * pointed to by returned val.
 * If Updateflg == True we can overwrite the first part of the mbuf data
 * (in this case it can never sleep, so it can be called from interrupt level)
 * it may however block when Updateflg == False
 * This is used by the macros nfsm_dissect and nfsm_dissecton for tough
 * cases. (The macros use the vars.
 dpos and dpos2)
 *
 * mdp/dposp: current mbuf/offset cursor, advanced past the siz bytes.
 * left: bytes remaining in the current mbuf at *dposp.
 * *cp2 is set to point at siz contiguous bytes.  Returns 0 or EBADRPC.
 */
nfsm_disct(mdp, dposp, siz, left, updateflg, cp2)
	struct mbuf **mdp;
	caddr_t *dposp;
	int siz;
	int left;
	int updateflg;
	caddr_t *cp2;
{
	register struct mbuf *mp, *mp2;
	register int siz2, xfer;
	register caddr_t p;

	mp = *mdp;
	/* skip over any empty mbufs at the cursor */
	while (left == 0) {
		*mdp = mp = mp->m_next;
		if (mp == NULL)
			return (EBADRPC);
		left = mp->m_len;
		*dposp = mtod(mp, caddr_t);
	}
	if (left >= siz) {
		/* already contiguous: just hand back the cursor */
		*cp2 = *dposp;
		*dposp += siz;
	} else if (mp->m_next == NULL) {
		return (EBADRPC);
	} else if (siz > MHLEN) {
		/* caller asked for more than fits in one mbuf — bug */
		panic("nfs S too big");
	} else {
		/* Iff update, you can overwrite, else must alloc new mbuf */
		if (updateflg) {
			NFSMINOFF(mp);
		} else {
			MGET(mp2, M_WAIT, MT_DATA);
			mp2->m_next = mp->m_next;
			mp->m_next = mp2;
			mp->m_len -= left;
			mp = mp2;
		}
		*cp2 = p = mtod(mp, caddr_t);
		bcopy(*dposp, p, left);		/* Copy what was left */
		siz2 = siz-left;
		p += left;
		mp2 = mp->m_next;
		/* Loop around copying up the siz2 bytes */
		while (siz2 > 0) {
			if (mp2 == NULL)
				return (EBADRPC);
			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
			if (xfer > 0) {
				bcopy(mtod(mp2, caddr_t), p, xfer);
				NFSMADV(mp2, xfer);
				mp2->m_len -= xfer;
				p += xfer;
				siz2 -= xfer;
			}
			if (siz2 > 0)
				mp2 = mp2->m_next;
		}
		mp->m_len = siz;
		*mdp = mp2;
		*dposp = mtod(mp2, caddr_t);
	}
	return (0);
}

/*
 * Advance the position in the mbuf chain.
 *
 * Moves the *mdp/*dposp cursor forward by offs bytes, given that left
 * bytes remain in the current mbuf.  Returns 0 or EBADRPC if the chain
 * ends before offs bytes are skipped.
 */
nfs_adv(mdp, dposp, offs, left)
	struct mbuf **mdp;
	caddr_t *dposp;
	int offs;
	int left;
{
	register struct mbuf *m;
	register int s;

	m = *mdp;
	s = left;
	while (s < offs) {
		offs -= s;
		m = m->m_next;
		if (m == NULL)
			return (EBADRPC);
		s = m->m_len;
	}
	*mdp = m;
	*dposp = mtod(m, caddr_t)+offs;
	return (0);
}

/*
 * Copy a string into mbufs for the hard cases...
 * (called from nfsm_strtom when the XDR string does not fit in the
 * current mbuf's trailing space)
 *
 * Emits the XDR length word followed by siz bytes of cp, growing the
 * chain after *mb; on return *mb/*bpos point past the copied data.
 * Always returns 0.
 *
 * NOTE(review): if the copy into the first mbuf satisfies the whole
 * string (left >= siz) the while loop below never runs and m1 is used
 * uninitialized at the bottom — presumably callers guarantee the string
 * overflows the first mbuf; verify against the nfsm_strtom() macro.
 */
nfsm_strtmbuf(mb, bpos, cp, siz)
	struct mbuf **mb;
	char **bpos;
	char *cp;
	long siz;
{
	register struct mbuf *m1, *m2;
	long left, xfer, len, tlen;
	u_long *tl;
	int putsize;

	putsize = 1;		/* length word not yet emitted */
	m2 = *mb;
	left = M_TRAILINGSPACE(m2);
	if (left > 0) {
		tl = ((u_long *)(*bpos));
		*tl++ = txdr_unsigned(siz);
		putsize = 0;
		left -= NFSX_UNSIGNED;
		m2->m_len += NFSX_UNSIGNED;
		if (left > 0) {
			bcopy(cp, (caddr_t) tl, left);
			siz -= left;
			cp += left;
			m2->m_len += left;
			left = 0;
		}
	}
	/* Loop around adding mbufs */
	while (siz > 0) {
		MGET(m1, M_WAIT, MT_DATA);
		if (siz > MLEN)
			MCLGET(m1, M_WAIT);
		m1->m_len = NFSMSIZ(m1);
		m2->m_next = m1;
		m2 = m1;
		tl = mtod(m1, u_long *);
		tlen = 0;
		if (putsize) {
			*tl++ = txdr_unsigned(siz);
			m1->m_len -= NFSX_UNSIGNED;
			tlen = NFSX_UNSIGNED;
			putsize = 0;
		}
		if (siz < m1->m_len) {
			/* final chunk: round up and zero the pad word */
			len = nfsm_rndup(siz);
			xfer = siz;
			if (xfer < len)
				*(tl+(xfer>>2)) = 0;
		} else {
			xfer = len = m1->m_len;
		}
		bcopy(cp, (caddr_t) tl, xfer);
		m1->m_len = len+tlen;
		siz -= xfer;
		cp += xfer;
	}
	*mb = m1;
	*bpos = mtod(m1, caddr_t)+m1->m_len;
	return (0);
}

/*
 * Called once to initialize data structures...
 *
 * Pre-byte-swaps the constant RPC/NFS protocol words, clears the async
 * daemon table, initializes the nfsnode hash, server structures, request
 * cache, nqnfs lease bookkeeping and the outstanding-request list, and
 * kicks off the retransmit timer.
 */
nfs_init()
{
	register int i;
	union nqsrvthead *lhp;	/* NOTE(review): appears unused here */

	nfsrtt.pos = 0;
	rpc_vers = txdr_unsigned(RPC_VER2);
	rpc_call = txdr_unsigned(RPC_CALL);
	rpc_reply = txdr_unsigned(RPC_REPLY);
	rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
	rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
	rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
	rpc_autherr = txdr_unsigned(RPC_AUTHERR);
	rpc_rejectedcred = txdr_unsigned(AUTH_REJECTCRED);
	rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
	rpc_auth_kerb = txdr_unsigned(RPCAUTH_NQNFS);
	nfs_vers = txdr_unsigned(NFS_VER2);
	nfs_prog = txdr_unsigned(NFS_PROG);
	nfs_true = txdr_unsigned(TRUE);
	nfs_false = txdr_unsigned(FALSE);
	/* Loop thru nfs procids */
	for (i = 0; i < NFS_NPROCS; i++)
		nfs_procids[i] = txdr_unsigned(i);
	/* Ensure async daemons disabled */
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
		nfs_iodwant[i] = (struct proc *)0;
	queue_init(&nfs_bufq);
	nfs_xdrneg1 = txdr_unsigned(-1);
	nfs_nhinit();			/* Init the nfsnode table */
	nfsrv_init(0);			/* Init server data structures */
	nfsrv_initcache();		/* Init the server request cache */

	/*
	 * Initialize the nqnfs server stuff.
	 */
	if (nqnfsstarttime == 0) {
		nqnfsstarttime = boottime.tv_sec + nqsrv_maxlease
			+ nqsrv_clockskew + nqsrv_writeslack;
		NQLOADNOVRAM(nqnfsstarttime);
		nqnfs_prog = txdr_unsigned(NQNFS_PROG);
		nqnfs_vers = txdr_unsigned(NQNFS_VER1);
		/* empty circular lease timer queue */
		nqthead.th_head[0] = &nqthead;
		nqthead.th_head[1] = &nqthead;
		nqfhead = hashinit(NQLCHSZ, M_NQLEASE, &nqfheadhash);
	}

	/*
	 * Initialize reply list and start timer
	 */
	nfsreqh.r_prev = nfsreqh.r_next = &nfsreqh;
	nfs_timer();
}

/*
 * Attribute cache routines.
 * nfs_loadattrcache() - loads or updates the cache contents from attributes
 *	that are on the mbuf list
 * nfs_getattrcache() - returns valid attributes if found in cache, returns
 *	error otherwise
 */

/*
 * Load the attribute cache (that lives in the nfsnode entry) with
 * the values on the mbuf list and
 * Iff vap not NULL
 * copy the attributes to *vaper
 *
 * *vpp may be replaced when the node turns out to be a special-device
 * alias.  *mdp/*dposp are the reply cursor and are advanced past the
 * fattr.  Returns 0, EOPNOTSUPP (FIFO without FIFO support), or an
 * error from nfsm_disct().
 */
nfs_loadattrcache(vpp, mdp, dposp, vaper)
	struct vnode **vpp;
	struct mbuf **mdp;
	caddr_t *dposp;
	struct vattr *vaper;
{
	register struct vnode *vp = *vpp;
	register struct vattr *vap;
	register struct nfsv2_fattr *fp;
	extern int (**spec_nfsv2nodeop_p)();
	register struct nfsnode *np, *nq, **nhpp;
	register long t1;
	caddr_t dpos, cp2;
	int error = 0, isnq;
	struct mbuf *md;
	enum vtype vtyp;
	u_short vmode;
	long rdev;
	struct timespec mtime;
	struct vnode *nvp;

	md = *mdp;
	dpos = *dposp;
	/* bytes left in the current mbuf, for nfsm_disct */
	t1 = (mtod(md, caddr_t) + md->m_len) - dpos;
	isnq = (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS);
	/* make the fattr contiguous so it can be overlaid with a struct */
	if (error = nfsm_disct(&md, &dpos, NFSX_FATTR(isnq), t1, TRUE, &cp2))
		return (error);
	fp = (struct nfsv2_fattr *)cp2;
	vtyp = nfstov_type(fp->fa_type);
	vmode = fxdr_unsigned(u_short, fp->fa_mode);
	/* for VNON/VREG trust the mode bits over the wire type */
	if (vtyp == VNON || vtyp == VREG)
		vtyp = IFTOVT(vmode);
	if (isnq) {
		rdev = fxdr_unsigned(long, fp->fa_nqrdev);
		fxdr_nqtime(&fp->fa_nqmtime, &mtime);
	} else {
		rdev = fxdr_unsigned(long, fp->fa_nfsrdev);
		fxdr_nfstime(&fp->fa_nfsmtime, &mtime);
	}
	/*
	 * If v_type == VNON it is a new node, so fill in the v_type,
	 * n_mtime fields. Check to see if it represents a special
	 * device, and if so, check for a possible alias. Once the
	 * correct vnode has been obtained, fill in the rest of the
	 * information.
	 */
	np = VTONFS(vp);
	if (vp->v_type == VNON) {
		/* VCHR with rdev all-ones encodes a FIFO over NFSv2 */
		if (vtyp == VCHR && rdev == 0xffffffff)
			vp->v_type = vtyp = VFIFO;
		else
			vp->v_type = vtyp;
		if (vp->v_type == VFIFO) {
#ifdef FIFO
			extern int (**fifo_nfsv2nodeop_p)();
			vp->v_op = fifo_nfsv2nodeop_p;
#else
			return (EOPNOTSUPP);
#endif /* FIFO */
		}
		if (vp->v_type == VCHR || vp->v_type == VBLK) {
			vp->v_op = spec_nfsv2nodeop_p;
			if (nvp = checkalias(vp, (dev_t)rdev, vp->v_mount)) {
				/*
				 * Discard unneeded vnode, but save its nfsnode.
				 * First unlink np from the nfsnode hash chain.
				 */
				if (nq = np->n_forw)
					nq->n_back = np->n_back;
				*np->n_back = nq;
				nvp->v_data = vp->v_data;
				vp->v_data = NULL;
				vp->v_op = spec_vnodeop_p;
				vrele(vp);
				vgone(vp);
				/*
				 * Reinitialize aliased node:
				 * re-insert np at the head of its hash chain.
				 */
				np->n_vnode = nvp;
				nhpp = (struct nfsnode **)nfs_hash(&np->n_fh);
				if (nq = *nhpp)
					nq->n_back = &np->n_forw;
				np->n_forw = nq;
				np->n_back = nhpp;
				*nhpp = np;
				*vpp = vp = nvp;
			}
		}
		np->n_mtime = mtime.ts_sec;
	}
	vap = &np->n_vattr;
	vap->va_type = vtyp;
	vap->va_mode = (vmode & 07777);
	vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
	vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
	vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
	vap->va_rdev = (dev_t)rdev;
	vap->va_mtime = mtime;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	if (isnq) {
		fxdr_hyper(&fp->fa_nqsize, &vap->va_size);
		vap->va_blocksize = fxdr_unsigned(long, fp->fa_nqblocksize);
		fxdr_hyper(&fp->fa_nqbytes, &vap->va_bytes);
		vap->va_fileid = fxdr_unsigned(long, fp->fa_nqfileid);
		fxdr_nqtime(&fp->fa_nqatime, &vap->va_atime);
		vap->va_flags = fxdr_unsigned(u_long, fp->fa_nqflags);
		fxdr_nqtime(&fp->fa_nqctime, &vap->va_ctime);
		vap->va_gen = fxdr_unsigned(u_long, fp->fa_nqgen);
		fxdr_hyper(&fp->fa_nqfilerev, &vap->va_filerev);
	} else {
		vap->va_size = fxdr_unsigned(u_long, fp->fa_nfssize);
		vap->va_blocksize = fxdr_unsigned(long, fp->fa_nfsblocksize);
		vap->va_bytes = fxdr_unsigned(long, fp->fa_nfsblocks) * NFS_FABLKSIZE;
		vap->va_fileid = fxdr_unsigned(long, fp->fa_nfsfileid);
		vap->va_atime.ts_sec = fxdr_unsigned(long, fp->fa_nfsatime.nfs_sec);
		vap->va_atime.ts_nsec = 0;
		/*
		 * NB: va_flags/va_gen are taken from the atime/ctime usec
		 * fields — apparently a BSD overloading of the otherwise
		 * unused NFSv2 microsecond slots; verify against the server.
		 */
		vap->va_flags = fxdr_unsigned(u_long, fp->fa_nfsatime.nfs_usec);
		vap->va_ctime.ts_sec = fxdr_unsigned(long, fp->fa_nfsctime.nfs_sec);
		vap->va_ctime.ts_nsec = 0;
		vap->va_gen = fxdr_unsigned(u_long, fp->fa_nfsctime.nfs_usec);
		vap->va_filerev = 0;
	}
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (np->n_flag & NMODIFIED) {
				/* dirty locally: keep the larger size */
				if (vap->va_size < np->n_size)
					vap->va_size = np->n_size;
				else
					np->n_size = vap->va_size;
			} else
				np->n_size = vap->va_size;
			vnode_pager_setsize(vp, (u_long)np->n_size);
		} else
			np->n_size = vap->va_size;
	}
	np->n_attrstamp = time.tv_sec;
	*dposp = dpos;
	*mdp = md;
	if (vaper != NULL) {
		bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
#ifdef notdef
		if ((np->n_flag & NMODIFIED) && np->n_size > vap->va_size)
			if (np->n_size > vap->va_size)
				vaper->va_size = np->n_size;
#endif
		/* locally pending atime/mtime updates override the server */
		if (np->n_flag & NCHG) {
			if (np->n_flag & NACC) {
				vaper->va_atime.ts_sec = np->n_atim.tv_sec;
				vaper->va_atime.ts_nsec =
				    np->n_atim.tv_usec * 1000;
			}
			if (np->n_flag & NUPD) {
				vaper->va_mtime.ts_sec = np->n_mtim.tv_sec;
				vaper->va_mtime.ts_nsec =
				    np->n_mtim.tv_usec * 1000;
			}
		}
	}
	return (0);
}

/*
 * Check the time stamp
 * If the cache is valid, copy contents to *vap and return 0
 * otherwise return an error
 *
 * Validity test: under NQNFS-with-lease mounts the lease must still be
 * cachable; otherwise the timed attribute cache must not have expired.
 * Returns 0 on a hit or ENOENT on a miss.
 */
nfs_getattrcache(vp, vaper)
	register struct vnode *vp;
	struct vattr *vaper;
{
	register struct nfsnode *np = VTONFS(vp);
	register struct vattr *vap;

	if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQLOOKLEASE) {
		if (!NQNFS_CKCACHABLE(vp, NQL_READ) || np->n_attrstamp == 0) {
			nfsstats.attrcache_misses++;
			return (ENOENT);
		}
	} else if ((time.tv_sec - np->n_attrstamp) >= NFS_ATTRTIMEO(np)) {
		nfsstats.attrcache_misses++;
		return (ENOENT);
	}
	nfsstats.attrcache_hits++;
	vap = &np->n_vattr;
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (np->n_flag & NMODIFIED) {
				/* dirty locally: keep the larger size */
				if (vap->va_size < np->n_size)
					vap->va_size = np->n_size;
				else
					np->n_size = vap->va_size;
			} else
				np->n_size = vap->va_size;
			vnode_pager_setsize(vp, (u_long)np->n_size);
		} else
			np->n_size = vap->va_size;
	}
	bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(struct vattr));
#ifdef notdef
	if ((np->n_flag & NMODIFIED) == 0) {
		np->n_size = vaper->va_size;
		vnode_pager_setsize(vp, (u_long)np->n_size);
	} else if (np->n_size > vaper->va_size)
		if (np->n_size > vaper->va_size)
			vaper->va_size = np->n_size;
#endif
	/* locally pending atime/mtime updates override the cached copy */
	if (np->n_flag & NCHG) {
		if (np->n_flag & NACC) {
			vaper->va_atime.ts_sec = np->n_atim.tv_sec;
			vaper->va_atime.ts_nsec = np->n_atim.tv_usec * 1000;
		}
		if (np->n_flag & NUPD) {
			vaper->va_mtime.ts_sec = np->n_mtim.tv_sec;
			vaper->va_mtime.ts_nsec = np->n_mtim.tv_usec * 1000;
		}
	}
	return (0);
}

/*
 * Set up nameidata for a lookup() call and do it
 * (server side: fhp is the directory handle, the name of length len is
 * pulled off the request at *mdp/*dposp, which are advanced past it)
 *
 * On success with SAVENAME/SAVESTART the caller owns cn_pnbuf; on all
 * other paths the buffer is freed here.  Returns 0 or an errno
 * (EBADRPC, EINVAL, ENOTDIR, or a lookup/fhtovp error).
 */
nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, p)
	register struct nameidata *ndp;
	fhandle_t *fhp;
	int len;
	struct nfssvc_sock *slp;
	struct mbuf *nam;
	struct mbuf **mdp;
	caddr_t *dposp;
	struct proc *p;
{
	register int i, rem;
	register struct mbuf *md;
	register char *fromcp, *tocp;
	struct vnode *dp;
	int error, rdonly;
	struct componentname *cnp = &ndp->ni_cnd;

	MALLOC(cnp->cn_pnbuf, char *, len + 1, M_NAMEI, M_WAITOK);
	/*
	 * Copy the name from the mbuf list to ndp->ni_pnbuf
	 * and set the various ndp fields appropriately.
	 */
	fromcp = *dposp;
	tocp = cnp->cn_pnbuf;
	md = *mdp;
	rem = mtod(md, caddr_t) + md->m_len - fromcp;
	cnp->cn_hash = 0;
	for (i = 0; i < len; i++) {
		while (rem == 0) {
			md = md->m_next;
			if (md == NULL) {
				error = EBADRPC;
				goto out;
			}
			fromcp = mtod(md, caddr_t);
			rem = md->m_len;
		}
		/* a single pathname component may not contain NUL or '/' */
		if (*fromcp == '\0' || *fromcp == '/') {
			error = EINVAL;
			goto out;
		}
		/*
		 * High-bit bytes only tolerated for DELETE — presumably a
		 * guard related to whiteout encoding; TODO confirm.
		 */
		if (*fromcp & 0200)
			if ((*fromcp&0377) == ('/'|0200) || cnp->cn_nameiop != DELETE) {
				error = EINVAL;
				goto out;
			}
		cnp->cn_hash += (unsigned char)*fromcp;
		*tocp++ = *fromcp++;
		rem--;
	}
	*tocp = '\0';
	*mdp = md;
	*dposp = fromcp;
	/* skip the XDR padding after the name */
	len = nfsm_rndup(len)-len;
	if (len > 0) {
		if (rem >= len)
			*dposp += len;
		else if (error = nfs_adv(mdp, dposp, len, rem))
			goto out;
	}
	ndp->ni_pathlen = tocp - cnp->cn_pnbuf;
	cnp->cn_nameptr = cnp->cn_pnbuf;
	/*
	 * Extract and set starting directory.
	 */
	if (error = nfsrv_fhtovp(fhp, FALSE, &dp, ndp->ni_cnd.cn_cred, slp,
	    nam, &rdonly))
		goto out;
	if (dp->v_type != VDIR) {
		vrele(dp);
		error = ENOTDIR;
		goto out;
	}
	ndp->ni_startdir = dp;
	if (rdonly)
		cnp->cn_flags |= (NOCROSSMOUNT | RDONLY);
	else
		cnp->cn_flags |= NOCROSSMOUNT;
	/*
	 * And call lookup() to do the real work
	 */
	cnp->cn_proc = p;
	if (error = lookup(ndp))
		goto out;
	/*
	 * Check for encountering a symbolic link
	 * (not followable on the server side — undo and fail)
	 */
	if (cnp->cn_flags & ISSYMLINK) {
		if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1)
			vput(ndp->ni_dvp);
		else
			vrele(ndp->ni_dvp);
		vput(ndp->ni_vp);
		ndp->ni_vp = NULL;
		error = EINVAL;
		goto out;
	}
	/*
	 * Check for saved name request
	 */
	if (cnp->cn_flags & (SAVENAME | SAVESTART)) {
		cnp->cn_flags |= HASBUF;
		return (0);
	}
out:
	FREE(cnp->cn_pnbuf, M_NAMEI);
	return (error);
}

/*
 * A fiddled version of
 m_adj() that ensures null fill to a long
 * boundary and only trims off the back end
 *
 * mp: chain to trim; len: bytes to remove from the tail; nul: number of
 * bytes just before the new end to overwrite with NULs.
 */
void
nfsm_adj(mp, len, nul)
	struct mbuf *mp;
	register int len;
	int nul;
{
	register struct mbuf *m;
	register int count, i;
	register char *cp;

	/*
	 * Trim from tail. Scan the mbuf chain,
	 * calculating its length and finding the last mbuf.
	 * If the adjustment only affects this mbuf, then just
	 * adjust and return. Otherwise, rescan and truncate
	 * after the remaining size.
	 */
	count = 0;
	m = mp;
	for (;;) {
		count += m->m_len;
		if (m->m_next == (struct mbuf *)0)
			break;
		m = m->m_next;
	}
	if (m->m_len > len) {
		/* trim fits entirely within the last mbuf */
		m->m_len -= len;
		if (nul > 0) {
			cp = mtod(m, caddr_t)+m->m_len-nul;
			for (i = 0; i < nul; i++)
				*cp++ = '\0';
		}
		return;
	}
	count -= len;
	if (count < 0)
		count = 0;
	/*
	 * Correct length for chain is "count".
	 * Find the mbuf with last data, adjust its length,
	 * and toss data from remaining mbufs on chain.
	 */
	for (m = mp; m; m = m->m_next) {
		if (m->m_len >= count) {
			m->m_len = count;
			if (nul > 0) {
				cp = mtod(m, caddr_t)+m->m_len-nul;
				for (i = 0; i < nul; i++)
					*cp++ = '\0';
			}
			break;
		}
		count -= m->m_len;
	}
	/* zero the length of every trailing mbuf (assignment intended) */
	while (m = m->m_next)
		m->m_len = 0;
}

/*
 * nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
 * 	- look up fsid in mount list (if not found ret error)
 *	- get vp and export rights by calling VFS_FHTOVP()
 *	- if cred->cr_uid == 0 or MNT_EXPORTANON set it to credanon
 *	- if not lockflag unlock it with VOP_UNLOCK()
 *
 * nam is the client's address (used by VFS_FHTOVP for export checks);
 * *rdonlyp is set when the export is read-only.  Returns 0, ESTALE,
 * NQNFS_AUTHERR (Kerberos export, uid not registered), or a
 * VFS_FHTOVP error.  May rewrite cred's uid/groups in place.
 */
nfsrv_fhtovp(fhp, lockflag, vpp, cred, slp, nam, rdonlyp)
	fhandle_t *fhp;
	int lockflag;
	struct vnode **vpp;
	struct ucred *cred;
	struct nfssvc_sock *slp;
	struct mbuf *nam;
	int *rdonlyp;
{
	register struct mount *mp;
	register struct nfsuid *uidp;
	register int i;
	struct ucred *credanon;
	int error, exflags;

	*vpp = (struct vnode *)0;
	if ((mp = getvfs(&fhp->fh_fsid)) == NULL)
		return (ESTALE);
	if (error = VFS_FHTOVP(mp, &fhp->fh_fid, nam, vpp, &exflags, &credanon))
		return (error);
	/*
	 * Check/setup credentials.
	 */
	if (exflags & MNT_EXKERB) {
		/* Kerberos export: map the uid via this socket's uid hash */
		uidp = slp->ns_uidh[NUIDHASH(cred->cr_uid)];
		while (uidp) {
			if (uidp->nu_uid == cred->cr_uid)
				break;
			uidp = uidp->nu_hnext;
		}
		if (uidp) {
			cred->cr_uid = uidp->nu_cr.cr_uid;
			for (i = 0; i < uidp->nu_cr.cr_ngroups; i++)
				cred->cr_groups[i] = uidp->nu_cr.cr_groups[i];
		} else {
			vput(*vpp);
			return (NQNFS_AUTHERR);
		}
	} else if (cred->cr_uid == 0 || (exflags & MNT_EXPORTANON)) {
		/* squash root (or everyone, for anon exports) to credanon */
		cred->cr_uid = credanon->cr_uid;
		for (i = 0; i < credanon->cr_ngroups && i < NGROUPS; i++)
			cred->cr_groups[i] = credanon->cr_groups[i];
	}
	if (exflags & MNT_EXRDONLY)
		*rdonlyp = 1;
	else
		*rdonlyp = 0;
	if (!lockflag)
		VOP_UNLOCK(*vpp);
	return (0);
}

/*
 * This function compares two net addresses by family and returns TRUE
 * if they are the same host.
 * If there is any doubt, return FALSE.
 * The AF_INET family is handled as a special case so that address mbufs
 * don't need to be saved to store "struct in_addr", which is only 4 bytes.
1087 */ 1088 netaddr_match(family, haddr, nam) 1089 int family; 1090 union nethostaddr *haddr; 1091 struct mbuf *nam; 1092 { 1093 register struct sockaddr_in *inetaddr; 1094 1095 switch (family) { 1096 case AF_INET: 1097 inetaddr = mtod(nam, struct sockaddr_in *); 1098 if (inetaddr->sin_family == AF_INET && 1099 inetaddr->sin_addr.s_addr == haddr->had_inetaddr) 1100 return (1); 1101 break; 1102 #ifdef ISO 1103 case AF_ISO: 1104 { 1105 register struct sockaddr_iso *isoaddr1, *isoaddr2; 1106 1107 isoaddr1 = mtod(nam, struct sockaddr_iso *); 1108 isoaddr2 = mtod(haddr->had_nam, struct sockaddr_iso *); 1109 if (isoaddr1->siso_family == AF_ISO && 1110 isoaddr1->siso_nlen > 0 && 1111 isoaddr1->siso_nlen == isoaddr2->siso_nlen && 1112 SAME_ISOADDR(isoaddr1, isoaddr2)) 1113 return (1); 1114 break; 1115 } 1116 #endif /* ISO */ 1117 default: 1118 break; 1119 }; 1120 return (0); 1121 } 1122