/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)nfs_subs.c	7.38 (Berkeley) 03/19/91
 */

/*
 * These functions support the macros and help fiddle mbuf chains for
 * the nfs op functions. They do things like create the rpc header and
 * copy data between mbuf chains and uio lists.
 */
#include "param.h"
#include "proc.h"
#include "filedesc.h"
#include "systm.h"
#include "kernel.h"
#include "mount.h"
#include "file.h"
#include "vnode.h"
#include "mbuf.h"
#include "map.h"

#include "../ufs/quota.h"
#include "../ufs/inode.h"

#include "rpcv2.h"
#include "nfsv2.h"
#include "nfsnode.h"
#include "nfs.h"
#include "nfsiom.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"
#include "nfscompress.h"

#define TRUE	1
#define	FALSE	0

/*
 * Data items converted to xdr at startup, since they are constant
 * This is kinda hokey, but may save a little time doing byte swaps
 */
u_long nfs_procids[NFS_NPROCS];		/* procedure numbers, pre-XDR'd by nfs_init() */
u_long nfs_xdrneg1;			/* XDR encoding of -1 */
u_long rpc_call, rpc_vers, rpc_reply, rpc_msgdenied,
	rpc_mismatch, rpc_auth_unix, rpc_msgaccepted;
u_long nfs_vers, nfs_prog, nfs_true, nfs_false;
/* And other global data */
/*
 * rpc_unixauth is a single static AUTH_UNIX credential buffer, allocated
 * on first use by nfs_unixauth(); rpc_uidp points just past its constant
 * prefix (stamp/hostid fields) so only the per-call uid/gid part is
 * rewritten on each call.
 */
static u_long *rpc_uidp = (u_long *)0;
static u_long nfs_xid = 1;		/* transaction id; incremented per request */
static char *rpc_unixauth;
extern long hostid;
/* NFS ftype -> vnode type; index NFNON..NFLNK, trailing slot unused */
enum vtype ntov_type[7] = { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON };
extern struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
extern struct map nfsmap[NFS_MSIZ];
extern struct nfsreq nfsreqh;

/* Function ret types */
static char *nfs_unixauth();

/*
 * Maximum number of groups passed through to NFS server.
 * According to RFC1057 it should be 16.
 * For release 3.X systems, the maximum value is 8.
 * For some other servers, the maximum value is 10.
 */
int numgrps = 8;

/*
 * Create the header for an rpc request packet
 * The function nfs_unixauth() creates a unix style authorization string
 * and returns a ptr to it.
 * The hsiz is the size of the rest of the nfs request header.
 * (just used to decide if a cluster is a good idea)
 * nb: Note that the prog, vers and procid args are already in xdr byte order
 * Returns the head of the new chain; *mb is set to the last mbuf in the
 * chain and *bpos to the first free byte within it, and *retxid to the
 * xid assigned to this request.
 */
struct mbuf *nfsm_reqh(prog, vers, procid, cred, hsiz, bpos, mb, retxid)
	u_long prog;
	u_long vers;
	u_long procid;
	struct ucred *cred;
	int hsiz;
	caddr_t *bpos;
	struct mbuf **mb;
	u_long *retxid;
{
	register struct mbuf *mreq, *m;
	register u_long *p;
	struct mbuf *m1;
	char *ap;
	int asiz, siz;

	NFSMGETHDR(mreq);
	/* Size of the credential body: 9 fixed words plus the gid list */
	asiz = ((((cred->cr_ngroups - 1) > numgrps) ? numgrps :
		(cred->cr_ngroups - 1)) << 2);
#ifdef FILLINHOST
	asiz += nfsm_rndup(hostnamelen)+(9*NFSX_UNSIGNED);
#else
	asiz += 9*NFSX_UNSIGNED;
#endif

	/* If we need a lot, alloc a cluster ?? */
	if ((asiz+hsiz+RPC_SIZ) > MHLEN)
		MCLGET(mreq, M_WAIT);
	mreq->m_len = NFSMSIZ(mreq);
	siz = mreq->m_len;
	m1 = mreq;
	/*
	 * Alloc enough mbufs
	 * We do it now to avoid all sleeps after the call to nfs_unixauth()
	 * (the static auth buffer must be copied out before anyone else
	 * can call nfs_unixauth() again)
	 */
	while ((asiz+RPC_SIZ) > siz) {
		MGET(m, M_WAIT, MT_DATA);
		m1->m_next = m;
		m->m_len = MLEN;
		siz += MLEN;
		m1 = m;
	}
	/* Fill in the fixed RPC call header: xid, CALL, rpcvers, prog, vers, proc */
	p = mtod(mreq, u_long *);
	*p++ = *retxid = txdr_unsigned(++nfs_xid);
	*p++ = rpc_call;
	*p++ = rpc_vers;
	*p++ = prog;
	*p++ = vers;
	*p++ = procid;

	/* Now we can call nfs_unixauth() and copy it in */
	ap = nfs_unixauth(cred);
	m = mreq;
	siz = m->m_len-RPC_SIZ;
	if (asiz <= siz) {
		/* Credential fits in the first mbuf */
		bcopy(ap, (caddr_t)p, asiz);
		m->m_len = asiz+RPC_SIZ;
	} else {
		/* Spill the credential across the pre-allocated mbufs */
		bcopy(ap, (caddr_t)p, siz);
		ap += siz;
		asiz -= siz;
		while (asiz > 0) {
			siz = (asiz > MLEN) ? MLEN : asiz;
			m = m->m_next;
			bcopy(ap, mtod(m, caddr_t), siz);
			m->m_len = siz;
			asiz -= siz;
			ap += siz;
		}
	}

	/* Finally, return values */
	*mb = m;
	*bpos = mtod(m, caddr_t)+m->m_len;
	return (mreq);
}

/*
 * copies mbuf chain to the uio scatter/gather list
 * Advances *mrep/*dpos past the copied (and XDR round-up padded) data.
 * Returns 0, EFBIG if the uio list is exhausted first, or EBADRPC if
 * the mbuf chain runs out.
 */
nfsm_mbuftouio(mrep, uiop, siz, dpos)
	struct mbuf **mrep;
	register struct uio *uiop;
	int siz;
	caddr_t *dpos;
{
	register char *mbufcp, *uiocp;
	register int xfer, left, len;
	register struct mbuf *mp;
	long uiosiz, rem;
	int error = 0;

	mp = *mrep;
	mbufcp = *dpos;
	len = mtod(mp, caddr_t)+mp->m_len-mbufcp;	/* bytes left in this mbuf */
	rem = nfsm_rndup(siz)-siz;			/* XDR pad to skip at the end */
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
			return (EFBIG);
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			/* Step to the next mbuf with data as needed */
			while (len == 0) {
				mp = mp->m_next;
				if (mp == NULL)
					return (EBADRPC);
				mbufcp = mtod(mp, caddr_t);
				len = mp->m_len;
			}
			xfer = (left > len) ? len : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				(mbufcp, uiocp, xfer);
			else
#endif
			if (uiop->uio_segflg == UIO_SYSSPACE)
				bcopy(mbufcp, uiocp, xfer);
			else
				copyout(mbufcp, uiocp, xfer);
			left -= xfer;
			len -= xfer;
			mbufcp += xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		/* Consume the iovec, or advance within it if partially used */
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base += uiosiz;
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	*dpos = mbufcp;
	*mrep = mp;
	/* Skip the XDR round-up padding */
	if (rem > 0) {
		if (len < rem)
			error = nfs_adv(mrep, dpos, rem, len);
		else
			*dpos += rem;
	}
	return (error);
}

/*
 * copies a uio scatter/gather list to an mbuf chain...
 * Appends new mbufs after *mq; on return *mq is the last mbuf and *bpos
 * points past the data (including any '\0' XDR round-up fill).
 * NOTE(review): assumes siz > 0 on entry -- with siz == 0 the loop body
 * never runs and mp (and len) are used uninitialized at the end; verify
 * callers never pass a zero size.
 */
nfsm_uiotombuf(uiop, mq, siz, bpos)
	register struct uio *uiop;
	struct mbuf **mq;
	int siz;
	caddr_t *bpos;
{
	register char *uiocp;
	register struct mbuf *mp, *mp2;
	register int xfer, left, len;
	int uiosiz, clflg, rem;
	char *cp;

	if (siz > MLEN)		/* or should it >= MCLBYTES ?? */
		clflg = 1;
	else
		clflg = 0;
	rem = nfsm_rndup(siz)-siz;	/* XDR pad bytes to append */
	mp2 = *mq;
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
			return (EINVAL);
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			MGET(mp, M_WAIT, MT_DATA);
			if (clflg)
				MCLGET(mp, M_WAIT);
			mp->m_len = NFSMSIZ(mp);
			mp2->m_next = mp;
			mp2 = mp;
			xfer = (left > mp->m_len) ? mp->m_len : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				(uiocp, mtod(mp, caddr_t), xfer);
			else
#endif
			if (uiop->uio_segflg == UIO_SYSSPACE)
				bcopy(uiocp, mtod(mp, caddr_t), xfer);
			else
				copyin(uiocp, mtod(mp, caddr_t), xfer);
			/* len remembers this mbuf's capacity for the pad check below */
			len = mp->m_len;
			mp->m_len = xfer;
			left -= xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base += uiosiz;
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	if (rem > 0) {
		/* Need a fresh mbuf if the pad won't fit in the last one */
		if (rem > (len-mp->m_len)) {
			MGET(mp, M_WAIT, MT_DATA);
			mp->m_len = 0;
			mp2->m_next = mp;
		}
		cp = mtod(mp, caddr_t)+mp->m_len;
		for (left = 0; left < rem; left++)
			*cp++ = '\0';
		mp->m_len += rem;
		*bpos = cp;
	} else
		*bpos = mtod(mp, caddr_t)+mp->m_len;
	*mq = mp;
	return (0);
}

/*
 * Help break down an mbuf chain by setting the first siz bytes contiguous
 * pointed to by returned val.
 * If Updateflg == True we can overwrite the first part of the mbuf data
 * This is used by the macros nfsm_disect and nfsm_disecton for tough
 * cases. (The macros use the vars.
 * dpos and dpos2)
 * left is the number of bytes remaining in the current mbuf at *dposp.
 * On success *cp2 points at siz contiguous bytes and *mdp/*dposp are
 * advanced past them; returns EBADRPC if the chain is too short.
 */
nfsm_disct(mdp, dposp, siz, left, updateflg, cp2)
	struct mbuf **mdp;
	caddr_t *dposp;
	int siz;
	int left;
	int updateflg;
	caddr_t *cp2;
{
	register struct mbuf *mp, *mp2;
	register int siz2, xfer;
	register caddr_t p;

	mp = *mdp;
	/* Skip any exhausted mbufs first */
	while (left == 0) {
		*mdp = mp = mp->m_next;
		if (mp == NULL)
			return (EBADRPC);
		left = mp->m_len;
		*dposp = mtod(mp, caddr_t);
	}
	if (left >= siz) {
		/* Already contiguous; just hand back the current position */
		*cp2 = *dposp;
		*dposp += siz;
	} else if (mp->m_next == NULL) {
		return (EBADRPC);
	} else if (siz > MHLEN) {
		panic("nfs S too big");
	} else {
		/* Iff update, you can overwrite, else must alloc new mbuf */
		if (updateflg) {
			NFSMINOFF(mp);
		} else {
			MGET(mp2, M_WAIT, MT_DATA);
			mp2->m_next = mp->m_next;
			mp->m_next = mp2;
			mp->m_len -= left;
			mp = mp2;
		}
		*cp2 = p = mtod(mp, caddr_t);
		bcopy(*dposp, p, left);		/* Copy what was left */
		siz2 = siz-left;
		p += left;
		mp2 = mp->m_next;
		/* Loop around copying up the siz2 bytes */
		while (siz2 > 0) {
			if (mp2 == NULL)
				return (EBADRPC);
			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
			if (xfer > 0) {
				bcopy(mtod(mp2, caddr_t), p, xfer);
				NFSMADV(mp2, xfer);	/* advance mp2's data ptr */
				mp2->m_len -= xfer;
				p += xfer;
				siz2 -= xfer;
			}
			if (siz2 > 0)
				mp2 = mp2->m_next;
		}
		mp->m_len = siz;
		*mdp = mp2;
		*dposp = mtod(mp2, caddr_t);
	}
	return (0);
}

/*
 * Advance the position in the mbuf chain.
 * Skips offs bytes starting at *dposp; left is the byte count remaining
 * in the current mbuf. Returns EBADRPC if the chain ends first.
 */
nfs_adv(mdp, dposp, offs, left)
	struct mbuf **mdp;
	caddr_t *dposp;
	int offs;
	int left;
{
	register struct mbuf *m;
	register int s;

	m = *mdp;
	s = left;
	while (s < offs) {
		offs -= s;
		m = m->m_next;
		if (m == NULL)
			return (EBADRPC);
		s = m->m_len;
	}
	*mdp = m;
	*dposp = mtod(m, caddr_t)+offs;
	return (0);
}

/*
 * Copy a string into mbufs for the hard cases...
 * Emits the XDR length word followed by the string data, zero-filled to
 * a 4-byte boundary; *mb/*bpos are updated to the new chain tail.
 * NOTE(review): relies on the caller invariant that the string does not
 * fit entirely in the current mbuf (the easy case is handled inline by
 * the nfsm_* macros) -- if siz were consumed before the while loop ran,
 * m1 would be used uninitialized at the end.  Verify against nfsm_subs.h.
 */
nfsm_strtmbuf(mb, bpos, cp, siz)
	struct mbuf **mb;
	char **bpos;
	char *cp;
	long siz;
{
	register struct mbuf *m1, *m2;
	long left, xfer, len, tlen;
	u_long *p;
	int putsize;

	putsize = 1;		/* length word still to be emitted */
	m2 = *mb;
	left = NFSMSIZ(m2)-m2->m_len;
	if (left > 0) {
		/* Use up the tail of the current mbuf first */
		p = ((u_long *)(*bpos));
		*p++ = txdr_unsigned(siz);
		putsize = 0;
		left -= NFSX_UNSIGNED;
		m2->m_len += NFSX_UNSIGNED;
		if (left > 0) {
			bcopy(cp, (caddr_t) p, left);
			siz -= left;
			cp += left;
			m2->m_len += left;
			left = 0;
		}
	}
	/* Loop around adding mbufs */
	while (siz > 0) {
		MGET(m1, M_WAIT, MT_DATA);
		if (siz > MLEN)
			MCLGET(m1, M_WAIT);
		m1->m_len = NFSMSIZ(m1);	/* temporarily holds capacity */
		m2->m_next = m1;
		m2 = m1;
		p = mtod(m1, u_long *);
		tlen = 0;
		if (putsize) {
			*p++ = txdr_unsigned(siz);
			m1->m_len -= NFSX_UNSIGNED;	/* capacity left for data */
			tlen = NFSX_UNSIGNED;
			putsize = 0;
		}
		if (siz < m1->m_len) {
			/* Last mbuf: round up and zero the pad word */
			len = nfsm_rndup(siz);
			xfer = siz;
			if (xfer < len)
				*(p+(xfer>>2)) = 0;
		} else {
			xfer = len = m1->m_len;
		}
		bcopy(cp, (caddr_t) p, xfer);
		m1->m_len = len+tlen;		/* now the real data length */
		siz -= xfer;
		cp += xfer;
	}
	*mb = m1;
	*bpos = mtod(m1, caddr_t)+m1->m_len;
	return (0);
}

/*
 * Called once to initialize data structures...
 */
nfs_init()
{
	register int i;

	/* Pre-XDR the constant protocol values (see globals above) */
	rpc_vers = txdr_unsigned(RPC_VER2);
	rpc_call = txdr_unsigned(RPC_CALL);
	rpc_reply = txdr_unsigned(RPC_REPLY);
	rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
	rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
	rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
	rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
	nfs_vers = txdr_unsigned(NFS_VER2);
	nfs_prog = txdr_unsigned(NFS_PROG);
	nfs_true = txdr_unsigned(TRUE);
	nfs_false = txdr_unsigned(FALSE);
	/* Loop thru nfs procids */
	for (i = 0; i < NFS_NPROCS; i++)
		nfs_procids[i] = txdr_unsigned(i);
	/* Ensure async daemons disabled */
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
		nfs_iodwant[i] = (struct proc *)0;
	nfs_xdrneg1 = txdr_unsigned(-1);
	nfs_nhinit();			/* Init the nfsnode table */
	nfsrv_initcache();		/* Init the server request cache */
	rminit(nfsmap, (long)NFS_MAPREG, (long)1, "nfs mapreg", NFS_MSIZ);

	/*
	 * Initialize reply list and start timer
	 */
	nfsreqh.r_prev = nfsreqh.r_next = &nfsreqh;
	nfs_timer();
}

/*
 * Fill in the rest of the rpc_unixauth and return it
 * NOT reentrant: the returned buffer is the single static rpc_unixauth
 * area, rewritten on every call; callers must copy it out before
 * sleeping (see nfsm_reqh, which pre-allocates its mbufs for this
 * reason).
 */
static char *nfs_unixauth(cr)
	register struct ucred *cr;
{
	register u_long *p;
	register int i;
	int ngr;

	/* Maybe someday there should be a cache of AUTH_SHORT's */
	if ((p = rpc_uidp) == NULL) {
		/* First call: build the constant prefix of the credential */
#ifdef FILLINHOST
		i = nfsm_rndup(hostnamelen)+(25*NFSX_UNSIGNED);
#else
		i = 25*NFSX_UNSIGNED;
#endif
		MALLOC(p, u_long *, i, M_TEMP, M_WAITOK);
		bzero((caddr_t)p, i);
		rpc_unixauth = (caddr_t)p;
		*p++ = txdr_unsigned(RPCAUTH_UNIX);
		p++;	/* Fill in size later */
		*p++ = hostid;
#ifdef FILLINHOST
		*p++ = txdr_unsigned(hostnamelen);
		i = nfsm_rndup(hostnamelen);
		bcopy(hostname, (caddr_t)p, hostnamelen);
		p += (i>>2);
#else
		*p++ = 0;	/* zero-length machine name */
#endif
		rpc_uidp = p;	/* per-call portion starts here */
	}
	/* Per-call portion: uid, gid, gid list */
	*p++ = txdr_unsigned(cr->cr_uid);
	*p++ = txdr_unsigned(cr->cr_groups[0]);
	ngr = ((cr->cr_ngroups - 1) > numgrps) ? numgrps : (cr->cr_ngroups - 1);
	*p++ = txdr_unsigned(ngr);
	for (i = 1; i <= ngr; i++)
		*p++ = txdr_unsigned(cr->cr_groups[i]);
	/* And add the AUTH_NULL */
	*p++ = 0;
	*p = 0;
	/* Back-patch the credential body length (total minus flavor/len/verf) */
	i = (((caddr_t)p)-rpc_unixauth)-12;
	p = (u_long *)(rpc_unixauth+4);
	*p = txdr_unsigned(i);
	return (rpc_unixauth);
}

/*
 * Attribute cache routines.
 * nfs_loadattrcache() - loads or updates the cache contents from attributes
 *	that are on the mbuf list
 * nfs_getattrcache() - returns valid attributes if found in cache, returns
 *	error otherwise
 */

/*
 * Load the attribute cache (that lives in the nfsnode entry) with
 * the values on the mbuf list and
 * Iff vap not NULL
 * copy the attributes to *vaper
 */
nfs_loadattrcache(vpp, mdp, dposp, vaper)
	struct vnode **vpp;
	struct mbuf **mdp;
	caddr_t *dposp;
	struct vattr *vaper;
{
	register struct vnode *vp = *vpp;
	register struct vattr *vap;
	register struct nfsv2_fattr *fp;
	extern struct vnodeops spec_nfsv2nodeops;
	register struct nfsnode *np;
	register long t1;
	caddr_t dpos, cp2;
	int error = 0;
	struct mbuf *md;
	enum vtype type;
	u_short mode;
	long rdev;
	struct timeval mtime;
	struct vnode *nvp;

	md = *mdp;
	dpos = *dposp;
	/* Make the fattr contiguous so it can be cast to a struct */
	t1 = (mtod(md, caddr_t)+md->m_len)-dpos;
	if (error = nfsm_disct(&md, &dpos, NFSX_FATTR, t1, TRUE, &cp2))
		return (error);
	fp = (struct nfsv2_fattr *)cp2;
	type = nfstov_type(fp->fa_type);
	mode = fxdr_unsigned(u_short, fp->fa_mode);
	if (type == VNON)
		type = IFTOVT(mode);
	rdev = fxdr_unsigned(long, fp->fa_rdev);
	fxdr_time(&fp->fa_mtime, &mtime);
	/*
	 * If v_type == VNON it is a new node, so fill in the v_type,
	 * n_mtime fields. Check to see if it represents a special
	 * device, and if so, check for a possible alias. Once the
	 * correct vnode has been obtained, fill in the rest of the
	 * information.
	 */
	np = VTONFS(vp);
	if (vp->v_type == VNON) {
		/* rdev of all-ones marks a named pipe on the server */
		if (type == VCHR && rdev == 0xffffffff)
			vp->v_type = type = VFIFO;
		else
			vp->v_type = type;
		if (vp->v_type == VFIFO) {
#ifdef FIFO
			extern struct vnodeops fifo_nfsv2nodeops;
			vp->v_op = &fifo_nfsv2nodeops;
#else
			return (EOPNOTSUPP);
#endif /* FIFO */
		}
		if (vp->v_type == VCHR || vp->v_type == VBLK) {
			vp->v_op = &spec_nfsv2nodeops;
			if (nvp = checkalias(vp, (dev_t)rdev, vp->v_mount)) {
				/*
				 * Reinitialize aliased node.
				 */
				np = VTONFS(nvp);
				np->n_vnode = nvp;
				np->n_flag = 0;
				nfs_lock(nvp);
				bcopy((caddr_t)&VTONFS(vp)->n_fh,
					(caddr_t)&np->n_fh, NFSX_FH);
				insque(np, nfs_hash(&np->n_fh));
				np->n_attrstamp = 0;
				np->n_sillyrename = (struct sillyrename *)0;
				/*
				 * Discard unneeded vnode and update actual one
				 */
				vput(vp);
				*vpp = nvp;
			}
		}
		np->n_mtime = mtime.tv_sec;
	}
	vap = &np->n_vattr;
	vap->va_type = type;
	vap->va_mode = (mode & 07777);
	vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
	vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
	vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
	vap->va_size = fxdr_unsigned(u_long, fp->fa_size);
	/* Don't shrink n_size while locally-modified data may be dirty */
	if ((np->n_flag & NMODIFIED) == 0 || vap->va_size > np->n_size) {
		np->n_size = vap->va_size;
#ifdef NVM
		vnode_pager_setsize(vp, np->n_size);
#endif
	}
	vap->va_size_rsv = 0;
	vap->va_blocksize = fxdr_unsigned(long, fp->fa_blocksize);
	vap->va_rdev = (dev_t)rdev;
	vap->va_bytes = fxdr_unsigned(long, fp->fa_blocks) * NFS_FABLKSIZE;
	vap->va_bytes_rsv = 0;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	vap->va_fileid = fxdr_unsigned(long, fp->fa_fileid);
	vap->va_atime.tv_sec = fxdr_unsigned(long, fp->fa_atime.tv_sec);
	vap->va_atime.tv_usec = 0;
	/*
	 * The usec halves of atime/ctime are overloaded to carry the
	 * flags and generation number (protocol has no fields for them).
	 */
	vap->va_flags = fxdr_unsigned(u_long, fp->fa_atime.tv_usec);
	vap->va_mtime = mtime;
	vap->va_ctime.tv_sec = fxdr_unsigned(long, fp->fa_ctime.tv_sec);
	vap->va_ctime.tv_usec = 0;
	vap->va_gen = fxdr_unsigned(u_long, fp->fa_ctime.tv_usec);
	np->n_attrstamp = time.tv_sec;	/* timestamp the cache entry */
	*dposp = dpos;
	*mdp = md;
	if (vaper != NULL) {
		bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
		if ((np->n_flag & NMODIFIED) && (np->n_size > vap->va_size))
			vaper->va_size = np->n_size;
	}
	return (0);
}

/*
 * Check the time stamp
 * If the cache is valid, copy contents to *vap and return 0
 * otherwise return an error
 */
nfs_getattrcache(vp, vap)
	register struct vnode *vp;
	struct vattr *vap;
{
	register struct nfsnode *np;

	np = VTONFS(vp);
	if ((time.tv_sec-np->n_attrstamp) < NFS_ATTRTIMEO) {
		nfsstats.attrcache_hits++;
		bcopy((caddr_t)&np->n_vattr,(caddr_t)vap,sizeof(struct vattr));
		if ((np->n_flag & NMODIFIED) == 0) {
			np->n_size = vap->va_size;
#ifdef NVM
			vnode_pager_setsize(vp, np->n_size);
#endif
		} else if (np->n_size > vap->va_size)
			/* Locally grown file: report our size, not the server's */
			vap->va_size = np->n_size;
		return (0);
	} else {
		nfsstats.attrcache_misses++;
		return (ENOENT);
	}
}

/*
 * Set up nameidata for a namei() call and do it
 * The name of length len is pulled out of the mbuf list at *mdp/*dposp
 * (unless the caller already filled the buffer, flagged by HASBUF).
 */
nfs_namei(ndp, fhp, len, mdp, dposp)
	register struct nameidata *ndp;
	fhandle_t *fhp;
	int len;
	struct mbuf **mdp;
	caddr_t *dposp;
{
	register int i, rem;
	register struct mbuf *md;
	register char *cp;
	struct vnode *dp;
	int flag;
	int error;

	if ((ndp->ni_nameiop & HASBUF) == 0) {
		flag = ndp->ni_nameiop & OPMASK;
		/*
		 * Copy the name from the mbuf list to the d_name field of ndp
		 * and set the various ndp fields appropriately.
		 */
		cp = *dposp;
		md = *mdp;
		rem = mtod(md, caddr_t)+md->m_len-cp;
		ndp->ni_hash = 0;
		for (i = 0; i < len;) {
			while (rem == 0) {
				md = md->m_next;
				if (md == NULL)
					return (EBADRPC);
				cp = mtod(md, caddr_t);
				rem = md->m_len;
			}
			/* Reject NULs and embedded slashes in a single component */
			if (*cp == '\0' || *cp == '/')
				return (EINVAL);
			/* High-bit chars only legal for DELETE (sillyrename names) */
			if (*cp & 0200)
				if ((*cp&0377) == ('/'|0200) || flag != DELETE)
					return (EINVAL);
			ndp->ni_dent.d_name[i++] = *cp;
			ndp->ni_hash += (unsigned char)*cp * i;
			cp++;
			rem--;
		}
		*mdp = md;
		*dposp = cp;
		/* Skip XDR round-up padding after the name */
		len = nfsm_rndup(len)-len;
		if (len > 0) {
			if (rem < len) {
				if (error = nfs_adv(mdp, dposp, len, rem))
					return (error);
			} else
				*dposp += len;
		}
	} else
		i = len;
	ndp->ni_namelen = i;
	ndp->ni_dent.d_namlen = i;
	ndp->ni_dent.d_name[i] = '\0';
	ndp->ni_segflg = UIO_SYSSPACE;
	ndp->ni_pathlen = 1;
	ndp->ni_pnbuf = ndp->ni_dirp = ndp->ni_ptr = &ndp->ni_dent.d_name[0];
	ndp->ni_next = &ndp->ni_dent.d_name[i];
	ndp->ni_nameiop |= (NOCROSSMOUNT | REMOTE | HASBUF | STARTDIR);
	/*
	 * Extract and set starting directory.
	 */
	if (error = nfsrv_fhtovp(fhp, FALSE, &dp, ndp->ni_cred))
		return (error);
	if (dp->v_type != VDIR) {
		vrele(dp);
		return (ENOTDIR);
	}
	ndp->ni_startdir = dp;
	/*
	 * And call namei() to do the real work
	 */
	error = namei(ndp);
	if (error || (ndp->ni_nameiop & SAVESTARTDIR) == 0)
		vrele(dp);
	return (error);
}

/*
 * A fiddled version of m_adj() that ensures null fill to a long
 * boundary and only trims off the back end
 * nul > 0 additionally zeroes the last nul bytes of the trimmed chain.
 */
nfsm_adj(mp, len, nul)
	struct mbuf *mp;
	register int len;
	int nul;
{
	register struct mbuf *m;
	register int count, i;
	register char *cp;

	/*
	 * Trim from tail.  Scan the mbuf chain,
	 * calculating its length and finding the last mbuf.
	 * If the adjustment only affects this mbuf, then just
	 * adjust and return.  Otherwise, rescan and truncate
	 * after the remaining size.
	 */
	count = 0;
	m = mp;
	for (;;) {
		count += m->m_len;
		if (m->m_next == (struct mbuf *)0)
			break;
		m = m->m_next;
	}
	if (m->m_len > len) {
		/* Trim fits entirely within the last mbuf */
		m->m_len -= len;
		if (nul > 0) {
			cp = mtod(m, caddr_t)+m->m_len-nul;
			for (i = 0; i < nul; i++)
				*cp++ = '\0';
		}
		return;
	}
	count -= len;
	if (count < 0)
		count = 0;
	/*
	 * Correct length for chain is "count".
	 * Find the mbuf with last data, adjust its length,
	 * and toss data from remaining mbufs on chain.
	 */
	for (m = mp; m; m = m->m_next) {
		if (m->m_len >= count) {
			m->m_len = count;
			if (nul > 0) {
				cp = mtod(m, caddr_t)+m->m_len-nul;
				for (i = 0; i < nul; i++)
					*cp++ = '\0';
			}
			break;
		}
		count -= m->m_len;
	}
	/* Empty out (but do not free) the rest of the chain */
	while (m = m->m_next)
		m->m_len = 0;
}

/*
 * nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
 * 	- look up fsid in mount list (if not found ret error)
 *	- check that it is exported
 *	- get vp by calling VFS_FHTOVP() macro
 *	- if not lockflag unlock it with VOP_UNLOCK()
 *	- if cred->cr_uid == 0 set it to m_exroot
 */
nfsrv_fhtovp(fhp, lockflag, vpp, cred)
	fhandle_t *fhp;
	int lockflag;
	struct vnode **vpp;
	struct ucred *cred;
{
	register struct mount *mp;

	if ((mp = getvfs(&fhp->fh_fsid)) == NULL)
		return (ESTALE);
	if ((mp->mnt_flag & MNT_EXPORTED) == 0)
		return (EACCES);
	if (VFS_FHTOVP(mp, &fhp->fh_fid, vpp))
		return (ESTALE);
	/* Map remote root to the export's exroot uid */
	if (cred->cr_uid == 0)
		cred->cr_uid = mp->mnt_exroot;
	if (!lockflag)
		VOP_UNLOCK(*vpp);
	return (0);
}

/*
 * These two functions implement nfs rpc compression.
 * The algorithm is a trivial run length encoding of '\0' bytes.
 * The high
 * order nibble of hex "e" is or'd with the number of zeroes - 2 in four
 * bits. (2 - 17 zeros)  Any data byte with a high order nibble of hex "e"
 * is byte stuffed.
 * The compressed data is padded with 0x0 bytes to an even multiple of
 * 4 bytes in length to avoid any weird long pointer alignments.
 * If compression/uncompression is unsuccessful, the original mbuf list
 * is returned.
 * The first four bytes (the XID) are left uncompressed and the fifth
 * byte is set to 0x1 for request and 0x2 for reply.
 * An uncompressed RPC will always have the fifth byte == 0x0.
 *
 * NOTE(review): nfscput()/nfscget() below are macros from nfscompress.h
 * that appear to expand over the surrounding locals (op/oleft/olen/mp
 * for output, ip/ileft/m/noteof for input) -- verify against that header
 * before renaming any of these variables.
 */
struct mbuf *
nfs_compress(m0)
	struct mbuf *m0;
{
	register u_char ch, nextch;
	register int i, rlelast;
	register u_char *ip, *op;
	register int ileft, oleft, noteof;
	register struct mbuf *m, *om;
	struct mbuf **mp, *retm;
	int olen, clget;

	i = rlelast = 0;
	noteof = 1;
	m = m0;
	/* Too small to be worth compressing (header must be present) */
	if (m->m_len < 12)
		return (m0);
	if (m->m_pkthdr.len >= MINCLSIZE)
		clget = 1;
	else
		clget = 0;
	ileft = m->m_len - 9;
	ip = mtod(m, u_char *);
	MGETHDR(om, M_WAIT, MT_DATA);
	if (clget)
		MCLGET(om, M_WAIT);
	retm = om;
	mp = &om->m_next;
	olen = om->m_len = 5;
	oleft = M_TRAILINGSPACE(om);
	op = mtod(om, u_char *);
	/* Copy the XID through verbatim, then mark byte 5 (0x0 -> 0x1/0x2) */
	*((u_long *)op) = *((u_long *)ip);
	ip += 7;
	op += 4;
	*op++ = *ip++ + 1;
	nextch = *ip++;
	while (noteof) {
		ch = nextch;
		if (ileft == 0) {
			/* Advance input to the next non-empty mbuf */
			do {
				m = m->m_next;
			} while (m && m->m_len == 0);
			if (m) {
				ileft = m->m_len;
				ip = mtod(m, u_char *);
			} else {
				/* End of input: flush the pending byte/run */
				noteof = 0;
				nextch = 0x1;
				goto doit;
			}
		}
		nextch = *ip++;
		ileft--;
doit:
		if (ch == '\0') {
			/* Accumulate a zero run; emit when it ends or maxes out */
			if (++i == NFSC_MAX || nextch != '\0') {
				if (i < 2) {
					/* Run of one: cheaper to emit literally */
					nfscput('\0');
				} else {
					/* Never emit two identical RLE codes in a row */
					if (rlelast == i) {
						nfscput('\0');
						i--;
					}
					if (NFSCRLE(i) == (nextch & 0xff)) {
						/*
						 * Code would collide with the next
						 * data byte; shorten the run by one.
						 */
						i--;
						if (i < 2) {
							nfscput('\0');
						} else {
							nfscput(NFSCRLE(i));
						}
						nfscput('\0');
						rlelast = 0;
					} else {
						nfscput(NFSCRLE(i));
						rlelast = i;
					}
				}
				i = 0;
			}
		} else {
			/* Byte-stuff data bytes that look like RLE codes */
			if ((ch & NFSCRL) == NFSCRL) {
				nfscput(ch);
			}
			nfscput(ch);
			i = rlelast = 0;
		}
	}
	if (olen < m0->m_pkthdr.len) {
		/* Compression won: pad to a 4-byte multiple and use it */
		m_freem(m0);
		if (i = (olen & 0x3)) {
			i = 4 - i;
			while (i-- > 0) {
				nfscput('\0');
			}
		}
		retm->m_pkthdr.len = olen;
		retm->m_pkthdr.rcvif = (struct ifnet *)0;
		return (retm);
	} else {
		/* No gain: throw away the compressed copy */
		m_freem(retm);
		return (m0);
	}
}

/*
 * Expand a compressed RPC back into its original form; returns the
 * original chain untouched if the fifth byte says it was never
 * compressed (or the chain is too short to carry a header).
 */
struct mbuf *
nfs_uncompress(m0)
	struct mbuf *m0;
{
	register u_char cp, nextcp, *ip, *op;
	register struct mbuf *m, *om;
	struct mbuf *retm, **mp;
	int i, j, noteof, clget, ileft, oleft, olen;

	m = m0;
	i = 0;
	while (m && i < MINCLSIZE) {
		i += m->m_len;
		m = m->m_next;
	}
	if (i < 6)
		return (m0);
	if (i >= MINCLSIZE)
		clget = 1;
	else
		clget = 0;
	m = m0;
	MGET(om, M_WAIT, MT_DATA);
	if (clget)
		MCLGET(om, M_WAIT);
	olen = om->m_len = 8;
	oleft = M_TRAILINGSPACE(om);
	op = mtod(om, u_char *);
	retm = om;
	mp = &om->m_next;
	if (m->m_len >= 6) {
		/* Header is contiguous: copy XID, check/clear the marker byte */
		ileft = m->m_len - 6;
		ip = mtod(m, u_char *);
		*((u_long *)op) = *((u_long *)ip);
		bzero(op + 4, 3);
		ip += 4;
		op += 7;
		if (*ip == '\0') {
			/* Marker 0x0: packet was never compressed */
			m_freem(om);
			return (m0);
		}
		*op++ = *ip++ - 1;
		cp = *ip++;
	} else {
		/* Header split across mbufs: pull it byte by byte */
		ileft = m->m_len;
		ip = mtod(m, u_char *);
		nfscget(*op++);
		nfscget(*op++);
		nfscget(*op++);
		nfscget(*op++);
		bzero(op, 3);
		op += 3;
		nfscget(*op);
		if (*op == '\0') {
			m_freem(om);
			return (m0);
		}
		(*op)--;
		op++;
		nfscget(cp);
	}
	/* noteof is cleared at end-of-input inside the nfscget() macro */
	noteof = 1;
	while (noteof) {
		if ((cp & NFSCRL) == NFSCRL) {
			nfscget(nextcp);
			if (cp == nextcp) {
				/* Stuffed byte: emit one copy */
				nfscput(cp);
				goto readit;
			} else {
				/* RLE code: expand the zero run */
				i = (cp & 0xf) + 2;
				for (j = 0; j < i; j++) {
					nfscput('\0');
				}
				cp = nextcp;
			}
		} else {
			nfscput(cp);
readit:
			nfscget(cp);
		}
	}
	m_freem(m0);
	/* Drop the trailing compression pad, if any */
	if (i = (olen & 0x3))
		om->m_len -= i;
	return (retm);
}