xref: /original-bsd/sys/nfs/nfs_subs.c (revision de3f5c4e)
1 /*
2  * Copyright (c) 1989 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Rick Macklem at The University of Guelph.
7  *
8  * %sccs.include.redist.c%
9  *
10  *	@(#)nfs_subs.c	7.41 (Berkeley) 05/15/91
11  */
12 
13 /*
14  * These functions support the macros and help fiddle mbuf chains for
15  * the nfs op functions. They do things like create the rpc header and
16  * copy data between mbuf chains and uio lists.
17  */
18 #include "param.h"
19 #include "proc.h"
20 #include "filedesc.h"
21 #include "systm.h"
22 #include "kernel.h"
23 #include "mount.h"
24 #include "file.h"
25 #include "vnode.h"
26 #include "namei.h"
27 #include "mbuf.h"
28 #include "map.h"
29 
30 #include "../ufs/quota.h"
31 #include "../ufs/inode.h"
32 
33 #include "rpcv2.h"
34 #include "nfsv2.h"
35 #include "nfsnode.h"
36 #include "nfs.h"
37 #include "nfsiom.h"
38 #include "xdr_subs.h"
39 #include "nfsm_subs.h"
40 #include "nfscompress.h"
41 
42 #define TRUE	1
43 #define	FALSE	0
44 
45 /*
46  * Data items converted to xdr at startup, since they are constant
47  * This is kinda hokey, but may save a little time doing byte swaps
48  */
49 u_long nfs_procids[NFS_NPROCS];
50 u_long nfs_xdrneg1;
51 u_long rpc_call, rpc_vers, rpc_reply, rpc_msgdenied,
52 	rpc_mismatch, rpc_auth_unix, rpc_msgaccepted;
53 u_long nfs_vers, nfs_prog, nfs_true, nfs_false;
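/*
 * Added annotation (not in the original source): XDR encodes unsigned
 * integers as 32-bit big-endian words, so txdr_unsigned() is essentially
 * an htonl().  For example, nfs_true = txdr_unsigned(TRUE) stores the
 * bytes 00 00 00 01 in network order, which a little-endian host would
 * read back as 0x01000000 if interpreted as a native long.
 */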
54 /* And other global data */
55 static u_long *rpc_uidp = (u_long *)0;
56 static u_long nfs_xid = 1;
57 static char *rpc_unixauth;
58 extern long hostid;
59 enum vtype ntov_type[7] = { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON };
60 extern struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
61 extern struct map nfsmap[NFS_MSIZ];
62 extern struct nfsreq nfsreqh;
63 
64 /* Function ret types */
65 static char *nfs_unixauth();
66 
67 /*
68  * Maximum number of groups passed through to NFS server.
69  * According to RFC1057 it should be 16.
70  * For release 3.X systems, the maximum value is 8.
71  * For some other servers, the maximum value is 10.
72  */
73 int numgrps = 8;
74 
75 /*
76  * Create the header for an rpc request packet
77  * The function nfs_unixauth() creates a unix style authorization string
78  * and returns a ptr to it.
79  * The hsiz is the size of the rest of the nfs request header.
80  * (just used to decide if a cluster is a good idea)
81  * nb: Note that the prog, vers and procid args are already in xdr byte order
82  */
83 struct mbuf *nfsm_reqh(prog, vers, procid, cred, hsiz, bpos, mb, retxid)
84 	u_long prog;
85 	u_long vers;
86 	u_long procid;
87 	struct ucred *cred;
88 	int hsiz;
89 	caddr_t *bpos;
90 	struct mbuf **mb;
91 	u_long *retxid;
92 {
93 	register struct mbuf *mreq, *m;
94 	register u_long *tl;
95 	struct mbuf *m1;
96 	char *ap;
97 	int asiz, siz;
98 
99 	NFSMGETHDR(mreq);
100 	asiz = ((((cred->cr_ngroups - 1) > numgrps) ? numgrps :
101 		  (cred->cr_ngroups - 1)) << 2);
102 #ifdef FILLINHOST
103 	asiz += nfsm_rndup(hostnamelen)+(9*NFSX_UNSIGNED);
104 #else
105 	asiz += 9*NFSX_UNSIGNED;
106 #endif
107 
108 	/* If we need a lot, alloc a cluster ?? */
109 	if ((asiz+hsiz+RPC_SIZ) > MHLEN)
110 		MCLGET(mreq, M_WAIT);
111 	mreq->m_len = NFSMSIZ(mreq);
112 	siz = mreq->m_len;
113 	m1 = mreq;
114 	/*
115 	 * Alloc enough mbufs
116 	 * We do it now to avoid all sleeps after the call to nfs_unixauth()
117 	 */
118 	while ((asiz+RPC_SIZ) > siz) {
119 		MGET(m, M_WAIT, MT_DATA);
120 		m1->m_next = m;
121 		m->m_len = MLEN;
122 		siz += MLEN;
123 		m1 = m;
124 	}
125 	tl = mtod(mreq, u_long *);
126 	*tl++ = *retxid = txdr_unsigned(++nfs_xid);
127 	*tl++ = rpc_call;
128 	*tl++ = rpc_vers;
129 	*tl++ = prog;
130 	*tl++ = vers;
131 	*tl++ = procid;
132 
133 	/* Now we can call nfs_unixauth() and copy it in */
134 	ap = nfs_unixauth(cred);
135 	m = mreq;
136 	siz = m->m_len-RPC_SIZ;
137 	if (asiz <= siz) {
138 		bcopy(ap, (caddr_t)tl, asiz);
139 		m->m_len = asiz+RPC_SIZ;
140 	} else {
141 		bcopy(ap, (caddr_t)tl, siz);
142 		ap += siz;
143 		asiz -= siz;
144 		while (asiz > 0) {
145 			siz = (asiz > MLEN) ? MLEN : asiz;
146 			m = m->m_next;
147 			bcopy(ap, mtod(m, caddr_t), siz);
148 			m->m_len = siz;
149 			asiz -= siz;
150 			ap += siz;
151 		}
152 	}
153 
154 	/* Finally, return values */
155 	*mb = m;
156 	*bpos = mtod(m, caddr_t)+m->m_len;
157 	return (mreq);
158 }
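/*
 * Added annotation (not in the original source): the call header built
 * above is a sequence of 32-bit XDR words, roughly
 *
 *	xid, RPC_CALL, RPC_VER2, prog, vers, procid,
 *
 * followed by the AUTH_UNIX credential string from nfs_unixauth() and its
 * trailing AUTH_NULL verifier.  As an illustrative example, a GETATTR call
 * would pass prog = nfs_prog, vers = nfs_vers and
 * procid = nfs_procids[NFSPROC_GETATTR] (NFSPROC_GETATTR from nfsv2.h),
 * all already in xdr byte order as noted in the comment above.
 */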
159 
160 /*
161  * copies mbuf chain to the uio scatter/gather list
162  */
163 nfsm_mbuftouio(mrep, uiop, siz, dpos)
164 	struct mbuf **mrep;
165 	register struct uio *uiop;
166 	int siz;
167 	caddr_t *dpos;
168 {
169 	register char *mbufcp, *uiocp;
170 	register int xfer, left, len;
171 	register struct mbuf *mp;
172 	long uiosiz, rem;
173 	int error = 0;
174 
175 	mp = *mrep;
176 	mbufcp = *dpos;
177 	len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
178 	rem = nfsm_rndup(siz)-siz;
179 	while (siz > 0) {
180 		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
181 			return (EFBIG);
182 		left = uiop->uio_iov->iov_len;
183 		uiocp = uiop->uio_iov->iov_base;
184 		if (left > siz)
185 			left = siz;
186 		uiosiz = left;
187 		while (left > 0) {
188 			while (len == 0) {
189 				mp = mp->m_next;
190 				if (mp == NULL)
191 					return (EBADRPC);
192 				mbufcp = mtod(mp, caddr_t);
193 				len = mp->m_len;
194 			}
195 			xfer = (left > len) ? len : left;
196 #ifdef notdef
197 			/* Not Yet.. */
198 			if (uiop->uio_iov->iov_op != NULL)
199 				(*(uiop->uio_iov->iov_op))
200 				(mbufcp, uiocp, xfer);
201 			else
202 #endif
203 			if (uiop->uio_segflg == UIO_SYSSPACE)
204 				bcopy(mbufcp, uiocp, xfer);
205 			else
206 				copyout(mbufcp, uiocp, xfer);
207 			left -= xfer;
208 			len -= xfer;
209 			mbufcp += xfer;
210 			uiocp += xfer;
211 			uiop->uio_offset += xfer;
212 			uiop->uio_resid -= xfer;
213 		}
214 		if (uiop->uio_iov->iov_len <= siz) {
215 			uiop->uio_iovcnt--;
216 			uiop->uio_iov++;
217 		} else {
218 			uiop->uio_iov->iov_base += uiosiz;
219 			uiop->uio_iov->iov_len -= uiosiz;
220 		}
221 		siz -= uiosiz;
222 	}
223 	*dpos = mbufcp;
224 	*mrep = mp;
225 	if (rem > 0) {
226 		if (len < rem)
227 			error = nfs_adv(mrep, dpos, rem, len);
228 		else
229 			*dpos += rem;
230 	}
231 	return (error);
232 }
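/*
 * Added annotation (not in the original source): XDR opaque/string data is
 * padded to a multiple of 4 bytes, which is what the nfsm_rndup()
 * arithmetic above accounts for.  E.g. for siz = 5, nfsm_rndup(5) is 8,
 * so rem = 3 pad bytes are skipped in the mbuf chain after the 5 data
 * bytes have been copied to the uio list.
 */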
233 
234 /*
235  * copies a uio scatter/gather list to an mbuf chain...
236  */
237 nfsm_uiotombuf(uiop, mq, siz, bpos)
238 	register struct uio *uiop;
239 	struct mbuf **mq;
240 	int siz;
241 	caddr_t *bpos;
242 {
243 	register char *uiocp;
244 	register struct mbuf *mp, *mp2;
245 	register int xfer, left, len;
246 	int uiosiz, clflg, rem;
247 	char *cp;
248 
249 	if (siz > MLEN)		/* or should it >= MCLBYTES ?? */
250 		clflg = 1;
251 	else
252 		clflg = 0;
253 	rem = nfsm_rndup(siz)-siz;
254 	mp2 = *mq;
255 	while (siz > 0) {
256 		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
257 			return (EINVAL);
258 		left = uiop->uio_iov->iov_len;
259 		uiocp = uiop->uio_iov->iov_base;
260 		if (left > siz)
261 			left = siz;
262 		uiosiz = left;
263 		while (left > 0) {
264 			MGET(mp, M_WAIT, MT_DATA);
265 			if (clflg)
266 				MCLGET(mp, M_WAIT);
267 			mp->m_len = NFSMSIZ(mp);
268 			mp2->m_next = mp;
269 			mp2 = mp;
270 			xfer = (left > mp->m_len) ? mp->m_len : left;
271 #ifdef notdef
272 			/* Not Yet.. */
273 			if (uiop->uio_iov->iov_op != NULL)
274 				(*(uiop->uio_iov->iov_op))
275 				(uiocp, mtod(mp, caddr_t), xfer);
276 			else
277 #endif
278 			if (uiop->uio_segflg == UIO_SYSSPACE)
279 				bcopy(uiocp, mtod(mp, caddr_t), xfer);
280 			else
281 				copyin(uiocp, mtod(mp, caddr_t), xfer);
282 			len = mp->m_len;
283 			mp->m_len = xfer;
284 			left -= xfer;
285 			uiocp += xfer;
286 			uiop->uio_offset += xfer;
287 			uiop->uio_resid -= xfer;
288 		}
289 		if (uiop->uio_iov->iov_len <= siz) {
290 			uiop->uio_iovcnt--;
291 			uiop->uio_iov++;
292 		} else {
293 			uiop->uio_iov->iov_base += uiosiz;
294 			uiop->uio_iov->iov_len -= uiosiz;
295 		}
296 		siz -= uiosiz;
297 	}
298 	if (rem > 0) {
299 		if (rem > (len-mp->m_len)) {
300 			MGET(mp, M_WAIT, MT_DATA);
301 			mp->m_len = 0;
302 			mp2->m_next = mp;
303 		}
304 		cp = mtod(mp, caddr_t)+mp->m_len;
305 		for (left = 0; left < rem; left++)
306 			*cp++ = '\0';
307 		mp->m_len += rem;
308 		*bpos = cp;
309 	} else
310 		*bpos = mtod(mp, caddr_t)+mp->m_len;
311 	*mq = mp;
312 	return (0);
313 }
314 
315 /*
316  * Help break down an mbuf chain by making the first siz bytes contiguous,
317  * returning a pointer to them through *cp2.
318  * If Updateflg == True we can overwrite the first part of the mbuf data
319  * This is used by the macros nfsm_disect and nfsm_disecton for tough
320  * cases. (The macros use the vars. dpos and dpos2)
321  */
322 nfsm_disct(mdp, dposp, siz, left, updateflg, cp2)
323 	struct mbuf **mdp;
324 	caddr_t *dposp;
325 	int siz;
326 	int left;
327 	int updateflg;
328 	caddr_t *cp2;
329 {
330 	register struct mbuf *mp, *mp2;
331 	register int siz2, xfer;
332 	register caddr_t tl;
333 
334 	mp = *mdp;
335 	while (left == 0) {
336 		*mdp = mp = mp->m_next;
337 		if (mp == NULL)
338 			return (EBADRPC);
339 		left = mp->m_len;
340 		*dposp = mtod(mp, caddr_t);
341 	}
342 	if (left >= siz) {
343 		*cp2 = *dposp;
344 		*dposp += siz;
345 	} else if (mp->m_next == NULL) {
346 		return (EBADRPC);
347 	} else if (siz > MHLEN) {
348 		panic("nfs S too big");
349 	} else {
350 		/* Iff update, you can overwrite, else must alloc new mbuf */
351 		if (updateflg) {
352 			NFSMINOFF(mp);
353 		} else {
354 			MGET(mp2, M_WAIT, MT_DATA);
355 			mp2->m_next = mp->m_next;
356 			mp->m_next = mp2;
357 			mp->m_len -= left;
358 			mp = mp2;
359 		}
360 		*cp2 = tl = mtod(mp, caddr_t);
361 		bcopy(*dposp, tl, left);		/* Copy what was left */
362 		siz2 = siz-left;
363 		tl += left;
364 		mp2 = mp->m_next;
365 		/* Loop around copying up the siz2 bytes */
366 		while (siz2 > 0) {
367 			if (mp2 == NULL)
368 				return (EBADRPC);
369 			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
370 			if (xfer > 0) {
371 				bcopy(mtod(mp2, caddr_t), tl, xfer);
372 				NFSMADV(mp2, xfer);
373 				mp2->m_len -= xfer;
374 				tl += xfer;
375 				siz2 -= xfer;
376 			}
377 			if (siz2 > 0)
378 				mp2 = mp2->m_next;
379 		}
380 		mp->m_len = siz;
381 		*mdp = mp2;
382 		*dposp = mtod(mp2, caddr_t);
383 	}
384 	return (0);
385 }
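/*
 * Added annotation (not in the original source): as a worked example,
 * suppose a caller needs siz = 8 contiguous bytes but only left = 3 remain
 * in the current mbuf.  The code above copies those 3 bytes plus the next
 * 5 from the following mbuf(s) into one place (within the current mbuf
 * when updateflg permits overwriting, otherwise into a freshly allocated
 * mbuf spliced into the chain) and hands back a pointer to the 8
 * contiguous bytes via *cp2.
 */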
386 
387 /*
388  * Advance the position in the mbuf chain.
389  */
390 nfs_adv(mdp, dposp, offs, left)
391 	struct mbuf **mdp;
392 	caddr_t *dposp;
393 	int offs;
394 	int left;
395 {
396 	register struct mbuf *m;
397 	register int s;
398 
399 	m = *mdp;
400 	s = left;
401 	while (s < offs) {
402 		offs -= s;
403 		m = m->m_next;
404 		if (m == NULL)
405 			return (EBADRPC);
406 		s = m->m_len;
407 	}
408 	*mdp = m;
409 	*dposp = mtod(m, caddr_t)+offs;
410 	return (0);
411 }
412 
413 /*
414  * Copy a string into mbufs for the hard cases...
415  */
416 nfsm_strtmbuf(mb, bpos, cp, siz)
417 	struct mbuf **mb;
418 	char **bpos;
419 	char *cp;
420 	long siz;
421 {
422 	register struct mbuf *m1, *m2;
423 	long left, xfer, len, tlen;
424 	u_long *tl;
425 	int putsize;
426 
427 	putsize = 1;
428 	m2 = *mb;
429 	left = NFSMSIZ(m2)-m2->m_len;
430 	if (left > 0) {
431 		tl = ((u_long *)(*bpos));
432 		*tl++ = txdr_unsigned(siz);
433 		putsize = 0;
434 		left -= NFSX_UNSIGNED;
435 		m2->m_len += NFSX_UNSIGNED;
436 		if (left > 0) {
437 			bcopy(cp, (caddr_t) tl, left);
438 			siz -= left;
439 			cp += left;
440 			m2->m_len += left;
441 			left = 0;
442 		}
443 	}
444 	/* Loop around adding mbufs */
445 	while (siz > 0) {
446 		MGET(m1, M_WAIT, MT_DATA);
447 		if (siz > MLEN)
448 			MCLGET(m1, M_WAIT);
449 		m1->m_len = NFSMSIZ(m1);
450 		m2->m_next = m1;
451 		m2 = m1;
452 		tl = mtod(m1, u_long *);
453 		tlen = 0;
454 		if (putsize) {
455 			*tl++ = txdr_unsigned(siz);
456 			m1->m_len -= NFSX_UNSIGNED;
457 			tlen = NFSX_UNSIGNED;
458 			putsize = 0;
459 		}
460 		if (siz < m1->m_len) {
461 			len = nfsm_rndup(siz);
462 			xfer = siz;
463 			if (xfer < len)
464 				*(tl+(xfer>>2)) = 0;
465 		} else {
466 			xfer = len = m1->m_len;
467 		}
468 		bcopy(cp, (caddr_t) tl, xfer);
469 		m1->m_len = len+tlen;
470 		siz -= xfer;
471 		cp += xfer;
472 	}
473 	*mb = m1;
474 	*bpos = mtod(m1, caddr_t)+m1->m_len;
475 	return (0);
476 }
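/*
 * Added annotation (not in the original source): an XDR string goes out as
 * a length word followed by the bytes, NUL padded to a 4 byte boundary.
 * A 5 byte name such as "hello" therefore occupies 4 + 5 + 3 = 12 bytes on
 * the wire, which is the layout nfsm_strtmbuf() produces when the string
 * does not fit in the current mbuf.
 */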
477 
478 /*
479  * Called once to initialize data structures...
480  */
481 nfs_init()
482 {
483 	register int i;
484 
485 	rpc_vers = txdr_unsigned(RPC_VER2);
486 	rpc_call = txdr_unsigned(RPC_CALL);
487 	rpc_reply = txdr_unsigned(RPC_REPLY);
488 	rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
489 	rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
490 	rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
491 	rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
492 	nfs_vers = txdr_unsigned(NFS_VER2);
493 	nfs_prog = txdr_unsigned(NFS_PROG);
494 	nfs_true = txdr_unsigned(TRUE);
495 	nfs_false = txdr_unsigned(FALSE);
496 	/* Loop thru nfs procids */
497 	for (i = 0; i < NFS_NPROCS; i++)
498 		nfs_procids[i] = txdr_unsigned(i);
499 	/* Ensure async daemons disabled */
500 	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
501 		nfs_iodwant[i] = (struct proc *)0;
502 	nfs_xdrneg1 = txdr_unsigned(-1);
503 	nfs_nhinit();			/* Init the nfsnode table */
504 	nfsrv_initcache();		/* Init the server request cache */
505 	rminit(nfsmap, (long)NFS_MAPREG, (long)1, "nfs mapreg", NFS_MSIZ);
506 
507 	/*
508 	 * Initialize reply list and start timer
509 	 */
510 	nfsreqh.r_prev = nfsreqh.r_next = &nfsreqh;
511 	nfs_timer();
512 }
513 
514 /*
515  * Fill in the rest of the rpc_unixauth and return it
516  */
517 static char *nfs_unixauth(cr)
518 	register struct ucred *cr;
519 {
520 	register u_long *tl;
521 	register int i;
522 	int ngr;
523 
524 	/* Maybe someday there should be a cache of AUTH_SHORT's */
525 	if ((tl = rpc_uidp) == NULL) {
526 #ifdef FILLINHOST
527 		i = nfsm_rndup(hostnamelen)+(25*NFSX_UNSIGNED);
528 #else
529 		i = 25*NFSX_UNSIGNED;
530 #endif
531 		MALLOC(tl, u_long *, i, M_TEMP, M_WAITOK);
532 		bzero((caddr_t)tl, i);
533 		rpc_unixauth = (caddr_t)tl;
534 		*tl++ = txdr_unsigned(RPCAUTH_UNIX);
535 		tl++;	/* Fill in size later */
536 		*tl++ = hostid;
537 #ifdef FILLINHOST
538 		*tl++ = txdr_unsigned(hostnamelen);
539 		i = nfsm_rndup(hostnamelen);
540 		bcopy(hostname, (caddr_t)tl, hostnamelen);
541 		tl += (i>>2);
542 #else
543 		*tl++ = 0;
544 #endif
545 		rpc_uidp = tl;
546 	}
547 	*tl++ = txdr_unsigned(cr->cr_uid);
548 	*tl++ = txdr_unsigned(cr->cr_groups[0]);
549 	ngr = ((cr->cr_ngroups - 1) > numgrps) ? numgrps : (cr->cr_ngroups - 1);
550 	*tl++ = txdr_unsigned(ngr);
551 	for (i = 1; i <= ngr; i++)
552 		*tl++ = txdr_unsigned(cr->cr_groups[i]);
553 	/* And add the AUTH_NULL */
554 	*tl++ = 0;
555 	*tl = 0;
556 	i = (((caddr_t)tl)-rpc_unixauth)-12;
557 	tl = (u_long *)(rpc_unixauth+4);
558 	*tl = txdr_unsigned(i);
559 	return (rpc_unixauth);
560 }
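/*
 * Added annotation (not in the original source): the credential built
 * above follows the RFC 1057 AUTH_UNIX layout, roughly these 32-bit XDR
 * words:
 *
 *	AUTH_UNIX flavor, body length, stamp (hostid here),
 *	machine name length (0 unless FILLINHOST) [+ name], uid, gid,
 *	gid count, up to numgrps supplementary gids,
 *	then two zero words forming the AUTH_NULL verifier.
 *
 * The body length word is back patched at rpc_unixauth+4 once the actual
 * size is known.
 */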
561 
562 /*
563  * Attribute cache routines.
564  * nfs_loadattrcache() - loads or updates the cache contents from attributes
565  *	that are on the mbuf list
566  * nfs_getattrcache() - returns valid attributes if found in cache, returns
567  *	error otherwise
568  */
569 
570 /*
571  * Load the attribute cache (that lives in the nfsnode entry) with
572  * the values on the mbuf list and
573  * Iff vaper not NULL
574  *    copy the attributes to *vaper
575  */
576 nfs_loadattrcache(vpp, mdp, dposp, vaper)
577 	struct vnode **vpp;
578 	struct mbuf **mdp;
579 	caddr_t *dposp;
580 	struct vattr *vaper;
581 {
582 	register struct vnode *vp = *vpp;
583 	register struct vattr *vap;
584 	register struct nfsv2_fattr *fp;
585 	extern struct vnodeops spec_nfsv2nodeops;
586 	register struct nfsnode *np;
587 	register long t1;
588 	caddr_t dpos, cp2;
589 	int error = 0;
590 	struct mbuf *md;
591 	enum vtype type;
592 	u_short mode;
593 	long rdev;
594 	struct timeval mtime;
595 	struct vnode *nvp;
596 
597 	md = *mdp;
598 	dpos = *dposp;
599 	t1 = (mtod(md, caddr_t)+md->m_len)-dpos;
600 	if (error = nfsm_disct(&md, &dpos, NFSX_FATTR, t1, TRUE, &cp2))
601 		return (error);
602 	fp = (struct nfsv2_fattr *)cp2;
603 	type = nfstov_type(fp->fa_type);
604 	mode = fxdr_unsigned(u_short, fp->fa_mode);
605 	if (type == VNON)
606 		type = IFTOVT(mode);
607 	rdev = fxdr_unsigned(long, fp->fa_rdev);
608 	fxdr_time(&fp->fa_mtime, &mtime);
609 	/*
610 	 * If v_type == VNON it is a new node, so fill in the v_type,
611 	 * n_mtime fields. Check to see if it represents a special
612 	 * device, and if so, check for a possible alias. Once the
613 	 * correct vnode has been obtained, fill in the rest of the
614 	 * information.
615 	 */
616 	np = VTONFS(vp);
617 	if (vp->v_type == VNON) {
618 		if (type == VCHR && rdev == 0xffffffff)
619 			vp->v_type = type = VFIFO;
620 		else
621 			vp->v_type = type;
622 		if (vp->v_type == VFIFO) {
623 #ifdef FIFO
624 			extern struct vnodeops fifo_nfsv2nodeops;
625 			vp->v_op = &fifo_nfsv2nodeops;
626 #else
627 			return (EOPNOTSUPP);
628 #endif /* FIFO */
629 		}
630 		if (vp->v_type == VCHR || vp->v_type == VBLK) {
631 			vp->v_op = &spec_nfsv2nodeops;
632 			if (nvp = checkalias(vp, (dev_t)rdev, vp->v_mount)) {
633 				/*
634 				 * Reinitialize aliased node.
635 				 */
636 				np = VTONFS(nvp);
637 				np->n_vnode = nvp;
638 				np->n_flag = 0;
639 				nfs_lock(nvp);
640 				bcopy((caddr_t)&VTONFS(vp)->n_fh,
641 					(caddr_t)&np->n_fh, NFSX_FH);
642 				insque(np, nfs_hash(&np->n_fh));
643 				np->n_attrstamp = 0;
644 				np->n_sillyrename = (struct sillyrename *)0;
645 				/*
646 				 * Discard unneeded vnode and update actual one
647 				 */
648 				vput(vp);
649 				*vpp = nvp;
650 			}
651 		}
652 		np->n_mtime = mtime.tv_sec;
653 	}
654 	vap = &np->n_vattr;
655 	vap->va_type = type;
656 	vap->va_mode = (mode & 07777);
657 	vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
658 	vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
659 	vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
660 	vap->va_size = fxdr_unsigned(u_long, fp->fa_size);
661 	if ((np->n_flag & NMODIFIED) == 0 || vap->va_size > np->n_size) {
662 		np->n_size = vap->va_size;
663 		vnode_pager_setsize(vp, np->n_size);
664 	}
665 	vap->va_size_rsv = 0;
666 	vap->va_blocksize = fxdr_unsigned(long, fp->fa_blocksize);
667 	vap->va_rdev = (dev_t)rdev;
668 	vap->va_bytes = fxdr_unsigned(long, fp->fa_blocks) * NFS_FABLKSIZE;
669 	vap->va_bytes_rsv = 0;
670 	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
671 	vap->va_fileid = fxdr_unsigned(long, fp->fa_fileid);
672 	vap->va_atime.tv_sec = fxdr_unsigned(long, fp->fa_atime.tv_sec);
673 	vap->va_atime.tv_usec = 0;
674 	vap->va_flags = fxdr_unsigned(u_long, fp->fa_atime.tv_usec);
675 	vap->va_mtime = mtime;
676 	vap->va_ctime.tv_sec = fxdr_unsigned(long, fp->fa_ctime.tv_sec);
677 	vap->va_ctime.tv_usec = 0;
678 	vap->va_gen = fxdr_unsigned(u_long, fp->fa_ctime.tv_usec);
679 	np->n_attrstamp = time.tv_sec;
680 	*dposp = dpos;
681 	*mdp = md;
682 	if (vaper != NULL) {
683 		bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
684 		if ((np->n_flag & NMODIFIED) && (np->n_size > vap->va_size))
685 			vaper->va_size = np->n_size;
686 	}
687 	return (0);
688 }
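/*
 * Added annotation (not in the original source): as the assignments above
 * show, this code reuses the otherwise unused microsecond fields of the
 * Version 2 fattr: fa_atime.tv_usec is interpreted as va_flags and
 * fa_ctime.tv_usec as va_gen, while the real atime and ctime keep only
 * whole seconds.
 */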
689 
690 /*
691  * Check the time stamp
692  * If the cache is valid, copy contents to *vap and return 0
693  * otherwise return an error
694  */
695 nfs_getattrcache(vp, vap)
696 	register struct vnode *vp;
697 	struct vattr *vap;
698 {
699 	register struct nfsnode *np;
700 
701 	np = VTONFS(vp);
702 	if ((time.tv_sec-np->n_attrstamp) < NFS_ATTRTIMEO) {
703 		nfsstats.attrcache_hits++;
704 		bcopy((caddr_t)&np->n_vattr,(caddr_t)vap,sizeof(struct vattr));
705 		if ((np->n_flag & NMODIFIED) == 0) {
706 			np->n_size = vap->va_size;
707 			vnode_pager_setsize(vp, np->n_size);
708 		} else if (np->n_size > vap->va_size)
709 			vap->va_size = np->n_size;
710 		return (0);
711 	} else {
712 		nfsstats.attrcache_misses++;
713 		return (ENOENT);
714 	}
715 }
716 
717 /*
718  * Set up nameidata for a namei() call and do it
719  */
720 nfs_namei(ndp, fhp, len, mdp, dposp, p)
721 	register struct nameidata *ndp;
722 	fhandle_t *fhp;
723 	int len;
724 	struct mbuf **mdp;
725 	caddr_t *dposp;
726 	struct proc *p;
727 {
728 	register int i, rem;
729 	register struct mbuf *md;
730 	register char *fromcp, *tocp;
731 	struct vnode *dp;
732 	int flag;
733 	int error;
734 
735 	flag = ndp->ni_nameiop & OPMASK;
736 	MALLOC(ndp->ni_pnbuf, char *, len + 1, M_NAMEI, M_WAITOK);
737 	/*
738 	 * Copy the name from the mbuf list to ndp->ni_pnbuf
739 	 * and set the various ndp fields appropriately.
740 	 */
741 	fromcp = *dposp;
742 	tocp = ndp->ni_pnbuf;
743 	md = *mdp;
744 	rem = mtod(md, caddr_t) + md->m_len - fromcp;
745 	ndp->ni_hash = 0;
746 	for (i = 0; i < len; i++) {
747 		while (rem == 0) {
748 			md = md->m_next;
749 			if (md == NULL) {
750 				error = EBADRPC;
751 				goto out;
752 			}
753 			fromcp = mtod(md, caddr_t);
754 			rem = md->m_len;
755 		}
756 		if (*fromcp == '\0' || *fromcp == '/') {
757 			error = EINVAL;
758 			goto out;
759 		}
760 		if (*fromcp & 0200)
761 			if ((*fromcp&0377) == ('/'|0200) || flag != DELETE) {
762 				error = EINVAL;
763 				goto out;
764 			}
765 		ndp->ni_hash += (unsigned char)*fromcp;
766 		*tocp++ = *fromcp++;
767 		rem--;
768 	}
769 	*tocp = '\0';
770 	*mdp = md;
771 	*dposp = fromcp;
772 	len = nfsm_rndup(len)-len;
773 	if (len > 0) {
774 		if (rem >= len)
775 			*dposp += len;
776 		else if (error = nfs_adv(mdp, dposp, len, rem))
777 			goto out;
778 	}
779 	ndp->ni_pathlen = tocp - ndp->ni_pnbuf;
780 	ndp->ni_ptr = ndp->ni_pnbuf;
781 	/*
782 	 * Extract and set starting directory.
783 	 */
784 	if (error = nfsrv_fhtovp(fhp, FALSE, &dp, ndp->ni_cred))
785 		goto out;
786 	if (dp->v_type != VDIR) {
787 		vrele(dp);
788 		error = ENOTDIR;
789 		goto out;
790 	}
791 	ndp->ni_startdir = dp;
792 	ndp->ni_nameiop |= (NOCROSSMOUNT | REMOTE);
793 	/*
794 	 * And call lookup() to do the real work
795 	 */
796 	if (error = lookup(ndp, p))
797 		goto out;
798 	/*
799 	 * Check for encountering a symbolic link
800 	 */
801 	if (ndp->ni_more) {
802 		if ((ndp->ni_nameiop & LOCKPARENT) && ndp->ni_pathlen == 1)
803 			vput(ndp->ni_dvp);
804 		else
805 			vrele(ndp->ni_dvp);
806 		vput(ndp->ni_vp);
807 		ndp->ni_vp = NULL;
808 		error = EINVAL;
809 		goto out;
810 	}
811 	/*
812 	 * Check for saved name request
813 	 */
814 	if (ndp->ni_nameiop & (SAVENAME | SAVESTART)) {
815 		ndp->ni_nameiop |= HASBUF;
816 		return (0);
817 	}
818 out:
819 	FREE(ndp->ni_pnbuf, M_NAMEI);
820 	return (error);
821 }
822 
823 /*
824  * A fiddled version of m_adj() that ensures null fill to a long
825  * boundary and only trims off the back end
826  */
827 nfsm_adj(mp, len, nul)
828 	struct mbuf *mp;
829 	register int len;
830 	int nul;
831 {
832 	register struct mbuf *m;
833 	register int count, i;
834 	register char *cp;
835 
836 	/*
837 	 * Trim from tail.  Scan the mbuf chain,
838 	 * calculating its length and finding the last mbuf.
839 	 * If the adjustment only affects this mbuf, then just
840 	 * adjust and return.  Otherwise, rescan and truncate
841 	 * after the remaining size.
842 	 */
843 	count = 0;
844 	m = mp;
845 	for (;;) {
846 		count += m->m_len;
847 		if (m->m_next == (struct mbuf *)0)
848 			break;
849 		m = m->m_next;
850 	}
851 	if (m->m_len > len) {
852 		m->m_len -= len;
853 		if (nul > 0) {
854 			cp = mtod(m, caddr_t)+m->m_len-nul;
855 			for (i = 0; i < nul; i++)
856 				*cp++ = '\0';
857 		}
858 		return;
859 	}
860 	count -= len;
861 	if (count < 0)
862 		count = 0;
863 	/*
864 	 * Correct length for chain is "count".
865 	 * Find the mbuf with last data, adjust its length,
866 	 * and toss data from remaining mbufs on chain.
867 	 */
868 	for (m = mp; m; m = m->m_next) {
869 		if (m->m_len >= count) {
870 			m->m_len = count;
871 			if (nul > 0) {
872 				cp = mtod(m, caddr_t)+m->m_len-nul;
873 				for (i = 0; i < nul; i++)
874 					*cp++ = '\0';
875 			}
876 			break;
877 		}
878 		count -= m->m_len;
879 	}
880 	while (m = m->m_next)
881 		m->m_len = 0;
882 }
883 
884 /*
885  * nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
886  * 	- look up fsid in mount list (if not found ret error)
887  *	- check that it is exported
888  *	- get vp by calling VFS_FHTOVP() macro
889  *	- if not lockflag unlock it with VOP_UNLOCK()
890  *	- if cred->cr_uid == 0 set it to m_exroot
891  */
892 nfsrv_fhtovp(fhp, lockflag, vpp, cred)
893 	fhandle_t *fhp;
894 	int lockflag;
895 	struct vnode **vpp;
896 	struct ucred *cred;
897 {
898 	register struct mount *mp;
899 
900 	if ((mp = getvfs(&fhp->fh_fsid)) == NULL)
901 		return (ESTALE);
902 	if ((mp->mnt_flag & MNT_EXPORTED) == 0)
903 		return (EACCES);
904 	if (VFS_FHTOVP(mp, &fhp->fh_fid, vpp))
905 		return (ESTALE);
906 	if (cred->cr_uid == 0)
907 		cred->cr_uid = mp->mnt_exroot;
908 	if (!lockflag)
909 		VOP_UNLOCK(*vpp);
910 	return (0);
911 }
912 
913 /*
914  * These two functions implement nfs rpc compression.
915  * The algorithm is a trivial run length encoding of '\0' bytes. The high
916  * order nibble of hex "e" is or'd with the number of zeroes - 2 in four
917  * bits. (2 - 17 zeros) Any data byte with a high order nibble of hex "e"
918  * is byte stuffed.
919  * The compressed data is padded with 0x0 bytes to an even multiple of
920  * 4 bytes in length to avoid any weird long pointer alignments.
921  * If compression/uncompression is unsuccessful, the original mbuf list
922  * is returned.
923  * The first four bytes (the XID) are left uncompressed and the fifth
924  * byte is set to 0x1 for request and 0x2 for reply.
925  * An uncompressed RPC will always have the fifth byte == 0x0.
926  */
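/*
 * Added annotation (not in the original source): a worked example of the
 * encoding described above.  A run of five zero bytes becomes the single
 * byte 0xe3 (0xe0 | (5 - 2)); runs of 2 through 17 zeros compress to one
 * byte.  A lone data byte whose high nibble happens to be 0xe, say 0xe7,
 * is byte stuffed by emitting it twice (0xe7 0xe7) so that
 * nfs_uncompress() can tell it apart from a run length code.
 */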
927 struct mbuf *
928 nfs_compress(m0)
929 	struct mbuf *m0;
930 {
931 	register u_char ch, nextch;
932 	register int i, rlelast;
933 	register u_char *ip, *op;
934 	register int ileft, oleft, noteof;
935 	register struct mbuf *m, *om;
936 	struct mbuf **mp, *retm;
937 	int olen, clget;
938 
939 	i = rlelast = 0;
940 	noteof = 1;
941 	m = m0;
942 	if (m->m_len < 12)
943 		return (m0);
944 	if (m->m_pkthdr.len >= MINCLSIZE)
945 		clget = 1;
946 	else
947 		clget = 0;
948 	ileft = m->m_len - 9;
949 	ip = mtod(m, u_char *);
950 	MGETHDR(om, M_WAIT, MT_DATA);
951 	if (clget)
952 		MCLGET(om, M_WAIT);
953 	retm = om;
954 	mp = &om->m_next;
955 	olen = om->m_len = 5;
956 	oleft = M_TRAILINGSPACE(om);
957 	op = mtod(om, u_char *);
958 	*((u_long *)op) = *((u_long *)ip);
959 	ip += 7;
960 	op += 4;
961 	*op++ = *ip++ + 1;
962 	nextch = *ip++;
963 	while (noteof) {
964 		ch = nextch;
965 		if (ileft == 0) {
966 			do {
967 				m = m->m_next;
968 			} while (m && m->m_len == 0);
969 			if (m) {
970 				ileft = m->m_len;
971 				ip = mtod(m, u_char *);
972 			} else {
973 				noteof = 0;
974 				nextch = 0x1;
975 				goto doit;
976 			}
977 		}
978 		nextch = *ip++;
979 		ileft--;
980 doit:
981 		if (ch == '\0') {
982 			if (++i == NFSC_MAX || nextch != '\0') {
983 				if (i < 2) {
984 					nfscput('\0');
985 				} else {
986 					if (rlelast == i) {
987 						nfscput('\0');
988 						i--;
989 					}
990 					if (NFSCRLE(i) == (nextch & 0xff)) {
991 						i--;
992 						if (i < 2) {
993 							nfscput('\0');
994 						} else {
995 							nfscput(NFSCRLE(i));
996 						}
997 						nfscput('\0');
998 						rlelast = 0;
999 					} else {
1000 						nfscput(NFSCRLE(i));
1001 						rlelast = i;
1002 					}
1003 				}
1004 				i = 0;
1005 			}
1006 		} else {
1007 			if ((ch & NFSCRL) == NFSCRL) {
1008 				nfscput(ch);
1009 			}
1010 			nfscput(ch);
1011 			i = rlelast = 0;
1012 		}
1013 	}
1014 	if (olen < m0->m_pkthdr.len) {
1015 		m_freem(m0);
1016 		if (i = (olen & 0x3)) {
1017 			i = 4 - i;
1018 			while (i-- > 0) {
1019 				nfscput('\0');
1020 			}
1021 		}
1022 		retm->m_pkthdr.len = olen;
1023 		retm->m_pkthdr.rcvif = (struct ifnet *)0;
1024 		return (retm);
1025 	} else {
1026 		m_freem(retm);
1027 		return (m0);
1028 	}
1029 }
1030 
1031 struct mbuf *
1032 nfs_uncompress(m0)
1033 	struct mbuf *m0;
1034 {
1035 	register u_char cp, nextcp, *ip, *op;
1036 	register struct mbuf *m, *om;
1037 	struct mbuf *retm, **mp;
1038 	int i, j, noteof, clget, ileft, oleft, olen;
1039 
1040 	m = m0;
1041 	i = 0;
1042 	while (m && i < MINCLSIZE) {
1043 		i += m->m_len;
1044 		m = m->m_next;
1045 	}
1046 	if (i < 6)
1047 		return (m0);
1048 	if (i >= MINCLSIZE)
1049 		clget = 1;
1050 	else
1051 		clget = 0;
1052 	m = m0;
1053 	MGET(om, M_WAIT, MT_DATA);
1054 	if (clget)
1055 		MCLGET(om, M_WAIT);
1056 	olen = om->m_len = 8;
1057 	oleft = M_TRAILINGSPACE(om);
1058 	op = mtod(om, u_char *);
1059 	retm = om;
1060 	mp = &om->m_next;
1061 	if (m->m_len >= 6) {
1062 		ileft = m->m_len - 6;
1063 		ip = mtod(m, u_char *);
1064 		*((u_long *)op) = *((u_long *)ip);
1065 		bzero(op + 4, 3);
1066 		ip += 4;
1067 		op += 7;
1068 		if (*ip == '\0') {
1069 			m_freem(om);
1070 			return (m0);
1071 		}
1072 		*op++ = *ip++ - 1;
1073 		cp = *ip++;
1074 	} else {
1075 		ileft = m->m_len;
1076 		ip = mtod(m, u_char *);
1077 		nfscget(*op++);
1078 		nfscget(*op++);
1079 		nfscget(*op++);
1080 		nfscget(*op++);
1081 		bzero(op, 3);
1082 		op += 3;
1083 		nfscget(*op);
1084 		if (*op == '\0') {
1085 			m_freem(om);
1086 			return (m0);
1087 		}
1088 		(*op)--;
1089 		op++;
1090 		nfscget(cp);
1091 	}
1092 	noteof = 1;
1093 	while (noteof) {
1094 		if ((cp & NFSCRL) == NFSCRL) {
1095 			nfscget(nextcp);
1096 			if (cp == nextcp) {
1097 				nfscput(cp);
1098 				goto readit;
1099 			} else {
1100 				i = (cp & 0xf) + 2;
1101 				for (j = 0; j < i; j++) {
1102 					nfscput('\0');
1103 				}
1104 				cp = nextcp;
1105 			}
1106 		} else {
1107 			nfscput(cp);
1108 readit:
1109 			nfscget(cp);
1110 		}
1111 	}
1112 	m_freem(m0);
1113 	if (i = (olen & 0x3))
1114 		om->m_len -= i;
1115 	return (retm);
1116 }
1117