xref: /original-bsd/sys/nfs/nfs_subs.c (revision e59fb703)
1 /*
2  * Copyright (c) 1989 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Rick Macklem at The University of Guelph.
7  *
8  * %sccs.include.redist.c%
9  *
10  *	@(#)nfs_subs.c	7.44 (Berkeley) 12/19/91
11  */
12 
13 /*
14  * These functions support the macros and help fiddle mbuf chains for
15  * the nfs op functions. They do things like create the rpc header and
16  * copy data between mbuf chains and uio lists.
17  */
18 #include "param.h"
19 #include "proc.h"
20 #include "filedesc.h"
21 #include "systm.h"
22 #include "kernel.h"
23 #include "mount.h"
24 #include "file.h"
25 #include "vnode.h"
26 #include "namei.h"
27 #include "mbuf.h"
28 #include "map.h"
29 
30 #include "ufs/ufs/quota.h"
31 #include "ufs/ufs/inode.h"
32 
33 #include "rpcv2.h"
34 #include "nfsv2.h"
35 #include "nfs.h"
36 #include "nfsnode.h"
37 #include "nfsiom.h"
38 #include "xdr_subs.h"
39 #include "nfsm_subs.h"
40 #include "nfscompress.h"
41 
42 #define TRUE	1
43 #define	FALSE	0
44 
45 /*
46  * Data items converted to xdr at startup, since they are constant
47  * This is kinda hokey, but may save a little time doing byte swaps
48  */
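/*
 * (txdr_unsigned() amounts to a host-to-network byte order conversion, so
 * e.g. nfs_true ends up holding TRUE already in XDR (big endian) form and
 * can be stored into a request without a per-call byte swap.  On a big
 * endian machine the conversions done in nfs_init() below are no-ops.)
 */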
49 u_long nfs_procids[NFS_NPROCS];
50 u_long nfs_xdrneg1;
51 u_long rpc_call, rpc_vers, rpc_reply, rpc_msgdenied,
52 	rpc_mismatch, rpc_auth_unix, rpc_msgaccepted;
53 u_long nfs_vers, nfs_prog, nfs_true, nfs_false;
54 /* And other global data */
55 static u_long *rpc_uidp = (u_long *)0;
56 static u_long nfs_xid = 1;
57 static char *rpc_unixauth;
58 extern long hostid;
59 enum vtype ntov_type[7] = { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON };
60 extern struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
61 extern struct map nfsmap[NFS_MSIZ];
62 extern struct nfsreq nfsreqh;
63 
64 /* Function ret types */
65 static char *nfs_unixauth();
66 
67 /*
68  * Maximum number of groups passed through to NFS server.
69  * According to RFC1057 it should be 16.
70  * For release 3.X systems, the maximum value is 8.
71  * For some other servers, the maximum value is 10.
72  */
73 int numgrps = 8;
74 
75 /*
76  * Create the header for an rpc request packet
77  * The function nfs_unixauth() creates a unix style authorization string
78  * and returns a ptr to it.
79  * The hsiz is the size of the rest of the nfs request header.
80  * (just used to decide if a cluster is a good idea)
81  * nb: the prog, vers and procid args are already in xdr byte order
82  */
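/*
 * Sketch of the RPC call header this builds (per RFC 1057, each field one
 * 32-bit XDR word):
 *	xid | msgtype=CALL | rpcvers=2 | prog | vers | proc |
 *	cred (AUTH_UNIX, from nfs_unixauth()) | verf (AUTH_NULL)
 * The first six words are filled in directly below; the credential and the
 * null verifier are copied in from the string that nfs_unixauth() returns.
 */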
83 struct mbuf *nfsm_reqh(prog, vers, procid, cred, hsiz, bpos, mb, retxid)
84 	u_long prog;
85 	u_long vers;
86 	u_long procid;
87 	struct ucred *cred;
88 	int hsiz;
89 	caddr_t *bpos;
90 	struct mbuf **mb;
91 	u_long *retxid;
92 {
93 	register struct mbuf *mreq, *m;
94 	register u_long *tl;
95 	struct mbuf *m1;
96 	char *ap;
97 	int asiz, siz;
98 
99 	NFSMGETHDR(mreq);
100 	asiz = ((((cred->cr_ngroups - 1) > numgrps) ? numgrps :
101 		  (cred->cr_ngroups - 1)) << 2);
102 #ifdef FILLINHOST
103 	asiz += nfsm_rndup(hostnamelen)+(9*NFSX_UNSIGNED);
104 #else
105 	asiz += 9*NFSX_UNSIGNED;
106 #endif
107 
108 	/* If we need a lot, alloc a cluster ?? */
109 	if ((asiz+hsiz+RPC_SIZ) > MHLEN)
110 		MCLGET(mreq, M_WAIT);
111 	mreq->m_len = NFSMSIZ(mreq);
112 	siz = mreq->m_len;
113 	m1 = mreq;
114 	/*
115 	 * Alloc enough mbufs
116 	 * We do it now to avoid all sleeps after the call to nfs_unixauth()
117 	 */
118 	while ((asiz+RPC_SIZ) > siz) {
119 		MGET(m, M_WAIT, MT_DATA);
120 		m1->m_next = m;
121 		m->m_len = MLEN;
122 		siz += MLEN;
123 		m1 = m;
124 	}
125 	tl = mtod(mreq, u_long *);
126 	*tl++ = *retxid = txdr_unsigned(++nfs_xid);
127 	*tl++ = rpc_call;
128 	*tl++ = rpc_vers;
129 	*tl++ = prog;
130 	*tl++ = vers;
131 	*tl++ = procid;
132 
133 	/* Now we can call nfs_unixauth() and copy it in */
134 	ap = nfs_unixauth(cred);
135 	m = mreq;
136 	siz = m->m_len-RPC_SIZ;
137 	if (asiz <= siz) {
138 		bcopy(ap, (caddr_t)tl, asiz);
139 		m->m_len = asiz+RPC_SIZ;
140 	} else {
141 		bcopy(ap, (caddr_t)tl, siz);
142 		ap += siz;
143 		asiz -= siz;
144 		while (asiz > 0) {
145 			siz = (asiz > MLEN) ? MLEN : asiz;
146 			m = m->m_next;
147 			bcopy(ap, mtod(m, caddr_t), siz);
148 			m->m_len = siz;
149 			asiz -= siz;
150 			ap += siz;
151 		}
152 	}
153 
154 	/* Finally, return values */
155 	*mb = m;
156 	*bpos = mtod(m, caddr_t)+m->m_len;
157 	return (mreq);
158 }
159 
160 /*
161  * copies mbuf chain to the uio scatter/gather list
162  */
163 nfsm_mbuftouio(mrep, uiop, siz, dpos)
164 	struct mbuf **mrep;
165 	register struct uio *uiop;
166 	int siz;
167 	caddr_t *dpos;
168 {
169 	register char *mbufcp, *uiocp;
170 	register int xfer, left, len;
171 	register struct mbuf *mp;
172 	long uiosiz, rem;
173 	int error = 0;
174 
175 	mp = *mrep;
176 	mbufcp = *dpos;
177 	len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
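	/*
	 * XDR pads opaque data out to a multiple of 4 bytes; rem is the
	 * number of pad bytes that follow the siz data bytes and must be
	 * skipped (e.g. for a 5 byte chunk, rem == 3).
	 */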
178 	rem = nfsm_rndup(siz)-siz;
179 	while (siz > 0) {
180 		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
181 			return (EFBIG);
182 		left = uiop->uio_iov->iov_len;
183 		uiocp = uiop->uio_iov->iov_base;
184 		if (left > siz)
185 			left = siz;
186 		uiosiz = left;
187 		while (left > 0) {
188 			while (len == 0) {
189 				mp = mp->m_next;
190 				if (mp == NULL)
191 					return (EBADRPC);
192 				mbufcp = mtod(mp, caddr_t);
193 				len = mp->m_len;
194 			}
195 			xfer = (left > len) ? len : left;
196 #ifdef notdef
197 			/* Not Yet.. */
198 			if (uiop->uio_iov->iov_op != NULL)
199 				(*(uiop->uio_iov->iov_op))
200 				(mbufcp, uiocp, xfer);
201 			else
202 #endif
203 			if (uiop->uio_segflg == UIO_SYSSPACE)
204 				bcopy(mbufcp, uiocp, xfer);
205 			else
206 				copyout(mbufcp, uiocp, xfer);
207 			left -= xfer;
208 			len -= xfer;
209 			mbufcp += xfer;
210 			uiocp += xfer;
211 			uiop->uio_offset += xfer;
212 			uiop->uio_resid -= xfer;
213 		}
214 		if (uiop->uio_iov->iov_len <= siz) {
215 			uiop->uio_iovcnt--;
216 			uiop->uio_iov++;
217 		} else {
218 			uiop->uio_iov->iov_base += uiosiz;
219 			uiop->uio_iov->iov_len -= uiosiz;
220 		}
221 		siz -= uiosiz;
222 	}
223 	*dpos = mbufcp;
224 	*mrep = mp;
225 	if (rem > 0) {
226 		if (len < rem)
227 			error = nfs_adv(mrep, dpos, rem, len);
228 		else
229 			*dpos += rem;
230 	}
231 	return (error);
232 }
233 
234 /*
235  * copies a uio scatter/gather list to an mbuf chain...
236  */
237 nfsm_uiotombuf(uiop, mq, siz, bpos)
238 	register struct uio *uiop;
239 	struct mbuf **mq;
240 	int siz;
241 	caddr_t *bpos;
242 {
243 	register char *uiocp;
244 	register struct mbuf *mp, *mp2;
245 	register int xfer, left, len;
246 	int uiosiz, clflg, rem;
247 	char *cp;
248 
249 	if (siz > MLEN)		/* or should it >= MCLBYTES ?? */
250 		clflg = 1;
251 	else
252 		clflg = 0;
253 	rem = nfsm_rndup(siz)-siz;
254 	mp2 = *mq;
255 	while (siz > 0) {
256 		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
257 			return (EINVAL);
258 		left = uiop->uio_iov->iov_len;
259 		uiocp = uiop->uio_iov->iov_base;
260 		if (left > siz)
261 			left = siz;
262 		uiosiz = left;
263 		while (left > 0) {
264 			MGET(mp, M_WAIT, MT_DATA);
265 			if (clflg)
266 				MCLGET(mp, M_WAIT);
267 			mp->m_len = NFSMSIZ(mp);
268 			mp2->m_next = mp;
269 			mp2 = mp;
270 			xfer = (left > mp->m_len) ? mp->m_len : left;
271 #ifdef notdef
272 			/* Not Yet.. */
273 			if (uiop->uio_iov->iov_op != NULL)
274 				(*(uiop->uio_iov->iov_op))
275 				(uiocp, mtod(mp, caddr_t), xfer);
276 			else
277 #endif
278 			if (uiop->uio_segflg == UIO_SYSSPACE)
279 				bcopy(uiocp, mtod(mp, caddr_t), xfer);
280 			else
281 				copyin(uiocp, mtod(mp, caddr_t), xfer);
282 			len = mp->m_len;
283 			mp->m_len = xfer;
284 			left -= xfer;
285 			uiocp += xfer;
286 			uiop->uio_offset += xfer;
287 			uiop->uio_resid -= xfer;
288 		}
289 		if (uiop->uio_iov->iov_len <= siz) {
290 			uiop->uio_iovcnt--;
291 			uiop->uio_iov++;
292 		} else {
293 			uiop->uio_iov->iov_base += uiosiz;
294 			uiop->uio_iov->iov_len -= uiosiz;
295 		}
296 		siz -= uiosiz;
297 	}
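	/*
	 * Zero-fill out to the next 4 byte XDR boundary, grabbing another
	 * mbuf if the pad bytes will not fit in the current one.
	 */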
298 	if (rem > 0) {
299 		if (rem > (len-mp->m_len)) {
300 			MGET(mp, M_WAIT, MT_DATA);
301 			mp->m_len = 0;
302 			mp2->m_next = mp;
303 		}
304 		cp = mtod(mp, caddr_t)+mp->m_len;
305 		for (left = 0; left < rem; left++)
306 			*cp++ = '\0';
307 		mp->m_len += rem;
308 		*bpos = cp;
309 	} else
310 		*bpos = mtod(mp, caddr_t)+mp->m_len;
311 	*mq = mp;
312 	return (0);
313 }
314 
315 /*
316  * Help break down an mbuf chain by making the first siz bytes contiguous
317  * and returning a pointer to them via *cp2.
318  * If updateflg == TRUE we may overwrite the first part of the mbuf data.
319  * This is used by the macros nfsm_disect and nfsm_disecton for tough
320  * cases. (The macros use the vars. dpos and dpos2)
321  */
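/*
 * A typical use is pulling a small fixed-size structure (for instance the
 * NFSX_FATTR attribute block in nfs_loadattrcache() below) out of a reply
 * as one contiguous chunk, even when it happens to straddle an mbuf boundary.
 */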
322 nfsm_disct(mdp, dposp, siz, left, updateflg, cp2)
323 	struct mbuf **mdp;
324 	caddr_t *dposp;
325 	int siz;
326 	int left;
327 	int updateflg;
328 	caddr_t *cp2;
329 {
330 	register struct mbuf *mp, *mp2;
331 	register int siz2, xfer;
332 	register caddr_t tl;
333 
334 	mp = *mdp;
335 	while (left == 0) {
336 		*mdp = mp = mp->m_next;
337 		if (mp == NULL)
338 			return (EBADRPC);
339 		left = mp->m_len;
340 		*dposp = mtod(mp, caddr_t);
341 	}
342 	if (left >= siz) {
343 		*cp2 = *dposp;
344 		*dposp += siz;
345 	} else if (mp->m_next == NULL) {
346 		return (EBADRPC);
347 	} else if (siz > MHLEN) {
348 		panic("nfs S too big");
349 	} else {
350 		/* Iff update, you can overwrite, else must alloc new mbuf */
351 		if (updateflg) {
352 			NFSMINOFF(mp);
353 		} else {
354 			MGET(mp2, M_WAIT, MT_DATA);
355 			mp2->m_next = mp->m_next;
356 			mp->m_next = mp2;
357 			mp->m_len -= left;
358 			mp = mp2;
359 		}
360 		*cp2 = tl = mtod(mp, caddr_t);
361 		bcopy(*dposp, tl, left);		/* Copy what was left */
362 		siz2 = siz-left;
363 		tl += left;
364 		mp2 = mp->m_next;
365 		/* Loop around copying up the siz2 bytes */
366 		while (siz2 > 0) {
367 			if (mp2 == NULL)
368 				return (EBADRPC);
369 			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
370 			if (xfer > 0) {
371 				bcopy(mtod(mp2, caddr_t), tl, xfer);
372 				NFSMADV(mp2, xfer);
373 				mp2->m_len -= xfer;
374 				tl += xfer;
375 				siz2 -= xfer;
376 			}
377 			if (siz2 > 0)
378 				mp2 = mp2->m_next;
379 		}
380 		mp->m_len = siz;
381 		*mdp = mp2;
382 		*dposp = mtod(mp2, caddr_t);
383 	}
384 	return (0);
385 }
386 
387 /*
388  * Advance the position in the mbuf chain.
389  */
390 nfs_adv(mdp, dposp, offs, left)
391 	struct mbuf **mdp;
392 	caddr_t *dposp;
393 	int offs;
394 	int left;
395 {
396 	register struct mbuf *m;
397 	register int s;
398 
399 	m = *mdp;
400 	s = left;
401 	while (s < offs) {
402 		offs -= s;
403 		m = m->m_next;
404 		if (m == NULL)
405 			return (EBADRPC);
406 		s = m->m_len;
407 	}
408 	*mdp = m;
409 	*dposp = mtod(m, caddr_t)+offs;
410 	return (0);
411 }
412 
413 /*
414  * Copy a string into mbufs for the hard cases...
415  */
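/*
 * An XDR string is a 4 byte length followed by the bytes themselves, padded
 * with zeros to a 4 byte boundary; e.g. a 5 byte name occupies 4+5+3 = 12
 * bytes on the wire.  The "hard cases" are strings too long to fit in the
 * space remaining in the current mbuf.
 */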
416 nfsm_strtmbuf(mb, bpos, cp, siz)
417 	struct mbuf **mb;
418 	char **bpos;
419 	char *cp;
420 	long siz;
421 {
422 	register struct mbuf *m1, *m2;
423 	long left, xfer, len, tlen;
424 	u_long *tl;
425 	int putsize;
426 
427 	putsize = 1;
428 	m2 = *mb;
429 	left = NFSMSIZ(m2)-m2->m_len;
430 	if (left > 0) {
431 		tl = ((u_long *)(*bpos));
432 		*tl++ = txdr_unsigned(siz);
433 		putsize = 0;
434 		left -= NFSX_UNSIGNED;
435 		m2->m_len += NFSX_UNSIGNED;
436 		if (left > 0) {
437 			bcopy(cp, (caddr_t) tl, left);
438 			siz -= left;
439 			cp += left;
440 			m2->m_len += left;
441 			left = 0;
442 		}
443 	}
444 	/* Loop around adding mbufs */
445 	while (siz > 0) {
446 		MGET(m1, M_WAIT, MT_DATA);
447 		if (siz > MLEN)
448 			MCLGET(m1, M_WAIT);
449 		m1->m_len = NFSMSIZ(m1);
450 		m2->m_next = m1;
451 		m2 = m1;
452 		tl = mtod(m1, u_long *);
453 		tlen = 0;
454 		if (putsize) {
455 			*tl++ = txdr_unsigned(siz);
456 			m1->m_len -= NFSX_UNSIGNED;
457 			tlen = NFSX_UNSIGNED;
458 			putsize = 0;
459 		}
460 		if (siz < m1->m_len) {
461 			len = nfsm_rndup(siz);
462 			xfer = siz;
463 			if (xfer < len)
464 				*(tl+(xfer>>2)) = 0;
465 		} else {
466 			xfer = len = m1->m_len;
467 		}
468 		bcopy(cp, (caddr_t) tl, xfer);
469 		m1->m_len = len+tlen;
470 		siz -= xfer;
471 		cp += xfer;
472 	}
473 	*mb = m1;
474 	*bpos = mtod(m1, caddr_t)+m1->m_len;
475 	return (0);
476 }
477 
478 /*
479  * Called once to initialize data structures...
480  */
481 nfs_init()
482 {
483 	register int i;
484 
485 	rpc_vers = txdr_unsigned(RPC_VER2);
486 	rpc_call = txdr_unsigned(RPC_CALL);
487 	rpc_reply = txdr_unsigned(RPC_REPLY);
488 	rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
489 	rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
490 	rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
491 	rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
492 	nfs_vers = txdr_unsigned(NFS_VER2);
493 	nfs_prog = txdr_unsigned(NFS_PROG);
494 	nfs_true = txdr_unsigned(TRUE);
495 	nfs_false = txdr_unsigned(FALSE);
496 	/* Loop thru nfs procids */
497 	for (i = 0; i < NFS_NPROCS; i++)
498 		nfs_procids[i] = txdr_unsigned(i);
499 	/* Ensure async daemons disabled */
500 	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
501 		nfs_iodwant[i] = (struct proc *)0;
502 	nfs_xdrneg1 = txdr_unsigned(-1);
503 	nfs_nhinit();			/* Init the nfsnode table */
504 	nfsrv_initcache();		/* Init the server request cache */
505 	rminit(nfsmap, (long)NFS_MAPREG, (long)1, "nfs mapreg", NFS_MSIZ);
506 
507 	/*
508 	 * Initialize reply list and start timer
509 	 */
510 	nfsreqh.r_prev = nfsreqh.r_next = &nfsreqh;
511 	nfs_timer();
512 }
513 
514 /*
515  * Fill in the rest of the rpc_unixauth and return it
516  */
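/*
 * Layout of the AUTH_UNIX credential built below (RFC 1057), one XDR word
 * per field unless noted:
 *	AUTH_UNIX flavor | body length (filled in last) | stamp (hostid) |
 *	machine name (length + bytes, or 0 here unless FILLINHOST) |
 *	uid | gid | #gids | gids...
 * followed by an AUTH_NULL verifier (flavor 0, length 0).  The static part
 * up through the machine name is built once; the per-call part starting at
 * rpc_uidp is rewritten for each request.
 */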
517 static char *nfs_unixauth(cr)
518 	register struct ucred *cr;
519 {
520 	register u_long *tl;
521 	register int i;
522 	int ngr;
523 
524 	/* Maybe someday there should be a cache of AUTH_SHORT's */
525 	if ((tl = rpc_uidp) == NULL) {
526 #ifdef FILLINHOST
527 		i = nfsm_rndup(hostnamelen)+(25*NFSX_UNSIGNED);
528 #else
529 		i = 25*NFSX_UNSIGNED;
530 #endif
531 		MALLOC(tl, u_long *, i, M_TEMP, M_WAITOK);
532 		bzero((caddr_t)tl, i);
533 		rpc_unixauth = (caddr_t)tl;
534 		*tl++ = txdr_unsigned(RPCAUTH_UNIX);
535 		tl++;	/* Fill in size later */
536 		*tl++ = hostid;
537 #ifdef FILLINHOST
538 		*tl++ = txdr_unsigned(hostnamelen);
539 		i = nfsm_rndup(hostnamelen);
540 		bcopy(hostname, (caddr_t)tl, hostnamelen);
541 		tl += (i>>2);
542 #else
543 		*tl++ = 0;
544 #endif
545 		rpc_uidp = tl;
546 	}
547 	*tl++ = txdr_unsigned(cr->cr_uid);
548 	*tl++ = txdr_unsigned(cr->cr_groups[0]);
549 	ngr = ((cr->cr_ngroups - 1) > numgrps) ? numgrps : (cr->cr_ngroups - 1);
550 	*tl++ = txdr_unsigned(ngr);
551 	for (i = 1; i <= ngr; i++)
552 		*tl++ = txdr_unsigned(cr->cr_groups[i]);
553 	/* And add the AUTH_NULL */
554 	*tl++ = 0;
555 	*tl = 0;
556 	i = (((caddr_t)tl)-rpc_unixauth)-12;
557 	tl = (u_long *)(rpc_unixauth+4);
558 	*tl = txdr_unsigned(i);
559 	return (rpc_unixauth);
560 }
561 
562 /*
563  * Attribute cache routines.
564  * nfs_loadattrcache() - loads or updates the cache contents from attributes
565  *	that are on the mbuf list
566  * nfs_getattrcache() - returns valid attributes if found in cache, returns
567  *	error otherwise
568  */
569 
570 /*
571  * Load the attribute cache (that lives in the nfsnode entry) with
572  * the values on the mbuf list and
573  * Iff vaper not NULL
574  *    copy the attributes to *vaper
575  */
576 nfs_loadattrcache(vpp, mdp, dposp, vaper)
577 	struct vnode **vpp;
578 	struct mbuf **mdp;
579 	caddr_t *dposp;
580 	struct vattr *vaper;
581 {
582 	register struct vnode *vp = *vpp;
583 	register struct vattr *vap;
584 	register struct nfsv2_fattr *fp;
585 	extern struct vnodeops spec_nfsv2nodeops, spec_vnodeops;
586 	register struct nfsnode *np;
587 	register long t1;
588 	caddr_t dpos, cp2;
589 	int error = 0;
590 	struct mbuf *md;
591 	enum vtype type;
592 	u_short mode;
593 	long rdev;
594 	struct timeval mtime;
595 	struct vnode *nvp;
596 
597 	md = *mdp;
598 	dpos = *dposp;
599 	t1 = (mtod(md, caddr_t)+md->m_len)-dpos;
600 	if (error = nfsm_disct(&md, &dpos, NFSX_FATTR, t1, TRUE, &cp2))
601 		return (error);
602 	fp = (struct nfsv2_fattr *)cp2;
603 	type = nfstov_type(fp->fa_type);
604 	mode = fxdr_unsigned(u_short, fp->fa_mode);
605 	if (type == VNON)
606 		type = IFTOVT(mode);
607 	rdev = fxdr_unsigned(long, fp->fa_rdev);
608 	fxdr_time(&fp->fa_mtime, &mtime);
609 	/*
610 	 * If v_type == VNON it is a new node, so fill in the v_type,
611 	 * n_mtime fields. Check to see if it represents a special
612 	 * device, and if so, check for a possible alias. Once the
613 	 * correct vnode has been obtained, fill in the rest of the
614 	 * information.
615 	 */
616 	np = VTONFS(vp);
617 	if (vp->v_type == VNON) {
618 		if (type == VCHR && rdev == 0xffffffff)
619 			vp->v_type = type = VFIFO;
620 		else
621 			vp->v_type = type;
622 		if (vp->v_type == VFIFO) {
623 #ifdef FIFO
624 			extern struct vnodeops fifo_nfsv2nodeops;
625 			vp->v_op = &fifo_nfsv2nodeops;
626 #else
627 			return (EOPNOTSUPP);
628 #endif /* FIFO */
629 		}
630 		if (vp->v_type == VCHR || vp->v_type == VBLK) {
631 			vp->v_op = &spec_nfsv2nodeops;
632 			if (nvp = checkalias(vp, (dev_t)rdev, vp->v_mount)) {
633 				/*
634 				 * Discard unneeded vnode, but save its nfsnode.
635 				 */
636 				remque(np);
637 				nfs_unlock(vp);
638 				nvp->v_data = vp->v_data;
639 				vp->v_data = NULL;
640 				vp->v_op = &spec_vnodeops;
641 				vrele(vp);
642 				vgone(vp);
643 				/*
644 				 * Reinitialize aliased node.
645 				 */
646 				np->n_vnode = nvp;
647 				insque(np, nfs_hash(&np->n_fh));
648 				nfs_lock(nvp);
649 				*vpp = vp = nvp;
650 			}
651 		}
652 		np->n_mtime = mtime.tv_sec;
653 	}
654 	vap = &np->n_vattr;
655 	vap->va_type = type;
656 	vap->va_mode = (mode & 07777);
657 	vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
658 	vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
659 	vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
660 	vap->va_size = fxdr_unsigned(u_long, fp->fa_size);
661 	if ((np->n_flag & NMODIFIED) == 0 || vap->va_size > np->n_size) {
662 		np->n_size = vap->va_size;
663 		vnode_pager_setsize(vp, np->n_size);
664 	}
665 	vap->va_blocksize = fxdr_unsigned(long, fp->fa_blocksize);
666 	vap->va_rdev = (dev_t)rdev;
667 	vap->va_bytes = fxdr_unsigned(long, fp->fa_blocks) * NFS_FABLKSIZE;
668 	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
669 	vap->va_fileid = fxdr_unsigned(long, fp->fa_fileid);
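	/*
	 * The otherwise unused microsecond fields of fa_atime and fa_ctime
	 * are used here to carry the BSD file flags and generation number,
	 * which have no slot of their own in the NFS V2 fattr.
	 */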
670 	vap->va_atime.tv_sec = fxdr_unsigned(long, fp->fa_atime.tv_sec);
671 	vap->va_atime.tv_usec = 0;
672 	vap->va_flags = fxdr_unsigned(u_long, fp->fa_atime.tv_usec);
673 	vap->va_mtime = mtime;
674 	vap->va_ctime.tv_sec = fxdr_unsigned(long, fp->fa_ctime.tv_sec);
675 	vap->va_ctime.tv_usec = 0;
676 	vap->va_gen = fxdr_unsigned(u_long, fp->fa_ctime.tv_usec);
677 #ifdef _NOQUAD
678 	vap->va_size_rsv = 0;
679 	vap->va_bytes_rsv = 0;
680 #endif
681 	np->n_attrstamp = time.tv_sec;
682 	*dposp = dpos;
683 	*mdp = md;
684 	if (vaper != NULL) {
685 		bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
686 		if ((np->n_flag & NMODIFIED) && (np->n_size > vap->va_size))
687 			vaper->va_size = np->n_size;
688 	}
689 	return (0);
690 }
691 
692 /*
693  * Check the time stamp
694  * If the cache is valid, copy contents to *vap and return 0
695  * otherwise return an error
696  */
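/*
 * "Valid" here simply means the attributes were loaded by nfs_loadattrcache()
 * less than NFS_ATTRTIMEO seconds ago.
 */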
697 nfs_getattrcache(vp, vap)
698 	register struct vnode *vp;
699 	struct vattr *vap;
700 {
701 	register struct nfsnode *np;
702 
703 	np = VTONFS(vp);
704 	if ((time.tv_sec-np->n_attrstamp) < NFS_ATTRTIMEO) {
705 		nfsstats.attrcache_hits++;
706 		bcopy((caddr_t)&np->n_vattr,(caddr_t)vap,sizeof(struct vattr));
707 		if ((np->n_flag & NMODIFIED) == 0) {
708 			np->n_size = vap->va_size;
709 			vnode_pager_setsize(vp, np->n_size);
710 		} else if (np->n_size > vap->va_size)
711 			vap->va_size = np->n_size;
712 		return (0);
713 	} else {
714 		nfsstats.attrcache_misses++;
715 		return (ENOENT);
716 	}
717 }
718 
719 /*
720  * Set up nameidata for a namei() call and do it
721  */
722 nfs_namei(ndp, fhp, len, mdp, dposp, p)
723 	register struct nameidata *ndp;
724 	fhandle_t *fhp;
725 	int len;
726 	struct mbuf **mdp;
727 	caddr_t *dposp;
728 	struct proc *p;
729 {
730 	register int i, rem;
731 	register struct mbuf *md;
732 	register char *fromcp, *tocp;
733 	struct vnode *dp;
734 	int flag;
735 	int error;
736 
737 	flag = ndp->ni_nameiop & OPMASK;
738 	MALLOC(ndp->ni_pnbuf, char *, len + 1, M_NAMEI, M_WAITOK);
739 	/*
740 	 * Copy the name from the mbuf list to ndp->ni_pnbuf
741 	 * and set the various ndp fields appropriately.
742 	 */
743 	fromcp = *dposp;
744 	tocp = ndp->ni_pnbuf;
745 	md = *mdp;
746 	rem = mtod(md, caddr_t) + md->m_len - fromcp;
747 	ndp->ni_hash = 0;
748 	for (i = 0; i < len; i++) {
749 		while (rem == 0) {
750 			md = md->m_next;
751 			if (md == NULL) {
752 				error = EBADRPC;
753 				goto out;
754 			}
755 			fromcp = mtod(md, caddr_t);
756 			rem = md->m_len;
757 		}
758 		if (*fromcp == '\0' || *fromcp == '/') {
759 			error = EINVAL;
760 			goto out;
761 		}
762 		if (*fromcp & 0200)
763 			if ((*fromcp&0377) == ('/'|0200) || flag != DELETE) {
764 				error = EINVAL;
765 				goto out;
766 			}
767 		ndp->ni_hash += (unsigned char)*fromcp;
768 		*tocp++ = *fromcp++;
769 		rem--;
770 	}
771 	*tocp = '\0';
772 	*mdp = md;
773 	*dposp = fromcp;
774 	len = nfsm_rndup(len)-len;
775 	if (len > 0) {
776 		if (rem >= len)
777 			*dposp += len;
778 		else if (error = nfs_adv(mdp, dposp, len, rem))
779 			goto out;
780 	}
781 	ndp->ni_pathlen = tocp - ndp->ni_pnbuf;
782 	ndp->ni_ptr = ndp->ni_pnbuf;
783 	/*
784 	 * Extract and set starting directory.
785 	 */
786 	if (error = nfsrv_fhtovp(fhp, FALSE, &dp, ndp->ni_cred))
787 		goto out;
788 	if (dp->v_type != VDIR) {
789 		vrele(dp);
790 		error = ENOTDIR;
791 		goto out;
792 	}
793 	ndp->ni_startdir = dp;
794 	ndp->ni_nameiop |= (NOCROSSMOUNT | REMOTE);
795 	/*
796 	 * And call lookup() to do the real work
797 	 */
798 	if (error = lookup(ndp, p))
799 		goto out;
800 	/*
801 	 * Check for encountering a symbolic link
802 	 */
803 	if (ndp->ni_more) {
804 		if ((ndp->ni_nameiop & LOCKPARENT) && ndp->ni_pathlen == 1)
805 			vput(ndp->ni_dvp);
806 		else
807 			vrele(ndp->ni_dvp);
808 		vput(ndp->ni_vp);
809 		ndp->ni_vp = NULL;
810 		error = EINVAL;
811 		goto out;
812 	}
813 	/*
814 	 * Check for saved name request
815 	 */
816 	if (ndp->ni_nameiop & (SAVENAME | SAVESTART)) {
817 		ndp->ni_nameiop |= HASBUF;
818 		return (0);
819 	}
820 out:
821 	FREE(ndp->ni_pnbuf, M_NAMEI);
822 	return (error);
823 }
824 
825 /*
826  * A fiddled version of m_adj() that ensures null fill to a long
827  * boundary and only trims off the back end
828  */
829 nfsm_adj(mp, len, nul)
830 	struct mbuf *mp;
831 	register int len;
832 	int nul;
833 {
834 	register struct mbuf *m;
835 	register int count, i;
836 	register char *cp;
837 
838 	/*
839 	 * Trim from tail.  Scan the mbuf chain,
840 	 * calculating its length and finding the last mbuf.
841 	 * If the adjustment only affects this mbuf, then just
842 	 * adjust and return.  Otherwise, rescan and truncate
843 	 * after the remaining size.
844 	 */
845 	count = 0;
846 	m = mp;
847 	for (;;) {
848 		count += m->m_len;
849 		if (m->m_next == (struct mbuf *)0)
850 			break;
851 		m = m->m_next;
852 	}
853 	if (m->m_len > len) {
854 		m->m_len -= len;
855 		if (nul > 0) {
856 			cp = mtod(m, caddr_t)+m->m_len-nul;
857 			for (i = 0; i < nul; i++)
858 				*cp++ = '\0';
859 		}
860 		return;
861 	}
862 	count -= len;
863 	if (count < 0)
864 		count = 0;
865 	/*
866 	 * Correct length for chain is "count".
867 	 * Find the mbuf with last data, adjust its length,
868 	 * and toss data from remaining mbufs on chain.
869 	 */
870 	for (m = mp; m; m = m->m_next) {
871 		if (m->m_len >= count) {
872 			m->m_len = count;
873 			if (nul > 0) {
874 				cp = mtod(m, caddr_t)+m->m_len-nul;
875 				for (i = 0; i < nul; i++)
876 					*cp++ = '\0';
877 			}
878 			break;
879 		}
880 		count -= m->m_len;
881 	}
882 	while (m = m->m_next)
883 		m->m_len = 0;
884 }
885 
886 /*
887  * nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
888  * 	- look up fsid in mount list (if not found ret error)
889  *	- check that it is exported
890  *	- get vp by calling VFS_FHTOVP() macro
891  *	- if not lockflag unlock it with VOP_UNLOCK()
892  *	- if cred->cr_uid == 0 set it to m_exroot
893  */
894 nfsrv_fhtovp(fhp, lockflag, vpp, cred)
895 	fhandle_t *fhp;
896 	int lockflag;
897 	struct vnode **vpp;
898 	struct ucred *cred;
899 {
900 	register struct mount *mp;
901 
902 	if ((mp = getvfs(&fhp->fh_fsid)) == NULL)
903 		return (ESTALE);
904 	if ((mp->mnt_flag & MNT_EXPORTED) == 0)
905 		return (EACCES);
906 	if (VFS_FHTOVP(mp, &fhp->fh_fid, vpp))
907 		return (ESTALE);
908 	if (cred->cr_uid == 0)
909 		cred->cr_uid = mp->mnt_exroot;
910 	if (!lockflag)
911 		VOP_UNLOCK(*vpp);
912 	return (0);
913 }
914 
915 /*
916  * These two functions implement nfs rpc compression.
917  * The algorithm is a trivial run length encoding of '\0' bytes. A run of
918  * 2 to 17 zeros is replaced by one byte whose high order nibble is hex "e"
919  * and whose low four bits hold the number of zeros minus 2. Any data byte
920  * with a high order nibble of hex "e" is byte stuffed (sent twice).
921  * The compressed data is padded with 0x0 bytes to an even multiple of
922  * 4 bytes in length to avoid any weird long pointer alignments.
923  * If compression/uncompression is unsuccessful, the original mbuf list
924  * is returned.
925  * The first four bytes (the XID) are left uncompressed and the fifth
926  * byte is set to 0x1 for request and 0x2 for reply.
927  * An uncompressed RPC will always have the fifth byte == 0x0.
928  */
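/*
 * Worked example (illustrative only, assuming NFSCRLE() encodes a run as
 * described above): within the body of a message the byte sequence
 *	01 00 00 00 00 00 e7 08
 * compresses to
 *	01 e3 e7 e7 08
 * The run of five zeros becomes 0xe0|(5-2) == 0xe3, the data byte 0xe7 is
 * stuffed (sent twice), and a run of fewer than two zeros is passed through
 * unchanged.
 */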
929 struct mbuf *
930 nfs_compress(m0)
931 	struct mbuf *m0;
932 {
933 	register u_char ch, nextch;
934 	register int i, rlelast;
935 	register u_char *ip, *op;
936 	register int ileft, oleft, noteof;
937 	register struct mbuf *m, *om;
938 	struct mbuf **mp, *retm;
939 	int olen, clget;
940 
941 	i = rlelast = 0;
942 	noteof = 1;
943 	m = m0;
944 	if (m->m_len < 12)
945 		return (m0);
946 	if (m->m_pkthdr.len >= MINCLSIZE)
947 		clget = 1;
948 	else
949 		clget = 0;
950 	ileft = m->m_len - 9;
951 	ip = mtod(m, u_char *);
952 	MGETHDR(om, M_WAIT, MT_DATA);
953 	if (clget)
954 		MCLGET(om, M_WAIT);
955 	retm = om;
956 	mp = &om->m_next;
957 	olen = om->m_len = 5;
958 	oleft = M_TRAILINGSPACE(om);
959 	op = mtod(om, u_char *);
960 	*((u_long *)op) = *((u_long *)ip);
961 	ip += 7;
962 	op += 4;
963 	*op++ = *ip++ + 1;
964 	nextch = *ip++;
965 	while (noteof) {
966 		ch = nextch;
967 		if (ileft == 0) {
968 			do {
969 				m = m->m_next;
970 			} while (m && m->m_len == 0);
971 			if (m) {
972 				ileft = m->m_len;
973 				ip = mtod(m, u_char *);
974 			} else {
975 				noteof = 0;
976 				nextch = 0x1;
977 				goto doit;
978 			}
979 		}
980 		nextch = *ip++;
981 		ileft--;
982 doit:
983 		if (ch == '\0') {
984 			if (++i == NFSC_MAX || nextch != '\0') {
985 				if (i < 2) {
986 					nfscput('\0');
987 				} else {
988 					if (rlelast == i) {
989 						nfscput('\0');
990 						i--;
991 					}
992 					if (NFSCRLE(i) == (nextch & 0xff)) {
993 						i--;
994 						if (i < 2) {
995 							nfscput('\0');
996 						} else {
997 							nfscput(NFSCRLE(i));
998 						}
999 						nfscput('\0');
1000 						rlelast = 0;
1001 					} else {
1002 						nfscput(NFSCRLE(i));
1003 						rlelast = i;
1004 					}
1005 				}
1006 				i = 0;
1007 			}
1008 		} else {
1009 			if ((ch & NFSCRL) == NFSCRL) {
1010 				nfscput(ch);
1011 			}
1012 			nfscput(ch);
1013 			i = rlelast = 0;
1014 		}
1015 	}
1016 	if (olen < m0->m_pkthdr.len) {
1017 		m_freem(m0);
1018 		if (i = (olen & 0x3)) {
1019 			i = 4 - i;
1020 			while (i-- > 0) {
1021 				nfscput('\0');
1022 			}
1023 		}
1024 		retm->m_pkthdr.len = olen;
1025 		retm->m_pkthdr.rcvif = (struct ifnet *)0;
1026 		return (retm);
1027 	} else {
1028 		m_freem(retm);
1029 		return (m0);
1030 	}
1031 }
1032 
1033 struct mbuf *
1034 nfs_uncompress(m0)
1035 	struct mbuf *m0;
1036 {
1037 	register u_char cp, nextcp, *ip, *op;
1038 	register struct mbuf *m, *om;
1039 	struct mbuf *retm, **mp;
1040 	int i, j, noteof, clget, ileft, oleft, olen;
1041 
1042 	m = m0;
1043 	i = 0;
1044 	while (m && i < MINCLSIZE) {
1045 		i += m->m_len;
1046 		m = m->m_next;
1047 	}
1048 	if (i < 6)
1049 		return (m0);
1050 	if (i >= MINCLSIZE)
1051 		clget = 1;
1052 	else
1053 		clget = 0;
1054 	m = m0;
1055 	MGET(om, M_WAIT, MT_DATA);
1056 	if (clget)
1057 		MCLGET(om, M_WAIT);
1058 	olen = om->m_len = 8;
1059 	oleft = M_TRAILINGSPACE(om);
1060 	op = mtod(om, u_char *);
1061 	retm = om;
1062 	mp = &om->m_next;
1063 	if (m->m_len >= 6) {
1064 		ileft = m->m_len - 6;
1065 		ip = mtod(m, u_char *);
1066 		*((u_long *)op) = *((u_long *)ip);
1067 		bzero(op + 4, 3);
1068 		ip += 4;
1069 		op += 7;
1070 		if (*ip == '\0') {
1071 			m_freem(om);
1072 			return (m0);
1073 		}
1074 		*op++ = *ip++ - 1;
1075 		cp = *ip++;
1076 	} else {
1077 		ileft = m->m_len;
1078 		ip = mtod(m, u_char *);
1079 		nfscget(*op++);
1080 		nfscget(*op++);
1081 		nfscget(*op++);
1082 		nfscget(*op++);
1083 		bzero(op, 3);
1084 		op += 3;
1085 		nfscget(*op);
1086 		if (*op == '\0') {
1087 			m_freem(om);
1088 			return (m0);
1089 		}
1090 		(*op)--;
1091 		op++;
1092 		nfscget(cp);
1093 	}
1094 	noteof = 1;
1095 	while (noteof) {
1096 		if ((cp & NFSCRL) == NFSCRL) {
1097 			nfscget(nextcp);
1098 			if (cp == nextcp) {
1099 				nfscput(cp);
1100 				goto readit;
1101 			} else {
1102 				i = (cp & 0xf) + 2;
1103 				for (j = 0; j < i; j++) {
1104 					nfscput('\0');
1105 				}
1106 				cp = nextcp;
1107 			}
1108 		} else {
1109 			nfscput(cp);
1110 readit:
1111 			nfscget(cp);
1112 		}
1113 	}
1114 	m_freem(m0);
1115 	if (i = (olen & 0x3))
1116 		om->m_len -= i;
1117 	return (retm);
1118 }
1119