xref: /original-bsd/sys/nfs/nfs_subs.c (revision f3c03cba)
1 /*
2  * Copyright (c) 1989 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Rick Macklem at The University of Guelph.
7  *
8  * %sccs.include.redist.c%
9  *
10  *	@(#)nfs_subs.c	7.33 (Berkeley) 12/05/90
11  */
12 
13 /*
14  * These functions support the macros and help fiddle mbuf chains for
15  * the nfs op functions. They do things like create the rpc header and
16  * copy data between mbuf chains and uio lists.
17  */
18 #include "param.h"
19 #include "user.h"
20 #include "proc.h"
21 #include "systm.h"
22 #include "kernel.h"
23 #include "mount.h"
24 #include "file.h"
25 #include "vnode.h"
26 #include "mbuf.h"
27 #include "errno.h"
28 #include "map.h"
29 #include "rpcv2.h"
30 #include "nfsv2.h"
31 #include "nfsnode.h"
32 #include "nfs.h"
33 #include "nfsiom.h"
34 #include "xdr_subs.h"
35 #include "nfsm_subs.h"
36 #include "nfscompress.h"
37 
38 #define TRUE	1
39 #define	FALSE	0
40 
41 /*
42  * Data items converted to xdr at startup, since they are constant
43  * This is kinda hokey, but may save a little time doing byte swaps
44  */
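/*
 * (The txdr_/fxdr_ macros from xdr_subs.h do the host/network byte order
 * conversions, htonl()/ntohl() style, so converting these constants once
 * at nfs_init() time avoids repeated swaps on every request.)
 */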
45 u_long nfs_procids[NFS_NPROCS];
46 u_long nfs_xdrneg1;
47 u_long rpc_call, rpc_vers, rpc_reply, rpc_msgdenied,
48 	rpc_mismatch, rpc_auth_unix, rpc_msgaccepted;
49 u_long nfs_vers, nfs_prog, nfs_true, nfs_false;
50 /* And other global data */
51 static u_long *rpc_uidp = (u_long *)0;
52 static u_long nfs_xid = 1;
53 static char *rpc_unixauth;
54 extern long hostid;
55 enum vtype ntov_type[7] = { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON };
56 extern struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
57 extern struct map nfsmap[NFS_MSIZ];
58 extern struct nfsreq nfsreqh;
59 
60 /* Function ret types */
61 static char *nfs_unixauth();
62 
63 /*
64  * Maximum number of groups passed through to NFS server.
65  * According to RFC1057 it should be 16.
66  * For release 3.X systems, the maximum value is 8.
67  * For some other servers, the maximum value is 10.
68  */
69 int numgrps = 8;
70 
71 /*
72  * Create the header for an rpc request packet
73  * The function nfs_unixauth() creates a unix style authorization string
74  * and returns a ptr to it.
75  * The hsiz is the size of the rest of the nfs request header.
76  * (just used to decide if a cluster is a good idea)
77  * nb: Note that the prog, vers and procid args are already in xdr byte order
78  */
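/*
 * For reference: the header built here follows the ONC RPC call message
 * layout of RFC 1057 (xid, direction CALL, RPC version, program, version,
 * procedure), followed by the AUTH_UNIX credentials and AUTH_NULL
 * verifier returned by nfs_unixauth().
 */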
79 struct mbuf *nfsm_reqh(prog, vers, procid, cred, hsiz, bpos, mb, retxid)
80 	u_long prog;
81 	u_long vers;
82 	u_long procid;
83 	struct ucred *cred;
84 	int hsiz;
85 	caddr_t *bpos;
86 	struct mbuf **mb;
87 	u_long *retxid;
88 {
89 	register struct mbuf *mreq, *m;
90 	register u_long *p;
91 	struct mbuf *m1;
92 	char *ap;
93 	int asiz, siz;
94 
95 	NFSMGETHDR(mreq);
96 	asiz = ((((cred->cr_ngroups - 1) > numgrps) ? numgrps :
97 		  (cred->cr_ngroups - 1)) << 2);
98 #ifdef FILLINHOST
99 	asiz += nfsm_rndup(hostnamelen)+(9*NFSX_UNSIGNED);
100 #else
101 	asiz += 9*NFSX_UNSIGNED;
102 #endif
103 
104 	/* If we need a lot, alloc a cluster ?? */
105 	if ((asiz+hsiz+RPC_SIZ) > MHLEN)
106 		MCLGET(mreq, M_WAIT);
107 	mreq->m_len = NFSMSIZ(mreq);
108 	siz = mreq->m_len;
109 	m1 = mreq;
110 	/*
111 	 * Alloc enough mbufs
112 	 * We do it now to avoid all sleeps after the call to nfs_unixauth()
113 	 */
114 	while ((asiz+RPC_SIZ) > siz) {
115 		MGET(m, M_WAIT, MT_DATA);
116 		m1->m_next = m;
117 		m->m_len = MLEN;
118 		siz += MLEN;
119 		m1 = m;
120 	}
121 	p = mtod(mreq, u_long *);
122 	*p++ = *retxid = txdr_unsigned(++nfs_xid);
123 	*p++ = rpc_call;
124 	*p++ = rpc_vers;
125 	*p++ = prog;
126 	*p++ = vers;
127 	*p++ = procid;
128 
129 	/* Now we can call nfs_unixauth() and copy it in */
130 	ap = nfs_unixauth(cred);
131 	m = mreq;
132 	siz = m->m_len-RPC_SIZ;
133 	if (asiz <= siz) {
134 		bcopy(ap, (caddr_t)p, asiz);
135 		m->m_len = asiz+RPC_SIZ;
136 	} else {
137 		bcopy(ap, (caddr_t)p, siz);
138 		ap += siz;
139 		asiz -= siz;
140 		while (asiz > 0) {
141 			siz = (asiz > MLEN) ? MLEN : asiz;
142 			m = m->m_next;
143 			bcopy(ap, mtod(m, caddr_t), siz);
144 			m->m_len = siz;
145 			asiz -= siz;
146 			ap += siz;
147 		}
148 	}
149 
150 	/* Finally, return values */
151 	*mb = m;
152 	*bpos = mtod(m, caddr_t)+m->m_len;
153 	return (mreq);
154 }
155 
156 /*
157  * copies mbuf chain to the uio scatter/gather list
158  */
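/*
 * (siz is the XDR-encoded data length; since XDR pads opaque data to a
 * multiple of 4 bytes, rem = nfsm_rndup(siz) - siz pad bytes are skipped
 * at the end.  E.g. siz == 5 leaves rem == 3.)
 */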
159 nfsm_mbuftouio(mrep, uiop, siz, dpos)
160 	struct mbuf **mrep;
161 	register struct uio *uiop;
162 	int siz;
163 	caddr_t *dpos;
164 {
165 	register char *mbufcp, *uiocp;
166 	register int xfer, left, len;
167 	register struct mbuf *mp;
168 	long uiosiz, rem;
169 	int error = 0;
170 
171 	mp = *mrep;
172 	mbufcp = *dpos;
173 	len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
174 	rem = nfsm_rndup(siz)-siz;
175 	while (siz > 0) {
176 		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
177 			return (EFBIG);
178 		left = uiop->uio_iov->iov_len;
179 		uiocp = uiop->uio_iov->iov_base;
180 		if (left > siz)
181 			left = siz;
182 		uiosiz = left;
183 		while (left > 0) {
184 			while (len == 0) {
185 				mp = mp->m_next;
186 				if (mp == NULL)
187 					return (EBADRPC);
188 				mbufcp = mtod(mp, caddr_t);
189 				len = mp->m_len;
190 			}
191 			xfer = (left > len) ? len : left;
192 #ifdef notdef
193 			/* Not Yet.. */
194 			if (uiop->uio_iov->iov_op != NULL)
195 				(*(uiop->uio_iov->iov_op))
196 				(mbufcp, uiocp, xfer);
197 			else
198 #endif
199 			if (uiop->uio_segflg == UIO_SYSSPACE)
200 				bcopy(mbufcp, uiocp, xfer);
201 			else
202 				copyout(mbufcp, uiocp, xfer);
203 			left -= xfer;
204 			len -= xfer;
205 			mbufcp += xfer;
206 			uiocp += xfer;
207 			uiop->uio_offset += xfer;
208 			uiop->uio_resid -= xfer;
209 		}
210 		if (uiop->uio_iov->iov_len <= siz) {
211 			uiop->uio_iovcnt--;
212 			uiop->uio_iov++;
213 		} else {
214 			uiop->uio_iov->iov_base += uiosiz;
215 			uiop->uio_iov->iov_len -= uiosiz;
216 		}
217 		siz -= uiosiz;
218 	}
219 	*dpos = mbufcp;
220 	*mrep = mp;
221 	if (rem > 0) {
222 		if (len < rem)
223 			error = nfs_adv(mrep, dpos, rem, len);
224 		else
225 			*dpos += rem;
226 	}
227 	return (error);
228 }
229 
230 /*
231  * copies a uio scatter/gather list to an mbuf chain...
232  */
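/*
 * (As in nfsm_mbuftouio(), rem = nfsm_rndup(siz) - siz zero bytes are
 * appended so the chain ends on an XDR 4-byte boundary, and *bpos is
 * left pointing just past the last byte written.)
 */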
233 nfsm_uiotombuf(uiop, mq, siz, bpos)
234 	register struct uio *uiop;
235 	struct mbuf **mq;
236 	int siz;
237 	caddr_t *bpos;
238 {
239 	register char *uiocp;
240 	register struct mbuf *mp, *mp2;
241 	register int xfer, left, len;
242 	int uiosiz, clflg, rem;
243 	char *cp;
244 
245 	if (siz > MLEN)		/* or should it >= MCLBYTES ?? */
246 		clflg = 1;
247 	else
248 		clflg = 0;
249 	rem = nfsm_rndup(siz)-siz;
250 	mp2 = *mq;
251 	while (siz > 0) {
252 		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
253 			return (EINVAL);
254 		left = uiop->uio_iov->iov_len;
255 		uiocp = uiop->uio_iov->iov_base;
256 		if (left > siz)
257 			left = siz;
258 		uiosiz = left;
259 		while (left > 0) {
260 			MGET(mp, M_WAIT, MT_DATA);
261 			if (clflg)
262 				MCLGET(mp, M_WAIT);
263 			mp->m_len = NFSMSIZ(mp);
264 			mp2->m_next = mp;
265 			mp2 = mp;
266 			xfer = (left > mp->m_len) ? mp->m_len : left;
267 #ifdef notdef
268 			/* Not Yet.. */
269 			if (uiop->uio_iov->iov_op != NULL)
270 				(*(uiop->uio_iov->iov_op))
271 				(uiocp, mtod(mp, caddr_t), xfer);
272 			else
273 #endif
274 			if (uiop->uio_segflg == UIO_SYSSPACE)
275 				bcopy(uiocp, mtod(mp, caddr_t), xfer);
276 			else
277 				copyin(uiocp, mtod(mp, caddr_t), xfer);
278 			len = mp->m_len;
279 			mp->m_len = xfer;
280 			left -= xfer;
281 			uiocp += xfer;
282 			uiop->uio_offset += xfer;
283 			uiop->uio_resid -= xfer;
284 		}
285 		if (uiop->uio_iov->iov_len <= siz) {
286 			uiop->uio_iovcnt--;
287 			uiop->uio_iov++;
288 		} else {
289 			uiop->uio_iov->iov_base += uiosiz;
290 			uiop->uio_iov->iov_len -= uiosiz;
291 		}
292 		siz -= uiosiz;
293 	}
294 	if (rem > 0) {
295 		if (rem > (len-mp->m_len)) {
296 			MGET(mp, M_WAIT, MT_DATA);
297 			mp->m_len = 0;
298 			mp2->m_next = mp;
299 		}
300 		cp = mtod(mp, caddr_t)+mp->m_len;
301 		for (left = 0; left < rem; left++)
302 			*cp++ = '\0';
303 		mp->m_len += rem;
304 		*bpos = cp;
305 	} else
306 		*bpos = mtod(mp, caddr_t)+mp->m_len;
307 	*mq = mp;
308 	return (0);
309 }
310 
311 /*
312  * Help break down an mbuf chain by making the first siz bytes contiguous
313  * and returning a pointer to them via *cp2.
314  * If updateflg == TRUE the first part of the mbuf data may be overwritten.
315  * This is used by the macros nfsm_disect and nfsm_disecton for the tough
316  * cases. (The macros use the variables dpos and dpos2.)
317  */
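/*
 * Note: when the requested bytes are not already contiguous they are
 * compacted into a single mbuf, so in that case siz may not exceed
 * MHLEN (enforced by the panic below).
 */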
318 nfsm_disct(mdp, dposp, siz, left, updateflg, cp2)
319 	struct mbuf **mdp;
320 	caddr_t *dposp;
321 	int siz;
322 	int left;
323 	int updateflg;
324 	caddr_t *cp2;
325 {
326 	register struct mbuf *mp, *mp2;
327 	register int siz2, xfer;
328 	register caddr_t p;
329 
330 	mp = *mdp;
331 	while (left == 0) {
332 		*mdp = mp = mp->m_next;
333 		if (mp == NULL)
334 			return (EBADRPC);
335 		left = mp->m_len;
336 		*dposp = mtod(mp, caddr_t);
337 	}
338 	if (left >= siz) {
339 		*cp2 = *dposp;
340 		*dposp += siz;
341 	} else if (mp->m_next == NULL) {
342 		return (EBADRPC);
343 	} else if (siz > MHLEN) {
344 		panic("nfs S too big");
345 	} else {
346 		/* Iff update, you can overwrite, else must alloc new mbuf */
347 		if (updateflg) {
348 			NFSMINOFF(mp);
349 		} else {
350 			MGET(mp2, M_WAIT, MT_DATA);
351 			mp2->m_next = mp->m_next;
352 			mp->m_next = mp2;
353 			mp->m_len -= left;
354 			mp = mp2;
355 		}
356 		*cp2 = p = mtod(mp, caddr_t);
357 		bcopy(*dposp, p, left);		/* Copy what was left */
358 		siz2 = siz-left;
359 		p += left;
360 		mp2 = mp->m_next;
361 		/* Loop around copying up the siz2 bytes */
362 		while (siz2 > 0) {
363 			if (mp2 == NULL)
364 				return (EBADRPC);
365 			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
366 			if (xfer > 0) {
367 				bcopy(mtod(mp2, caddr_t), p, xfer);
368 				NFSMADV(mp2, xfer);
369 				mp2->m_len -= xfer;
370 				p += xfer;
371 				siz2 -= xfer;
372 			}
373 			if (siz2 > 0)
374 				mp2 = mp2->m_next;
375 		}
376 		mp->m_len = siz;
377 		*mdp = mp2;
378 		*dposp = mtod(mp2, caddr_t);
379 	}
380 	return (0);
381 }
382 
383 /*
384  * Advance the position in the mbuf chain.
385  */
386 nfs_adv(mdp, dposp, offs, left)
387 	struct mbuf **mdp;
388 	caddr_t *dposp;
389 	int offs;
390 	int left;
391 {
392 	register struct mbuf *m;
393 	register int s;
394 
395 	m = *mdp;
396 	s = left;
397 	while (s < offs) {
398 		offs -= s;
399 		m = m->m_next;
400 		if (m == NULL)
401 			return (EBADRPC);
402 		s = m->m_len;
403 	}
404 	*mdp = m;
405 	*dposp = mtod(m, caddr_t)+offs;
406 	return (0);
407 }
408 
409 /*
410  * Copy a string into mbufs for the hard cases...
411  */
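/*
 * (This produces the XDR encoding of a string: a 4-byte length word
 * followed by the bytes themselves, zero-padded to the next 4-byte
 * boundary.)
 */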
412 nfsm_strtmbuf(mb, bpos, cp, siz)
413 	struct mbuf **mb;
414 	char **bpos;
415 	char *cp;
416 	long siz;
417 {
418 	register struct mbuf *m1, *m2;
419 	long left, xfer, len, tlen;
420 	u_long *p;
421 	int putsize;
422 
423 	putsize = 1;
424 	m2 = *mb;
425 	left = NFSMSIZ(m2)-m2->m_len;
426 	if (left > 0) {
427 		p = ((u_long *)(*bpos));
428 		*p++ = txdr_unsigned(siz);
429 		putsize = 0;
430 		left -= NFSX_UNSIGNED;
431 		m2->m_len += NFSX_UNSIGNED;
432 		if (left > 0) {
433 			bcopy(cp, (caddr_t) p, left);
434 			siz -= left;
435 			cp += left;
436 			m2->m_len += left;
437 			left = 0;
438 		}
439 	}
440 	/* Loop around adding mbufs */
441 	while (siz > 0) {
442 		MGET(m1, M_WAIT, MT_DATA);
443 		if (siz > MLEN)
444 			MCLGET(m1, M_WAIT);
445 		m1->m_len = NFSMSIZ(m1);
446 		m2->m_next = m1;
447 		m2 = m1;
448 		p = mtod(m1, u_long *);
449 		tlen = 0;
450 		if (putsize) {
451 			*p++ = txdr_unsigned(siz);
452 			m1->m_len -= NFSX_UNSIGNED;
453 			tlen = NFSX_UNSIGNED;
454 			putsize = 0;
455 		}
456 		if (siz < m1->m_len) {
457 			len = nfsm_rndup(siz);
458 			xfer = siz;
459 			if (xfer < len)
460 				*(p+(xfer>>2)) = 0;
461 		} else {
462 			xfer = len = m1->m_len;
463 		}
464 		bcopy(cp, (caddr_t) p, xfer);
465 		m1->m_len = len+tlen;
466 		siz -= xfer;
467 		cp += xfer;
468 	}
469 	*mb = m1;
470 	*bpos = mtod(m1, caddr_t)+m1->m_len;
471 	return (0);
472 }
473 
474 /*
475  * Called once to initialize data structures...
476  */
477 nfs_init()
478 {
479 	register int i;
480 
481 	rpc_vers = txdr_unsigned(RPC_VER2);
482 	rpc_call = txdr_unsigned(RPC_CALL);
483 	rpc_reply = txdr_unsigned(RPC_REPLY);
484 	rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
485 	rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
486 	rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
487 	rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
488 	nfs_vers = txdr_unsigned(NFS_VER2);
489 	nfs_prog = txdr_unsigned(NFS_PROG);
490 	nfs_true = txdr_unsigned(TRUE);
491 	nfs_false = txdr_unsigned(FALSE);
492 	/* Loop thru nfs procids */
493 	for (i = 0; i < NFS_NPROCS; i++)
494 		nfs_procids[i] = txdr_unsigned(i);
495 	/* Ensure async daemons disabled */
496 	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
497 		nfs_iodwant[i] = (struct proc *)0;
498 	nfs_xdrneg1 = txdr_unsigned(-1);
499 	nfs_nhinit();			/* Init the nfsnode table */
500 	nfsrv_initcache();		/* Init the server request cache */
501 	rminit(nfsmap, (long)NFS_MAPREG, (long)1, "nfs mapreg", NFS_MSIZ);
502 
503 	/*
504 	 * Initialize reply list and start timer
505 	 */
506 	nfsreqh.r_prev = nfsreqh.r_next = &nfsreqh;
507 	nfs_timer();
508 }
509 
510 /*
511  * Fill in the rest of the rpc_unixauth and return it
512  */
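/*
 * The buffer built here is the RFC 1057 AUTH_UNIX credential (flavor,
 * body length, stamp (hostid), machine name, uid, gid, gids[]) followed
 * by an AUTH_NULL verifier (two zero words).  The body length field is
 * filled in last, once the number of groups is known.
 */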
513 static char *nfs_unixauth(cr)
514 	register struct ucred *cr;
515 {
516 	register u_long *p;
517 	register int i;
518 	int ngr;
519 
520 	/* Maybe someday there should be a cache of AUTH_SHORT's */
521 	if ((p = rpc_uidp) == NULL) {
522 #ifdef FILLINHOST
523 		i = nfsm_rndup(hostnamelen)+(25*NFSX_UNSIGNED);
524 #else
525 		i = 25*NFSX_UNSIGNED;
526 #endif
527 		MALLOC(p, u_long *, i, M_TEMP, M_WAITOK);
528 		bzero((caddr_t)p, i);
529 		rpc_unixauth = (caddr_t)p;
530 		*p++ = txdr_unsigned(RPCAUTH_UNIX);
531 		p++;	/* Fill in size later */
532 		*p++ = hostid;
533 #ifdef FILLINHOST
534 		*p++ = txdr_unsigned(hostnamelen);
535 		i = nfsm_rndup(hostnamelen);
536 		bcopy(hostname, (caddr_t)p, hostnamelen);
537 		p += (i>>2);
538 #else
539 		*p++ = 0;
540 #endif
541 		rpc_uidp = p;
542 	}
543 	*p++ = txdr_unsigned(cr->cr_uid);
544 	*p++ = txdr_unsigned(cr->cr_groups[0]);
545 	ngr = ((cr->cr_ngroups - 1) > numgrps) ? numgrps : (cr->cr_ngroups - 1);
546 	*p++ = txdr_unsigned(ngr);
547 	for (i = 1; i <= ngr; i++)
548 		*p++ = txdr_unsigned(cr->cr_groups[i]);
549 	/* And add the AUTH_NULL */
550 	*p++ = 0;
551 	*p = 0;
552 	i = (((caddr_t)p)-rpc_unixauth)-12;
553 	p = (u_long *)(rpc_unixauth+4);
554 	*p = txdr_unsigned(i);
555 	return (rpc_unixauth);
556 }
557 
558 /*
559  * Attribute cache routines.
560  * nfs_loadattrcache() - loads or updates the cache contents from attributes
561  *	that are on the mbuf list
562  * nfs_getattrcache() - returns valid attributes if found in cache, returns
563  *	error otherwise
564  */
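/*
 * Cached attributes are considered valid for NFS_ATTRTIMEO seconds
 * (see nfs_getattrcache() below).
 */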
565 
566 /*
567  * Load the attribute cache (that lives in the nfsnode entry) with
568  * the values on the mbuf list and,
569  * iff vaper is not NULL,
570  *    copy the attributes to *vaper as well.
571  */
572 nfs_loadattrcache(vpp, mdp, dposp, vaper)
573 	struct vnode **vpp;
574 	struct mbuf **mdp;
575 	caddr_t *dposp;
576 	struct vattr *vaper;
577 {
578 	register struct vnode *vp = *vpp;
579 	register struct vattr *vap;
580 	register struct nfsv2_fattr *fp;
581 	extern struct vnodeops spec_nfsv2nodeops;
582 	register struct nfsnode *np;
583 	register long t1;
584 	caddr_t dpos, cp2;
585 	int error = 0;
586 	struct mbuf *md;
587 	enum vtype type;
588 	long rdev;
589 	struct timeval mtime;
590 	struct vnode *nvp;
591 
592 	md = *mdp;
593 	dpos = *dposp;
594 	t1 = (mtod(md, caddr_t)+md->m_len)-dpos;
595 	if (error = nfsm_disct(&md, &dpos, NFSX_FATTR, t1, TRUE, &cp2))
596 		return (error);
597 	fp = (struct nfsv2_fattr *)cp2;
598 	type = nfstov_type(fp->fa_type);
599 	rdev = fxdr_unsigned(long, fp->fa_rdev);
600 	fxdr_time(&fp->fa_mtime, &mtime);
601 	/*
602 	 * If v_type == VNON it is a new node, so fill in the v_type,
603 	 * n_mtime fields. Check to see if it represents a special
604 	 * device, and if so, check for a possible alias. Once the
605 	 * correct vnode has been obtained, fill in the rest of the
606 	 * information.
607 	 */
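	/*
	 * (NFSv2 has no distinct FIFO file type on the wire; a character
	 * device with an rdev of 0xffffffff is taken here to be a named
	 * pipe, which is what the VCHR test below recognizes.)
	 */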
608 	np = VTONFS(vp);
609 	if (vp->v_type == VNON) {
610 		if (type == VCHR && rdev == 0xffffffff)
611 			vp->v_type = type = VFIFO;
612 		else
613 			vp->v_type = type;
614 		if (vp->v_type == VFIFO) {
615 #ifdef FIFO
616 			extern struct vnodeops fifo_nfsv2nodeops;
617 			vp->v_op = &fifo_nfsv2nodeops;
618 #else
619 			return (EOPNOTSUPP);
620 #endif /* FIFO */
621 		}
622 		if (vp->v_type == VCHR || vp->v_type == VBLK) {
623 			vp->v_op = &spec_nfsv2nodeops;
624 			if (nvp = checkalias(vp, (dev_t)rdev, vp->v_mount)) {
625 				/*
626 				 * Reinitialize aliased node.
627 				 */
628 				np = VTONFS(nvp);
629 				np->n_vnode = nvp;
630 				np->n_flag = 0;
631 				nfs_lock(nvp);
632 				bcopy((caddr_t)&VTONFS(vp)->n_fh,
633 					(caddr_t)&np->n_fh, NFSX_FH);
634 				insque(np, nfs_hash(&np->n_fh));
635 				np->n_attrstamp = 0;
636 				np->n_sillyrename = (struct sillyrename *)0;
637 				/*
638 				 * Discard unneeded vnode and update actual one
639 				 */
640 				vput(vp);
641 				*vpp = nvp;
642 			}
643 		}
644 		np->n_mtime = mtime.tv_sec;
645 	}
646 	vap = &np->n_vattr;
647 	vap->va_type = type;
648 	vap->va_mode = nfstov_mode(fp->fa_mode);
649 	vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
650 	vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
651 	vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
652 	vap->va_size = fxdr_unsigned(u_long, fp->fa_size);
653 	if ((np->n_flag & NMODIFIED) == 0 || vap->va_size > np->n_size) {
654 		np->n_size = vap->va_size;
655 		vnode_pager_setsize(vp, np->n_size);
656 	}
657 	vap->va_size_rsv = 0;
658 	vap->va_blocksize = fxdr_unsigned(long, fp->fa_blocksize);
659 	vap->va_rdev = (dev_t)rdev;
660 	vap->va_bytes = fxdr_unsigned(long, fp->fa_blocks) * NFS_FABLKSIZE;
661 	vap->va_bytes_rsv = 0;
662 	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
663 	vap->va_fileid = fxdr_unsigned(long, fp->fa_fileid);
664 	vap->va_atime.tv_sec = fxdr_unsigned(long, fp->fa_atime.tv_sec);
665 	vap->va_atime.tv_usec = 0;
666 	vap->va_flags = fxdr_unsigned(u_long, fp->fa_atime.tv_usec);
667 	vap->va_mtime = mtime;
668 	vap->va_ctime.tv_sec = fxdr_unsigned(long, fp->fa_ctime.tv_sec);
669 	vap->va_ctime.tv_usec = 0;
670 	vap->va_gen = fxdr_unsigned(u_long, fp->fa_ctime.tv_usec);
671 	np->n_attrstamp = time.tv_sec;
672 	*dposp = dpos;
673 	*mdp = md;
674 	if (vaper != NULL) {
675 		bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
676 		if ((np->n_flag & NMODIFIED) && (np->n_size > vap->va_size))
677 			vaper->va_size = np->n_size;
678 	}
679 	return (0);
680 }
681 
682 /*
683  * Check the time stamp.
684  * If the cache is valid, copy contents to *vap and return 0;
685  * otherwise return an error.
686  */
687 nfs_getattrcache(vp, vap)
688 	register struct vnode *vp;
689 	struct vattr *vap;
690 {
691 	register struct nfsnode *np;
692 
693 	np = VTONFS(vp);
694 	if ((time.tv_sec-np->n_attrstamp) < NFS_ATTRTIMEO) {
695 		nfsstats.attrcache_hits++;
696 		bcopy((caddr_t)&np->n_vattr,(caddr_t)vap,sizeof(struct vattr));
697 		if ((np->n_flag & NMODIFIED) == 0) {
698 			np->n_size = vap->va_size;
699 			vnode_pager_setsize(vp, np->n_size);
700 		} else if (np->n_size > vap->va_size)
701 			vap->va_size = np->n_size;
702 		return (0);
703 	} else {
704 		nfsstats.attrcache_misses++;
705 		return (ENOENT);
706 	}
707 }
708 
709 /*
710  * Set up nameidata for a namei() call and do it
711  */
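/*
 * The name is pulled out of the RPC arguments in the mbuf chain rather
 * than out of user space, so the nameidata is set up with UIO_SYSSPACE
 * and the NOCROSSMOUNT, REMOTE and HASBUF flags before namei() is called.
 */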
712 nfs_namei(ndp, fhp, len, mdp, dposp)
713 	register struct nameidata *ndp;
714 	fhandle_t *fhp;
715 	int len;
716 	struct mbuf **mdp;
717 	caddr_t *dposp;
718 {
719 	register int i, rem;
720 	register struct mbuf *md;
721 	register char *cp;
722 	struct vnode *dp;
723 	int flag;
724 	int error;
725 
726 	if ((ndp->ni_nameiop & HASBUF) == 0) {
727 		flag = ndp->ni_nameiop & OPFLAG;
728 		/*
729 		 * Copy the name from the mbuf list to the d_name field of ndp
730 		 * and set the various ndp fields appropriately.
731 		 */
732 		cp = *dposp;
733 		md = *mdp;
734 		rem = mtod(md, caddr_t)+md->m_len-cp;
735 		ndp->ni_hash = 0;
736 		for (i = 0; i < len;) {
737 			while (rem == 0) {
738 				md = md->m_next;
739 				if (md == NULL)
740 					return (EBADRPC);
741 				cp = mtod(md, caddr_t);
742 				rem = md->m_len;
743 			}
744 			if (*cp == '\0' || *cp == '/')
745 				return (EINVAL);
746 			if (*cp & 0200)
747 				if ((*cp&0377) == ('/'|0200) || flag != DELETE)
748 					return (EINVAL);
749 			ndp->ni_dent.d_name[i++] = *cp;
750 			ndp->ni_hash += (unsigned char)*cp * i;
751 			cp++;
752 			rem--;
753 		}
754 		*mdp = md;
755 		*dposp = cp;
756 		len = nfsm_rndup(len)-len;
757 		if (len > 0) {
758 			if (rem < len) {
759 				if (error = nfs_adv(mdp, dposp, len, rem))
760 					return (error);
761 			} else
762 				*dposp += len;
763 		}
764 	} else
765 		i = len;
766 	ndp->ni_namelen = i;
767 	ndp->ni_dent.d_namlen = i;
768 	ndp->ni_dent.d_name[i] = '\0';
769 	ndp->ni_segflg = UIO_SYSSPACE;
770 	ndp->ni_pathlen = 1;
771 	ndp->ni_pnbuf = ndp->ni_dirp = ndp->ni_ptr = &ndp->ni_dent.d_name[0];
772 	ndp->ni_next = &ndp->ni_dent.d_name[i];
773 	ndp->ni_nameiop |= (NOCROSSMOUNT | REMOTE | HASBUF);
774 
775 	if (error = nfsrv_fhtovp(fhp, FALSE, &dp, ndp->ni_cred))
776 		return (error);
777 	if (dp->v_type != VDIR) {
778 		vrele(dp);
779 		return (ENOTDIR);
780 	}
781 	/*
782 	 * Must set current directory here to avoid confusion in namei()
783 	 * called from rename()
784 	 */
785 	ndp->ni_cdir = dp;
786 	ndp->ni_rdir = NULLVP;
787 
788 	/*
789 	 * And call namei() to do the real work
790 	 */
791 	error = namei(ndp);
792 	vrele(dp);
793 	return (error);
794 }
795 
796 /*
797  * A fiddled version of m_adj() that ensures null fill to a long
798  * boundary and only trims off the back end
799  */
800 nfsm_adj(mp, len, nul)
801 	struct mbuf *mp;
802 	register int len;
803 	int nul;
804 {
805 	register struct mbuf *m;
806 	register int count, i;
807 	register char *cp;
808 
809 	/*
810 	 * Trim from tail.  Scan the mbuf chain,
811 	 * calculating its length and finding the last mbuf.
812 	 * If the adjustment only affects this mbuf, then just
813 	 * adjust and return.  Otherwise, rescan and truncate
814 	 * after the remaining size.
815 	 */
816 	count = 0;
817 	m = mp;
818 	for (;;) {
819 		count += m->m_len;
820 		if (m->m_next == (struct mbuf *)0)
821 			break;
822 		m = m->m_next;
823 	}
824 	if (m->m_len > len) {
825 		m->m_len -= len;
826 		if (nul > 0) {
827 			cp = mtod(m, caddr_t)+m->m_len-nul;
828 			for (i = 0; i < nul; i++)
829 				*cp++ = '\0';
830 		}
831 		return;
832 	}
833 	count -= len;
834 	if (count < 0)
835 		count = 0;
836 	/*
837 	 * Correct length for chain is "count".
838 	 * Find the mbuf with last data, adjust its length,
839 	 * and toss data from remaining mbufs on chain.
840 	 */
841 	for (m = mp; m; m = m->m_next) {
842 		if (m->m_len >= count) {
843 			m->m_len = count;
844 			if (nul > 0) {
845 				cp = mtod(m, caddr_t)+m->m_len-nul;
846 				for (i = 0; i < nul; i++)
847 					*cp++ = '\0';
848 			}
849 			break;
850 		}
851 		count -= m->m_len;
852 	}
853 	while (m = m->m_next)
854 		m->m_len = 0;
855 }
856 
857 /*
858  * nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
859  * 	- look up fsid in mount list (if not found ret error)
860  *	- check that it is exported
861  *	- get vp by calling VFS_FHTOVP() macro
862  *	- if not lockflag unlock it with VOP_UNLOCK()
863  *	- if cred->cr_uid == 0 set it to m_exroot
864  */
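/*
 * (An NFS file handle as used here is just the file system id plus the
 * file system's own fid, so the lookup is getvfs() on fh_fsid followed
 * by VFS_FHTOVP() on fh_fid.)
 */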
865 nfsrv_fhtovp(fhp, lockflag, vpp, cred)
866 	fhandle_t *fhp;
867 	int lockflag;
868 	struct vnode **vpp;
869 	struct ucred *cred;
870 {
871 	register struct mount *mp;
872 
873 	if ((mp = getvfs(&fhp->fh_fsid)) == NULL)
874 		return (ESTALE);
875 	if ((mp->mnt_flag & MNT_EXPORTED) == 0)
876 		return (EACCES);
877 	if (VFS_FHTOVP(mp, &fhp->fh_fid, vpp))
878 		return (ESTALE);
879 	if (cred->cr_uid == 0)
880 		cred->cr_uid = mp->mnt_exroot;
881 	if (!lockflag)
882 		VOP_UNLOCK(*vpp);
883 	return (0);
884 }
885 
886 /*
887  * These two functions implement nfs rpc compression.
888  * The algorithm is a trivial run length encoding of '\0' bytes: a run of
889  * 2 - 17 zeros is replaced by a single byte whose high order nibble is
890  * hex "e" and whose low order nibble is the run length minus 2. Any data
891  * byte with a high order nibble of hex "e" is byte stuffed (doubled).
892  * The compressed data is padded with 0x0 bytes to an even multiple of
893  * 4 bytes in length to avoid any weird long pointer alignments.
894  * If compression/uncompression is unsuccessful, the original mbuf list
895  * is returned.
896  * The first four bytes (the XID) are left uncompressed and the fifth
897  * byte is set to 0x1 for request and 0x2 for reply.
898  * An uncompressed RPC will always have the fifth byte == 0x0.
899  */
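/*
 * For example, a run of 5 zero bytes is emitted as the single byte 0xe3
 * (0xe0 | (5 - 2)), and a literal data byte such as 0xe3 is byte stuffed,
 * i.e. sent as 0xe3 0xe3.  (This assumes NFSCRLE()/NFSCRL in
 * nfscompress.h encode the scheme exactly as described above.)
 */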
900 struct mbuf *
901 nfs_compress(m0)
902 	struct mbuf *m0;
903 {
904 	register u_char ch, nextch;
905 	register int i, rlelast;
906 	register u_char *ip, *op;
907 	register int ileft, oleft, noteof;
908 	register struct mbuf *m, *om;
909 	struct mbuf **mp, *retm;
910 	int olen, clget;
911 
912 	i = rlelast = 0;
913 	noteof = 1;
914 	m = m0;
915 	if (m->m_len < 12)
916 		return (m0);
917 	if (m->m_pkthdr.len >= MINCLSIZE)
918 		clget = 1;
919 	else
920 		clget = 0;
921 	ileft = m->m_len - 9;
922 	ip = mtod(m, u_char *);
923 	MGETHDR(om, M_WAIT, MT_DATA);
924 	if (clget)
925 		MCLGET(om, M_WAIT);
926 	retm = om;
927 	mp = &om->m_next;
928 	olen = om->m_len = 5;
929 	oleft = M_TRAILINGSPACE(om);
930 	op = mtod(om, u_char *);
931 	*((u_long *)op) = *((u_long *)ip);
932 	ip += 7;
933 	op += 4;
934 	*op++ = *ip++ + 1;
935 	nextch = *ip++;
936 	while (noteof) {
937 		ch = nextch;
938 		if (ileft == 0) {
939 			do {
940 				m = m->m_next;
941 			} while (m && m->m_len == 0);
942 			if (m) {
943 				ileft = m->m_len;
944 				ip = mtod(m, u_char *);
945 			} else {
946 				noteof = 0;
947 				nextch = 0x1;
948 				goto doit;
949 			}
950 		}
951 		nextch = *ip++;
952 		ileft--;
953 doit:
954 		if (ch == '\0') {
955 			if (++i == NFSC_MAX || nextch != '\0') {
956 				if (i < 2) {
957 					nfscput('\0');
958 				} else {
959 					if (rlelast == i) {
960 						nfscput('\0');
961 						i--;
962 					}
963 					if (NFSCRLE(i) == (nextch & 0xff)) {
964 						i--;
965 						if (i < 2) {
966 							nfscput('\0');
967 						} else {
968 							nfscput(NFSCRLE(i));
969 						}
970 						nfscput('\0');
971 						rlelast = 0;
972 					} else {
973 						nfscput(NFSCRLE(i));
974 						rlelast = i;
975 					}
976 				}
977 				i = 0;
978 			}
979 		} else {
980 			if ((ch & NFSCRL) == NFSCRL) {
981 				nfscput(ch);
982 			}
983 			nfscput(ch);
984 			i = rlelast = 0;
985 		}
986 	}
987 	if (olen < m0->m_pkthdr.len) {
988 		m_freem(m0);
989 		if (i = (olen & 0x3)) {
990 			i = 4 - i;
991 			while (i-- > 0) {
992 				nfscput('\0');
993 			}
994 		}
995 		retm->m_pkthdr.len = olen;
996 		retm->m_pkthdr.rcvif = (struct ifnet *)0;
997 		return (retm);
998 	} else {
999 		m_freem(retm);
1000 		return (m0);
1001 	}
1002 }
1003 
1004 struct mbuf *
1005 nfs_uncompress(m0)
1006 	struct mbuf *m0;
1007 {
1008 	register u_char cp, nextcp, *ip, *op;
1009 	register struct mbuf *m, *om;
1010 	struct mbuf *retm, **mp;
1011 	int i, j, noteof, clget, ileft, oleft, olen;
1012 
1013 	m = m0;
1014 	i = 0;
1015 	while (m && i < MINCLSIZE) {
1016 		i += m->m_len;
1017 		m = m->m_next;
1018 	}
1019 	if (i < 6)
1020 		return (m0);
1021 	if (i >= MINCLSIZE)
1022 		clget = 1;
1023 	else
1024 		clget = 0;
1025 	m = m0;
1026 	MGET(om, M_WAIT, MT_DATA);
1027 	if (clget)
1028 		MCLGET(om, M_WAIT);
1029 	olen = om->m_len = 8;
1030 	oleft = M_TRAILINGSPACE(om);
1031 	op = mtod(om, u_char *);
1032 	retm = om;
1033 	mp = &om->m_next;
1034 	if (m->m_len >= 6) {
1035 		ileft = m->m_len - 6;
1036 		ip = mtod(m, u_char *);
1037 		*((u_long *)op) = *((u_long *)ip);
1038 		bzero(op + 4, 3);
1039 		ip += 4;
1040 		op += 7;
1041 		if (*ip == '\0') {
1042 			m_freem(om);
1043 			return (m0);
1044 		}
1045 		*op++ = *ip++ - 1;
1046 		cp = *ip++;
1047 	} else {
1048 		ileft = m->m_len;
1049 		ip = mtod(m, u_char *);
1050 		nfscget(*op++);
1051 		nfscget(*op++);
1052 		nfscget(*op++);
1053 		nfscget(*op++);
1054 		bzero(op, 3);
1055 		op += 3;
1056 		nfscget(*op);
1057 		if (*op == '\0') {
1058 			m_freem(om);
1059 			return (m0);
1060 		}
1061 		(*op)--;
1062 		op++;
1063 		nfscget(cp);
1064 	}
1065 	noteof = 1;
1066 	while (noteof) {
1067 		if ((cp & NFSCRL) == NFSCRL) {
1068 			nfscget(nextcp);
1069 			if (cp == nextcp) {
1070 				nfscput(cp);
1071 				goto readit;
1072 			} else {
1073 				i = (cp & 0xf) + 2;
1074 				for (j = 0; j < i; j++) {
1075 					nfscput('\0');
1076 				}
1077 				cp = nextcp;
1078 			}
1079 		} else {
1080 			nfscput(cp);
1081 readit:
1082 			nfscget(cp);
1083 		}
1084 	}
1085 	m_freem(m0);
1086 	if (i = (olen & 0x3))
1087 		om->m_len -= i;
1088 	return (retm);
1089 }
1090