1 /*	$OpenBSD: nfs_subs.c,v 1.144 2020/06/24 22:03:44 cheloha Exp $	*/
2 /*	$NetBSD: nfs_subs.c,v 1.27.4.3 1996/07/08 20:34:24 jtc Exp $	*/
3 
4 /*
5  * Copyright (c) 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * Rick Macklem at The University of Guelph.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	@(#)nfs_subs.c	8.8 (Berkeley) 5/22/95
36  */
37 
38 
39 /*
40  * These functions support the macros and help fiddle mbuf chains for
41  * the nfs op functions. They do things like create the rpc header and
42  * copy data between mbuf chains and uio lists.
43  */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/mount.h>
48 #include <sys/vnode.h>
49 #include <sys/namei.h>
50 #include <sys/mbuf.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/stat.h>
54 #include <sys/pool.h>
55 #include <sys/time.h>
56 
57 #include <nfs/rpcv2.h>
58 #include <nfs/nfsproto.h>
59 #include <nfs/nfsnode.h>
60 #include <nfs/nfs.h>
61 #include <nfs/xdr_subs.h>
62 #include <nfs/nfsm_subs.h>
63 #include <nfs/nfsmount.h>
64 #include <nfs/nfs_var.h>
65 
66 #include <uvm/uvm_extern.h>
67 
68 #include <netinet/in.h>
69 
70 #include <crypto/idgen.h>
71 
72 int	nfs_attrtimeo(struct nfsnode *np);
73 u_int32_t nfs_get_xid(void);
74 
75 /*
76  * Data items converted to xdr at startup, since they are constant
77  * This is kinda hokey, but may save a little time doing byte swaps
78  */
79 u_int32_t nfs_xdrneg1;
80 u_int32_t rpc_call, rpc_vers, rpc_reply, rpc_msgdenied, rpc_autherr,
81 	rpc_mismatch, rpc_auth_unix, rpc_msgaccepted;
82 u_int32_t nfs_prog, nfs_true, nfs_false;
83 
84 /* And other global data */
85 nfstype nfsv2_type[9] = { NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFNON,
86 		      NFCHR, NFNON };
87 nfstype nfsv3_type[9] = { NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFSOCK,
88 		      NFFIFO, NFNON };
89 enum vtype nv2tov_type[8] = { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON, VNON };
90 enum vtype nv3tov_type[8]={ VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO };
91 int nfs_ticks;
92 struct nfsstats nfsstats;
93 
94 /*
95  * Mapping of old NFS Version 2 RPC numbers to generic numbers.
96  */
97 int nfsv3_procid[NFS_NPROCS] = {
98 	NFSPROC_NULL,
99 	NFSPROC_GETATTR,
100 	NFSPROC_SETATTR,
101 	NFSPROC_NOOP,
102 	NFSPROC_LOOKUP,
103 	NFSPROC_READLINK,
104 	NFSPROC_READ,
105 	NFSPROC_NOOP,
106 	NFSPROC_WRITE,
107 	NFSPROC_CREATE,
108 	NFSPROC_REMOVE,
109 	NFSPROC_RENAME,
110 	NFSPROC_LINK,
111 	NFSPROC_SYMLINK,
112 	NFSPROC_MKDIR,
113 	NFSPROC_RMDIR,
114 	NFSPROC_READDIR,
115 	NFSPROC_FSSTAT,
116 	NFSPROC_NOOP,
117 	NFSPROC_NOOP,
118 	NFSPROC_NOOP,
119 	NFSPROC_NOOP,
120 	NFSPROC_NOOP
121 };
122 
123 /*
124  * and the reverse mapping from generic to Version 2 procedure numbers
125  */
126 int nfsv2_procid[NFS_NPROCS] = {
127 	NFSV2PROC_NULL,
128 	NFSV2PROC_GETATTR,
129 	NFSV2PROC_SETATTR,
130 	NFSV2PROC_LOOKUP,
131 	NFSV2PROC_NOOP,
132 	NFSV2PROC_READLINK,
133 	NFSV2PROC_READ,
134 	NFSV2PROC_WRITE,
135 	NFSV2PROC_CREATE,
136 	NFSV2PROC_MKDIR,
137 	NFSV2PROC_SYMLINK,
138 	NFSV2PROC_CREATE,
139 	NFSV2PROC_REMOVE,
140 	NFSV2PROC_RMDIR,
141 	NFSV2PROC_RENAME,
142 	NFSV2PROC_LINK,
143 	NFSV2PROC_READDIR,
144 	NFSV2PROC_NOOP,
145 	NFSV2PROC_STATFS,
146 	NFSV2PROC_NOOP,
147 	NFSV2PROC_NOOP,
148 	NFSV2PROC_NOOP,
149 	NFSV2PROC_NOOP
150 };
151 
152 /*
153  * Maps errno values to nfs error numbers.
154  * Use NFSERR_IO as the catch all for ones not specifically defined in
155  * RFC 1094.
156  */
157 static u_char nfsrv_v2errmap[] = {
158   NFSERR_PERM,	NFSERR_NOENT,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
159   NFSERR_NXIO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
160   NFSERR_IO,	NFSERR_IO,	NFSERR_ACCES,	NFSERR_IO,	NFSERR_IO,
161   NFSERR_IO,	NFSERR_EXIST,	NFSERR_IO,	NFSERR_NODEV,	NFSERR_NOTDIR,
162   NFSERR_ISDIR,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
163   NFSERR_IO,	NFSERR_FBIG,	NFSERR_NOSPC,	NFSERR_IO,	NFSERR_ROFS,
164   NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
165   NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
166   NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
167   NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
168   NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
169   NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
170   NFSERR_IO,	NFSERR_IO,	NFSERR_NAMETOL,	NFSERR_IO,	NFSERR_IO,
171   NFSERR_NOTEMPTY, NFSERR_IO,	NFSERR_IO,	NFSERR_DQUOT,	NFSERR_STALE
172   /* Everything after this maps to NFSERR_IO, so far */
173 };
174 
175 /*
176  * Maps errno values to nfs error numbers.
177  * Although it is not obvious whether or not NFS clients really care if
178  * a returned error value is in the specified list for the procedure, the
179  * safest thing to do is filter them appropriately. For Version 2, the
180  * X/Open XNFS document is the only specification that defines error values
181  * for each RPC (The RFC simply lists all possible error values for all RPCs),
182  * so I have decided to not do this for Version 2.
183  * The first entry is the default error return and the rest are the valid
184  * errors for that RPC in increasing numeric order.
185  */
186 static short nfsv3err_null[] = {
187 	0,
188 	0,
189 };
190 
191 static short nfsv3err_getattr[] = {
192 	NFSERR_IO,
193 	NFSERR_IO,
194 	NFSERR_STALE,
195 	NFSERR_BADHANDLE,
196 	NFSERR_SERVERFAULT,
197 	0,
198 };
199 
200 static short nfsv3err_setattr[] = {
201 	NFSERR_IO,
202 	NFSERR_PERM,
203 	NFSERR_IO,
204 	NFSERR_ACCES,
205 	NFSERR_INVAL,
206 	NFSERR_NOSPC,
207 	NFSERR_ROFS,
208 	NFSERR_DQUOT,
209 	NFSERR_STALE,
210 	NFSERR_BADHANDLE,
211 	NFSERR_NOT_SYNC,
212 	NFSERR_SERVERFAULT,
213 	0,
214 };
215 
216 static short nfsv3err_lookup[] = {
217 	NFSERR_IO,
218 	NFSERR_NOENT,
219 	NFSERR_IO,
220 	NFSERR_ACCES,
221 	NFSERR_NOTDIR,
222 	NFSERR_NAMETOL,
223 	NFSERR_STALE,
224 	NFSERR_BADHANDLE,
225 	NFSERR_SERVERFAULT,
226 	0,
227 };
228 
229 static short nfsv3err_access[] = {
230 	NFSERR_IO,
231 	NFSERR_IO,
232 	NFSERR_STALE,
233 	NFSERR_BADHANDLE,
234 	NFSERR_SERVERFAULT,
235 	0,
236 };
237 
238 static short nfsv3err_readlink[] = {
239 	NFSERR_IO,
240 	NFSERR_IO,
241 	NFSERR_ACCES,
242 	NFSERR_INVAL,
243 	NFSERR_STALE,
244 	NFSERR_BADHANDLE,
245 	NFSERR_NOTSUPP,
246 	NFSERR_SERVERFAULT,
247 	0,
248 };
249 
250 static short nfsv3err_read[] = {
251 	NFSERR_IO,
252 	NFSERR_IO,
253 	NFSERR_NXIO,
254 	NFSERR_ACCES,
255 	NFSERR_INVAL,
256 	NFSERR_STALE,
257 	NFSERR_BADHANDLE,
258 	NFSERR_SERVERFAULT,
259 	0,
260 };
261 
262 static short nfsv3err_write[] = {
263 	NFSERR_IO,
264 	NFSERR_IO,
265 	NFSERR_ACCES,
266 	NFSERR_INVAL,
267 	NFSERR_FBIG,
268 	NFSERR_NOSPC,
269 	NFSERR_ROFS,
270 	NFSERR_DQUOT,
271 	NFSERR_STALE,
272 	NFSERR_BADHANDLE,
273 	NFSERR_SERVERFAULT,
274 	0,
275 };
276 
277 static short nfsv3err_create[] = {
278 	NFSERR_IO,
279 	NFSERR_IO,
280 	NFSERR_ACCES,
281 	NFSERR_EXIST,
282 	NFSERR_NOTDIR,
283 	NFSERR_NOSPC,
284 	NFSERR_ROFS,
285 	NFSERR_NAMETOL,
286 	NFSERR_DQUOT,
287 	NFSERR_STALE,
288 	NFSERR_BADHANDLE,
289 	NFSERR_NOTSUPP,
290 	NFSERR_SERVERFAULT,
291 	0,
292 };
293 
294 static short nfsv3err_mkdir[] = {
295 	NFSERR_IO,
296 	NFSERR_IO,
297 	NFSERR_ACCES,
298 	NFSERR_EXIST,
299 	NFSERR_NOTDIR,
300 	NFSERR_NOSPC,
301 	NFSERR_ROFS,
302 	NFSERR_NAMETOL,
303 	NFSERR_DQUOT,
304 	NFSERR_STALE,
305 	NFSERR_BADHANDLE,
306 	NFSERR_NOTSUPP,
307 	NFSERR_SERVERFAULT,
308 	0,
309 };
310 
311 static short nfsv3err_symlink[] = {
312 	NFSERR_IO,
313 	NFSERR_IO,
314 	NFSERR_ACCES,
315 	NFSERR_EXIST,
316 	NFSERR_NOTDIR,
317 	NFSERR_NOSPC,
318 	NFSERR_ROFS,
319 	NFSERR_NAMETOL,
320 	NFSERR_DQUOT,
321 	NFSERR_STALE,
322 	NFSERR_BADHANDLE,
323 	NFSERR_NOTSUPP,
324 	NFSERR_SERVERFAULT,
325 	0,
326 };
327 
328 static short nfsv3err_mknod[] = {
329 	NFSERR_IO,
330 	NFSERR_IO,
331 	NFSERR_ACCES,
332 	NFSERR_EXIST,
333 	NFSERR_NOTDIR,
334 	NFSERR_NOSPC,
335 	NFSERR_ROFS,
336 	NFSERR_NAMETOL,
337 	NFSERR_DQUOT,
338 	NFSERR_STALE,
339 	NFSERR_BADHANDLE,
340 	NFSERR_NOTSUPP,
341 	NFSERR_SERVERFAULT,
342 	NFSERR_BADTYPE,
343 	0,
344 };
345 
346 static short nfsv3err_remove[] = {
347 	NFSERR_IO,
348 	NFSERR_NOENT,
349 	NFSERR_IO,
350 	NFSERR_ACCES,
351 	NFSERR_NOTDIR,
352 	NFSERR_ROFS,
353 	NFSERR_NAMETOL,
354 	NFSERR_STALE,
355 	NFSERR_BADHANDLE,
356 	NFSERR_SERVERFAULT,
357 	0,
358 };
359 
360 static short nfsv3err_rmdir[] = {
361 	NFSERR_IO,
362 	NFSERR_NOENT,
363 	NFSERR_IO,
364 	NFSERR_ACCES,
365 	NFSERR_EXIST,
366 	NFSERR_NOTDIR,
367 	NFSERR_INVAL,
368 	NFSERR_ROFS,
369 	NFSERR_NAMETOL,
370 	NFSERR_NOTEMPTY,
371 	NFSERR_STALE,
372 	NFSERR_BADHANDLE,
373 	NFSERR_NOTSUPP,
374 	NFSERR_SERVERFAULT,
375 	0,
376 };
377 
378 static short nfsv3err_rename[] = {
379 	NFSERR_IO,
380 	NFSERR_NOENT,
381 	NFSERR_IO,
382 	NFSERR_ACCES,
383 	NFSERR_EXIST,
384 	NFSERR_XDEV,
385 	NFSERR_NOTDIR,
386 	NFSERR_ISDIR,
387 	NFSERR_INVAL,
388 	NFSERR_NOSPC,
389 	NFSERR_ROFS,
390 	NFSERR_MLINK,
391 	NFSERR_NAMETOL,
392 	NFSERR_NOTEMPTY,
393 	NFSERR_DQUOT,
394 	NFSERR_STALE,
395 	NFSERR_BADHANDLE,
396 	NFSERR_NOTSUPP,
397 	NFSERR_SERVERFAULT,
398 	0,
399 };
400 
401 static short nfsv3err_link[] = {
402 	NFSERR_IO,
403 	NFSERR_IO,
404 	NFSERR_ACCES,
405 	NFSERR_EXIST,
406 	NFSERR_XDEV,
407 	NFSERR_NOTDIR,
408 	NFSERR_INVAL,
409 	NFSERR_NOSPC,
410 	NFSERR_ROFS,
411 	NFSERR_MLINK,
412 	NFSERR_NAMETOL,
413 	NFSERR_DQUOT,
414 	NFSERR_STALE,
415 	NFSERR_BADHANDLE,
416 	NFSERR_NOTSUPP,
417 	NFSERR_SERVERFAULT,
418 	0,
419 };
420 
421 static short nfsv3err_readdir[] = {
422 	NFSERR_IO,
423 	NFSERR_IO,
424 	NFSERR_ACCES,
425 	NFSERR_NOTDIR,
426 	NFSERR_STALE,
427 	NFSERR_BADHANDLE,
428 	NFSERR_BAD_COOKIE,
429 	NFSERR_TOOSMALL,
430 	NFSERR_SERVERFAULT,
431 	0,
432 };
433 
434 static short nfsv3err_readdirplus[] = {
435 	NFSERR_IO,
436 	NFSERR_IO,
437 	NFSERR_ACCES,
438 	NFSERR_NOTDIR,
439 	NFSERR_STALE,
440 	NFSERR_BADHANDLE,
441 	NFSERR_BAD_COOKIE,
442 	NFSERR_NOTSUPP,
443 	NFSERR_TOOSMALL,
444 	NFSERR_SERVERFAULT,
445 	0,
446 };
447 
448 static short nfsv3err_fsstat[] = {
449 	NFSERR_IO,
450 	NFSERR_IO,
451 	NFSERR_STALE,
452 	NFSERR_BADHANDLE,
453 	NFSERR_SERVERFAULT,
454 	0,
455 };
456 
457 static short nfsv3err_fsinfo[] = {
458 	NFSERR_STALE,
459 	NFSERR_STALE,
460 	NFSERR_BADHANDLE,
461 	NFSERR_SERVERFAULT,
462 	0,
463 };
464 
465 static short nfsv3err_pathconf[] = {
466 	NFSERR_STALE,
467 	NFSERR_STALE,
468 	NFSERR_BADHANDLE,
469 	NFSERR_SERVERFAULT,
470 	0,
471 };
472 
473 static short nfsv3err_commit[] = {
474 	NFSERR_IO,
475 	NFSERR_IO,
476 	NFSERR_STALE,
477 	NFSERR_BADHANDLE,
478 	NFSERR_SERVERFAULT,
479 	0,
480 };
481 
482 static short *nfsrv_v3errmap[] = {
483 	nfsv3err_null,
484 	nfsv3err_getattr,
485 	nfsv3err_setattr,
486 	nfsv3err_lookup,
487 	nfsv3err_access,
488 	nfsv3err_readlink,
489 	nfsv3err_read,
490 	nfsv3err_write,
491 	nfsv3err_create,
492 	nfsv3err_mkdir,
493 	nfsv3err_symlink,
494 	nfsv3err_mknod,
495 	nfsv3err_remove,
496 	nfsv3err_rmdir,
497 	nfsv3err_rename,
498 	nfsv3err_link,
499 	nfsv3err_readdir,
500 	nfsv3err_readdirplus,
501 	nfsv3err_fsstat,
502 	nfsv3err_fsinfo,
503 	nfsv3err_pathconf,
504 	nfsv3err_commit,
505 };
506 
507 struct pool nfsreqpl;
508 
509 /*
510  * Create the header for an rpc request packet
511  * The hsiz is the size of the rest of the nfs request header.
512  * (just used to decide if a cluster is a good idea)
513  */
514 struct mbuf *
515 nfsm_reqhead(int hsiz)
516 {
517 	struct mbuf *mb;
518 
519 	MGET(mb, M_WAIT, MT_DATA);
520 	if (hsiz > MLEN)
521 		MCLGET(mb, M_WAIT);
522 	mb->m_len = 0;
523 
524 	/* Finally, return values */
525 	return (mb);
526 }
527 
528 /*
529  * Return an unpredictable XID in XDR form.
530  */
531 u_int32_t
532 nfs_get_xid(void)
533 {
534 	static struct idgen32_ctx nfs_xid_ctx;
535 	static int called = 0;
536 
537 	if (!called) {
538 		called = 1;
539 		idgen32_init(&nfs_xid_ctx);
540 	}
541 	return (txdr_unsigned(idgen32(&nfs_xid_ctx)));
542 }
543 
544 /*
545  * Build the RPC header and fill in the authorization info.
546  * Right now we are centered on RPCAUTH_UNIX; in the
547  * future, this function will need some love to be able to handle
548  * other authorization methods, such as Kerberos.
549  */
550 void
551 nfsm_rpchead(struct nfsreq *req, struct ucred *cr, int auth_type)
552 {
553 	struct mbuf	*mb;
554 	u_int32_t	*tl;
555 	int		i, authsiz, auth_len, ngroups;
556 
557 	KASSERT(auth_type == RPCAUTH_UNIX);
558 
559 	 * RPCAUTH_UNIX fits in an hdr mbuf; in the future, other
560 	 * authorization methods will need to figure out their own sizes
561 	 * and allocate and chain mbufs accordingly.
562 	 * and allocate and chain mbuf's accorindgly.
563 	 */
564 	mb = req->r_mreq;
565 
566 	/*
567 	 * We need to start out by finding how big the authorization cred
568 	 * and verifier are for the auth_type, to be able to correctly
569 	 * align the mbuf header/chain.
570 	 */
571 	switch (auth_type) {
572 	case RPCAUTH_UNIX:
573 		/*
574 		 * In the RPCAUTH_UNIX case, the size is the static
575 		 * part as shown in RFC 1831 + the number of groups;
576 		 * RPCAUTH_UNIX has a zero verifier.
577 		 */
578 		if (cr->cr_ngroups > req->r_nmp->nm_numgrps)
579 			ngroups = req->r_nmp->nm_numgrps;
580 		else
581 			ngroups = cr->cr_ngroups;
582 
583 		auth_len = (ngroups << 2) + 5 * NFSX_UNSIGNED;
584 		authsiz = nfsm_rndup(auth_len);
585 		/* The authorization size + the size of the static part */
586 		m_align(mb, authsiz + 10 * NFSX_UNSIGNED);
587 		break;
588 	}
589 
590 	mb->m_len = 0;
591 
592 	/* First the RPC header. */
593 	tl = nfsm_build(&mb, 6 * NFSX_UNSIGNED);
594 
595 	/* Get a new (non-zero) xid */
596 	*tl++ = req->r_xid = nfs_get_xid();
597 	*tl++ = rpc_call;
598 	*tl++ = rpc_vers;
599 	*tl++ = nfs_prog;
600 	if (ISSET(req->r_nmp->nm_flag, NFSMNT_NFSV3)) {
601 		*tl++ = txdr_unsigned(NFS_VER3);
602 		*tl = txdr_unsigned(req->r_procnum);
603 	} else {
604 		*tl++ = txdr_unsigned(NFS_VER2);
605 		*tl = txdr_unsigned(nfsv2_procid[req->r_procnum]);
606 	}
607 
608 	/* The Authorization cred and its verifier */
609 	switch (auth_type) {
610 	case RPCAUTH_UNIX:
611 		tl = nfsm_build(&mb, auth_len + 4 * NFSX_UNSIGNED);
612 		*tl++ = txdr_unsigned(RPCAUTH_UNIX);
613 		*tl++ = txdr_unsigned(authsiz);
614 
615 		/* The authorization cred */
616 		*tl++ = 0;		/* stamp */
617 		*tl++ = 0;		/* NULL hostname */
618 		*tl++ = txdr_unsigned(cr->cr_uid);
619 		*tl++ = txdr_unsigned(cr->cr_gid);
620 		*tl++ = txdr_unsigned(ngroups);
621 		for (i = 0; i < ngroups; i++)
622 			*tl++ = txdr_unsigned(cr->cr_groups[i]);
623 		/* The authorization verifier */
624 		*tl++ = txdr_unsigned(RPCAUTH_NULL);
625 		*tl = 0;
626 		break;
627 	}
628 
629 	mb->m_pkthdr.len += authsiz + 10 * NFSX_UNSIGNED;
630 	mb->m_pkthdr.ph_ifidx = 0;
631 }
632 
633 /*
634  * copies mbuf chain to the uio scatter/gather list
635  */
636 int
637 nfsm_mbuftouio(struct mbuf **mrep, struct uio *uiop, int siz, caddr_t *dpos)
638 {
639 	char *mbufcp, *uiocp;
640 	int xfer, left, len;
641 	struct mbuf *mp;
642 	long uiosiz, rem;
643 	int error = 0;
644 
645 	mp = *mrep;
646 	mbufcp = *dpos;
647 	len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
648 	rem = nfsm_padlen(siz);
649 	while (siz > 0) {
650 		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
651 			return (EFBIG);
652 		left = uiop->uio_iov->iov_len;
653 		uiocp = uiop->uio_iov->iov_base;
654 		if (left > siz)
655 			left = siz;
656 		uiosiz = left;
657 		while (left > 0) {
658 			while (len == 0) {
659 				mp = mp->m_next;
660 				if (mp == NULL)
661 					return (EBADRPC);
662 				mbufcp = mtod(mp, caddr_t);
663 				len = mp->m_len;
664 			}
665 			xfer = (left > len) ? len : left;
666 			if (uiop->uio_segflg == UIO_SYSSPACE)
667 				memcpy(uiocp, mbufcp, xfer);
668 			else
669 				copyout(mbufcp, uiocp, xfer);
670 			left -= xfer;
671 			len -= xfer;
672 			mbufcp += xfer;
673 			uiocp += xfer;
674 			uiop->uio_offset += xfer;
675 			uiop->uio_resid -= xfer;
676 		}
677 		if (uiop->uio_iov->iov_len <= siz) {
678 			uiop->uio_iovcnt--;
679 			uiop->uio_iov++;
680 		} else {
681 			uiop->uio_iov->iov_base =
682 			    (char *)uiop->uio_iov->iov_base + uiosiz;
683 			uiop->uio_iov->iov_len -= uiosiz;
684 		}
685 		siz -= uiosiz;
686 	}
687 	*dpos = mbufcp;
688 	*mrep = mp;
689 	if (rem > 0) {
690 		if (len < rem)
691 			error = nfs_adv(mrep, dpos, rem, len);
692 		else
693 			*dpos += rem;
694 	}
695 	return (error);
696 }
697 
698 /*
699  * Copy a uio scatter/gather list to an mbuf chain.
700  */
701 void
702 nfsm_uiotombuf(struct mbuf **mp, struct uio *uiop, size_t len)
703 {
704 	struct mbuf *mb, *mb2;
705 	size_t xfer, pad;
706 
707 	mb = *mp;
708 
709 	pad = nfsm_padlen(len);
710 
711 	/* XXX -- the following should be done by the caller */
712 	uiop->uio_resid = len;
713 	uiop->uio_rw = UIO_WRITE;
714 
715 	while (len) {
716 		xfer = ulmin(len, m_trailingspace(mb));
717 		uiomove(mb_offset(mb), xfer, uiop);
718 		mb->m_len += xfer;
719 		len -= xfer;
720 		if (len > 0) {
721 			MGET(mb2, M_WAIT, MT_DATA);
722 			if (len > MLEN)
723 				MCLGET(mb2, M_WAIT);
724 			mb2->m_len = 0;
725 			mb->m_next = mb2;
726 			mb = mb2;
727 		}
728 	}
729 
730 	if (pad > 0) {
731 		if (pad > m_trailingspace(mb)) {
732 			MGET(mb2, M_WAIT, MT_DATA);
733 			mb2->m_len = 0;
734 			mb->m_next = mb2;
735 			mb = mb2;
736 		}
737 		memset(mb_offset(mb), 0, pad);
738 		mb->m_len += pad;
739 	}
740 
741 	*mp = mb;
742 }
743 
744 /*
745  * Copy a buffer to an mbuf chain
746  */
747 void
748 nfsm_buftombuf(struct mbuf **mp, void *buf, size_t len)
749 {
750 	struct iovec iov;
751 	struct uio io;
752 
753 	iov.iov_base = buf;
754 	iov.iov_len = len;
755 
756 	io.uio_iov = &iov;
757 	io.uio_iovcnt = 1;
758 	io.uio_resid = len;
759 	io.uio_segflg = UIO_SYSSPACE;
760 	io.uio_rw = UIO_WRITE;
761 
762 	nfsm_uiotombuf(mp, &io, len);
763 }
764 
765 /*
766  * Copy a string to an mbuf chain
767  */
768 void
769 nfsm_strtombuf(struct mbuf **mp, void *str, size_t len)
770 {
771 	struct iovec iov[2];
772 	struct uio io;
773 	uint32_t strlen;
774 
775 	strlen = txdr_unsigned(len);
776 
777 	iov[0].iov_base = &strlen;
778 	iov[0].iov_len = sizeof(uint32_t);
779 	iov[1].iov_base = str;
780 	iov[1].iov_len = len;
781 
782 	io.uio_iov = iov;
783 	io.uio_iovcnt = 2;
784 	io.uio_resid = sizeof(uint32_t) + len;
785 	io.uio_segflg = UIO_SYSSPACE;
786 	io.uio_rw = UIO_WRITE;
787 
788 	nfsm_uiotombuf(mp, &io, io.uio_resid);
789 }
790 
791 /*
792  * Help break down an mbuf chain by making the first siz bytes contiguous
793  * and returning a pointer to them via *cp2.
794  * This is used by the macros nfsm_dissect and nfsm_dissecton for tough
795  * cases. (The macros use the vars. dpos and dpos2)
796  */
797 int
798 nfsm_disct(struct mbuf **mdp, caddr_t *dposp, int siz, int left, caddr_t *cp2)
799 {
800 	struct mbuf *mp, *mp2;
801 	int siz2, xfer;
802 	caddr_t p;
803 
804 	mp = *mdp;
805 	while (left == 0) {
806 		*mdp = mp = mp->m_next;
807 		if (mp == NULL)
808 			return (EBADRPC);
809 		left = mp->m_len;
810 		*dposp = mtod(mp, caddr_t);
811 	}
812 	if (left >= siz) {
813 		*cp2 = *dposp;
814 		*dposp += siz;
815 	} else if (mp->m_next == NULL) {
816 		return (EBADRPC);
817 	} else if (siz > MHLEN) {
818 		panic("nfs S too big");
819 	} else {
820 		MGET(mp2, M_WAIT, MT_DATA);
821 		mp2->m_next = mp->m_next;
822 		mp->m_next = mp2;
823 		mp->m_len -= left;
824 		mp = mp2;
825 		*cp2 = p = mtod(mp, caddr_t);
826 		bcopy(*dposp, p, left);		/* Copy what was left */
827 		siz2 = siz-left;
828 		p += left;
829 		mp2 = mp->m_next;
830 		/* Loop around copying up the siz2 bytes */
831 		while (siz2 > 0) {
832 			if (mp2 == NULL)
833 				return (EBADRPC);
834 			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
835 			if (xfer > 0) {
836 				bcopy(mtod(mp2, caddr_t), p, xfer);
837 				mp2->m_data += xfer;
838 				mp2->m_len -= xfer;
839 				p += xfer;
840 				siz2 -= xfer;
841 			}
842 			if (siz2 > 0)
843 				mp2 = mp2->m_next;
844 		}
845 		mp->m_len = siz;
846 		*mdp = mp2;
847 		*dposp = mtod(mp2, caddr_t);
848 	}
849 	return (0);
850 }
851 
852 /*
853  * Advance the position in the mbuf chain.
854  */
855 int
856 nfs_adv(struct mbuf **mdp, caddr_t *dposp, int offs, int left)
857 {
858 	struct mbuf *m;
859 	int s;
860 
861 	m = *mdp;
862 	s = left;
863 	while (s < offs) {
864 		offs -= s;
865 		m = m->m_next;
866 		if (m == NULL)
867 			return (EBADRPC);
868 		s = m->m_len;
869 	}
870 	*mdp = m;
871 	*dposp = mtod(m, caddr_t)+offs;
872 	return (0);
873 }
874 
875 /*
876  * Called once to initialize data structures...
877  */
878 void
879 nfs_init(void)
880 {
881 	rpc_vers = txdr_unsigned(RPC_VER2);
882 	rpc_call = txdr_unsigned(RPC_CALL);
883 	rpc_reply = txdr_unsigned(RPC_REPLY);
884 	rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
885 	rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
886 	rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
887 	rpc_autherr = txdr_unsigned(RPC_AUTHERR);
888 	rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
889 	nfs_prog = txdr_unsigned(NFS_PROG);
890 	nfs_true = txdr_unsigned(1);
891 	nfs_false = txdr_unsigned(0);
892 	nfs_xdrneg1 = txdr_unsigned(-1);
893 	nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000;
894 	if (nfs_ticks < 1)
895 		nfs_ticks = 1;
896 #ifdef NFSSERVER
897 	nfsrv_init(0);			/* Init server data structures */
898 	nfsrv_initcache();		/* Init the server request cache */
899 #endif /* NFSSERVER */
900 
901 	pool_init(&nfsreqpl, sizeof(struct nfsreq), 0, IPL_NONE, PR_WAITOK,
902 	    "nfsreqpl", NULL);
903 }
904 
905 #ifdef NFSCLIENT
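/*
 * Client-side VFS initialization: set up the NFS buffer queue and the
 * pool used to allocate nfsnodes.
 */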
906 int
907 nfs_vfs_init(struct vfsconf *vfsp)
908 {
909 	extern struct pool nfs_node_pool;
910 
911 	TAILQ_INIT(&nfs_bufq);
912 
913 	pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, IPL_NONE,
914 		  PR_WAITOK, "nfsnodepl", NULL);
915 
916 	return (0);
917 }
918 
919 /*
920  * Attribute cache routines.
921  * nfs_loadattrcache() - loads or updates the cache contents from attributes
922  *	that are on the mbuf list
923  * nfs_getattrcache() - returns valid attributes if found in cache, returns
924  *	error otherwise
925  */
926 
927 /*
928  * Load the attribute cache (that lives in the nfsnode entry) with
929  * the values on the mbuf list and, iff vaper is not NULL,
930  * copy the attributes to *vaper.
932  */
933 int
934 nfs_loadattrcache(struct vnode **vpp, struct mbuf **mdp, caddr_t *dposp,
935     struct vattr *vaper)
936 {
937 	struct vnode *vp = *vpp;
938 	struct vattr *vap;
939 	struct nfs_fattr *fp;
940 	extern const struct vops nfs_specvops;
941 	struct nfsnode *np;
942 	int32_t t1;
943 	caddr_t cp2;
944 	int error = 0;
945 	int32_t rdev;
946 	struct mbuf *md;
947 	enum vtype vtyp;
948 	mode_t vmode;
949 	struct timespec mtime;
950 	struct vnode *nvp;
951 	int v3 = NFS_ISV3(vp);
952 	uid_t uid;
953 	gid_t gid;
954 
955 	md = *mdp;
956 	t1 = (mtod(md, caddr_t) + md->m_len) - *dposp;
957 	error = nfsm_disct(mdp, dposp, NFSX_FATTR(v3), t1, &cp2);
958 	if (error)
959 		return (error);
960 	fp = (struct nfs_fattr *)cp2;
961 	if (v3) {
962 		vtyp = nfsv3tov_type(fp->fa_type);
963 		vmode = fxdr_unsigned(mode_t, fp->fa_mode);
964 		rdev = makedev(fxdr_unsigned(u_int32_t, fp->fa3_rdev.specdata1),
965 			fxdr_unsigned(u_int32_t, fp->fa3_rdev.specdata2));
966 		fxdr_nfsv3time(&fp->fa3_mtime, &mtime);
967 	} else {
968 		vtyp = nfsv2tov_type(fp->fa_type);
969 		vmode = fxdr_unsigned(mode_t, fp->fa_mode);
970 		if (vtyp == VNON || vtyp == VREG)
971 			vtyp = IFTOVT(vmode);
972 		rdev = fxdr_unsigned(int32_t, fp->fa2_rdev);
973 		fxdr_nfsv2time(&fp->fa2_mtime, &mtime);
974 
975 		/*
976 		 * Really ugly NFSv2 kludge.
977 		 */
978 		if (vtyp == VCHR && rdev == 0xffffffff)
979 			vtyp = VFIFO;
980 	}
981 
982 	/*
983 	 * If v_type == VNON it is a new node, so fill in the v_type,
984 	 * n_mtime fields. Check to see if it represents a special
985 	 * device, and if so, check for a possible alias. Once the
986 	 * correct vnode has been obtained, fill in the rest of the
987 	 * information.
988 	 */
989 	np = VTONFS(vp);
990 	if (vp->v_type != vtyp) {
991 		cache_purge(vp);
992 		vp->v_type = vtyp;
993 		if (vp->v_type == VFIFO) {
994 #ifndef FIFO
995 			return (EOPNOTSUPP);
996 #else
997                         extern const struct vops nfs_fifovops;
998 			vp->v_op = &nfs_fifovops;
999 #endif /* FIFO */
1000 		}
1001 		if (vp->v_type == VCHR || vp->v_type == VBLK) {
1002 			vp->v_op = &nfs_specvops;
1003 			nvp = checkalias(vp, (dev_t)rdev, vp->v_mount);
1004 			if (nvp) {
1005 				/*
1006 				 * Discard unneeded vnode, but save its nfsnode.
1007 				 * Since the nfsnode does not have a lock, its
1008 				 * vnode lock has to be carried over.
1009 				 */
1010 
1011 				nvp->v_data = vp->v_data;
1012 				vp->v_data = NULL;
1013 				vp->v_op = &spec_vops;
1014 				vrele(vp);
1015 				vgone(vp);
1016 				/*
1017 				 * Reinitialize aliased node.
1018 				 */
1019 				np->n_vnode = nvp;
1020 				*vpp = vp = nvp;
1021 			}
1022 		}
1023 		np->n_mtime = mtime;
1024 	}
1025 	vap = &np->n_vattr;
1026 	vap->va_type = vtyp;
1027 	vap->va_rdev = (dev_t)rdev;
1028 	vap->va_mtime = mtime;
1029 	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
1030 
1031 	uid = fxdr_unsigned(uid_t, fp->fa_uid);
1032 	gid = fxdr_unsigned(gid_t, fp->fa_gid);
1033 	/* Invalidate access cache if uid, gid or mode changed. */
1034 	if (np->n_accstamp != -1 &&
1035 	    (gid != vap->va_gid || uid != vap->va_uid ||
1036 	    (vmode & 07777) != vap->va_mode))
1037 		np->n_accstamp = -1;
1038 
1039 	vap->va_mode = (vmode & 07777);
1040 
1041 	switch (vtyp) {
1042 	case VBLK:
1043 		vap->va_blocksize = BLKDEV_IOSIZE;
1044 		break;
1045 	case VCHR:
1046 		vap->va_blocksize = MAXBSIZE;
1047 		break;
1048 	default:
1049 		vap->va_blocksize = v3 ? vp->v_mount->mnt_stat.f_iosize :
1050 		     fxdr_unsigned(int32_t, fp->fa2_blocksize);
1051 		break;
1052 	}
1053 	if (v3) {
1054 		vap->va_nlink = fxdr_unsigned(nlink_t, fp->fa_nlink);
1055 		vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
1056 		vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
1057 		vap->va_size = fxdr_hyper(&fp->fa3_size);
1058 		vap->va_bytes = fxdr_hyper(&fp->fa3_used);
1059 		vap->va_fileid = fxdr_hyper(&fp->fa3_fileid);
1060 		fxdr_nfsv3time(&fp->fa3_atime, &vap->va_atime);
1061 		fxdr_nfsv3time(&fp->fa3_ctime, &vap->va_ctime);
1062 		vap->va_flags = 0;
1063 		vap->va_filerev = 0;
1064 	} else {
1065 		vap->va_nlink = fxdr_unsigned(nlink_t, fp->fa_nlink);
1066 		vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
1067 		vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
1068 		vap->va_size = fxdr_unsigned(u_int32_t, fp->fa2_size);
1069 		vap->va_bytes =
1070 		    (u_quad_t)fxdr_unsigned(int32_t, fp->fa2_blocks) *
1071 		    NFS_FABLKSIZE;
1072 		vap->va_fileid = fxdr_unsigned(int32_t, fp->fa2_fileid);
1073 		fxdr_nfsv2time(&fp->fa2_atime, &vap->va_atime);
1074 		vap->va_flags = 0;
1075 		vap->va_ctime.tv_sec = fxdr_unsigned(u_int32_t,
1076 		    fp->fa2_ctime.nfsv2_sec);
1077 		vap->va_ctime.tv_nsec = 0;
1078 		vap->va_gen = fxdr_unsigned(u_int32_t,fp->fa2_ctime.nfsv2_usec);
1079 		vap->va_filerev = 0;
1080 	}
1081 
1082 	if (vap->va_size != np->n_size) {
1083 		if (vap->va_type == VREG) {
1084 			if (np->n_flag & NMODIFIED) {
1085 				if (vap->va_size < np->n_size)
1086 					vap->va_size = np->n_size;
1087 				else
1088 					np->n_size = vap->va_size;
1089 			} else
1090 				np->n_size = vap->va_size;
1091 			uvm_vnp_setsize(vp, np->n_size);
1092 		} else
1093 			np->n_size = vap->va_size;
1094 	}
1095 	np->n_attrstamp = gettime();
1096 	if (vaper != NULL) {
1097 		bcopy(vap, vaper, sizeof(*vap));
1098 		if (np->n_flag & NCHG) {
1099 			if (np->n_flag & NACC)
1100 				vaper->va_atime = np->n_atim;
1101 			if (np->n_flag & NUPD)
1102 				vaper->va_mtime = np->n_mtim;
1103 		}
1104 	}
1105 	return (0);
1106 }
1107 
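/*
 * Return the attribute cache timeout (in seconds) for an nfsnode:
 * one tenth of the time since the file was last modified, clamped
 * between the mount's minimum and maximum (directory or regular file
 * limits, as appropriate).  Modified (NMODIFIED) nodes always get the
 * minimum.
 */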
1108 int
1109 nfs_attrtimeo(struct nfsnode *np)
1110 {
1111 	struct vnode *vp = np->n_vnode;
1112 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1113 	int tenthage = (gettime() - np->n_mtime.tv_sec) / 10;
1114 	int minto, maxto;
1115 
1116 	if (vp->v_type == VDIR) {
1117 		maxto = nmp->nm_acdirmax;
1118 		minto = nmp->nm_acdirmin;
1119 	}
1120 	else {
1121 		maxto = nmp->nm_acregmax;
1122 		minto = nmp->nm_acregmin;
1123 	}
1124 
1125 	if (np->n_flag & NMODIFIED || tenthage < minto)
1126 		return minto;
1127 	else if (tenthage < maxto)
1128 		return tenthage;
1129 	else
1130 		return maxto;
1131 }
1132 
1133 /*
1134  * Check the time stamp.
1135  * If the cache is valid, copy the contents to *vaper and return 0;
1136  * otherwise return an error.
1137  */
1138 int
1139 nfs_getattrcache(struct vnode *vp, struct vattr *vaper)
1140 {
1141 	struct nfsnode *np = VTONFS(vp);
1142 	struct vattr *vap;
1143 
1144 	if (np->n_attrstamp == 0 ||
1145 	    (gettime() - np->n_attrstamp) >= nfs_attrtimeo(np)) {
1146 		nfsstats.attrcache_misses++;
1147 		return (ENOENT);
1148 	}
1149 	nfsstats.attrcache_hits++;
1150 	vap = &np->n_vattr;
1151 	if (vap->va_size != np->n_size) {
1152 		if (vap->va_type == VREG) {
1153 			if (np->n_flag & NMODIFIED) {
1154 				if (vap->va_size < np->n_size)
1155 					vap->va_size = np->n_size;
1156 				else
1157 					np->n_size = vap->va_size;
1158 			} else
1159 				np->n_size = vap->va_size;
1160 			uvm_vnp_setsize(vp, np->n_size);
1161 		} else
1162 			np->n_size = vap->va_size;
1163 	}
1164 	bcopy(vap, vaper, sizeof(struct vattr));
1165 	if (np->n_flag & NCHG) {
1166 		if (np->n_flag & NACC)
1167 			vaper->va_atime = np->n_atim;
1168 		if (np->n_flag & NUPD)
1169 			vaper->va_mtime = np->n_mtim;
1170 	}
1171 	return (0);
1172 }
1173 #endif /* NFSCLIENT */
1174 
1175 /*
1176  * Set up nameidata for a lookup() call and do it
1177  */
1178 int
1179 nfs_namei(struct nameidata *ndp, fhandle_t *fhp, int len,
1180     struct nfssvc_sock *slp, struct mbuf *nam, struct mbuf **mdp,
1181     caddr_t *dposp, struct vnode **retdirp, struct proc *p)
1182 {
1183 	int i, rem;
1184 	struct mbuf *md;
1185 	char *fromcp, *tocp;
1186 	struct vnode *dp;
1187 	int error, rdonly;
1188 	struct componentname *cnp = &ndp->ni_cnd;
1189 
1190 	*retdirp = NULL;
1191 	cnp->cn_pnbuf = pool_get(&namei_pool, PR_WAITOK);
1192 	/*
1193 	 * Copy the name from the mbuf list to cnp->cn_pnbuf
1194 	 * and set the various ndp fields appropriately.
1195 	 */
1196 	fromcp = *dposp;
1197 	tocp = cnp->cn_pnbuf;
1198 	md = *mdp;
1199 	rem = mtod(md, caddr_t) + md->m_len - fromcp;
1200 	for (i = 0; i < len; i++) {
1201 		while (rem == 0) {
1202 			md = md->m_next;
1203 			if (md == NULL) {
1204 				error = EBADRPC;
1205 				goto out;
1206 			}
1207 			fromcp = mtod(md, caddr_t);
1208 			rem = md->m_len;
1209 		}
1210 		if (*fromcp == '\0' || *fromcp == '/') {
1211 			error = EACCES;
1212 			goto out;
1213 		}
1214 		*tocp++ = *fromcp++;
1215 		rem--;
1216 	}
1217 	*tocp = '\0';
1218 	*mdp = md;
1219 	*dposp = fromcp;
1220 	len = nfsm_padlen(len);
1221 	if (len > 0) {
1222 		if (rem >= len)
1223 			*dposp += len;
1224 		else if ((error = nfs_adv(mdp, dposp, len, rem)) != 0)
1225 			goto out;
1226 	}
1227 	ndp->ni_pathlen = tocp - cnp->cn_pnbuf;
1228 	cnp->cn_nameptr = cnp->cn_pnbuf;
1229 	/*
1230 	 * Extract and set starting directory.
1231 	 */
1232 	error = nfsrv_fhtovp(fhp, 0, &dp, ndp->ni_cnd.cn_cred, slp,
1233 	    nam, &rdonly);
1234 	if (error)
1235 		goto out;
1236 	if (dp->v_type != VDIR) {
1237 		vrele(dp);
1238 		error = ENOTDIR;
1239 		goto out;
1240 	}
1241 	vref(dp);
1242 	*retdirp = dp;
1243 	ndp->ni_startdir = dp;
1244 	if (rdonly)
1245 		cnp->cn_flags |= (NOCROSSMOUNT | RDONLY);
1246 	else
1247 		cnp->cn_flags |= NOCROSSMOUNT;
1248 
1249 	/*
1250 	 * And call lookup() to do the real work
1251 	 */
1252 	cnp->cn_proc = p;
1253 	error = vfs_lookup(ndp);
1254 	if (error)
1255 		goto out;
1256 	/*
1257 	 * Check for encountering a symbolic link
1258 	 */
1259 	if (cnp->cn_flags & ISSYMLINK) {
1260 		if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1)
1261 			vput(ndp->ni_dvp);
1262 		else
1263 			vrele(ndp->ni_dvp);
1264 		vput(ndp->ni_vp);
1265 		ndp->ni_vp = NULL;
1266 		error = EINVAL;
1267 		goto out;
1268 	}
1269 	/*
1270 	 * Check for saved name request
1271 	 */
1272 	if (cnp->cn_flags & (SAVENAME | SAVESTART)) {
1273 		cnp->cn_flags |= HASBUF;
1274 		return (0);
1275 	}
1276 out:
1277 	pool_put(&namei_pool, cnp->cn_pnbuf);
1278 	return (error);
1279 }
1280 
1281 /*
1282  * A fiddled version of m_adj() that ensures null fill to a long
1283  * boundary and only trims off the back end
1284  */
1285 void
1286 nfsm_adj(struct mbuf *mp, int len, int nul)
1287 {
1288 	struct mbuf *m;
1289 	int count, i;
1290 	char *cp;
1291 
1292 	/*
1293 	 * Trim from tail.  Scan the mbuf chain,
1294 	 * calculating its length and finding the last mbuf.
1295 	 * If the adjustment only affects this mbuf, then just
1296 	 * adjust and return.  Otherwise, rescan and truncate
1297 	 * after the remaining size.
1298 	 */
1299 	count = 0;
1300 	m = mp;
1301 	for (;;) {
1302 		count += m->m_len;
1303 		if (m->m_next == NULL)
1304 			break;
1305 		m = m->m_next;
1306 	}
1307 	if (m->m_len > len) {
1308 		m->m_len -= len;
1309 		if (nul > 0) {
1310 			cp = mtod(m, caddr_t)+m->m_len-nul;
1311 			for (i = 0; i < nul; i++)
1312 				*cp++ = '\0';
1313 		}
1314 		return;
1315 	}
1316 	count -= len;
1317 	if (count < 0)
1318 		count = 0;
1319 	/*
1320 	 * Correct length for chain is "count".
1321 	 * Find the mbuf with last data, adjust its length,
1322 	 * and toss data from remaining mbufs on chain.
1323 	 */
1324 	for (m = mp; m; m = m->m_next) {
1325 		if (m->m_len >= count) {
1326 			m->m_len = count;
1327 			if (nul > 0) {
1328 				cp = mtod(m, caddr_t)+m->m_len-nul;
1329 				for (i = 0; i < nul; i++)
1330 					*cp++ = '\0';
1331 			}
1332 			break;
1333 		}
1334 		count -= m->m_len;
1335 	}
1336 	for (m = m->m_next;m;m = m->m_next)
1337 		m->m_len = 0;
1338 }
1339 
1340 /*
1341  * Make these functions instead of macros, so that the kernel text size
1342  * doesn't get too big...
1343  */
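/*
 * Append NFSv3 wcc_data to the reply: the pre-op size/mtime/ctime when
 * before_ret is zero, followed by the post-op attributes.
 */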
1344 void
1345 nfsm_srvwcc(struct nfsrv_descript *nfsd, int before_ret,
1346     struct vattr *before_vap, int after_ret, struct vattr *after_vap,
1347     struct nfsm_info *info)
1348 {
1349 	u_int32_t *tl;
1350 
1351 	if (before_ret) {
1352 		tl = nfsm_build(&info->nmi_mb, NFSX_UNSIGNED);
1353 		*tl = nfs_false;
1354 	} else {
1355 		tl = nfsm_build(&info->nmi_mb, 7 * NFSX_UNSIGNED);
1356 		*tl++ = nfs_true;
1357 		txdr_hyper(before_vap->va_size, tl);
1358 		tl += 2;
1359 		txdr_nfsv3time(&(before_vap->va_mtime), tl);
1360 		tl += 2;
1361 		txdr_nfsv3time(&(before_vap->va_ctime), tl);
1362 	}
1363 	nfsm_srvpostop_attr(nfsd, after_ret, after_vap, info);
1364 }
1365 
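/*
 * Append NFSv3 post_op_attr to the reply: a boolean followed by the
 * file attributes when after_ret is zero.
 */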
1366 void
1367 nfsm_srvpostop_attr(struct nfsrv_descript *nfsd, int after_ret,
1368     struct vattr *after_vap, struct nfsm_info *info)
1369 {
1370 	u_int32_t *tl;
1371 	struct nfs_fattr *fp;
1372 
1373 	if (after_ret) {
1374 		tl = nfsm_build(&info->nmi_mb, NFSX_UNSIGNED);
1375 		*tl = nfs_false;
1376 	} else {
1377 		tl = nfsm_build(&info->nmi_mb, NFSX_UNSIGNED + NFSX_V3FATTR);
1378 		*tl++ = nfs_true;
1379 		fp = (struct nfs_fattr *)tl;
1380 		nfsm_srvfattr(nfsd, after_vap, fp);
1381 	}
1382 }
1383 
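/*
 * Fill in an NFS file attribute structure (v2 or v3 wire format,
 * depending on the request) from the given vattr.
 */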
1384 void
1385 nfsm_srvfattr(struct nfsrv_descript *nfsd, struct vattr *vap,
1386     struct nfs_fattr *fp)
1387 {
1388 
1389 	fp->fa_nlink = txdr_unsigned(vap->va_nlink);
1390 	fp->fa_uid = txdr_unsigned(vap->va_uid);
1391 	fp->fa_gid = txdr_unsigned(vap->va_gid);
1392 	if (nfsd->nd_flag & ND_NFSV3) {
1393 		fp->fa_type = vtonfsv3_type(vap->va_type);
1394 		fp->fa_mode = vtonfsv3_mode(vap->va_mode);
1395 		txdr_hyper(vap->va_size, &fp->fa3_size);
1396 		txdr_hyper(vap->va_bytes, &fp->fa3_used);
1397 		fp->fa3_rdev.specdata1 = txdr_unsigned(major(vap->va_rdev));
1398 		fp->fa3_rdev.specdata2 = txdr_unsigned(minor(vap->va_rdev));
1399 		fp->fa3_fsid.nfsuquad[0] = 0;
1400 		fp->fa3_fsid.nfsuquad[1] = txdr_unsigned(vap->va_fsid);
1401 		txdr_hyper(vap->va_fileid, &fp->fa3_fileid);
1402 		txdr_nfsv3time(&vap->va_atime, &fp->fa3_atime);
1403 		txdr_nfsv3time(&vap->va_mtime, &fp->fa3_mtime);
1404 		txdr_nfsv3time(&vap->va_ctime, &fp->fa3_ctime);
1405 	} else {
1406 		fp->fa_type = vtonfsv2_type(vap->va_type);
1407 		fp->fa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1408 		fp->fa2_size = txdr_unsigned(vap->va_size);
1409 		fp->fa2_blocksize = txdr_unsigned(vap->va_blocksize);
1410 		if (vap->va_type == VFIFO)
1411 			fp->fa2_rdev = 0xffffffff;
1412 		else
1413 			fp->fa2_rdev = txdr_unsigned(vap->va_rdev);
1414 		fp->fa2_blocks = txdr_unsigned(vap->va_bytes / NFS_FABLKSIZE);
1415 		fp->fa2_fsid = txdr_unsigned(vap->va_fsid);
1416 		fp->fa2_fileid = txdr_unsigned((u_int32_t)vap->va_fileid);
1417 		txdr_nfsv2time(&vap->va_atime, &fp->fa2_atime);
1418 		txdr_nfsv2time(&vap->va_mtime, &fp->fa2_mtime);
1419 		txdr_nfsv2time(&vap->va_ctime, &fp->fa2_ctime);
1420 	}
1421 }
1422 
1423 /*
1424  * nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
1425  * 	- look up fsid in mount list (if not found ret error)
1426  *	- get vp and export rights by calling VFS_FHTOVP() and VFS_CHECKEXP()
1427  *	- if cred->cr_uid == 0 or MNT_EXPORTANON set it to credanon
1428  *	- if not lockflag unlock it with VOP_UNLOCK()
1429  */
1430 int
1431 nfsrv_fhtovp(fhandle_t *fhp, int lockflag, struct vnode **vpp,
1432     struct ucred *cred, struct nfssvc_sock *slp, struct mbuf *nam,
1433     int *rdonlyp)
1434 {
1435 	struct mount *mp;
1436 	int i;
1437 	struct ucred *credanon;
1438 	int error, exflags;
1439 	struct sockaddr_in *saddr;
1440 
1441 	*vpp = NULL;
1442 	mp = vfs_getvfs(&fhp->fh_fsid);
1443 
1444 	if (!mp)
1445 		return (ESTALE);
1446 	error = VFS_CHECKEXP(mp, nam, &exflags, &credanon);
1447 	if (error)
1448 		return (error);
1449 	error = VFS_FHTOVP(mp, &fhp->fh_fid, vpp);
1450 	if (error)
1451 		return (error);
1452 
1453 	saddr = mtod(nam, struct sockaddr_in *);
1454 	if (saddr->sin_family == AF_INET &&
1455 	    (ntohs(saddr->sin_port) >= IPPORT_RESERVED ||
1456 	    (slp->ns_so->so_type == SOCK_STREAM && ntohs(saddr->sin_port) == 20))) {
1457 		vput(*vpp);
1458 		return (NFSERR_AUTHERR | AUTH_TOOWEAK);
1459 	}
1460 
1461 	/* Check/setup credentials. */
1462 	if (cred->cr_uid == 0 || (exflags & MNT_EXPORTANON)) {
1463 		cred->cr_uid = credanon->cr_uid;
1464 		cred->cr_gid = credanon->cr_gid;
1465 		for (i = 0; i < credanon->cr_ngroups && i < NGROUPS_MAX; i++)
1466 			cred->cr_groups[i] = credanon->cr_groups[i];
1467 		cred->cr_ngroups = i;
1468 	}
1469 	if (exflags & MNT_EXRDONLY)
1470 		*rdonlyp = 1;
1471 	else
1472 		*rdonlyp = 0;
1473 	if (!lockflag)
1474 		VOP_UNLOCK(*vpp);
1475 
1476 	return (0);
1477 }
1478 
1479 /*
1480  * This function compares two net addresses by family and returns non-zero
1481  * if they are the same host; if there is any doubt it returns 0.
1482  * The AF_INET family is handled as a special case so that address mbufs
1483  * don't need to be saved to store "struct in_addr", which is only 4 bytes.
1484  */
1485 int
1486 netaddr_match(int family, union nethostaddr *haddr, struct mbuf *nam)
1487 {
1488 	struct sockaddr_in *inetaddr;
1489 
1490 	switch (family) {
1491 	case AF_INET:
1492 		inetaddr = mtod(nam, struct sockaddr_in *);
1493 		if (inetaddr->sin_family == AF_INET &&
1494 		    inetaddr->sin_addr.s_addr == haddr->had_inetaddr)
1495 			return (1);
1496 		break;
1497 	default:
1498 		break;
1499 	};
1500 	return (0);
1501 }
1502 
1503 /*
1504  * The write verifier has changed (probably due to a server reboot), so all
1505  * B_NEEDCOMMIT blocks will have to be written again. Since they are on the
1506  * dirty block list as B_DELWRI, all this takes is clearing the B_NEEDCOMMIT
1507  * flag. Once done the new write verifier can be set for the mount point.
1508  */
1509 void
1510 nfs_clearcommit(struct mount *mp)
1511 {
1512 	struct vnode *vp;
1513 	struct buf *bp;
1514 	int s;
1515 
1516 	s = splbio();
1517 loop:
1518 	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1519 		if (vp->v_mount != mp)	/* Paranoia */
1520 			goto loop;
1521 		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
1522 			if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
1523 			    == (B_DELWRI | B_NEEDCOMMIT))
1524 				bp->b_flags &= ~B_NEEDCOMMIT;
1525 		}
1526 	}
1527 	splx(s);
1528 }
1529 
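/*
 * The helpers below maintain two byte ranges per nfsnode: n_pushlo/n_pushhi
 * cover dirty data still to be committed, and n_pushedlo/n_pushedhi cover
 * data already committed.  Only one contiguous range of each kind is
 * tracked (see the XXX notes below).
 */

/*
 * Fold the to-be-committed range into the committed range and clear the
 * to-be-committed range.
 */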
1530 void
1531 nfs_merge_commit_ranges(struct vnode *vp)
1532 {
1533 	struct nfsnode *np = VTONFS(vp);
1534 
1535 	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID)) {
1536 		np->n_pushedlo = np->n_pushlo;
1537 		np->n_pushedhi = np->n_pushhi;
1538 		np->n_commitflags |= NFS_COMMIT_PUSHED_VALID;
1539 	} else {
1540 		if (np->n_pushlo < np->n_pushedlo)
1541 			np->n_pushedlo = np->n_pushlo;
1542 		if (np->n_pushhi > np->n_pushedhi)
1543 			np->n_pushedhi = np->n_pushhi;
1544 	}
1545 
1546 	np->n_pushlo = np->n_pushhi = 0;
1547 	np->n_commitflags &= ~NFS_COMMIT_PUSH_VALID;
1548 }
1549 
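/*
 * Return non-zero if the buffer's dirty range lies entirely within the
 * node's committed range.
 */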
1550 int
1551 nfs_in_committed_range(struct vnode *vp, struct buf *bp)
1552 {
1553 	struct nfsnode *np = VTONFS(vp);
1554 	off_t lo, hi;
1555 
1556 	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID))
1557 		return 0;
1558 	lo = (off_t)bp->b_blkno * DEV_BSIZE;
1559 	hi = lo + bp->b_dirtyend;
1560 
1561 	return (lo >= np->n_pushedlo && hi <= np->n_pushedhi);
1562 }
1563 
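/*
 * Return non-zero if the buffer's dirty range lies entirely within the
 * node's to-be-committed range.
 */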
1564 int
1565 nfs_in_tobecommitted_range(struct vnode *vp, struct buf *bp)
1566 {
1567 	struct nfsnode *np = VTONFS(vp);
1568 	off_t lo, hi;
1569 
1570 	if (!(np->n_commitflags & NFS_COMMIT_PUSH_VALID))
1571 		return 0;
1572 	lo = (off_t)bp->b_blkno * DEV_BSIZE;
1573 	hi = lo + bp->b_dirtyend;
1574 
1575 	return (lo >= np->n_pushlo && hi <= np->n_pushhi);
1576 }
1577 
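/*
 * Extend the committed range to cover the buffer's dirty range.
 */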
1578 void
1579 nfs_add_committed_range(struct vnode *vp, struct buf *bp)
1580 {
1581 	struct nfsnode *np = VTONFS(vp);
1582 	off_t lo, hi;
1583 
1584 	lo = (off_t)bp->b_blkno * DEV_BSIZE;
1585 	hi = lo + bp->b_dirtyend;
1586 
1587 	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID)) {
1588 		np->n_pushedlo = lo;
1589 		np->n_pushedhi = hi;
1590 		np->n_commitflags |= NFS_COMMIT_PUSHED_VALID;
1591 	} else {
1592 		if (hi > np->n_pushedhi)
1593 			np->n_pushedhi = hi;
1594 		if (lo < np->n_pushedlo)
1595 			np->n_pushedlo = lo;
1596 	}
1597 }
1598 
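/*
 * Remove the buffer's dirty range from the committed range.
 */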
1599 void
1600 nfs_del_committed_range(struct vnode *vp, struct buf *bp)
1601 {
1602 	struct nfsnode *np = VTONFS(vp);
1603 	off_t lo, hi;
1604 
1605 	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID))
1606 		return;
1607 
1608 	lo = (off_t)bp->b_blkno * DEV_BSIZE;
1609 	hi = lo + bp->b_dirtyend;
1610 
1611 	if (lo > np->n_pushedhi || hi < np->n_pushedlo)
1612 		return;
1613 	if (lo <= np->n_pushedlo)
1614 		np->n_pushedlo = hi;
1615 	else if (hi >= np->n_pushedhi)
1616 		np->n_pushedhi = lo;
1617 	else {
1618 		/*
1619 		 * XXX There's only one range. If the deleted range
1620 		 * is in the middle, pick the largest of the
1621 		 * contiguous ranges that it leaves.
1622 		 */
1623 		if ((np->n_pushedlo - lo) > (hi - np->n_pushedhi))
1624 			np->n_pushedhi = lo;
1625 		else
1626 			np->n_pushedlo = hi;
1627 	}
1628 }
1629 
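/*
 * Extend the to-be-committed range to cover the buffer's dirty range.
 */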
1630 void
1631 nfs_add_tobecommitted_range(struct vnode *vp, struct buf *bp)
1632 {
1633 	struct nfsnode *np = VTONFS(vp);
1634 	off_t lo, hi;
1635 
1636 	lo = (off_t)bp->b_blkno * DEV_BSIZE;
1637 	hi = lo + bp->b_dirtyend;
1638 
1639 	if (!(np->n_commitflags & NFS_COMMIT_PUSH_VALID)) {
1640 		np->n_pushlo = lo;
1641 		np->n_pushhi = hi;
1642 		np->n_commitflags |= NFS_COMMIT_PUSH_VALID;
1643 	} else {
1644 		if (lo < np->n_pushlo)
1645 			np->n_pushlo = lo;
1646 		if (hi > np->n_pushhi)
1647 			np->n_pushhi = hi;
1648 	}
1649 }
1650 
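/*
 * Remove the buffer's dirty range from the to-be-committed range.
 */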
1651 void
1652 nfs_del_tobecommitted_range(struct vnode *vp, struct buf *bp)
1653 {
1654 	struct nfsnode *np = VTONFS(vp);
1655 	off_t lo, hi;
1656 
1657 	if (!(np->n_commitflags & NFS_COMMIT_PUSH_VALID))
1658 		return;
1659 
1660 	lo = (off_t)bp->b_blkno * DEV_BSIZE;
1661 	hi = lo + bp->b_dirtyend;
1662 
1663 	if (lo > np->n_pushhi || hi < np->n_pushlo)
1664 		return;
1665 
1666 	if (lo <= np->n_pushlo)
1667 		np->n_pushlo = hi;
1668 	else if (hi >= np->n_pushhi)
1669 		np->n_pushhi = lo;
1670 	else {
1671 		/*
1672 		 * XXX There's only one range. If the deleted range
1673 		 * is in the middle, pick the largest of the
1674 		 * contiguous ranges that it leaves.
1675 		 */
1676 		if ((np->n_pushlo - lo) > (hi - np->n_pushhi))
1677 			np->n_pushhi = lo;
1678 		else
1679 			np->n_pushlo = hi;
1680 	}
1681 }
1682 
1683 /*
1684  * Map errnos to NFS error numbers. For Version 3 also filter out error
1685  * numbers not specified for the associated procedure.
1686  */
1687 int
1688 nfsrv_errmap(struct nfsrv_descript *nd, int err)
1689 {
1690 	short *defaulterrp, *errp;
1691 
1692 	if (nd->nd_flag & ND_NFSV3) {
1693 	    if (nd->nd_procnum <= NFSPROC_COMMIT) {
1694 		errp = defaulterrp = nfsrv_v3errmap[nd->nd_procnum];
1695 		while (*++errp) {
1696 			if (*errp == err)
1697 				return (err);
1698 			else if (*errp > err)
1699 				break;
1700 		}
1701 		return ((int)*defaulterrp);
1702 	    } else
1703 		return (err & 0xffff);
1704 	}
1705 	if (err <= nitems(nfsrv_v2errmap))
1706 		return ((int)nfsrv_v2errmap[err - 1]);
1707 	return (NFSERR_IO);
1708 }
1709 
1710 /*
1711  * If full is non-zero, set all fields; otherwise just set the mode and time fields.
1712  */
1713 void
1714 nfsm_v3attrbuild(struct mbuf **mp, struct vattr *a, int full)
1715 {
1716 	struct mbuf *mb;
1717 	u_int32_t *tl;
1718 
1719 	mb = *mp;
1720 
1721 	if (a->va_mode != (mode_t)VNOVAL) {
1722 		tl = nfsm_build(&mb, 2 * NFSX_UNSIGNED);
1723 		*tl++ = nfs_true;
1724 		*tl = txdr_unsigned(a->va_mode);
1725 	} else {
1726 		tl = nfsm_build(&mb, NFSX_UNSIGNED);
1727 		*tl = nfs_false;
1728 	}
1729 	if (full && a->va_uid != (uid_t)VNOVAL) {
1730 		tl = nfsm_build(&mb, 2 * NFSX_UNSIGNED);
1731 		*tl++ = nfs_true;
1732 		*tl = txdr_unsigned(a->va_uid);
1733 	} else {
1734 		tl = nfsm_build(&mb, NFSX_UNSIGNED);
1735 		*tl = nfs_false;
1736 	}
1737 	if (full && a->va_gid != (gid_t)VNOVAL) {
1738 		tl = nfsm_build(&mb, 2 * NFSX_UNSIGNED);
1739 		*tl++ = nfs_true;
1740 		*tl = txdr_unsigned((a)->va_gid);
1741 	} else {
1742 		tl = nfsm_build(&mb, NFSX_UNSIGNED);
1743 		*tl = nfs_false;
1744 	}
1745 	if (full && a->va_size != VNOVAL) {
1746 		tl = nfsm_build(&mb, 3 * NFSX_UNSIGNED);
1747 		*tl++ = nfs_true;
1748 		txdr_hyper(a->va_size, tl);
1749 	} else {
1750 		tl = nfsm_build(&mb, NFSX_UNSIGNED);
1751 		*tl = nfs_false;
1752 	}
1753 	if (a->va_atime.tv_nsec != VNOVAL) {
1754 		if (a->va_atime.tv_sec != gettime()) {
1755 			tl = nfsm_build(&mb, 3 * NFSX_UNSIGNED);
1756 			*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
1757 			txdr_nfsv3time(&a->va_atime, tl);
1758 		} else {
1759 			tl = nfsm_build(&mb, NFSX_UNSIGNED);
1760 			*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
1761 		}
1762 	} else {
1763 		tl = nfsm_build(&mb, NFSX_UNSIGNED);
1764 		*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
1765 	}
1766 	if (a->va_mtime.tv_nsec != VNOVAL) {
1767 		if (a->va_mtime.tv_sec != gettime()) {
1768 			tl = nfsm_build(&mb, 3 * NFSX_UNSIGNED);
1769 			*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
1770 			txdr_nfsv3time(&a->va_mtime, tl);
1771 		} else {
1772 			tl = nfsm_build(&mb, NFSX_UNSIGNED);
1773 			*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
1774 		}
1775 	} else {
1776 		tl = nfsm_build(&mb, NFSX_UNSIGNED);
1777 		*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
1778 	}
1779 
1780 	*mp = mb;
1781 }
1782 
1783 /*
1784  * Ensure a contiguous buffer len bytes long
1785  */
1786 void *
1787 nfsm_build(struct mbuf **mp, u_int len)
1788 {
1789 	struct mbuf *mb, *mb2;
1790 	caddr_t bpos;
1791 
1792 	mb = *mp;
1793 	bpos = mb_offset(mb);
1794 
1795 	if (len > m_trailingspace(mb)) {
1796 		MGET(mb2, M_WAIT, MT_DATA);
1797 		if (len > MLEN)
1798 			panic("build > MLEN");
1799 		mb->m_next = mb2;
1800 		mb = mb2;
1801 		mb->m_len = 0;
1802 		bpos = mtod(mb, caddr_t);
1803 	}
1804 	mb->m_len += len;
1805 
1806 	*mp = mb;
1807 
1808 	return (bpos);
1809 }
1810 
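/*
 * Append a vnode's NFS file handle to the request: a variable-length
 * opaque for v3, a fixed NFSX_V2FH-byte buffer for v2.
 */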
1811 void
1812 nfsm_fhtom(struct nfsm_info *info, struct vnode *v, int v3)
1813 {
1814 	struct nfsnode *n = VTONFS(v);
1815 
1816 	if (v3) {
1817 		nfsm_strtombuf(&info->nmi_mb, n->n_fhp, n->n_fhsize);
1818 	} else {
1819 		nfsm_buftombuf(&info->nmi_mb, n->n_fhp, NFSX_V2FH);
1820 	}
1821 }
1822 
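/*
 * Append a server file handle to the reply, in v3 or v2 wire format.
 */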
1823 void
1824 nfsm_srvfhtom(struct mbuf **mp, fhandle_t *f, int v3)
1825 {
1826 	if (v3) {
1827 		nfsm_strtombuf(mp, f, NFSX_V3FH);
1828 	} else {
1829 		nfsm_buftombuf(mp, f, NFSX_V2FH);
1830 	}
1831 }
1832 
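/*
 * Parse NFSv3 sattr3 arguments from the request into *va: mode, uid, gid,
 * size, and atime/mtime (either supplied by the client or set to the
 * current server time).
 */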
1833 int
1834 nfsm_srvsattr(struct mbuf **mp, struct vattr *va, struct mbuf *mrep,
1835     caddr_t *dposp)
1836 {
1837 	struct nfsm_info	info;
1838 	uint32_t *tl, t1;
1839 	caddr_t cp2;
1840 	int error = 0;
1841 
1842 	info.nmi_md = *mp;
1843 	info.nmi_dpos = *dposp;
1844 
1845 	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1846 	if (*tl == nfs_true) {
1847 		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1848 		va->va_mode = nfstov_mode(*tl);
1849 	}
1850 
1851 	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1852 	if (*tl == nfs_true) {
1853 		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1854 		va->va_uid = fxdr_unsigned(uid_t, *tl);
1855 	}
1856 
1857 	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1858 	if (*tl == nfs_true) {
1859 		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1860 		va->va_gid = fxdr_unsigned(gid_t, *tl);
1861 	}
1862 
1863 	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1864 	if (*tl == nfs_true) {
1865 		nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1866 		va->va_size = fxdr_hyper(tl);
1867 	}
1868 
1869 	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1870 	switch (fxdr_unsigned(int, *tl)) {
1871 	case NFSV3SATTRTIME_TOCLIENT:
1872 		va->va_vaflags |= VA_UTIMES_CHANGE;
1873 		va->va_vaflags &= ~VA_UTIMES_NULL;
1874 		nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1875 		fxdr_nfsv3time(tl, &va->va_atime);
1876 		break;
1877 	case NFSV3SATTRTIME_TOSERVER:
1878 		va->va_vaflags |= VA_UTIMES_CHANGE;
1879 		getnanotime(&va->va_atime);
1880 		break;
1881 	};
1882 
1883 	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1884 	switch (fxdr_unsigned(int, *tl)) {
1885 	case NFSV3SATTRTIME_TOCLIENT:
1886 		va->va_vaflags |= VA_UTIMES_CHANGE;
1887 		va->va_vaflags &= ~VA_UTIMES_NULL;
1888 		nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1889 		fxdr_nfsv3time(tl, &va->va_mtime);
1890 		break;
1891 	case NFSV3SATTRTIME_TOSERVER:
1892 		va->va_vaflags |= VA_UTIMES_CHANGE;
1893 		getnanotime(&va->va_mtime);
1894 		break;
1895 	};
1896 
1897 	*dposp = info.nmi_dpos;
1898 	*mp = info.nmi_md;
1899 nfsmout:
1900 	return (error);
1901 }
1902 
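/*
 * Convert a timespec into NFSv2 wire format (seconds and microseconds).
 */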
1903 void
1904 txdr_nfsv2time(const struct timespec *from, struct nfsv2_time *to)
1905 {
1906 	if (from->tv_nsec == VNOVAL) {
1907 		to->nfsv2_sec = nfs_xdrneg1;
1908 		to->nfsv2_usec = nfs_xdrneg1;
1909 	} else if (from->tv_sec == -1) {
1910 		/*
1911 		 * can't request a time of -1; send
1912 		 * -1.000001 == {-2,999999} instead
1913 		 */
1914 		to->nfsv2_sec = htonl(-2);
1915 		to->nfsv2_usec = htonl(999999);
1916 	} else {
1917 		to->nfsv2_sec = htonl(from->tv_sec);
1918 		to->nfsv2_usec = htonl(from->tv_nsec / 1000);
1919 	}
1920 }
1921