xref: /openbsd/sys/nfs/nfs_subs.c (revision 9b7c3dbb)
1 /*	$OpenBSD: nfs_subs.c,v 1.132 2016/08/30 07:12:49 dlg Exp $	*/
2 /*	$NetBSD: nfs_subs.c,v 1.27.4.3 1996/07/08 20:34:24 jtc Exp $	*/
3 
4 /*
5  * Copyright (c) 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * Rick Macklem at The University of Guelph.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	@(#)nfs_subs.c	8.8 (Berkeley) 5/22/95
36  */
37 
38 
39 /*
40  * These functions support the macros and help fiddle mbuf chains for
41  * the nfs op functions. They do things like create the rpc header and
42  * copy data between mbuf chains and uio lists.
43  */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/mount.h>
48 #include <sys/vnode.h>
49 #include <sys/namei.h>
50 #include <sys/mbuf.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/stat.h>
54 #include <sys/pool.h>
55 #include <sys/time.h>
56 
57 #include <nfs/rpcv2.h>
58 #include <nfs/nfsproto.h>
59 #include <nfs/nfsnode.h>
60 #include <nfs/nfs.h>
61 #include <nfs/xdr_subs.h>
62 #include <nfs/nfsm_subs.h>
63 #include <nfs/nfsmount.h>
64 #include <nfs/nfs_var.h>
65 
66 #include <uvm/uvm_extern.h>
67 
68 #include <netinet/in.h>
69 
70 #include <crypto/idgen.h>
71 
72 int	nfs_attrtimeo(struct nfsnode *np);
73 
74 /*
75  * Data items converted to xdr at startup, since they are constant.
76  * This is kinda hokey, but may save a little time doing byte swaps
77  */
78 u_int32_t nfs_xdrneg1;
79 u_int32_t rpc_call, rpc_vers, rpc_reply, rpc_msgdenied, rpc_autherr,
80 	rpc_mismatch, rpc_auth_unix, rpc_msgaccepted;
81 u_int32_t nfs_prog, nfs_true, nfs_false;
82 
83 /* And other global data */
84 nfstype nfsv2_type[9] = { NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFNON,
85 		      NFCHR, NFNON };
86 nfstype nfsv3_type[9] = { NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFSOCK,
87 		      NFFIFO, NFNON };
88 enum vtype nv2tov_type[8] = { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON, VNON };
89 enum vtype nv3tov_type[8]={ VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO };
90 int nfs_ticks;
91 struct nfsstats nfsstats;
92 
93 /*
94  * Mapping of old NFS Version 2 RPC numbers to generic numbers.
95  */
96 int nfsv3_procid[NFS_NPROCS] = {
97 	NFSPROC_NULL,
98 	NFSPROC_GETATTR,
99 	NFSPROC_SETATTR,
100 	NFSPROC_NOOP,
101 	NFSPROC_LOOKUP,
102 	NFSPROC_READLINK,
103 	NFSPROC_READ,
104 	NFSPROC_NOOP,
105 	NFSPROC_WRITE,
106 	NFSPROC_CREATE,
107 	NFSPROC_REMOVE,
108 	NFSPROC_RENAME,
109 	NFSPROC_LINK,
110 	NFSPROC_SYMLINK,
111 	NFSPROC_MKDIR,
112 	NFSPROC_RMDIR,
113 	NFSPROC_READDIR,
114 	NFSPROC_FSSTAT,
115 	NFSPROC_NOOP,
116 	NFSPROC_NOOP,
117 	NFSPROC_NOOP,
118 	NFSPROC_NOOP,
119 	NFSPROC_NOOP
120 };
121 
122 /*
123  * and the reverse mapping from generic to Version 2 procedure numbers
124  */
125 int nfsv2_procid[NFS_NPROCS] = {
126 	NFSV2PROC_NULL,
127 	NFSV2PROC_GETATTR,
128 	NFSV2PROC_SETATTR,
129 	NFSV2PROC_LOOKUP,
130 	NFSV2PROC_NOOP,
131 	NFSV2PROC_READLINK,
132 	NFSV2PROC_READ,
133 	NFSV2PROC_WRITE,
134 	NFSV2PROC_CREATE,
135 	NFSV2PROC_MKDIR,
136 	NFSV2PROC_SYMLINK,
137 	NFSV2PROC_CREATE,
138 	NFSV2PROC_REMOVE,
139 	NFSV2PROC_RMDIR,
140 	NFSV2PROC_RENAME,
141 	NFSV2PROC_LINK,
142 	NFSV2PROC_READDIR,
143 	NFSV2PROC_NOOP,
144 	NFSV2PROC_STATFS,
145 	NFSV2PROC_NOOP,
146 	NFSV2PROC_NOOP,
147 	NFSV2PROC_NOOP,
148 	NFSV2PROC_NOOP
149 };
150 
151 /*
152  * Maps errno values to nfs error numbers.
153  * Use NFSERR_IO as the catch-all for ones not specifically defined in
154  * RFC 1094.
155  */
156 static u_char nfsrv_v2errmap[] = {
157   NFSERR_PERM,	NFSERR_NOENT,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
158   NFSERR_NXIO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
159   NFSERR_IO,	NFSERR_IO,	NFSERR_ACCES,	NFSERR_IO,	NFSERR_IO,
160   NFSERR_IO,	NFSERR_EXIST,	NFSERR_IO,	NFSERR_NODEV,	NFSERR_NOTDIR,
161   NFSERR_ISDIR,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
162   NFSERR_IO,	NFSERR_FBIG,	NFSERR_NOSPC,	NFSERR_IO,	NFSERR_ROFS,
163   NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
164   NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
165   NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
166   NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
167   NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
168   NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
169   NFSERR_IO,	NFSERR_IO,	NFSERR_NAMETOL,	NFSERR_IO,	NFSERR_IO,
170   NFSERR_NOTEMPTY, NFSERR_IO,	NFSERR_IO,	NFSERR_DQUOT,	NFSERR_STALE
171   /* Everything after this maps to NFSERR_IO, so far */
172 };
173 
174 /*
175  * Maps errno values to nfs error numbers.
176  * Although it is not obvious whether or not NFS clients really care if
177  * a returned error value is in the specified list for the procedure, the
178  * safest thing to do is filter them appropriately. For Version 2, the
179  * X/Open XNFS document is the only specification that defines error values
180  * for each RPC (The RFC simply lists all possible error values for all RPCs),
181  * so I have decided to not do this for Version 2.
182  * The first entry is the default error return and the rest are the valid
183  * errors for that RPC in increasing numeric order.
184  */
185 static short nfsv3err_null[] = {
186 	0,
187 	0,
188 };
189 
190 static short nfsv3err_getattr[] = {
191 	NFSERR_IO,
192 	NFSERR_IO,
193 	NFSERR_STALE,
194 	NFSERR_BADHANDLE,
195 	NFSERR_SERVERFAULT,
196 	0,
197 };
198 
199 static short nfsv3err_setattr[] = {
200 	NFSERR_IO,
201 	NFSERR_PERM,
202 	NFSERR_IO,
203 	NFSERR_ACCES,
204 	NFSERR_INVAL,
205 	NFSERR_NOSPC,
206 	NFSERR_ROFS,
207 	NFSERR_DQUOT,
208 	NFSERR_STALE,
209 	NFSERR_BADHANDLE,
210 	NFSERR_NOT_SYNC,
211 	NFSERR_SERVERFAULT,
212 	0,
213 };
214 
215 static short nfsv3err_lookup[] = {
216 	NFSERR_IO,
217 	NFSERR_NOENT,
218 	NFSERR_IO,
219 	NFSERR_ACCES,
220 	NFSERR_NOTDIR,
221 	NFSERR_NAMETOL,
222 	NFSERR_STALE,
223 	NFSERR_BADHANDLE,
224 	NFSERR_SERVERFAULT,
225 	0,
226 };
227 
228 static short nfsv3err_access[] = {
229 	NFSERR_IO,
230 	NFSERR_IO,
231 	NFSERR_STALE,
232 	NFSERR_BADHANDLE,
233 	NFSERR_SERVERFAULT,
234 	0,
235 };
236 
237 static short nfsv3err_readlink[] = {
238 	NFSERR_IO,
239 	NFSERR_IO,
240 	NFSERR_ACCES,
241 	NFSERR_INVAL,
242 	NFSERR_STALE,
243 	NFSERR_BADHANDLE,
244 	NFSERR_NOTSUPP,
245 	NFSERR_SERVERFAULT,
246 	0,
247 };
248 
249 static short nfsv3err_read[] = {
250 	NFSERR_IO,
251 	NFSERR_IO,
252 	NFSERR_NXIO,
253 	NFSERR_ACCES,
254 	NFSERR_INVAL,
255 	NFSERR_STALE,
256 	NFSERR_BADHANDLE,
257 	NFSERR_SERVERFAULT,
258 	0,
259 };
260 
261 static short nfsv3err_write[] = {
262 	NFSERR_IO,
263 	NFSERR_IO,
264 	NFSERR_ACCES,
265 	NFSERR_INVAL,
266 	NFSERR_FBIG,
267 	NFSERR_NOSPC,
268 	NFSERR_ROFS,
269 	NFSERR_DQUOT,
270 	NFSERR_STALE,
271 	NFSERR_BADHANDLE,
272 	NFSERR_SERVERFAULT,
273 	0,
274 };
275 
276 static short nfsv3err_create[] = {
277 	NFSERR_IO,
278 	NFSERR_IO,
279 	NFSERR_ACCES,
280 	NFSERR_EXIST,
281 	NFSERR_NOTDIR,
282 	NFSERR_NOSPC,
283 	NFSERR_ROFS,
284 	NFSERR_NAMETOL,
285 	NFSERR_DQUOT,
286 	NFSERR_STALE,
287 	NFSERR_BADHANDLE,
288 	NFSERR_NOTSUPP,
289 	NFSERR_SERVERFAULT,
290 	0,
291 };
292 
293 static short nfsv3err_mkdir[] = {
294 	NFSERR_IO,
295 	NFSERR_IO,
296 	NFSERR_ACCES,
297 	NFSERR_EXIST,
298 	NFSERR_NOTDIR,
299 	NFSERR_NOSPC,
300 	NFSERR_ROFS,
301 	NFSERR_NAMETOL,
302 	NFSERR_DQUOT,
303 	NFSERR_STALE,
304 	NFSERR_BADHANDLE,
305 	NFSERR_NOTSUPP,
306 	NFSERR_SERVERFAULT,
307 	0,
308 };
309 
310 static short nfsv3err_symlink[] = {
311 	NFSERR_IO,
312 	NFSERR_IO,
313 	NFSERR_ACCES,
314 	NFSERR_EXIST,
315 	NFSERR_NOTDIR,
316 	NFSERR_NOSPC,
317 	NFSERR_ROFS,
318 	NFSERR_NAMETOL,
319 	NFSERR_DQUOT,
320 	NFSERR_STALE,
321 	NFSERR_BADHANDLE,
322 	NFSERR_NOTSUPP,
323 	NFSERR_SERVERFAULT,
324 	0,
325 };
326 
327 static short nfsv3err_mknod[] = {
328 	NFSERR_IO,
329 	NFSERR_IO,
330 	NFSERR_ACCES,
331 	NFSERR_EXIST,
332 	NFSERR_NOTDIR,
333 	NFSERR_NOSPC,
334 	NFSERR_ROFS,
335 	NFSERR_NAMETOL,
336 	NFSERR_DQUOT,
337 	NFSERR_STALE,
338 	NFSERR_BADHANDLE,
339 	NFSERR_NOTSUPP,
340 	NFSERR_SERVERFAULT,
341 	NFSERR_BADTYPE,
342 	0,
343 };
344 
345 static short nfsv3err_remove[] = {
346 	NFSERR_IO,
347 	NFSERR_NOENT,
348 	NFSERR_IO,
349 	NFSERR_ACCES,
350 	NFSERR_NOTDIR,
351 	NFSERR_ROFS,
352 	NFSERR_NAMETOL,
353 	NFSERR_STALE,
354 	NFSERR_BADHANDLE,
355 	NFSERR_SERVERFAULT,
356 	0,
357 };
358 
359 static short nfsv3err_rmdir[] = {
360 	NFSERR_IO,
361 	NFSERR_NOENT,
362 	NFSERR_IO,
363 	NFSERR_ACCES,
364 	NFSERR_EXIST,
365 	NFSERR_NOTDIR,
366 	NFSERR_INVAL,
367 	NFSERR_ROFS,
368 	NFSERR_NAMETOL,
369 	NFSERR_NOTEMPTY,
370 	NFSERR_STALE,
371 	NFSERR_BADHANDLE,
372 	NFSERR_NOTSUPP,
373 	NFSERR_SERVERFAULT,
374 	0,
375 };
376 
377 static short nfsv3err_rename[] = {
378 	NFSERR_IO,
379 	NFSERR_NOENT,
380 	NFSERR_IO,
381 	NFSERR_ACCES,
382 	NFSERR_EXIST,
383 	NFSERR_XDEV,
384 	NFSERR_NOTDIR,
385 	NFSERR_ISDIR,
386 	NFSERR_INVAL,
387 	NFSERR_NOSPC,
388 	NFSERR_ROFS,
389 	NFSERR_MLINK,
390 	NFSERR_NAMETOL,
391 	NFSERR_NOTEMPTY,
392 	NFSERR_DQUOT,
393 	NFSERR_STALE,
394 	NFSERR_BADHANDLE,
395 	NFSERR_NOTSUPP,
396 	NFSERR_SERVERFAULT,
397 	0,
398 };
399 
400 static short nfsv3err_link[] = {
401 	NFSERR_IO,
402 	NFSERR_IO,
403 	NFSERR_ACCES,
404 	NFSERR_EXIST,
405 	NFSERR_XDEV,
406 	NFSERR_NOTDIR,
407 	NFSERR_INVAL,
408 	NFSERR_NOSPC,
409 	NFSERR_ROFS,
410 	NFSERR_MLINK,
411 	NFSERR_NAMETOL,
412 	NFSERR_DQUOT,
413 	NFSERR_STALE,
414 	NFSERR_BADHANDLE,
415 	NFSERR_NOTSUPP,
416 	NFSERR_SERVERFAULT,
417 	0,
418 };
419 
420 static short nfsv3err_readdir[] = {
421 	NFSERR_IO,
422 	NFSERR_IO,
423 	NFSERR_ACCES,
424 	NFSERR_NOTDIR,
425 	NFSERR_STALE,
426 	NFSERR_BADHANDLE,
427 	NFSERR_BAD_COOKIE,
428 	NFSERR_TOOSMALL,
429 	NFSERR_SERVERFAULT,
430 	0,
431 };
432 
433 static short nfsv3err_readdirplus[] = {
434 	NFSERR_IO,
435 	NFSERR_IO,
436 	NFSERR_ACCES,
437 	NFSERR_NOTDIR,
438 	NFSERR_STALE,
439 	NFSERR_BADHANDLE,
440 	NFSERR_BAD_COOKIE,
441 	NFSERR_NOTSUPP,
442 	NFSERR_TOOSMALL,
443 	NFSERR_SERVERFAULT,
444 	0,
445 };
446 
447 static short nfsv3err_fsstat[] = {
448 	NFSERR_IO,
449 	NFSERR_IO,
450 	NFSERR_STALE,
451 	NFSERR_BADHANDLE,
452 	NFSERR_SERVERFAULT,
453 	0,
454 };
455 
456 static short nfsv3err_fsinfo[] = {
457 	NFSERR_STALE,
458 	NFSERR_STALE,
459 	NFSERR_BADHANDLE,
460 	NFSERR_SERVERFAULT,
461 	0,
462 };
463 
464 static short nfsv3err_pathconf[] = {
465 	NFSERR_STALE,
466 	NFSERR_STALE,
467 	NFSERR_BADHANDLE,
468 	NFSERR_SERVERFAULT,
469 	0,
470 };
471 
472 static short nfsv3err_commit[] = {
473 	NFSERR_IO,
474 	NFSERR_IO,
475 	NFSERR_STALE,
476 	NFSERR_BADHANDLE,
477 	NFSERR_SERVERFAULT,
478 	0,
479 };
480 
481 static short *nfsrv_v3errmap[] = {
482 	nfsv3err_null,
483 	nfsv3err_getattr,
484 	nfsv3err_setattr,
485 	nfsv3err_lookup,
486 	nfsv3err_access,
487 	nfsv3err_readlink,
488 	nfsv3err_read,
489 	nfsv3err_write,
490 	nfsv3err_create,
491 	nfsv3err_mkdir,
492 	nfsv3err_symlink,
493 	nfsv3err_mknod,
494 	nfsv3err_remove,
495 	nfsv3err_rmdir,
496 	nfsv3err_rename,
497 	nfsv3err_link,
498 	nfsv3err_readdir,
499 	nfsv3err_readdirplus,
500 	nfsv3err_fsstat,
501 	nfsv3err_fsinfo,
502 	nfsv3err_pathconf,
503 	nfsv3err_commit,
504 };
505 
506 struct pool nfsreqpl;
507 
508 /*
509  * Create the header for an rpc request packet.
510  * The hsiz is the size of the rest of the nfs request header
511  * (just used to decide whether a cluster is a good idea).
512  */
513 struct mbuf *
514 nfsm_reqhead(int hsiz)
515 {
516 	struct mbuf *mb;
517 
518 	MGET(mb, M_WAIT, MT_DATA);
519 	if (hsiz > MLEN)
520 		MCLGET(mb, M_WAIT);
521 	mb->m_len = 0;
522 
523 	/* Finally, return values */
524 	return (mb);
525 }
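
/*
 * Example (an illustrative sketch, not part of the original file): a
 * typical caller allocates the request mbuf with nfsm_reqhead() and
 * then reserves XDR words in it with nfsm_build().  The request shown
 * here is hypothetical.
 */
#if 0
	struct mbuf *mreq;
	u_int32_t *tl;

	mreq = nfsm_reqhead(NFSX_V3FH);		/* hsiz is only a size hint */
	tl = nfsm_build(&mreq, 2 * NFSX_UNSIGNED);
	*tl++ = txdr_unsigned(NFS_VER3);	/* first reserved XDR word */
	*tl = txdr_unsigned(NFSPROC_GETATTR);	/* second reserved XDR word */
#endif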
526 
527 /*
528  * Return an unpredictable XID in XDR form.
529  */
530 u_int32_t
531 nfs_get_xid(void)
532 {
533 	static struct idgen32_ctx nfs_xid_ctx;
534 	static int called = 0;
535 
536 	if (!called) {
537 		called = 1;
538 		idgen32_init(&nfs_xid_ctx);
539 	}
540 	return (txdr_unsigned(idgen32(&nfs_xid_ctx)));
541 }
542 
543 /*
544  * Build the RPC header and fill in the authorization info.
545  * Right now we are pretty much centered on RPCAUTH_UNIX; in the
546  * future, this function will need some love to be able to handle
547  * other authorization methods, such as Kerberos.
548  */
549 void
550 nfsm_rpchead(struct nfsreq *req, struct ucred *cr, int auth_type)
551 {
552 	struct mbuf	*mb;
553 	u_int32_t	*tl;
554 	int		i, authsiz, auth_len, ngroups;
555 
556 	KASSERT(auth_type == RPCAUTH_UNIX);
557 
558 	/*
559 	 * RPCAUTH_UNIX fits in an hdr mbuf; in the future, other
560 	 * authorization methods will need to figure out their own sizes
561 	 * and allocate and chain mbufs accordingly.
562 	 */
563 	mb = req->r_mreq;
564 
565 	/*
566 	 * We need to start out by finding how big the authorization cred
567 	 * and verifier are for the auth_type, to be able to correctly
568 	 * align the mbuf header/chain.
569 	 */
570 	switch (auth_type) {
571 	case RPCAUTH_UNIX:
572 		/*
573 		 * In the RPCAUTH_UNIX case, the size is the static
574 		 * part as shown in RFC 1831 plus the number of groups;
575 		 * RPCAUTH_UNIX has a null (zero) verifier.
576 		 */
577 		if (cr->cr_ngroups > req->r_nmp->nm_numgrps)
578 			ngroups = req->r_nmp->nm_numgrps;
579 		else
580 			ngroups = cr->cr_ngroups;
581 
582 		auth_len = (ngroups << 2) + 5 * NFSX_UNSIGNED;
583 		authsiz = nfsm_rndup(auth_len);
584 		/* The authorization size + the size of the static part */
585 		MH_ALIGN(mb, authsiz + 10 * NFSX_UNSIGNED);
586 		break;
587 	}
588 
589 	mb->m_len = 0;
590 
591 	/* First the RPC header. */
592 	tl = nfsm_build(&mb, 6 * NFSX_UNSIGNED);
593 
594 	/* Get a new (non-zero) xid */
595 	*tl++ = req->r_xid = nfs_get_xid();
596 	*tl++ = rpc_call;
597 	*tl++ = rpc_vers;
598 	*tl++ = nfs_prog;
599 	if (ISSET(req->r_nmp->nm_flag, NFSMNT_NFSV3)) {
600 		*tl++ = txdr_unsigned(NFS_VER3);
601 		*tl = txdr_unsigned(req->r_procnum);
602 	} else {
603 		*tl++ = txdr_unsigned(NFS_VER2);
604 		*tl = txdr_unsigned(nfsv2_procid[req->r_procnum]);
605 	}
606 
607 	/* The Authorization cred and its verifier */
608 	switch (auth_type) {
609 	case RPCAUTH_UNIX:
610 		tl = nfsm_build(&mb, auth_len + 4 * NFSX_UNSIGNED);
611 		*tl++ = txdr_unsigned(RPCAUTH_UNIX);
612 		*tl++ = txdr_unsigned(authsiz);
613 
614 		/* The authorization cred */
615 		*tl++ = 0;		/* stamp */
616 		*tl++ = 0;		/* NULL hostname */
617 		*tl++ = txdr_unsigned(cr->cr_uid);
618 		*tl++ = txdr_unsigned(cr->cr_gid);
619 		*tl++ = txdr_unsigned(ngroups);
620 		for (i = 0; i < ngroups; i++)
621 			*tl++ = txdr_unsigned(cr->cr_groups[i]);
622 		/* The authorization verifier */
623 		*tl++ = txdr_unsigned(RPCAUTH_NULL);
624 		*tl = 0;
625 		break;
626 	}
627 
628 	mb->m_pkthdr.len += authsiz + 10 * NFSX_UNSIGNED;
629 	mb->m_pkthdr.ph_ifidx = 0;
630 }
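
/*
 * For reference, the call that nfsm_rpchead() builds for RPCAUTH_UNIX
 * looks like this on the wire; every field below is one 32-bit XDR
 * word (layout follows from the code above, see also RFC 1831):
 *
 *	xid, CALL, rpcvers=2, prog, vers, proc,
 *	AUTH_UNIX, cred length, stamp=0, hostname length=0,
 *	uid, gid, ngroups, group[0..ngroups-1],
 *	AUTH_NULL, verifier length=0
 */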
631 
632 /*
633  * copies mbuf chain to the uio scatter/gather list
634  */
635 int
636 nfsm_mbuftouio(struct mbuf **mrep, struct uio *uiop, int siz, caddr_t *dpos)
637 {
638 	char *mbufcp, *uiocp;
639 	int xfer, left, len;
640 	struct mbuf *mp;
641 	long uiosiz, rem;
642 	int error = 0;
643 
644 	mp = *mrep;
645 	mbufcp = *dpos;
646 	len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
647 	rem = nfsm_padlen(siz);
648 	while (siz > 0) {
649 		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
650 			return (EFBIG);
651 		left = uiop->uio_iov->iov_len;
652 		uiocp = uiop->uio_iov->iov_base;
653 		if (left > siz)
654 			left = siz;
655 		uiosiz = left;
656 		while (left > 0) {
657 			while (len == 0) {
658 				mp = mp->m_next;
659 				if (mp == NULL)
660 					return (EBADRPC);
661 				mbufcp = mtod(mp, caddr_t);
662 				len = mp->m_len;
663 			}
664 			xfer = (left > len) ? len : left;
665 			if (uiop->uio_segflg == UIO_SYSSPACE)
666 				memcpy(uiocp, mbufcp, xfer);
667 			else
668 				copyout(mbufcp, uiocp, xfer);
669 			left -= xfer;
670 			len -= xfer;
671 			mbufcp += xfer;
672 			uiocp += xfer;
673 			uiop->uio_offset += xfer;
674 			uiop->uio_resid -= xfer;
675 		}
676 		if (uiop->uio_iov->iov_len <= siz) {
677 			uiop->uio_iovcnt--;
678 			uiop->uio_iov++;
679 		} else {
680 			uiop->uio_iov->iov_base =
681 			    (char *)uiop->uio_iov->iov_base + uiosiz;
682 			uiop->uio_iov->iov_len -= uiosiz;
683 		}
684 		siz -= uiosiz;
685 	}
686 	*dpos = mbufcp;
687 	*mrep = mp;
688 	if (rem > 0) {
689 		if (len < rem)
690 			error = nfs_adv(mrep, dpos, rem, len);
691 		else
692 			*dpos += rem;
693 	}
694 	return (error);
695 }
696 
697 /*
698  * Copy a uio scatter/gather list to an mbuf chain.
699  */
700 void
701 nfsm_uiotombuf(struct mbuf **mp, struct uio *uiop, size_t len)
702 {
703 	struct mbuf *mb, *mb2;
704 	size_t xfer, pad;
705 
706 	mb = *mp;
707 
708 	pad = nfsm_padlen(len);
709 
710 	/* XXX -- the following should be done by the caller */
711 	uiop->uio_resid = len;
712 	uiop->uio_rw = UIO_WRITE;
713 
714 	while (len) {
715 		xfer = ulmin(len, M_TRAILINGSPACE(mb));
716 		uiomove(mb_offset(mb), xfer, uiop);
717 		mb->m_len += xfer;
718 		len -= xfer;
719 		if (len > 0) {
720 			MGET(mb2, M_WAIT, MT_DATA);
721 			if (len > MLEN)
722 				MCLGET(mb2, M_WAIT);
723 			mb2->m_len = 0;
724 			mb->m_next = mb2;
725 			mb = mb2;
726 		}
727 	}
728 
729 	if (pad > 0) {
730 		if (pad > M_TRAILINGSPACE(mb)) {
731 			MGET(mb2, M_WAIT, MT_DATA);
732 			mb2->m_len = 0;
733 			mb->m_next = mb2;
734 			mb = mb2;
735 		}
736 		memset(mb_offset(mb), 0, pad);
737 		mb->m_len += pad;
738 	}
739 
740 	*mp = mb;
741 }
742 
743 /*
744  * Copy a buffer to an mbuf chain
745  */
746 void
747 nfsm_buftombuf(struct mbuf **mp, void *buf, size_t len)
748 {
749 	struct iovec iov;
750 	struct uio io;
751 
752 	iov.iov_base = buf;
753 	iov.iov_len = len;
754 
755 	io.uio_iov = &iov;
756 	io.uio_iovcnt = 1;
757 	io.uio_resid = len;
758 	io.uio_segflg = UIO_SYSSPACE;
759 	io.uio_rw = UIO_WRITE;
760 
761 	nfsm_uiotombuf(mp, &io, len);
762 }
763 
764 /*
765  * Copy a string to an mbuf chain
766  */
767 void
768 nfsm_strtombuf(struct mbuf **mp, void *str, size_t len)
769 {
770 	struct iovec iov[2];
771 	struct uio io;
772 	uint32_t strlen;
773 
774 	strlen = txdr_unsigned(len);
775 
776 	iov[0].iov_base = &strlen;
777 	iov[0].iov_len = sizeof(uint32_t);
778 	iov[1].iov_base = str;
779 	iov[1].iov_len = len;
780 
781 	io.uio_iov = iov;
782 	io.uio_iovcnt = 2;
783 	io.uio_resid = sizeof(uint32_t) + len;
784 	io.uio_segflg = UIO_SYSSPACE;
785 	io.uio_rw = UIO_WRITE;
786 
787 	nfsm_uiotombuf(mp, &io, io.uio_resid);
788 }
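
/*
 * Worked example for the XDR string encoding above (the sizes are
 * hypothetical): a 10 byte NFSv3 file handle sent via nfsm_strtombuf()
 * occupies 4 bytes for the length word plus 10 bytes of data, and
 * nfsm_uiotombuf() then appends nfsm_padlen(14) == 2 zero bytes so the
 * next item starts on a 32-bit boundary, 16 bytes in total.
 */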
789 
790 /*
791  * Help break down an mbuf chain by making the first siz bytes contiguous
792  * and returning a pointer to them in *cp2.
793  * This is used by the macros nfsm_dissect and nfsm_dissecton for tough
794  * cases. (The macros use the variables dpos and dpos2.)
795  */
796 int
797 nfsm_disct(struct mbuf **mdp, caddr_t *dposp, int siz, int left, caddr_t *cp2)
798 {
799 	struct mbuf *mp, *mp2;
800 	int siz2, xfer;
801 	caddr_t p;
802 
803 	mp = *mdp;
804 	while (left == 0) {
805 		*mdp = mp = mp->m_next;
806 		if (mp == NULL)
807 			return (EBADRPC);
808 		left = mp->m_len;
809 		*dposp = mtod(mp, caddr_t);
810 	}
811 	if (left >= siz) {
812 		*cp2 = *dposp;
813 		*dposp += siz;
814 	} else if (mp->m_next == NULL) {
815 		return (EBADRPC);
816 	} else if (siz > MHLEN) {
817 		panic("nfs S too big");
818 	} else {
819 		MGET(mp2, M_WAIT, MT_DATA);
820 		mp2->m_next = mp->m_next;
821 		mp->m_next = mp2;
822 		mp->m_len -= left;
823 		mp = mp2;
824 		*cp2 = p = mtod(mp, caddr_t);
825 		bcopy(*dposp, p, left);		/* Copy what was left */
826 		siz2 = siz-left;
827 		p += left;
828 		mp2 = mp->m_next;
829 		/* Loop around copying up the siz2 bytes */
830 		while (siz2 > 0) {
831 			if (mp2 == NULL)
832 				return (EBADRPC);
833 			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
834 			if (xfer > 0) {
835 				bcopy(mtod(mp2, caddr_t), p, xfer);
836 				mp2->m_data += xfer;
837 				mp2->m_len -= xfer;
838 				p += xfer;
839 				siz2 -= xfer;
840 			}
841 			if (siz2 > 0)
842 				mp2 = mp2->m_next;
843 		}
844 		mp->m_len = siz;
845 		*mdp = mp2;
846 		*dposp = mtod(mp2, caddr_t);
847 	}
848 	return (0);
849 }
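
/*
 * Worked example (hypothetical mbuf layout): dissecting a 4 byte XDR
 * word when only 2 bytes are left in the current mbuf takes the final
 * branch above; a fresh mbuf is spliced in after the current one, the
 * 2 leftover bytes are copied into it, the missing 2 bytes are pulled
 * forward from the following mbuf(s), and *cp2 ends up pointing at 4
 * contiguous bytes.
 */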
850 
851 /*
852  * Advance the position in the mbuf chain.
853  */
854 int
855 nfs_adv(struct mbuf **mdp, caddr_t *dposp, int offs, int left)
856 {
857 	struct mbuf *m;
858 	int s;
859 
860 	m = *mdp;
861 	s = left;
862 	while (s < offs) {
863 		offs -= s;
864 		m = m->m_next;
865 		if (m == NULL)
866 			return (EBADRPC);
867 		s = m->m_len;
868 	}
869 	*mdp = m;
870 	*dposp = mtod(m, caddr_t)+offs;
871 	return (0);
872 }
873 
874 /*
875  * Called once to initialize data structures...
876  */
877 void
878 nfs_init(void)
879 {
880 	rpc_vers = txdr_unsigned(RPC_VER2);
881 	rpc_call = txdr_unsigned(RPC_CALL);
882 	rpc_reply = txdr_unsigned(RPC_REPLY);
883 	rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
884 	rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
885 	rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
886 	rpc_autherr = txdr_unsigned(RPC_AUTHERR);
887 	rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
888 	nfs_prog = txdr_unsigned(NFS_PROG);
889 	nfs_true = txdr_unsigned(1);
890 	nfs_false = txdr_unsigned(0);
891 	nfs_xdrneg1 = txdr_unsigned(-1);
892 	nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000;
893 	if (nfs_ticks < 1)
894 		nfs_ticks = 1;
895 #ifdef NFSSERVER
896 	nfsrv_init(0);			/* Init server data structures */
897 	nfsrv_initcache();		/* Init the server request cache */
898 #endif /* NFSSERVER */
899 
900 	pool_init(&nfsreqpl, sizeof(struct nfsreq), 0, 0, PR_WAITOK,
901 	    "nfsreqpl", NULL);
902 	pool_setipl(&nfsreqpl, IPL_NONE);
903 }
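
/*
 * Worked example for the nfs_ticks computation above (the values are
 * hypothetical): with hz = 100 and NFS_TICKINTVL = 5 (milliseconds),
 * nfs_ticks = (100 * 5 + 500) / 1000 = 1; the "+ 500" rounds to the
 * nearest tick and the final check keeps the result from reaching 0.
 */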
904 
905 #ifdef NFSCLIENT
906 int
907 nfs_vfs_init(struct vfsconf *vfsp)
908 {
909 	extern struct pool nfs_node_pool;
910 
911 	TAILQ_INIT(&nfs_bufq);
912 
913 	pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, 0, PR_WAITOK,
914 	    "nfsnodepl", NULL);
915 	pool_setipl(&nfs_node_pool, IPL_NONE);
916 
917 	return (0);
918 }
919 
920 /*
921  * Attribute cache routines.
922  * nfs_loadattrcache() - loads or updates the cache contents from attributes
923  *	that are on the mbuf list
924  * nfs_getattrcache() - returns valid attributes if found in cache, returns
925  *	error otherwise
926  */
927 
928 /*
929  * Load the attribute cache (which lives in the nfsnode entry) with
930  * the values from the mbuf list and,
931  * iff vaper is not NULL,
932  *    copy the attributes to *vaper as well.
933  */
934 int
935 nfs_loadattrcache(struct vnode **vpp, struct mbuf **mdp, caddr_t *dposp,
936     struct vattr *vaper)
937 {
938 	struct vnode *vp = *vpp;
939 	struct vattr *vap;
940 	struct nfs_fattr *fp;
941 	extern struct vops nfs_specvops;
942 	struct nfsnode *np;
943 	int32_t t1;
944 	caddr_t cp2;
945 	int error = 0;
946 	int32_t rdev;
947 	struct mbuf *md;
948 	enum vtype vtyp;
949 	mode_t vmode;
950 	struct timespec mtime;
951 	struct vnode *nvp;
952 	int v3 = NFS_ISV3(vp);
953 	uid_t uid;
954 	gid_t gid;
955 
956 	md = *mdp;
957 	t1 = (mtod(md, caddr_t) + md->m_len) - *dposp;
958 	error = nfsm_disct(mdp, dposp, NFSX_FATTR(v3), t1, &cp2);
959 	if (error)
960 		return (error);
961 	fp = (struct nfs_fattr *)cp2;
962 	if (v3) {
963 		vtyp = nfsv3tov_type(fp->fa_type);
964 		vmode = fxdr_unsigned(mode_t, fp->fa_mode);
965 		rdev = makedev(fxdr_unsigned(u_int32_t, fp->fa3_rdev.specdata1),
966 			fxdr_unsigned(u_int32_t, fp->fa3_rdev.specdata2));
967 		fxdr_nfsv3time(&fp->fa3_mtime, &mtime);
968 	} else {
969 		vtyp = nfsv2tov_type(fp->fa_type);
970 		vmode = fxdr_unsigned(mode_t, fp->fa_mode);
971 		if (vtyp == VNON || vtyp == VREG)
972 			vtyp = IFTOVT(vmode);
973 		rdev = fxdr_unsigned(int32_t, fp->fa2_rdev);
974 		fxdr_nfsv2time(&fp->fa2_mtime, &mtime);
975 
976 		/*
977 		 * Really ugly NFSv2 kludge.
978 		 */
979 		if (vtyp == VCHR && rdev == 0xffffffff)
980 			vtyp = VFIFO;
981 	}
982 
983 	/*
984 	 * If v_type == VNON it is a new node, so fill in the v_type,
985 	 * n_mtime fields. Check to see if it represents a special
986 	 * device, and if so, check for a possible alias. Once the
987 	 * correct vnode has been obtained, fill in the rest of the
988 	 * information.
989 	 */
990 	np = VTONFS(vp);
991 	if (vp->v_type != vtyp) {
992 		cache_purge(vp);
993 		vp->v_type = vtyp;
994 		if (vp->v_type == VFIFO) {
995 #ifndef FIFO
996 			return (EOPNOTSUPP);
997 #else
998 			extern struct vops nfs_fifovops;
999 			vp->v_op = &nfs_fifovops;
1000 #endif /* FIFO */
1001 		}
1002 		if (vp->v_type == VCHR || vp->v_type == VBLK) {
1003 			vp->v_op = &nfs_specvops;
1004 			nvp = checkalias(vp, (dev_t)rdev, vp->v_mount);
1005 			if (nvp) {
1006 				/*
1007 				 * Discard unneeded vnode, but save its nfsnode.
1008 				 * Since the nfsnode does not have a lock, its
1009 				 * vnode lock has to be carried over.
1010 				 */
1011 
1012 				nvp->v_data = vp->v_data;
1013 				vp->v_data = NULL;
1014 				vp->v_op = &spec_vops;
1015 				vrele(vp);
1016 				vgone(vp);
1017 				/*
1018 				 * Reinitialize aliased node.
1019 				 */
1020 				np->n_vnode = nvp;
1021 				*vpp = vp = nvp;
1022 			}
1023 		}
1024 		np->n_mtime = mtime;
1025 	}
1026 	vap = &np->n_vattr;
1027 	vap->va_type = vtyp;
1028 	vap->va_rdev = (dev_t)rdev;
1029 	vap->va_mtime = mtime;
1030 	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
1031 
1032 	uid = fxdr_unsigned(uid_t, fp->fa_uid);
1033 	gid = fxdr_unsigned(gid_t, fp->fa_gid);
1034 	/* Invalidate access cache if uid, gid or mode changed. */
1035 	if (np->n_accstamp != -1 &&
1036 	    (gid != vap->va_gid || uid != vap->va_uid ||
1037 	    (vmode & 07777) != vap->va_mode))
1038 		np->n_accstamp = -1;
1039 
1040 	vap->va_mode = (vmode & 07777);
1041 
1042 	switch (vtyp) {
1043 	case VBLK:
1044 		vap->va_blocksize = BLKDEV_IOSIZE;
1045 		break;
1046 	case VCHR:
1047 		vap->va_blocksize = MAXBSIZE;
1048 		break;
1049 	default:
1050 		vap->va_blocksize = v3 ? vp->v_mount->mnt_stat.f_iosize :
1051 		     fxdr_unsigned(int32_t, fp->fa2_blocksize);
1052 		break;
1053 	}
1054 	if (v3) {
1055 		vap->va_nlink = fxdr_unsigned(nlink_t, fp->fa_nlink);
1056 		vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
1057 		vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
1058 		vap->va_size = fxdr_hyper(&fp->fa3_size);
1059 		vap->va_bytes = fxdr_hyper(&fp->fa3_used);
1060 		vap->va_fileid = fxdr_hyper(&fp->fa3_fileid);
1061 		fxdr_nfsv3time(&fp->fa3_atime, &vap->va_atime);
1062 		fxdr_nfsv3time(&fp->fa3_ctime, &vap->va_ctime);
1063 		vap->va_flags = 0;
1064 		vap->va_filerev = 0;
1065 	} else {
1066 		vap->va_nlink = fxdr_unsigned(nlink_t, fp->fa_nlink);
1067 		vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
1068 		vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
1069 		vap->va_size = fxdr_unsigned(u_int32_t, fp->fa2_size);
1070 		vap->va_bytes =
1071 		    (u_quad_t)fxdr_unsigned(int32_t, fp->fa2_blocks) *
1072 		    NFS_FABLKSIZE;
1073 		vap->va_fileid = fxdr_unsigned(int32_t, fp->fa2_fileid);
1074 		fxdr_nfsv2time(&fp->fa2_atime, &vap->va_atime);
1075 		vap->va_flags = 0;
1076 		vap->va_ctime.tv_sec = fxdr_unsigned(u_int32_t,
1077 		    fp->fa2_ctime.nfsv2_sec);
1078 		vap->va_ctime.tv_nsec = 0;
1079 		vap->va_gen = fxdr_unsigned(u_int32_t,fp->fa2_ctime.nfsv2_usec);
1080 		vap->va_filerev = 0;
1081 	}
1082 
1083 	if (vap->va_size != np->n_size) {
1084 		if (vap->va_type == VREG) {
1085 			if (np->n_flag & NMODIFIED) {
1086 				if (vap->va_size < np->n_size)
1087 					vap->va_size = np->n_size;
1088 				else
1089 					np->n_size = vap->va_size;
1090 			} else
1091 				np->n_size = vap->va_size;
1092 			uvm_vnp_setsize(vp, np->n_size);
1093 		} else
1094 			np->n_size = vap->va_size;
1095 	}
1096 	np->n_attrstamp = time_second;
1097 	if (vaper != NULL) {
1098 		bcopy(vap, vaper, sizeof(*vap));
1099 		if (np->n_flag & NCHG) {
1100 			if (np->n_flag & NACC)
1101 				vaper->va_atime = np->n_atim;
1102 			if (np->n_flag & NUPD)
1103 				vaper->va_mtime = np->n_mtim;
1104 		}
1105 	}
1106 	return (0);
1107 }
1108 
1109 int
1110 nfs_attrtimeo(struct nfsnode *np)
1111 {
1112 	struct vnode *vp = np->n_vnode;
1113 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1114 	int tenthage = (time_second - np->n_mtime.tv_sec) / 10;
1115 	int minto, maxto;
1116 
1117 	if (vp->v_type == VDIR) {
1118 		maxto = nmp->nm_acdirmax;
1119 		minto = nmp->nm_acdirmin;
1120 	} else {
1122 		maxto = nmp->nm_acregmax;
1123 		minto = nmp->nm_acregmin;
1124 	}
1125 
1126 	if (np->n_flag & NMODIFIED || tenthage < minto)
1127 		return minto;
1128 	else if (tenthage < maxto)
1129 		return tenthage;
1130 	else
1131 		return maxto;
1132 }
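
/*
 * Worked example (hypothetical mount options): with nm_acregmin = 5
 * and nm_acregmax = 60, a regular file last modified 300 seconds ago
 * gets tenthage = 30, so its cached attributes are trusted for 30
 * seconds; a file modified within the last 50 seconds (or one flagged
 * NMODIFIED) gets only the 5 second minimum, and anything older than
 * 600 seconds is capped at 60 seconds.
 */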
1133 
1134 /*
1135  * Check the time stamp.
1136  * If the cache is valid, copy the contents to *vaper and return 0;
1137  * otherwise return an error.
1138  */
1139 int
1140 nfs_getattrcache(struct vnode *vp, struct vattr *vaper)
1141 {
1142 	struct nfsnode *np = VTONFS(vp);
1143 	struct vattr *vap;
1144 
1145 	if (np->n_attrstamp == 0 ||
1146 	    (time_second - np->n_attrstamp) >= nfs_attrtimeo(np)) {
1147 		nfsstats.attrcache_misses++;
1148 		return (ENOENT);
1149 	}
1150 	nfsstats.attrcache_hits++;
1151 	vap = &np->n_vattr;
1152 	if (vap->va_size != np->n_size) {
1153 		if (vap->va_type == VREG) {
1154 			if (np->n_flag & NMODIFIED) {
1155 				if (vap->va_size < np->n_size)
1156 					vap->va_size = np->n_size;
1157 				else
1158 					np->n_size = vap->va_size;
1159 			} else
1160 				np->n_size = vap->va_size;
1161 			uvm_vnp_setsize(vp, np->n_size);
1162 		} else
1163 			np->n_size = vap->va_size;
1164 	}
1165 	bcopy(vap, vaper, sizeof(struct vattr));
1166 	if (np->n_flag & NCHG) {
1167 		if (np->n_flag & NACC)
1168 			vaper->va_atime = np->n_atim;
1169 		if (np->n_flag & NUPD)
1170 			vaper->va_mtime = np->n_mtim;
1171 	}
1172 	return (0);
1173 }
1174 #endif /* NFSCLIENT */
1175 
1176 /*
1177  * Set up nameidata for a lookup() call and do it
1178  */
1179 int
1180 nfs_namei(struct nameidata *ndp, fhandle_t *fhp, int len,
1181     struct nfssvc_sock *slp, struct mbuf *nam, struct mbuf **mdp,
1182     caddr_t *dposp, struct vnode **retdirp, struct proc *p)
1183 {
1184 	int i, rem;
1185 	struct mbuf *md;
1186 	char *fromcp, *tocp;
1187 	struct vnode *dp;
1188 	int error, rdonly;
1189 	struct componentname *cnp = &ndp->ni_cnd;
1190 
1191 	*retdirp = NULL;
1192 	cnp->cn_pnbuf = pool_get(&namei_pool, PR_WAITOK);
1193 	/*
1194 	 * Copy the name from the mbuf list to cnp->cn_pnbuf
1195 	 * and set the various ndp fields appropriately.
1196 	 */
1197 	fromcp = *dposp;
1198 	tocp = cnp->cn_pnbuf;
1199 	md = *mdp;
1200 	rem = mtod(md, caddr_t) + md->m_len - fromcp;
1201 	for (i = 0; i < len; i++) {
1202 		while (rem == 0) {
1203 			md = md->m_next;
1204 			if (md == NULL) {
1205 				error = EBADRPC;
1206 				goto out;
1207 			}
1208 			fromcp = mtod(md, caddr_t);
1209 			rem = md->m_len;
1210 		}
1211 		if (*fromcp == '\0' || *fromcp == '/') {
1212 			error = EACCES;
1213 			goto out;
1214 		}
1215 		*tocp++ = *fromcp++;
1216 		rem--;
1217 	}
1218 	*tocp = '\0';
1219 	*mdp = md;
1220 	*dposp = fromcp;
1221 	len = nfsm_padlen(len);
1222 	if (len > 0) {
1223 		if (rem >= len)
1224 			*dposp += len;
1225 		else if ((error = nfs_adv(mdp, dposp, len, rem)) != 0)
1226 			goto out;
1227 	}
1228 	ndp->ni_pathlen = tocp - cnp->cn_pnbuf;
1229 	cnp->cn_nameptr = cnp->cn_pnbuf;
1230 	/*
1231 	 * Extract and set starting directory.
1232 	 */
1233 	error = nfsrv_fhtovp(fhp, 0, &dp, ndp->ni_cnd.cn_cred, slp,
1234 	    nam, &rdonly);
1235 	if (error)
1236 		goto out;
1237 	if (dp->v_type != VDIR) {
1238 		vrele(dp);
1239 		error = ENOTDIR;
1240 		goto out;
1241 	}
1242 	vref(dp);
1243 	*retdirp = dp;
1244 	ndp->ni_startdir = dp;
1245 	if (rdonly)
1246 		cnp->cn_flags |= (NOCROSSMOUNT | RDONLY);
1247 	else
1248 		cnp->cn_flags |= NOCROSSMOUNT;
1249 
1250 	/*
1251 	 * Should be 0; if not, someone didn't init ndp with NDINIT,
1252 	 * so go find and murder the offender messily.
1253 	 */
1254 	KASSERT (ndp->ni_p_path == NULL && ndp->ni_p_size == 0);
1255 
1256 	/*
1257 	 * And call vfs_lookup() to do the real work
1258 	 */
1259 	cnp->cn_proc = p;
1260 	error = vfs_lookup(ndp);
1261 	if (error)
1262 		goto out;
1263 	/*
1264 	 * Check for encountering a symbolic link
1265 	 */
1266 	if (cnp->cn_flags & ISSYMLINK) {
1267 		if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1)
1268 			vput(ndp->ni_dvp);
1269 		else
1270 			vrele(ndp->ni_dvp);
1271 		vput(ndp->ni_vp);
1272 		ndp->ni_vp = NULL;
1273 		error = EINVAL;
1274 		goto out;
1275 	}
1276 	/*
1277 	 * Check for saved name request
1278 	 */
1279 	if (cnp->cn_flags & (SAVENAME | SAVESTART)) {
1280 		cnp->cn_flags |= HASBUF;
1281 		return (0);
1282 	}
1283 out:
1284 	pool_put(&namei_pool, cnp->cn_pnbuf);
1285 	return (error);
1286 }
1287 
1288 /*
1289  * A fiddled version of m_adj() that ensures null fill to a long
1290  * boundary and only trims off the back end
1291  */
1292 void
1293 nfsm_adj(struct mbuf *mp, int len, int nul)
1294 {
1295 	struct mbuf *m;
1296 	int count, i;
1297 	char *cp;
1298 
1299 	/*
1300 	 * Trim from tail.  Scan the mbuf chain,
1301 	 * calculating its length and finding the last mbuf.
1302 	 * If the adjustment only affects this mbuf, then just
1303 	 * adjust and return.  Otherwise, rescan and truncate
1304 	 * after the remaining size.
1305 	 */
1306 	count = 0;
1307 	m = mp;
1308 	for (;;) {
1309 		count += m->m_len;
1310 		if (m->m_next == NULL)
1311 			break;
1312 		m = m->m_next;
1313 	}
1314 	if (m->m_len > len) {
1315 		m->m_len -= len;
1316 		if (nul > 0) {
1317 			cp = mtod(m, caddr_t)+m->m_len-nul;
1318 			for (i = 0; i < nul; i++)
1319 				*cp++ = '\0';
1320 		}
1321 		return;
1322 	}
1323 	count -= len;
1324 	if (count < 0)
1325 		count = 0;
1326 	/*
1327 	 * Correct length for chain is "count".
1328 	 * Find the mbuf with last data, adjust its length,
1329 	 * and toss data from remaining mbufs on chain.
1330 	 */
1331 	for (m = mp; m; m = m->m_next) {
1332 		if (m->m_len >= count) {
1333 			m->m_len = count;
1334 			if (nul > 0) {
1335 				cp = mtod(m, caddr_t)+m->m_len-nul;
1336 				for (i = 0; i < nul; i++)
1337 					*cp++ = '\0';
1338 			}
1339 			break;
1340 		}
1341 		count -= m->m_len;
1342 	}
1343 	for (m = m->m_next; m; m = m->m_next)
1344 		m->m_len = 0;
1345 }
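
/*
 * Worked example (hypothetical chain): trimming len = 60 with nul = 4
 * from a chain of 100 + 100 + 50 byte mbufs leaves 190 bytes; the
 * second mbuf is cut to 90 bytes, its last 4 data bytes are zeroed,
 * and the trailing 50 byte mbuf is emptied rather than freed.
 */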
1346 
1347 /*
1348  * Make these functions instead of macros, so that the kernel text size
1349  * doesn't get too big...
1350  */
1351 void
1352 nfsm_srvwcc(struct nfsrv_descript *nfsd, int before_ret,
1353     struct vattr *before_vap, int after_ret, struct vattr *after_vap,
1354     struct nfsm_info *info)
1355 {
1356 	u_int32_t *tl;
1357 
1358 	if (before_ret) {
1359 		tl = nfsm_build(&info->nmi_mb, NFSX_UNSIGNED);
1360 		*tl = nfs_false;
1361 	} else {
1362 		tl = nfsm_build(&info->nmi_mb, 7 * NFSX_UNSIGNED);
1363 		*tl++ = nfs_true;
1364 		txdr_hyper(before_vap->va_size, tl);
1365 		tl += 2;
1366 		txdr_nfsv3time(&(before_vap->va_mtime), tl);
1367 		tl += 2;
1368 		txdr_nfsv3time(&(before_vap->va_ctime), tl);
1369 	}
1370 	nfsm_srvpostop_attr(nfsd, after_ret, after_vap, info);
1371 }
1372 
1373 void
1374 nfsm_srvpostop_attr(struct nfsrv_descript *nfsd, int after_ret,
1375     struct vattr *after_vap, struct nfsm_info *info)
1376 {
1377 	u_int32_t *tl;
1378 	struct nfs_fattr *fp;
1379 
1380 	if (after_ret) {
1381 		tl = nfsm_build(&info->nmi_mb, NFSX_UNSIGNED);
1382 		*tl = nfs_false;
1383 	} else {
1384 		tl = nfsm_build(&info->nmi_mb, NFSX_UNSIGNED + NFSX_V3FATTR);
1385 		*tl++ = nfs_true;
1386 		fp = (struct nfs_fattr *)tl;
1387 		nfsm_srvfattr(nfsd, after_vap, fp);
1388 	}
1389 }
1390 
1391 void
1392 nfsm_srvfattr(struct nfsrv_descript *nfsd, struct vattr *vap,
1393     struct nfs_fattr *fp)
1394 {
1395 
1396 	fp->fa_nlink = txdr_unsigned(vap->va_nlink);
1397 	fp->fa_uid = txdr_unsigned(vap->va_uid);
1398 	fp->fa_gid = txdr_unsigned(vap->va_gid);
1399 	if (nfsd->nd_flag & ND_NFSV3) {
1400 		fp->fa_type = vtonfsv3_type(vap->va_type);
1401 		fp->fa_mode = vtonfsv3_mode(vap->va_mode);
1402 		txdr_hyper(vap->va_size, &fp->fa3_size);
1403 		txdr_hyper(vap->va_bytes, &fp->fa3_used);
1404 		fp->fa3_rdev.specdata1 = txdr_unsigned(major(vap->va_rdev));
1405 		fp->fa3_rdev.specdata2 = txdr_unsigned(minor(vap->va_rdev));
1406 		fp->fa3_fsid.nfsuquad[0] = 0;
1407 		fp->fa3_fsid.nfsuquad[1] = txdr_unsigned(vap->va_fsid);
1408 		txdr_hyper(vap->va_fileid, &fp->fa3_fileid);
1409 		txdr_nfsv3time(&vap->va_atime, &fp->fa3_atime);
1410 		txdr_nfsv3time(&vap->va_mtime, &fp->fa3_mtime);
1411 		txdr_nfsv3time(&vap->va_ctime, &fp->fa3_ctime);
1412 	} else {
1413 		fp->fa_type = vtonfsv2_type(vap->va_type);
1414 		fp->fa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1415 		fp->fa2_size = txdr_unsigned(vap->va_size);
1416 		fp->fa2_blocksize = txdr_unsigned(vap->va_blocksize);
1417 		if (vap->va_type == VFIFO)
1418 			fp->fa2_rdev = 0xffffffff;
1419 		else
1420 			fp->fa2_rdev = txdr_unsigned(vap->va_rdev);
1421 		fp->fa2_blocks = txdr_unsigned(vap->va_bytes / NFS_FABLKSIZE);
1422 		fp->fa2_fsid = txdr_unsigned(vap->va_fsid);
1423 		fp->fa2_fileid = txdr_unsigned((u_int32_t)vap->va_fileid);
1424 		txdr_nfsv2time(&vap->va_atime, &fp->fa2_atime);
1425 		txdr_nfsv2time(&vap->va_mtime, &fp->fa2_mtime);
1426 		txdr_nfsv2time(&vap->va_ctime, &fp->fa2_ctime);
1427 	}
1428 }
1429 
1430 /*
1431  * nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
1432  * 	- look up fsid in mount list (if not found, return an error)
1433  *	- get vp and export rights by calling VFS_FHTOVP() and VFS_CHECKEXP()
1434  *	- if cred->cr_uid == 0 or MNT_EXPORTANON is set, replace cred with credanon
1435  *	- if lockflag is not set, unlock the vnode with VOP_UNLOCK()
1436  */
1437 int
1438 nfsrv_fhtovp(fhandle_t *fhp, int lockflag, struct vnode **vpp,
1439     struct ucred *cred, struct nfssvc_sock *slp, struct mbuf *nam,
1440     int *rdonlyp)
1441 {
1442 	struct proc *p = curproc;	/* XXX */
1443 	struct mount *mp;
1444 	int i;
1445 	struct ucred *credanon;
1446 	int error, exflags;
1447 	struct sockaddr_in *saddr;
1448 
1449 	*vpp = NULL;
1450 	mp = vfs_getvfs(&fhp->fh_fsid);
1451 
1452 	if (!mp)
1453 		return (ESTALE);
1454 	error = VFS_CHECKEXP(mp, nam, &exflags, &credanon);
1455 	if (error)
1456 		return (error);
1457 	error = VFS_FHTOVP(mp, &fhp->fh_fid, vpp);
1458 	if (error)
1459 		return (error);
1460 
1461 	saddr = mtod(nam, struct sockaddr_in *);
1462 	if (saddr->sin_family == AF_INET &&
1463 	    (ntohs(saddr->sin_port) >= IPPORT_RESERVED ||
1464 	    (slp->ns_so->so_type == SOCK_STREAM && ntohs(saddr->sin_port) == 20))) {
1465 		vput(*vpp);
1466 		return (NFSERR_AUTHERR | AUTH_TOOWEAK);
1467 	}
1468 
1469 	/* Check/setup credentials. */
1470 	if (cred->cr_uid == 0 || (exflags & MNT_EXPORTANON)) {
1471 		cred->cr_uid = credanon->cr_uid;
1472 		cred->cr_gid = credanon->cr_gid;
1473 		for (i = 0; i < credanon->cr_ngroups && i < NGROUPS_MAX; i++)
1474 			cred->cr_groups[i] = credanon->cr_groups[i];
1475 		cred->cr_ngroups = i;
1476 	}
1477 	if (exflags & MNT_EXRDONLY)
1478 		*rdonlyp = 1;
1479 	else
1480 		*rdonlyp = 0;
1481 	if (!lockflag)
1482 		VOP_UNLOCK(*vpp, p);
1483 
1484 	return (0);
1485 }
1486 
1487 /*
1488  * This function compares two net addresses by family and returns non-zero
1489  * if they are the same host; if there is any doubt, it returns 0.
1490  * The AF_INET family is handled as a special case so that address mbufs
1491  * don't need to be saved to store "struct in_addr", which is only 4 bytes.
1492  */
1493 int
1494 netaddr_match(int family, union nethostaddr *haddr, struct mbuf *nam)
1495 {
1496 	struct sockaddr_in *inetaddr;
1497 
1498 	switch (family) {
1499 	case AF_INET:
1500 		inetaddr = mtod(nam, struct sockaddr_in *);
1501 		if (inetaddr->sin_family == AF_INET &&
1502 		    inetaddr->sin_addr.s_addr == haddr->had_inetaddr)
1503 			return (1);
1504 		break;
1505 	default:
1506 		break;
1507 	}
1508 	return (0);
1509 }
1510 
1511 /*
1512  * The write verifier has changed (probably due to a server reboot), so all
1513  * B_NEEDCOMMIT blocks will have to be written again. Since they are on the
1514  * dirty block list as B_DELWRI, all this takes is clearing the B_NEEDCOMMIT
1515  * flag. Once done the new write verifier can be set for the mount point.
1516  */
1517 void
1518 nfs_clearcommit(struct mount *mp)
1519 {
1520 	struct vnode *vp, *nvp;
1521 	struct buf *bp, *nbp;
1522 	int s;
1523 
1524 	s = splbio();
1525 loop:
1526 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
1527 		if (vp->v_mount != mp)	/* Paranoia */
1528 			goto loop;
1529 		nvp = LIST_NEXT(vp, v_mntvnodes);
1530 		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) {
1531 			nbp = LIST_NEXT(bp, b_vnbufs);
1532 			if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
1533 				== (B_DELWRI | B_NEEDCOMMIT))
1534 				bp->b_flags &= ~B_NEEDCOMMIT;
1535 		}
1536 	}
1537 	splx(s);
1538 }
1539 
1540 void
1541 nfs_merge_commit_ranges(struct vnode *vp)
1542 {
1543 	struct nfsnode *np = VTONFS(vp);
1544 
1545 	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID)) {
1546 		np->n_pushedlo = np->n_pushlo;
1547 		np->n_pushedhi = np->n_pushhi;
1548 		np->n_commitflags |= NFS_COMMIT_PUSHED_VALID;
1549 	} else {
1550 		if (np->n_pushlo < np->n_pushedlo)
1551 			np->n_pushedlo = np->n_pushlo;
1552 		if (np->n_pushhi > np->n_pushedhi)
1553 			np->n_pushedhi = np->n_pushhi;
1554 	}
1555 
1556 	np->n_pushlo = np->n_pushhi = 0;
1557 	np->n_commitflags &= ~NFS_COMMIT_PUSH_VALID;
1558 }
1559 
1560 int
1561 nfs_in_committed_range(struct vnode *vp, struct buf *bp)
1562 {
1563 	struct nfsnode *np = VTONFS(vp);
1564 	off_t lo, hi;
1565 
1566 	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID))
1567 		return 0;
1568 	lo = (off_t)bp->b_blkno * DEV_BSIZE;
1569 	hi = lo + bp->b_dirtyend;
1570 
1571 	return (lo >= np->n_pushedlo && hi <= np->n_pushedhi);
1572 }
1573 
1574 int
1575 nfs_in_tobecommitted_range(struct vnode *vp, struct buf *bp)
1576 {
1577 	struct nfsnode *np = VTONFS(vp);
1578 	off_t lo, hi;
1579 
1580 	if (!(np->n_commitflags & NFS_COMMIT_PUSH_VALID))
1581 		return 0;
1582 	lo = (off_t)bp->b_blkno * DEV_BSIZE;
1583 	hi = lo + bp->b_dirtyend;
1584 
1585 	return (lo >= np->n_pushlo && hi <= np->n_pushhi);
1586 }
1587 
1588 void
1589 nfs_add_committed_range(struct vnode *vp, struct buf *bp)
1590 {
1591 	struct nfsnode *np = VTONFS(vp);
1592 	off_t lo, hi;
1593 
1594 	lo = (off_t)bp->b_blkno * DEV_BSIZE;
1595 	hi = lo + bp->b_dirtyend;
1596 
1597 	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID)) {
1598 		np->n_pushedlo = lo;
1599 		np->n_pushedhi = hi;
1600 		np->n_commitflags |= NFS_COMMIT_PUSHED_VALID;
1601 	} else {
1602 		if (hi > np->n_pushedhi)
1603 			np->n_pushedhi = hi;
1604 		if (lo < np->n_pushedlo)
1605 			np->n_pushedlo = lo;
1606 	}
1607 }
1608 
1609 void
1610 nfs_del_committed_range(struct vnode *vp, struct buf *bp)
1611 {
1612 	struct nfsnode *np = VTONFS(vp);
1613 	off_t lo, hi;
1614 
1615 	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID))
1616 		return;
1617 
1618 	lo = (off_t)bp->b_blkno * DEV_BSIZE;
1619 	hi = lo + bp->b_dirtyend;
1620 
1621 	if (lo > np->n_pushedhi || hi < np->n_pushedlo)
1622 		return;
1623 	if (lo <= np->n_pushedlo)
1624 		np->n_pushedlo = hi;
1625 	else if (hi >= np->n_pushedhi)
1626 		np->n_pushedhi = lo;
1627 	else {
1628 		/*
1629 		 * XXX There's only one range. If the deleted range
1630 		 * is in the middle, pick the largest of the
1631 		 * contiguous ranges that it leaves.
1632 		 */
1633 		if ((lo - np->n_pushedlo) > (np->n_pushedhi - hi))
1634 			np->n_pushedhi = lo;
1635 		else
1636 			np->n_pushedlo = hi;
1637 	}
1638 }
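
/*
 * Worked example for the middle-of-range case above (byte offsets are
 * hypothetical): with a committed range of [0, 65536) and a buffer
 * covering [16384, 32768) being deleted, neither end test matches, so
 * the larger leftover piece wins and the committed range shrinks to
 * [32768, 65536); the [0, 16384) piece is simply forgotten, which is
 * safe because it only means a redundant commit later.
 */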
1639 
1640 void
1641 nfs_add_tobecommitted_range(struct vnode *vp, struct buf *bp)
1642 {
1643 	struct nfsnode *np = VTONFS(vp);
1644 	off_t lo, hi;
1645 
1646 	lo = (off_t)bp->b_blkno * DEV_BSIZE;
1647 	hi = lo + bp->b_dirtyend;
1648 
1649 	if (!(np->n_commitflags & NFS_COMMIT_PUSH_VALID)) {
1650 		np->n_pushlo = lo;
1651 		np->n_pushhi = hi;
1652 		np->n_commitflags |= NFS_COMMIT_PUSH_VALID;
1653 	} else {
1654 		if (lo < np->n_pushlo)
1655 			np->n_pushlo = lo;
1656 		if (hi > np->n_pushhi)
1657 			np->n_pushhi = hi;
1658 	}
1659 }
1660 
1661 void
1662 nfs_del_tobecommitted_range(struct vnode *vp, struct buf *bp)
1663 {
1664 	struct nfsnode *np = VTONFS(vp);
1665 	off_t lo, hi;
1666 
1667 	if (!(np->n_commitflags & NFS_COMMIT_PUSH_VALID))
1668 		return;
1669 
1670 	lo = (off_t)bp->b_blkno * DEV_BSIZE;
1671 	hi = lo + bp->b_dirtyend;
1672 
1673 	if (lo > np->n_pushhi || hi < np->n_pushlo)
1674 		return;
1675 
1676 	if (lo <= np->n_pushlo)
1677 		np->n_pushlo = hi;
1678 	else if (hi >= np->n_pushhi)
1679 		np->n_pushhi = lo;
1680 	else {
1681 		/*
1682 		 * XXX There's only one range. If the deleted range
1683 		 * is in the middle, pick the largest of the
1684 		 * contiguous ranges that it leaves.
1685 		 */
1686 		if ((lo - np->n_pushlo) > (np->n_pushhi - hi))
1687 			np->n_pushhi = lo;
1688 		else
1689 			np->n_pushlo = hi;
1690 	}
1691 }
1692 
1693 /*
1694  * Map errnos to NFS error numbers. For Version 3 also filter out error
1695  * numbers not specified for the associated procedure.
1696  */
1697 int
1698 nfsrv_errmap(struct nfsrv_descript *nd, int err)
1699 {
1700 	short *defaulterrp, *errp;
1701 
1702 	if (nd->nd_flag & ND_NFSV3) {
1703 	    if (nd->nd_procnum <= NFSPROC_COMMIT) {
1704 		errp = defaulterrp = nfsrv_v3errmap[nd->nd_procnum];
1705 		while (*++errp) {
1706 			if (*errp == err)
1707 				return (err);
1708 			else if (*errp > err)
1709 				break;
1710 		}
1711 		return ((int)*defaulterrp);
1712 	    } else
1713 		return (err & 0xffff);
1714 	}
1715 	if (err <= nitems(nfsrv_v2errmap))
1716 		return ((int)nfsrv_v2errmap[err - 1]);
1717 	return (NFSERR_IO);
1718 }
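
/*
 * Worked example: a V3 GETATTR that fails inside the server with an
 * errno that is not in nfsv3err_getattr[] above (ENOTDIR, say) gets
 * rewritten to that table's default, NFSERR_IO, before it goes out on
 * the wire, while a listed error such as NFSERR_STALE passes through
 * unchanged; for V2 the error simply goes through nfsrv_v2errmap[].
 */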
1719 
1720 /*
1721  * If full is non-zero, set all fields; otherwise set only the mode and times.
1722  */
1723 void
1724 nfsm_v3attrbuild(struct mbuf **mp, struct vattr *a, int full)
1725 {
1726 	struct mbuf *mb;
1727 	u_int32_t *tl;
1728 
1729 	mb = *mp;
1730 
1731 	if (a->va_mode != (mode_t)VNOVAL) {
1732 		tl = nfsm_build(&mb, 2 * NFSX_UNSIGNED);
1733 		*tl++ = nfs_true;
1734 		*tl = txdr_unsigned(a->va_mode);
1735 	} else {
1736 		tl = nfsm_build(&mb, NFSX_UNSIGNED);
1737 		*tl = nfs_false;
1738 	}
1739 	if (full && a->va_uid != (uid_t)VNOVAL) {
1740 		tl = nfsm_build(&mb, 2 * NFSX_UNSIGNED);
1741 		*tl++ = nfs_true;
1742 		*tl = txdr_unsigned(a->va_uid);
1743 	} else {
1744 		tl = nfsm_build(&mb, NFSX_UNSIGNED);
1745 		*tl = nfs_false;
1746 	}
1747 	if (full && a->va_gid != (gid_t)VNOVAL) {
1748 		tl = nfsm_build(&mb, 2 * NFSX_UNSIGNED);
1749 		*tl++ = nfs_true;
1750 		*tl = txdr_unsigned((a)->va_gid);
1751 	} else {
1752 		tl = nfsm_build(&mb, NFSX_UNSIGNED);
1753 		*tl = nfs_false;
1754 	}
1755 	if (full && a->va_size != VNOVAL) {
1756 		tl = nfsm_build(&mb, 3 * NFSX_UNSIGNED);
1757 		*tl++ = nfs_true;
1758 		txdr_hyper(a->va_size, tl);
1759 	} else {
1760 		tl = nfsm_build(&mb, NFSX_UNSIGNED);
1761 		*tl = nfs_false;
1762 	}
1763 	if (a->va_atime.tv_nsec != VNOVAL) {
1764 		if (a->va_atime.tv_sec != time_second) {
1765 			tl = nfsm_build(&mb, 3 * NFSX_UNSIGNED);
1766 			*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
1767 			txdr_nfsv3time(&a->va_atime, tl);
1768 		} else {
1769 			tl = nfsm_build(&mb, NFSX_UNSIGNED);
1770 			*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
1771 		}
1772 	} else {
1773 		tl = nfsm_build(&mb, NFSX_UNSIGNED);
1774 		*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
1775 	}
1776 	if (a->va_mtime.tv_nsec != VNOVAL) {
1777 		if (a->va_mtime.tv_sec != time_second) {
1778 			tl = nfsm_build(&mb, 3 * NFSX_UNSIGNED);
1779 			*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
1780 			txdr_nfsv3time(&a->va_mtime, tl);
1781 		} else {
1782 			tl = nfsm_build(&mb, NFSX_UNSIGNED);
1783 			*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
1784 		}
1785 	} else {
1786 		tl = nfsm_build(&mb, NFSX_UNSIGNED);
1787 		*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
1788 	}
1789 
1790 	*mp = mb;
1791 }
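
/*
 * Example of the sattr3 encoding produced above (the attribute values
 * are hypothetical): a setattr that only changes the mode emits
 *
 *	TRUE, mode, FALSE, FALSE, FALSE,
 *	DONTCHANGE, DONTCHANGE
 *
 * i.e. one boolean discriminant per field, with the new value
 * following only when the discriminant is TRUE (RFC 1813, sattr3).
 */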
1792 
1793 /*
1794  * Ensure a contiguous buffer len bytes long
1795  */
1796 void *
1797 nfsm_build(struct mbuf **mp, u_int len)
1798 {
1799 	struct mbuf *mb, *mb2;
1800 	caddr_t bpos;
1801 
1802 	mb = *mp;
1803 	bpos = mb_offset(mb);
1804 
1805 	if (len > M_TRAILINGSPACE(mb)) {
1806 		MGET(mb2, M_WAIT, MT_DATA);
1807 		if (len > MLEN)
1808 			panic("build > MLEN");
1809 		mb->m_next = mb2;
1810 		mb = mb2;
1811 		mb->m_len = 0;
1812 		bpos = mtod(mb, caddr_t);
1813 	}
1814 	mb->m_len += len;
1815 
1816 	*mp = mb;
1817 
1818 	return (bpos);
1819 }
1820 
1821 void
1822 nfsm_fhtom(struct nfsm_info *info, struct vnode *v, int v3)
1823 {
1824 	struct nfsnode *n = VTONFS(v);
1825 
1826 	if (v3) {
1827 		nfsm_strtombuf(&info->nmi_mb, n->n_fhp, n->n_fhsize);
1828 	} else {
1829 		nfsm_buftombuf(&info->nmi_mb, n->n_fhp, NFSX_V2FH);
1830 	}
1831 }
1832 
1833 void
1834 nfsm_srvfhtom(struct mbuf **mp, fhandle_t *f, int v3)
1835 {
1836 	if (v3) {
1837 		nfsm_strtombuf(mp, f, NFSX_V3FH);
1838 	} else {
1839 		nfsm_buftombuf(mp, f, NFSX_V2FH);
1840 	}
1841 }
1842 
1843 int
1844 nfsm_srvsattr(struct mbuf **mp, struct vattr *va, struct mbuf *mrep,
1845     caddr_t *dposp)
1846 {
1847 	struct nfsm_info	info;
1848 	uint32_t *tl, t1;
1849 	caddr_t cp2;
1850 	int error = 0;
1851 
1852 	info.nmi_md = *mp;
1853 	info.nmi_dpos = *dposp;
1854 
1855 	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1856 	if (*tl == nfs_true) {
1857 		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1858 		va->va_mode = nfstov_mode(*tl);
1859 	}
1860 
1861 	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1862 	if (*tl == nfs_true) {
1863 		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1864 		va->va_uid = fxdr_unsigned(uid_t, *tl);
1865 	}
1866 
1867 	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1868 	if (*tl == nfs_true) {
1869 		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1870 		va->va_gid = fxdr_unsigned(gid_t, *tl);
1871 	}
1872 
1873 	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1874 	if (*tl == nfs_true) {
1875 		nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1876 		va->va_size = fxdr_hyper(tl);
1877 	}
1878 
1879 	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1880 	switch (fxdr_unsigned(int, *tl)) {
1881 	case NFSV3SATTRTIME_TOCLIENT:
1882 		va->va_vaflags |= VA_UTIMES_CHANGE;
1883 		va->va_vaflags &= ~VA_UTIMES_NULL;
1884 		nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1885 		fxdr_nfsv3time(tl, &va->va_atime);
1886 		break;
1887 	case NFSV3SATTRTIME_TOSERVER:
1888 		va->va_vaflags |= VA_UTIMES_CHANGE;
1889 		getnanotime(&va->va_atime);
1890 		break;
1891 	}
1892 
1893 	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1894 	switch (fxdr_unsigned(int, *tl)) {
1895 	case NFSV3SATTRTIME_TOCLIENT:
1896 		va->va_vaflags |= VA_UTIMES_CHANGE;
1897 		va->va_vaflags &= ~VA_UTIMES_NULL;
1898 		nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1899 		fxdr_nfsv3time(tl, &va->va_mtime);
1900 		break;
1901 	case NFSV3SATTRTIME_TOSERVER:
1902 		va->va_vaflags |= VA_UTIMES_CHANGE;
1903 		getnanotime(&va->va_mtime);
1904 		break;
1905 	}
1906 
1907 	*dposp = info.nmi_dpos;
1908 	*mp = info.nmi_md;
1909 nfsmout:
1910 	return (error);
1911 }
1912 
1913 void
1914 txdr_nfsv2time(const struct timespec *from, struct nfsv2_time *to)
1915 {
1916 	if (from->tv_nsec == VNOVAL) {
1917 		to->nfsv2_sec = nfs_xdrneg1;
1918 		to->nfsv2_usec = nfs_xdrneg1;
1919 	} else if (from->tv_sec == -1) {
1920 		/*
1921 		 * can't request a time of -1; send
1922 		 * -1.000001 == {-2,999999} instead
1923 		 */
1924 		to->nfsv2_sec = htonl(-2);
1925 		to->nfsv2_usec = htonl(999999);
1926 	} else {
1927 		to->nfsv2_sec = htonl(from->tv_sec);
1928 		to->nfsv2_usec = htonl(from->tv_nsec / 1000);
1929 	}
1930 }
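
/*
 * Worked example for the special case above: a requested time of
 * tv_sec = -1 cannot be encoded as-is (it would be confused with the
 * -1 "unset" value used in the first branch), so it is sent as
 * {-2, 999999} instead, i.e. one microsecond earlier, which a v2
 * server decodes as -1.000001 seconds.
 */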
1931