/*	$OpenBSD: nfs_subs.c,v 1.151 2024/09/09 03:50:14 jsg Exp $	*/
/*	$NetBSD: nfs_subs.c,v 1.27.4.3 1996/07/08 20:34:24 jtc Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_subs.c	8.8 (Berkeley) 5/22/95
 */


/*
 * These functions support the nfsm_subs.h inline functions and help fiddle
 * mbuf chains for the nfs op functions. They do things such as creating the
 * rpc header and copying data between mbuf chains and uio lists.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/pool.h>
#include <sys/time.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_var.h>
#include <nfs/nfsm_subs.h>

#include <netinet/in.h>

#include <crypto/idgen.h>

int	nfs_attrtimeo(struct nfsnode *np);
u_int32_t nfs_get_xid(void);

/*
 * Data items converted to xdr at startup, since they are constant
 * This is kinda hokey, but may save a little time doing byte swaps
 */
u_int32_t nfs_xdrneg1;
u_int32_t rpc_call, rpc_vers, rpc_reply, rpc_msgdenied, rpc_autherr,
	rpc_mismatch, rpc_auth_unix, rpc_msgaccepted;
u_int32_t nfs_prog, nfs_true, nfs_false;

/* And other global data */
const nfstype nfsv2_type[9] =
	{ NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFNON, NFCHR, NFNON };
const nfstype nfsv3_type[9] =
	{ NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFSOCK, NFFIFO, NFNON };
const enum vtype nv2tov_type[8] =
	{ VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON, VNON };
const enum vtype nv3tov_type[8]=
	{ VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO };
int nfs_ticks;
struct nfsstats nfsstats;

/*
 * Mapping of old NFS Version 2 RPC numbers to generic numbers.
 */
const int nfsv3_procid[NFS_NPROCS] = {
	NFSPROC_NULL,
	NFSPROC_GETATTR,
	NFSPROC_SETATTR,
	NFSPROC_NOOP,
	NFSPROC_LOOKUP,
	NFSPROC_READLINK,
	NFSPROC_READ,
	NFSPROC_NOOP,
	NFSPROC_WRITE,
	NFSPROC_CREATE,
	NFSPROC_REMOVE,
	NFSPROC_RENAME,
	NFSPROC_LINK,
	NFSPROC_SYMLINK,
	NFSPROC_MKDIR,
	NFSPROC_RMDIR,
	NFSPROC_READDIR,
	NFSPROC_FSSTAT,
	NFSPROC_NOOP,
	NFSPROC_NOOP,
	NFSPROC_NOOP,
	NFSPROC_NOOP,
	NFSPROC_NOOP
};

/*
 * and the reverse mapping from generic to Version 2 procedure numbers
 */
const int nfsv2_procid[NFS_NPROCS] = {
	NFSV2PROC_NULL,
	NFSV2PROC_GETATTR,
	NFSV2PROC_SETATTR,
	NFSV2PROC_LOOKUP,
	NFSV2PROC_NOOP,
	NFSV2PROC_READLINK,
	NFSV2PROC_READ,
	NFSV2PROC_WRITE,
	NFSV2PROC_CREATE,
	NFSV2PROC_MKDIR,
	NFSV2PROC_SYMLINK,
	NFSV2PROC_CREATE,
	NFSV2PROC_REMOVE,
	NFSV2PROC_RMDIR,
	NFSV2PROC_RENAME,
	NFSV2PROC_LINK,
	NFSV2PROC_READDIR,
	NFSV2PROC_NOOP,
	NFSV2PROC_STATFS,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP
};

/*
 * Maps errno values to nfs error numbers.
 * Use NFSERR_IO as the catch all for ones not specifically defined in
 * RFC 1094.
 */
static const u_char nfsrv_v2errmap[] = {
	NFSERR_PERM, NFSERR_NOENT, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_NXIO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_ACCES, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_EXIST, NFSERR_IO, NFSERR_NODEV, NFSERR_NOTDIR,
	NFSERR_ISDIR, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_FBIG, NFSERR_NOSPC, NFSERR_IO, NFSERR_ROFS,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_NAMETOL, NFSERR_IO, NFSERR_IO,
	NFSERR_NOTEMPTY, NFSERR_IO, NFSERR_IO, NFSERR_DQUOT, NFSERR_STALE
	/* Everything after this maps to NFSERR_IO, so far */
};

/*
 * Maps errno values to nfs error numbers.
 * Although it is not obvious whether or not NFS clients really care if
 * a returned error value is in the specified list for the procedure, the
 * safest thing to do is filter them appropriately. For Version 2, the
 * X/Open XNFS document is the only specification that defines error values
 * for each RPC (The RFC simply lists all possible error values for all RPCs),
 * so I have decided to not do this for Version 2.
 * The first entry is the default error return and the rest are the valid
 * errors for that RPC in increasing numeric order.
 */
static const short nfsv3err_null[] = {
	0,
	0,
};

static const short nfsv3err_getattr[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_setattr[] = {
	NFSERR_IO,
	NFSERR_PERM,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_INVAL,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOT_SYNC,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_lookup[] = {
	NFSERR_IO,
	NFSERR_NOENT,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_NOTDIR,
	NFSERR_NAMETOL,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_access[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_readlink[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_INVAL,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_read[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_NXIO,
	NFSERR_ACCES,
	NFSERR_INVAL,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_write[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_INVAL,
	NFSERR_FBIG,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_create[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_EXIST,
	NFSERR_NOTDIR,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_NAMETOL,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_mkdir[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_EXIST,
	NFSERR_NOTDIR,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_NAMETOL,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_symlink[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_EXIST,
	NFSERR_NOTDIR,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_NAMETOL,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_mknod[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_EXIST,
	NFSERR_NOTDIR,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_NAMETOL,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	NFSERR_BADTYPE,
	0,
};

static const short nfsv3err_remove[] = {
	NFSERR_IO,
	NFSERR_NOENT,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_NOTDIR,
	NFSERR_ROFS,
	NFSERR_NAMETOL,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_rmdir[] = {
	NFSERR_IO,
	NFSERR_NOENT,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_EXIST,
	NFSERR_NOTDIR,
	NFSERR_INVAL,
	NFSERR_ROFS,
	NFSERR_NAMETOL,
	NFSERR_NOTEMPTY,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_rename[] = {
	NFSERR_IO,
	NFSERR_NOENT,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_EXIST,
	NFSERR_XDEV,
	NFSERR_NOTDIR,
	NFSERR_ISDIR,
	NFSERR_INVAL,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_MLINK,
	NFSERR_NAMETOL,
	NFSERR_NOTEMPTY,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_link[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_EXIST,
	NFSERR_XDEV,
	NFSERR_NOTDIR,
	NFSERR_INVAL,
	NFSERR_NOSPC,
	NFSERR_ROFS,
	NFSERR_MLINK,
	NFSERR_NAMETOL,
	NFSERR_DQUOT,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_NOTSUPP,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_readdir[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_NOTDIR,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_BAD_COOKIE,
	NFSERR_TOOSMALL,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_readdirplus[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_ACCES,
	NFSERR_NOTDIR,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_BAD_COOKIE,
	NFSERR_NOTSUPP,
	NFSERR_TOOSMALL,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_fsstat[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_fsinfo[] = {
	NFSERR_STALE,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_pathconf[] = {
	NFSERR_STALE,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static const short nfsv3err_commit[] = {
	NFSERR_IO,
	NFSERR_IO,
	NFSERR_STALE,
	NFSERR_BADHANDLE,
	NFSERR_SERVERFAULT,
	0,
};

static const short *nfsrv_v3errmap[] = {
	nfsv3err_null,
	nfsv3err_getattr,
	nfsv3err_setattr,
	nfsv3err_lookup,
	nfsv3err_access,
	nfsv3err_readlink,
	nfsv3err_read,
	nfsv3err_write,
	nfsv3err_create,
	nfsv3err_mkdir,
	nfsv3err_symlink,
	nfsv3err_mknod,
	nfsv3err_remove,
	nfsv3err_rmdir,
	nfsv3err_rename,
	nfsv3err_link,
	nfsv3err_readdir,
	nfsv3err_readdirplus,
	nfsv3err_fsstat,
	nfsv3err_fsinfo,
	nfsv3err_pathconf,
	nfsv3err_commit,
};

struct pool nfsreqpl;

/*
 * Create the header for an rpc request packet
 * The hsiz is the size of the rest of the nfs request header.
 * (just used to decide if a cluster is a good idea)
 */
struct mbuf *
nfsm_reqhead(int hsiz)
{
	struct mbuf *mb;

	MGET(mb, M_WAIT, MT_DATA);
	if (hsiz > MLEN)
		MCLGET(mb, M_WAIT);
	mb->m_len = 0;

	/* Finally, return values */
	return (mb);
}

/*
 * Return an unpredictable XID in XDR form.
 */
u_int32_t
nfs_get_xid(void)
{
	static struct idgen32_ctx nfs_xid_ctx;
	static int called = 0;

	if (!called) {
		called = 1;
		idgen32_init(&nfs_xid_ctx);
	}
	return (txdr_unsigned(idgen32(&nfs_xid_ctx)));
}

/*
 * Build the RPC header and fill in the authorization info.
 * Right now we are pretty centric around RPCAUTH_UNIX, in the
 * future, this function will need some love to be able to handle
 * other authorization methods, such as Kerberos.
 */
void
nfsm_rpchead(struct nfsreq *req, struct ucred *cr, int auth_type)
{
	struct mbuf *mb;
	u_int32_t *tl;
	int i, authsiz, auth_len, ngroups;

	KASSERT(auth_type == RPCAUTH_UNIX);

	/*
	 * RPCAUTH_UNIX fits in an hdr mbuf, in the future other
	 * authorization methods need to figure out their own sizes
	 * and allocate and chain mbufs accordingly.
	 */
	mb = req->r_mreq;

	/*
	 * We need to start out by finding how big the authorization cred
	 * and verifier are for the auth_type, to be able to correctly
	 * align the mbuf header/chain.
	 */
	switch (auth_type) {
	case RPCAUTH_UNIX:
		/*
		 * In the RPCAUTH_UNIX case, the size is the static
		 * part as shown in RFC1831 + the number of groups,
		 * RPCAUTH_UNIX has a zero verifier.
		 */
		if (cr->cr_ngroups > req->r_nmp->nm_numgrps)
			ngroups = req->r_nmp->nm_numgrps;
		else
			ngroups = cr->cr_ngroups;

		auth_len = (ngroups << 2) + 5 * NFSX_UNSIGNED;
		authsiz = nfsm_rndup(auth_len);
		/* The authorization size + the size of the static part */
		m_align(mb, authsiz + 10 * NFSX_UNSIGNED);
		break;
	}

	mb->m_len = 0;

	/* First the RPC header. */
	tl = nfsm_build(&mb, 6 * NFSX_UNSIGNED);

	/* Get a new (non-zero) xid */
	*tl++ = req->r_xid = nfs_get_xid();
	*tl++ = rpc_call;
	*tl++ = rpc_vers;
	*tl++ = nfs_prog;
	if (ISSET(req->r_nmp->nm_flag, NFSMNT_NFSV3)) {
		*tl++ = txdr_unsigned(NFS_VER3);
		*tl = txdr_unsigned(req->r_procnum);
	} else {
		*tl++ = txdr_unsigned(NFS_VER2);
		*tl = txdr_unsigned(nfsv2_procid[req->r_procnum]);
	}

	/* The Authorization cred and its verifier */
	switch (auth_type) {
	case RPCAUTH_UNIX:
		tl = nfsm_build(&mb, auth_len + 4 * NFSX_UNSIGNED);
		*tl++ = txdr_unsigned(RPCAUTH_UNIX);
		*tl++ = txdr_unsigned(authsiz);

		/* The authorization cred */
		*tl++ = 0;	/* stamp */
		*tl++ = 0;	/* NULL hostname */
		*tl++ = txdr_unsigned(cr->cr_uid);
		*tl++ = txdr_unsigned(cr->cr_gid);
		*tl++ = txdr_unsigned(ngroups);
		for (i = 0; i < ngroups; i++)
			*tl++ = txdr_unsigned(cr->cr_groups[i]);
		/* The authorization verifier */
		*tl++ = txdr_unsigned(RPCAUTH_NULL);
		*tl = 0;
		break;
	}

	mb->m_pkthdr.len += authsiz + 10 * NFSX_UNSIGNED;
	mb->m_pkthdr.ph_ifidx = 0;
}

/*
 * copies mbuf chain to the uio scatter/gather list
 */
int
nfsm_mbuftouio(struct mbuf **mrep, struct uio *uiop, int siz, caddr_t *dpos)
{
	char *mbufcp, *uiocp;
	int xfer, left, len;
	struct mbuf *mp;
	long uiosiz, rem;
	int error = 0;

	mp = *mrep;
	mbufcp = *dpos;
	len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
	rem = nfsm_padlen(siz);
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
			return (EFBIG);
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			while (len == 0) {
				mp = mp->m_next;
				if (mp == NULL)
					return (EBADRPC);
				mbufcp = mtod(mp, caddr_t);
				len = mp->m_len;
			}
			xfer = (left > len) ? len : left;
			if (uiop->uio_segflg == UIO_SYSSPACE)
				memcpy(uiocp, mbufcp, xfer);
			else
				copyout(mbufcp, uiocp, xfer);
			left -= xfer;
			len -= xfer;
			mbufcp += xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base =
			    (char *)uiop->uio_iov->iov_base + uiosiz;
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	*dpos = mbufcp;
	*mrep = mp;
	if (rem > 0) {
		if (len < rem)
			error = nfs_adv(mrep, dpos, rem, len);
		else
			*dpos += rem;
	}
	return (error);
}

/*
 * Copy a uio scatter/gather list to an mbuf chain.
 */
void
nfsm_uiotombuf(struct mbuf **mp, struct uio *uiop, size_t len)
{
	struct mbuf *mb, *mb2;
	size_t xfer, pad;

	mb = *mp;

	pad = nfsm_padlen(len);

	/* XXX -- the following should be done by the caller */
	uiop->uio_resid = len;
	uiop->uio_rw = UIO_WRITE;

	while (len) {
		xfer = ulmin(len, m_trailingspace(mb));
		uiomove(mb_offset(mb), xfer, uiop);
		mb->m_len += xfer;
		len -= xfer;
		if (len > 0) {
			MGET(mb2, M_WAIT, MT_DATA);
			if (len > MLEN)
				MCLGET(mb2, M_WAIT);
			mb2->m_len = 0;
			mb->m_next = mb2;
			mb = mb2;
		}
	}

	if (pad > 0) {
		if (pad > m_trailingspace(mb)) {
			MGET(mb2, M_WAIT, MT_DATA);
			mb2->m_len = 0;
			mb->m_next = mb2;
			mb = mb2;
		}
		memset(mb_offset(mb), 0, pad);
		mb->m_len += pad;
	}

	*mp = mb;
}

/*
 * Copy a buffer to an mbuf chain
 */
void
nfsm_buftombuf(struct mbuf **mp, void *buf, size_t len)
{
	struct iovec iov;
	struct uio io;

	iov.iov_base = buf;
	iov.iov_len = len;

	io.uio_iov = &iov;
	io.uio_iovcnt = 1;
	io.uio_resid = len;
	io.uio_segflg = UIO_SYSSPACE;
	io.uio_rw = UIO_WRITE;

	nfsm_uiotombuf(mp, &io, len);
}

/*
 * Copy a string to an mbuf chain
 */
void
nfsm_strtombuf(struct mbuf **mp, void *str, size_t len)
{
	struct iovec iov[2];
	struct uio io;
	uint32_t strlen;

	strlen = txdr_unsigned(len);

	iov[0].iov_base = &strlen;
	iov[0].iov_len = sizeof(uint32_t);
	iov[1].iov_base = str;
	iov[1].iov_len = len;

	io.uio_iov = iov;
	io.uio_iovcnt = 2;
	io.uio_resid = sizeof(uint32_t) + len;
	io.uio_segflg = UIO_SYSSPACE;
	io.uio_rw = UIO_WRITE;

	nfsm_uiotombuf(mp, &io, io.uio_resid);
}

/*
 * Help break down an mbuf chain by setting the first siz bytes contiguous
 * pointed to by returned val.
 * This is used by nfsm_dissect for tough cases.
 */
int
nfsm_disct(struct mbuf **mdp, caddr_t *dposp, int siz, int left, caddr_t *cp2)
{
	struct mbuf *mp, *mp2;
	int siz2, xfer;
	caddr_t p;

	mp = *mdp;
	while (left == 0) {
		*mdp = mp = mp->m_next;
		if (mp == NULL)
			return (EBADRPC);
		left = mp->m_len;
		*dposp = mtod(mp, caddr_t);
	}
	if (left >= siz) {
		*cp2 = *dposp;
		*dposp += siz;
	} else if (mp->m_next == NULL) {
		return (EBADRPC);
	} else if (siz > MHLEN) {
		panic("nfs S too big");
	} else {
		MGET(mp2, M_WAIT, MT_DATA);
		mp2->m_next = mp->m_next;
		mp->m_next = mp2;
		mp->m_len -= left;
		mp = mp2;
		*cp2 = p = mtod(mp, caddr_t);
		bcopy(*dposp, p, left);	/* Copy what was left */
		siz2 = siz - left;
		p += left;
		mp2 = mp->m_next;
		/* Loop around copying up the siz2 bytes */
		while (siz2 > 0) {
			if (mp2 == NULL)
				return (EBADRPC);
			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
			if (xfer > 0) {
				bcopy(mtod(mp2, caddr_t), p, xfer);
				mp2->m_data += xfer;
				mp2->m_len -= xfer;
				p += xfer;
				siz2 -= xfer;
			}
			if (siz2 > 0)
				mp2 = mp2->m_next;
		}
		mp->m_len = siz;
		*mdp = mp2;
		*dposp = mtod(mp2, caddr_t);
	}
	return (0);
}

/*
 * Advance the position in the mbuf chain.
 */
int
nfs_adv(struct mbuf **mdp, caddr_t *dposp, int offs, int left)
{
	struct mbuf *m;
	int s;

	m = *mdp;
	s = left;
	while (s < offs) {
		offs -= s;
		m = m->m_next;
		if (m == NULL)
			return (EBADRPC);
		s = m->m_len;
	}
	*mdp = m;
	*dposp = mtod(m, caddr_t)+offs;
	return (0);
}

/*
 * Called once to initialize data structures...
 */
void
nfs_init(void)
{
	rpc_vers = txdr_unsigned(RPC_VER2);
	rpc_call = txdr_unsigned(RPC_CALL);
	rpc_reply = txdr_unsigned(RPC_REPLY);
	rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
	rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
	rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
	rpc_autherr = txdr_unsigned(RPC_AUTHERR);
	rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
	nfs_prog = txdr_unsigned(NFS_PROG);
	nfs_true = txdr_unsigned(1);
	nfs_false = txdr_unsigned(0);
	nfs_xdrneg1 = txdr_unsigned(-1);
	nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000;
	if (nfs_ticks < 1)
		nfs_ticks = 1;
#ifdef NFSSERVER
	nfsrv_init(0);		/* Init server data structures */
	nfsrv_initcache();	/* Init the server request cache */
#endif /* NFSSERVER */

	pool_init(&nfsreqpl, sizeof(struct nfsreq), 0, IPL_NONE, PR_WAITOK,
	    "nfsreqpl", NULL);
}

#ifdef NFSCLIENT
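/*
 * Per-VFS initialization for the NFS client: set up the client buffer
 * queue and the pool that nfsnodes are allocated from.
 */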
int
nfs_vfs_init(struct vfsconf *vfsp)
{
	extern struct pool nfs_node_pool;

	TAILQ_INIT(&nfs_bufq);

	pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, IPL_NONE,
	    PR_WAITOK, "nfsnodepl", NULL);

	return (0);
}

/*
 * Attribute cache routines.
 * nfs_loadattrcache() - loads or updates the cache contents from attributes
 *	that are on the mbuf list
 * nfs_getattrcache() - returns valid attributes if found in cache, returns
 *	error otherwise
 */

/*
 * Load the attribute cache (that lives in the nfsnode entry) with
 * the values on the mbuf list and
 * Iff vap not NULL
 *	copy the attributes to *vaper
 */
int
nfs_loadattrcache(struct vnode **vpp, struct mbuf **mdp, caddr_t *dposp,
    struct vattr *vaper)
{
	struct vnode *vp = *vpp;
	struct vattr *vap;
	struct nfs_fattr *fp;
	extern const struct vops nfs_specvops;
	struct nfsnode *np;
	int32_t avail;
	int error = 0;
	int32_t rdev;
	struct mbuf *md;
	enum vtype vtyp;
	mode_t vmode;
	struct timespec mtime;
	struct vnode *nvp;
	int v3 = NFS_ISV3(vp);
	uid_t uid;
	gid_t gid;

	md = *mdp;
	avail = (mtod(md, caddr_t) + md->m_len) - *dposp;
	error = nfsm_disct(mdp, dposp, NFSX_FATTR(v3), avail, (caddr_t *)&fp);
	if (error)
		return (error);
	if (v3) {
		vtyp = nfsv3tov_type(fp->fa_type);
		vmode = fxdr_unsigned(mode_t, fp->fa_mode);
		rdev = makedev(fxdr_unsigned(u_int32_t, fp->fa3_rdev.specdata1),
		    fxdr_unsigned(u_int32_t, fp->fa3_rdev.specdata2));
		fxdr_nfsv3time(&fp->fa3_mtime, &mtime);
	} else {
		vtyp = nfsv2tov_type(fp->fa_type);
		vmode = fxdr_unsigned(mode_t, fp->fa_mode);
		if (vtyp == VNON || vtyp == VREG)
			vtyp = IFTOVT(vmode);
		rdev = fxdr_unsigned(int32_t, fp->fa2_rdev);
		fxdr_nfsv2time(&fp->fa2_mtime, &mtime);

		/*
		 * Really ugly NFSv2 kludge.
		 */
		if (vtyp == VCHR && rdev == 0xffffffff)
			vtyp = VFIFO;
	}

	/*
	 * If v_type == VNON it is a new node, so fill in the v_type,
	 * n_mtime fields. Check to see if it represents a special
	 * device, and if so, check for a possible alias. Once the
	 * correct vnode has been obtained, fill in the rest of the
	 * information.
	 */
	np = VTONFS(vp);
	if (vp->v_type != vtyp) {
		cache_purge(vp);
		vp->v_type = vtyp;
		if (vp->v_type == VFIFO) {
#ifndef FIFO
			return (EOPNOTSUPP);
#else
			extern const struct vops nfs_fifovops;
			vp->v_op = &nfs_fifovops;
#endif /* FIFO */
		}
		if (vp->v_type == VCHR || vp->v_type == VBLK) {
			vp->v_op = &nfs_specvops;
			nvp = checkalias(vp, (dev_t)rdev, vp->v_mount);
			if (nvp) {
				/*
				 * Discard unneeded vnode, but save its nfsnode.
				 * Since the nfsnode does not have a lock, its
				 * vnode lock has to be carried over.
				 */

				nvp->v_data = vp->v_data;
				vp->v_data = NULL;
				vp->v_op = &spec_vops;
				vrele(vp);
				vgone(vp);
				/*
				 * Reinitialize aliased node.
				 */
				np->n_vnode = nvp;
				*vpp = vp = nvp;
			}
		}
		np->n_mtime = mtime;
	}
	vap = &np->n_vattr;
	vap->va_type = vtyp;
	vap->va_rdev = (dev_t)rdev;
	vap->va_mtime = mtime;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];

	uid = fxdr_unsigned(uid_t, fp->fa_uid);
	gid = fxdr_unsigned(gid_t, fp->fa_gid);
	/* Invalidate access cache if uid, gid or mode changed. */
	if (np->n_accstamp != -1 &&
	    (gid != vap->va_gid || uid != vap->va_uid ||
	    (vmode & 07777) != vap->va_mode))
		np->n_accstamp = -1;

	vap->va_mode = (vmode & 07777);

	switch (vtyp) {
	case VBLK:
		vap->va_blocksize = BLKDEV_IOSIZE;
		break;
	case VCHR:
		vap->va_blocksize = MAXBSIZE;
		break;
	default:
		vap->va_blocksize = v3 ? vp->v_mount->mnt_stat.f_iosize :
		    fxdr_unsigned(int32_t, fp->fa2_blocksize);
		break;
	}
	vap->va_nlink = fxdr_unsigned(nlink_t, fp->fa_nlink);
	vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
	vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
	if (v3) {
		vap->va_size = fxdr_hyper(&fp->fa3_size);
		vap->va_bytes = fxdr_hyper(&fp->fa3_used);
		vap->va_fileid = fxdr_hyper(&fp->fa3_fileid);
		fxdr_nfsv3time(&fp->fa3_atime, &vap->va_atime);
		fxdr_nfsv3time(&fp->fa3_ctime, &vap->va_ctime);
	} else {
		vap->va_size = fxdr_unsigned(u_int32_t, fp->fa2_size);
		vap->va_bytes =
		    (u_quad_t)fxdr_unsigned(int32_t, fp->fa2_blocks) *
		    NFS_FABLKSIZE;
		vap->va_fileid = fxdr_unsigned(int32_t, fp->fa2_fileid);
		fxdr_nfsv2time(&fp->fa2_atime, &vap->va_atime);
		vap->va_ctime.tv_sec = fxdr_unsigned(u_int32_t,
		    fp->fa2_ctime.nfsv2_sec);
		vap->va_ctime.tv_nsec = 0;
		vap->va_gen = fxdr_unsigned(u_int32_t,fp->fa2_ctime.nfsv2_usec);
	}
	vap->va_flags = 0;
	vap->va_filerev = 0;

	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (np->n_flag & NMODIFIED) {
				if (vap->va_size < np->n_size)
					vap->va_size = np->n_size;
				else
					np->n_size = vap->va_size;
			} else
				np->n_size = vap->va_size;
			uvm_vnp_setsize(vp, np->n_size);
		} else
			np->n_size = vap->va_size;
	}
	np->n_attrstamp = gettime();
	if (vaper != NULL) {
		bcopy(vap, vaper, sizeof(*vap));
		if (np->n_flag & NCHG) {
			if (np->n_flag & NACC)
				vaper->va_atime = np->n_atim;
			if (np->n_flag & NUPD)
				vaper->va_mtime = np->n_mtim;
		}
	}
	return (0);
}

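/*
 * Compute the attribute cache timeout (in seconds) for an nfsnode.
 * The timeout scales with the age of the file's last modification:
 * recently modified files get the mount's minimum timeout, older
 * files get proportionally longer, capped at the maximum.
 */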
int
nfs_attrtimeo(struct nfsnode *np)
{
	struct vnode *vp = np->n_vnode;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int tenthage = (gettime() - np->n_mtime.tv_sec) / 10;
	int minto, maxto;

	if (vp->v_type == VDIR) {
		maxto = nmp->nm_acdirmax;
		minto = nmp->nm_acdirmin;
	} else {
		maxto = nmp->nm_acregmax;
		minto = nmp->nm_acregmin;
	}

	if (np->n_flag & NMODIFIED || tenthage < minto)
		return minto;
	else if (tenthage < maxto)
		return tenthage;
	else
		return maxto;
}

/*
 * Check the time stamp
 * If the cache is valid, copy contents to *vap and return 0
 * otherwise return an error
 */
int
nfs_getattrcache(struct vnode *vp, struct vattr *vaper)
{
	struct nfsnode *np = VTONFS(vp);
	struct vattr *vap;

	if (np->n_attrstamp == 0 ||
	    (gettime() - np->n_attrstamp) >= nfs_attrtimeo(np)) {
		nfsstats.attrcache_misses++;
		return (ENOENT);
	}
	nfsstats.attrcache_hits++;
	vap = &np->n_vattr;
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (np->n_flag & NMODIFIED) {
				if (vap->va_size < np->n_size)
					vap->va_size = np->n_size;
				else
					np->n_size = vap->va_size;
			} else
				np->n_size = vap->va_size;
			uvm_vnp_setsize(vp, np->n_size);
		} else
			np->n_size = vap->va_size;
	}
	bcopy(vap, vaper, sizeof(struct vattr));
	if (np->n_flag & NCHG) {
		if (np->n_flag & NACC)
			vaper->va_atime = np->n_atim;
		if (np->n_flag & NUPD)
			vaper->va_mtime = np->n_mtim;
	}
	return (0);
}
#endif /* NFSCLIENT */

/*
 * The write verifier has changed (probably due to a server reboot), so all
 * B_NEEDCOMMIT blocks will have to be written again. Since they are on the
 * dirty block list as B_DELWRI, all this takes is clearing the B_NEEDCOMMIT
 * flag. Once done the new write verifier can be set for the mount point.
 */
void
nfs_clearcommit(struct mount *mp)
{
	struct vnode *vp;
	struct buf *bp;
	int s;

	s = splbio();
loop:
	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		if (vp->v_mount != mp)	/* Paranoia */
			goto loop;
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
			    == (B_DELWRI | B_NEEDCOMMIT))
				bp->b_flags &= ~B_NEEDCOMMIT;
		}
	}
	splx(s);
}

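/*
 * Fold the range of blocks queued for commit (n_pushlo/n_pushhi) into
 * the range already committed on the server (n_pushedlo/n_pushedhi),
 * then reset the to-be-committed range.
 */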
void
nfs_merge_commit_ranges(struct vnode *vp)
{
	struct nfsnode *np = VTONFS(vp);

	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID)) {
		np->n_pushedlo = np->n_pushlo;
		np->n_pushedhi = np->n_pushhi;
		np->n_commitflags |= NFS_COMMIT_PUSHED_VALID;
	} else {
		if (np->n_pushlo < np->n_pushedlo)
			np->n_pushedlo = np->n_pushlo;
		if (np->n_pushhi > np->n_pushedhi)
			np->n_pushedhi = np->n_pushhi;
	}

	np->n_pushlo = np->n_pushhi = 0;
	np->n_commitflags &= ~NFS_COMMIT_PUSH_VALID;
}

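/*
 * Return non-zero if the buffer's dirty byte range lies entirely within
 * the range already committed on the server.
 */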
int
nfs_in_committed_range(struct vnode *vp, struct buf *bp)
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID))
		return 0;
	lo = (off_t)bp->b_blkno * DEV_BSIZE;
	hi = lo + bp->b_dirtyend;

	return (lo >= np->n_pushedlo && hi <= np->n_pushedhi);
}

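/*
 * Return non-zero if the buffer's dirty byte range lies entirely within
 * the range queued to be committed.
 */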
int
nfs_in_tobecommitted_range(struct vnode *vp, struct buf *bp)
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	if (!(np->n_commitflags & NFS_COMMIT_PUSH_VALID))
		return 0;
	lo = (off_t)bp->b_blkno * DEV_BSIZE;
	hi = lo + bp->b_dirtyend;

	return (lo >= np->n_pushlo && hi <= np->n_pushhi);
}

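/*
 * Extend the committed range to cover the buffer's dirty byte range.
 */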
void
nfs_add_committed_range(struct vnode *vp, struct buf *bp)
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	lo = (off_t)bp->b_blkno * DEV_BSIZE;
	hi = lo + bp->b_dirtyend;

	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID)) {
		np->n_pushedlo = lo;
		np->n_pushedhi = hi;
		np->n_commitflags |= NFS_COMMIT_PUSHED_VALID;
	} else {
		if (hi > np->n_pushedhi)
			np->n_pushedhi = hi;
		if (lo < np->n_pushedlo)
			np->n_pushedlo = lo;
	}
}

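/*
 * Remove the buffer's dirty byte range from the committed range.
 */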
void
nfs_del_committed_range(struct vnode *vp, struct buf *bp)
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID))
		return;

	lo = (off_t)bp->b_blkno * DEV_BSIZE;
	hi = lo + bp->b_dirtyend;

	if (lo > np->n_pushedhi || hi < np->n_pushedlo)
		return;
	if (lo <= np->n_pushedlo)
		np->n_pushedlo = hi;
	else if (hi >= np->n_pushedhi)
		np->n_pushedhi = lo;
	else {
		/*
		 * XXX There's only one range. If the deleted range
		 * is in the middle, pick the largest of the
		 * contiguous ranges that it leaves.
		 */
		if ((np->n_pushedlo - lo) > (hi - np->n_pushedhi))
			np->n_pushedhi = lo;
		else
			np->n_pushedlo = hi;
	}
}

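/*
 * Extend the to-be-committed range to cover the buffer's dirty byte range.
 */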
void
nfs_add_tobecommitted_range(struct vnode *vp, struct buf *bp)
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	lo = (off_t)bp->b_blkno * DEV_BSIZE;
	hi = lo + bp->b_dirtyend;

	if (!(np->n_commitflags & NFS_COMMIT_PUSH_VALID)) {
		np->n_pushlo = lo;
		np->n_pushhi = hi;
		np->n_commitflags |= NFS_COMMIT_PUSH_VALID;
	} else {
		if (lo < np->n_pushlo)
			np->n_pushlo = lo;
		if (hi > np->n_pushhi)
			np->n_pushhi = hi;
	}
}

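/*
 * Remove the buffer's dirty byte range from the to-be-committed range.
 */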
void
nfs_del_tobecommitted_range(struct vnode *vp, struct buf *bp)
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	if (!(np->n_commitflags & NFS_COMMIT_PUSH_VALID))
		return;

	lo = (off_t)bp->b_blkno * DEV_BSIZE;
	hi = lo + bp->b_dirtyend;

	if (lo > np->n_pushhi || hi < np->n_pushlo)
		return;

	if (lo <= np->n_pushlo)
		np->n_pushlo = hi;
	else if (hi >= np->n_pushhi)
		np->n_pushhi = lo;
	else {
		/*
		 * XXX There's only one range. If the deleted range
		 * is in the middle, pick the largest of the
		 * contiguous ranges that it leaves.
		 */
		if ((np->n_pushlo - lo) > (hi - np->n_pushhi))
			np->n_pushhi = lo;
		else
			np->n_pushlo = hi;
	}
}

/*
 * Map errnos to NFS error numbers. For Version 3 also filter out error
 * numbers not specified for the associated procedure.
 */
int
nfsrv_errmap(struct nfsrv_descript *nd, int err)
{
	const short *defaulterrp, *errp;

	if (nd->nd_flag & ND_NFSV3) {
		if (nd->nd_procnum <= NFSPROC_COMMIT) {
			errp = defaulterrp = nfsrv_v3errmap[nd->nd_procnum];
			while (*++errp) {
				if (*errp == err)
					return (err);
				else if (*errp > err)
					break;
			}
			return ((int)*defaulterrp);
		} else
			return (err & 0xffff);
	}
	if (err <= nitems(nfsrv_v2errmap))
		return ((int)nfsrv_v2errmap[err - 1]);
	return (NFSERR_IO);
}

/*
 * If full is non zero, set all fields, otherwise just set mode and time fields
 */
void
nfsm_v3attrbuild(struct mbuf **mp, struct vattr *a, int full)
{
	struct mbuf *mb;
	u_int32_t *tl;

	mb = *mp;

	if (a->va_mode != (mode_t)VNOVAL) {
		tl = nfsm_build(&mb, 2 * NFSX_UNSIGNED);
		*tl++ = nfs_true;
		*tl = txdr_unsigned(a->va_mode);
	} else {
		tl = nfsm_build(&mb, NFSX_UNSIGNED);
		*tl = nfs_false;
	}
	if (full && a->va_uid != (uid_t)VNOVAL) {
		tl = nfsm_build(&mb, 2 * NFSX_UNSIGNED);
		*tl++ = nfs_true;
		*tl = txdr_unsigned(a->va_uid);
	} else {
		tl = nfsm_build(&mb, NFSX_UNSIGNED);
		*tl = nfs_false;
	}
	if (full && a->va_gid != (gid_t)VNOVAL) {
		tl = nfsm_build(&mb, 2 * NFSX_UNSIGNED);
		*tl++ = nfs_true;
		*tl = txdr_unsigned((a)->va_gid);
	} else {
		tl = nfsm_build(&mb, NFSX_UNSIGNED);
		*tl = nfs_false;
	}
	if (full && a->va_size != VNOVAL) {
		tl = nfsm_build(&mb, 3 * NFSX_UNSIGNED);
		*tl++ = nfs_true;
		txdr_hyper(a->va_size, tl);
	} else {
		tl = nfsm_build(&mb, NFSX_UNSIGNED);
		*tl = nfs_false;
	}
	if (a->va_atime.tv_nsec != VNOVAL) {
		if (a->va_atime.tv_sec != gettime()) {
			tl = nfsm_build(&mb, 3 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
			txdr_nfsv3time(&a->va_atime, tl);
		} else {
			tl = nfsm_build(&mb, NFSX_UNSIGNED);
			*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
		}
	} else {
		tl = nfsm_build(&mb, NFSX_UNSIGNED);
		*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
	}
	if (a->va_mtime.tv_nsec != VNOVAL) {
		if (a->va_mtime.tv_sec != gettime()) {
			tl = nfsm_build(&mb, 3 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
			txdr_nfsv3time(&a->va_mtime, tl);
		} else {
			tl = nfsm_build(&mb, NFSX_UNSIGNED);
			*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
		}
	} else {
		tl = nfsm_build(&mb, NFSX_UNSIGNED);
		*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
	}

	*mp = mb;
}

/*
 * Ensure a contiguous buffer len bytes long
 */
void *
nfsm_build(struct mbuf **mp, u_int len)
{
	struct mbuf *mb, *mb2;
	caddr_t bpos;

	mb = *mp;
	bpos = mb_offset(mb);

	if (len > m_trailingspace(mb)) {
		MGET(mb2, M_WAIT, MT_DATA);
		if (len > MLEN)
			panic("build > MLEN");
		mb->m_next = mb2;
		mb = mb2;
		mb->m_len = 0;
		bpos = mtod(mb, caddr_t);
	}
	mb->m_len += len;

	*mp = mb;

	return (bpos);
}

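/*
 * Append the file handle of vnode v to the request being built in
 * info->nmi_mb.  NFSv3 file handles are variable length and go out as
 * a length-prefixed XDR opaque; NFSv2 handles are fixed size.
 */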
void
nfsm_fhtom(struct nfsm_info *info, struct vnode *v, int v3)
{
	struct nfsnode *n = VTONFS(v);

	if (v3) {
		nfsm_strtombuf(&info->nmi_mb, n->n_fhp, n->n_fhsize);
	} else {
		nfsm_buftombuf(&info->nmi_mb, n->n_fhp, NFSX_V2FH);
	}
}

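/*
 * Server-side equivalent of nfsm_fhtom(): append an fhandle_t to the
 * mbuf chain in the appropriate NFSv2 or NFSv3 encoding.
 */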
void
nfsm_srvfhtom(struct mbuf **mp, fhandle_t *f, int v3)
{
	if (v3) {
		nfsm_strtombuf(mp, f, NFSX_V3FH);
	} else {
		nfsm_buftombuf(mp, f, NFSX_V2FH);
	}
}

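/*
 * Convert a timespec to NFSv2 wire format (seconds/microseconds),
 * handling the "don't change" (VNOVAL) and -1 special cases.
 */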
void
txdr_nfsv2time(const struct timespec *from, struct nfsv2_time *to)
{
	if (from->tv_nsec == VNOVAL) {
		to->nfsv2_sec = nfs_xdrneg1;
		to->nfsv2_usec = nfs_xdrneg1;
	} else if (from->tv_sec == -1) {
		/*
		 * can't request a time of -1; send
		 * -1.000001 == {-2,999999} instead
		 */
		to->nfsv2_sec = htonl(-2);
		to->nfsv2_usec = htonl(999999);
	} else {
		to->nfsv2_sec = htonl(from->tv_sec);
		to->nfsv2_usec = htonl(from->tv_nsec / 1000);
	}
}
