xref: /freebsd/sys/fs/nfs/nfs_commonkrpc.c (revision aa0a1e58)
1 /*-
2  * Copyright (c) 1989, 1991, 1993, 1995
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Rick Macklem at The University of Guelph.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 /*
38  * Socket operations for use by nfs
39  */
40 
41 #include "opt_inet6.h"
42 #include "opt_kgssapi.h"
43 #include "opt_nfs.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/limits.h>
49 #include <sys/lock.h>
50 #include <sys/malloc.h>
51 #include <sys/mbuf.h>
52 #include <sys/mount.h>
53 #include <sys/mutex.h>
54 #include <sys/proc.h>
55 #include <sys/signalvar.h>
56 #include <sys/syscallsubr.h>
57 #include <sys/sysctl.h>
58 #include <sys/syslog.h>
59 #include <sys/vnode.h>
60 
61 #include <rpc/rpc.h>
62 
63 #include <kgssapi/krb5/kcrypto.h>
64 
65 #include <fs/nfs/nfsport.h>
66 
67 NFSSTATESPINLOCK;
68 NFSREQSPINLOCK;
69 extern struct nfsstats newnfsstats;
70 extern struct nfsreqhead nfsd_reqq;
71 extern int nfscl_ticks;
72 extern void (*ncl_call_invalcaches)(struct vnode *);
73 
74 static int	nfsrv_gsscallbackson = 0;
75 static int	nfs_bufpackets = 4;
76 static int	nfs_reconnects;
77 static int	nfs3_jukebox_delay = 10;
78 static int	nfs_skip_wcc_data_onerr = 1;
79 static int	nfs_keytab_enctype = ETYPE_DES_CBC_CRC;
80 
81 SYSCTL_DECL(_vfs_newnfs);
82 
83 SYSCTL_INT(_vfs_newnfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0,
84     "Buffer reservation size, 2 <= x <= 64");
85 SYSCTL_INT(_vfs_newnfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
86     "Number of times the nfs client has had to reconnect");
87 SYSCTL_INT(_vfs_newnfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW, &nfs3_jukebox_delay, 0,
88     "Number of seconds to delay a retry after receiving EJUKEBOX");
89 SYSCTL_INT(_vfs_newnfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW, &nfs_skip_wcc_data_onerr, 0,
90     "Disable weak cache consistency checking when server returns an error");
91 SYSCTL_INT(_vfs_newnfs, OID_AUTO, keytab_enctype, CTLFLAG_RW, &nfs_keytab_enctype, 0,
92     "Encryption type for the keytab entry used by nfs");
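
/*
 * These knobs are exported under the vfs.newnfs sysctl tree; for example,
 * "sysctl vfs.newnfs.bufpackets=8" (an illustrative value) raises the
 * socket buffer reservation multiplier used by newnfs_connect() below.
 */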
93 
94 static void	nfs_down(struct nfsmount *, struct thread *, const char *,
95     int, int);
96 static void	nfs_up(struct nfsmount *, struct thread *, const char *,
97     int, int);
98 static int	nfs_msg(struct thread *, const char *, const char *, int);
99 
100 struct nfs_cached_auth {
101 	int		ca_refs; /* refcount, including 1 from the cache */
102 	uid_t		ca_uid;	 /* uid that corresponds to this auth */
103 	AUTH		*ca_auth; /* RPC auth handle */
104 };
105 
106 static int nfsv2_procid[NFS_V3NPROCS] = {
107 	NFSV2PROC_NULL,
108 	NFSV2PROC_GETATTR,
109 	NFSV2PROC_SETATTR,
110 	NFSV2PROC_LOOKUP,
111 	NFSV2PROC_NOOP,
112 	NFSV2PROC_READLINK,
113 	NFSV2PROC_READ,
114 	NFSV2PROC_WRITE,
115 	NFSV2PROC_CREATE,
116 	NFSV2PROC_MKDIR,
117 	NFSV2PROC_SYMLINK,
118 	NFSV2PROC_CREATE,
119 	NFSV2PROC_REMOVE,
120 	NFSV2PROC_RMDIR,
121 	NFSV2PROC_RENAME,
122 	NFSV2PROC_LINK,
123 	NFSV2PROC_READDIR,
124 	NFSV2PROC_NOOP,
125 	NFSV2PROC_STATFS,
126 	NFSV2PROC_NOOP,
127 	NFSV2PROC_NOOP,
128 	NFSV2PROC_NOOP,
129 };
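
/*
 * The table above is indexed by NFSv3 procedure number; procedures with
 * no NFSv2 equivalent map to NFSV2PROC_NOOP.  newnfs_request() uses it to
 * translate procedure numbers when ND_NFSV2 is set on a request.
 */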
130 
131 /*
132  * Initialize sockets and congestion for a new NFS connection.
133  * We do not free the sockaddr on error.
134  */
135 int
136 newnfs_connect(struct nfsmount *nmp, struct nfssockreq *nrp,
137     struct ucred *cred, NFSPROC_T *p, int callback_retry_mult)
138 {
139 	int rcvreserve, sndreserve;
140 	int pktscale;
141 	struct sockaddr *saddr;
142 	struct ucred *origcred;
143 	CLIENT *client;
144 	struct netconfig *nconf;
145 	struct socket *so;
146 	int one = 1, retries, error, printsbmax = 0;
147 	struct thread *td = curthread;
148 
149 	/*
150 	 * We need to establish the socket using the credentials of
151 	 * the mountpoint.  Some parts of this process (such as
152 	 * sobind() and soconnect()) will use the current thread's
153 	 * credential instead of the socket credential.  To work
154 	 * around this, temporarily change the current thread's
155 	 * credential to that of the mountpoint.
156 	 *
157 	 * XXX: It would be better to explicitly pass the correct
158 	 * credential to sobind() and soconnect().
159 	 */
160 	origcred = td->td_ucred;
161 
162 	/*
163 	 * Use the credential in nr_cred, if not NULL.
164 	 */
165 	if (nrp->nr_cred != NULL)
166 		td->td_ucred = nrp->nr_cred;
167 	else
168 		td->td_ucred = cred;
169 	saddr = nrp->nr_nam;
170 
171 	if (saddr->sa_family == AF_INET)
172 		if (nrp->nr_sotype == SOCK_DGRAM)
173 			nconf = getnetconfigent("udp");
174 		else
175 			nconf = getnetconfigent("tcp");
176 	else
177 		if (nrp->nr_sotype == SOCK_DGRAM)
178 			nconf = getnetconfigent("udp6");
179 		else
180 			nconf = getnetconfigent("tcp6");
181 
182 	pktscale = nfs_bufpackets;
183 	if (pktscale < 2)
184 		pktscale = 2;
185 	if (pktscale > 64)
186 		pktscale = 64;
187 	/*
188 	 * soreserve() can fail if sb_max is too small, so shrink pktscale
189 	 * and try again if there is an error.
190 	 * Print a log message suggesting increasing sb_max.
191 	 * Creating a socket and doing this is necessary because, if the
192 	 * reservation sizes are too large and would make soreserve() fail,
193 	 * the connection would appear to work until a large send is
194 	 * attempted and then it would loop in the krpc code.
195 	 */
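	/*
	 * For a TCP mount, for example, each of the reservations below works
	 * out to (NFS_MAXBSIZE + NFS_MAXPKTHDR + sizeof(u_int32_t)) *
	 * pktscale bytes, so lowering pktscale is what eventually lets
	 * soreserve() succeed.
	 */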
196 	so = NULL;
197 	saddr = NFSSOCKADDR(nrp->nr_nam, struct sockaddr *);
198 	error = socreate(saddr->sa_family, &so, nrp->nr_sotype,
199 	    nrp->nr_soproto, td->td_ucred, td);
200 	if (error) {
201 		td->td_ucred = origcred;
202 		return (error);
203 	}
204 	do {
205 	    if (error != 0 && pktscale > 2) {
206 		pktscale--;
207 		if (printsbmax == 0) {
208 		    printf("nfscl: consider increasing kern.ipc.maxsockbuf\n");
209 		    printsbmax = 1;
210 		}
211 	    }
212 	    if (nrp->nr_sotype == SOCK_DGRAM) {
213 		if (nmp != NULL) {
214 			sndreserve = (NFS_MAXDGRAMDATA + NFS_MAXPKTHDR) *
215 			    pktscale;
216 			rcvreserve = (NFS_MAXDGRAMDATA + NFS_MAXPKTHDR) *
217 			    pktscale;
218 		} else {
219 			sndreserve = rcvreserve = 1024 * pktscale;
220 		}
221 	    } else {
222 		if (nrp->nr_sotype != SOCK_STREAM)
223 			panic("nfscon sotype");
224 		if (nmp != NULL) {
225 			sndreserve = (NFS_MAXBSIZE + NFS_MAXPKTHDR +
226 			    sizeof (u_int32_t)) * pktscale;
227 			rcvreserve = (NFS_MAXBSIZE + NFS_MAXPKTHDR +
228 			    sizeof (u_int32_t)) * pktscale;
229 		} else {
230 			sndreserve = rcvreserve = 1024 * pktscale;
231 		}
232 	    }
233 	    error = soreserve(so, sndreserve, rcvreserve);
234 	} while (error != 0 && pktscale > 2);
235 	soclose(so);
236 	if (error) {
237 		td->td_ucred = origcred;
238 		return (error);
239 	}
240 
241 	client = clnt_reconnect_create(nconf, saddr, nrp->nr_prog,
242 	    nrp->nr_vers, sndreserve, rcvreserve);
243 	CLNT_CONTROL(client, CLSET_WAITCHAN, "newnfsreq");
244 	if (nmp != NULL) {
245 		if ((nmp->nm_flag & NFSMNT_INT))
246 			CLNT_CONTROL(client, CLSET_INTERRUPTIBLE, &one);
247 		if ((nmp->nm_flag & NFSMNT_RESVPORT))
248 			CLNT_CONTROL(client, CLSET_PRIVPORT, &one);
249 		if (NFSHASSOFT(nmp))
250 			retries = nmp->nm_retry;
251 		else
252 			retries = INT_MAX;
253 	} else {
254 		/*
255 		 * Three cases:
256 		 * - Null RPC callback to client
257 		 * - Non-Null RPC callback to client, wait a little longer
258 		 * - upcalls to nfsuserd and gssd (clp == NULL)
259 		 */
260 		if (callback_retry_mult == 0) {
261 			retries = NFSV4_UPCALLRETRY;
262 			CLNT_CONTROL(client, CLSET_PRIVPORT, &one);
263 		} else {
264 			retries = NFSV4_CALLBACKRETRY * callback_retry_mult;
265 		}
266 	}
267 	CLNT_CONTROL(client, CLSET_RETRIES, &retries);
268 
269 	mtx_lock(&nrp->nr_mtx);
270 	if (nrp->nr_client != NULL) {
271 		/*
272 		 * Someone else already connected.
273 		 */
274 		CLNT_RELEASE(client);
275 	} else {
276 		nrp->nr_client = client;
277 	}
278 
279 	/*
280 	 * Protocols that do not require connections may optionally be left
281 	 * unconnected for servers that reply from a port other than NFS_PORT.
282 	 */
283 	if (nmp == NULL || (nmp->nm_flag & NFSMNT_NOCONN) == 0) {
284 		mtx_unlock(&nrp->nr_mtx);
285 		CLNT_CONTROL(client, CLSET_CONNECT, &one);
286 	} else {
287 		mtx_unlock(&nrp->nr_mtx);
288 	}
289 
290 	/* Restore current thread's credentials. */
291 	td->td_ucred = origcred;
292 	return (0);
293 }
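
/*
 * Note that newnfs_connect() is called lazily: newnfs_request() below
 * invokes it the first time it finds nrp->nr_client == NULL, and
 * clnt_reconnect_create() handles any reconnects after that.
 */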
294 
295 /*
296  * NFS disconnect. Clean up and unlink.
297  */
298 void
299 newnfs_disconnect(struct nfssockreq *nrp)
300 {
301 	CLIENT *client;
302 
303 	mtx_lock(&nrp->nr_mtx);
304 	if (nrp->nr_client != NULL) {
305 		client = nrp->nr_client;
306 		nrp->nr_client = NULL;
307 		mtx_unlock(&nrp->nr_mtx);
308 #ifdef KGSSAPI
309 		rpc_gss_secpurge(client);
310 #endif
311 		CLNT_CLOSE(client);
312 		CLNT_RELEASE(client);
313 	} else {
314 		mtx_unlock(&nrp->nr_mtx);
315 	}
316 }
317 
318 static AUTH *
319 nfs_getauth(struct nfssockreq *nrp, int secflavour, char *clnt_principal,
320     char *srv_principal, gss_OID mech_oid, struct ucred *cred)
321 {
322 #ifdef KGSSAPI
323 	rpc_gss_service_t svc;
324 	AUTH *auth;
325 #ifdef notyet
326 	rpc_gss_options_req_t req_options;
327 #endif
328 #endif
329 
330 	switch (secflavour) {
331 #ifdef KGSSAPI
332 	case RPCSEC_GSS_KRB5:
333 	case RPCSEC_GSS_KRB5I:
334 	case RPCSEC_GSS_KRB5P:
335 		if (!mech_oid) {
336 			if (!rpc_gss_mech_to_oid("kerberosv5", &mech_oid))
337 				return (NULL);
338 		}
339 		if (secflavour == RPCSEC_GSS_KRB5)
340 			svc = rpc_gss_svc_none;
341 		else if (secflavour == RPCSEC_GSS_KRB5I)
342 			svc = rpc_gss_svc_integrity;
343 		else
344 			svc = rpc_gss_svc_privacy;
345 #ifdef notyet
346 		req_options.req_flags = GSS_C_MUTUAL_FLAG;
347 		req_options.time_req = 0;
348 		req_options.my_cred = GSS_C_NO_CREDENTIAL;
349 		req_options.input_channel_bindings = NULL;
350 		req_options.enc_type = nfs_keytab_enctype;
351 
352 		auth = rpc_gss_secfind(nrp->nr_client, cred,
353 		    clnt_principal, srv_principal, mech_oid, svc,
354 		    &req_options);
355 #else
356 		/*
357 		 * Until changes to the rpcsec_gss code are committed,
358 		 * there is no support for host based initiator
359 		 * principals. As such, that case cannot yet be handled.
360 		 */
361 		if (clnt_principal == NULL)
362 			auth = rpc_gss_secfind(nrp->nr_client, cred,
363 			    srv_principal, mech_oid, svc);
364 		else
365 			auth = NULL;
366 #endif
367 		if (auth != NULL)
368 			return (auth);
369 		/* fallthrough */
370 #endif	/* KGSSAPI */
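	/*
	 * AUTH_SYS is also the fallback when no RPCSEC_GSS context could be
	 * obtained above (or when kgssapi is not compiled in).
	 */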
371 	case AUTH_SYS:
372 	default:
373 		return (authunix_create(cred));
374 
375 	}
376 }
377 
378 /*
379  * Callback from the RPC code to generate up/down notifications.
380  */
381 
382 struct nfs_feedback_arg {
383 	struct nfsmount *nf_mount;
384 	int		nf_lastmsg;	/* last tprintf */
385 	int		nf_tprintfmsg;
386 	struct thread	*nf_td;
387 };
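
/*
 * nf_lastmsg records the uptime (in seconds) of the last "not responding"
 * tprintf, which lets nfs_feedback() rate-limit those messages to one per
 * nm_tprintf_delay seconds.
 */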
388 
389 static void
390 nfs_feedback(int type, int proc, void *arg)
391 {
392 	struct nfs_feedback_arg *nf = (struct nfs_feedback_arg *) arg;
393 	struct nfsmount *nmp = nf->nf_mount;
394 	struct timeval now;
395 
396 	getmicrouptime(&now);
397 
398 	switch (type) {
399 	case FEEDBACK_REXMIT2:
400 	case FEEDBACK_RECONNECT:
401 		if (nf->nf_lastmsg + nmp->nm_tprintf_delay < now.tv_sec) {
402 			nfs_down(nmp, nf->nf_td,
403 			    "not responding", 0, NFSSTA_TIMEO);
404 			nf->nf_tprintfmsg = TRUE;
405 			nf->nf_lastmsg = now.tv_sec;
406 		}
407 		break;
408 
409 	case FEEDBACK_OK:
410 		nfs_up(nf->nf_mount, nf->nf_td,
411 		    "is alive again", NFSSTA_TIMEO, nf->nf_tprintfmsg);
412 		break;
413 	}
414 }
415 
416 /*
417  * newnfs_request - goes something like this
418  *	- does the rpc by calling the krpc layer
419  *	- break down rpc header and return with nfs reply
420  *	- breaks down the rpc header and returns with the nfs reply
421  */
422 int
423 newnfs_request(struct nfsrv_descript *nd, struct nfsmount *nmp,
424     struct nfsclient *clp, struct nfssockreq *nrp, vnode_t vp,
425     struct thread *td, struct ucred *cred, u_int32_t prog, u_int32_t vers,
426     u_char *retsum, int toplevel, u_int64_t *xidp)
427 {
428 	u_int32_t *tl;
429 	time_t waituntil;
430 	int i, j, set_uid = 0, set_sigset = 0;
431 	int trycnt, error = 0, usegssname = 0, secflavour = AUTH_SYS;
432 	u_int16_t procnum;
433 	u_int trylater_delay = 1;
434 	struct nfs_feedback_arg nf;
435 	struct timeval timo, now;
436 	AUTH *auth;
437 	struct rpc_callextra ext;
438 	enum clnt_stat stat;
439 	struct nfsreq *rep = NULL;
440 	char *srv_principal = NULL;
441 	uid_t saved_uid = (uid_t)-1;
442 	sigset_t oldset;
443 
444 	if (xidp != NULL)
445 		*xidp = 0;
446 	/* Reject requests while attempting a forced unmount. */
447 	if (nmp != NULL && (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)) {
448 		m_freem(nd->nd_mreq);
449 		return (ESTALE);
450 	}
451 
452 	/* For client side interruptible mounts, mask off the signals. */
453 	if (nmp != NULL && td != NULL && NFSHASINT(nmp)) {
454 		newnfs_set_sigmask(td, &oldset);
455 		set_sigset = 1;
456 	}
457 
458 	/*
459 	 * XXX if not already connected call nfs_connect now. Longer
460 	 * term, change nfs_mount to call nfs_connect unconditionally
461 	 * and let clnt_reconnect_create handle reconnects.
462 	 */
463 	if (nrp->nr_client == NULL)
464 		newnfs_connect(nmp, nrp, cred, td, 0);
465 
466 	/*
467 	 * For a client side mount, nmp is != NULL and clp == NULL. For
468 	 * server calls (callbacks or upcalls), nmp == NULL.
469 	 */
470 	if (clp != NULL) {
471 		NFSLOCKSTATE();
472 		if ((clp->lc_flags & LCL_GSS) && nfsrv_gsscallbackson) {
473 			secflavour = RPCSEC_GSS_KRB5;
474 			if (nd->nd_procnum != NFSPROC_NULL) {
475 				if (clp->lc_flags & LCL_GSSINTEGRITY)
476 					secflavour = RPCSEC_GSS_KRB5I;
477 				else if (clp->lc_flags & LCL_GSSPRIVACY)
478 					secflavour = RPCSEC_GSS_KRB5P;
479 			}
480 		}
481 		NFSUNLOCKSTATE();
482 	} else if (nmp != NULL && NFSHASKERB(nmp) &&
483 	     nd->nd_procnum != NFSPROC_NULL) {
484 		if (NFSHASALLGSSNAME(nmp) && nmp->nm_krbnamelen > 0)
485 			nd->nd_flag |= ND_USEGSSNAME;
486 		if ((nd->nd_flag & ND_USEGSSNAME) != 0) {
487 			/*
488 			 * If there is a client side host based credential,
489 			 * use that, otherwise use the system uid, if set.
490 			 */
491 			if (nmp->nm_krbnamelen > 0) {
492 				usegssname = 1;
493 			} else if (nmp->nm_uid != (uid_t)-1) {
494 				saved_uid = cred->cr_uid;
495 				cred->cr_uid = nmp->nm_uid;
496 				set_uid = 1;
497 			}
498 		} else if (nmp->nm_krbnamelen == 0 &&
499 		    nmp->nm_uid != (uid_t)-1 && cred->cr_uid == (uid_t)0) {
500 			/*
501 			 * If there is no host based principal name and
502 			 * the system uid is set and this is root, use the
503 			 * system uid, since root won't have user
504 			 * credentials in a credentials cache file.
505 			 */
506 			saved_uid = cred->cr_uid;
507 			cred->cr_uid = nmp->nm_uid;
508 			set_uid = 1;
509 		}
510 		if (NFSHASINTEGRITY(nmp))
511 			secflavour = RPCSEC_GSS_KRB5I;
512 		else if (NFSHASPRIVACY(nmp))
513 			secflavour = RPCSEC_GSS_KRB5P;
514 		else
515 			secflavour = RPCSEC_GSS_KRB5;
516 		srv_principal = NFSMNT_SRVKRBNAME(nmp);
517 	}
518 
519 	if (nmp != NULL) {
520 		bzero(&nf, sizeof(struct nfs_feedback_arg));
521 		nf.nf_mount = nmp;
522 		nf.nf_td = td;
523 		getmicrouptime(&now);
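		/*
		 * Backdate nf_lastmsg so that the first "not responding"
		 * message goes out after nm_tprintf_initial_delay seconds
		 * rather than the full nm_tprintf_delay.
		 */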
524 		nf.nf_lastmsg = now.tv_sec -
525 		    ((nmp->nm_tprintf_delay)-(nmp->nm_tprintf_initial_delay));
526 	}
527 
528 	if (nd->nd_procnum == NFSPROC_NULL)
529 		auth = authnone_create();
530 	else if (usegssname)
531 		auth = nfs_getauth(nrp, secflavour, nmp->nm_krbname,
532 		    srv_principal, NULL, cred);
533 	else
534 		auth = nfs_getauth(nrp, secflavour, NULL,
535 		    srv_principal, NULL, cred);
536 	if (set_uid)
537 		cred->cr_uid = saved_uid;
538 	if (auth == NULL) {
539 		m_freem(nd->nd_mreq);
540 		if (set_sigset)
541 			newnfs_restore_sigmask(td, &oldset);
542 		return (EACCES);
543 	}
544 	bzero(&ext, sizeof(ext));
545 	ext.rc_auth = auth;
546 	if (nmp != NULL) {
547 		ext.rc_feedback = nfs_feedback;
548 		ext.rc_feedback_arg = &nf;
549 	}
550 
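	/*
	 * For NFSv4, everything except NULL and callback compounds goes
	 * over the wire as a single COMPOUND procedure.
	 */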
551 	procnum = nd->nd_procnum;
552 	if ((nd->nd_flag & ND_NFSV4) &&
553 	    nd->nd_procnum != NFSPROC_NULL &&
554 	    nd->nd_procnum != NFSV4PROC_CBCOMPOUND)
555 		procnum = NFSV4PROC_COMPOUND;
556 
557 	if (nmp != NULL) {
558 		NFSINCRGLOBAL(newnfsstats.rpcrequests);
559 
560 		/* Map the procnum to the old NFSv2 one, as required. */
561 		if ((nd->nd_flag & ND_NFSV2) != 0) {
562 			if (nd->nd_procnum < NFS_V3NPROCS)
563 				procnum = nfsv2_procid[nd->nd_procnum];
564 			else
565 				procnum = NFSV2PROC_NOOP;
566 		}
567 
568 		/*
569 		 * Now only used for the R_DONTRECOVER case, but until that is
570 		 * supported within the krpc code, I need to keep a queue of
571 		 * outstanding RPCs for nfsv4 client requests.
572 		 */
573 		if ((nd->nd_flag & ND_NFSV4) && procnum == NFSV4PROC_COMPOUND)
574 			MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq),
575 			    M_NFSDREQ, M_WAITOK);
576 	}
577 	trycnt = 0;
578 tryagain:
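	/*
	 * Pick the RPC timeout: upcalls and callbacks use fixed values;
	 * TCP mounts wait essentially forever for NFSv4 (INT_MAX) or use
	 * NFS_TCPTIMEO; UDP mounts derive the timeout from nm_timeo ticks.
	 */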
579 	if (nmp == NULL) {
580 		timo.tv_usec = 0;
581 		if (clp == NULL)
582 			timo.tv_sec = NFSV4_UPCALLTIMEO;
583 		else
584 			timo.tv_sec = NFSV4_CALLBACKTIMEO;
585 	} else {
586 		if (nrp->nr_sotype != SOCK_DGRAM) {
587 			timo.tv_usec = 0;
588 			if ((nmp->nm_flag & NFSMNT_NFSV4))
589 				timo.tv_sec = INT_MAX;
590 			else
591 				timo.tv_sec = NFS_TCPTIMEO;
592 		} else {
593 			timo.tv_sec = nmp->nm_timeo / NFS_HZ;
594 			timo.tv_usec = (nmp->nm_timeo * 1000000) / NFS_HZ;
595 		}
596 
597 		if (rep != NULL) {
598 			rep->r_flags = 0;
599 			rep->r_nmp = nmp;
600 			/*
601 			 * Chain request into list of outstanding requests.
602 			 */
603 			NFSLOCKREQ();
604 			TAILQ_INSERT_TAIL(&nfsd_reqq, rep, r_chain);
605 			NFSUNLOCKREQ();
606 		}
607 	}
608 
609 	nd->nd_mrep = NULL;
610 	stat = CLNT_CALL_MBUF(nrp->nr_client, &ext, procnum, nd->nd_mreq,
611 	    &nd->nd_mrep, timo);
612 
613 	if (rep != NULL) {
614 		/*
615 		 * RPC done, unlink the request.
616 		 */
617 		NFSLOCKREQ();
618 		TAILQ_REMOVE(&nfsd_reqq, rep, r_chain);
619 		NFSUNLOCKREQ();
620 	}
621 
622 	/*
623 	 * Map the RPC status to an errno value.  (If a tprintf msg was
624 	 * logged, nfs_feedback() has already tprintf'd a response.)
625 	 */
626 	if (stat == RPC_SUCCESS) {
627 		error = 0;
628 	} else if (stat == RPC_TIMEDOUT) {
629 		error = ETIMEDOUT;
630 	} else if (stat == RPC_VERSMISMATCH) {
631 		error = EOPNOTSUPP;
632 	} else if (stat == RPC_PROGVERSMISMATCH) {
633 		error = EPROTONOSUPPORT;
634 	} else {
635 		error = EACCES;
636 	}
637 	if (error) {
638 		m_freem(nd->nd_mreq);
639 		AUTH_DESTROY(auth);
640 		if (rep != NULL)
641 			FREE((caddr_t)rep, M_NFSDREQ);
642 		if (set_sigset)
643 			newnfs_restore_sigmask(td, &oldset);
644 		return (error);
645 	}
646 
647 	KASSERT(nd->nd_mrep != NULL, ("mrep shouldn't be NULL if no error\n"));
648 
649 	/*
650 	 * Search for any mbufs that are not a multiple of 4 bytes long
651 	 * or with m_data not longword aligned.
652 	 * These could cause pointer alignment problems, so copy them to
653 	 * well aligned mbufs.
654 	 */
655 	newnfs_realign(&nd->nd_mrep);
656 	nd->nd_md = nd->nd_mrep;
657 	nd->nd_dpos = NFSMTOD(nd->nd_md, caddr_t);
658 	nd->nd_repstat = 0;
659 	if (nd->nd_procnum != NFSPROC_NULL) {
660 		/*
661 		 * and now the actual NFS xdr.
662 		 */
663 		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
664 		nd->nd_repstat = fxdr_unsigned(u_int32_t, *tl);
665 		if (nd->nd_repstat != 0) {
666 			if ((nd->nd_repstat == NFSERR_DELAY &&
667 			     (nd->nd_flag & ND_NFSV4) &&
668 			     nd->nd_procnum != NFSPROC_SETATTR &&
669 			     nd->nd_procnum != NFSPROC_READ &&
670 			     nd->nd_procnum != NFSPROC_WRITE &&
671 			     nd->nd_procnum != NFSPROC_OPEN &&
672 			     nd->nd_procnum != NFSPROC_CREATE &&
673 			     nd->nd_procnum != NFSPROC_OPENCONFIRM &&
674 			     nd->nd_procnum != NFSPROC_OPENDOWNGRADE &&
675 			     nd->nd_procnum != NFSPROC_CLOSE &&
676 			     nd->nd_procnum != NFSPROC_LOCK &&
677 			     nd->nd_procnum != NFSPROC_LOCKU) ||
678 			    (nd->nd_repstat == NFSERR_DELAY &&
679 			     (nd->nd_flag & ND_NFSV4) == 0) ||
680 			    nd->nd_repstat == NFSERR_RESOURCE) {
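				/*
				 * The server asked us to try again later
				 * (NFSERR_DELAY/NFSERR_RESOURCE): nap for
				 * a delay that doubles on each retry, capped
				 * at NFS_TRYLATERDEL seconds, and resend.
				 */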
681 				if (trylater_delay > NFS_TRYLATERDEL)
682 					trylater_delay = NFS_TRYLATERDEL;
683 				waituntil = NFSD_MONOSEC + trylater_delay;
684 				while (NFSD_MONOSEC < waituntil)
685 					(void) nfs_catnap(PZERO, 0, "nfstry");
686 				trylater_delay *= 2;
687 				goto tryagain;
688 			}
689 
690 			/*
691 			 * If the File Handle was stale, invalidate the
692 			 * lookup cache, just in case.
693 			 * (vp != NULL implies a client side call)
694 			 */
695 			if (nd->nd_repstat == ESTALE && vp != NULL) {
696 				cache_purge(vp);
697 				if (ncl_call_invalcaches != NULL)
698 					(*ncl_call_invalcaches)(vp);
699 			}
700 		}
701 
702 		/*
703 		 * Get rid of the tag, return count, and PUTFH result for V4.
704 		 */
705 		if (nd->nd_flag & ND_NFSV4) {
706 			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
707 			i = fxdr_unsigned(int, *tl);
708 			error = nfsm_advance(nd, NFSM_RNDUP(i), -1);
709 			if (error)
710 				goto nfsmout;
711 			NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
712 			i = fxdr_unsigned(int, *++tl);
713 
714 			/*
715 			 * If the first op's status is non-zero, mark that
716 			 * there is no more data to process.
717 			 */
718 			if (*++tl)
719 				nd->nd_flag |= ND_NOMOREDATA;
720 
721 			/*
722 			 * If the first op is Putfh, throw its results away
723 			 * and toss the op# and status for the first op.
724 			 */
725 			if (nmp != NULL && i == NFSV4OP_PUTFH && *tl == 0) {
726 				NFSM_DISSECT(tl,u_int32_t *,2 * NFSX_UNSIGNED);
727 				i = fxdr_unsigned(int, *tl++);
728 				j = fxdr_unsigned(int, *tl);
729 				/*
730 				 * All Compounds that do an Op that must
731 				 * be in sequence consist of NFSV4OP_PUTFH
732 				 * followed by one of these. As such, we
733 				 * can determine if the seqid# should be
734 				 * incremented, here.
735 				 */
736 				if ((i == NFSV4OP_OPEN ||
737 				     i == NFSV4OP_OPENCONFIRM ||
738 				     i == NFSV4OP_OPENDOWNGRADE ||
739 				     i == NFSV4OP_CLOSE ||
740 				     i == NFSV4OP_LOCK ||
741 				     i == NFSV4OP_LOCKU) &&
742 				    (j == 0 ||
743 				     (j != NFSERR_STALECLIENTID &&
744 				      j != NFSERR_STALESTATEID &&
745 				      j != NFSERR_BADSTATEID &&
746 				      j != NFSERR_BADSEQID &&
747 				      j != NFSERR_BADXDR &&
748 				      j != NFSERR_RESOURCE &&
749 				      j != NFSERR_NOFILEHANDLE)))
750 					nd->nd_flag |= ND_INCRSEQID;
751 				/*
752 				 * If the first op's status is non-zero, mark
753 				 * that there is no more data to process.
754 				 */
755 				if (j)
756 					nd->nd_flag |= ND_NOMOREDATA;
757 			}
758 
759 			/*
760 			 * If R_DONTRECOVER is set, replace the stale error
761 			 * reply, so that recovery isn't initiated.
762 			 */
763 			if ((nd->nd_repstat == NFSERR_STALECLIENTID ||
764 			     nd->nd_repstat == NFSERR_STALESTATEID) &&
765 			    rep != NULL && (rep->r_flags & R_DONTRECOVER))
766 				nd->nd_repstat = NFSERR_STALEDONTRECOVER;
767 		}
768 	}
769 
770 	m_freem(nd->nd_mreq);
771 	AUTH_DESTROY(auth);
772 	if (rep != NULL)
773 		FREE((caddr_t)rep, M_NFSDREQ);
774 	if (set_sigset)
775 		newnfs_restore_sigmask(td, &oldset);
776 	return (0);
777 nfsmout:
778 	mbuf_freem(nd->nd_mrep);
779 	mbuf_freem(nd->nd_mreq);
780 	AUTH_DESTROY(auth);
781 	if (rep != NULL)
782 		FREE((caddr_t)rep, M_NFSDREQ);
783 	if (set_sigset)
784 		newnfs_restore_sigmask(td, &oldset);
785 	return (error);
786 }
787 
788 /*
789  * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
790  * wait for all requests to complete. This is used by forced unmounts
791  * to terminate any outstanding RPCs.
792  */
793 int
794 newnfs_nmcancelreqs(struct nfsmount *nmp)
795 {
796 
797 	if (nmp->nm_sockreq.nr_client != NULL)
798 		CLNT_CLOSE(nmp->nm_sockreq.nr_client);
799 	return (0);
800 }
801 
802 /*
803  * Any signal that can interrupt an NFS operation in an intr mount
804  * should be added to this set. SIGSTOP and SIGKILL cannot be masked.
805  */
806 int newnfs_sig_set[] = {
807 	SIGINT,
808 	SIGTERM,
809 	SIGHUP,
810 	SIGKILL,
811 	SIGSTOP,
812 	SIGQUIT
813 };
814 
815 /*
816  * Check to see if one of the signals in our subset is pending on
817  * the process (in an intr mount).
818  */
819 static int
820 nfs_sig_pending(sigset_t set)
821 {
822 	int i;
823 
824 	for (i = 0 ; i < sizeof(newnfs_sig_set)/sizeof(int) ; i++)
825 		if (SIGISMEMBER(set, newnfs_sig_set[i]))
826 			return (1);
827 	return (0);
828 }
829 
830 /*
831  * The set/restore sigmask functions are used to (temporarily) overwrite
832  * the process p_sigmask during an RPC call (for example). These are also
833  * used in other places in the NFS client that might tsleep().
834  */
835 void
836 newnfs_set_sigmask(struct thread *td, sigset_t *oldset)
837 {
838 	sigset_t newset;
839 	int i;
840 	struct proc *p;
841 
842 	SIGFILLSET(newset);
843 	if (td == NULL)
844 		td = curthread; /* XXX */
845 	p = td->td_proc;
846 	/* Remove the NFS set of signals from newset */
847 	PROC_LOCK(p);
848 	mtx_lock(&p->p_sigacts->ps_mtx);
849 	for (i = 0 ; i < sizeof(newnfs_sig_set)/sizeof(int) ; i++) {
850 		/*
851 		 * But make sure we leave the ones already masked
852 		 * by the thread, i.e. remove the signal from the
853 		 * temporary signal mask only if it wasn't already
854 		 * in td_sigmask and isn't ignored by the process.
855 		 */
856 		if (!SIGISMEMBER(td->td_sigmask, newnfs_sig_set[i]) &&
857 		    !SIGISMEMBER(p->p_sigacts->ps_sigignore, newnfs_sig_set[i]))
858 			SIGDELSET(newset, newnfs_sig_set[i]);
859 	}
860 	mtx_unlock(&p->p_sigacts->ps_mtx);
861 	PROC_UNLOCK(p);
862 	kern_sigprocmask(td, SIG_SETMASK, &newset, oldset, 0);
863 }
864 
865 void
866 newnfs_restore_sigmask(struct thread *td, sigset_t *set)
867 {
868 	if (td == NULL)
869 		td = curthread; /* XXX */
870 	kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
871 }
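
/*
 * The usual pattern (a sketch) is:
 *
 *	sigset_t oldset;
 *
 *	newnfs_set_sigmask(td, &oldset);
 *	error = ...some interruptible sleep or RPC...;
 *	newnfs_restore_sigmask(td, &oldset);
 *
 * which is what newnfs_msleep() below does around msleep().
 */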
872 
873 /*
874  * NFS wrapper to msleep() that installs a new p_sigmask and restores the
875  * old one after msleep() returns.
876  */
877 int
878 newnfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo)
879 {
880 	sigset_t oldset;
881 	int error;
882 	struct proc *p;
883 
884 	if ((priority & PCATCH) == 0)
885 		return msleep(ident, mtx, priority, wmesg, timo);
886 	if (td == NULL)
887 		td = curthread; /* XXX */
888 	newnfs_set_sigmask(td, &oldset);
889 	error = msleep(ident, mtx, priority, wmesg, timo);
890 	newnfs_restore_sigmask(td, &oldset);
891 	p = td->td_proc;
892 	return (error);
893 }
894 
895 /*
896  * Test for a termination condition pending on the process.
897  * This is used for NFSMNT_INT mounts.
898  */
899 int
900 newnfs_sigintr(struct nfsmount *nmp, struct thread *td)
901 {
902 	struct proc *p;
903 	sigset_t tmpset;
904 
905 	/* Terminate all requests while attempting a forced unmount. */
906 	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
907 		return (EIO);
908 	if (!(nmp->nm_flag & NFSMNT_INT))
909 		return (0);
910 	if (td == NULL)
911 		return (0);
912 	p = td->td_proc;
913 	PROC_LOCK(p);
914 	tmpset = p->p_siglist;
915 	SIGSETOR(tmpset, td->td_siglist);
916 	SIGSETNAND(tmpset, td->td_sigmask);
917 	mtx_lock(&p->p_sigacts->ps_mtx);
918 	SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
919 	mtx_unlock(&p->p_sigacts->ps_mtx);
920 	if ((SIGNOTEMPTY(p->p_siglist) || SIGNOTEMPTY(td->td_siglist))
921 	    && nfs_sig_pending(tmpset)) {
922 		PROC_UNLOCK(p);
923 		return (EINTR);
924 	}
925 	PROC_UNLOCK(p);
926 	return (0);
927 }
928 
929 static int
930 nfs_msg(struct thread *td, const char *server, const char *msg, int error)
931 {
932 	struct proc *p;
933 
934 	p = td ? td->td_proc : NULL;
935 	if (error) {
936 		tprintf(p, LOG_INFO, "newnfs server %s: %s, error %d\n",
937 		    server, msg, error);
938 	} else {
939 		tprintf(p, LOG_INFO, "newnfs server %s: %s\n", server, msg);
940 	}
941 	return (0);
942 }
943 
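/*
 * Mark the mount as not responding (NFSSTA_TIMEO and/or NFSSTA_LOCKTIMEO),
 * post the corresponding VQ_NOTRESP/VQ_NOTRESPLOCK vfs events, and log a
 * message via nfs_msg().
 */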
944 static void
945 nfs_down(struct nfsmount *nmp, struct thread *td, const char *msg,
946     int error, int flags)
947 {
948 	if (nmp == NULL)
949 		return;
950 	mtx_lock(&nmp->nm_mtx);
951 	if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
952 		nmp->nm_state |= NFSSTA_TIMEO;
953 		mtx_unlock(&nmp->nm_mtx);
954 		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
955 		    VQ_NOTRESP, 0);
956 	} else
957 		mtx_unlock(&nmp->nm_mtx);
958 	mtx_lock(&nmp->nm_mtx);
959 	if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
960 		nmp->nm_state |= NFSSTA_LOCKTIMEO;
961 		mtx_unlock(&nmp->nm_mtx);
962 		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
963 		    VQ_NOTRESPLOCK, 0);
964 	} else
965 		mtx_unlock(&nmp->nm_mtx);
966 	nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, error);
967 }
968 
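/*
 * Inverse of nfs_down(): clear the NFSSTA_TIMEO/NFSSTA_LOCKTIMEO flags,
 * post the matching vfs events, and optionally log that the server is
 * responding again.
 */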
969 static void
970 nfs_up(struct nfsmount *nmp, struct thread *td, const char *msg,
971     int flags, int tprintfmsg)
972 {
973 	if (nmp == NULL)
974 		return;
975 	if (tprintfmsg) {
976 		nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);
977 	}
978 
979 	mtx_lock(&nmp->nm_mtx);
980 	if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
981 		nmp->nm_state &= ~NFSSTA_TIMEO;
982 		mtx_unlock(&nmp->nm_mtx);
983 		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
984 		    VQ_NOTRESP, 1);
985 	} else
986 		mtx_unlock(&nmp->nm_mtx);
987 
988 	mtx_lock(&nmp->nm_mtx);
989 	if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
990 		nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
991 		mtx_unlock(&nmp->nm_mtx);
992 		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
993 		    VQ_NOTRESPLOCK, 1);
994 	} else
995 		mtx_unlock(&nmp->nm_mtx);
996 }
997 
998