xref: /freebsd/sys/fs/nfs/nfs_commonkrpc.c (revision f05cddf9)
1 /*-
2  * Copyright (c) 1989, 1991, 1993, 1995
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Rick Macklem at The University of Guelph.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 /*
38  * Socket operations for use by nfs
39  */
40 
41 #include "opt_kdtrace.h"
42 #include "opt_kgssapi.h"
43 #include "opt_nfs.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/limits.h>
49 #include <sys/lock.h>
50 #include <sys/malloc.h>
51 #include <sys/mbuf.h>
52 #include <sys/mount.h>
53 #include <sys/mutex.h>
54 #include <sys/proc.h>
55 #include <sys/signalvar.h>
56 #include <sys/syscallsubr.h>
57 #include <sys/sysctl.h>
58 #include <sys/syslog.h>
59 #include <sys/vnode.h>
60 
61 #include <rpc/rpc.h>
62 
63 #include <kgssapi/krb5/kcrypto.h>
64 
65 #include <fs/nfs/nfsport.h>
66 
67 #ifdef KDTRACE_HOOKS
68 #include <sys/dtrace_bsd.h>
69 
70 dtrace_nfsclient_nfs23_start_probe_func_t
71 		dtrace_nfscl_nfs234_start_probe;
72 
73 dtrace_nfsclient_nfs23_done_probe_func_t
74 		dtrace_nfscl_nfs234_done_probe;
75 
76 /*
77  * Registered probes by RPC type.
78  */
79 uint32_t	nfscl_nfs2_start_probes[NFSV41_NPROCS + 1];
80 uint32_t	nfscl_nfs2_done_probes[NFSV41_NPROCS + 1];
81 
82 uint32_t	nfscl_nfs3_start_probes[NFSV41_NPROCS + 1];
83 uint32_t	nfscl_nfs3_done_probes[NFSV41_NPROCS + 1];
84 
85 uint32_t	nfscl_nfs4_start_probes[NFSV41_NPROCS + 1];
86 uint32_t	nfscl_nfs4_done_probes[NFSV41_NPROCS + 1];
87 #endif
88 
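/*
 * Global lock declarations.  These macros expand to the mutexes used by
 * NFSLOCKSTATE(), NFSLOCKREQ() and NFSD_LOCK() later in this file.
 */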
89 NFSSTATESPINLOCK;
90 NFSREQSPINLOCK;
91 NFSDLOCKMUTEX;
92 extern struct nfsstats newnfsstats;
93 extern struct nfsreqhead nfsd_reqq;
94 extern int nfscl_ticks;
95 extern void (*ncl_call_invalcaches)(struct vnode *);
96 extern int nfs_numnfscbd;
97 extern int nfscl_debuglevel;
98 
99 SVCPOOL		*nfscbd_pool;
100 static int	nfsrv_gsscallbackson = 0;
101 static int	nfs_bufpackets = 4;
102 static int	nfs_reconnects;
103 static int	nfs3_jukebox_delay = 10;
104 static int	nfs_skip_wcc_data_onerr = 1;
105 static int	nfs_keytab_enctype = ETYPE_DES_CBC_CRC;
106 
107 SYSCTL_DECL(_vfs_nfs);
108 
109 SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0,
110     "Buffer reservation size 2 < x < 64");
111 SYSCTL_INT(_vfs_nfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
112     "Number of times the nfs client has had to reconnect");
113 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW, &nfs3_jukebox_delay, 0,
114     "Number of seconds to delay a retry after receiving EJUKEBOX");
115 SYSCTL_INT(_vfs_nfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW, &nfs_skip_wcc_data_onerr, 0,
116     "Disable weak cache consistency checking when server returns an error");
117 SYSCTL_INT(_vfs_nfs, OID_AUTO, keytab_enctype, CTLFLAG_RW, &nfs_keytab_enctype, 0,
118     "Encryption type for the keytab entry used by nfs");
119 
120 static void	nfs_down(struct nfsmount *, struct thread *, const char *,
121     int, int);
122 static void	nfs_up(struct nfsmount *, struct thread *, const char *,
123     int, int);
124 static int	nfs_msg(struct thread *, const char *, const char *, int);
125 
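/* A cached RPC authentication handle and the uid it belongs to. */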
126 struct nfs_cached_auth {
127 	int		ca_refs; /* refcount, including 1 from the cache */
128 	uid_t		ca_uid;	 /* uid that corresponds to this auth */
129 	AUTH		*ca_auth; /* RPC auth handle */
130 };
131 
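/*
 * Table mapping each NFSv3 procedure number to the equivalent NFSv2
 * procedure, used when an RPC is sent on an NFSv2 mount.  Procedures
 * with no NFSv2 equivalent map to NFSV2PROC_NOOP.
 */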
132 static int nfsv2_procid[NFS_V3NPROCS] = {
133 	NFSV2PROC_NULL,
134 	NFSV2PROC_GETATTR,
135 	NFSV2PROC_SETATTR,
136 	NFSV2PROC_LOOKUP,
137 	NFSV2PROC_NOOP,
138 	NFSV2PROC_READLINK,
139 	NFSV2PROC_READ,
140 	NFSV2PROC_WRITE,
141 	NFSV2PROC_CREATE,
142 	NFSV2PROC_MKDIR,
143 	NFSV2PROC_SYMLINK,
144 	NFSV2PROC_CREATE,
145 	NFSV2PROC_REMOVE,
146 	NFSV2PROC_RMDIR,
147 	NFSV2PROC_RENAME,
148 	NFSV2PROC_LINK,
149 	NFSV2PROC_READDIR,
150 	NFSV2PROC_NOOP,
151 	NFSV2PROC_STATFS,
152 	NFSV2PROC_NOOP,
153 	NFSV2PROC_NOOP,
154 	NFSV2PROC_NOOP,
155 };
156 
157 /*
158  * Initialize sockets and congestion for a new NFS connection.
159  * We do not free the sockaddr on error.
160  */
161 int
162 newnfs_connect(struct nfsmount *nmp, struct nfssockreq *nrp,
163     struct ucred *cred, NFSPROC_T *p, int callback_retry_mult)
164 {
165 	int rcvreserve, sndreserve;
166 	int pktscale;
167 	struct sockaddr *saddr;
168 	struct ucred *origcred;
169 	CLIENT *client;
170 	struct netconfig *nconf;
171 	struct socket *so;
172 	int one = 1, retries, error = 0;
173 	struct thread *td = curthread;
174 	SVCXPRT *xprt;
175 	struct timeval timo;
176 
177 	/*
178 	 * We need to establish the socket using the credentials of
179 	 * the mountpoint.  Some parts of this process (such as
180 	 * sobind() and soconnect()) will use the current thread's
181 	 * credential instead of the socket credential.  To work
182 	 * around this, temporarily change the current thread's
183 	 * credential to that of the mountpoint.
184 	 *
185 	 * XXX: It would be better to explicitly pass the correct
186 	 * credential to sobind() and soconnect().
187 	 */
188 	origcred = td->td_ucred;
189 
190 	/*
191 	 * Use the credential in nr_cred, if not NULL.
192 	 */
193 	if (nrp->nr_cred != NULL)
194 		td->td_ucred = nrp->nr_cred;
195 	else
196 		td->td_ucred = cred;
197 	saddr = nrp->nr_nam;
198 
199 	if (saddr->sa_family == AF_INET)
200 		if (nrp->nr_sotype == SOCK_DGRAM)
201 			nconf = getnetconfigent("udp");
202 		else
203 			nconf = getnetconfigent("tcp");
204 	else
205 		if (nrp->nr_sotype == SOCK_DGRAM)
206 			nconf = getnetconfigent("udp6");
207 		else
208 			nconf = getnetconfigent("tcp6");
209 
210 	pktscale = nfs_bufpackets;
211 	if (pktscale < 2)
212 		pktscale = 2;
213 	if (pktscale > 64)
214 		pktscale = 64;
215 	/*
216 	 * soreserve() can fail if sb_max is too small, so shrink pktscale
217 	 * and try again if there is an error.
218 	 * Print a log message suggesting increasing sb_max.
219 	 * Creating a socket and doing this is necessary since, if the
220 	 * reservation sizes are too large and will make soreserve() fail,
221 	 * the connection will work until a large send is attempted and
222 	 * then it will loop in the krpc code.
223 	 */
224 	so = NULL;
225 	saddr = NFSSOCKADDR(nrp->nr_nam, struct sockaddr *);
226 	error = socreate(saddr->sa_family, &so, nrp->nr_sotype,
227 	    nrp->nr_soproto, td->td_ucred, td);
228 	if (error) {
229 		td->td_ucred = origcred;
230 		goto out;
231 	}
232 	do {
233 	    if (error != 0 && pktscale > 2)
234 		pktscale--;
235 	    if (nrp->nr_sotype == SOCK_DGRAM) {
236 		if (nmp != NULL) {
237 			sndreserve = (NFS_MAXDGRAMDATA + NFS_MAXPKTHDR) *
238 			    pktscale;
239 			rcvreserve = (NFS_MAXDGRAMDATA + NFS_MAXPKTHDR) *
240 			    pktscale;
241 		} else {
242 			sndreserve = rcvreserve = 1024 * pktscale;
243 		}
244 	    } else {
245 		if (nrp->nr_sotype != SOCK_STREAM)
246 			panic("nfscon sotype");
247 		if (nmp != NULL) {
248 			sndreserve = (NFS_MAXBSIZE + NFS_MAXPKTHDR +
249 			    sizeof (u_int32_t)) * pktscale;
250 			rcvreserve = (NFS_MAXBSIZE + NFS_MAXPKTHDR +
251 			    sizeof (u_int32_t)) * pktscale;
252 		} else {
253 			sndreserve = rcvreserve = 1024 * pktscale;
254 		}
255 	    }
256 	    error = soreserve(so, sndreserve, rcvreserve);
257 	} while (error != 0 && pktscale > 2);
258 	soclose(so);
259 	if (error) {
260 		td->td_ucred = origcred;
261 		goto out;
262 	}
263 
264 	client = clnt_reconnect_create(nconf, saddr, nrp->nr_prog,
265 	    nrp->nr_vers, sndreserve, rcvreserve);
266 	CLNT_CONTROL(client, CLSET_WAITCHAN, "newnfsreq");
267 	if (nmp != NULL) {
268 		if ((nmp->nm_flag & NFSMNT_INT))
269 			CLNT_CONTROL(client, CLSET_INTERRUPTIBLE, &one);
270 		if ((nmp->nm_flag & NFSMNT_RESVPORT))
271 			CLNT_CONTROL(client, CLSET_PRIVPORT, &one);
272 		if (NFSHASSOFT(nmp)) {
273 			if (nmp->nm_sotype == SOCK_DGRAM)
274 				/*
275 				 * For UDP, the large timeout for a reconnect
276 				 * will be set to "nm_retry * nm_timeo / 2", so
277 				 * we only want to do 2 reconnect timeout
278 				 * retries.
279 				 */
280 				retries = 2;
281 			else
282 				retries = nmp->nm_retry;
283 		} else
284 			retries = INT_MAX;
285 		if (NFSHASNFSV4N(nmp)) {
286 			/*
287 			 * Make sure the nfscbd_pool doesn't get destroyed
288 			 * while doing this.
289 			 */
290 			NFSD_LOCK();
291 			if (nfs_numnfscbd > 0) {
292 				nfs_numnfscbd++;
293 				NFSD_UNLOCK();
294 				xprt = svc_vc_create_backchannel(nfscbd_pool);
295 				CLNT_CONTROL(client, CLSET_BACKCHANNEL, xprt);
296 				NFSD_LOCK();
297 				nfs_numnfscbd--;
298 				if (nfs_numnfscbd == 0)
299 					wakeup(&nfs_numnfscbd);
300 			}
301 			NFSD_UNLOCK();
302 		}
303 	} else {
304 		/*
305 		 * Three cases:
306 		 * - Null RPC callback to client
307 		 * - Non-Null RPC callback to client, wait a little longer
308 		 * - upcalls to nfsuserd and gssd (clp == NULL)
309 		 */
310 		if (callback_retry_mult == 0) {
311 			retries = NFSV4_UPCALLRETRY;
312 			CLNT_CONTROL(client, CLSET_PRIVPORT, &one);
313 		} else {
314 			retries = NFSV4_CALLBACKRETRY * callback_retry_mult;
315 		}
316 	}
317 	CLNT_CONTROL(client, CLSET_RETRIES, &retries);
318 
319 	if (nmp != NULL) {
320 		/*
321 		 * For UDP, there are 2 timeouts:
322 		 * - CLSET_RETRY_TIMEOUT sets the initial timeout for the timer
323 		 *   that does a retransmit of an RPC request using the same
324 		 *   socket and xid. This is what you normally want to do,
325 		 *   since NFS servers depend on "same xid" for their
326 		 *   Duplicate Request Cache.
327 		 * - timeout specified in CLNT_CALL_MBUF(), which specifies when
328 		 *   retransmits on the same socket should fail and a fresh
329 	 *   socket be created. Each of these timeouts counts as one
330 		 *   CLSET_RETRIES as set above.
331 		 * Set the initial retransmit timeout for UDP. This timeout
332 		 * doesn't exist for TCP and the following call just fails,
333 		 * which is ok.
334 		 */
335 		timo.tv_sec = nmp->nm_timeo / NFS_HZ;
336 		timo.tv_usec = (nmp->nm_timeo % NFS_HZ) * 1000000 / NFS_HZ;
337 		CLNT_CONTROL(client, CLSET_RETRY_TIMEOUT, &timo);
338 	}
339 
340 	mtx_lock(&nrp->nr_mtx);
341 	if (nrp->nr_client != NULL) {
342 		/*
343 		 * Someone else already connected.
344 		 */
345 		CLNT_RELEASE(client);
346 	} else {
347 		nrp->nr_client = client;
348 	}
349 
350 	/*
351 	 * Protocols that do not require connections may be optionally left
352 	 * unconnected for servers that reply from a port other than NFS_PORT.
353 	 */
354 	if (nmp == NULL || (nmp->nm_flag & NFSMNT_NOCONN) == 0) {
355 		mtx_unlock(&nrp->nr_mtx);
356 		CLNT_CONTROL(client, CLSET_CONNECT, &one);
357 	} else {
358 		mtx_unlock(&nrp->nr_mtx);
359 	}
360 
361 	/* Restore current thread's credentials. */
362 	td->td_ucred = origcred;
363 
364 out:
365 	NFSEXITCODE(error);
366 	return (error);
367 }
368 
369 /*
370  * NFS disconnect. Clean up and unlink.
371  */
372 void
373 newnfs_disconnect(struct nfssockreq *nrp)
374 {
375 	CLIENT *client;
376 
377 	mtx_lock(&nrp->nr_mtx);
378 	if (nrp->nr_client != NULL) {
379 		client = nrp->nr_client;
380 		nrp->nr_client = NULL;
381 		mtx_unlock(&nrp->nr_mtx);
382 		rpc_gss_secpurge_call(client);
383 		CLNT_CLOSE(client);
384 		CLNT_RELEASE(client);
385 	} else {
386 		mtx_unlock(&nrp->nr_mtx);
387 	}
388 }
389 
390 static AUTH *
391 nfs_getauth(struct nfssockreq *nrp, int secflavour, char *clnt_principal,
392     char *srv_principal, gss_OID mech_oid, struct ucred *cred)
393 {
394 	rpc_gss_service_t svc;
395 	AUTH *auth;
396 #ifdef notyet
397 	rpc_gss_options_req_t req_options;
398 #endif
399 
400 	switch (secflavour) {
401 	case RPCSEC_GSS_KRB5:
402 	case RPCSEC_GSS_KRB5I:
403 	case RPCSEC_GSS_KRB5P:
404 		if (!mech_oid) {
405 			if (!rpc_gss_mech_to_oid_call("kerberosv5", &mech_oid))
406 				return (NULL);
407 		}
408 		if (secflavour == RPCSEC_GSS_KRB5)
409 			svc = rpc_gss_svc_none;
410 		else if (secflavour == RPCSEC_GSS_KRB5I)
411 			svc = rpc_gss_svc_integrity;
412 		else
413 			svc = rpc_gss_svc_privacy;
414 #ifdef notyet
415 		req_options.req_flags = GSS_C_MUTUAL_FLAG;
416 		req_options.time_req = 0;
417 		req_options.my_cred = GSS_C_NO_CREDENTIAL;
418 		req_options.input_channel_bindings = NULL;
419 		req_options.enc_type = nfs_keytab_enctype;
420 
421 		auth = rpc_gss_secfind_call(nrp->nr_client, cred,
422 		    clnt_principal, srv_principal, mech_oid, svc,
423 		    &req_options);
424 #else
425 		/*
426 		 * Until changes to the rpcsec_gss code are committed,
427 		 * there is no support for host based initiator
428 		 * principals. As such, that case cannot yet be handled.
429 		 */
430 		if (clnt_principal == NULL)
431 			auth = rpc_gss_secfind_call(nrp->nr_client, cred,
432 			    srv_principal, mech_oid, svc);
433 		else
434 			auth = NULL;
435 #endif
436 		if (auth != NULL)
437 			return (auth);
438 		/* fallthrough */
439 	case AUTH_SYS:
440 	default:
441 		return (authunix_create(cred));
442 
443 	}
444 }
445 
446 /*
447  * Callback from the RPC code to generate up/down notifications.
448  */
449 
450 struct nfs_feedback_arg {
451 	struct nfsmount *nf_mount;
452 	int		nf_lastmsg;	/* last tprintf */
453 	int		nf_tprintfmsg;
454 	struct thread	*nf_td;
455 };
456 
457 static void
458 nfs_feedback(int type, int proc, void *arg)
459 {
460 	struct nfs_feedback_arg *nf = (struct nfs_feedback_arg *) arg;
461 	struct nfsmount *nmp = nf->nf_mount;
462 	time_t now;
463 
464 	switch (type) {
465 	case FEEDBACK_REXMIT2:
466 	case FEEDBACK_RECONNECT:
467 		now = NFSD_MONOSEC;
468 		if (nf->nf_lastmsg + nmp->nm_tprintf_delay < now) {
469 			nfs_down(nmp, nf->nf_td,
470 			    "not responding", 0, NFSSTA_TIMEO);
471 			nf->nf_tprintfmsg = TRUE;
472 			nf->nf_lastmsg = now;
473 		}
474 		break;
475 
476 	case FEEDBACK_OK:
477 		nfs_up(nf->nf_mount, nf->nf_td,
478 		    "is alive again", NFSSTA_TIMEO, nf->nf_tprintfmsg);
479 		break;
480 	}
481 }
482 
483 /*
484  * newnfs_request - goes something like this
485  *	- does the rpc by calling the krpc layer
486  *	- breaks down the rpc header and returns with the nfs reply
487  * nb: always frees up nd_mreq mbuf list
488  */
489 int
490 newnfs_request(struct nfsrv_descript *nd, struct nfsmount *nmp,
491     struct nfsclient *clp, struct nfssockreq *nrp, vnode_t vp,
492     struct thread *td, struct ucred *cred, u_int32_t prog, u_int32_t vers,
493     u_char *retsum, int toplevel, u_int64_t *xidp, struct nfsclsession *sep)
494 {
495 	u_int32_t retseq, retval, *tl;
496 	time_t waituntil;
497 	int i = 0, j = 0, opcnt, set_sigset = 0, slot;
498 	int trycnt, error = 0, usegssname = 0, secflavour = AUTH_SYS;
499 	int freeslot, timeo;
500 	u_int16_t procnum;
501 	u_int trylater_delay = 1;
502 	struct nfs_feedback_arg nf;
503 	struct timeval timo;
504 	AUTH *auth;
505 	struct rpc_callextra ext;
506 	enum clnt_stat stat;
507 	struct nfsreq *rep = NULL;
508 	char *srv_principal = NULL;
509 	sigset_t oldset;
510 	struct ucred *authcred;
511 
512 	if (xidp != NULL)
513 		*xidp = 0;
514 	/* Reject requests while attempting a forced unmount. */
515 	if (nmp != NULL && (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)) {
516 		m_freem(nd->nd_mreq);
517 		return (ESTALE);
518 	}
519 
520 	/*
521 	 * Set authcred, which is used to acquire RPC credentials to
522 	 * the cred argument, by default. The crhold() should not be
523 	 * necessary, but will ensure that some future code change
524 	 * doesn't result in the credential being free'd prematurely.
525 	 */
526 	authcred = crhold(cred);
527 
528 	/* For client side interruptible mounts, mask off the signals. */
529 	if (nmp != NULL && td != NULL && NFSHASINT(nmp)) {
530 		newnfs_set_sigmask(td, &oldset);
531 		set_sigset = 1;
532 	}
533 
534 	/*
535 	 * XXX if not already connected call nfs_connect now. Longer
536 	 * term, change nfs_mount to call nfs_connect unconditionally
537 	 * and let clnt_reconnect_create handle reconnects.
538 	 */
539 	if (nrp->nr_client == NULL)
540 		newnfs_connect(nmp, nrp, cred, td, 0);
541 
542 	/*
543 	 * For a client side mount, nmp is != NULL and clp == NULL. For
544 	 * server calls (callbacks or upcalls), nmp == NULL.
545 	 */
546 	if (clp != NULL) {
547 		NFSLOCKSTATE();
548 		if ((clp->lc_flags & LCL_GSS) && nfsrv_gsscallbackson) {
549 			secflavour = RPCSEC_GSS_KRB5;
550 			if (nd->nd_procnum != NFSPROC_NULL) {
551 				if (clp->lc_flags & LCL_GSSINTEGRITY)
552 					secflavour = RPCSEC_GSS_KRB5I;
553 				else if (clp->lc_flags & LCL_GSSPRIVACY)
554 					secflavour = RPCSEC_GSS_KRB5P;
555 			}
556 		}
557 		NFSUNLOCKSTATE();
558 	} else if (nmp != NULL && NFSHASKERB(nmp) &&
559 	     nd->nd_procnum != NFSPROC_NULL) {
560 		if (NFSHASALLGSSNAME(nmp) && nmp->nm_krbnamelen > 0)
561 			nd->nd_flag |= ND_USEGSSNAME;
562 		if ((nd->nd_flag & ND_USEGSSNAME) != 0) {
563 			/*
564 			 * If there is a client side host based credential,
565 			 * use that, otherwise use the system uid, if set.
566 			 * The system uid is in the nmp->nm_sockreq.nr_cred
567 			 * credentials.
568 			 */
569 			if (nmp->nm_krbnamelen > 0) {
570 				usegssname = 1;
571 			} else if (nmp->nm_uid != (uid_t)-1) {
572 				KASSERT(nmp->nm_sockreq.nr_cred != NULL,
573 				    ("newnfs_request: NULL nr_cred"));
574 				crfree(authcred);
575 				authcred = crhold(nmp->nm_sockreq.nr_cred);
576 			}
577 		} else if (nmp->nm_krbnamelen == 0 &&
578 		    nmp->nm_uid != (uid_t)-1 && cred->cr_uid == (uid_t)0) {
579 			/*
580 			 * If there is no host based principal name and
581 			 * the system uid is set and this is root, use the
582 			 * system uid, since root won't have user
583 			 * credentials in a credentials cache file.
584 			 * The system uid is in the nmp->nm_sockreq.nr_cred
585 			 * credentials.
586 			 */
587 			KASSERT(nmp->nm_sockreq.nr_cred != NULL,
588 			    ("newnfs_request: NULL nr_cred"));
589 			crfree(authcred);
590 			authcred = crhold(nmp->nm_sockreq.nr_cred);
591 		}
592 		if (NFSHASINTEGRITY(nmp))
593 			secflavour = RPCSEC_GSS_KRB5I;
594 		else if (NFSHASPRIVACY(nmp))
595 			secflavour = RPCSEC_GSS_KRB5P;
596 		else
597 			secflavour = RPCSEC_GSS_KRB5;
598 		srv_principal = NFSMNT_SRVKRBNAME(nmp);
599 	} else if (nmp != NULL && !NFSHASKERB(nmp) &&
600 	    nd->nd_procnum != NFSPROC_NULL &&
601 	    (nd->nd_flag & ND_USEGSSNAME) != 0) {
602 		/*
603 		 * Use the uid that did the mount when the RPC is doing
604 		 * NFSv4 system operations, as indicated by the
605 		 * ND_USEGSSNAME flag, for the AUTH_SYS case.
606 		 * The credentials in nm_sockreq.nr_cred were used for the
607 		 * mount.
608 		 */
609 		KASSERT(nmp->nm_sockreq.nr_cred != NULL,
610 		    ("newnfs_request: NULL nr_cred"));
611 		crfree(authcred);
612 		authcred = crhold(nmp->nm_sockreq.nr_cred);
613 	}
614 
615 	if (nmp != NULL) {
616 		bzero(&nf, sizeof(struct nfs_feedback_arg));
617 		nf.nf_mount = nmp;
618 		nf.nf_td = td;
619 		nf.nf_lastmsg = NFSD_MONOSEC -
620 		    ((nmp->nm_tprintf_delay)-(nmp->nm_tprintf_initial_delay));
621 	}
622 
623 	if (nd->nd_procnum == NFSPROC_NULL)
624 		auth = authnone_create();
625 	else if (usegssname)
626 		auth = nfs_getauth(nrp, secflavour, nmp->nm_krbname,
627 		    srv_principal, NULL, authcred);
628 	else
629 		auth = nfs_getauth(nrp, secflavour, NULL,
630 		    srv_principal, NULL, authcred);
631 	crfree(authcred);
632 	if (auth == NULL) {
633 		m_freem(nd->nd_mreq);
634 		if (set_sigset)
635 			newnfs_restore_sigmask(td, &oldset);
636 		return (EACCES);
637 	}
638 	bzero(&ext, sizeof(ext));
639 	ext.rc_auth = auth;
640 	if (nmp != NULL) {
641 		ext.rc_feedback = nfs_feedback;
642 		ext.rc_feedback_arg = &nf;
643 	}
644 
645 	procnum = nd->nd_procnum;
646 	if ((nd->nd_flag & ND_NFSV4) &&
647 	    nd->nd_procnum != NFSPROC_NULL &&
648 	    nd->nd_procnum != NFSV4PROC_CBCOMPOUND)
649 		procnum = NFSV4PROC_COMPOUND;
650 
651 	if (nmp != NULL) {
652 		NFSINCRGLOBAL(newnfsstats.rpcrequests);
653 
654 		/* Map the procnum to the old NFSv2 one, as required. */
655 		if ((nd->nd_flag & ND_NFSV2) != 0) {
656 			if (nd->nd_procnum < NFS_V3NPROCS)
657 				procnum = nfsv2_procid[nd->nd_procnum];
658 			else
659 				procnum = NFSV2PROC_NOOP;
660 		}
661 
662 		/*
663 		 * Now only used for the R_DONTRECOVER case, but until that is
664 		 * supported within the krpc code, I need to keep a queue of
665 		 * outstanding RPCs for nfsv4 client requests.
666 		 */
667 		if ((nd->nd_flag & ND_NFSV4) && procnum == NFSV4PROC_COMPOUND)
668 			MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq),
669 			    M_NFSDREQ, M_WAITOK);
670 #ifdef KDTRACE_HOOKS
671 		if (dtrace_nfscl_nfs234_start_probe != NULL) {
672 			uint32_t probe_id;
673 			int probe_procnum;
674 
675 			if (nd->nd_flag & ND_NFSV4) {
676 				probe_id =
677 				    nfscl_nfs4_start_probes[nd->nd_procnum];
678 				probe_procnum = nd->nd_procnum;
679 			} else if (nd->nd_flag & ND_NFSV3) {
680 				probe_id = nfscl_nfs3_start_probes[procnum];
681 				probe_procnum = procnum;
682 			} else {
683 				probe_id =
684 				    nfscl_nfs2_start_probes[nd->nd_procnum];
685 				probe_procnum = procnum;
686 			}
687 			if (probe_id != 0)
688 				(dtrace_nfscl_nfs234_start_probe)
689 				    (probe_id, vp, nd->nd_mreq, cred,
690 				     probe_procnum);
691 		}
692 #endif
693 	}
694 	trycnt = 0;
695 	freeslot = -1;		/* Set to slot that needs to be free'd */
696 tryagain:
697 	slot = -1;		/* Slot that needs a sequence# increment. */
698 	/*
699 	 * This timeout specifies when a new socket should be created,
700 	 * along with new xid values. For UDP, this should be done
701 	 * infrequently, since retransmits of RPC requests should normally
702 	 * use the same xid.
703 	 */
704 	if (nmp == NULL) {
705 		timo.tv_usec = 0;
706 		if (clp == NULL)
707 			timo.tv_sec = NFSV4_UPCALLTIMEO;
708 		else
709 			timo.tv_sec = NFSV4_CALLBACKTIMEO;
710 	} else {
711 		if (nrp->nr_sotype != SOCK_DGRAM) {
712 			timo.tv_usec = 0;
713 			if ((nmp->nm_flag & NFSMNT_NFSV4))
714 				timo.tv_sec = INT_MAX;
715 			else
716 				timo.tv_sec = NFS_TCPTIMEO;
717 		} else {
718 			if (NFSHASSOFT(nmp)) {
719 				/*
720 				 * CLSET_RETRIES is set to 2, so this should be
721 				 * half of the total timeout required.
722 				 */
723 				timeo = nmp->nm_retry * nmp->nm_timeo / 2;
724 				if (timeo < 1)
725 					timeo = 1;
726 				timo.tv_sec = timeo / NFS_HZ;
727 				timo.tv_usec = (timeo % NFS_HZ) * 1000000 /
728 				    NFS_HZ;
729 			} else {
730 				/* For UDP hard mounts, use a large value. */
731 				timo.tv_sec = NFS_MAXTIMEO / NFS_HZ;
732 				timo.tv_usec = 0;
733 			}
734 		}
735 
736 		if (rep != NULL) {
737 			rep->r_flags = 0;
738 			rep->r_nmp = nmp;
739 			/*
740 			 * Chain request into list of outstanding requests.
741 			 */
742 			NFSLOCKREQ();
743 			TAILQ_INSERT_TAIL(&nfsd_reqq, rep, r_chain);
744 			NFSUNLOCKREQ();
745 		}
746 	}
747 
748 	nd->nd_mrep = NULL;
749 	stat = CLNT_CALL_MBUF(nrp->nr_client, &ext, procnum, nd->nd_mreq,
750 	    &nd->nd_mrep, timo);
751 
752 	if (rep != NULL) {
753 		/*
754 		 * RPC done, unlink the request.
755 		 */
756 		NFSLOCKREQ();
757 		TAILQ_REMOVE(&nfsd_reqq, rep, r_chain);
758 		NFSUNLOCKREQ();
759 	}
760 
761 	/*
762 	 * If there was a successful reply and a tprintf msg was printed,
763 	 * tprintf a response.
764 	 */
765 	if (stat == RPC_SUCCESS) {
766 		error = 0;
767 	} else if (stat == RPC_TIMEDOUT) {
768 		NFSINCRGLOBAL(newnfsstats.rpctimeouts);
769 		error = ETIMEDOUT;
770 	} else if (stat == RPC_VERSMISMATCH) {
771 		NFSINCRGLOBAL(newnfsstats.rpcinvalid);
772 		error = EOPNOTSUPP;
773 	} else if (stat == RPC_PROGVERSMISMATCH) {
774 		NFSINCRGLOBAL(newnfsstats.rpcinvalid);
775 		error = EPROTONOSUPPORT;
776 	} else if (stat == RPC_INTR) {
777 		error = EINTR;
778 	} else {
779 		NFSINCRGLOBAL(newnfsstats.rpcinvalid);
780 		error = EACCES;
781 	}
782 	if (error) {
783 		m_freem(nd->nd_mreq);
784 		AUTH_DESTROY(auth);
785 		if (rep != NULL)
786 			FREE((caddr_t)rep, M_NFSDREQ);
787 		if (set_sigset)
788 			newnfs_restore_sigmask(td, &oldset);
789 		return (error);
790 	}
791 
792 	KASSERT(nd->nd_mrep != NULL, ("mrep shouldn't be NULL if no error\n"));
793 
794 	/*
795 	 * Search for any mbufs that are not a multiple of 4 bytes long
796 	 * or with m_data not longword aligned.
797 	 * These could cause pointer alignment problems, so copy them to
798 	 * well aligned mbufs.
799 	 */
800 	newnfs_realign(&nd->nd_mrep, M_WAITOK);
801 	nd->nd_md = nd->nd_mrep;
802 	nd->nd_dpos = NFSMTOD(nd->nd_md, caddr_t);
803 	nd->nd_repstat = 0;
804 	if (nd->nd_procnum != NFSPROC_NULL) {
805 		/* If sep == NULL, set it to the default in nmp. */
806 		if (sep == NULL && nmp != NULL)
807 			sep = NFSMNT_MDSSESSION(nmp);
808 		/*
809 		 * and now the actual NFS xdr.
810 		 */
811 		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
812 		nd->nd_repstat = fxdr_unsigned(u_int32_t, *tl);
813 		if (nd->nd_repstat >= 10000)
814 			NFSCL_DEBUG(1, "proc=%d reps=%d\n", (int)nd->nd_procnum,
815 			    (int)nd->nd_repstat);
816 
817 		/*
818 		 * Get rid of the tag, return count and SEQUENCE result for
819 		 * NFSv4.
820 		 */
821 		if ((nd->nd_flag & ND_NFSV4) != 0) {
822 			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
823 			i = fxdr_unsigned(int, *tl);
824 			error = nfsm_advance(nd, NFSM_RNDUP(i), -1);
825 			if (error)
826 				goto nfsmout;
827 			NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
828 			opcnt = fxdr_unsigned(int, *tl++);
829 			i = fxdr_unsigned(int, *tl++);
830 			j = fxdr_unsigned(int, *tl);
831 			if (j >= 10000)
832 				NFSCL_DEBUG(1, "fop=%d fst=%d\n", i, j);
833 			/*
834 			 * If the first op is Sequence, free up the slot.
835 			 */
836 			if (nmp != NULL && i == NFSV4OP_SEQUENCE && j != 0)
837 				NFSCL_DEBUG(1, "failed seq=%d\n", j);
838 			if (nmp != NULL && i == NFSV4OP_SEQUENCE && j == 0) {
839 				NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
840 				    5 * NFSX_UNSIGNED);
841 				mtx_lock(&sep->nfsess_mtx);
842 				tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
843 				retseq = fxdr_unsigned(uint32_t, *tl++);
844 				slot = fxdr_unsigned(int, *tl++);
845 				freeslot = slot;
846 				if (retseq != sep->nfsess_slotseq[slot])
847 					printf("retseq diff 0x%x\n", retseq);
848 				retval = fxdr_unsigned(uint32_t, *++tl);
849 				if ((retval + 1) < sep->nfsess_foreslots)
850 					sep->nfsess_foreslots = (retval + 1);
851 				else if ((retval + 1) > sep->nfsess_foreslots)
852 					sep->nfsess_foreslots = (retval < 64) ?
853 					    (retval + 1) : 64;
854 				mtx_unlock(&sep->nfsess_mtx);
855 
856 				/* Grab the op and status for the next one. */
857 				if (opcnt > 1) {
858 					NFSM_DISSECT(tl, uint32_t *,
859 					    2 * NFSX_UNSIGNED);
860 					i = fxdr_unsigned(int, *tl++);
861 					j = fxdr_unsigned(int, *tl);
862 				}
863 			}
864 		}
865 		if (nd->nd_repstat != 0) {
866 			if (((nd->nd_repstat == NFSERR_DELAY ||
867 			      nd->nd_repstat == NFSERR_GRACE) &&
868 			     (nd->nd_flag & ND_NFSV4) &&
869 			     nd->nd_procnum != NFSPROC_DELEGRETURN &&
870 			     nd->nd_procnum != NFSPROC_SETATTR &&
871 			     nd->nd_procnum != NFSPROC_READ &&
872 			     nd->nd_procnum != NFSPROC_READDS &&
873 			     nd->nd_procnum != NFSPROC_WRITE &&
874 			     nd->nd_procnum != NFSPROC_WRITEDS &&
875 			     nd->nd_procnum != NFSPROC_OPEN &&
876 			     nd->nd_procnum != NFSPROC_CREATE &&
877 			     nd->nd_procnum != NFSPROC_OPENCONFIRM &&
878 			     nd->nd_procnum != NFSPROC_OPENDOWNGRADE &&
879 			     nd->nd_procnum != NFSPROC_CLOSE &&
880 			     nd->nd_procnum != NFSPROC_LOCK &&
881 			     nd->nd_procnum != NFSPROC_LOCKU) ||
882 			    (nd->nd_repstat == NFSERR_DELAY &&
883 			     (nd->nd_flag & ND_NFSV4) == 0) ||
884 			    nd->nd_repstat == NFSERR_RESOURCE) {
885 				if (trylater_delay > NFS_TRYLATERDEL)
886 					trylater_delay = NFS_TRYLATERDEL;
887 				waituntil = NFSD_MONOSEC + trylater_delay;
888 				while (NFSD_MONOSEC < waituntil)
889 					(void) nfs_catnap(PZERO, 0, "nfstry");
890 				trylater_delay *= 2;
891 				if (slot != -1) {
892 					mtx_lock(&sep->nfsess_mtx);
893 					sep->nfsess_slotseq[slot]++;
894 					*nd->nd_slotseq = txdr_unsigned(
895 					    sep->nfsess_slotseq[slot]);
896 					mtx_unlock(&sep->nfsess_mtx);
897 				}
898 				m_freem(nd->nd_mrep);
899 				nd->nd_mrep = NULL;
900 				goto tryagain;
901 			}
902 
903 			/*
904 			 * If the File Handle was stale, invalidate the
905 			 * lookup cache, just in case.
906 			 * (vp != NULL implies a client side call)
907 			 */
908 			if (nd->nd_repstat == ESTALE && vp != NULL) {
909 				cache_purge(vp);
910 				if (ncl_call_invalcaches != NULL)
911 					(*ncl_call_invalcaches)(vp);
912 			}
913 		}
914 		if ((nd->nd_flag & ND_NFSV4) != 0) {
915 			/* Free the slot, as required. */
916 			if (freeslot != -1)
917 				nfsv4_freeslot(sep, freeslot);
918 			/*
919 			 * If this op is Putfh, throw its results away.
920 			 */
921 			if (j >= 10000)
922 				NFSCL_DEBUG(1, "nop=%d nst=%d\n", i, j);
923 			if (nmp != NULL && i == NFSV4OP_PUTFH && j == 0) {
924 				NFSM_DISSECT(tl,u_int32_t *,2 * NFSX_UNSIGNED);
925 				i = fxdr_unsigned(int, *tl++);
926 				j = fxdr_unsigned(int, *tl);
927 				if (j >= 10000)
928 					NFSCL_DEBUG(1, "n2op=%d n2st=%d\n", i,
929 					    j);
930 				/*
931 				 * All Compounds that do an Op that must
932 				 * be in sequence consist of NFSV4OP_PUTFH
933 				 * followed by one of these. As such, we
934 				 * can determine if the seqid# should be
935 				 * incremented, here.
936 				 */
937 				if ((i == NFSV4OP_OPEN ||
938 				     i == NFSV4OP_OPENCONFIRM ||
939 				     i == NFSV4OP_OPENDOWNGRADE ||
940 				     i == NFSV4OP_CLOSE ||
941 				     i == NFSV4OP_LOCK ||
942 				     i == NFSV4OP_LOCKU) &&
943 				    (j == 0 ||
944 				     (j != NFSERR_STALECLIENTID &&
945 				      j != NFSERR_STALESTATEID &&
946 				      j != NFSERR_BADSTATEID &&
947 				      j != NFSERR_BADSEQID &&
948 				      j != NFSERR_BADXDR &&
949 				      j != NFSERR_RESOURCE &&
950 				      j != NFSERR_NOFILEHANDLE)))
951 					nd->nd_flag |= ND_INCRSEQID;
952 			}
953 			/*
954 			 * If this op's status is non-zero, mark
955 			 * that there is no more data to process.
956 			 */
957 			if (j)
958 				nd->nd_flag |= ND_NOMOREDATA;
959 
960 			/*
961 			 * If R_DONTRECOVER is set, replace the stale error
962 			 * reply, so that recovery isn't initiated.
963 			 */
964 			if ((nd->nd_repstat == NFSERR_STALECLIENTID ||
965 			     nd->nd_repstat == NFSERR_BADSESSION ||
966 			     nd->nd_repstat == NFSERR_STALESTATEID) &&
967 			    rep != NULL && (rep->r_flags & R_DONTRECOVER))
968 				nd->nd_repstat = NFSERR_STALEDONTRECOVER;
969 		}
970 	}
971 
972 #ifdef KDTRACE_HOOKS
973 	if (nmp != NULL && dtrace_nfscl_nfs234_done_probe != NULL) {
974 		uint32_t probe_id;
975 		int probe_procnum;
976 
977 		if (nd->nd_flag & ND_NFSV4) {
978 			probe_id = nfscl_nfs4_done_probes[nd->nd_procnum];
979 			probe_procnum = nd->nd_procnum;
980 		} else if (nd->nd_flag & ND_NFSV3) {
981 			probe_id = nfscl_nfs3_done_probes[procnum];
982 			probe_procnum = procnum;
983 		} else {
984 			probe_id = nfscl_nfs2_done_probes[nd->nd_procnum];
985 			probe_procnum = procnum;
986 		}
987 		if (probe_id != 0)
988 			(dtrace_nfscl_nfs234_done_probe)(probe_id, vp,
989 			    nd->nd_mreq, cred, probe_procnum, 0);
990 	}
991 #endif
992 
993 	m_freem(nd->nd_mreq);
994 	AUTH_DESTROY(auth);
995 	if (rep != NULL)
996 		FREE((caddr_t)rep, M_NFSDREQ);
997 	if (set_sigset)
998 		newnfs_restore_sigmask(td, &oldset);
999 	return (0);
1000 nfsmout:
1001 	mbuf_freem(nd->nd_mrep);
1002 	mbuf_freem(nd->nd_mreq);
1003 	AUTH_DESTROY(auth);
1004 	if (rep != NULL)
1005 		FREE((caddr_t)rep, M_NFSDREQ);
1006 	if (set_sigset)
1007 		newnfs_restore_sigmask(td, &oldset);
1008 	return (error);
1009 }
1010 
1011 /*
1012  * Cancel an nfs mount's outstanding requests by closing its krpc
1013  * client handle. This is used by forced unmounts to terminate any
1014  * outstanding RPCs.
1015  */
1016 int
1017 newnfs_nmcancelreqs(struct nfsmount *nmp)
1018 {
1019 
1020 	if (nmp->nm_sockreq.nr_client != NULL)
1021 		CLNT_CLOSE(nmp->nm_sockreq.nr_client);
1022 	return (0);
1023 }
1024 
1025 /*
1026  * Any signal that can interrupt an NFS operation in an intr mount
1027  * should be added to this set. SIGSTOP and SIGKILL cannot be masked.
1028  */
1029 int newnfs_sig_set[] = {
1030 	SIGINT,
1031 	SIGTERM,
1032 	SIGHUP,
1033 	SIGKILL,
1034 	SIGQUIT
1035 };
1036 
1037 /*
1038  * Check to see if one of the signals in our subset is pending on
1039  * the process (in an intr mount).
1040  */
1041 static int
1042 nfs_sig_pending(sigset_t set)
1043 {
1044 	int i;
1045 
1046 	for (i = 0 ; i < sizeof(newnfs_sig_set)/sizeof(int) ; i++)
1047 		if (SIGISMEMBER(set, newnfs_sig_set[i]))
1048 			return (1);
1049 	return (0);
1050 }
1051 
1052 /*
1053  * The set/restore sigmask functions are used to (temporarily) overwrite
1054  * the thread td_sigmask during an RPC call (for example). These are also
1055  * used in other places in the NFS client that might tsleep().
1056  */
1057 void
1058 newnfs_set_sigmask(struct thread *td, sigset_t *oldset)
1059 {
1060 	sigset_t newset;
1061 	int i;
1062 	struct proc *p;
1063 
1064 	SIGFILLSET(newset);
1065 	if (td == NULL)
1066 		td = curthread; /* XXX */
1067 	p = td->td_proc;
1068 	/* Remove the NFS set of signals from newset */
1069 	PROC_LOCK(p);
1070 	mtx_lock(&p->p_sigacts->ps_mtx);
1071 	for (i = 0 ; i < sizeof(newnfs_sig_set)/sizeof(int) ; i++) {
1072 		/*
1073 		 * But make sure we leave the ones already masked
1074 		 * by the process, ie. remove the signal from the
1075 		 * temporary signalmask only if it wasn't already
1076 		 * in p_sigmask.
1077 		 */
1078 		if (!SIGISMEMBER(td->td_sigmask, newnfs_sig_set[i]) &&
1079 		    !SIGISMEMBER(p->p_sigacts->ps_sigignore, newnfs_sig_set[i]))
1080 			SIGDELSET(newset, newnfs_sig_set[i]);
1081 	}
1082 	mtx_unlock(&p->p_sigacts->ps_mtx);
1083 	kern_sigprocmask(td, SIG_SETMASK, &newset, oldset,
1084 	    SIGPROCMASK_PROC_LOCKED);
1085 	PROC_UNLOCK(p);
1086 }
1087 
1088 void
1089 newnfs_restore_sigmask(struct thread *td, sigset_t *set)
1090 {
1091 	if (td == NULL)
1092 		td = curthread; /* XXX */
1093 	kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
1094 }
1095 
1096 /*
1097  * NFS wrapper to msleep(), that shoves a new p_sigmask and restores the
1098  * old one after msleep() returns.
1099  */
1100 int
1101 newnfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo)
1102 {
1103 	sigset_t oldset;
1104 	int error;
1105 	struct proc *p;
1106 
1107 	if ((priority & PCATCH) == 0)
1108 		return msleep(ident, mtx, priority, wmesg, timo);
1109 	if (td == NULL)
1110 		td = curthread; /* XXX */
1111 	newnfs_set_sigmask(td, &oldset);
1112 	error = msleep(ident, mtx, priority, wmesg, timo);
1113 	newnfs_restore_sigmask(td, &oldset);
1114 	p = td->td_proc;
1115 	return (error);
1116 }
1117 
1118 /*
1119  * Test for a termination condition pending on the process.
1120  * This is used for NFSMNT_INT mounts.
1121  */
1122 int
1123 newnfs_sigintr(struct nfsmount *nmp, struct thread *td)
1124 {
1125 	struct proc *p;
1126 	sigset_t tmpset;
1127 
1128 	/* Terminate all requests while attempting a forced unmount. */
1129 	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
1130 		return (EIO);
1131 	if (!(nmp->nm_flag & NFSMNT_INT))
1132 		return (0);
1133 	if (td == NULL)
1134 		return (0);
1135 	p = td->td_proc;
1136 	PROC_LOCK(p);
1137 	tmpset = p->p_siglist;
1138 	SIGSETOR(tmpset, td->td_siglist);
1139 	SIGSETNAND(tmpset, td->td_sigmask);
1140 	mtx_lock(&p->p_sigacts->ps_mtx);
1141 	SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
1142 	mtx_unlock(&p->p_sigacts->ps_mtx);
1143 	if ((SIGNOTEMPTY(p->p_siglist) || SIGNOTEMPTY(td->td_siglist))
1144 	    && nfs_sig_pending(tmpset)) {
1145 		PROC_UNLOCK(p);
1146 		return (EINTR);
1147 	}
1148 	PROC_UNLOCK(p);
1149 	return (0);
1150 }
1151 
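/*
 * Log a message about the given NFS server on the console and on the
 * controlling terminal of the process, if there is one.
 */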
1152 static int
1153 nfs_msg(struct thread *td, const char *server, const char *msg, int error)
1154 {
1155 	struct proc *p;
1156 
1157 	p = td ? td->td_proc : NULL;
1158 	if (error) {
1159 		tprintf(p, LOG_INFO, "newnfs server %s: %s, error %d\n",
1160 		    server, msg, error);
1161 	} else {
1162 		tprintf(p, LOG_INFO, "newnfs server %s: %s\n", server, msg);
1163 	}
1164 	return (0);
1165 }
1166 
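/*
 * Mark the mount as not responding (NFSSTA_TIMEO and/or NFSSTA_LOCKTIMEO),
 * post the matching VQ_NOTRESP/VQ_NOTRESPLOCK vfs event and print a
 * message via nfs_msg().
 */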
1167 static void
1168 nfs_down(struct nfsmount *nmp, struct thread *td, const char *msg,
1169     int error, int flags)
1170 {
1171 	if (nmp == NULL)
1172 		return;
1173 	mtx_lock(&nmp->nm_mtx);
1174 	if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
1175 		nmp->nm_state |= NFSSTA_TIMEO;
1176 		mtx_unlock(&nmp->nm_mtx);
1177 		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
1178 		    VQ_NOTRESP, 0);
1179 	} else
1180 		mtx_unlock(&nmp->nm_mtx);
1181 	mtx_lock(&nmp->nm_mtx);
1182 	if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
1183 		nmp->nm_state |= NFSSTA_LOCKTIMEO;
1184 		mtx_unlock(&nmp->nm_mtx);
1185 		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
1186 		    VQ_NOTRESPLOCK, 0);
1187 	} else
1188 		mtx_unlock(&nmp->nm_mtx);
1189 	nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, error);
1190 }
1191 
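/*
 * Clear the not responding state set by nfs_down() and post the matching
 * vfs events to note that the server is reachable again.
 */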
1192 static void
1193 nfs_up(struct nfsmount *nmp, struct thread *td, const char *msg,
1194     int flags, int tprintfmsg)
1195 {
1196 	if (nmp == NULL)
1197 		return;
1198 	if (tprintfmsg) {
1199 		nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);
1200 	}
1201 
1202 	mtx_lock(&nmp->nm_mtx);
1203 	if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
1204 		nmp->nm_state &= ~NFSSTA_TIMEO;
1205 		mtx_unlock(&nmp->nm_mtx);
1206 		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
1207 		    VQ_NOTRESP, 1);
1208 	} else
1209 		mtx_unlock(&nmp->nm_mtx);
1210 
1211 	mtx_lock(&nmp->nm_mtx);
1212 	if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
1213 		nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
1214 		mtx_unlock(&nmp->nm_mtx);
1215 		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
1216 		    VQ_NOTRESPLOCK, 1);
1217 	} else
1218 		mtx_unlock(&nmp->nm_mtx);
1219 }
1220 
1221