xref: /freebsd/sys/fs/nfsclient/nfs_clnfsiod.c (revision d6b92ffa)
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from nfs_syscalls.c	8.5 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/namei.h>
#include <sys/unistd.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>

extern struct mtx	ncl_iod_mutex;
extern struct task	ncl_nfsiodnew_task;

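/*
 * Global nfsiod state, all protected by ncl_iod_mutex: the count of running
 * daemons and, indexed by daemon number, each daemon's availability and the
 * mount it is currently serving.
 */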
int ncl_numasync;
enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];

static void	nfssvc_iod(void *);

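/*
 * Slot-in-use flags, indexed by daemon number.  &nfs_asyncdaemon[i] is also
 * the argument passed to each kthread so it can derive its own index.
 */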
static int nfs_asyncdaemon[NFS_MAXASYNCDAEMON];

SYSCTL_DECL(_vfs_nfs);

/* Maximum number of seconds an nfsiod kthread will sleep before exiting */
static unsigned int nfs_iodmaxidle = 120;
SYSCTL_UINT(_vfs_nfs, OID_AUTO, iodmaxidle, CTLFLAG_RW, &nfs_iodmaxidle, 0,
    "Max number of seconds an nfsiod kthread will sleep before exiting");

/* Maximum number of nfsiod kthreads */
unsigned int ncl_iodmax = 20;

/* Minimum number of nfsiod kthreads to keep as spares */
static unsigned int nfs_iodmin = 0;

static int nfs_nfsiodnew_sync(void);

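/*
 * Sysctl handler for vfs.nfs.iodmin.  Raising the minimum above the current
 * number of daemons creates the missing ones immediately; a minimum larger
 * than ncl_iodmax is rejected.
 */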
static int
sysctl_iodmin(SYSCTL_HANDLER_ARGS)
{
	int error, i;
	int newmin;

	newmin = nfs_iodmin;
	error = sysctl_handle_int(oidp, &newmin, 0, req);
	if (error || (req->newptr == NULL))
		return (error);
	mtx_lock(&ncl_iod_mutex);
	if (newmin > ncl_iodmax) {
		error = EINVAL;
		goto out;
	}
	nfs_iodmin = newmin;
	if (ncl_numasync >= nfs_iodmin)
		goto out;
	/*
	 * If the current number of nfsiods is lower
	 * than the new minimum, create some more.
	 */
	for (i = nfs_iodmin - ncl_numasync; i > 0; i--)
		nfs_nfsiodnew_sync();
out:
	mtx_unlock(&ncl_iod_mutex);
	return (error);
}
SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmin, CTLTYPE_UINT | CTLFLAG_RW, 0,
    sizeof (nfs_iodmin), sysctl_iodmin, "IU",
    "Min number of nfsiod kthreads to keep as spares");

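/*
 * Sysctl handler for vfs.nfs.iodmax.  Lowering the maximum below the current
 * number of daemons wakes the idle ones so they notice the new limit and
 * exit; busy ones exit once they finish their current I/O.
 */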
static int
sysctl_iodmax(SYSCTL_HANDLER_ARGS)
{
	int error, i;
	int iod, newmax;

	newmax = ncl_iodmax;
	error = sysctl_handle_int(oidp, &newmax, 0, req);
	if (error || (req->newptr == NULL))
		return (error);
	if (newmax > NFS_MAXASYNCDAEMON)
		return (EINVAL);
	mtx_lock(&ncl_iod_mutex);
	ncl_iodmax = newmax;
	if (ncl_numasync <= ncl_iodmax)
		goto out;
	/*
	 * If there are nfsiods asleep that should now exit, wake them
	 * up so that they check ncl_iodmax and exit.  Active nfsiods
	 * will exit as soon as they finish their current I/O.
	 */
	iod = ncl_numasync - 1;
	for (i = 0; i < ncl_numasync - ncl_iodmax; i++) {
		if (ncl_iodwant[iod] == NFSIOD_AVAILABLE)
			wakeup(&ncl_iodwant[iod]);
		iod--;
	}
out:
	mtx_unlock(&ncl_iod_mutex);
	return (0);
}
SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmax, CTLTYPE_UINT | CTLFLAG_RW, 0,
    sizeof (ncl_iodmax), sysctl_iodmax, "IU",
    "Max number of nfsiod kthreads");

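/*
 * Create one new nfsiod kthread, claiming the first free slot in
 * nfs_asyncdaemon[].  Called with ncl_iod_mutex held; the mutex is dropped
 * around kproc_create() and reacquired before returning.  Returns 0 if the
 * maximum number of daemons is already running.
 */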
static int
nfs_nfsiodnew_sync(void)
{
	int error, i;

	mtx_assert(&ncl_iod_mutex, MA_OWNED);
	for (i = 0; i < ncl_iodmax; i++) {
		if (nfs_asyncdaemon[i] == 0) {
			nfs_asyncdaemon[i] = 1;
			break;
		}
	}
	if (i == ncl_iodmax)
		return (0);
	mtx_unlock(&ncl_iod_mutex);
	error = kproc_create(nfssvc_iod, nfs_asyncdaemon + i, NULL,
	    RFHIGHPID, 0, "newnfs %d", i);
	mtx_lock(&ncl_iod_mutex);
	if (error == 0) {
		ncl_numasync++;
		ncl_iodwant[i] = NFSIOD_AVAILABLE;
	} else
		nfs_asyncdaemon[i] = 0;
	return (error);
}

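/*
 * Taskqueue callback: create one nfsiod for each pending enqueue of
 * ncl_nfsiodnew_task.  Runs in a context that may sleep, so calling
 * kproc_create() via nfs_nfsiodnew_sync() is safe here.
 */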
void
ncl_nfsiodnew_tq(__unused void *arg, int pending)
{

	mtx_lock(&ncl_iod_mutex);
	while (pending > 0) {
		pending--;
		nfs_nfsiodnew_sync();
	}
	mtx_unlock(&ncl_iod_mutex);
}

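/*
 * Request a new nfsiod.  Called with ncl_iod_mutex held, possibly from a
 * context that cannot create a kthread directly, so the work is deferred to
 * the thread taskqueue.
 */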
void
ncl_nfsiodnew(void)
{

	mtx_assert(&ncl_iod_mutex, MA_OWNED);
	taskqueue_enqueue(taskqueue_thread, &ncl_nfsiodnew_task);
}

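/*
 * Boot-time setup: initialize the NFS client and start the initial pool of
 * nfsiod kthreads (vfs.nfs.iodmin of them, capped at NFS_MAXASYNCDAEMON).
 */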
static void
nfsiod_setup(void *dummy)
{
	int error;

	TUNABLE_INT_FETCH("vfs.nfs.iodmin", &nfs_iodmin);
	nfscl_init();
	mtx_lock(&ncl_iod_mutex);
	/* Silently limit the starting number of nfsiods. */
	if (nfs_iodmin > NFS_MAXASYNCDAEMON)
		nfs_iodmin = NFS_MAXASYNCDAEMON;

	while (ncl_numasync < nfs_iodmin) {
		error = nfs_nfsiodnew_sync();
		if (error == -1)
			panic("nfsiod_setup: nfs_nfsiodnew failed");
	}
	mtx_unlock(&ncl_iod_mutex);
}
SYSINIT(newnfsiod, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, nfsiod_setup, NULL);

static int nfs_defect = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0,
    "Allow nfsiods to migrate serving different mounts");

/*
 * Asynchronous I/O daemons for the NFS client.
 * They do read-ahead and write-behind operations on the block I/O cache.
 * Each daemon exits after it has been idle for longer than the timeout set
 * by the vfs.nfs.iodmaxidle sysctl.
 */
static void
nfssvc_iod(void *instance)
{
	struct buf *bp;
	struct nfsmount *nmp;
	int myiod, timo;
	int error = 0;

	mtx_lock(&ncl_iod_mutex);
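	/* This daemon's index is the offset of its slot in nfs_asyncdaemon[]. */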
	myiod = (int *)instance - nfs_asyncdaemon;
	/*
	 * Main loop
	 */
	for (;;) {
	    while (((nmp = ncl_iodmount[myiod]) == NULL)
		   || !TAILQ_FIRST(&nmp->nm_bufq)) {
		if (myiod >= ncl_iodmax)
			goto finish;
		if (nmp)
			nmp->nm_bufqiods--;
		if (ncl_iodwant[myiod] == NFSIOD_NOT_AVAILABLE)
			ncl_iodwant[myiod] = NFSIOD_AVAILABLE;
		ncl_iodmount[myiod] = NULL;
		/*
		 * Always keep at least nfs_iodmin kthreads.
		 */
		timo = (myiod < nfs_iodmin) ? 0 : nfs_iodmaxidle * hz;
		error = msleep(&ncl_iodwant[myiod], &ncl_iod_mutex, PWAIT | PCATCH,
		    "-", timo);
		if (error) {
			nmp = ncl_iodmount[myiod];
			/*
			 * Rechecking the nm_bufq closes a rare race in which
			 * the nfsiod is woken up at the exact time the idle
			 * timeout fires.
			 */
			if (nmp && TAILQ_FIRST(&nmp->nm_bufq))
				error = 0;
			break;
		}
	    }
	    if (error)
		    break;
	    while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
		/* Take one off the front of the list */
		TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen--;
		if (nmp->nm_bufqwant && nmp->nm_bufqlen <= ncl_numasync) {
		    nmp->nm_bufqwant = 0;
		    wakeup(&nmp->nm_bufq);
		}
		mtx_unlock(&ncl_iod_mutex);
		if (bp->b_flags & B_DIRECT) {
			KASSERT((bp->b_iocmd == BIO_WRITE), ("nfssvc_iod: BIO_WRITE not set"));
			(void)ncl_doio_directwrite(bp);
		} else {
			if (bp->b_iocmd == BIO_READ)
				(void) ncl_doio(bp->b_vp, bp, bp->b_rcred,
				    NULL, 0);
			else
				(void) ncl_doio(bp->b_vp, bp, bp->b_wcred,
				    NULL, 0);
		}
		mtx_lock(&ncl_iod_mutex);
		/*
		 * Look up the mount again: it may have been dismounted
		 * while ncl_doio() was in progress for the last buffer.
		 */
		nmp = ncl_iodmount[myiod];
		if (nmp == NULL)
			break;

		/*
		 * If there is more than one iod on this mount, then defect
		 * so that the iods can be shared out fairly between the mounts.
		 */
		if (nfs_defect && nmp->nm_bufqiods > 1) {
		    NFS_DPF(ASYNCIO,
			    ("nfssvc_iod: iod %d defecting from mount %p\n",
			     myiod, nmp));
		    ncl_iodmount[myiod] = NULL;
		    nmp->nm_bufqiods--;
		    break;
		}
	    }
	}
finish:
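	/* Release this daemon's slot and state before the kthread exits. */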
	nfs_asyncdaemon[myiod] = 0;
	if (nmp)
	    nmp->nm_bufqiods--;
	ncl_iodwant[myiod] = NFSIOD_NOT_AVAILABLE;
	ncl_iodmount[myiod] = NULL;
	/* Someone may be waiting for the last nfsiod to terminate. */
	if (--ncl_numasync == 0)
		wakeup(&ncl_numasync);
	mtx_unlock(&ncl_iod_mutex);
	if ((error == 0) || (error == EWOULDBLOCK))
		kproc_exit(0);
	/* Abnormal termination */
	kproc_exit(1);
}
343