/*	$OpenBSD: nfs_syscalls.c,v 1.128 2024/09/18 05:21:19 jsg Exp $	*/
/*	$NetBSD: nfs_syscalls.c,v 1.19 1996/02/18 11:53:52 fvdl Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_syscalls.c	8.5 (Berkeley) 3/30/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/pool.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/kthread.h>
#include <sys/queue.h>

#include <sys/syscallargs.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsrvcache.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

/* Global defs. */
extern int nfs_numasync;
extern struct nfsstats nfsstats;
struct nfssvc_sock *nfs_udpsock;
int nfsd_waiting = 0;

#ifdef NFSSERVER
struct pool nfsrv_descript_pl;

int nfsrv_getslp(struct nfsd *nfsd);

static int nfs_numnfsd = 0;
static int (*const nfsrv3_procs[NFS_NPROCS])(struct nfsrv_descript *,
    struct nfssvc_sock *, struct proc *, struct mbuf **) = {
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_lookup,
	nfsrv3_access,
	nfsrv_readlink,
	nfsrv_read,
	nfsrv_write,
	nfsrv_create,
	nfsrv_mkdir,
	nfsrv_symlink,
	nfsrv_mknod,
	nfsrv_remove,
	nfsrv_rmdir,
	nfsrv_rename,
	nfsrv_link,
	nfsrv_readdir,
	nfsrv_readdirplus,
	nfsrv_statfs,
	nfsrv_fsinfo,
	nfsrv_pathconf,
	nfsrv_commit,
	nfsrv_noop
};
#endif

TAILQ_HEAD(, nfssvc_sock) nfssvc_sockhead;
struct nfsdhead nfsd_head;

int nfssvc_sockhead_flag;
#define SLP_INIT	0x01	/* NFS data undergoing initialization */
#define SLP_WANTINIT	0x02	/* thread waiting on NFS initialization */
int nfsd_head_flag;

#ifdef NFSCLIENT
struct proc *nfs_asyncdaemon[NFS_MAXASYNCDAEMON];
int nfs_niothreads = -1;
#endif

int nfssvc_addsock(struct file *, struct mbuf *);
int nfssvc_nfsd(struct nfsd *);
void nfsrv_slpderef(struct nfssvc_sock *);
void nfsrv_zapsock(struct nfssvc_sock *);
void nfssvc_iod(void *);

/*
 * NFS server pseudo system call for the nfsds.
 * Based on the flag value it either:
 * - adds a socket to the selection list
 * - remains in the kernel as an nfsd
 */
int
sys_nfssvc(struct proc *p, void *v, register_t *retval)
{
	int error = 0;
#ifdef NFSSERVER
	struct sys_nfssvc_args /* {
		syscallarg(int) flag;
		syscallarg(caddr_t) argp;
	} */ *uap = v;
	int flags = SCARG(uap, flag);
	struct file *fp;
	struct mbuf *nam;
	struct nfsd_args nfsdarg;
	struct nfsd_srvargs nfsd_srvargs, *nsd = &nfsd_srvargs;
	struct nfsd *nfsd;
#endif

	/* Must be super user */
	error = suser(p);
	if (error)
		return (error);

#ifndef NFSSERVER
	error = ENOSYS;
#else

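	/* Wait for any in-progress (re)initialization of the socket list. */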
	while (nfssvc_sockhead_flag & SLP_INIT) {
		nfssvc_sockhead_flag |= SLP_WANTINIT;
		tsleep_nsec(&nfssvc_sockhead, PSOCK, "nfsd init", INFSLP);
	}

	switch (flags) {
	case NFSSVC_ADDSOCK:
		error = copyin(SCARG(uap, argp), &nfsdarg, sizeof(nfsdarg));
		if (error)
			return (error);

		error = getsock(p, nfsdarg.sock, &fp);
		if (error)
			return (error);

		/*
		 * Get the client address for connected sockets.
		 */
		if (nfsdarg.name == NULL || nfsdarg.namelen == 0)
			nam = NULL;
		else {
			error = sockargs(&nam, nfsdarg.name, nfsdarg.namelen,
			    MT_SONAME);
			if (error) {
				FRELE(fp, p);
				return (error);
			}
		}
		error = nfssvc_addsock(fp, nam);
		FRELE(fp, p);
		break;
	case NFSSVC_NFSD:
		error = copyin(SCARG(uap, argp), nsd, sizeof(*nsd));
		if (error)
			return (error);

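		/* Become an nfsd: set up per-thread state and serve requests. */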
		nfsd = malloc(sizeof(*nfsd), M_NFSD, M_WAITOK|M_ZERO);
		nfsd->nfsd_procp = p;
		nfsd->nfsd_slp = NULL;

		error = nfssvc_nfsd(nfsd);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error == EINTR || error == ERESTART)
		error = 0;
#endif	/* !NFSSERVER */

	return (error);
}

#ifdef NFSSERVER
/*
 * Adds a socket to the list for servicing by nfsds.
 */
int
nfssvc_addsock(struct file *fp, struct mbuf *mynam)
{
	struct mbuf *m;
	int siz;
	struct nfssvc_sock *slp;
	struct socket *so;
	struct nfssvc_sock *tslp;
	int error;

	so = (struct socket *)fp->f_data;
	tslp = NULL;
	/*
	 * Add it to the list, as required.
	 */
	if (so->so_proto->pr_protocol == IPPROTO_UDP) {
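		/* Only one UDP socket is supported; it lives in nfs_udpsock. */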
		tslp = nfs_udpsock;
		if (tslp->ns_flag & SLP_VALID) {
			m_freem(mynam);
			return (EPERM);
		}
	}
	/*
	 * Allow only IPv4 UDP and TCP sockets.
	 */
	if ((so->so_type != SOCK_STREAM && so->so_type != SOCK_DGRAM) ||
	    so->so_proto->pr_domain->dom_family != AF_INET) {
		m_freem(mynam);
		return (EINVAL);
	}

	if (so->so_type == SOCK_STREAM)
		siz = NFS_MAXPACKET + sizeof (u_long);
	else
		siz = NFS_MAXPACKET;
	solock(so);
	error = soreserve(so, siz, siz);
	sounlock(so);
	if (error) {
		m_freem(mynam);
		return (error);
	}

	/*
	 * Set protocol specific options (for now, TCP only) and
	 * reserve some space. For datagram sockets, this can get called
	 * repeatedly for the same socket, but that isn't harmful.
	 */
	if (so->so_type == SOCK_STREAM) {
		MGET(m, M_WAIT, MT_SOOPTS);
		*mtod(m, int32_t *) = 1;
		m->m_len = sizeof(int32_t);
		sosetopt(so, SOL_SOCKET, SO_KEEPALIVE, m);
		m_freem(m);
	}
	if (so->so_proto->pr_domain->dom_family == AF_INET &&
	    so->so_proto->pr_protocol == IPPROTO_TCP) {
		MGET(m, M_WAIT, MT_SOOPTS);
		*mtod(m, int32_t *) = 1;
		m->m_len = sizeof(int32_t);
		sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m);
		m_freem(m);
	}
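	/* Make socket buffer I/O interruptible and disable timeouts. */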
	solock(so);
	mtx_enter(&so->so_rcv.sb_mtx);
	so->so_rcv.sb_flags &= ~SB_NOINTR;
	so->so_rcv.sb_timeo_nsecs = INFSLP;
	mtx_leave(&so->so_rcv.sb_mtx);
	mtx_enter(&so->so_snd.sb_mtx);
	so->so_snd.sb_flags &= ~SB_NOINTR;
	so->so_snd.sb_timeo_nsecs = INFSLP;
	mtx_leave(&so->so_snd.sb_mtx);
	sounlock(so);
	if (tslp)
		slp = tslp;
	else {
		slp = malloc(sizeof(*slp), M_NFSSVC, M_WAITOK|M_ZERO);
		TAILQ_INSERT_TAIL(&nfssvc_sockhead, slp, ns_chain);
	}
	slp->ns_so = so;
	slp->ns_nam = mynam;
	FREF(fp);
	slp->ns_fp = fp;
	so->so_upcallarg = (caddr_t)slp;
	so->so_upcall = nfsrv_rcv;
	slp->ns_flag = (SLP_VALID | SLP_NEEDQ);
	nfsrv_wakenfsd(slp);
	return (0);
}

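/*
 * Check that a request's source address is IPv4 and that it came
 * from a reserved port (< IPPORT_RESERVED).
 */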
static inline int
nfssvc_checknam(struct mbuf *nam)
{
	struct sockaddr_in *sin;

	if (nam == NULL ||
	    in_nam2sin(nam, &sin) != 0 ||
	    ntohs(sin->sin_port) >= IPPORT_RESERVED) {
		return -1;
	}
	return 0;
}

/*
 * Called by nfssvc() for nfsds. Just loops around servicing rpc requests
 * until it is killed by a signal.
 */
int
nfssvc_nfsd(struct nfsd *nfsd)
{
	struct mbuf *m;
	int siz;
	struct nfssvc_sock *slp;
	struct socket *so;
	int *solockp;
	struct nfsrv_descript *nd = NULL;
	struct mbuf *mreq;
	int error = 0, cacherep, sotype;

	cacherep = RC_DOIT;

	TAILQ_INSERT_TAIL(&nfsd_head, nfsd, nfsd_chain);
	nfs_numnfsd++;

	/* Loop getting rpc requests until SIGKILL. */
loop:
	if (!ISSET(nfsd->nfsd_flag, NFSD_REQINPROG)) {

		/* attach an nfssvc_sock to nfsd */
		error = nfsrv_getslp(nfsd);
		if (error)
			goto done;

		slp = nfsd->nfsd_slp;

		if (ISSET(slp->ns_flag, SLP_VALID)) {
			if ((slp->ns_flag & (SLP_DISCONN | SLP_NEEDQ)) ==
			    SLP_NEEDQ) {
				CLR(slp->ns_flag, SLP_NEEDQ);
				nfs_sndlock(&slp->ns_solock, NULL);
				nfsrv_rcv(slp->ns_so, (caddr_t)slp, M_WAIT);
				nfs_sndunlock(&slp->ns_solock);
			}
			if (ISSET(slp->ns_flag, SLP_DISCONN))
				nfsrv_zapsock(slp);

			error = nfsrv_dorec(slp, nfsd, &nd);
			SET(nfsd->nfsd_flag, NFSD_REQINPROG);
		}
	} else {
		error = 0;
		slp = nfsd->nfsd_slp;
	}

	if (error || !ISSET(slp->ns_flag, SLP_VALID)) {
		if (nd != NULL) {
			pool_put(&nfsrv_descript_pl, nd);
			nd = NULL;
		}
		nfsd->nfsd_slp = NULL;
		CLR(nfsd->nfsd_flag, NFSD_REQINPROG);
		nfsrv_slpderef(slp);
		goto loop;
	}

	so = slp->ns_so;
	sotype = so->so_type;
	if (ISSET(so->so_proto->pr_flags, PR_CONNREQUIRED))
		solockp = &slp->ns_solock;
	else
		solockp = NULL;

	if (nd) {
		if (nd->nd_nam2)
			nd->nd_nam = nd->nd_nam2;
		else
			nd->nd_nam = slp->ns_nam;
	}

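	/*
	 * Consult the duplicate request cache: RC_DOIT means service the
	 * request, RC_REPLY means resend a cached reply, RC_DROPIT means
	 * discard the request.
	 */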
	cacherep = nfsrv_getcache(nd, slp, &mreq);
	switch (cacherep) {
	case RC_DOIT:
		/*
		 * Unless this is a null request (server ping), make
		 * sure that the client is using a reserved source port.
		 */
		if (nd->nd_procnum != 0 && nfssvc_checknam(nd->nd_nam) == -1) {
			/* drop it */
			m_freem(nd->nd_mrep);
			m_freem(nd->nd_nam2);
			break;
		}
		error = (*(nfsrv3_procs[nd->nd_procnum]))(nd, slp, nfsd->nfsd_procp, &mreq);
		if (mreq == NULL) {
			if (nd != NULL) {
				m_freem(nd->nd_nam2);
				m_freem(nd->nd_mrep);
			}
			break;
		}
		if (error) {
			nfsstats.srv_errs++;
			nfsrv_updatecache(nd, 0, mreq);
			m_freem(nd->nd_nam2);
			break;
		}
		nfsstats.srvrpccnt[nd->nd_procnum]++;
		nfsrv_updatecache(nd, 1, mreq);
		nd->nd_mrep = NULL;

		/* FALLTHROUGH */
	case RC_REPLY:
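		/* Total up the reply length and send the reply to the client. */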
		m = mreq;
		siz = 0;
		while (m) {
			siz += m->m_len;
			m = m->m_next;
		}

		if (siz <= 0 || siz > NFS_MAXPACKET)
			panic("bad nfs svc reply, siz = %i", siz);

		m = mreq;
		m->m_pkthdr.len = siz;
		m->m_pkthdr.ph_ifidx = 0;

		/* For stream protocols, prepend a Sun RPC Record Mark. */
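		/* High bit: last fragment; low 31 bits: fragment length. */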
		if (sotype == SOCK_STREAM) {
			M_PREPEND(m, NFSX_UNSIGNED, M_WAIT);
			*mtod(m, u_int32_t *) = htonl(0x80000000 | siz);
		}

		if (solockp)
			nfs_sndlock(solockp, NULL);

		if (ISSET(slp->ns_flag, SLP_VALID))
			error = nfs_send(so, nd->nd_nam2, m, NULL);
		else {
			error = EPIPE;
			m_freem(m);
		}
		m_freem(nd->nd_nam2);
		m_freem(nd->nd_mrep);
		if (error == EPIPE)
			nfsrv_zapsock(slp);
		if (solockp)
			nfs_sndunlock(solockp);
		if (error == EINTR || error == ERESTART) {
			pool_put(&nfsrv_descript_pl, nd);
			nfsrv_slpderef(slp);
			goto done;
		}
		break;
	case RC_DROPIT:
		m_freem(nd->nd_mrep);
		m_freem(nd->nd_nam2);
		break;
	}

	if (nd) {
		pool_put(&nfsrv_descript_pl, nd);
		nd = NULL;
	}

	if (nfsrv_dorec(slp, nfsd, &nd)) {
		nfsd->nfsd_flag &= ~NFSD_REQINPROG;
		nfsd->nfsd_slp = NULL;
		nfsrv_slpderef(slp);
	}
	goto loop;

done:
	TAILQ_REMOVE(&nfsd_head, nfsd, nfsd_chain);
	free(nfsd, M_NFSD, sizeof(*nfsd));
	if (--nfs_numnfsd == 0)
		nfsrv_init(1);	/* Reinitialize everything */
	return (error);
}

/*
 * Shut down a socket associated with an nfssvc_sock structure.
 * Should be called with the send lock set, if required.
 * The trick here is to increment the sref at the start, so that the nfsds
 * will stop using it and clear ns_flag at the end so that it will not be
 * reassigned during cleanup.
 */
void
nfsrv_zapsock(struct nfssvc_sock *slp)
{
	struct socket *so;
	struct file *fp;
	struct mbuf *m, *n;

	slp->ns_flag &= ~SLP_ALLFLAGS;
	fp = slp->ns_fp;
	if (fp) {
		FREF(fp);
		slp->ns_fp = NULL;
		so = slp->ns_so;
		so->so_upcall = NULL;
		soshutdown(so, SHUT_RDWR);
		closef(fp, NULL);
		if (slp->ns_nam)
			m = m_free(slp->ns_nam);
		m_freem(slp->ns_raw);
		m = slp->ns_rec;
		while (m) {
			n = m->m_nextpkt;
			m_freem(m);
			m = n;
		}
	}
}

/*
 * Dereference a server socket structure. If it has no more references and
 * is no longer valid, you can throw it away.
 */
void
nfsrv_slpderef(struct nfssvc_sock *slp)
{
	if (--(slp->ns_sref) == 0 && (slp->ns_flag & SLP_VALID) == 0) {
		TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
		free(slp, M_NFSSVC, sizeof(*slp));
	}
}

/*
 * Initialize the data structures for the server.
 * Handshake with any new nfsds starting up to avoid any chance of
 * corruption.
 */
void
nfsrv_init(int terminating)
{
	struct nfssvc_sock *slp, *nslp;

	if (nfssvc_sockhead_flag & SLP_INIT)
		panic("nfsd init");
	nfssvc_sockhead_flag |= SLP_INIT;
	if (terminating) {
		for (slp = TAILQ_FIRST(&nfssvc_sockhead); slp != NULL;
		    slp = nslp) {
			nslp = TAILQ_NEXT(slp, ns_chain);
			if (slp->ns_flag & SLP_VALID)
				nfsrv_zapsock(slp);
			TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
			free(slp, M_NFSSVC, sizeof(*slp));
		}
		nfsrv_cleancache();	/* And clear out server cache */
	}

	TAILQ_INIT(&nfssvc_sockhead);
	nfssvc_sockhead_flag &= ~SLP_INIT;
	if (nfssvc_sockhead_flag & SLP_WANTINIT) {
		nfssvc_sockhead_flag &= ~SLP_WANTINIT;
		wakeup((caddr_t)&nfssvc_sockhead);
	}

	TAILQ_INIT(&nfsd_head);
	nfsd_head_flag &= ~NFSD_CHECKSLP;

	nfs_udpsock = malloc(sizeof(*nfs_udpsock), M_NFSSVC,
	    M_WAITOK|M_ZERO);
	TAILQ_INSERT_HEAD(&nfssvc_sockhead, nfs_udpsock, ns_chain);

	if (!terminating) {
		pool_init(&nfsrv_descript_pl, sizeof(struct nfsrv_descript),
		    0, IPL_NONE, PR_WAITOK, "ndscpl", NULL);
	}
}
#endif /* NFSSERVER */

#ifdef NFSCLIENT
/*
 * Asynchronous I/O threads for client nfs.
 * They do read-ahead and write-behind operations on the block I/O cache.
 * Never returns unless it fails or gets killed.
 */
void
nfssvc_iod(void *arg)
{
	struct proc *p = curproc;
	struct buf *bp, *nbp;
	int i, myiod;
	struct vnode *vp;
	int error = 0, s, bufcount;

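	/* Size this iod's share of the queue: at most 256 bufs, 1/8 of the cache. */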
	bufcount = MIN(256, bcstats.kvaslots / 8);
	bufcount = MIN(bufcount, bcstats.numbufs / 8);

	/* Assign my position or return error if too many already running. */
	myiod = -1;
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++) {
		if (nfs_asyncdaemon[i] == NULL) {
			myiod = i;
			break;
		}
	}
	if (myiod == -1)
		kthread_exit(EBUSY);

	nfs_asyncdaemon[myiod] = p;
	nfs_numasync++;

	/* Upper limit on how many bufs we'll queue up for this iod. */
	if (nfs_bufqmax > bcstats.kvaslots / 4) {
		nfs_bufqmax = bcstats.kvaslots / 4;
		bufcount = 0;
	}
	if (nfs_bufqmax > bcstats.numbufs / 4) {
		nfs_bufqmax = bcstats.numbufs / 4;
		bufcount = 0;
	}

	nfs_bufqmax += bufcount;
	wakeup(&nfs_bufqlen); /* wake up anyone waiting for room to enqueue IO */

	/* Just loop around doing our stuff until SIGKILL. */
	for (;;) {
		while (TAILQ_FIRST(&nfs_bufq) == NULL && error == 0) {
			error = tsleep_nsec(&nfs_bufq,
			    PWAIT | PCATCH, "nfsidl", INFSLP);
		}
		while ((bp = TAILQ_FIRST(&nfs_bufq)) != NULL) {
			/* Take one off the front of the list */
			TAILQ_REMOVE(&nfs_bufq, bp, b_freelist);
			nfs_bufqlen--;
			wakeup_one(&nfs_bufqlen);
			if (bp->b_flags & B_READ)
				(void) nfs_doio(bp, NULL);
			else do {
				/*
				 * Look for a delayed write for the same vnode, so I can do
				 * it now. We must grab it before calling nfs_doio() to
				 * avoid any risk of the vnode getting vclean()'d while
				 * we are doing the write rpc.
				 */
				vp = bp->b_vp;
				s = splbio();
				LIST_FOREACH(nbp, &vp->v_dirtyblkhd, b_vnbufs) {
					if ((nbp->b_flags &
					    (B_BUSY|B_DELWRI|B_NEEDCOMMIT|B_NOCACHE))!=B_DELWRI)
						continue;
					nbp->b_flags |= B_ASYNC;
					bremfree(nbp);
					buf_acquire(nbp);
					break;
				}
				/*
				 * For the delayed write, do the first part of nfs_bwrite()
				 * up to, but not including nfs_strategy().
				 */
				if (nbp) {
					nbp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
					buf_undirty(nbp);
					nbp->b_vp->v_numoutput++;
				}
				splx(s);

				(void) nfs_doio(bp, NULL);
			} while ((bp = nbp) != NULL);
		}
		if (error) {
			nfs_asyncdaemon[myiod] = NULL;
			nfs_numasync--;
			nfs_bufqmax -= bufcount;
			kthread_exit(error);
		}
	}
}

void
nfs_getset_niothreads(int set)
{
	int i, have, start;

	for (have = 0, i = 0; i < NFS_MAXASYNCDAEMON; i++)
		if (nfs_asyncdaemon[i] != NULL)
			have++;

	if (set) {
		/* clamp to sane range */
		nfs_niothreads = max(0, min(nfs_niothreads, NFS_MAXASYNCDAEMON));

		start = nfs_niothreads - have;

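		/* start > 0: spawn that many new iods; start < 0: kill the surplus. */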
		while (start > 0) {
			kthread_create(nfssvc_iod, NULL, NULL, "nfsio");
			start--;
		}

		for (i = 0; (start < 0) && (i < NFS_MAXASYNCDAEMON); i++)
			if (nfs_asyncdaemon[i] != NULL) {
				psignal(nfs_asyncdaemon[i], SIGKILL);
				start++;
			}
	} else {
		if (nfs_niothreads >= 0)
			nfs_niothreads = have;
	}
}
#endif /* NFSCLIENT */

#ifdef NFSSERVER
/*
 * Find an nfssvc_sock for nfsd, sleeping if needed.
 */
int
nfsrv_getslp(struct nfsd *nfsd)
{
	struct nfssvc_sock *slp;
	int error;

again:
	while (nfsd->nfsd_slp == NULL &&
	    (nfsd_head_flag & NFSD_CHECKSLP) == 0) {
		nfsd->nfsd_flag |= NFSD_WAITING;
		nfsd_waiting++;
		error = tsleep_nsec(nfsd, PSOCK | PCATCH, "nfsd", INFSLP);
		nfsd_waiting--;
		if (error)
			return (error);
	}

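	/* A socket may have queued records; find a valid one and claim it. */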
	if (nfsd->nfsd_slp == NULL &&
	    (nfsd_head_flag & NFSD_CHECKSLP) != 0) {
		TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
			if ((slp->ns_flag & (SLP_VALID | SLP_DOREC)) ==
			    (SLP_VALID | SLP_DOREC)) {
				slp->ns_flag &= ~SLP_DOREC;
				slp->ns_sref++;
				nfsd->nfsd_slp = slp;
				break;
			}
		}
		if (slp == NULL)
			nfsd_head_flag &= ~NFSD_CHECKSLP;
	}

	if (nfsd->nfsd_slp == NULL)
		goto again;

	return (0);
}
#endif /* NFSSERVER */