1 /* $OpenBSD: nfs_syscalls.c,v 1.126 2024/05/01 13:15:59 jsg Exp $ */
2 /* $NetBSD: nfs_syscalls.c,v 1.19 1996/02/18 11:53:52 fvdl Exp $ */
3
4 /*
5 * Copyright (c) 1989, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * Rick Macklem at The University of Guelph.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)nfs_syscalls.c 8.5 (Berkeley) 3/30/95
36 */
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/file.h>
41 #include <sys/vnode.h>
42 #include <sys/mount.h>
43 #include <sys/pool.h>
44 #include <sys/malloc.h>
45 #include <sys/buf.h>
46 #include <sys/mbuf.h>
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
49 #include <sys/domain.h>
50 #include <sys/protosw.h>
51 #include <sys/filedesc.h>
52 #include <sys/signalvar.h>
53 #include <sys/kthread.h>
54 #include <sys/queue.h>
55
56 #include <sys/syscallargs.h>
57
58 #include <netinet/in.h>
59 #include <netinet/tcp.h>
60 #include <nfs/nfsproto.h>
61 #include <nfs/nfs.h>
62 #include <nfs/nfsrvcache.h>
63 #include <nfs/nfsnode.h>
64 #include <nfs/nfs_var.h>
65
/* Global defs. */
extern int nfs_numasync;		/* number of client iod kthreads running */
extern struct nfsstats nfsstats;	/* NFS statistics (exported via sysctl) */
struct nfssvc_sock *nfs_udpsock;	/* the single UDP server socket */
int nfsd_waiting = 0;			/* nfsd threads asleep waiting for work */

#ifdef NFSSERVER
struct pool nfsrv_descript_pl;		/* pool of nfsrv_descript request descriptors */

int nfsrv_getslp(struct nfsd *nfsd);

static int nfs_numnfsd = 0;		/* number of nfsd threads currently running */
/*
 * NFSv3 server dispatch table, indexed by RPC procedure number.
 * The order must match the NFSPROC_* numbering in nfsproto.h.
 */
static int (*const nfsrv3_procs[NFS_NPROCS])(struct nfsrv_descript *,
    struct nfssvc_sock *, struct proc *, struct mbuf **) = {
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_lookup,
	nfsrv3_access,
	nfsrv_readlink,
	nfsrv_read,
	nfsrv_write,
	nfsrv_create,
	nfsrv_mkdir,
	nfsrv_symlink,
	nfsrv_mknod,
	nfsrv_remove,
	nfsrv_rmdir,
	nfsrv_rename,
	nfsrv_link,
	nfsrv_readdir,
	nfsrv_readdirplus,
	nfsrv_statfs,
	nfsrv_fsinfo,
	nfsrv_pathconf,
	nfsrv_commit,
	nfsrv_noop
};
#endif

/* All server sockets being serviced, and all nfsd threads. */
TAILQ_HEAD(, nfssvc_sock) nfssvc_sockhead;
struct nfsdhead nfsd_head;

int nfssvc_sockhead_flag;
#define	SLP_INIT	0x01	/* NFS data undergoing initialization */
#define	SLP_WANTINIT	0x02	/* thread waiting on NFS initialization */
int nfsd_head_flag;

#ifdef NFSCLIENT
struct proc *nfs_asyncdaemon[NFS_MAXASYNCDAEMON];	/* iod kthread slots */
int nfs_niothreads = -1;	/* iod thread count; -1 = autoconfigure */
#endif

int nfssvc_addsock(struct file *, struct mbuf *);
int nfssvc_nfsd(struct nfsd *);
void nfsrv_slpderef(struct nfssvc_sock *);
void nfsrv_zapsock(struct nfssvc_sock *);
void nfssvc_iod(void *);
124
/*
 * NFS server pseudo system call for the nfsd's
 * Based on the flag value it either:
 * - adds a socket to the selection list
 * - remains in the kernel as an nfsd
 *
 * Returns 0 on success or an errno; EINTR/ERESTART are mapped to 0 so a
 * signalled nfsd exits the syscall cleanly.
 */
int
sys_nfssvc(struct proc *p, void *v, register_t *retval)
{
	int error = 0;
#ifdef NFSSERVER
	struct sys_nfssvc_args /* {
		syscallarg(int) flag;
		syscallarg(caddr_t) argp;
	} */ *uap = v;
	int flags = SCARG(uap, flag);
	struct file *fp;
	struct mbuf *nam;
	struct nfsd_args nfsdarg;
	struct nfsd_srvargs nfsd_srvargs, *nsd = &nfsd_srvargs;
	struct nfsd *nfsd;
#endif

	/* Must be super user */
	error = suser(p);
	if (error)
		return (error);

#ifndef NFSSERVER
	error = ENOSYS;
#else
	/* Wait for any (re)initialization of the socket list to finish. */
	while (nfssvc_sockhead_flag & SLP_INIT) {
		nfssvc_sockhead_flag |= SLP_WANTINIT;
		tsleep_nsec(&nfssvc_sockhead, PSOCK, "nfsd init", INFSLP);
	}

	switch (flags) {
	case NFSSVC_ADDSOCK:
		error = copyin(SCARG(uap, argp), &nfsdarg, sizeof(nfsdarg));
		if (error)
			return (error);

		/* Translate the descriptor into a held struct file. */
		error = getsock(p, nfsdarg.sock, &fp);
		if (error)
			return (error);

		/*
		 * Get the client address for connected sockets.
		 */
		if (nfsdarg.name == NULL || nfsdarg.namelen == 0)
			nam = NULL;
		else {
			error = sockargs(&nam, nfsdarg.name, nfsdarg.namelen,
			    MT_SONAME);
			if (error) {
				FRELE(fp, p);
				return (error);
			}
		}
		/* nfssvc_addsock() takes its own reference on fp. */
		error = nfssvc_addsock(fp, nam);
		FRELE(fp, p);
		break;
	case NFSSVC_NFSD:
		error = copyin(SCARG(uap, argp), nsd, sizeof(*nsd));
		if (error)
			return (error);

		/* Freed by nfssvc_nfsd() when the thread exits. */
		nfsd = malloc(sizeof(*nfsd), M_NFSD, M_WAITOK|M_ZERO);
		nfsd->nfsd_procp = p;
		nfsd->nfsd_slp = NULL;

		/* Does not return until the nfsd is killed. */
		error = nfssvc_nfsd(nfsd);
		break;
	default:
		error = EINVAL;
		break;
	}

	/* A signal is the normal way for an nfsd to exit; not an error. */
	if (error == EINTR || error == ERESTART)
		error = 0;
#endif /* NFSSERVER */

	return (error);
}
210
#ifdef NFSSERVER
/*
 * Adds a socket to the list for servicing by nfsds.
 *
 * Consumes 'mynam' (freed on error, stored in the slp on success) and
 * takes its own reference on 'fp'.  Only IPv4 UDP and TCP sockets are
 * accepted; there is at most one UDP socket (nfs_udpsock).
 */
int
nfssvc_addsock(struct file *fp, struct mbuf *mynam)
{
	struct mbuf *m;
	int siz;
	struct nfssvc_sock *slp;
	struct socket *so;
	struct nfssvc_sock *tslp;
	int error;

	so = (struct socket *)fp->f_data;
	tslp = NULL;
	/*
	 * Add it to the list, as required.
	 */
	if (so->so_proto->pr_protocol == IPPROTO_UDP) {
		/* Reuse the pre-allocated UDP slot; refuse a second UDP socket. */
		tslp = nfs_udpsock;
		if (tslp->ns_flag & SLP_VALID) {
			m_freem(mynam);
			return (EPERM);
		}
	}
	/*
	 * Allow only IPv4 UDP and TCP sockets.
	 */
	if ((so->so_type != SOCK_STREAM && so->so_type != SOCK_DGRAM) ||
	    so->so_proto->pr_domain->dom_family != AF_INET) {
		m_freem(mynam);
		return (EINVAL);
	}

	/* Streams need room for the RPC record mark (see nfssvc_nfsd()). */
	if (so->so_type == SOCK_STREAM)
		siz = NFS_MAXPACKET + sizeof (u_long);
	else
		siz = NFS_MAXPACKET;
	solock(so);
	error = soreserve(so, siz, siz);
	sounlock(so);
	if (error) {
		m_freem(mynam);
		return (error);
	}

	/*
	 * Set protocol specific options { for now TCP only } and
	 * reserve some space. For datagram sockets, this can get called
	 * repeatedly for the same socket, but that isn't harmful.
	 */
	if (so->so_type == SOCK_STREAM) {
		MGET(m, M_WAIT, MT_SOOPTS);
		*mtod(m, int32_t *) = 1;
		m->m_len = sizeof(int32_t);
		sosetopt(so, SOL_SOCKET, SO_KEEPALIVE, m);
		m_freem(m);
	}
	if (so->so_proto->pr_domain->dom_family == AF_INET &&
	    so->so_proto->pr_protocol == IPPROTO_TCP) {
		MGET(m, M_WAIT, MT_SOOPTS);
		*mtod(m, int32_t *) = 1;
		m->m_len = sizeof(int32_t);
		sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m);
		m_freem(m);
	}
	/* Non-interruptible, non-timing-out socket buffers for the server. */
	solock(so);
	mtx_enter(&so->so_rcv.sb_mtx);
	so->so_rcv.sb_flags &= ~SB_NOINTR;
	so->so_rcv.sb_timeo_nsecs = INFSLP;
	mtx_leave(&so->so_rcv.sb_mtx);
	mtx_enter(&so->so_snd.sb_mtx);
	so->so_snd.sb_flags &= ~SB_NOINTR;
	so->so_snd.sb_timeo_nsecs = INFSLP;
	mtx_leave(&so->so_snd.sb_mtx);
	sounlock(so);
	if (tslp)
		slp = tslp;
	else {
		slp = malloc(sizeof(*slp), M_NFSSVC, M_WAITOK|M_ZERO);
		TAILQ_INSERT_TAIL(&nfssvc_sockhead, slp, ns_chain);
	}
	slp->ns_so = so;
	slp->ns_nam = mynam;
	FREF(fp);
	slp->ns_fp = fp;
	/* Hook the receive upcall so incoming data queues records on slp. */
	so->so_upcallarg = (caddr_t)slp;
	so->so_upcall = nfsrv_rcv;
	slp->ns_flag = (SLP_VALID | SLP_NEEDQ);
	nfsrv_wakenfsd(slp);
	return (0);
}
304
nfssvc_checknam(struct mbuf * nam)305 static inline int nfssvc_checknam(struct mbuf *nam)
306 {
307 struct sockaddr_in *sin;
308
309 if (nam == NULL ||
310 in_nam2sin(nam, &sin) != 0 ||
311 ntohs(sin->sin_port) >= IPPORT_RESERVED) {
312 return -1;
313 }
314 return 0;
315 }
316
/*
 * Called by nfssvc() for nfsds. Just loops around servicing rpc requests
 * until it is killed by a signal.
 *
 * Owns 'nfsd' and frees it on exit; decrements nfs_numnfsd and
 * reinitializes the server state when the last nfsd terminates.
 */
int
nfssvc_nfsd(struct nfsd *nfsd)
{
	struct mbuf *m;
	int siz;
	struct nfssvc_sock *slp;
	struct socket *so;
	int *solockp;
	struct nfsrv_descript *nd = NULL;
	struct mbuf *mreq;
	int error = 0, cacherep, sotype;

	cacherep = RC_DOIT;

	TAILQ_INSERT_TAIL(&nfsd_head, nfsd, nfsd_chain);
	nfs_numnfsd++;

	/* Loop getting rpc requests until SIGKILL. */
loop:
	if (!ISSET(nfsd->nfsd_flag, NFSD_REQINPROG)) {

		/* attach an nfssvc_sock to nfsd */
		error = nfsrv_getslp(nfsd);
		if (error)
			goto done;

		slp = nfsd->nfsd_slp;

		if (ISSET(slp->ns_flag, SLP_VALID)) {
			/* Pull queued data off the socket first, if any. */
			if ((slp->ns_flag & (SLP_DISCONN | SLP_NEEDQ)) ==
			    SLP_NEEDQ) {
				CLR(slp->ns_flag, SLP_NEEDQ);
				nfs_sndlock(&slp->ns_solock, NULL);
				nfsrv_rcv(slp->ns_so, (caddr_t)slp, M_WAIT);
				nfs_sndunlock(&slp->ns_solock);
			}
			if (ISSET(slp->ns_flag, SLP_DISCONN))
				nfsrv_zapsock(slp);

			/* Dequeue one complete RPC record into nd. */
			error = nfsrv_dorec(slp, nfsd, &nd);
			SET(nfsd->nfsd_flag, NFSD_REQINPROG);
		}
	} else {
		/* A request on this slp is already in progress; continue it. */
		error = 0;
		slp = nfsd->nfsd_slp;
	}

	/* Socket died or record parsing failed: drop it and start over. */
	if (error || !ISSET(slp->ns_flag, SLP_VALID)) {
		if (nd != NULL) {
			pool_put(&nfsrv_descript_pl, nd);
			nd = NULL;
		}
		nfsd->nfsd_slp = NULL;
		CLR(nfsd->nfsd_flag, NFSD_REQINPROG);
		nfsrv_slpderef(slp);
		goto loop;
	}

	so = slp->ns_so;
	sotype = so->so_type;
	/* Connection-oriented sockets serialize their sends. */
	if (ISSET(so->so_proto->pr_flags, PR_CONNREQUIRED))
		solockp = &slp->ns_solock;
	else
		solockp = NULL;

	if (nd) {
		/* nd_nam2 is set for unconnected (UDP) requests. */
		if (nd->nd_nam2)
			nd->nd_nam = nd->nd_nam2;
		else
			nd->nd_nam = slp->ns_nam;
	}

	/* Check the duplicate-request cache before executing the RPC. */
	cacherep = nfsrv_getcache(nd, slp, &mreq);
	switch (cacherep) {
	case RC_DOIT:
		/*
		 * Unless this is a null request (server ping), make
		 * sure that the client is using a reserved source port.
		 */
		if (nd->nd_procnum != 0 && nfssvc_checknam(nd->nd_nam) == -1) {
			/* drop it */
			m_freem(nd->nd_mrep);
			m_freem(nd->nd_nam2);
			break;
		}
		error = (*(nfsrv3_procs[nd->nd_procnum]))(nd, slp, nfsd->nfsd_procp, &mreq);
		if (mreq == NULL) {
			/* Procedure produced no reply; clean up the request. */
			if (nd != NULL) {
				m_freem(nd->nd_nam2);
				m_freem(nd->nd_mrep);
			}
			break;
		}
		if (error) {
			nfsstats.srv_errs++;
			nfsrv_updatecache(nd, 0, mreq);
			m_freem(nd->nd_nam2);
			break;
		}
		nfsstats.srvrpccnt[nd->nd_procnum]++;
		nfsrv_updatecache(nd, 1, mreq);
		nd->nd_mrep = NULL;

		/* FALLTHROUGH */
	case RC_REPLY:
		/* Compute the total reply length for the packet header. */
		m = mreq;
		siz = 0;
		while (m) {
			siz += m->m_len;
			m = m->m_next;
		}

		if (siz <= 0 || siz > NFS_MAXPACKET)
			panic("bad nfs svc reply, siz = %i", siz);

		m = mreq;
		m->m_pkthdr.len = siz;
		m->m_pkthdr.ph_ifidx = 0;

		/* For stream protocols, prepend a Sun RPC Record Mark. */
		if (sotype == SOCK_STREAM) {
			M_PREPEND(m, NFSX_UNSIGNED, M_WAIT);
			/* 0x80000000 marks the last fragment of the record. */
			*mtod(m, u_int32_t *) = htonl(0x80000000 | siz);
		}

		if (solockp)
			nfs_sndlock(solockp, NULL);

		if (ISSET(slp->ns_flag, SLP_VALID))
			error = nfs_send(so, nd->nd_nam2, m, NULL);
		else {
			error = EPIPE;
			m_freem(m);
		}
		m_freem(nd->nd_nam2);
		m_freem(nd->nd_mrep);
		if (error == EPIPE)
			nfsrv_zapsock(slp);
		if (solockp)
			nfs_sndunlock(solockp);
		/* Signalled during the send: release everything and exit. */
		if (error == EINTR || error == ERESTART) {
			pool_put(&nfsrv_descript_pl, nd);
			nfsrv_slpderef(slp);
			goto done;
		}
		break;
	case RC_DROPIT:
		/* Duplicate request still being processed; just discard it. */
		m_freem(nd->nd_mrep);
		m_freem(nd->nd_nam2);
		break;
	};

	if (nd) {
		pool_put(&nfsrv_descript_pl, nd);
		nd = NULL;
	}

	/* If no more records are queued on this slp, detach from it. */
	if (nfsrv_dorec(slp, nfsd, &nd)) {
		nfsd->nfsd_flag &= ~NFSD_REQINPROG;
		nfsd->nfsd_slp = NULL;
		nfsrv_slpderef(slp);
	}
	goto loop;

done:
	TAILQ_REMOVE(&nfsd_head, nfsd, nfsd_chain);
	free(nfsd, M_NFSD, sizeof(*nfsd));
	if (--nfs_numnfsd == 0)
		nfsrv_init(1);	/* Reinitialize everything */
	return (error);
}
492
493 /*
494 * Shut down a socket associated with an nfssvc_sock structure.
495 * Should be called with the send lock set, if required.
496 * The trick here is to increment the sref at the start, so that the nfsds
497 * will stop using it and clear ns_flag at the end so that it will not be
498 * reassigned during cleanup.
499 */
500 void
nfsrv_zapsock(struct nfssvc_sock * slp)501 nfsrv_zapsock(struct nfssvc_sock *slp)
502 {
503 struct socket *so;
504 struct file *fp;
505 struct mbuf *m, *n;
506
507 slp->ns_flag &= ~SLP_ALLFLAGS;
508 fp = slp->ns_fp;
509 if (fp) {
510 FREF(fp);
511 slp->ns_fp = NULL;
512 so = slp->ns_so;
513 so->so_upcall = NULL;
514 soshutdown(so, SHUT_RDWR);
515 closef(fp, NULL);
516 if (slp->ns_nam)
517 m = m_free(slp->ns_nam);
518 m_freem(slp->ns_raw);
519 m = slp->ns_rec;
520 while (m) {
521 n = m->m_nextpkt;
522 m_freem(m);
523 m = n;
524 }
525 }
526 }
527
528 /*
529 * Dereference a server socket structure. If it has no more references and
530 * is no longer valid, you can throw it away.
531 */
532 void
nfsrv_slpderef(struct nfssvc_sock * slp)533 nfsrv_slpderef(struct nfssvc_sock *slp)
534 {
535 if (--(slp->ns_sref) == 0 && (slp->ns_flag & SLP_VALID) == 0) {
536 TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
537 free(slp, M_NFSSVC, sizeof(*slp));
538 }
539 }
540
/*
 * Initialize the data structures for the server.
 * Handshake with any new nfsds starting up to avoid any chance of
 * corruption.
 *
 * With 'terminating' set (called when the last nfsd exits), all existing
 * server sockets are zapped and freed and the request cache is cleared
 * before the lists are rebuilt.  SLP_INIT excludes concurrent nfssvc()
 * callers for the duration (see sys_nfssvc()).
 */
void
nfsrv_init(int terminating)
{
	struct nfssvc_sock *slp, *nslp;

	if (nfssvc_sockhead_flag & SLP_INIT)
		panic("nfsd init");
	nfssvc_sockhead_flag |= SLP_INIT;
	if (terminating) {
		for (slp = TAILQ_FIRST(&nfssvc_sockhead); slp != NULL;
		    slp = nslp) {
			nslp = TAILQ_NEXT(slp, ns_chain);
			if (slp->ns_flag & SLP_VALID)
				nfsrv_zapsock(slp);
			TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
			free(slp, M_NFSSVC, sizeof(*slp));
		}
		nfsrv_cleancache();	/* And clear out server cache */
	}

	TAILQ_INIT(&nfssvc_sockhead);
	nfssvc_sockhead_flag &= ~SLP_INIT;
	/* Wake any thread that slept on SLP_INIT in sys_nfssvc(). */
	if (nfssvc_sockhead_flag & SLP_WANTINIT) {
		nfssvc_sockhead_flag &= ~SLP_WANTINIT;
		wakeup((caddr_t)&nfssvc_sockhead);
	}

	TAILQ_INIT(&nfsd_head);
	nfsd_head_flag &= ~NFSD_CHECKSLP;

	/* Pre-allocate the single UDP socket slot (see nfssvc_addsock()). */
	nfs_udpsock = malloc(sizeof(*nfs_udpsock), M_NFSSVC,
	    M_WAITOK|M_ZERO);
	TAILQ_INSERT_HEAD(&nfssvc_sockhead, nfs_udpsock, ns_chain);

	/* The descriptor pool is created once, on first initialization. */
	if (!terminating) {
		pool_init(&nfsrv_descript_pl, sizeof(struct nfsrv_descript),
		    0, IPL_NONE, PR_WAITOK, "ndscpl", NULL);
	}
}
#endif /* NFSSERVER */
586
#ifdef NFSCLIENT
/*
 * Asynchronous I/O threads for client nfs.
 * They do read-ahead and write-behind operations on the block I/O cache.
 * Never returns unless it fails or gets killed.
 *
 * Runs as a kthread (see nfs_getset_niothreads()).  Each thread claims a
 * slot in nfs_asyncdaemon[] and contributes 'bufcount' to nfs_bufqmax,
 * the global cap on queued async bufs.
 */
void
nfssvc_iod(void *arg)
{
	struct proc *p = curproc;
	struct buf *bp, *nbp;
	int i, myiod;
	struct vnode *vp;
	int error = 0, s, bufcount;

	/* Size our queue contribution from buffer-cache capacity. */
	bufcount = MIN(256, bcstats.kvaslots / 8);
	bufcount = MIN(bufcount, bcstats.numbufs / 8);

	/* Assign my position or return error if too many already running. */
	myiod = -1;
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++) {
		if (nfs_asyncdaemon[i] == NULL) {
			myiod = i;
			break;
		}
	}
	if (myiod == -1)
		kthread_exit(EBUSY);

	nfs_asyncdaemon[myiod] = p;
	nfs_numasync++;

	/* Upper limit on how many bufs we'll queue up for this iod. */
	if (nfs_bufqmax > bcstats.kvaslots / 4) {
		nfs_bufqmax = bcstats.kvaslots / 4;
		bufcount = 0;
	}
	if (nfs_bufqmax > bcstats.numbufs / 4) {
		nfs_bufqmax = bcstats.numbufs / 4;
		bufcount = 0;
	}

	nfs_bufqmax += bufcount;
	wakeup(&nfs_bufqlen); /* wake up anyone waiting for room to enqueue IO */

	/* Just loop around doin our stuff until SIGKILL. */
	for (;;) {
		/* Sleep until work arrives or we are signalled. */
		while (TAILQ_FIRST(&nfs_bufq) == NULL && error == 0) {
			error = tsleep_nsec(&nfs_bufq,
			    PWAIT | PCATCH, "nfsidl", INFSLP);
		}
		while ((bp = TAILQ_FIRST(&nfs_bufq)) != NULL) {
			/* Take one off the front of the list */
			TAILQ_REMOVE(&nfs_bufq, bp, b_freelist);
			nfs_bufqlen--;
			wakeup_one(&nfs_bufqlen);
			if (bp->b_flags & B_READ)
				(void) nfs_doio(bp, NULL);
			else do {
				/*
				 * Look for a delayed write for the same vnode, so I can do
				 * it now. We must grab it before calling nfs_doio() to
				 * avoid any risk of the vnode getting vclean()'d while
				 * we are doing the write rpc.
				 */
				vp = bp->b_vp;
				s = splbio();
				LIST_FOREACH(nbp, &vp->v_dirtyblkhd, b_vnbufs) {
					if ((nbp->b_flags &
					    (B_BUSY|B_DELWRI|B_NEEDCOMMIT|B_NOCACHE))!=B_DELWRI)
						continue;
					nbp->b_flags |= B_ASYNC;
					bremfree(nbp);
					buf_acquire(nbp);
					break;
				}
				/*
				 * For the delayed write, do the first part of nfs_bwrite()
				 * up to, but not including nfs_strategy().
				 */
				if (nbp) {
					nbp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
					buf_undirty(nbp);
					nbp->b_vp->v_numoutput++;
				}
				splx(s);

				(void) nfs_doio(bp, NULL);
			} while ((bp = nbp) != NULL);
		}
		/* Signalled: release our slot and quota, then exit. */
		if (error) {
			nfs_asyncdaemon[myiod] = NULL;
			nfs_numasync--;
			nfs_bufqmax -= bufcount;
			kthread_exit(error);
		}
	}
}
685
686 void
nfs_getset_niothreads(int set)687 nfs_getset_niothreads(int set)
688 {
689 int i, have, start;
690
691 for (have = 0, i = 0; i < NFS_MAXASYNCDAEMON; i++)
692 if (nfs_asyncdaemon[i] != NULL)
693 have++;
694
695 if (set) {
696 /* clamp to sane range */
697 nfs_niothreads = max(0, min(nfs_niothreads, NFS_MAXASYNCDAEMON));
698
699 start = nfs_niothreads - have;
700
701 while (start > 0) {
702 kthread_create(nfssvc_iod, NULL, NULL, "nfsio");
703 start--;
704 }
705
706 for (i = 0; (start < 0) && (i < NFS_MAXASYNCDAEMON); i++)
707 if (nfs_asyncdaemon[i] != NULL) {
708 psignal(nfs_asyncdaemon[i], SIGKILL);
709 start++;
710 }
711 } else {
712 if (nfs_niothreads >= 0)
713 nfs_niothreads = have;
714 }
715 }
716 #endif /* NFSCLIENT */
717
#ifdef NFSSERVER
/*
 * Find an nfssrv_sock for nfsd, sleeping if needed.
 *
 * Sleeps interruptibly until nfsrv_wakenfsd() signals pending work
 * (NFSD_CHECKSLP), then claims the first valid socket with a queued
 * record, taking a reference on it.  Returns 0 with nfsd->nfsd_slp set,
 * or the tsleep error (EINTR/ERESTART) if interrupted by a signal.
 */
int
nfsrv_getslp(struct nfsd *nfsd)
{
	struct nfssvc_sock *slp;
	int error;

again:
	while (nfsd->nfsd_slp == NULL &&
	    (nfsd_head_flag & NFSD_CHECKSLP) == 0) {
		nfsd->nfsd_flag |= NFSD_WAITING;
		nfsd_waiting++;
		error = tsleep_nsec(nfsd, PSOCK | PCATCH, "nfsd", INFSLP);
		nfsd_waiting--;
		if (error)
			return (error);
	}

	if (nfsd->nfsd_slp == NULL &&
	    (nfsd_head_flag & NFSD_CHECKSLP) != 0) {
		TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
			if ((slp->ns_flag & (SLP_VALID | SLP_DOREC)) ==
			    (SLP_VALID | SLP_DOREC)) {
				/* Claim this socket; released via nfsrv_slpderef(). */
				slp->ns_flag &= ~SLP_DOREC;
				slp->ns_sref++;
				nfsd->nfsd_slp = slp;
				break;
			}
		}
		/* No socket had work after all; clear the hint. */
		if (slp == NULL)
			nfsd_head_flag &= ~NFSD_CHECKSLP;
	}

	/* Lost the race to another nfsd; go back to sleep. */
	if (nfsd->nfsd_slp == NULL)
		goto again;

	return (0);
}
#endif /* NFSSERVER */
760