/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>

#define SMBIOD_SLEEP_TIMO	2
#define	SMBIOD_PING_TIMO	60	/* seconds */

#define	SMB_IOD_EVLOCKPTR(iod)	(&((iod)->iod_evlock))
#define	SMB_IOD_EVLOCK(iod)	smb_sl_lock(&((iod)->iod_evlock))
#define	SMB_IOD_EVUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_evlock))

#define	SMB_IOD_RQLOCKPTR(iod)	(&((iod)->iod_rqlock))
#define	SMB_IOD_RQLOCK(iod)	smb_sl_lock(&((iod)->iod_rqlock))
#define	SMB_IOD_RQUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_rqlock))

#define	smb_iod_wakeup(iod)	wakeup(&(iod)->iod_flags)

static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static int  smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

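/*
 * Mark a request as processed: record the error code, bump the reply
 * generation counter, flip the state to SMBRQ_NOTIFIED and wake up
 * anybody sleeping in smb_iod_waitrq().
 */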
static __inline void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{
	SMBRQ_SLOCK(rqp);
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	wakeup(&rqp->sr_state);
	SMBRQ_SUNLOCK(rqp);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, ENOTCONN);
	}
	SMB_IOD_RQUNLOCK(iod);
}

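/*
 * Tear down the transport attached to this VC, if any.
 */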
static void
smb_iod_closetran(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;

	if (vcp->vc_tdata == NULL)
		return;
	SMB_TRAN_DISCONNECT(vcp, td);
	SMB_TRAN_DONE(vcp, td);
	vcp->vc_tdata = NULL;
}

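/*
 * Declare the connection dead: drop the transport and fail all
 * outstanding requests with ENOTCONN.
 */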
static void
smb_iod_dead(struct smbiod *iod)
{
	iod->iod_state = SMBIOD_ST_DEAD;
	smb_iod_closetran(iod);
	smb_iod_invrq(iod);
}

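/*
 * (Re)establish the connection: create, bind and connect the
 * transport, then run the SMB negotiate and session setup exchanges.
 * Any failure marks the connection dead.
 */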
static int
smb_iod_connect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	int error;

	SMBIODEBUG("%d\n", iod->iod_state);
	switch (iod->iod_state) {
	    case SMBIOD_ST_VCACTIVE:
		SMBERROR("called for already opened connection\n");
		return EISCONN;
	    case SMBIOD_ST_DEAD:
		return ENOTCONN;	/* XXX: last error code ? */
	    default:
		break;
	}
	vcp->vc_genid++;
	error = (int)SMB_TRAN_CREATE(vcp, td);
	if (error)
		goto fail;
	SMBIODEBUG("tcreate\n");
	if (vcp->vc_laddr) {
		error = (int)SMB_TRAN_BIND(vcp, vcp->vc_laddr, td);
		if (error)
			goto fail;
	}
	SMBIODEBUG("tbind\n");
	error = (int)SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, td);
	if (error)
		goto fail;
	SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
	iod->iod_state = SMBIOD_ST_TRANACTIVE;
	SMBIODEBUG("tconnect\n");
	/* vcp->vc_mid = 0;*/
	error = (int)smb_smb_negotiate(vcp, &iod->iod_scred);
	if (error)
		goto fail;
	SMBIODEBUG("snegotiate\n");
	error = (int)smb_smb_ssnsetup(vcp, &iod->iod_scred);
	if (error)
		goto fail;
	iod->iod_state = SMBIOD_ST_VCACTIVE;
	SMBIODEBUG("completed\n");
	smb_iod_invrq(iod);
	return (0);

 fail:
	smb_iod_dead(iod);
	return (error);
}

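/*
 * Shut the connection down: close the SMB session if it is active,
 * then tear down the transport.
 */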
static int
smb_iod_disconnect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;

	SMBIODEBUG("\n");
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &iod->iod_scred);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
	smb_iod_closetran(iod);
	iod->iod_state = SMBIOD_ST_NOTCONN;
	return 0;
}

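/*
 * (Re)connect to the given share, reviving the VC first if it is
 * dead.  SMBS_RECONNECTING is set on the share for the duration of
 * the tree connect.
 */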
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
	int error;

	if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
		if (iod->iod_state != SMBIOD_ST_DEAD)
			return ENOTCONN;
		iod->iod_state = SMBIOD_ST_RECONNECT;
		error = smb_iod_connect(iod);
		if (error)
			return error;
	}
	SMBIODEBUG("tree reconnect\n");
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags |= SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	error = smb_smb_treeconnect(ssp, &iod->iod_scred);
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags &= ~SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	wakeup(&ssp->ss_vcgenid);
	return error;
}

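/*
 * Try to transmit a single request.  On the first attempt the TID and
 * UID header fields are filled in and the message is signed if
 * required; after repeated failed attempts the request is failed with
 * SMBR_RESTART set.
 */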
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
	struct thread *td = iod->iod_td;
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_share *ssp = rqp->sr_share;
	struct mbuf *m;
	int error;

	SMBIODEBUG("iod_state = %d\n", iod->iod_state);
	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN);
		return 0;
	    case SMBIOD_ST_DEAD:
		iod->iod_state = SMBIOD_ST_RECONNECT;
		return 0;
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	if (rqp->sr_sendcnt == 0) {
#ifdef movedtoanotherplace
		if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
			return 0;
#endif
		le16enc(rqp->sr_rqtid, ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
		le16enc(rqp->sr_rquid, vcp ? vcp->vc_smbuid : 0);
		mb_fixhdr(&rqp->sr_rq);
		if (vcp->vc_hflags2 & SMB_FLAGS2_SECURITY_SIGNATURE)
			smb_rq_sign(rqp);
	}
	if (rqp->sr_sendcnt++ > 5) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, rqp->sr_lerror);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return ENOTCONN;
	}
	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
	m_dumpm(rqp->sr_rq.mb_top);
	m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_WAITOK);
	error = rqp->sr_lerror = SMB_TRAN_SEND(vcp, m, td);
	if (error == 0) {
		getnanotime(&rqp->sr_timesent);
		iod->iod_lastrqsent = rqp->sr_timesent;
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		return 0;
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		return ENOTCONN;
	}
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR);
	return 0;
}

/*
 * Process incoming packets
 */
static int
smb_iod_recvall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	struct smb_rq *rqp;
	struct mbuf *m;
	u_char *hp;
	u_short mid;
	int error;

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
	    case SMBIOD_ST_DEAD:
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	for (;;) {
		m = NULL;
		error = SMB_TRAN_RECV(vcp, &m, td);
		if (error == EWOULDBLOCK)
			break;
		if (SMB_TRAN_FATAL(vcp, error)) {
			smb_iod_dead(iod);
			break;
		}
		if (error)
			break;
		if (m == NULL) {
			SMBERROR("transport returned NULL without error\n");
			error = EPIPE;
			continue;
		}
		m = m_pullup(m, SMB_HDRLEN);
		if (m == NULL)
			continue;	/* wait for a good packet */
		/*
		 * We now have an entire, though possibly invalid, SMB
		 * packet.  Be careful while parsing it.
		 */
		m_dumpm(m);
		hp = mtod(m, u_char*);
		if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
			m_freem(m);
			continue;
		}
		mid = SMB_HDRMID(hp);
		SMBSDEBUG("mid %04x\n", (u_int)mid);
		SMB_IOD_RQLOCK(iod);
		TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
			if (rqp->sr_mid != mid)
				continue;
			SMBRQ_SLOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_SUNLOCK(rqp);
					SMBERROR("duplicate response %d (ignored)\n", mid);
					break;
				}
			}
			SMBRQ_SUNLOCK(rqp);
			smb_iod_rqprocessed(rqp, 0);
			break;
		}
		SMB_IOD_RQUNLOCK(iod);
		if (rqp == NULL) {
			SMBERROR("dropping response with mid %u\n", (u_int)mid);
/*			smb_printrqlist(vcp);*/
			m_freem(m);
		}
	}
	/*
	 * check for interrupts
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (smb_td_intr(rqp->sr_cred->scr_td)) {
			smb_iod_rqprocessed(rqp, EINTR);
		}
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

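/*
 * Post an event to the iod thread.  For SMBIOD_EV_SYNC events the
 * caller sleeps until the iod has processed the event and picks up
 * the event's error code.
 */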
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
	struct smbiod_event *evp;
	int error;

	SMBIODEBUG("\n");
	evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
	evp->ev_type = event;
	evp->ev_ident = ident;
	SMB_IOD_EVLOCK(iod);
	STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
	if ((event & SMBIOD_EV_SYNC) == 0) {
		SMB_IOD_EVUNLOCK(iod);
		smb_iod_wakeup(iod);
		return 0;
	}
	smb_iod_wakeup(iod);
	msleep(evp, SMB_IOD_EVLOCKPTR(iod), PWAIT | PDROP, "90evw", 0);
	error = evp->ev_error;
	free(evp, M_SMBIOD);
	return error;
}

/*
 * Place a request in the queue.
 * Requests from smbiod itself have a high priority.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_cred->scr_td != NULL &&
	    rqp->sr_cred->scr_td->td_proc == iod->iod_p) {
		rqp->sr_flags |= SMBR_INTERNAL;
		SMB_IOD_RQLOCK(iod);
		TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		for (;;) {
			if (smb_iod_sendrq(iod, rqp) != 0) {
				smb_iod_dead(iod);
				break;
			}
			/*
			 * We don't need to lock the state field here.
			 */
			if (rqp->sr_state != SMBRQ_NOTSENT)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90sndw", hz);
		}
		if (rqp->sr_lerror)
			smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		return ENOTCONN;
	    case SMBIOD_ST_DEAD:
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
		if (error)
			return error;
		return EXDEV;
	    default:
		break;
	}

	SMB_IOD_RQLOCK(iod);
	for (;;) {
		if (vcp->vc_maxmux == 0) {
			SMBERROR("maxmux == 0\n");
			break;
		}
		if (iod->iod_muxcnt < vcp->vc_maxmux)
			break;
		iod->iod_muxwant++;
		msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod),
		    PWAIT, "90mux", 0);
	}
	iod->iod_muxcnt++;
	TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
	SMB_IOD_RQUNLOCK(iod);
	smb_iod_wakeup(iod);
	return 0;
}

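/*
 * Remove a request from the queue.  For non-internal requests this
 * also releases a mux slot and wakes up any thread waiting for one.
 */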
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		return 0;
	}
	SMB_IOD_RQLOCK(iod);
	while (rqp->sr_flags & SMBR_XLOCK) {
		rqp->sr_flags |= SMBR_XLOCKWANT;
		msleep(rqp, SMB_IOD_RQLOCKPTR(iod), PWAIT, "90xrm", 0);
	}
	TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
	iod->iod_muxcnt--;
	if (iod->iod_muxwant) {
		iod->iod_muxwant--;
		wakeup(&iod->iod_muxwant);
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

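/*
 * Wait for a request to complete.  Internal requests are driven to
 * completion right here; ordinary requests sleep until the iod
 * notifies them through smb_iod_rqprocessed().
 */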
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smbiod *iod = rqp->sr_vc->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		for (;;) {
			smb_iod_sendall(iod);
			smb_iod_recvall(iod);
			if (rqp->sr_rpgen != rqp->sr_rplast)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90irq", hz);
		}
		smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}
	SMBRQ_SLOCK(rqp);
	if (rqp->sr_rpgen == rqp->sr_rplast)
		msleep(&rqp->sr_state, SMBRQ_SLOCKPTR(rqp), PWAIT, "90wrq", 0);
	rqp->sr_rplast++;
	SMBRQ_SUNLOCK(rqp);
	error = rqp->sr_lerror;
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		/*
		 * If the request should stay in the list, reinsert it at
		 * the end of the queue so that other waiters get a chance
		 * to proceed.
		 */
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
	} else
		smb_iod_removerq(rqp);
	return error;
}

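/*
 * Walk the request list: transmit anything not yet sent and time out
 * requests that have been outstanding for more than twice the
 * transport timeout.
 */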
static int
smb_iod_sendall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_rq *rqp;
	struct timespec ts, tstimeout;
	int herror;

	herror = 0;
	/*
	 * Loop through the list of requests and send them if possible
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		switch (rqp->sr_state) {
		    case SMBRQ_NOTSENT:
			rqp->sr_flags |= SMBR_XLOCK;
			SMB_IOD_RQUNLOCK(iod);
			herror = smb_iod_sendrq(iod, rqp);
			SMB_IOD_RQLOCK(iod);
			rqp->sr_flags &= ~SMBR_XLOCK;
			if (rqp->sr_flags & SMBR_XLOCKWANT) {
				rqp->sr_flags &= ~SMBR_XLOCKWANT;
				wakeup(rqp);
			}
			break;
		    case SMBRQ_SENT:
			SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
			timespecadd(&tstimeout, &tstimeout, &tstimeout);
			getnanotime(&ts);
			timespecsub(&ts, &tstimeout, &ts);
			if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
				smb_iod_rqprocessed(rqp, ETIMEDOUT);
			}
			break;
		    default:
			break;
		}
		if (herror)
			break;
	}
	SMB_IOD_RQUNLOCK(iod);
	if (herror == ENOTCONN)
		smb_iod_dead(iod);
	return 0;
}

/*
 * "main" function for smbiod daemon
 */
static __inline void
smb_iod_main(struct smbiod *iod)
{
/*	struct smb_vc *vcp = iod->iod_vc;*/
	struct smbiod_event *evp;
/*	struct timespec tsnow;*/

	SMBIODEBUG("\n");

	/*
	 * Check all interesting events
	 */
	for (;;) {
		SMB_IOD_EVLOCK(iod);
		evp = STAILQ_FIRST(&iod->iod_evlist);
		if (evp == NULL) {
			SMB_IOD_EVUNLOCK(iod);
			break;
		}
		STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
		evp->ev_type |= SMBIOD_EV_PROCESSING;
		SMB_IOD_EVUNLOCK(iod);
		switch (evp->ev_type & SMBIOD_EV_MASK) {
		    case SMBIOD_EV_CONNECT:
			iod->iod_state = SMBIOD_ST_RECONNECT;
			evp->ev_error = smb_iod_connect(iod);
			break;
		    case SMBIOD_EV_DISCONNECT:
			evp->ev_error = smb_iod_disconnect(iod);
			break;
		    case SMBIOD_EV_TREECONNECT:
			evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
			break;
		    case SMBIOD_EV_SHUTDOWN:
			iod->iod_flags |= SMBIOD_SHUTDOWN;
			break;
		    case SMBIOD_EV_NEWRQ:
			break;
		}
		if (evp->ev_type & SMBIOD_EV_SYNC) {
			SMB_IOD_EVLOCK(iod);
			wakeup(evp);
			SMB_IOD_EVUNLOCK(iod);
		} else
			free(evp, M_SMBIOD);
	}
#if 0
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		getnanotime(&tsnow);
		timespecsub(&tsnow, &iod->iod_pingtimo, &tsnow);
		if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
			smb_smb_echo(vcp, &iod->iod_scred);
		}
	}
#endif
	smb_iod_sendall(iod);
	smb_iod_recvall(iod);
	return;
}

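/*
 * Body of the per-VC iod kthread: run smb_iod_main() until a shutdown
 * is requested, then release the iod's resources and exit.
 */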
static void
smb_iod_thread(void *arg)
{
	struct smbiod *iod = arg;

	mtx_lock(&Giant);

	/*
	 * Here we assume that the thread structure stays the same for
	 * the entire life of the kthread (kproc, to be more precise).
	 */
	iod->iod_td = curthread;
	smb_makescred(&iod->iod_scred, iod->iod_td, NULL);
	while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
		smb_iod_main(iod);
		SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
		if (iod->iod_flags & SMBIOD_SHUTDOWN)
			break;
		tsleep(&iod->iod_flags, PWAIT, "90idle", iod->iod_sleeptimo);
	}

	/* We can now safely destroy the mutexes and free the iod structure. */
	smb_sl_destroy(&iod->iod_rqlock);
	smb_sl_destroy(&iod->iod_evlock);
	free(iod, M_SMBIOD);
	mtx_unlock(&Giant);
	kproc_exit(0);
}

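/*
 * Allocate and initialize an iod for the given VC and start its
 * kthread.
 */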
int
smb_iod_create(struct smb_vc *vcp)
{
	struct smbiod *iod;
	int error;

	iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
	iod->iod_id = smb_iod_next++;
	iod->iod_state = SMBIOD_ST_NOTCONN;
	iod->iod_vc = vcp;
	iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
	iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
	getnanotime(&iod->iod_lastrqsent);
	vcp->vc_iod = iod;
	smb_sl_init(&iod->iod_rqlock, "90rql");
	TAILQ_INIT(&iod->iod_rqlist);
	smb_sl_init(&iod->iod_evlock, "90evl");
	STAILQ_INIT(&iod->iod_evlist);
	error = kproc_create(smb_iod_thread, iod, &iod->iod_p,
	    RFNOWAIT, 0, "smbiod%d", iod->iod_id);
	if (error) {
		SMBERROR("can't start smbiod: %d\n", error);
		vcp->vc_iod = NULL;
		smb_sl_destroy(&iod->iod_rqlock);
		smb_sl_destroy(&iod->iod_evlock);
		free(iod, M_SMBIOD);
		return error;
	}
	return 0;
}

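/*
 * Post a synchronous shutdown event; the iod thread frees its own
 * resources on exit.
 */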
int
smb_iod_destroy(struct smbiod *iod)
{
	smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
	return 0;
}

int
smb_iod_init(void)
{
	return 0;
}

int
smb_iod_done(void)
{
	return 0;
}