/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netsmb/smb_iod.c,v 1.1.2.2 2002/04/23 03:45:01 bp Exp $
 * $DragonFly: src/sys/netproto/smb/smb_iod.c,v 1.15 2007/02/03 17:05:58 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include "smb.h"
#include "smb_conn.h"
#include "smb_rq.h"
#include "smb_tran.h"
#include "smb_trantcp.h"


#define SMBIOD_SLEEP_TIMO       2
#define SMBIOD_PING_TIMO        60      /* seconds */

#define SMB_IOD_EVLOCKPTR(iod)  (&(iod)->iod_evlock)
#define SMB_IOD_EVLOCK(iod)     smb_sl_lock(&(iod)->iod_evlock)
#define SMB_IOD_EVUNLOCK(iod)   smb_sl_unlock(&(iod)->iod_evlock)
#define SMB_IOD_EVINTERLOCK(iod) (&(iod)->iod_evlock)

#define SMB_IOD_RQLOCKPTR(iod)  (&(iod)->iod_rqlock)
#define SMB_IOD_RQLOCK(iod)     smb_sl_lock(&((iod)->iod_rqlock))
#define SMB_IOD_RQUNLOCK(iod)   smb_sl_unlock(&(iod)->iod_rqlock)
#define SMB_IOD_RQINTERLOCK(iod) (&(iod)->iod_rqlock)

#define smb_iod_wakeup(iod)     wakeup(&(iod)->iod_flags)


static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static int  smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

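/*
 * Mark a request as processed: record the error code, bump the response
 * generation counter, flip the state to NOTIFIED and wake up the waiter
 * sleeping in smb_iod_waitrq().
 */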
static __inline void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{
        SMBRQ_SLOCK(rqp);
        rqp->sr_lerror = error;
        rqp->sr_rpgen++;
        rqp->sr_state = SMBRQ_NOTIFIED;
        wakeup(&rqp->sr_state);
        SMBRQ_SUNLOCK(rqp);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
        struct smb_rq *rqp;

        /*
         * Invalidate all outstanding requests for this connection
         */
        SMB_IOD_RQLOCK(iod);
        TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
#if 0
                /* this makes no sense whatsoever XXX */
                if (rqp->sr_flags & SMBR_INTERNAL)
                        SMBRQ_SUNLOCK(rqp);
#endif
                rqp->sr_flags |= SMBR_RESTART;
                smb_iod_rqprocessed(rqp, ENOTCONN);
        }
        SMB_IOD_RQUNLOCK(iod);
}

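/*
 * Tear down the transport, if one is attached: disconnect and release
 * it, then clear vc_tdata so a repeated call becomes a no-op.
 */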
static void
smb_iod_closetran(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;
        struct thread *td = iod->iod_td;

        if (vcp->vc_tdata == NULL)
                return;
        SMB_TRAN_DISCONNECT(vcp, td);
        SMB_TRAN_DONE(vcp, td);
        vcp->vc_tdata = NULL;
}

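/*
 * Declare the connection dead: close the transport and fail all
 * outstanding requests with ENOTCONN so their owners can restart them.
 */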
static void
smb_iod_dead(struct smbiod *iod)
{
        iod->iod_state = SMBIOD_ST_DEAD;
        smb_iod_closetran(iod);
        smb_iod_invrq(iod);
}

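/*
 * (Re)establish the connection: create, optionally bind and connect the
 * transport, then run the SMB negotiate and session setup exchanges.
 * Any failure along the way marks the connection dead.
 */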
static int
smb_iod_connect(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;
        struct thread *td = iod->iod_td;
        int error;

        SMBIODEBUG("%d\n", iod->iod_state);
        switch (iod->iod_state) {
            case SMBIOD_ST_VCACTIVE:
                SMBERROR("called for an already opened connection\n");
                return EISCONN;
            case SMBIOD_ST_DEAD:
                return ENOTCONN;        /* XXX: last error code ? */
            default:
                break;
        }
        vcp->vc_genid++;
        error = 0;
        itry {
                ithrow(SMB_TRAN_CREATE(vcp, td));
                SMBIODEBUG("tcreate\n");
                if (vcp->vc_laddr) {
                        ithrow(SMB_TRAN_BIND(vcp, vcp->vc_laddr, td));
                }
                SMBIODEBUG("tbind\n");
                ithrow(SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, td));
                SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
                iod->iod_state = SMBIOD_ST_TRANACTIVE;
                SMBIODEBUG("tconnect\n");
/*              vcp->vc_mid = 0;*/
                ithrow(smb_smb_negotiate(vcp, &iod->iod_scred));
                SMBIODEBUG("snegotiate\n");
                ithrow(smb_smb_ssnsetup(vcp, &iod->iod_scred));
                iod->iod_state = SMBIOD_ST_VCACTIVE;
                SMBIODEBUG("completed\n");
                smb_iod_invrq(iod);
        } icatch(error) {
                smb_iod_dead(iod);
        } ifinally {
        } iendtry;
        return error;
}

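/*
 * Orderly shutdown: close the SMB session if one is active, then take
 * down the transport and return to the not-connected state.
 */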
static int
smb_iod_disconnect(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;

        SMBIODEBUG("\n");
        if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
                smb_smb_ssnclose(vcp, &iod->iod_scred);
                iod->iod_state = SMBIOD_ST_TRANACTIVE;
        }
        vcp->vc_smbuid = SMB_UID_UNKNOWN;
        smb_iod_closetran(iod);
        iod->iod_state = SMBIOD_ST_NOTCONN;
        return 0;
}

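/*
 * Reconnect a share, re-establishing the VC first if it is dead.
 * SMBS_RECONNECTING is set for the duration of the tree connect so
 * other code can tell that a reconnect is in progress.
 */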
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
        int error;

        if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
                if (iod->iod_state != SMBIOD_ST_DEAD)
                        return ENOTCONN;
                iod->iod_state = SMBIOD_ST_RECONNECT;
                error = smb_iod_connect(iod);
                if (error)
                        return error;
        }
        SMBIODEBUG("tree reconnect\n");
        SMBS_ST_LOCK(ssp);
        ssp->ss_flags |= SMBS_RECONNECTING;
        SMBS_ST_UNLOCK(ssp);
        error = smb_smb_treeconnect(ssp, &iod->iod_scred);
        SMBS_ST_LOCK(ssp);
        ssp->ss_flags &= ~SMBS_RECONNECTING;
        SMBS_ST_UNLOCK(ssp);
        wakeup(&ssp->ss_vcgenid);
        return error;
}

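/*
 * Try to transmit a single request.  On the first attempt
 * (sr_sendcnt == 0) the TID and UID words are patched into the prebuilt
 * SMB header; once sr_sendcnt exceeds 5 the request is failed with
 * SMBR_RESTART set.  Returns ENOTCONN only when retries are exhausted
 * or the transport reports a fatal error.
 */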
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
        struct thread *td = iod->iod_td;
        struct smb_vc *vcp = iod->iod_vc;
        struct smb_share *ssp = rqp->sr_share;
        struct mbuf *m;
        int error;

        SMBIODEBUG("iod_state = %d\n", iod->iod_state);
        switch (iod->iod_state) {
            case SMBIOD_ST_NOTCONN:
                smb_iod_rqprocessed(rqp, ENOTCONN);
                return 0;
            case SMBIOD_ST_DEAD:
                iod->iod_state = SMBIOD_ST_RECONNECT;
                return 0;
            case SMBIOD_ST_RECONNECT:
                return 0;
            default:
                break;
        }
        if (rqp->sr_sendcnt == 0) {
#ifdef movedtoanotherplace
                if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
                        return 0;
#endif
                *rqp->sr_rqtid = htoles(ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
                *rqp->sr_rquid = htoles(vcp ? vcp->vc_smbuid : 0);
                mb_fixhdr(&rqp->sr_rq);
        }
        if (rqp->sr_sendcnt++ > 5) {
                rqp->sr_flags |= SMBR_RESTART;
                smb_iod_rqprocessed(rqp, rqp->sr_lerror);
                /*
                 * If all attempts to send a request failed, then
                 * something is seriously hosed.
                 */
                return ENOTCONN;
        }
        SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
        m_dumpm(rqp->sr_rq.mb_top);
        m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, MB_WAIT);
        error = rqp->sr_lerror = m ? SMB_TRAN_SEND(vcp, m, td) : ENOBUFS;
        if (error == 0) {
                getnanotime(&rqp->sr_timesent);
                iod->iod_lastrqsent = rqp->sr_timesent;
                rqp->sr_flags |= SMBR_SENT;
                rqp->sr_state = SMBRQ_SENT;
                return 0;
        }
        /*
         * Check for fatal errors
         */
        if (SMB_TRAN_FATAL(vcp, error)) {
                /*
                 * No further attempts should be made
                 */
                return ENOTCONN;
        }
        if (smb_rq_intr(rqp))
                smb_iod_rqprocessed(rqp, EINTR);
        return 0;
}

/*
 * Process incoming packets
 */
static int
smb_iod_recvall(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;
        struct thread *td = iod->iod_td;
        struct smb_rq *rqp;
        struct mbuf *m;
        u_char *hp;
        u_short mid;
        int error;

        switch (iod->iod_state) {
            case SMBIOD_ST_NOTCONN:
            case SMBIOD_ST_DEAD:
            case SMBIOD_ST_RECONNECT:
                return 0;
            default:
                break;
        }
        for (;;) {
                m = NULL;
                error = SMB_TRAN_RECV(vcp, &m, td);
                if (error == EWOULDBLOCK)
                        break;
                if (SMB_TRAN_FATAL(vcp, error)) {
                        smb_iod_dead(iod);
                        break;
                }
                if (error)
                        break;
                if (m == NULL) {
                        SMBERROR("transport returned NULL without an error\n");
                        error = EPIPE;
                        continue;
                }
                m = m_pullup(m, SMB_HDRLEN);
                if (m == NULL)
                        continue;       /* wait for a good packet */
                /*
                 * We now have a complete, but possibly invalid, SMB packet.
                 * Be careful while parsing it.
                 */
                m_dumpm(m);
                hp = mtod(m, u_char*);
                if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
                        m_freem(m);
                        continue;
                }
                mid = SMB_HDRMID(hp);
                SMBSDEBUG("mid %04x\n", (u_int)mid);
                SMB_IOD_RQLOCK(iod);
                TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
                        if (rqp->sr_mid != mid)
                                continue;
                        SMBRQ_SLOCK(rqp);
                        if (rqp->sr_rp.md_top == NULL) {
                                md_initm(&rqp->sr_rp, m);
                        } else {
                                if (rqp->sr_flags & SMBR_MULTIPACKET) {
                                        md_append_record(&rqp->sr_rp, m);
                                } else {
                                        SMBRQ_SUNLOCK(rqp);
                                        SMBERROR("duplicate response %d (ignored)\n", mid);
                                        m_freem(m);     /* was leaked here */
                                        break;
                                }
                        }
                        SMBRQ_SUNLOCK(rqp);
                        smb_iod_rqprocessed(rqp, 0);
                        break;
                }
                SMB_IOD_RQUNLOCK(iod);
                if (rqp == NULL) {
                        SMBERROR("drop resp with mid %u\n", (u_int)mid);
/*                      smb_printrqlist(vcp);*/
                        m_freem(m);
                }
        }
        /*
         * check for interrupts
         */
        SMB_IOD_RQLOCK(iod);
        TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
                if (smb_proc_intr(rqp->sr_cred->scr_td)) {
                        smb_iod_rqprocessed(rqp, EINTR);
                }
        }
        SMB_IOD_RQUNLOCK(iod);
        return 0;
}

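/*
 * Post an event to the iod.  Asynchronous events just wake the iod
 * thread; SMBIOD_EV_SYNC events additionally sleep until smb_iod_main()
 * has processed the event, then return its result from ev_error.
 */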
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
        struct smbiod_event *evp;
        int error;

        SMBIODEBUG("\n");
        evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
        evp->ev_type = event;
        evp->ev_ident = ident;
        SMB_IOD_EVLOCK(iod);
        STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
        if ((event & SMBIOD_EV_SYNC) == 0) {
                SMB_IOD_EVUNLOCK(iod);
                smb_iod_wakeup(iod);
                return 0;
        }
        smb_iod_wakeup(iod);
        smb_sleep(evp, SMB_IOD_EVINTERLOCK(iod), PDROP, "90evw", 0);
        error = evp->ev_error;
        kfree(evp, M_SMBIOD);
        return error;
}

/*
 * Place a request in the queue.
 * Requests from the smbiod itself have a high priority.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
        struct smb_vc *vcp = rqp->sr_vc;
        struct smbiod *iod = vcp->vc_iod;
        int error;

        SMBIODEBUG("\n");
        if (rqp->sr_cred->scr_td == iod->iod_td) {
                rqp->sr_flags |= SMBR_INTERNAL;
                SMB_IOD_RQLOCK(iod);
                TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
                SMB_IOD_RQUNLOCK(iod);
                for (;;) {
                        if (smb_iod_sendrq(iod, rqp) != 0) {
                                smb_iod_dead(iod);
                                break;
                        }
                        /*
                         * The state field doesn't need to be locked here.
                         */
                        if (rqp->sr_state != SMBRQ_NOTSENT)
                                break;
                        tsleep(&iod->iod_flags, 0, "90sndw", hz);
                }
                if (rqp->sr_lerror)
                        smb_iod_removerq(rqp);
                return rqp->sr_lerror;
        }

        switch (iod->iod_state) {
            case SMBIOD_ST_NOTCONN:
                return ENOTCONN;
            case SMBIOD_ST_DEAD:
                error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
                if (error)
                        return error;
                return EXDEV;
            default:
                break;
        }

        SMB_IOD_RQLOCK(iod);
        for (;;) {
                if (vcp->vc_maxmux == 0) {
                        SMBERROR("maxmux == 0\n");
                        break;
                }
                if (iod->iod_muxcnt < vcp->vc_maxmux)
                        break;
                iod->iod_muxwant++;
                smb_sleep(&iod->iod_muxwant, SMB_IOD_RQINTERLOCK(iod), 0, "90mux", 0);
        }
        iod->iod_muxcnt++;
        TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
        SMB_IOD_RQUNLOCK(iod);
        smb_iod_wakeup(iod);
        return 0;
}

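/*
 * Take a request off the queue.  Non-internal requests also release a
 * mux slot and, while SMBR_XLOCK is set, wait for smb_iod_sendall() to
 * finish transmitting the request before unlinking it.
 */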
int
smb_iod_removerq(struct smb_rq *rqp)
{
        struct smb_vc *vcp = rqp->sr_vc;
        struct smbiod *iod = vcp->vc_iod;

        SMBIODEBUG("\n");
        if (rqp->sr_flags & SMBR_INTERNAL) {
                SMB_IOD_RQLOCK(iod);
                TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
                SMB_IOD_RQUNLOCK(iod);
                return 0;
        }
        SMB_IOD_RQLOCK(iod);
        while (rqp->sr_flags & SMBR_XLOCK) {
                rqp->sr_flags |= SMBR_XLOCKWANT;
                smb_sleep(rqp, SMB_IOD_RQINTERLOCK(iod), 0, "90xrm", 0);
        }
        TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
        iod->iod_muxcnt--;
        if (iod->iod_muxwant) {
                iod->iod_muxwant--;
                wakeup(&iod->iod_muxwant);
        }
        SMB_IOD_RQUNLOCK(iod);
        return 0;
}

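/*
 * Wait for a response.  Internal requests (issued by the iod thread
 * itself) are polled here directly, since nobody else will drive the
 * send/receive loops on their behalf.
 */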
int
smb_iod_waitrq(struct smb_rq *rqp)
{
        struct smbiod *iod = rqp->sr_vc->vc_iod;
        int error;

        SMBIODEBUG("\n");
        if (rqp->sr_flags & SMBR_INTERNAL) {
                for (;;) {
                        smb_iod_sendall(iod);
                        smb_iod_recvall(iod);
                        if (rqp->sr_rpgen != rqp->sr_rplast)
                                break;
                        tsleep(&iod->iod_flags, 0, "90irq", hz);
                }
                smb_iod_removerq(rqp);
                return rqp->sr_lerror;
        }
        SMBRQ_SLOCK(rqp);
        if (rqp->sr_rpgen == rqp->sr_rplast)
                smb_sleep(&rqp->sr_state, SMBRQ_INTERLOCK(rqp), 0, "90wrq", 0);
        rqp->sr_rplast++;
        SMBRQ_SUNLOCK(rqp);
        error = rqp->sr_lerror;
        if (rqp->sr_flags & SMBR_MULTIPACKET) {
                /*
                 * If the request should stay in the list, reinsert it
                 * at the end of the queue so other waiters get a chance
                 * to proceed.
                 */
                SMB_IOD_RQLOCK(iod);
                TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
                TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
                SMB_IOD_RQUNLOCK(iod);
        } else
                smb_iod_removerq(rqp);
        return error;
}

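/*
 * Walk the request list: transmit anything still unsent and time out
 * requests that have been on the wire for more than twice the
 * transport's timeout parameter.
 */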
static int
smb_iod_sendall(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;
        struct smb_rq *rqp;
        struct timespec ts, tstimeout;
        int herror;

        herror = 0;
        /*
         * Loop through the list of requests and send them if possible
         */
        SMB_IOD_RQLOCK(iod);
        TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
                switch (rqp->sr_state) {
                    case SMBRQ_NOTSENT:
                        rqp->sr_flags |= SMBR_XLOCK;
                        SMB_IOD_RQUNLOCK(iod);
                        herror = smb_iod_sendrq(iod, rqp);
                        SMB_IOD_RQLOCK(iod);
                        rqp->sr_flags &= ~SMBR_XLOCK;
                        if (rqp->sr_flags & SMBR_XLOCKWANT) {
                                rqp->sr_flags &= ~SMBR_XLOCKWANT;
                                wakeup(rqp);
                        }
                        break;
                    case SMBRQ_SENT:
                        SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
                        timespecadd(&tstimeout, &tstimeout);
                        getnanotime(&ts);
                        timespecsub(&ts, &tstimeout);
                        if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
                                smb_iod_rqprocessed(rqp, ETIMEDOUT);
                        }
                        break;
                    default:
                        break;
                }
                if (herror)
                        break;
        }
        SMB_IOD_RQUNLOCK(iod);
        if (herror == ENOTCONN)
                smb_iod_dead(iod);
        return 0;
}

/*
 * "main" function for smbiod daemon
 */
static __inline void
smb_iod_main(struct smbiod *iod)
{
/*      struct smb_vc *vcp = iod->iod_vc;*/
        struct smbiod_event *evp;
/*      struct timespec tsnow;*/
        int error;

        SMBIODEBUG("\n");
        error = 0;

        /*
         * Check all interesting events
         */
        for (;;) {
                SMB_IOD_EVLOCK(iod);
                evp = STAILQ_FIRST(&iod->iod_evlist);
                if (evp == NULL) {
                        SMB_IOD_EVUNLOCK(iod);
                        break;
                }
                STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
                evp->ev_type |= SMBIOD_EV_PROCESSING;
                SMB_IOD_EVUNLOCK(iod);
                switch (evp->ev_type & SMBIOD_EV_MASK) {
                    case SMBIOD_EV_CONNECT:
                        iod->iod_state = SMBIOD_ST_RECONNECT;
                        evp->ev_error = smb_iod_connect(iod);
                        break;
                    case SMBIOD_EV_DISCONNECT:
                        evp->ev_error = smb_iod_disconnect(iod);
                        break;
                    case SMBIOD_EV_TREECONNECT:
                        evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
                        break;
                    case SMBIOD_EV_SHUTDOWN:
                        iod->iod_flags |= SMBIOD_SHUTDOWN;
                        break;
                    case SMBIOD_EV_NEWRQ:
                        break;
                }
                if (evp->ev_type & SMBIOD_EV_SYNC) {
                        SMB_IOD_EVLOCK(iod);
                        wakeup(evp);
                        SMB_IOD_EVUNLOCK(iod);
                } else
                        kfree(evp, M_SMBIOD);
        }
#if 0
        if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
                getnanotime(&tsnow);
                timespecsub(&tsnow, &iod->iod_pingtimo);
                if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
                        smb_smb_echo(vcp, &iod->iod_scred);
                }
        }
#endif
        smb_iod_sendall(iod);
        smb_iod_recvall(iod);
        return;
}

#define kthread_create_compat   kthread_create2
#define kthread_exit_compat     kthread_exit2

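/*
 * Body of the per-VC iod kernel thread: run smb_iod_main() until
 * SMBIOD_SHUTDOWN is set, sleeping between iterations unless woken
 * early by smb_iod_wakeup().
 */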
void
smb_iod_thread(void *arg)
{
        struct smbiod *iod = arg;

        smb_makescred(&iod->iod_scred, iod->iod_td, NULL);
        while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
                smb_iod_main(iod);
                SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
                if (iod->iod_flags & SMBIOD_SHUTDOWN)
                        break;
                tsleep(&iod->iod_flags, 0, "90idle", iod->iod_sleeptimo);
        }
        kthread_exit_compat();
}

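/*
 * Allocate and initialize the iod for a VC and start its kernel thread.
 */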
int
smb_iod_create(struct smb_vc *vcp)
{
        struct smbiod *iod;
        struct proc *newp = NULL;
        int error;

        iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
        iod->iod_id = smb_iod_next++;
        iod->iod_state = SMBIOD_ST_NOTCONN;
        iod->iod_vc = vcp;
        iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
        iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
        getnanotime(&iod->iod_lastrqsent);
        vcp->vc_iod = iod;
        smb_sl_init(&iod->iod_rqlock, "90rql");
        TAILQ_INIT(&iod->iod_rqlist);
        smb_sl_init(&iod->iod_evlock, "90evl");
        STAILQ_INIT(&iod->iod_evlist);
        error = kthread_create_compat(smb_iod_thread, iod, &newp,
            RFNOWAIT, "smbiod%d", iod->iod_id);
        if (error) {
                SMBERROR("can't start smbiod: %d\n", error);
                kfree(iod, M_SMBIOD);
                return error;
        }
        /* XXX lwp */
        iod->iod_td = ONLY_LWP_IN_PROC(newp)->lwp_thread;
        return 0;
}

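/*
 * Shut down the iod: post a synchronous SMBIOD_EV_SHUTDOWN event, then
 * release the iod's locks and memory.
 */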
int
smb_iod_destroy(struct smbiod *iod)
{
        smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
        smb_sl_destroy(&iod->iod_rqlock);
        smb_sl_destroy(&iod->iod_evlock);
        kfree(iod, M_SMBIOD);
        return 0;
}

int
smb_iod_init(void)
{
        return 0;
}

int
smb_iod_done(void)
{
        return 0;
}