/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netsmb/smb_iod.c,v 1.1.2.2 2002/04/23 03:45:01 bp Exp $
 * $DragonFly: src/sys/netproto/smb/smb_iod.c,v 1.15 2007/02/03 17:05:58 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include <sys/mplock2.h>

#include "smb.h"
#include "smb_conn.h"
#include "smb_rq.h"
#include "smb_tran.h"
#include "smb_trantcp.h"


#define SMBIOD_SLEEP_TIMO	2
#define	SMBIOD_PING_TIMO	60	/* seconds */

#define	SMB_IOD_EVLOCKPTR(iod)	(&(iod)->iod_evlock)
#define	SMB_IOD_EVLOCK(iod)	smb_sl_lock(&(iod)->iod_evlock)
#define	SMB_IOD_EVUNLOCK(iod)	smb_sl_unlock(&(iod)->iod_evlock)
#define SMB_IOD_EVINTERLOCK(iod) (&(iod)->iod_evlock)

#define	SMB_IOD_RQLOCKPTR(iod)	(&(iod)->iod_rqlock)
#define	SMB_IOD_RQLOCK(iod)	smb_sl_lock(&((iod)->iod_rqlock))
#define	SMB_IOD_RQUNLOCK(iod)	smb_sl_unlock(&(iod)->iod_rqlock)
#define	SMB_IOD_RQINTERLOCK(iod) (&(iod)->iod_rqlock)

#define	smb_iod_wakeup(iod)	wakeup(&(iod)->iod_flags)


static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static int  smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

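/*
 * Record the final error for a request, advance its response
 * generation and wake up anyone sleeping in smb_iod_waitrq().
 */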
static __inline void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{
	SMBRQ_SLOCK(rqp);
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	wakeup(&rqp->sr_state);
	SMBRQ_SUNLOCK(rqp);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
#if 0
		/* this makes no sense whatsoever XXX */
		if (rqp->sr_flags & SMBR_INTERNAL)
			SMBRQ_SUNLOCK(rqp);
#endif
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, ENOTCONN);
	}
	SMB_IOD_RQUNLOCK(iod);
}

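/*
 * Tear down the transport attached to this VC, if any.
 */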
static void
smb_iod_closetran(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;

	if (vcp->vc_tdata == NULL)
		return;
	SMB_TRAN_DISCONNECT(vcp, td);
	SMB_TRAN_DONE(vcp, td);
	vcp->vc_tdata = NULL;
}

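/*
 * Declare the connection dead: drop the transport and fail all
 * outstanding requests with ENOTCONN.
 */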
static void
smb_iod_dead(struct smbiod *iod)
{
	iod->iod_state = SMBIOD_ST_DEAD;
	smb_iod_closetran(iod);
	smb_iod_invrq(iod);
}

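/*
 * (Re)establish the connection: create, bind and connect the
 * transport, then perform the SMB negotiate and session setup
 * exchanges.  Any failure marks the connection dead.
 */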
static int
smb_iod_connect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	int error;

	SMBIODEBUG("%d\n", iod->iod_state);
	switch(iod->iod_state) {
	    case SMBIOD_ST_VCACTIVE:
		SMBERROR("called for already opened connection\n");
		return EISCONN;
	    case SMBIOD_ST_DEAD:
		return ENOTCONN;	/* XXX: last error code ? */
	    default:
		break;
	}
	vcp->vc_genid++;

	do {
		error = SMB_TRAN_CREATE(vcp, td);
		if (error != 0)
			break;
		SMBIODEBUG("tcreate\n");

		if (vcp->vc_laddr) {
			error = SMB_TRAN_BIND(vcp, vcp->vc_laddr, td);
			if (error != 0)
				break;
		}
		SMBIODEBUG("tbind\n");

		error = SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, td);
		if (error != 0)
			break;
		SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
		SMBIODEBUG("tconnect\n");

/*		vcp->vc_mid = 0;*/

		error = smb_smb_negotiate(vcp, &iod->iod_scred);
		if (error != 0)
			break;
		SMBIODEBUG("snegotiate\n");

		error = smb_smb_ssnsetup(vcp, &iod->iod_scred);
		if (error != 0)
			break;
		iod->iod_state = SMBIOD_ST_VCACTIVE;
		SMBIODEBUG("completed\n");

		smb_iod_invrq(iod);
		error = 0;
	} while (0);

	if (error)
		smb_iod_dead(iod);
	return error;
}

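/*
 * Close the SMB session, if one is active, and shut down the
 * transport, returning the iod to the not-connected state.
 */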
static int
smb_iod_disconnect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;

	SMBIODEBUG("\n");
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &iod->iod_scred);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
	smb_iod_closetran(iod);
	iod->iod_state = SMBIOD_ST_NOTCONN;
	return 0;
}

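/*
 * (Re)connect a share, reconnecting the VC first if it is dead.
 * SMBS_RECONNECTING is set around the exchange so other threads
 * can tell that a reconnect is in progress.
 */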
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
	int error;

	if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
		if (iod->iod_state != SMBIOD_ST_DEAD)
			return ENOTCONN;
		iod->iod_state = SMBIOD_ST_RECONNECT;
		error = smb_iod_connect(iod);
		if (error)
			return error;
	}
	SMBIODEBUG("tree reconnect\n");
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags |= SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	error = smb_smb_treeconnect(ssp, &iod->iod_scred);
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags &= ~SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	wakeup(&ssp->ss_vcgenid);
	return error;
}

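/*
 * Try to transmit a single request.  The header TID/UID fields are
 * filled in on the first attempt, and the mbuf chain is copied so
 * the request can be resent; once sr_sendcnt exceeds 5 the request
 * is failed with SMBR_RESTART set.
 */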
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
	struct thread *td = iod->iod_td;
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_share *ssp = rqp->sr_share;
	struct mbuf *m;
	int error;

	SMBIODEBUG("iod_state = %d\n", iod->iod_state);
	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN);
		return 0;
	    case SMBIOD_ST_DEAD:
		iod->iod_state = SMBIOD_ST_RECONNECT;
		return 0;
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	if (rqp->sr_sendcnt == 0) {
#ifdef movedtoanotherplace
		if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
			return 0;
#endif
		*rqp->sr_rqtid = htole16(ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
		*rqp->sr_rquid = htole16(vcp ? vcp->vc_smbuid : 0);
		mb_fixhdr(&rqp->sr_rq);
	}
	if (rqp->sr_sendcnt++ > 5) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, rqp->sr_lerror);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return ENOTCONN;
	}
	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
	m_dumpm(rqp->sr_rq.mb_top);
	m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, MB_WAIT);
	error = rqp->sr_lerror = m ? SMB_TRAN_SEND(vcp, m, td) : ENOBUFS;
	if (error == 0) {
		getnanotime(&rqp->sr_timesent);
		iod->iod_lastrqsent = rqp->sr_timesent;
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		return 0;
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		return ENOTCONN;
	}
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR);
	return 0;
}

/*
 * Process incoming packets
 */
static int
smb_iod_recvall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	struct smb_rq *rqp;
	struct mbuf *m;
	u_char *hp;
	u_short mid;
	int error;

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
	    case SMBIOD_ST_DEAD:
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	for (;;) {
		m = NULL;
		error = SMB_TRAN_RECV(vcp, &m, td);
		if (error == EWOULDBLOCK)
			break;
		if (SMB_TRAN_FATAL(vcp, error)) {
			smb_iod_dead(iod);
			break;
		}
		if (error)
			break;
		if (m == NULL) {
			SMBERROR("tran returned NULL without error\n");
			error = EPIPE;
			continue;
		}
		m = m_pullup(m, SMB_HDRLEN);
		if (m == NULL)
			continue;	/* wait for a good packet */
		/*
		 * We now have a complete, but possibly invalid, SMB packet.
		 * Be careful while parsing it.
		 */
		m_dumpm(m);
		hp = mtod(m, u_char*);
		if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
			m_freem(m);
			continue;
		}
		mid = SMB_HDRMID(hp);
		SMBSDEBUG("mid %04x\n", (u_int)mid);
		SMB_IOD_RQLOCK(iod);
		TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
			if (rqp->sr_mid != mid)
				continue;
			SMBRQ_SLOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_SUNLOCK(rqp);
					SMBERROR("duplicate response %d (ignored)\n", mid);
					break;
				}
			}
			SMBRQ_SUNLOCK(rqp);
			smb_iod_rqprocessed(rqp, 0);
			break;
		}
		SMB_IOD_RQUNLOCK(iod);
		if (rqp == NULL) {
			SMBERROR("drop resp with mid %u\n", (u_int)mid);
/*			smb_printrqlist(vcp);*/
			m_freem(m);
		}
	}
	/*
	 * check for interrupts
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (smb_proc_intr(rqp->sr_cred->scr_td)) {
			smb_iod_rqprocessed(rqp, EINTR);
		}
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

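/*
 * Queue an event for the iod thread.  With SMBIOD_EV_SYNC set the
 * caller sleeps until the event has been processed and gets its
 * ev_error back, e.g.:
 *
 *	error = smb_iod_request(vcp->vc_iod,
 *	    SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
 */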
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
	struct smbiod_event *evp;
	int error;

	SMBIODEBUG("\n");
	evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
	evp->ev_type = event;
	evp->ev_ident = ident;
	SMB_IOD_EVLOCK(iod);
	STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
	if ((event & SMBIOD_EV_SYNC) == 0) {
		SMB_IOD_EVUNLOCK(iod);
		smb_iod_wakeup(iod);
		return 0;
	}
	smb_iod_wakeup(iod);
	smb_sleep(evp, SMB_IOD_EVINTERLOCK(iod), PDROP, "90evw", 0);
	error = evp->ev_error;
	kfree(evp, M_SMBIOD);
	return error;
}

/*
 * Place the request in the queue.
 * Requests from smbiod itself have a high priority.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_cred->scr_td == iod->iod_td) {
		rqp->sr_flags |= SMBR_INTERNAL;
		SMB_IOD_RQLOCK(iod);
		TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		for (;;) {
			if (smb_iod_sendrq(iod, rqp) != 0) {
				smb_iod_dead(iod);
				break;
			}
			/*
			 * we don't need to lock the state field here
			 */
			if (rqp->sr_state != SMBRQ_NOTSENT)
				break;
			tsleep(&iod->iod_flags, 0, "90sndw", hz);
		}
		if (rqp->sr_lerror)
			smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		return ENOTCONN;
	    case SMBIOD_ST_DEAD:
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
		if (error)
			return error;
		return EXDEV;
	    default:
		break;
	}

	SMB_IOD_RQLOCK(iod);
	for (;;) {
		if (vcp->vc_maxmux == 0) {
			SMBERROR("maxmux == 0\n");
			break;
		}
		if (iod->iod_muxcnt < vcp->vc_maxmux)
			break;
		iod->iod_muxwant++;
		smb_sleep(&iod->iod_muxwant, SMB_IOD_RQINTERLOCK(iod), 0, "90mux", 0);
	}
	iod->iod_muxcnt++;
	TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
	SMB_IOD_RQUNLOCK(iod);
	smb_iod_wakeup(iod);
	return 0;
}

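/*
 * Take a request off the queue.  Non-internal requests may be
 * transiently XLOCKed by smb_iod_sendall(), so wait for that to
 * clear, then release a mux slot to any waiter.
 */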
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		return 0;
	}
	SMB_IOD_RQLOCK(iod);
	while (rqp->sr_flags & SMBR_XLOCK) {
		rqp->sr_flags |= SMBR_XLOCKWANT;
		smb_sleep(rqp, SMB_IOD_RQINTERLOCK(iod), 0, "90xrm", 0);
	}
	TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
	iod->iod_muxcnt--;
	if (iod->iod_muxwant) {
		iod->iod_muxwant--;
		wakeup(&iod->iod_muxwant);
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

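/*
 * Wait for a response to a request.  Internal requests are polled
 * here on behalf of the iod thread itself; multipacket requests
 * stay queued until their last response arrives.
 */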
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smbiod *iod = rqp->sr_vc->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		for (;;) {
			smb_iod_sendall(iod);
			smb_iod_recvall(iod);
			if (rqp->sr_rpgen != rqp->sr_rplast)
				break;
			tsleep(&iod->iod_flags, 0, "90irq", hz);
		}
		smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}
	SMBRQ_SLOCK(rqp);
	if (rqp->sr_rpgen == rqp->sr_rplast)
		smb_sleep(&rqp->sr_state, SMBRQ_INTERLOCK(rqp), 0, "90wrq", 0);
	rqp->sr_rplast++;
	SMBRQ_SUNLOCK(rqp);
	error = rqp->sr_lerror;
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		/*
		 * If the request should stay in the list, reinsert it
		 * at the end of the queue so other waiters have a
		 * chance to proceed.
		 */
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
	} else
		smb_iod_removerq(rqp);
	return error;
}


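/*
 * Walk the request list: transmit requests that are still NOTSENT
 * and time out SENT requests that are older than twice the
 * transport timeout.
 */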
static int
smb_iod_sendall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_rq *rqp;
	struct timespec ts, tstimeout;
	int herror;

	herror = 0;
	/*
	 * Loop through the list of requests and send them if possible
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		switch (rqp->sr_state) {
		    case SMBRQ_NOTSENT:
			rqp->sr_flags |= SMBR_XLOCK;
			SMB_IOD_RQUNLOCK(iod);
			herror = smb_iod_sendrq(iod, rqp);
			SMB_IOD_RQLOCK(iod);
			rqp->sr_flags &= ~SMBR_XLOCK;
			if (rqp->sr_flags & SMBR_XLOCKWANT) {
				rqp->sr_flags &= ~SMBR_XLOCKWANT;
				wakeup(rqp);
			}
			break;
		    case SMBRQ_SENT:
			SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
			timespecadd(&tstimeout, &tstimeout);
			getnanotime(&ts);
			timespecsub(&ts, &tstimeout);
			if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
				smb_iod_rqprocessed(rqp, ETIMEDOUT);
			}
			break;
		    default:
			break;
		}
		if (herror)
			break;
	}
	SMB_IOD_RQUNLOCK(iod);
	if (herror == ENOTCONN)
		smb_iod_dead(iod);
	return 0;
}

/*
 * "main" function for smbiod daemon
 */
static __inline void
smb_iod_main(struct smbiod *iod)
{
/*	struct smb_vc *vcp = iod->iod_vc;*/
	struct smbiod_event *evp;
/*	struct timespec tsnow;*/
	int error;

	SMBIODEBUG("\n");
	error = 0;

	/*
	 * Check all interesting events
	 */
	for (;;) {
		SMB_IOD_EVLOCK(iod);
		evp = STAILQ_FIRST(&iod->iod_evlist);
		if (evp == NULL) {
			SMB_IOD_EVUNLOCK(iod);
			break;
		}
		STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
		evp->ev_type |= SMBIOD_EV_PROCESSING;
		SMB_IOD_EVUNLOCK(iod);
		switch (evp->ev_type & SMBIOD_EV_MASK) {
		    case SMBIOD_EV_CONNECT:
			iod->iod_state = SMBIOD_ST_RECONNECT;
			evp->ev_error = smb_iod_connect(iod);
			break;
		    case SMBIOD_EV_DISCONNECT:
			evp->ev_error = smb_iod_disconnect(iod);
			break;
		    case SMBIOD_EV_TREECONNECT:
			evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
			break;
		    case SMBIOD_EV_SHUTDOWN:
			iod->iod_flags |= SMBIOD_SHUTDOWN;
			break;
		    case SMBIOD_EV_NEWRQ:
			break;
		}
		if (evp->ev_type & SMBIOD_EV_SYNC) {
			SMB_IOD_EVLOCK(iod);
			wakeup(evp);
			SMB_IOD_EVUNLOCK(iod);
		} else
			kfree(evp, M_SMBIOD);
	}
#if 0
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		getnanotime(&tsnow);
		timespecsub(&tsnow, &iod->iod_pingtimo);
		if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
			smb_smb_echo(vcp, &iod->iod_scred);
		}
	}
#endif
	smb_iod_sendall(iod);
	smb_iod_recvall(iod);
	return;
}

#define	kthread_create_compat	smb_kthread_create
#define kthread_exit_compat	smb_kthread_exit

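/*
 * Per-VC kernel thread body: run smb_iod_main() until told to shut
 * down, sleeping iod_sleeptimo ticks between passes unless woken.
 */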
static void
smb_iod_thread(void *arg)
{
	struct smbiod *iod = arg;

	/*
	 * mplock not held on entry but we aren't mpsafe yet.
	 */
	get_mplock();

	smb_makescred(&iod->iod_scred, iod->iod_td, NULL);
	while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
		smb_iod_main(iod);
		SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
		if (iod->iod_flags & SMBIOD_SHUTDOWN)
			break;
		tsleep(&iod->iod_flags, 0, "90idle", iod->iod_sleeptimo);
	}
	kthread_exit_compat();
}

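/*
 * Allocate an iod for a freshly created VC and start its thread.
 */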
int
smb_iod_create(struct smb_vc *vcp)
{
	struct smbiod *iod;
	struct proc *newp = NULL;
	int error;

	iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
	iod->iod_id = smb_iod_next++;
	iod->iod_state = SMBIOD_ST_NOTCONN;
	iod->iod_vc = vcp;
	iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
	iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
	getnanotime(&iod->iod_lastrqsent);
	vcp->vc_iod = iod;
	smb_sl_init(&iod->iod_rqlock, "90rql");
	TAILQ_INIT(&iod->iod_rqlist);
	smb_sl_init(&iod->iod_evlock, "90evl");
	STAILQ_INIT(&iod->iod_evlist);
	error = kthread_create_compat(smb_iod_thread, iod, &newp,
	    RFNOWAIT, "smbiod%d", iod->iod_id);
	if (error) {
		SMBERROR("can't start smbiod: %d\n", error);
		vcp->vc_iod = NULL;	/* don't leave a dangling pointer */
		kfree(iod, M_SMBIOD);
		return error;
	}
	/* XXX lwp */
	iod->iod_td = ONLY_LWP_IN_PROC(newp)->lwp_thread;
	return 0;
}

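/*
 * Synchronously shut the iod thread down, then free its resources.
 */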
int
smb_iod_destroy(struct smbiod *iod)
{
	smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
	smb_sl_destroy(&iod->iod_rqlock);
	smb_sl_destroy(&iod->iod_evlock);
	kfree(iod, M_SMBIOD);
	return 0;
}

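/*
 * Module-level init/teardown hooks; nothing to do here.
 */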
int
smb_iod_init(void)
{
	return 0;
}

int
smb_iod_done(void)
{
	return 0;
}