xref: /dragonfly/sys/netproto/smb/smb_iod.c (revision 5de36205)
/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netsmb/smb_iod.c,v 1.1.2.2 2002/04/23 03:45:01 bp Exp $
 * $DragonFly: src/sys/netproto/smb/smb_iod.c,v 1.11 2005/01/06 22:31:16 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include "smb.h"
#include "smb_conn.h"
#include "smb_rq.h"
#include "smb_tran.h"
#include "smb_trantcp.h"


#define SMBIOD_SLEEP_TIMO	2
#define	SMBIOD_PING_TIMO	60	/* seconds */

#define	SMB_IOD_EVLOCKPTR(iod)	(&((iod)->iod_evlock))
#define	SMB_IOD_EVLOCK(ilock, iod)	smb_sl_lock(ilock, &((iod)->iod_evlock))
#define	SMB_IOD_EVUNLOCK(ilock)	smb_sl_unlock(ilock)

#define	SMB_IOD_RQLOCKPTR(iod)	(&((iod)->iod_rqlock))
#define	SMB_IOD_RQLOCK(ilock, iod)	smb_sl_lock(ilock, &((iod)->iod_rqlock))
#define	SMB_IOD_RQUNLOCK(ilock)	smb_sl_unlock(ilock)

#define	smb_iod_wakeup(iod)	wakeup(&(iod)->iod_flags)


static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static int  smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

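/*
 * Mark a request as processed: record the completion error, advance the
 * response generation and wake anyone sleeping in smb_iod_waitrq().
 */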
static __inline void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{
	smb_ilock ilock;

	SMBRQ_SLOCK(&ilock, rqp);
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	wakeup(&rqp->sr_state);
	SMBRQ_SUNLOCK(&ilock);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
	struct smb_rq *rqp;
	smb_ilock ilock;

	/*
	 * Invalidate all outstanding requests for this connection
	 */
	SMB_IOD_RQLOCK(&ilock, iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
#if 0
		/* this makes no sense whatsoever XXX */
		if (rqp->sr_flags & SMBR_INTERNAL)
			SMBRQ_SUNLOCK(rqp);
#endif
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, ENOTCONN);
	}
	SMB_IOD_RQUNLOCK(&ilock);
}

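/*
 * Tear down the transport layer of the VC, if any.
 */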
static void
smb_iod_closetran(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;

	if (vcp->vc_tdata == NULL)
		return;
	SMB_TRAN_DISCONNECT(vcp, td);
	SMB_TRAN_DONE(vcp, td);
	vcp->vc_tdata = NULL;
}

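/*
 * Declare the connection dead: close the transport and fail all
 * outstanding requests with ENOTCONN.
 */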
static void
smb_iod_dead(struct smbiod *iod)
{
	iod->iod_state = SMBIOD_ST_DEAD;
	smb_iod_closetran(iod);
	smb_iod_invrq(iod);
}

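/*
 * (Re)establish the connection: create the transport, bind an optional
 * local address, connect to the server, then run the SMB negotiate and
 * session setup exchanges.  On any failure the connection is marked dead.
 */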
static int
smb_iod_connect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	int error;

	SMBIODEBUG("%d\n", iod->iod_state);
	switch(iod->iod_state) {
	    case SMBIOD_ST_VCACTIVE:
		SMBERROR("called for an already opened connection\n");
		return EISCONN;
	    case SMBIOD_ST_DEAD:
		return ENOTCONN;	/* XXX: last error code ? */
	    default:
		break;
	}
	vcp->vc_genid++;
	error = 0;
	itry {
		ithrow(SMB_TRAN_CREATE(vcp, td));
		SMBIODEBUG("tcreate\n");
		if (vcp->vc_laddr) {
			ithrow(SMB_TRAN_BIND(vcp, vcp->vc_laddr, td));
		}
		SMBIODEBUG("tbind\n");
		ithrow(SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, td));
		SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
		SMBIODEBUG("tconnect\n");
/*		vcp->vc_mid = 0;*/
		ithrow(smb_smb_negotiate(vcp, &iod->iod_scred));
		SMBIODEBUG("snegotiate\n");
		ithrow(smb_smb_ssnsetup(vcp, &iod->iod_scred));
		iod->iod_state = SMBIOD_ST_VCACTIVE;
		SMBIODEBUG("completed\n");
		smb_iod_invrq(iod);
	} icatch(error) {
		smb_iod_dead(iod);
	} ifinally {
	} iendtry;
	return error;
}

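/*
 * Close the SMB session (if still active) and shut down the transport,
 * returning the iod to the not-connected state.
 */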
static int
smb_iod_disconnect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;

	SMBIODEBUG("\n");
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &iod->iod_scred);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
	smb_iod_closetran(iod);
	iod->iod_state = SMBIOD_ST_NOTCONN;
	return 0;
}

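/*
 * Reconnect a share: bring the VC back up if it died, then redo the
 * tree connect.  Waiters on ss_vcgenid are woken when we are done.
 */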
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
	int error;
	smb_ilock ilock;

	if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
		if (iod->iod_state != SMBIOD_ST_DEAD)
			return ENOTCONN;
		iod->iod_state = SMBIOD_ST_RECONNECT;
		error = smb_iod_connect(iod);
		if (error)
			return error;
	}
	SMBIODEBUG("tree reconnect\n");
	SMBS_ST_LOCK(&ilock, ssp);
	ssp->ss_flags |= SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(&ilock);
	error = smb_smb_treeconnect(ssp, &iod->iod_scred);
	SMBS_ST_LOCK(&ilock, ssp);
	ssp->ss_flags &= ~SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(&ilock);
	wakeup(&ssp->ss_vcgenid);
	return error;
}

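/*
 * Try to transmit a single request.  The first attempt stamps the TID and
 * UID into the SMB header; after more than five failed attempts the request
 * is failed and ENOTCONN is returned so the caller can declare the VC dead.
 */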
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
	struct thread *td = iod->iod_td;
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_share *ssp = rqp->sr_share;
	struct mbuf *m;
	int error;

	SMBIODEBUG("iod_state = %d\n", iod->iod_state);
	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN);
		return 0;
	    case SMBIOD_ST_DEAD:
		iod->iod_state = SMBIOD_ST_RECONNECT;
		return 0;
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	if (rqp->sr_sendcnt == 0) {
#ifdef movedtoanotherplace
		if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
			return 0;
#endif
		*rqp->sr_rqtid = htoles(ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
		*rqp->sr_rquid = htoles(vcp ? vcp->vc_smbuid : 0);
		mb_fixhdr(&rqp->sr_rq);
	}
	if (rqp->sr_sendcnt++ > 5) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, rqp->sr_lerror);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return ENOTCONN;
	}
	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
	m_dumpm(rqp->sr_rq.mb_top);
	m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, MB_WAIT);
	error = rqp->sr_lerror = m ? SMB_TRAN_SEND(vcp, m, td) : ENOBUFS;
	if (error == 0) {
		getnanotime(&rqp->sr_timesent);
		iod->iod_lastrqsent = rqp->sr_timesent;
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		return 0;
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		return ENOTCONN;
	}
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR);
	return 0;
}

/*
 * Process incoming packets
 */
static int
smb_iod_recvall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	struct smb_rq *rqp;
	struct mbuf *m;
	u_char *hp;
	u_short mid;
	int error;
	smb_ilock ilock;
	smb_ilock jlock;

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
	    case SMBIOD_ST_DEAD:
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	for (;;) {
		m = NULL;
		error = SMB_TRAN_RECV(vcp, &m, td);
		if (error == EWOULDBLOCK)
			break;
		if (SMB_TRAN_FATAL(vcp, error)) {
			smb_iod_dead(iod);
			break;
		}
		if (error)
			break;
		if (m == NULL) {
			SMBERROR("tran returned NULL without error\n");
			error = EPIPE;
			continue;
		}
		m = m_pullup(m, SMB_HDRLEN);
		if (m == NULL)
			continue;	/* wait for a good packet */
		/*
		 * We now have a complete, but possibly invalid, SMB packet.
		 * Be careful while parsing it.
		 */
		m_dumpm(m);
		hp = mtod(m, u_char*);
		if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
			m_freem(m);
			continue;
		}
		mid = SMB_HDRMID(hp);
		SMBSDEBUG("mid %04x\n", (u_int)mid);
		SMB_IOD_RQLOCK(&ilock, iod);
		TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
			if (rqp->sr_mid != mid)
				continue;
			SMBRQ_SLOCK(&jlock, rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_SUNLOCK(&jlock);
					SMBERROR("duplicate response %d (ignored)\n", mid);
					break;
				}
			}
			SMBRQ_SUNLOCK(&jlock);
			smb_iod_rqprocessed(rqp, 0);
			break;
		}
		SMB_IOD_RQUNLOCK(&ilock);
		if (rqp == NULL) {
			SMBERROR("drop resp with mid %d\n", (u_int)mid);
/*			smb_printrqlist(vcp);*/
			m_freem(m);
		}
	}
	/*
	 * check for interrupts
	 */
	SMB_IOD_RQLOCK(&ilock, iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (smb_proc_intr(rqp->sr_cred->scr_td)) {
			smb_iod_rqprocessed(rqp, EINTR);
		}
	}
	SMB_IOD_RQUNLOCK(&ilock);
	return 0;
}

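/*
 * Post an event to the iod thread.  For SMBIOD_EV_SYNC requests we sleep
 * until the event has been processed and hand back its error code.
 */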
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
	struct smbiod_event *evp;
	int error;
	smb_ilock ilock;

	SMBIODEBUG("\n");
	evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
	evp->ev_type = event;
	evp->ev_ident = ident;
	SMB_IOD_EVLOCK(&ilock, iod);
	STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
	if ((event & SMBIOD_EV_SYNC) == 0) {
		SMB_IOD_EVUNLOCK(&ilock);
		smb_iod_wakeup(iod);
		return 0;
	}
	smb_iod_wakeup(iod);
	smb_sleep(evp, &ilock, PDROP, "90evw", 0);
	error = evp->ev_error;
	free(evp, M_SMBIOD);
	return error;
}

/*
 * Place a request in the queue.  Requests from the smbiod itself
 * have a high priority.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;
	smb_ilock ilock;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_cred->scr_td == iod->iod_td) {
		rqp->sr_flags |= SMBR_INTERNAL;
		SMB_IOD_RQLOCK(&ilock, iod);
		TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(&ilock);
		for (;;) {
			if (smb_iod_sendrq(iod, rqp) != 0) {
				smb_iod_dead(iod);
				break;
			}
			/*
			 * we don't need to lock state field here
			 */
			if (rqp->sr_state != SMBRQ_NOTSENT)
				break;
			tsleep(&iod->iod_flags, 0, "90sndw", hz);
		}
		if (rqp->sr_lerror)
			smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		return ENOTCONN;
	    case SMBIOD_ST_DEAD:
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
		if (error)
			return error;
		return EXDEV;
	    default:
		break;
	}

	SMB_IOD_RQLOCK(&ilock, iod);
	for (;;) {
		if (vcp->vc_maxmux == 0) {
			SMBERROR("maxmux == 0\n");
			break;
		}
		if (iod->iod_muxcnt < vcp->vc_maxmux)
			break;
		iod->iod_muxwant++;
		smb_sleep(&iod->iod_muxwant, &ilock, 0, "90mux", 0);
	}
	iod->iod_muxcnt++;
	TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
	SMB_IOD_RQUNLOCK(&ilock);
	smb_iod_wakeup(iod);
	return 0;
}

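/*
 * Remove a request from the queue; for non-internal requests also release
 * a mux slot and wake any thread waiting for one.
 */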
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;
	smb_ilock ilock;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		SMB_IOD_RQLOCK(&ilock, iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(&ilock);
		return 0;
	}
	SMB_IOD_RQLOCK(&ilock, iod);
	while (rqp->sr_flags & SMBR_XLOCK) {
		rqp->sr_flags |= SMBR_XLOCKWANT;
		smb_sleep(rqp, &ilock, 0, "90xrm", 0);
	}
	TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
	iod->iod_muxcnt--;
	if (iod->iod_muxwant) {
		iod->iod_muxwant--;
		wakeup(&iod->iod_muxwant);
	}
	SMB_IOD_RQUNLOCK(&ilock);
	return 0;
}

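/*
 * Wait for a request to complete.  Internal requests are polled by
 * driving the send/receive loop directly from this thread.
 */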
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smbiod *iod = rqp->sr_vc->vc_iod;
	smb_ilock ilock;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		for (;;) {
			smb_iod_sendall(iod);
			smb_iod_recvall(iod);
			if (rqp->sr_rpgen != rqp->sr_rplast)
				break;
			tsleep(&iod->iod_flags, 0, "90irq", hz);
		}
		smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}
	SMBRQ_SLOCK(&ilock, rqp);
	if (rqp->sr_rpgen == rqp->sr_rplast)
		smb_sleep(&rqp->sr_state, &ilock, 0, "90wrq", 0);
	rqp->sr_rplast++;
	SMBRQ_SUNLOCK(&ilock);
	error = rqp->sr_lerror;
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		/*
		 * If the request should stay in the list, reinsert it
		 * at the end of the queue so other waiters get a chance
		 * to run.
		 */
		SMB_IOD_RQLOCK(&ilock, iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(&ilock);
	} else
		smb_iod_removerq(rqp);
	return error;
}


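/*
 * Walk the request list: transmit anything not yet sent and time out
 * requests that have been outstanding too long.
 */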
static int
smb_iod_sendall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_rq *rqp;
	struct timespec ts, tstimeout;
	smb_ilock ilock;
	int herror;

	herror = 0;
	/*
	 * Loop through the list of requests and send them if possible
	 */
	SMB_IOD_RQLOCK(&ilock, iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		switch (rqp->sr_state) {
		    case SMBRQ_NOTSENT:
			rqp->sr_flags |= SMBR_XLOCK;
			SMB_IOD_RQUNLOCK(&ilock);
			herror = smb_iod_sendrq(iod, rqp);
			SMB_IOD_RQLOCK(&ilock, iod);
			rqp->sr_flags &= ~SMBR_XLOCK;
			if (rqp->sr_flags & SMBR_XLOCKWANT) {
				rqp->sr_flags &= ~SMBR_XLOCKWANT;
				wakeup(rqp);
			}
			break;
		    case SMBRQ_SENT:
			/*
			 * Time the request out if it has been outstanding
			 * for more than twice the transport timeout.
			 */
			SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
			timespecadd(&tstimeout, &tstimeout);
			getnanotime(&ts);
			timespecsub(&ts, &tstimeout);
			if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
				smb_iod_rqprocessed(rqp, ETIMEDOUT);
			}
			break;
		    default:
			break;
		}
		if (herror)
			break;
	}
	SMB_IOD_RQUNLOCK(&ilock);
	if (herror == ENOTCONN)
		smb_iod_dead(iod);
	return 0;
}

/*
 * "main" function for smbiod daemon
 */
static __inline void
smb_iod_main(struct smbiod *iod)
{
/*	struct smb_vc *vcp = iod->iod_vc;*/
	struct smbiod_event *evp;
/*	struct timespec tsnow;*/
	int error;
	smb_ilock ilock;

	SMBIODEBUG("\n");
	error = 0;

	/*
	 * Check all interesting events
	 */
	for (;;) {
		SMB_IOD_EVLOCK(&ilock, iod);
		evp = STAILQ_FIRST(&iod->iod_evlist);
		if (evp == NULL) {
			SMB_IOD_EVUNLOCK(&ilock);
			break;
		}
		STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
		evp->ev_type |= SMBIOD_EV_PROCESSING;
		SMB_IOD_EVUNLOCK(&ilock);
		switch (evp->ev_type & SMBIOD_EV_MASK) {
		    case SMBIOD_EV_CONNECT:
			iod->iod_state = SMBIOD_ST_RECONNECT;
			evp->ev_error = smb_iod_connect(iod);
			break;
		    case SMBIOD_EV_DISCONNECT:
			evp->ev_error = smb_iod_disconnect(iod);
			break;
		    case SMBIOD_EV_TREECONNECT:
			evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
			break;
		    case SMBIOD_EV_SHUTDOWN:
			iod->iod_flags |= SMBIOD_SHUTDOWN;
			break;
		    case SMBIOD_EV_NEWRQ:
			break;
		}
		if (evp->ev_type & SMBIOD_EV_SYNC) {
			SMB_IOD_EVLOCK(&ilock, iod);
			wakeup(evp);
			SMB_IOD_EVUNLOCK(&ilock);
		} else
			free(evp, M_SMBIOD);
	}
#if 0
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		getnanotime(&tsnow);
		timespecsub(&tsnow, &iod->iod_pingtimo);
		if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
			smb_smb_echo(vcp, &iod->iod_scred);
		}
	}
#endif
	smb_iod_sendall(iod);
	smb_iod_recvall(iod);
	return;
}

#define	kthread_create_compat	kthread_create2
#define kthread_exit_compat	kthread_exit2

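/*
 * The iod kernel thread: run the main loop until SMBIOD_SHUTDOWN is set,
 * sleeping between passes.
 */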
void
smb_iod_thread(void *arg)
{
	struct smbiod *iod = arg;

	smb_makescred(&iod->iod_scred, iod->iod_td, NULL);
	while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
		smb_iod_main(iod);
		SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
		if (iod->iod_flags & SMBIOD_SHUTDOWN)
			break;
		tsleep(&iod->iod_flags, 0, "90idle", iod->iod_sleeptimo);
	}
	kthread_exit_compat();
}

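/*
 * Allocate and initialize an iod for the given VC and start its
 * kernel thread.
 */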
int
smb_iod_create(struct smb_vc *vcp)
{
	struct smbiod *iod;
	struct proc *newp = NULL;
	int error;

	iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
	iod->iod_id = smb_iod_next++;
	iod->iod_state = SMBIOD_ST_NOTCONN;
	iod->iod_vc = vcp;
	iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
	iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
	getnanotime(&iod->iod_lastrqsent);
	vcp->vc_iod = iod;
	smb_sl_init(&iod->iod_rqlock, "90rql");
	TAILQ_INIT(&iod->iod_rqlist);
	smb_sl_init(&iod->iod_evlock, "90evl");
	STAILQ_INIT(&iod->iod_evlist);
	error = kthread_create_compat(smb_iod_thread, iod, &newp,
	    RFNOWAIT, "smbiod%d", iod->iod_id);
	if (error) {
		SMBERROR("can't start smbiod: %d", error);
		free(iod, M_SMBIOD);
		return error;
	}
	iod->iod_td = newp->p_thread;
	return 0;
}

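/*
 * Shut down the iod thread synchronously, then release its locks
 * and memory.
 */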
int
smb_iod_destroy(struct smbiod *iod)
{
	smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
	smb_sl_destroy(&iod->iod_rqlock);
	smb_sl_destroy(&iod->iod_evlock);
	free(iod, M_SMBIOD);
	return 0;
}

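/*
 * Module-level init/teardown hooks; nothing to do for the iod subsystem.
 */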
int
smb_iod_init(void)
{
	return 0;
}

int
smb_iod_done(void)
{
	return 0;
}