xref: /dragonfly/sys/net/tun/if_tun.c (revision e2f5ccfb)
1 /*	$NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $	*/
2 
3 /*
4  * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
5  * Nottingham University 1987.
6  *
7  * This source may be freely distributed, however I would be interested
8  * in any changes that are made.
9  *
10  * This driver takes packets off the IP i/f and hands them up to a
11  * user process to have its wicked way with. This driver has it's
12  * roots in a similar driver written by Phil Cockcroft (formerly) at
13  * UCL. This driver is based much more on read/write/poll mode of
14  * operation though.
15  *
16  * $FreeBSD: src/sys/net/if_tun.c,v 1.74.2.8 2002/02/13 00:43:11 dillon Exp $
17  * $DragonFly: src/sys/net/tun/if_tun.c,v 1.37 2008/06/05 18:06:32 swildner Exp $
18  */
19 
20 #include "opt_atalk.h"
21 #include "opt_inet.h"
22 #include "opt_inet6.h"
23 #include "opt_ipx.h"
24 
25 #include <sys/param.h>
26 #include <sys/proc.h>
27 #include <sys/systm.h>
28 #include <sys/mbuf.h>
29 #include <sys/socket.h>
30 #include <sys/conf.h>
31 #include <sys/device.h>
32 #include <sys/filio.h>
33 #include <sys/sockio.h>
34 #include <sys/thread2.h>
35 #include <sys/ttycom.h>
36 #include <sys/poll.h>
37 #include <sys/signalvar.h>
38 #include <sys/filedesc.h>
39 #include <sys/kernel.h>
40 #include <sys/sysctl.h>
41 #include <sys/uio.h>
42 #include <sys/vnode.h>
43 #include <sys/malloc.h>
44 
45 #include <net/if.h>
46 #include <net/if_types.h>
47 #include <net/ifq_var.h>
48 #include <net/netisr.h>
49 #include <net/route.h>
50 
51 #ifdef INET
52 #include <netinet/in.h>
53 #endif
54 
55 #include <net/bpf.h>
56 
57 #include "if_tunvar.h"
58 #include "if_tun.h"
59 
static MALLOC_DEFINE(M_TUN, "tun", "Tunnel Interface");

/* PSEUDO_SET arranges for tunattach() to run at module/boot init. */
static void tunattach (void *);
PSEUDO_SET(tunattach, if_tun);

static void tuncreate (cdev_t dev);

/* Conditional debug printf, toggled via sysctl debug.if_tun_debug. */
#define TUNDEBUG	if (tundebug) if_printf
static int tundebug = 0;
SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, "");

static int tunoutput (struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *rt);
static int tunifioctl (struct ifnet *, u_long, caddr_t, struct ucred *);
static int tuninit (struct ifnet *);
static void tunstart(struct ifnet *);

/* Character-device entry points for /dev/tun*. */
static	d_open_t	tunopen;
static	d_close_t	tunclose;
static	d_read_t	tunread;
static	d_write_t	tunwrite;
static	d_ioctl_t	tunioctl;
static	d_poll_t	tunpoll;

#define CDEV_MAJOR 52
static struct dev_ops tun_ops = {
	{ "tun", CDEV_MAJOR, 0 },
	.d_open =	tunopen,
	.d_close =	tunclose,
	.d_read =	tunread,
	.d_write =	tunwrite,
	.d_ioctl =	tunioctl,
	.d_poll =	tunpoll,
};
94 
/*
 * Attach hook (run via PSEUDO_SET at init): register the tun
 * character-device ops so the device nodes can be opened.
 */
static void
tunattach(void *dummy)
{
	dev_ops_add(&tun_ops, 0, 0);
}
100 
/*
 * Create the softc and network interface for one tun unit.  Called
 * lazily from tunopen() the first time a given minor is opened.
 * dev->si_drv1 is assigned last, after the ifnet is fully attached,
 * so tunopen() can use it to detect an uninitialized unit.
 */
static void
tuncreate(cdev_t dev)
{
	struct tun_softc *sc;
	struct ifnet *ifp;

	/* Persistent device node: uucp:dialer, mode 0600. */
	dev = make_dev(&tun_ops, minor(dev),
	    UID_UUCP, GID_DIALER, 0600, "tun%d", lminor(dev));

	MALLOC(sc, struct tun_softc *, sizeof(*sc), M_TUN, M_WAITOK | M_ZERO);
	sc->tun_flags = TUN_INITED;

	ifp = &sc->tun_if;
	if_initname(ifp, "tun", lminor(dev));
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tunifioctl;
	ifp->if_output = tunoutput;
	ifp->if_start = tunstart;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PPP;
	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
	ifq_set_ready(&ifp->if_snd);
	ifp->if_softc = sc;
	if_attach(ifp, NULL);
	/* DLT_NULL: bpf frames carry a 4-byte address-family header. */
	bpfattach(ifp, DLT_NULL, sizeof(u_int));
	dev->si_drv1 = sc;
}
128 
129 /*
130  * tunnel open - must be superuser & the device must be
131  * configured in
132  */
133 static	int
134 tunopen(struct dev_open_args *ap)
135 {
136 	cdev_t dev = ap->a_head.a_dev;
137 	struct ifnet	*ifp;
138 	struct tun_softc *tp;
139 	int	error;
140 
141 	if ((error = suser_cred(ap->a_cred, 0)) != 0)
142 		return (error);
143 
144 	tp = dev->si_drv1;
145 	if (!tp) {
146 		tuncreate(dev);
147 		tp = dev->si_drv1;
148 	}
149 	if (tp->tun_flags & TUN_OPEN)
150 		return EBUSY;
151 	tp->tun_pid = curproc->p_pid;
152 	ifp = &tp->tun_if;
153 	tp->tun_flags |= TUN_OPEN;
154 	TUNDEBUG(ifp, "open\n");
155 	return (0);
156 }
157 
/*
 * tunclose - close the device: clear open/owner state, junk queued
 * output, mark the interface down, strip its addresses, and notify
 * any async-I/O owner and waiting pollers.
 */
static	int
tunclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp;
	struct ifnet	*ifp;

	tp = dev->si_drv1;
	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;
	tp->tun_pid = 0;

	/* Junk all pending output. */
	lwkt_serialize_enter(ifp->if_serializer);
	ifq_purge(&ifp->if_snd);
	lwkt_serialize_exit(ifp->if_serializer);

	if (ifp->if_flags & IFF_UP) {
		lwkt_serialize_enter(ifp->if_serializer);
		if_down(ifp);
		lwkt_serialize_exit(ifp->if_serializer);
	}
	ifp->if_flags &= ~IFF_RUNNING;
	/* Remove all addresses except the link-level one. */
	if_purgeaddrs_nolink(ifp);

	/* Drop SIGIO ownership and wake anyone selecting on reads. */
	funsetown(tp->tun_sigio);
	selwakeup(&tp->tun_rsel);

	TUNDEBUG(ifp, "closed\n");
	return (0);
}
194 
195 static int
196 tuninit(struct ifnet *ifp)
197 {
198 	struct tun_softc *tp = ifp->if_softc;
199 	struct ifaddr_container *ifac;
200 	int error = 0;
201 
202 	TUNDEBUG(ifp, "tuninit\n");
203 
204 	ifp->if_flags |= IFF_UP | IFF_RUNNING;
205 	getmicrotime(&ifp->if_lastchange);
206 
207 	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
208 		struct ifaddr *ifa = ifac->ifa;
209 
210 		if (ifa->ifa_addr == NULL) {
211 			error = EFAULT;
212 			/* XXX: Should maybe return straight off? */
213 		} else {
214 #ifdef INET
215 			if (ifa->ifa_addr->sa_family == AF_INET) {
216 			    struct sockaddr_in *si;
217 
218 			    si = (struct sockaddr_in *)ifa->ifa_addr;
219 			    if (si->sin_addr.s_addr)
220 				    tp->tun_flags |= TUN_IASET;
221 			}
222 #endif
223 		}
224 	}
225 	return (error);
226 }
227 
228 /*
229  * Process an ioctl request.
230  *
231  * MPSAFE
232  */
233 int
234 tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
235 {
236 	struct ifreq *ifr = (struct ifreq *)data;
237 	struct tun_softc *tp = ifp->if_softc;
238 	struct ifstat *ifs;
239 	int error = 0;
240 
241 	switch(cmd) {
242 	case SIOCGIFSTATUS:
243 		ifs = (struct ifstat *)data;
244 		if (tp->tun_pid)
245 			ksprintf(ifs->ascii + strlen(ifs->ascii),
246 			    "\tOpened by PID %d\n", tp->tun_pid);
247 		break;
248 	case SIOCSIFADDR:
249 		error = tuninit(ifp);
250 		TUNDEBUG(ifp, "address set, error=%d\n", error);
251 		break;
252 	case SIOCSIFDSTADDR:
253 		error = tuninit(ifp);
254 		TUNDEBUG(ifp, "destination address set, error=%d\n", error);
255 		break;
256 	case SIOCSIFMTU:
257 		ifp->if_mtu = ifr->ifr_mtu;
258 		TUNDEBUG(ifp, "mtu set\n");
259 		break;
260 	case SIOCSIFFLAGS:
261 	case SIOCADDMULTI:
262 	case SIOCDELMULTI:
263 		break;
264 	default:
265 		error = EINVAL;
266 	}
267 	return (error);
268 }
269 
/*
 * tunoutput_serialized - queue a packet from the stack so the user
 * process can read it.  Handles optional prepends (TUN_LMODE: full
 * sockaddr; TUN_IFHEAD: 4-byte network-order address family) and
 * notifies waiting readers.  Runs with the ifnet serializer held.
 *
 * MPSAFE
 */
static int
tunoutput_serialized(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
		     struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	int error;
	struct altq_pktattr pktattr;

	TUNDEBUG(ifp, "tunoutput\n");

	/* Device must be open and have an address set (TUN_READY). */
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);
		m_freem (m0);
		return EHOSTDOWN;
	}

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	ifq_classify(&ifp->if_snd, m0, dst->sa_family, &pktattr);

	/*
	 * BPF write needs to be handled specially: the real address
	 * family arrives as a leading int in the data; extract it and
	 * strip it from the mbuf.
	 */
	if (dst->sa_family == AF_UNSPEC) {
		dst->sa_family = *(mtod(m0, int *));
		m0->m_len -= sizeof(int);
		m0->m_pkthdr.len -= sizeof(int);
		m0->m_data += sizeof(int);
	}

	if (ifp->if_bpf) {
		/*
		 * We need to prepend the address family as
		 * a four byte field.
		 */
		uint32_t af = dst->sa_family;

		bpf_ptap(ifp->if_bpf, m0, &af, sizeof(af));
	}

	/* prepend sockaddr? this may abort if the mbuf allocation fails */
	if (tp->tun_flags & TUN_LMODE) {
		/* allocate space for sockaddr */
		M_PREPEND(m0, dst->sa_len, MB_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL){
			IF_DROP(&ifp->if_snd);
			ifp->if_oerrors++;
			return (ENOBUFS);
		} else {
			bcopy(dst, m0->m_data, dst->sa_len);
		}
	}

	if (tp->tun_flags & TUN_IFHEAD) {
		/* Prepend the address family (network byte order). */
		M_PREPEND(m0, 4, MB_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL){
			IF_DROP(&ifp->if_snd);
			ifp->if_oerrors++;
			return ENOBUFS;
		} else
			*(u_int32_t *)m0->m_data = htonl(dst->sa_family);
	} else {
#ifdef INET
		if (dst->sa_family != AF_INET)
#endif
		{
			/* Without IFHEAD only IPv4 can be demultiplexed. */
			m_freem(m0);
			return EAFNOSUPPORT;
		}
	}

	error = ifq_handoff(ifp, m0, &pktattr);
	if (error) {
		ifp->if_collisions++;
	} else {
		ifp->if_opackets++;
		/* Wake a reader blocked in tunread(). */
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((caddr_t)tp);
		}
		/* Signal/select notification is done under the MP lock. */
		get_mplock();
		if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
			pgsigio(tp->tun_sigio, SIGIO, 0);
		selwakeup(&tp->tun_rsel);
		rel_mplock();
	}
	return (error);
}
368 
/*
 * tunoutput - if_output entry point: acquire the per-interface
 * serializer and hand off to tunoutput_serialized().
 */
static int
tunoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
	  struct rtentry *rt)
{
	int error;

	lwkt_serialize_enter(ifp->if_serializer);
	error = tunoutput_serialized(ifp, m0, dst, rt);
	lwkt_serialize_exit(ifp->if_serializer);

	return error;
}
381 
/*
 * tunioctl - the character-device ioctl interface; now pretty minimal.
 * Controls per-device framing mode (LMODE/IFHEAD), debug knobs,
 * async-I/O ownership and the "owning pid" bookkeeping.
 */
static	int
tunioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp = dev->si_drv1;
	struct tuninfo *tunp;

	switch (ap->a_cmd) {
	case TUNSIFINFO:
		/* Set interface parameters (mtu, type, baudrate). */
		tunp = (struct tuninfo *)ap->a_data;
		if (tunp->mtu < IF_MINMTU)
			return (EINVAL);
		tp->tun_if.if_mtu = tunp->mtu;
		tp->tun_if.if_type = tunp->type;
		tp->tun_if.if_baudrate = tunp->baudrate;
		break;
	case TUNGIFINFO:
		/* Report current interface parameters. */
		tunp = (struct tuninfo *)ap->a_data;
		tunp->mtu = tp->tun_if.if_mtu;
		tunp->type = tp->tun_if.if_type;
		tunp->baudrate = tp->tun_if.if_baudrate;
		break;
	case TUNSDEBUG:
		tundebug = *(int *)ap->a_data;
		break;
	case TUNGDEBUG:
		*(int *)ap->a_data = tundebug;
		break;
	case TUNSLMODE:
		/* Link-layer mode (prepend sockaddr); excludes IFHEAD. */
		if (*(int *)ap->a_data) {
			tp->tun_flags |= TUN_LMODE;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_LMODE;
		break;
	case TUNSIFHEAD:
		/* Multi-AF mode (4-byte family header); excludes LMODE. */
		if (*(int *)ap->a_data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_LMODE;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		break;
	case TUNGIFHEAD:
		*(int *)ap->a_data = (tp->tun_flags & TUN_IFHEAD) ? 1 : 0;
		break;
	case TUNSIFMODE:
		/* deny this if UP */
		if (tp->tun_if.if_flags & IFF_UP)
			return(EBUSY);

		/* Only point-to-point or broadcast (plus multicast). */
		switch (*(int *)ap->a_data & ~IFF_MULTICAST) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			tp->tun_if.if_flags &= ~(IFF_BROADCAST|IFF_POINTOPOINT);
			tp->tun_if.if_flags |= *(int *)ap->a_data;
			break;
		default:
			return(EINVAL);
		}
		break;
	case TUNSIFPID:
		/* Adopt the caller as the owning pid (see SIOCGIFSTATUS). */
		tp->tun_pid = curproc->p_pid;
		break;
	case FIOASYNC:
		if (*(int *)ap->a_data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;
	case FIONREAD:
		/* Report the byte length of the next queued packet. */
		lwkt_serialize_enter(tp->tun_if.if_serializer);
		if (!ifq_is_empty(&tp->tun_if.if_snd)) {
			struct mbuf *mb;

			mb = ifq_poll(&tp->tun_if.if_snd);
			for( *(int *)ap->a_data = 0; mb != 0; mb = mb->m_next)
				*(int *)ap->a_data += mb->m_len;
		} else {
			*(int *)ap->a_data = 0;
		}
		lwkt_serialize_exit(tp->tun_if.if_serializer);
		break;
	case FIOSETOWN:
		return (fsetown(*(int *)ap->a_data, &tp->tun_sigio));

	case FIOGETOWN:
		*(int *)ap->a_data = fgetown(tp->tun_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)ap->a_data), &tp->tun_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)ap->a_data = -fgetown(tp->tun_sigio);
		return (0);

	default:
		return (ENOTTY);
	}
	return (0);
}
488 
/*
 * The ops read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.  Blocks (interruptibly)
 * until a packet is queued unless IO_NDELAY is set.
 */
static	int
tunread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet	*ifp = &tp->tun_if;
	struct mbuf	*m0;
	int		error=0, len;

	TUNDEBUG(ifp, "read\n");
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);
		return EHOSTDOWN;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	lwkt_serialize_enter(ifp->if_serializer);

	/* Wait for a packet; flag TUN_RWAIT so writers wake us up. */
	while ((m0 = ifq_dequeue(&ifp->if_snd, NULL)) == NULL) {
		if (ap->a_ioflag & IO_NDELAY) {
			lwkt_serialize_exit(ifp->if_serializer);
			return EWOULDBLOCK;
		}
		tp->tun_flags |= TUN_RWAIT;
		lwkt_serialize_exit(ifp->if_serializer);
		/*
		 * NOTE(review): the serializer is released before the
		 * tsleep(), so a wakeup() issued in that window appears
		 * to be lost until the next packet arrives — confirm
		 * whether an interlocked sleep is needed here.
		 */
		if ((error = tsleep(tp, PCATCH, "tunread", 0)) != 0)
			return error;
		lwkt_serialize_enter(ifp->if_serializer);
	}

	lwkt_serialize_exit(ifp->if_serializer);

	/* Copy the chain out, freeing each mbuf as it is drained. */
	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), len, uio);
		m0 = m_free(m0);
	}

	/* Whatever did not fit in the caller's buffer is discarded. */
	if (m0) {
		TUNDEBUG(ifp, "Dropping mbuf\n");
		m_freem(m0);
	}
	return error;
}
540 
541 /*
542  * the ops write interface - an atomic write is a packet - or else!
543  */
544 static	int
545 tunwrite(struct dev_write_args *ap)
546 {
547 	cdev_t dev = ap->a_head.a_dev;
548 	struct uio *uio = ap->a_uio;
549 	struct tun_softc *tp = dev->si_drv1;
550 	struct ifnet	*ifp = &tp->tun_if;
551 	struct mbuf	*top, **mp, *m;
552 	int		error=0, tlen, mlen;
553 	uint32_t	family;
554 	int		isr;
555 
556 	TUNDEBUG(ifp, "tunwrite\n");
557 
558 	if (uio->uio_resid == 0)
559 		return 0;
560 
561 	if (uio->uio_resid < 0 || uio->uio_resid > TUNMRU) {
562 		TUNDEBUG(ifp, "len=%d!\n", uio->uio_resid);
563 		return EIO;
564 	}
565 	tlen = uio->uio_resid;
566 
567 	/* get a header mbuf */
568 	MGETHDR(m, MB_DONTWAIT, MT_DATA);
569 	if (m == NULL)
570 		return ENOBUFS;
571 	mlen = MHLEN;
572 
573 	top = 0;
574 	mp = &top;
575 	while (error == 0 && uio->uio_resid > 0) {
576 		m->m_len = min(mlen, uio->uio_resid);
577 		error = uiomove(mtod (m, caddr_t), m->m_len, uio);
578 		*mp = m;
579 		mp = &m->m_next;
580 		if (uio->uio_resid > 0) {
581 			MGET (m, MB_DONTWAIT, MT_DATA);
582 			if (m == 0) {
583 				error = ENOBUFS;
584 				break;
585 			}
586 			mlen = MLEN;
587 		}
588 	}
589 	if (error) {
590 		if (top)
591 			m_freem (top);
592 		ifp->if_ierrors++;
593 		return error;
594 	}
595 
596 	top->m_pkthdr.len = tlen;
597 	top->m_pkthdr.rcvif = ifp;
598 
599 	if (ifp->if_bpf) {
600 		if (tp->tun_flags & TUN_IFHEAD) {
601 			/*
602 			 * Conveniently, we already have a 4-byte address
603 			 * family prepended to our packet !
604 			 * Inconveniently, it's in the wrong byte order !
605 			 */
606 			if ((top = m_pullup(top, sizeof(family))) == NULL)
607 				return ENOBUFS;
608 			*mtod(top, u_int32_t *) =
609 			    ntohl(*mtod(top, u_int32_t *));
610 			bpf_mtap(ifp->if_bpf, top);
611 			*mtod(top, u_int32_t *) =
612 			    htonl(*mtod(top, u_int32_t *));
613 		} else {
614 			/*
615 			 * We need to prepend the address family as
616 			 * a four byte field.
617 			 */
618 			static const uint32_t af = AF_INET;
619 
620 			bpf_ptap(ifp->if_bpf, top, &af, sizeof(af));
621 		}
622 	}
623 
624 	if (tp->tun_flags & TUN_IFHEAD) {
625 		if (top->m_len < sizeof(family) &&
626 		    (top = m_pullup(top, sizeof(family))) == NULL)
627 				return ENOBUFS;
628 		family = ntohl(*mtod(top, u_int32_t *));
629 		m_adj(top, sizeof(family));
630 	} else
631 		family = AF_INET;
632 
633 	ifp->if_ibytes += top->m_pkthdr.len;
634 	ifp->if_ipackets++;
635 
636 	switch (family) {
637 #ifdef INET
638 	case AF_INET:
639 		isr = NETISR_IP;
640 		break;
641 #endif
642 #ifdef INET6
643 	case AF_INET6:
644 		isr = NETISR_IPV6;
645 		break;
646 #endif
647 #ifdef IPX
648 	case AF_IPX:
649 		isr = NETISR_IPX;
650 		break;
651 #endif
652 #ifdef NETATALK
653 	case AF_APPLETALK:
654 		isr = NETISR_ATALK2;
655 		break;
656 #endif
657 	default:
658 		m_freem(m);
659 		return (EAFNOSUPPORT);
660 	}
661 
662 	netisr_dispatch(isr, top);
663 	return (0);
664 }
665 
666 /*
667  * tunpoll - the poll interface, this is only useful on reads
668  * really. The write detect always returns true, write never blocks
669  * anyway, it either accepts the packet or drops it.
670  */
671 static	int
672 tunpoll(struct dev_poll_args *ap)
673 {
674 	cdev_t dev = ap->a_head.a_dev;
675 	struct tun_softc *tp = dev->si_drv1;
676 	struct ifnet	*ifp = &tp->tun_if;
677 	int		revents = 0;
678 
679 	TUNDEBUG(ifp, "tunpoll\n");
680 
681 	lwkt_serialize_enter(ifp->if_serializer);
682 
683 	if (ap->a_events & (POLLIN | POLLRDNORM)) {
684 		if (!ifq_is_empty(&ifp->if_snd)) {
685 			TUNDEBUG(ifp, "tunpoll q=%d\n", ifp->if_snd.ifq_len);
686 			revents |= ap->a_events & (POLLIN | POLLRDNORM);
687 		} else {
688 			TUNDEBUG(ifp, "tunpoll waiting\n");
689 			selrecord(curthread, &tp->tun_rsel);
690 		}
691 	}
692 	if (ap->a_events & (POLLOUT | POLLWRNORM))
693 		revents |= ap->a_events & (POLLOUT | POLLWRNORM);
694 
695 	lwkt_serialize_exit(ifp->if_serializer);
696 	ap->a_events = revents;
697 	return(0);
698 }
699 
700 /*
701  * Start packet transmission on the interface.
702  * when the interface queue is rate-limited by ALTQ,
703  * if_start is needed to drain packets from the queue in order
704  * to notify readers when outgoing packets become ready.
705  */
706 static void
707 tunstart(struct ifnet *ifp)
708 {
709 	struct tun_softc *tp = ifp->if_softc;
710 	struct mbuf *m;
711 
712 	if (!ifq_is_enabled(&ifp->if_snd))
713 		return;
714 
715 	m = ifq_poll(&ifp->if_snd);
716 	if (m != NULL) {
717 		if (tp->tun_flags & TUN_RWAIT) {
718 			tp->tun_flags &= ~TUN_RWAIT;
719 			wakeup((caddr_t)tp);
720 		}
721 		if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
722 			pgsigio(tp->tun_sigio, SIGIO, 0);
723 		selwakeup(&tp->tun_rsel);
724 	}
725 }
726