xref: /dragonfly/sys/net/tun/if_tun.c (revision c03f08f3)
1 /*	$NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $	*/
2 
3 /*
4  * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
5  * Nottingham University 1987.
6  *
7  * This source may be freely distributed, however I would be interested
8  * in any changes that are made.
9  *
10  * This driver takes packets off the IP i/f and hands them up to a
11  * user process to have its wicked way with. This driver has its
12  * roots in a similar driver written by Phil Cockcroft (formerly) at
13  * UCL. This driver is based much more on read/write/poll mode of
14  * operation though.
15  *
16  * $FreeBSD: src/sys/net/if_tun.c,v 1.74.2.8 2002/02/13 00:43:11 dillon Exp $
17  * $DragonFly: src/sys/net/tun/if_tun.c,v 1.31 2006/12/20 18:14:42 dillon Exp $
18  */
19 
20 #include "opt_atalk.h"
21 #include "opt_inet.h"
22 #include "opt_inet6.h"
23 #include "opt_ipx.h"
24 
25 #include <sys/param.h>
26 #include <sys/proc.h>
27 #include <sys/systm.h>
28 #include <sys/mbuf.h>
29 #include <sys/socket.h>
30 #include <sys/conf.h>
31 #include <sys/device.h>
32 #include <sys/filio.h>
33 #include <sys/sockio.h>
34 #include <sys/thread2.h>
35 #include <sys/ttycom.h>
36 #include <sys/poll.h>
37 #include <sys/signalvar.h>
38 #include <sys/filedesc.h>
39 #include <sys/kernel.h>
40 #include <sys/sysctl.h>
41 #include <sys/uio.h>
42 #include <sys/vnode.h>
43 #include <sys/malloc.h>
44 
45 #include <net/if.h>
46 #include <net/if_types.h>
47 #include <net/ifq_var.h>
48 #include <net/netisr.h>
49 #include <net/route.h>
50 
51 #ifdef INET
52 #include <netinet/in.h>
53 #endif
54 
55 #include <net/bpf.h>
56 
57 #include "if_tunvar.h"
58 #include "if_tun.h"
59 
static MALLOC_DEFINE(M_TUN, "tun", "Tunnel Interface");

/* Registered at boot (or module load) via PSEUDO_SET below. */
static void tunattach (void *);
PSEUDO_SET(tunattach, if_tun);

/* Lazily creates the softc/ifnet on first open of a unit. */
static void tuncreate (cdev_t dev);

/* Debug printf, gated by the debug.if_tun_debug sysctl below. */
#define TUNDEBUG	if (tundebug) if_printf
static int tundebug = 0;
SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, "");

/* ifnet method implementations (network side of the tunnel). */
static int tunoutput (struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *rt);
static int tunifioctl (struct ifnet *, u_long, caddr_t, struct ucred *);
static int tuninit (struct ifnet *);
static void tunstart(struct ifnet *);

/* Character-device entry points (user-process side, /dev/tun*). */
static	d_open_t	tunopen;
static	d_close_t	tunclose;
static	d_read_t	tunread;
static	d_write_t	tunwrite;
static	d_ioctl_t	tunioctl;
static	d_poll_t	tunpoll;

#define CDEV_MAJOR 52
static struct dev_ops tun_ops = {
	{ "tun", CDEV_MAJOR, 0 },
	.d_open =	tunopen,
	.d_close =	tunclose,
	.d_read =	tunread,
	.d_write =	tunwrite,
	.d_ioctl =	tunioctl,
	.d_poll =	tunpoll,
};
94 
/*
 * Pseudo-device attach hook: register the tun character-device ops
 * so /dev/tun* nodes can be opened.  Invoked via PSEUDO_SET.
 */
static void
tunattach(void *dummy)
{
	dev_ops_add(&tun_ops, 0, 0);
}
100 
101 static void
102 tuncreate(cdev_t dev)
103 {
104 	struct tun_softc *sc;
105 	struct ifnet *ifp;
106 
107 	dev = make_dev(&tun_ops, minor(dev),
108 	    UID_UUCP, GID_DIALER, 0600, "tun%d", lminor(dev));
109 
110 	MALLOC(sc, struct tun_softc *, sizeof(*sc), M_TUN, M_WAITOK);
111 	bzero(sc, sizeof *sc);
112 	sc->tun_flags = TUN_INITED;
113 
114 	ifp = &sc->tun_if;
115 	if_initname(ifp, "tun", lminor(dev));
116 	ifp->if_mtu = TUNMTU;
117 	ifp->if_ioctl = tunifioctl;
118 	ifp->if_output = tunoutput;
119 	ifp->if_start = tunstart;
120 	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
121 	ifp->if_type = IFT_PPP;
122 	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
123 	ifq_set_ready(&ifp->if_snd);
124 	ifp->if_softc = sc;
125 	if_attach(ifp, NULL);
126 	bpfattach(ifp, DLT_NULL, sizeof(u_int));
127 	dev->si_drv1 = sc;
128 }
129 
130 /*
131  * tunnel open - must be superuser & the device must be
132  * configured in
133  */
134 static	int
135 tunopen(struct dev_open_args *ap)
136 {
137 	cdev_t dev = ap->a_head.a_dev;
138 	struct ifnet	*ifp;
139 	struct tun_softc *tp;
140 	int	error;
141 
142 	if ((error = suser_cred(ap->a_cred, 0)) != NULL)
143 		return (error);
144 
145 	tp = dev->si_drv1;
146 	if (!tp) {
147 		tuncreate(dev);
148 		tp = dev->si_drv1;
149 	}
150 	if (tp->tun_flags & TUN_OPEN)
151 		return EBUSY;
152 	tp->tun_pid = curproc->p_pid;
153 	ifp = &tp->tun_if;
154 	tp->tun_flags |= TUN_OPEN;
155 	TUNDEBUG(ifp, "open\n");
156 	return (0);
157 }
158 
/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
static	int
tunclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp;
	struct ifnet	*ifp;

	tp = dev->si_drv1;
	ifp = &tp->tun_if;

	/* Mark the unit free and forget the controlling pid. */
	tp->tun_flags &= ~TUN_OPEN;
	tp->tun_pid = 0;

	/* Junk all pending output. */
	lwkt_serialize_enter(ifp->if_serializer);
	ifq_purge(&ifp->if_snd);
	lwkt_serialize_exit(ifp->if_serializer);

	/* Bring the interface down under the ifnet serializer. */
	if (ifp->if_flags & IFF_UP) {
		lwkt_serialize_enter(ifp->if_serializer);
		if_down(ifp);
		lwkt_serialize_exit(ifp->if_serializer);
	}

	if (ifp->if_flags & IFF_RUNNING) {
		struct ifaddr *ifa;

		lwkt_serialize_enter(ifp->if_serializer);
		/* find internet addresses and delete routes */
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family == AF_INET) {
				/* host route only if a dstaddr was set */
				rtinit(ifa, (int)RTM_DELETE,
				    tp->tun_flags & TUN_DSTADDR ? RTF_HOST : 0);
			}
		}
		ifp->if_flags &= ~IFF_RUNNING;
		lwkt_serialize_exit(ifp->if_serializer);
	}

	/* Drop async-I/O ownership and wake any blocked pollers. */
	funsetown(tp->tun_sigio);
	selwakeup(&tp->tun_rsel);

	TUNDEBUG(ifp, "closed\n");
	return (0);
}
208 
209 static int
210 tuninit(struct ifnet *ifp)
211 {
212 	struct tun_softc *tp = ifp->if_softc;
213 	struct ifaddr *ifa;
214 	int error = 0;
215 
216 	TUNDEBUG(ifp, "tuninit\n");
217 
218 	ifp->if_flags |= IFF_UP | IFF_RUNNING;
219 	getmicrotime(&ifp->if_lastchange);
220 
221 	for (ifa = TAILQ_FIRST(&ifp->if_addrhead); ifa;
222 	     ifa = TAILQ_NEXT(ifa, ifa_link)) {
223 		if (ifa->ifa_addr == NULL)
224 			error = EFAULT;
225 			/* XXX: Should maybe return straight off? */
226 		else {
227 #ifdef INET
228 			if (ifa->ifa_addr->sa_family == AF_INET) {
229 			    struct sockaddr_in *si;
230 
231 			    si = (struct sockaddr_in *)ifa->ifa_addr;
232 			    if (si->sin_addr.s_addr)
233 				    tp->tun_flags |= TUN_IASET;
234 
235 			    si = (struct sockaddr_in *)ifa->ifa_dstaddr;
236 			    if (si && si->sin_addr.s_addr)
237 				    tp->tun_flags |= TUN_DSTADDR;
238 			}
239 #endif
240 		}
241 	}
242 	return (error);
243 }
244 
245 /*
246  * Process an ioctl request.
247  *
248  * MPSAFE
249  */
250 int
251 tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
252 {
253 	struct ifreq *ifr = (struct ifreq *)data;
254 	struct tun_softc *tp = ifp->if_softc;
255 	struct ifstat *ifs;
256 	int error = 0;
257 
258 	switch(cmd) {
259 	case SIOCGIFSTATUS:
260 		ifs = (struct ifstat *)data;
261 		if (tp->tun_pid)
262 			ksprintf(ifs->ascii + strlen(ifs->ascii),
263 			    "\tOpened by PID %d\n", tp->tun_pid);
264 		break;
265 	case SIOCSIFADDR:
266 		error = tuninit(ifp);
267 		TUNDEBUG(ifp, "address set, error=%d\n", error);
268 		break;
269 	case SIOCSIFDSTADDR:
270 		error = tuninit(ifp);
271 		TUNDEBUG(ifp, "destination address set, error=%d\n", error);
272 		break;
273 	case SIOCSIFMTU:
274 		ifp->if_mtu = ifr->ifr_mtu;
275 		TUNDEBUG(ifp, "mtu set\n");
276 		break;
277 	case SIOCSIFFLAGS:
278 	case SIOCADDMULTI:
279 	case SIOCDELMULTI:
280 		break;
281 	default:
282 		error = EINVAL;
283 	}
284 	return (error);
285 }
286 
/*
 * tunoutput - queue packets from higher level ready to put out.
 *
 * Prepends an optional sockaddr (TUNSLMODE) or 4-byte address family
 * (TUNSIFHEAD) header, taps BPF, queues the packet on if_snd and
 * wakes any reader blocked in tunread()/tunpoll().
 *
 * MPSAFE
 */
int
tunoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
	  struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	int error;
	struct altq_pktattr pktattr;

	TUNDEBUG(ifp, "tunoutput\n");

	/* Drop everything until the device is open and has an address. */
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);
		m_freem (m0);
		return EHOSTDOWN;
	}

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	ifq_classify(&ifp->if_snd, m0, dst->sa_family, &pktattr);

	/* BPF write needs to be handled specially */
	if (dst->sa_family == AF_UNSPEC) {
		/* The real family is the leading int; strip it off. */
		dst->sa_family = *(mtod(m0, int *));
		m0->m_len -= sizeof(int);
		m0->m_pkthdr.len -= sizeof(int);
		m0->m_data += sizeof(int);
	}

	if (ifp->if_bpf) {
		/*
		 * We need to prepend the address family as
		 * a four byte field.
		 */
		uint32_t af = dst->sa_family;

		bpf_ptap(ifp->if_bpf, m0, &af, sizeof(af));
	}

	/* prepend sockaddr? this may abort if the mbuf allocation fails */
	if (tp->tun_flags & TUN_LMODE) {
		/* allocate space for sockaddr */
		M_PREPEND(m0, dst->sa_len, MB_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL){
			IF_DROP(&ifp->if_snd);
			ifp->if_oerrors++;
			return (ENOBUFS);
		} else {
			bcopy(dst, m0->m_data, dst->sa_len);
		}
	}

	if (tp->tun_flags & TUN_IFHEAD) {
		/* Prepend the address family */
		M_PREPEND(m0, 4, MB_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL){
			IF_DROP(&ifp->if_snd);
			ifp->if_oerrors++;
			return ENOBUFS;
		} else
			*(u_int32_t *)m0->m_data = htonl(dst->sa_family);
	} else {
#ifdef INET
		/* Without the family header only IPv4 can be carried. */
		if (dst->sa_family != AF_INET)
#endif
		{
			m_freem(m0);
			return EAFNOSUPPORT;
		}
	}

	error = ifq_handoff(ifp, m0, &pktattr);
	if (error) {
		ifp->if_collisions++;
	} else {
		ifp->if_opackets++;
		/* Wake a reader blocked in tunread(). */
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((caddr_t)tp);
		}
		/* pgsigio/selwakeup are not MPSAFE; take the MP lock. */
		get_mplock();
		if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
			pgsigio(tp->tun_sigio, SIGIO, 0);
		selwakeup(&tp->tun_rsel);
		rel_mplock();
	}
	return (error);
}
385 
/*
 * the ops interface is now pretty minimal.
 *
 * Handles the TUN* configuration ioctls plus the generic async-I/O
 * and process-group ownership ioctls on the control device.
 */
static	int
tunioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp = dev->si_drv1;
	struct tuninfo *tunp;

	switch (ap->a_cmd) {
	case TUNSIFINFO:
		/* Set mtu/type/baudrate on the interface. */
		tunp = (struct tuninfo *)ap->a_data;
		if (tunp->mtu < IF_MINMTU)
			return (EINVAL);
		tp->tun_if.if_mtu = tunp->mtu;
		tp->tun_if.if_type = tunp->type;
		tp->tun_if.if_baudrate = tunp->baudrate;
		break;
	case TUNGIFINFO:
		/* Report mtu/type/baudrate. */
		tunp = (struct tuninfo *)ap->a_data;
		tunp->mtu = tp->tun_if.if_mtu;
		tunp->type = tp->tun_if.if_type;
		tunp->baudrate = tp->tun_if.if_baudrate;
		break;
	case TUNSDEBUG:
		tundebug = *(int *)ap->a_data;
		break;
	case TUNGDEBUG:
		*(int *)ap->a_data = tundebug;
		break;
	case TUNSLMODE:
		/* Link-layer mode: prepend sockaddr; excludes IFHEAD. */
		if (*(int *)ap->a_data) {
			tp->tun_flags |= TUN_LMODE;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_LMODE;
		break;
	case TUNSIFHEAD:
		/* Family-header mode: prepend AF; excludes LMODE. */
		if (*(int *)ap->a_data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_LMODE;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		break;
	case TUNGIFHEAD:
		*(int *)ap->a_data = (tp->tun_flags & TUN_IFHEAD) ? 1 : 0;
		break;
	case TUNSIFMODE:
		/* deny this if UP */
		if (tp->tun_if.if_flags & IFF_UP)
			return(EBUSY);

		/* Only point-to-point or broadcast (plus multicast). */
		switch (*(int *)ap->a_data & ~IFF_MULTICAST) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			tp->tun_if.if_flags &= ~(IFF_BROADCAST|IFF_POINTOPOINT);
			tp->tun_if.if_flags |= *(int *)ap->a_data;
			break;
		default:
			return(EINVAL);
		}
		break;
	case TUNSIFPID:
		/* Take over "opened by" ownership for SIOCGIFSTATUS. */
		tp->tun_pid = curproc->p_pid;
		break;
	case FIOASYNC:
		if (*(int *)ap->a_data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;
	case FIONREAD:
		/* Byte count of the first queued packet, or 0. */
		lwkt_serialize_enter(tp->tun_if.if_serializer);
		if (!ifq_is_empty(&tp->tun_if.if_snd)) {
			struct mbuf *mb;

			mb = ifq_poll(&tp->tun_if.if_snd);
			for( *(int *)ap->a_data = 0; mb != 0; mb = mb->m_next)
				*(int *)ap->a_data += mb->m_len;
		} else {
			*(int *)ap->a_data = 0;
		}
		lwkt_serialize_exit(tp->tun_if.if_serializer);
		break;
	case FIOSETOWN:
		return (fsetown(*(int *)ap->a_data, &tp->tun_sigio));

	case FIOGETOWN:
		*(int *)ap->a_data = fgetown(tp->tun_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)ap->a_data), &tp->tun_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)ap->a_data = -fgetown(tp->tun_sigio);
		return (0);

	default:
		return (ENOTTY);
	}
	return (0);
}
492 
/*
 * The ops read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 *
 * Blocks (unless IO_NDELAY) until a packet is queued on if_snd,
 * then copies it out to the user buffer; any tail that does not
 * fit in the buffer is dropped.
 */
static	int
tunread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet	*ifp = &tp->tun_if;
	struct mbuf	*m0;
	int		error=0, len;

	TUNDEBUG(ifp, "read\n");
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);
		return EHOSTDOWN;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	lwkt_serialize_enter(ifp->if_serializer);

	/*
	 * Dequeue a packet; sleep (interruptibly) while the queue is
	 * empty.  The serializer is dropped across tsleep and the
	 * wakeup comes from tunoutput()/tunstart().
	 */
	while ((m0 = ifq_dequeue(&ifp->if_snd, NULL)) == NULL) {
		if (ap->a_ioflag & IO_NDELAY) {
			lwkt_serialize_exit(ifp->if_serializer);
			return EWOULDBLOCK;
		}
		tp->tun_flags |= TUN_RWAIT;
		lwkt_serialize_exit(ifp->if_serializer);
		if ((error = tsleep(tp, PCATCH, "tunread", 0)) != 0)
			return error;
		lwkt_serialize_enter(ifp->if_serializer);
	}

	lwkt_serialize_exit(ifp->if_serializer);

	/* Copy the mbuf chain out, freeing each mbuf as it is consumed. */
	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), len, uio);
		m0 = m_free(m0);
	}

	/* User buffer was too small: drop the remainder of the packet. */
	if (m0) {
		TUNDEBUG(ifp, "Dropping mbuf\n");
		m_freem(m0);
	}
	return error;
}
544 
545 /*
546  * the ops write interface - an atomic write is a packet - or else!
547  */
548 static	int
549 tunwrite(struct dev_write_args *ap)
550 {
551 	cdev_t dev = ap->a_head.a_dev;
552 	struct uio *uio = ap->a_uio;
553 	struct tun_softc *tp = dev->si_drv1;
554 	struct ifnet	*ifp = &tp->tun_if;
555 	struct mbuf	*top, **mp, *m;
556 	int		error=0, tlen, mlen;
557 	uint32_t	family;
558 	int		isr;
559 
560 	TUNDEBUG(ifp, "tunwrite\n");
561 
562 	if (uio->uio_resid == 0)
563 		return 0;
564 
565 	if (uio->uio_resid < 0 || uio->uio_resid > TUNMRU) {
566 		TUNDEBUG(ifp, "len=%d!\n", uio->uio_resid);
567 		return EIO;
568 	}
569 	tlen = uio->uio_resid;
570 
571 	/* get a header mbuf */
572 	MGETHDR(m, MB_DONTWAIT, MT_DATA);
573 	if (m == NULL)
574 		return ENOBUFS;
575 	mlen = MHLEN;
576 
577 	top = 0;
578 	mp = &top;
579 	while (error == 0 && uio->uio_resid > 0) {
580 		m->m_len = min(mlen, uio->uio_resid);
581 		error = uiomove(mtod (m, caddr_t), m->m_len, uio);
582 		*mp = m;
583 		mp = &m->m_next;
584 		if (uio->uio_resid > 0) {
585 			MGET (m, MB_DONTWAIT, MT_DATA);
586 			if (m == 0) {
587 				error = ENOBUFS;
588 				break;
589 			}
590 			mlen = MLEN;
591 		}
592 	}
593 	if (error) {
594 		if (top)
595 			m_freem (top);
596 		ifp->if_ierrors++;
597 		return error;
598 	}
599 
600 	top->m_pkthdr.len = tlen;
601 	top->m_pkthdr.rcvif = ifp;
602 
603 	if (ifp->if_bpf) {
604 		if (tp->tun_flags & TUN_IFHEAD) {
605 			/*
606 			 * Conveniently, we already have a 4-byte address
607 			 * family prepended to our packet !
608 			 * Inconveniently, it's in the wrong byte order !
609 			 */
610 			if ((top = m_pullup(top, sizeof(family))) == NULL)
611 				return ENOBUFS;
612 			*mtod(top, u_int32_t *) =
613 			    ntohl(*mtod(top, u_int32_t *));
614 			bpf_mtap(ifp->if_bpf, top);
615 			*mtod(top, u_int32_t *) =
616 			    htonl(*mtod(top, u_int32_t *));
617 		} else {
618 			/*
619 			 * We need to prepend the address family as
620 			 * a four byte field.
621 			 */
622 			static const uint32_t af = AF_INET;
623 
624 			bpf_ptap(ifp->if_bpf, top, &af, sizeof(af));
625 		}
626 	}
627 
628 	if (tp->tun_flags & TUN_IFHEAD) {
629 		if (top->m_len < sizeof(family) &&
630 		    (top = m_pullup(top, sizeof(family))) == NULL)
631 				return ENOBUFS;
632 		family = ntohl(*mtod(top, u_int32_t *));
633 		m_adj(top, sizeof(family));
634 	} else
635 		family = AF_INET;
636 
637 	ifp->if_ibytes += top->m_pkthdr.len;
638 	ifp->if_ipackets++;
639 
640 	switch (family) {
641 #ifdef INET
642 	case AF_INET:
643 		isr = NETISR_IP;
644 		break;
645 #endif
646 #ifdef INET6
647 	case AF_INET6:
648 		isr = NETISR_IPV6;
649 		break;
650 #endif
651 #ifdef IPX
652 	case AF_IPX:
653 		isr = NETISR_IPX;
654 		break;
655 #endif
656 #ifdef NETATALK
657 	case AF_APPLETALK:
658 		isr = NETISR_ATALK2;
659 		break;
660 #endif
661 	default:
662 		m_freem(m);
663 		return (EAFNOSUPPORT);
664 	}
665 
666 	netisr_dispatch(isr, top);
667 	return (0);
668 }
669 
670 /*
671  * tunpoll - the poll interface, this is only useful on reads
672  * really. The write detect always returns true, write never blocks
673  * anyway, it either accepts the packet or drops it.
674  */
675 static	int
676 tunpoll(struct dev_poll_args *ap)
677 {
678 	cdev_t dev = ap->a_head.a_dev;
679 	struct tun_softc *tp = dev->si_drv1;
680 	struct ifnet	*ifp = &tp->tun_if;
681 	int		revents = 0;
682 
683 	TUNDEBUG(ifp, "tunpoll\n");
684 
685 	lwkt_serialize_enter(ifp->if_serializer);
686 
687 	if (ap->a_events & (POLLIN | POLLRDNORM)) {
688 		if (!ifq_is_empty(&ifp->if_snd)) {
689 			TUNDEBUG(ifp, "tunpoll q=%d\n", ifp->if_snd.ifq_len);
690 			revents |= ap->a_events & (POLLIN | POLLRDNORM);
691 		} else {
692 			TUNDEBUG(ifp, "tunpoll waiting\n");
693 			selrecord(curthread, &tp->tun_rsel);
694 		}
695 	}
696 	if (ap->a_events & (POLLOUT | POLLWRNORM))
697 		revents |= ap->a_events & (POLLOUT | POLLWRNORM);
698 
699 	lwkt_serialize_exit(ifp->if_serializer);
700 	ap->a_events = revents;
701 	return(0);
702 }
703 
704 /*
705  * Start packet transmission on the interface.
706  * when the interface queue is rate-limited by ALTQ,
707  * if_start is needed to drain packets from the queue in order
708  * to notify readers when outgoing packets become ready.
709  */
710 static void
711 tunstart(struct ifnet *ifp)
712 {
713 	struct tun_softc *tp = ifp->if_softc;
714 	struct mbuf *m;
715 
716 	if (!ifq_is_enabled(&ifp->if_snd))
717 		return;
718 
719 	m = ifq_poll(&ifp->if_snd);
720 	if (m != NULL) {
721 		if (tp->tun_flags & TUN_RWAIT) {
722 			tp->tun_flags &= ~TUN_RWAIT;
723 			wakeup((caddr_t)tp);
724 		}
725 		if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
726 			pgsigio(tp->tun_sigio, SIGIO, 0);
727 		selwakeup(&tp->tun_rsel);
728 	}
729 }
730