/*	$NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/poll mode of
 * operation though.
 *
 * $FreeBSD: src/sys/net/if_tun.c,v 1.74.2.8 2002/02/13 00:43:11 dillon Exp $
 */
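
/*
 * Illustrative sketch only (kept under "#if 0", never compiled): roughly
 * how a userland process is expected to drive this driver through its
 * character device.  The device path, the choice of TUNSIFHEAD framing
 * and the missing error handling/includes (<fcntl.h>, <unistd.h>,
 * <sys/ioctl.h>, <net/if_tun.h>) are simplifications for illustration,
 * not part of the driver itself.
 */
#if 0
	int on = 1;
	char buf[TUNMRU];
	ssize_t n;

	int fd = open("/dev/tun0", O_RDWR);	/* must be root; attaches tun0 */
	ioctl(fd, TUNSIFHEAD, &on);		/* 4-byte address family framing */
	/* configure addresses with ifconfig(8), then move packets: */
	n = read(fd, buf, sizeof(buf));		/* one packet per read */
	if (n > 0)
		write(fd, buf, n);		/* one packet per write */
#endif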

#include "use_tun.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/thread2.h>
#include <sys/ttycom.h>
#include <sys/signalvar.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/malloc.h>

#include <sys/mplock2.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/netisr.h>
#include <net/route.h>
#include <sys/devfs.h>

#ifdef INET
#include <netinet/in.h>
#endif

#include <net/bpf.h>

#include "if_tunvar.h"
#include "if_tun.h"

static MALLOC_DEFINE(M_TUN, "tun", "Tunnel Interface");

static void tunattach (void *);
PSEUDO_SET(tunattach, if_tun);

static void tuncreate (cdev_t dev);

#define TUNDEBUG	if (tundebug) if_printf
static int tundebug = 0;
SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0,
    "Enable debug output");

static int tunoutput (struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *rt);
static int tunifioctl (struct ifnet *, u_long, caddr_t, struct ucred *);
static int tuninit (struct ifnet *);
static void tunstart(struct ifnet *, struct ifaltq_subque *);
static void tun_filter_detach(struct knote *);
static int tun_filter_read(struct knote *, long);
static int tun_filter_write(struct knote *, long);

static	d_open_t	tunopen;
static	d_close_t	tunclose;
static	d_read_t	tunread;
static	d_write_t	tunwrite;
static	d_ioctl_t	tunioctl;
static	d_kqfilter_t	tunkqfilter;

static d_clone_t tunclone;
DEVFS_DECLARE_CLONE_BITMAP(tun);

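/*
 * Number of units created up front in tunattach().  NTUN comes from
 * use_tun.h and reflects the "pseudo-device tun" count in the kernel
 * config; further units are created on demand by the devfs clone
 * handler below.
 */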
#if NTUN <= 1
#define TUN_PREALLOCATED_UNITS	4
#else
#define TUN_PREALLOCATED_UNITS	NTUN
#endif

static struct dev_ops tun_ops = {
	{ "tun", 0, 0 },
	.d_open =	tunopen,
	.d_close =	tunclose,
	.d_read =	tunread,
	.d_write =	tunwrite,
	.d_ioctl =	tunioctl,
	.d_kqfilter =	tunkqfilter
};

static void
tunattach(void *dummy)
{
	int i;
	make_autoclone_dev(&tun_ops, &DEVFS_CLONE_BITMAP(tun),
		tunclone, UID_UUCP, GID_DIALER, 0600, "tun");
	for (i = 0; i < TUN_PREALLOCATED_UNITS; i++) {
		make_dev(&tun_ops, i, UID_UUCP, GID_DIALER, 0600, "tun%d", i);
		devfs_clone_bitmap_set(&DEVFS_CLONE_BITMAP(tun), i);
	}
	/* Doesn't need uninit because unloading is not possible, see PSEUDO_SET */
}

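/*
 * devfs clone handler: hand out the lowest free unit number from the
 * bitmap and create its device node on demand.
 */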
static int
tunclone(struct dev_clone_args *ap)
{
	int unit;

	unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(tun), 0);
	ap->a_dev = make_only_dev(&tun_ops, unit, UID_UUCP, GID_DIALER, 0600,
								"tun%d", unit);

	return 0;
}

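/*
 * Allocate the per-unit softc and attach the network interface.
 * Called lazily from tunopen() the first time a unit is opened.
 */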
static void
tuncreate(cdev_t dev)
{
	struct tun_softc *sc;
	struct ifnet *ifp;

#if 0
	dev = make_dev(&tun_ops, minor(dev),
	    UID_UUCP, GID_DIALER, 0600, "tun%d", lminor(dev));
#endif

	sc = kmalloc(sizeof(*sc), M_TUN, M_WAITOK | M_ZERO);
	sc->tun_flags = TUN_INITED;

	ifp = &sc->tun_if;
	if_initname(ifp, "tun", lminor(dev));
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tunifioctl;
	ifp->if_output = tunoutput;
	ifp->if_start = tunstart;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PPP;
	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
	ifq_set_ready(&ifp->if_snd);
	ifp->if_softc = sc;
	if_attach(ifp, NULL);
	bpfattach(ifp, DLT_NULL, sizeof(u_int));
	dev->si_drv1 = sc;
}

/*
 * tunnel open - must be superuser & the device must be
 * configured in
 */
static	int
tunopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct ifnet	*ifp;
	struct tun_softc *tp;
	int	error;

	if ((error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0)) != 0)
		return (error);

	tp = dev->si_drv1;
	if (!tp) {
		tuncreate(dev);
		tp = dev->si_drv1;
	}
	if (tp->tun_flags & TUN_OPEN)
		return EBUSY;
	tp->tun_pid = curproc->p_pid;
	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG(ifp, "open\n");
	return (0);
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
static	int
tunclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp;
	struct ifnet	*ifp;

	tp = dev->si_drv1;
	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;
	tp->tun_pid = 0;

	/* Junk all pending output. */
	ifq_purge_all(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP)
		if_down(ifp);
	ifp->if_flags &= ~IFF_RUNNING;
	if_purgeaddrs_nolink(ifp);

	funsetown(&tp->tun_sigio);
	KNOTE(&tp->tun_rkq.ki_note, 0);

	TUNDEBUG(ifp, "closed\n");
#if 0
	if (dev->si_uminor >= TUN_PREALLOCATED_UNITS) {
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(tun), dev->si_uminor);
	}
#endif
	return (0);
}

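/*
 * Bring the interface up and note whether an IPv4 address has been
 * assigned (TUN_IASET).  Called from tunifioctl() when an address is
 * set.
 */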
static int
tuninit(struct ifnet *ifp)
{
#ifdef INET
	struct tun_softc *tp = ifp->if_softc;
#endif
	struct ifaddr_container *ifac;
	int error = 0;

	TUNDEBUG(ifp, "tuninit\n");

	ifp->if_flags |= IFF_UP | IFF_RUNNING;
	getmicrotime(&ifp->if_lastchange);

	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (ifa->ifa_addr == NULL) {
			error = EFAULT;
			/* XXX: Should maybe return straight off? */
		} else {
#ifdef INET
			if (ifa->ifa_addr->sa_family == AF_INET) {
			    struct sockaddr_in *si;

			    si = (struct sockaddr_in *)ifa->ifa_addr;
			    if (si->sin_addr.s_addr)
				    tp->tun_flags |= TUN_IASET;
			}
#endif
		}
	}
	return (error);
}

/*
 * Process an ioctl request.
 *
 * MPSAFE
 */
int
tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct tun_softc *tp = ifp->if_softc;
	struct ifstat *ifs;
	int error = 0;

	switch(cmd) {
	case SIOCGIFSTATUS:
		ifs = (struct ifstat *)data;
		if (tp->tun_pid)
			ksprintf(ifs->ascii + strlen(ifs->ascii),
			    "\tOpened by PID %d\n", tp->tun_pid);
		break;
	case SIOCSIFADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "address set, error=%d\n", error);
		break;
	case SIOCSIFDSTADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "destination address set, error=%d\n", error);
		break;
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		TUNDEBUG(ifp, "mtu set\n");
		break;
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	default:
		error = EINVAL;
	}
	return (error);
}

/*
 * tunoutput - queue packets from higher level ready to put out.
 *
 * MPSAFE
 */
static int
tunoutput_serialized(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
		     struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	int error;
	struct altq_pktattr pktattr;

	TUNDEBUG(ifp, "tunoutput\n");

	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);
		m_freem (m0);
		return EHOSTDOWN;
	}

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	ifq_classify(&ifp->if_snd, m0, dst->sa_family, &pktattr);

	/* BPF write needs to be handled specially */
	if (dst->sa_family == AF_UNSPEC) {
		dst->sa_family = *(mtod(m0, int *));
		m0->m_len -= sizeof(int);
		m0->m_pkthdr.len -= sizeof(int);
		m0->m_data += sizeof(int);
	}

	if (ifp->if_bpf) {
		bpf_gettoken();
		if (ifp->if_bpf) {
			/*
			 * We need to prepend the address family as
			 * a four byte field.
			 */
			uint32_t af = dst->sa_family;

			bpf_ptap(ifp->if_bpf, m0, &af, sizeof(af));
		}
		bpf_reltoken();
	}

	/*
	 * Optionally prepend the destination sockaddr; the packet is
	 * dropped if the mbuf allocation for it fails.
	 */
	if (tp->tun_flags & TUN_LMODE) {
		/* allocate space for sockaddr */
		M_PREPEND(m0, dst->sa_len, MB_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			return (ENOBUFS);
		} else {
			bcopy(dst, m0->m_data, dst->sa_len);
		}
	}

	if (tp->tun_flags & TUN_IFHEAD) {
		/* Prepend the address family */
		M_PREPEND(m0, 4, MB_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			return ENOBUFS;
		} else
			*(u_int32_t *)m0->m_data = htonl(dst->sa_family);
	} else {
#ifdef INET
		if (dst->sa_family != AF_INET)
#endif
		{
			m_freem(m0);
			return EAFNOSUPPORT;
		}
	}

	error = ifq_handoff(ifp, m0, &pktattr);
	if (error) {
		IFNET_STAT_INC(ifp, collisions, 1);
	} else {
		IFNET_STAT_INC(ifp, opackets, 1);
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((caddr_t)tp);
		}
		get_mplock();
		if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
			pgsigio(tp->tun_sigio, SIGIO, 0);
		rel_mplock();
		ifnet_deserialize_all(ifp);
		KNOTE(&tp->tun_rkq.ki_note, 0);
		ifnet_serialize_all(ifp);
	}
	return (error);
}

static int
tunoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
	  struct rtentry *rt)
{
	int error;

	ifnet_serialize_all(ifp);
	error = tunoutput_serialized(ifp, m0, dst, rt);
	ifnet_deserialize_all(ifp);

	return error;
}

/*
 * the ops interface is now pretty minimal.
 */
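/*
 * Note that TUNSLMODE (prepend the destination sockaddr) and
 * TUNSIFHEAD (prepend a 4-byte address family) are mutually exclusive
 * framing modes; setting one clears the other.
 */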
static	int
tunioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp = dev->si_drv1;
	struct tuninfo *tunp;

	switch (ap->a_cmd) {
	case TUNSIFINFO:
		tunp = (struct tuninfo *)ap->a_data;
		if (tunp->mtu < IF_MINMTU)
			return (EINVAL);
		tp->tun_if.if_mtu = tunp->mtu;
		tp->tun_if.if_type = tunp->type;
		tp->tun_if.if_baudrate = tunp->baudrate;
		break;
	case TUNGIFINFO:
		tunp = (struct tuninfo *)ap->a_data;
		tunp->mtu = tp->tun_if.if_mtu;
		tunp->type = tp->tun_if.if_type;
		tunp->baudrate = tp->tun_if.if_baudrate;
		break;
	case TUNSDEBUG:
		tundebug = *(int *)ap->a_data;
		break;
	case TUNGDEBUG:
		*(int *)ap->a_data = tundebug;
		break;
	case TUNSLMODE:
		if (*(int *)ap->a_data) {
			tp->tun_flags |= TUN_LMODE;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_LMODE;
		break;
	case TUNSIFHEAD:
		if (*(int *)ap->a_data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_LMODE;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		break;
	case TUNGIFHEAD:
		*(int *)ap->a_data = (tp->tun_flags & TUN_IFHEAD) ? 1 : 0;
		break;
	case TUNSIFMODE:
		/* deny this if UP */
		if (tp->tun_if.if_flags & IFF_UP)
			return(EBUSY);

		switch (*(int *)ap->a_data & ~IFF_MULTICAST) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			tp->tun_if.if_flags &= ~(IFF_BROADCAST|IFF_POINTOPOINT);
			tp->tun_if.if_flags |= *(int *)ap->a_data;
			break;
		default:
			return(EINVAL);
		}
		break;
	case TUNSIFPID:
		tp->tun_pid = curproc->p_pid;
		break;
	case FIOASYNC:
		if (*(int *)ap->a_data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;
	case FIONREAD:
		*(int *)ap->a_data = ifsq_poll_pktlen(
		    ifq_get_subq_default(&tp->tun_if.if_snd));
		break;
	case FIOSETOWN:
		return (fsetown(*(int *)ap->a_data, &tp->tun_sigio));

	case FIOGETOWN:
		*(int *)ap->a_data = fgetown(&tp->tun_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)ap->a_data), &tp->tun_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)ap->a_data = -fgetown(&tp->tun_sigio);
		return (0);

	default:
		return (ENOTTY);
	}
	return (0);
}

/*
 * The ops read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
static	int
tunread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet	*ifp = &tp->tun_if;
	struct ifaltq_subque *ifsq = ifq_get_subq_default(&ifp->if_snd);
	struct mbuf	*m0;
	int		error=0, len;

	TUNDEBUG(ifp, "read\n");
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);
		return EHOSTDOWN;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	ifnet_serialize_all(ifp);

	while ((m0 = ifsq_dequeue(ifsq)) == NULL) {
		if (ap->a_ioflag & IO_NDELAY) {
			ifnet_deserialize_all(ifp);
			return EWOULDBLOCK;
		}
		tp->tun_flags |= TUN_RWAIT;
		ifnet_deserialize_all(ifp);
		if ((error = tsleep(tp, PCATCH, "tunread", 0)) != 0)
			return error;
		ifnet_serialize_all(ifp);
	}

	ifnet_deserialize_all(ifp);

	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = (int)szmin(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), (size_t)len, uio);
		m0 = m_free(m0);
	}

	if (m0) {
		TUNDEBUG(ifp, "Dropping mbuf\n");
		m_freem(m0);
	}
	return error;
}
572 
573 /*
574  * the ops write interface - an atomic write is a packet - or else!
575  */
576 static	int
577 tunwrite(struct dev_write_args *ap)
578 {
579 	cdev_t dev = ap->a_head.a_dev;
580 	struct uio *uio = ap->a_uio;
581 	struct tun_softc *tp = dev->si_drv1;
582 	struct ifnet	*ifp = &tp->tun_if;
583 	struct mbuf	*top, **mp, *m;
584 	int		error=0;
585 	size_t		tlen, mlen;
586 	uint32_t	family;
587 	int		isr;
588 
589 	TUNDEBUG(ifp, "tunwrite\n");
590 
591 	if (uio->uio_resid == 0)
592 		return 0;
593 
594 	if (uio->uio_resid > TUNMRU) {
595 		TUNDEBUG(ifp, "len=%zd!\n", uio->uio_resid);
596 		return EIO;
597 	}
598 	tlen = uio->uio_resid;
599 
600 	/* get a header mbuf */
601 	MGETHDR(m, MB_WAIT, MT_DATA);
602 	if (m == NULL)
603 		return ENOBUFS;
604 	mlen = MHLEN;
605 
606 	top = NULL;
607 	mp = &top;
608 	while (error == 0 && uio->uio_resid > 0) {
609 		m->m_len = (int)szmin(mlen, uio->uio_resid);
610 		error = uiomove(mtod (m, caddr_t), (size_t)m->m_len, uio);
611 		*mp = m;
612 		mp = &m->m_next;
613 		if (uio->uio_resid > 0) {
614 			MGET (m, MB_WAIT, MT_DATA);
615 			if (m == NULL) {
616 				error = ENOBUFS;
617 				break;
618 			}
619 			mlen = MLEN;
620 		}
621 	}
622 	if (error) {
623 		if (top)
624 			m_freem (top);
625 		IFNET_STAT_INC(ifp, ierrors, 1);
626 		return error;
627 	}
628 
629 	top->m_pkthdr.len = (int)tlen;
630 	top->m_pkthdr.rcvif = ifp;
631 
632 	if (ifp->if_bpf) {
633 		bpf_gettoken();
634 
635 		if (ifp->if_bpf) {
636 			if (tp->tun_flags & TUN_IFHEAD) {
637 				/*
638 				 * Conveniently, we already have a 4-byte
639 				 * address family prepended to our packet !
640 				 * Inconveniently, it's in the wrong byte
641 				 * order !
642 				 */
643 				if ((top = m_pullup(top, sizeof(family)))
644 				    == NULL) {
645 					bpf_reltoken();
646 					return ENOBUFS;
647 				}
648 				*mtod(top, u_int32_t *) =
649 				    ntohl(*mtod(top, u_int32_t *));
650 				bpf_mtap(ifp->if_bpf, top);
651 				*mtod(top, u_int32_t *) =
652 				    htonl(*mtod(top, u_int32_t *));
653 			} else {
654 				/*
655 				 * We need to prepend the address family as
656 				 * a four byte field.
657 				 */
658 				static const uint32_t af = AF_INET;
659 
660 				bpf_ptap(ifp->if_bpf, top, &af, sizeof(af));
661 			}
662 		}
663 
664 		bpf_reltoken();
665 	}
666 
667 	if (tp->tun_flags & TUN_IFHEAD) {
668 		if (top->m_len < sizeof(family) &&
669 		    (top = m_pullup(top, sizeof(family))) == NULL)
670 				return ENOBUFS;
671 		family = ntohl(*mtod(top, u_int32_t *));
672 		m_adj(top, sizeof(family));
673 	} else
674 		family = AF_INET;
675 
676 	IFNET_STAT_INC(ifp, ibytes, top->m_pkthdr.len);
677 	IFNET_STAT_INC(ifp, ipackets, 1);
678 
679 	switch (family) {
680 #ifdef INET
681 	case AF_INET:
682 		isr = NETISR_IP;
683 		break;
684 #endif
685 #ifdef INET6
686 	case AF_INET6:
687 		isr = NETISR_IPV6;
688 		break;
689 #endif
690 	default:
691 		m_freem(m);
692 		return (EAFNOSUPPORT);
693 	}
694 
695 	netisr_queue(isr, top);
696 	return (0);
697 }
698 
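/*
 * kqueue support: readers are woken when the send queue holds a packet
 * for them; writes are always considered possible.
 */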
static struct filterops tun_read_filtops =
	{ FILTEROP_ISFD, NULL, tun_filter_detach, tun_filter_read };
static struct filterops tun_write_filtops =
	{ FILTEROP_ISFD, NULL, tun_filter_detach, tun_filter_write };

static int
tunkqfilter(struct dev_kqfilter_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp = dev->si_drv1;
	struct knote *kn = ap->a_kn;
	struct klist *klist;

	ap->a_result = 0;
	ifnet_serialize_all(&tp->tun_if);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &tun_read_filtops;
		kn->kn_hook = (caddr_t)tp;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &tun_write_filtops;
		kn->kn_hook = (caddr_t)tp;
		break;
	default:
		ifnet_deserialize_all(&tp->tun_if);
		ap->a_result = EOPNOTSUPP;
		return (0);
	}

	klist = &tp->tun_rkq.ki_note;
	knote_insert(klist, kn);
	ifnet_deserialize_all(&tp->tun_if);

	return (0);
}

static void
tun_filter_detach(struct knote *kn)
{
	struct tun_softc *tp = (struct tun_softc *)kn->kn_hook;
	struct klist *klist = &tp->tun_rkq.ki_note;

	knote_remove(klist, kn);
}

static int
tun_filter_write(struct knote *kn, long hint)
{
	/* Always ready for a write */
	return (1);
}

static int
tun_filter_read(struct knote *kn, long hint)
{
	struct tun_softc *tp = (struct tun_softc *)kn->kn_hook;
	int ready = 0;

	ifnet_serialize_all(&tp->tun_if);
	if (!ifsq_is_empty(ifq_get_subq_default(&tp->tun_if.if_snd)))
		ready = 1;
	ifnet_deserialize_all(&tp->tun_if);

	return (ready);
}

/*
 * Start packet transmission on the interface.  When the interface
 * queue is rate-limited by ALTQ, if_start is needed to drain packets
 * from the queue in order to notify readers when outgoing packets
 * become ready.
 */
static void
tunstart(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct tun_softc *tp = ifp->if_softc;
	struct mbuf *m;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);

	if (!ifq_is_enabled(&ifp->if_snd))
		return;

	m = ifsq_poll(ifsq);
	if (m != NULL) {
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((caddr_t)tp);
		}
		if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
			pgsigio(tp->tun_sigio, SIGIO, 0);
		ifsq_deserialize_hw(ifsq);
		KNOTE(&tp->tun_rkq.ki_note, 0);
		ifsq_serialize_hw(ifsq);
	}
}