xref: /dragonfly/sys/net/tun/if_tun.c (revision e7d467f4)
1 /*	$NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $	*/
2 
3 /*
4  * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
5  * Nottingham University 1987.
6  *
7  * This source may be freely distributed, however I would be interested
8  * in any changes that are made.
9  *
10  * This driver takes packets off the IP i/f and hands them up to a
11  * user process to have its wicked way with. This driver has it's
12  * roots in a similar driver written by Phil Cockcroft (formerly) at
13  * UCL. This driver is based much more on read/write/poll mode of
14  * operation though.
15  *
16  * $FreeBSD: src/sys/net/if_tun.c,v 1.74.2.8 2002/02/13 00:43:11 dillon Exp $
17  */
18 
19 #include "use_tun.h"
20 #include "opt_inet.h"
21 #include "opt_inet6.h"
22 #include "opt_ipx.h"
23 
24 #include <sys/param.h>
25 #include <sys/proc.h>
26 #include <sys/priv.h>
27 #include <sys/systm.h>
28 #include <sys/mbuf.h>
29 #include <sys/socket.h>
30 #include <sys/conf.h>
31 #include <sys/device.h>
32 #include <sys/filio.h>
33 #include <sys/sockio.h>
34 #include <sys/thread2.h>
35 #include <sys/ttycom.h>
36 #include <sys/signalvar.h>
37 #include <sys/filedesc.h>
38 #include <sys/kernel.h>
39 #include <sys/sysctl.h>
40 #include <sys/uio.h>
41 #include <sys/vnode.h>
42 #include <sys/malloc.h>
43 
44 #include <sys/mplock2.h>
45 
46 #include <net/if.h>
47 #include <net/if_types.h>
48 #include <net/ifq_var.h>
49 #include <net/netisr.h>
50 #include <net/route.h>
51 #include <sys/devfs.h>
52 
53 #ifdef INET
54 #include <netinet/in.h>
55 #endif
56 
57 #include <net/bpf.h>
58 
59 #include "if_tunvar.h"
60 #include "if_tun.h"
61 
62 static MALLOC_DEFINE(M_TUN, "tun", "Tunnel Interface");
63 
64 static void tunattach (void *);
65 PSEUDO_SET(tunattach, if_tun);
66 
67 static void tuncreate (cdev_t dev);
68 
69 #define TUNDEBUG	if (tundebug) if_printf
70 static int tundebug = 0;
71 SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0,
72     "Enable debug output");
73 
74 static int tunoutput (struct ifnet *, struct mbuf *, struct sockaddr *,
75 	    struct rtentry *rt);
76 static int tunifioctl (struct ifnet *, u_long, caddr_t, struct ucred *);
77 static int tuninit (struct ifnet *);
78 static void tunstart(struct ifnet *, struct ifaltq_subque *);
79 static void tun_filter_detach(struct knote *);
80 static int tun_filter_read(struct knote *, long);
81 static int tun_filter_write(struct knote *, long);
82 
83 static	d_open_t	tunopen;
84 static	d_close_t	tunclose;
85 static	d_read_t	tunread;
86 static	d_write_t	tunwrite;
87 static	d_ioctl_t	tunioctl;
88 static	d_kqfilter_t	tunkqfilter;
89 
90 static d_clone_t tunclone;
91 DEVFS_DECLARE_CLONE_BITMAP(tun);
92 
93 #if NTUN <= 1
94 #define TUN_PREALLOCATED_UNITS	4
95 #else
96 #define TUN_PREALLOCATED_UNITS	NTUN
97 #endif
98 
/*
 * Character-device entry points for /dev/tun*.  Packets queued by the
 * network stack are consumed via read(2); packets written via write(2)
 * are injected back into the stack (see tunread()/tunwrite()).
 */
static struct dev_ops tun_ops = {
	{ "tun", 0, 0 },
	.d_open =	tunopen,
	.d_close =	tunclose,
	.d_read =	tunread,
	.d_write =	tunwrite,
	.d_ioctl =	tunioctl,
	.d_kqfilter =	tunkqfilter
};
108 
109 static void
110 tunattach(void *dummy)
111 {
112 	int i;
113 	make_autoclone_dev(&tun_ops, &DEVFS_CLONE_BITMAP(tun),
114 		tunclone, UID_UUCP, GID_DIALER, 0600, "tun");
115 	for (i = 0; i < TUN_PREALLOCATED_UNITS; i++) {
116 		make_dev(&tun_ops, i, UID_UUCP, GID_DIALER, 0600, "tun%d", i);
117 		devfs_clone_bitmap_set(&DEVFS_CLONE_BITMAP(tun), i);
118 	}
119 	/* Doesn't need uninit because unloading is not possible, see PSEUDO_SET */
120 }
121 
122 static int
123 tunclone(struct dev_clone_args *ap)
124 {
125 	int unit;
126 
127 	unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(tun), 0);
128 	ap->a_dev = make_only_dev(&tun_ops, unit, UID_UUCP, GID_DIALER, 0600,
129 								"tun%d", unit);
130 
131 	return 0;
132 }
133 
134 static void
135 tuncreate(cdev_t dev)
136 {
137 	struct tun_softc *sc;
138 	struct ifnet *ifp;
139 
140 #if 0
141 	dev = make_dev(&tun_ops, minor(dev),
142 	    UID_UUCP, GID_DIALER, 0600, "tun%d", lminor(dev));
143 #endif
144 
145 	sc = kmalloc(sizeof(*sc), M_TUN, M_WAITOK | M_ZERO);
146 	sc->tun_flags = TUN_INITED;
147 
148 	ifp = &sc->tun_if;
149 	if_initname(ifp, "tun", lminor(dev));
150 	ifp->if_mtu = TUNMTU;
151 	ifp->if_ioctl = tunifioctl;
152 	ifp->if_output = tunoutput;
153 	ifp->if_start = tunstart;
154 	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
155 	ifp->if_type = IFT_PPP;
156 	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
157 	ifq_set_ready(&ifp->if_snd);
158 	ifp->if_softc = sc;
159 	if_attach(ifp, NULL);
160 	bpfattach(ifp, DLT_NULL, sizeof(u_int));
161 	dev->si_drv1 = sc;
162 }
163 
/*
 * tunopen - device open.  Must be superuser.  Lazily creates the
 * softc/ifnet on first open of a unit; the device is exclusive-open.
 */
static	int
tunopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct ifnet	*ifp;
	struct tun_softc *tp;
	int	error;

	/* Only root may open tunnel devices. */
	if ((error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0)) != 0)
		return (error);

	tp = dev->si_drv1;
	if (!tp) {
		/* First open of this unit: create softc + ifnet now. */
		tuncreate(dev);
		tp = dev->si_drv1;
	}
	/* Exclusive open: one descriptor controls the interface at a time. */
	if (tp->tun_flags & TUN_OPEN)
		return EBUSY;
	tp->tun_pid = curproc->p_pid;	/* remember controlling process */
	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG(ifp, "open\n");
	return (0);
}
192 
/*
 * tunclose - close the device: mark the interface down, discard queued
 * output, purge addresses/routes and detach async-I/O consumers.
 */
static	int
tunclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp;
	struct ifnet	*ifp;

	tp = dev->si_drv1;
	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;
	tp->tun_pid = 0;

	/* Junk all pending output. */
	ifq_purge_all(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP)
		if_down(ifp);
	ifp->if_flags &= ~IFF_RUNNING;
	/* Drop all non-link-level addresses configured on the interface. */
	if_purgeaddrs_nolink(ifp);

	/* Clear the SIGIO recipient and wake any kqueue waiters. */
	funsetown(&tp->tun_sigio);
	KNOTE(&tp->tun_rkq.ki_note, 0);

	TUNDEBUG(ifp, "closed\n");
#if 0
	/* NOTE(review): unit recycling of cloned devices is disabled. */
	if (dev->si_uminor >= TUN_PREALLOCATED_UNITS) {
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(tun), dev->si_uminor);
	}
#endif
	return (0);
}
229 
/*
 * tuninit - mark the interface up & running and scan its address list;
 * when an IPv4 address with a non-zero host address is configured, set
 * TUN_IASET in the softc (part of the TUN_READY condition).
 */
static int
tuninit(struct ifnet *ifp)
{
#ifdef INET
	struct tun_softc *tp = ifp->if_softc;
#endif
	struct ifaddr_container *ifac;
	int error = 0;

	TUNDEBUG(ifp, "tuninit\n");

	ifp->if_flags |= IFF_UP | IFF_RUNNING;
	getmicrotime(&ifp->if_lastchange);

	/* Walk this cpu's address list looking for a usable INET address. */
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (ifa->ifa_addr == NULL) {
			error = EFAULT;
			/* XXX: Should maybe return straight off? */
		} else {
#ifdef INET
			if (ifa->ifa_addr->sa_family == AF_INET) {
			    struct sockaddr_in *si;

			    si = (struct sockaddr_in *)ifa->ifa_addr;
			    if (si->sin_addr.s_addr)
				    tp->tun_flags |= TUN_IASET;
			}
#endif
		}
	}
	return (error);
}
264 
/*
 * Process an ioctl request on the network interface (SIOC*), as opposed
 * to the character device (see tunioctl() for the latter).
 *
 * MPSAFE
 */
int
tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct tun_softc *tp = ifp->if_softc;
	struct ifstat *ifs;
	int error = 0;

	switch(cmd) {
	case SIOCGIFSTATUS:
		/* Append the controlling PID to the status text, if any. */
		ifs = (struct ifstat *)data;
		if (tp->tun_pid)
			ksprintf(ifs->ascii + strlen(ifs->ascii),
			    "\tOpened by PID %d\n", tp->tun_pid);
		break;
	case SIOCSIFADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "address set, error=%d\n", error);
		break;
	case SIOCSIFDSTADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "destination address set, error=%d\n", error);
		break;
	case SIOCSIFMTU:
		/* NOTE(review): no range check on ifr_mtu here. */
		ifp->if_mtu = ifr->ifr_mtu;
		TUNDEBUG(ifp, "mtu set\n");
		break;
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Accepted, no driver action required. */
		break;
	default:
		error = EINVAL;
	}
	return (error);
}
306 
/*
 * tunoutput - queue packets from higher level ready to put out.
 * Runs with the ifnet serialized (see the tunoutput() wrapper).
 *
 * MPSAFE
 */
static int
tunoutput_serialized(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
		     struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	int error;
	struct altq_pktattr pktattr;

	TUNDEBUG(ifp, "tunoutput\n");

	/* Drop unless the device is open and an address has been set. */
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);
		m_freem (m0);
		return EHOSTDOWN;
	}

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	ifq_classify(&ifp->if_snd, m0, dst->sa_family, &pktattr);

	/*
	 * BPF write needs to be handled specially: the real address
	 * family is carried as a leading int in the mbuf data.
	 */
	if (dst->sa_family == AF_UNSPEC) {
		dst->sa_family = *(mtod(m0, int *));
		m0->m_len -= sizeof(int);
		m0->m_pkthdr.len -= sizeof(int);
		m0->m_data += sizeof(int);
	}

	if (ifp->if_bpf) {
		bpf_gettoken();
		/* Re-check after acquiring the BPF token. */
		if (ifp->if_bpf) {
			/*
			 * We need to prepend the address family as
			 * a four byte field.
			 */
			uint32_t af = dst->sa_family;

			bpf_ptap(ifp->if_bpf, m0, &af, sizeof(af));
		}
		bpf_reltoken();
	}

	/* prepend sockaddr? this may abort if the mbuf allocation fails */
	if (tp->tun_flags & TUN_LMODE) {
		/* allocate space for sockaddr */
		M_PREPEND(m0, dst->sa_len, MB_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL){
			IFNET_STAT_INC(ifp, oerrors, 1);
			return (ENOBUFS);
		} else {
			bcopy(dst, m0->m_data, dst->sa_len);
		}
	}

	if (tp->tun_flags & TUN_IFHEAD) {
		/* Prepend the address family (network byte order) */
		M_PREPEND(m0, 4, MB_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL){
			IFNET_STAT_INC(ifp, oerrors, 1);
			return ENOBUFS;
		} else
			*(u_int32_t *)m0->m_data = htonl(dst->sa_family);
	} else {
		/* Without TUN_IFHEAD only raw INET packets are accepted. */
#ifdef INET
		if (dst->sa_family != AF_INET)
#endif
		{
			m_freem(m0);
			return EAFNOSUPPORT;
		}
	}

	error = ifq_handoff(ifp, m0, &pktattr);
	if (error) {
		IFNET_STAT_INC(ifp, collisions, 1);
	} else {
		IFNET_STAT_INC(ifp, opackets, 1);
		/* Wake a reader blocked in tunread(). */
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((caddr_t)tp);
		}
		/* pgsigio() is called under the MP lock. */
		get_mplock();
		if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
			pgsigio(tp->tun_sigio, SIGIO, 0);
		rel_mplock();
		/*
		 * KNOTE is delivered with the ifnet deserialized —
		 * presumably to avoid lock-order issues with kqueue.
		 */
		ifnet_deserialize_all(ifp);
		KNOTE(&tp->tun_rkq.ki_note, 0);
		ifnet_serialize_all(ifp);
	}
	return (error);
}
409 
/*
 * tunoutput - if_output entry point; wraps tunoutput_serialized() with
 * the full ifnet serializer.
 */
static int
tunoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
	  struct rtentry *rt)
{
	int rv;

	ifnet_serialize_all(ifp);
	rv = tunoutput_serialized(ifp, m0, dst, rt);
	ifnet_deserialize_all(ifp);
	return rv;
}
422 
/*
 * the ops interface is now pretty minimal.
 * Handles the tun-specific (TUN*) plus generic async-I/O ioctls on the
 * character device.
 */
static	int
tunioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp = dev->si_drv1;
 	struct tuninfo *tunp;

	switch (ap->a_cmd) {
	case TUNSIFINFO:
		/* Set interface mtu/type/baudrate from userland. */
		tunp = (struct tuninfo *)ap->a_data;
		if (tunp->mtu < IF_MINMTU)
			return (EINVAL);
		tp->tun_if.if_mtu = tunp->mtu;
		tp->tun_if.if_type = tunp->type;
		tp->tun_if.if_baudrate = tunp->baudrate;
		break;
	case TUNGIFINFO:
		tunp = (struct tuninfo *)ap->a_data;
		tunp->mtu = tp->tun_if.if_mtu;
		tunp->type = tp->tun_if.if_type;
		tunp->baudrate = tp->tun_if.if_baudrate;
		break;
	case TUNSDEBUG:
		tundebug = *(int *)ap->a_data;
		break;
	case TUNGDEBUG:
		*(int *)ap->a_data = tundebug;
		break;
	case TUNSLMODE:
		/* Link-layer mode (prepend sockaddr); excludes IFHEAD mode. */
		if (*(int *)ap->a_data) {
			tp->tun_flags |= TUN_LMODE;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_LMODE;
		break;
	case TUNSIFHEAD:
		/* AF-header mode (4-byte family word); excludes LMODE. */
		if (*(int *)ap->a_data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_LMODE;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		break;
	case TUNGIFHEAD:
		*(int *)ap->a_data = (tp->tun_flags & TUN_IFHEAD) ? 1 : 0;
		break;
	case TUNSIFMODE:
		/* deny this if UP */
		if (tp->tun_if.if_flags & IFF_UP)
			return(EBUSY);

		switch (*(int *)ap->a_data & ~IFF_MULTICAST) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			tp->tun_if.if_flags &= ~(IFF_BROADCAST|IFF_POINTOPOINT);
			tp->tun_if.if_flags |= *(int *)ap->a_data;
			break;
		default:
			return(EINVAL);
		}
		break;
	case TUNSIFPID:
		/* Claim ownership for the calling process. */
		tp->tun_pid = curproc->p_pid;
		break;
	case FIOASYNC:
		if (*(int *)ap->a_data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;
	case FIONREAD:
		/* Report the byte length of the next queued packet. */
		if (!ifsq_is_empty(ifq_get_subq_default(&tp->tun_if.if_snd))) {
			struct mbuf *mb;

			mb = ifsq_poll(
			    ifq_get_subq_default(&tp->tun_if.if_snd));
			for( *(int *)ap->a_data = 0; mb != NULL; mb = mb->m_next)
				*(int *)ap->a_data += mb->m_len;
		} else {
			*(int *)ap->a_data = 0;
		}
		break;
	case FIOSETOWN:
		return (fsetown(*(int *)ap->a_data, &tp->tun_sigio));

	case FIOGETOWN:
		*(int *)ap->a_data = fgetown(&tp->tun_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)ap->a_data), &tp->tun_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)ap->a_data = -fgetown(&tp->tun_sigio);
		return (0);

	default:
		return (ENOTTY);
	}
	return (0);
}
528 
/*
 * The ops read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.  Blocks (interruptibly)
 * until a packet is queued unless IO_NDELAY is set.
 */
static	int
tunread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet	*ifp = &tp->tun_if;
	struct ifaltq_subque *ifsq = ifq_get_subq_default(&ifp->if_snd);
	struct mbuf	*m0;
	int		error=0, len;

	TUNDEBUG(ifp, "read\n");
	/* Nothing to read unless the device is open and an address set. */
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);
		return EHOSTDOWN;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	ifnet_serialize_all(ifp);

	/* Wait for a packet; writers wake us via wakeup(tp). */
	while ((m0 = ifsq_dequeue(ifsq, NULL)) == NULL) {
		if (ap->a_ioflag & IO_NDELAY) {
			ifnet_deserialize_all(ifp);
			return EWOULDBLOCK;
		}
		tp->tun_flags |= TUN_RWAIT;
		ifnet_deserialize_all(ifp);
		if ((error = tsleep(tp, PCATCH, "tunread", 0)) != 0)
			return error;
		ifnet_serialize_all(ifp);
	}

	ifnet_deserialize_all(ifp);

	/* Copy out as much of the packet as the caller's buffer holds. */
	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = (int)szmin(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), (size_t)len, uio);
		m0 = m_free(m0);
	}

	/* Whatever did not fit is dropped: reads are packet-atomic. */
	if (m0) {
		TUNDEBUG(ifp, "Dropping mbuf\n");
		m_freem(m0);
	}
	return error;
}
581 
582 /*
583  * the ops write interface - an atomic write is a packet - or else!
584  */
585 static	int
586 tunwrite(struct dev_write_args *ap)
587 {
588 	cdev_t dev = ap->a_head.a_dev;
589 	struct uio *uio = ap->a_uio;
590 	struct tun_softc *tp = dev->si_drv1;
591 	struct ifnet	*ifp = &tp->tun_if;
592 	struct mbuf	*top, **mp, *m;
593 	int		error=0;
594 	size_t		tlen, mlen;
595 	uint32_t	family;
596 	int		isr;
597 
598 	TUNDEBUG(ifp, "tunwrite\n");
599 
600 	if (uio->uio_resid == 0)
601 		return 0;
602 
603 	if (uio->uio_resid > TUNMRU) {
604 		TUNDEBUG(ifp, "len=%zd!\n", uio->uio_resid);
605 		return EIO;
606 	}
607 	tlen = uio->uio_resid;
608 
609 	/* get a header mbuf */
610 	MGETHDR(m, MB_WAIT, MT_DATA);
611 	if (m == NULL)
612 		return ENOBUFS;
613 	mlen = MHLEN;
614 
615 	top = NULL;
616 	mp = &top;
617 	while (error == 0 && uio->uio_resid > 0) {
618 		m->m_len = (int)szmin(mlen, uio->uio_resid);
619 		error = uiomove(mtod (m, caddr_t), (size_t)m->m_len, uio);
620 		*mp = m;
621 		mp = &m->m_next;
622 		if (uio->uio_resid > 0) {
623 			MGET (m, MB_WAIT, MT_DATA);
624 			if (m == NULL) {
625 				error = ENOBUFS;
626 				break;
627 			}
628 			mlen = MLEN;
629 		}
630 	}
631 	if (error) {
632 		if (top)
633 			m_freem (top);
634 		IFNET_STAT_INC(ifp, ierrors, 1);
635 		return error;
636 	}
637 
638 	top->m_pkthdr.len = (int)tlen;
639 	top->m_pkthdr.rcvif = ifp;
640 
641 	if (ifp->if_bpf) {
642 		bpf_gettoken();
643 
644 		if (ifp->if_bpf) {
645 			if (tp->tun_flags & TUN_IFHEAD) {
646 				/*
647 				 * Conveniently, we already have a 4-byte
648 				 * address family prepended to our packet !
649 				 * Inconveniently, it's in the wrong byte
650 				 * order !
651 				 */
652 				if ((top = m_pullup(top, sizeof(family)))
653 				    == NULL) {
654 					bpf_reltoken();
655 					return ENOBUFS;
656 				}
657 				*mtod(top, u_int32_t *) =
658 				    ntohl(*mtod(top, u_int32_t *));
659 				bpf_mtap(ifp->if_bpf, top);
660 				*mtod(top, u_int32_t *) =
661 				    htonl(*mtod(top, u_int32_t *));
662 			} else {
663 				/*
664 				 * We need to prepend the address family as
665 				 * a four byte field.
666 				 */
667 				static const uint32_t af = AF_INET;
668 
669 				bpf_ptap(ifp->if_bpf, top, &af, sizeof(af));
670 			}
671 		}
672 
673 		bpf_reltoken();
674 	}
675 
676 	if (tp->tun_flags & TUN_IFHEAD) {
677 		if (top->m_len < sizeof(family) &&
678 		    (top = m_pullup(top, sizeof(family))) == NULL)
679 				return ENOBUFS;
680 		family = ntohl(*mtod(top, u_int32_t *));
681 		m_adj(top, sizeof(family));
682 	} else
683 		family = AF_INET;
684 
685 	IFNET_STAT_INC(ifp, ibytes, top->m_pkthdr.len);
686 	IFNET_STAT_INC(ifp, ipackets, 1);
687 
688 	switch (family) {
689 #ifdef INET
690 	case AF_INET:
691 		isr = NETISR_IP;
692 		break;
693 #endif
694 #ifdef INET6
695 	case AF_INET6:
696 		isr = NETISR_IPV6;
697 		break;
698 #endif
699 #ifdef IPX
700 	case AF_IPX:
701 		isr = NETISR_IPX;
702 		break;
703 #endif
704 	default:
705 		m_freem(m);
706 		return (EAFNOSUPPORT);
707 	}
708 
709 	netisr_queue(isr, top);
710 	return (0);
711 }
712 
/*
 * kqueue filter ops.  Both filters attach to the single tun_rkq klist;
 * the write filter simply always reports ready.
 */
static struct filterops tun_read_filtops =
	{ FILTEROP_ISFD, NULL, tun_filter_detach, tun_filter_read };
static struct filterops tun_write_filtops =
	{ FILTEROP_ISFD, NULL, tun_filter_detach, tun_filter_write };
717 
718 static int
719 tunkqfilter(struct dev_kqfilter_args *ap)
720 {
721 	cdev_t dev = ap->a_head.a_dev;
722 	struct tun_softc *tp = dev->si_drv1;
723 	struct knote *kn = ap->a_kn;
724 	struct klist *klist;
725 
726 	ap->a_result = 0;
727 	ifnet_serialize_all(&tp->tun_if);
728 
729 	switch (kn->kn_filter) {
730 	case EVFILT_READ:
731 		kn->kn_fop = &tun_read_filtops;
732 		kn->kn_hook = (caddr_t)tp;
733 		break;
734 	case EVFILT_WRITE:
735 		kn->kn_fop = &tun_write_filtops;
736 		kn->kn_hook = (caddr_t)tp;
737 		break;
738 	default:
739 		ifnet_deserialize_all(&tp->tun_if);
740 		ap->a_result = EOPNOTSUPP;
741 		return (0);
742 	}
743 
744 	klist = &tp->tun_rkq.ki_note;
745 	knote_insert(klist, kn);
746 	ifnet_deserialize_all(&tp->tun_if);
747 
748 	return (0);
749 }
750 
751 static void
752 tun_filter_detach(struct knote *kn)
753 {
754 	struct tun_softc *tp = (struct tun_softc *)kn->kn_hook;
755 	struct klist *klist = &tp->tun_rkq.ki_note;
756 
757 	knote_remove(klist, kn);
758 }
759 
/*
 * Write filter: the device can always accept another packet.
 */
static int
tun_filter_write(struct knote *kn, long hint)
{
	return (1);	/* always writable */
}
766 
767 static int
768 tun_filter_read(struct knote *kn, long hint)
769 {
770 	struct tun_softc *tp = (struct tun_softc *)kn->kn_hook;
771 	int ready = 0;
772 
773 	ifnet_serialize_all(&tp->tun_if);
774 	if (!ifsq_is_empty(ifq_get_subq_default(&tp->tun_if.if_snd)))
775 		ready = 1;
776 	ifnet_deserialize_all(&tp->tun_if);
777 
778 	return (ready);
779 }
780 
/*
 * Start packet transmission on the interface.
 * when the interface queue is rate-limited by ALTQ,
 * if_start is needed to drain packets from the queue in order
 * to notify readers when outgoing packets become ready.
 */
static void
tunstart(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct tun_softc *tp = ifp->if_softc;
	struct mbuf *m;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);

	/* Only needed when ALTQ is enabled on the queue. */
	if (!ifq_is_enabled(&ifp->if_snd))
		return;

	m = ifsq_poll(ifsq);
	if (m != NULL) {
		/* Wake a blocked tunread()... */
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((caddr_t)tp);
		}
		/* ...notify async-I/O listeners... */
		if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
			pgsigio(tp->tun_sigio, SIGIO, 0);
		/* ...and kqueue readers, with the hw serializer dropped. */
		ifsq_deserialize_hw(ifsq);
		KNOTE(&tp->tun_rkq.ki_note, 0);
		ifsq_serialize_hw(ifsq);
	}
}
811