/*	$NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/poll mode of
 * operation though.
 *
 * $FreeBSD: src/sys/net/if_tun.c,v 1.74.2.8 2002/02/13 00:43:11 dillon Exp $
 */

#include "use_tun.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipx.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/thread2.h>
#include <sys/ttycom.h>
#include <sys/signalvar.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/malloc.h>

#include <sys/mplock2.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/netisr.h>
#include <net/route.h>
#include <sys/devfs.h>

#ifdef INET
#include <netinet/in.h>
#endif

#include <net/bpf.h>

#include "if_tunvar.h"
#include "if_tun.h"

static MALLOC_DEFINE(M_TUN, "tun", "Tunnel Interface");

static void tunattach (void *);
PSEUDO_SET(tunattach, if_tun);

static void tuncreate (cdev_t dev);

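/*
 * TUNDEBUG() output is only emitted when the debug.if_tun_debug sysctl
 * is enabled.
 */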
#define TUNDEBUG	if (tundebug) if_printf
static int tundebug = 0;
SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0,
    "Enable debug output");

static int tunoutput (struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *rt);
static int tunifioctl (struct ifnet *, u_long, caddr_t, struct ucred *);
static int tuninit (struct ifnet *);
static void tunstart(struct ifnet *, struct ifaltq_subque *);
static void tun_filter_detach(struct knote *);
static int tun_filter_read(struct knote *, long);
static int tun_filter_write(struct knote *, long);

static	d_open_t	tunopen;
static	d_close_t	tunclose;
static	d_read_t	tunread;
static	d_write_t	tunwrite;
static	d_ioctl_t	tunioctl;
static	d_kqfilter_t	tunkqfilter;

static d_clone_t tunclone;
DEVFS_DECLARE_CLONE_BITMAP(tun);

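/*
 * Always pre-create at least four units; if the kernel configuration
 * asks for more, pre-create that many instead.
 */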
#if NTUN <= 1
#define TUN_PREALLOCATED_UNITS	4
#else
#define TUN_PREALLOCATED_UNITS	NTUN
#endif

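/*
 * Character-device entry points.  The network-interface side
 * (if_output, if_start, if_ioctl) is wired up in tuncreate().
 */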
static struct dev_ops tun_ops = {
	{ "tun", 0, 0 },
	.d_open =	tunopen,
	.d_close =	tunclose,
	.d_read =	tunread,
	.d_write =	tunwrite,
	.d_ioctl =	tunioctl,
	.d_kqfilter =	tunkqfilter
};

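/*
 * Pseudo-device attach: register the autoclone handler and pre-create
 * the first TUN_PREALLOCATED_UNITS device nodes.
 */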
static void
tunattach(void *dummy)
{
	int i;
	make_autoclone_dev(&tun_ops, &DEVFS_CLONE_BITMAP(tun),
		tunclone, UID_UUCP, GID_DIALER, 0600, "tun");
	for (i = 0; i < TUN_PREALLOCATED_UNITS; i++) {
		make_dev(&tun_ops, i, UID_UUCP, GID_DIALER, 0600, "tun%d", i);
		devfs_clone_bitmap_set(&DEVFS_CLONE_BITMAP(tun), i);
	}
	/* Doesn't need uninit because unloading is not possible, see PSEUDO_SET */
}

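/*
 * devfs clone handler: hand out the next free unit from the clone
 * bitmap and create its device node.
 */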
static int
tunclone(struct dev_clone_args *ap)
{
	int unit;

	unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(tun), 0);
	ap->a_dev = make_only_dev(&tun_ops, unit, UID_UUCP, GID_DIALER, 0600,
								"tun%d", unit);

	return 0;
}

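/*
 * Allocate the softc and attach the network interface for a device
 * node; called from tunopen() the first time a unit is opened.
 */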
static void
tuncreate(cdev_t dev)
{
	struct tun_softc *sc;
	struct ifnet *ifp;

#if 0
	dev = make_dev(&tun_ops, minor(dev),
	    UID_UUCP, GID_DIALER, 0600, "tun%d", lminor(dev));
#endif

	sc = kmalloc(sizeof(*sc), M_TUN, M_WAITOK | M_ZERO);
	sc->tun_flags = TUN_INITED;

	ifp = &sc->tun_if;
	if_initname(ifp, "tun", lminor(dev));
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tunifioctl;
	ifp->if_output = tunoutput;
	ifp->if_start = tunstart;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PPP;
	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
	ifq_set_ready(&ifp->if_snd);
	ifp->if_softc = sc;
	if_attach(ifp, NULL);
	bpfattach(ifp, DLT_NULL, sizeof(u_int));
	dev->si_drv1 = sc;
}

/*
 * tunnel open - must be superuser & the device must be
 * configured in
 */
static	int
tunopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct ifnet	*ifp;
	struct tun_softc *tp;
	int	error;

	if ((error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0)) != 0)
		return (error);

	tp = dev->si_drv1;
	if (!tp) {
		tuncreate(dev);
		tp = dev->si_drv1;
	}
	if (tp->tun_flags & TUN_OPEN)
		return EBUSY;
	tp->tun_pid = curproc->p_pid;
	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG(ifp, "open\n");
	return (0);
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
static	int
tunclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp;
	struct ifnet	*ifp;

	tp = dev->si_drv1;
	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;
	tp->tun_pid = 0;

	/* Junk all pending output. */
	ifq_purge_all(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP)
		if_down(ifp);
	ifp->if_flags &= ~IFF_RUNNING;
	if_purgeaddrs_nolink(ifp);

	funsetown(&tp->tun_sigio);
	KNOTE(&tp->tun_rkq.ki_note, 0);

	TUNDEBUG(ifp, "closed\n");
#if 0
	if (dev->si_uminor >= TUN_PREALLOCATED_UNITS) {
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(tun), dev->si_uminor);
	}
#endif
	return (0);
}

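/*
 * Mark the interface up and running and, with INET, record in the
 * softc whether an IPv4 address has been assigned (TUN_IASET).
 */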
static int
tuninit(struct ifnet *ifp)
{
#ifdef INET
	struct tun_softc *tp = ifp->if_softc;
#endif
	struct ifaddr_container *ifac;
	int error = 0;

	TUNDEBUG(ifp, "tuninit\n");

	ifp->if_flags |= IFF_UP | IFF_RUNNING;
	getmicrotime(&ifp->if_lastchange);

	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (ifa->ifa_addr == NULL) {
			error = EFAULT;
			/* XXX: Should maybe return straight off? */
		} else {
#ifdef INET
			if (ifa->ifa_addr->sa_family == AF_INET) {
			    struct sockaddr_in *si;

			    si = (struct sockaddr_in *)ifa->ifa_addr;
			    if (si->sin_addr.s_addr)
				    tp->tun_flags |= TUN_IASET;
			}
#endif
		}
	}
	return (error);
}

/*
 * Process an ioctl request.
 *
 * MPSAFE
 */
int
tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct tun_softc *tp = ifp->if_softc;
	struct ifstat *ifs;
	int error = 0;

	switch(cmd) {
	case SIOCGIFSTATUS:
		ifs = (struct ifstat *)data;
		if (tp->tun_pid)
			ksprintf(ifs->ascii + strlen(ifs->ascii),
			    "\tOpened by PID %d\n", tp->tun_pid);
		break;
	case SIOCSIFADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "address set, error=%d\n", error);
		break;
	case SIOCSIFDSTADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "destination address set, error=%d\n", error);
		break;
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		TUNDEBUG(ifp, "mtu set\n");
		break;
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	default:
		error = EINVAL;
	}
	return (error);
}

/*
 * tunoutput - queue packets from higher level ready to put out.
 *
 * MPSAFE
 */
static int
tunoutput_serialized(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
		     struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	int error;
	struct altq_pktattr pktattr;

	TUNDEBUG(ifp, "tunoutput\n");

	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);
		m_freem (m0);
		return EHOSTDOWN;
	}

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	ifq_classify(&ifp->if_snd, m0, dst->sa_family, &pktattr);

	/* BPF write needs to be handled specially */
	if (dst->sa_family == AF_UNSPEC) {
		dst->sa_family = *(mtod(m0, int *));
		m0->m_len -= sizeof(int);
		m0->m_pkthdr.len -= sizeof(int);
		m0->m_data += sizeof(int);
	}

	if (ifp->if_bpf) {
		bpf_gettoken();
		if (ifp->if_bpf) {
			/*
			 * We need to prepend the address family as
			 * a four byte field.
			 */
			uint32_t af = dst->sa_family;

			bpf_ptap(ifp->if_bpf, m0, &af, sizeof(af));
		}
		bpf_reltoken();
	}

	/* prepend sockaddr? this may abort if the mbuf allocation fails */
	if (tp->tun_flags & TUN_LMODE) {
		/* allocate space for sockaddr */
		M_PREPEND(m0, dst->sa_len, MB_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL){
			IFNET_STAT_INC(ifp, oerrors, 1);
			return (ENOBUFS);
		} else {
			bcopy(dst, m0->m_data, dst->sa_len);
		}
	}

	if (tp->tun_flags & TUN_IFHEAD) {
		/* Prepend the address family */
		M_PREPEND(m0, 4, MB_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL){
			IFNET_STAT_INC(ifp, oerrors, 1);
			return ENOBUFS;
		} else
			*(u_int32_t *)m0->m_data = htonl(dst->sa_family);
	} else {
#ifdef INET
		if (dst->sa_family != AF_INET)
#endif
		{
			m_freem(m0);
			return EAFNOSUPPORT;
		}
	}

	error = ifq_handoff(ifp, m0, &pktattr);
	if (error) {
		IFNET_STAT_INC(ifp, collisions, 1);
	} else {
		IFNET_STAT_INC(ifp, opackets, 1);
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((caddr_t)tp);
		}
		get_mplock();
		if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
			pgsigio(tp->tun_sigio, SIGIO, 0);
		rel_mplock();
		ifnet_deserialize_all(ifp);
		KNOTE(&tp->tun_rkq.ki_note, 0);
		ifnet_serialize_all(ifp);
	}
	return (error);
}

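/*
 * if_output entry point: hold the interface serializer around the
 * real work in tunoutput_serialized().
 */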
static int
tunoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
	  struct rtentry *rt)
{
	int error;

	ifnet_serialize_all(ifp);
	error = tunoutput_serialized(ifp, m0, dst, rt);
	ifnet_deserialize_all(ifp);

	return error;
}

/*
 * the ops interface is now pretty minimal.
 */
static	int
tunioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp = dev->si_drv1;
	struct tuninfo *tunp;

	switch (ap->a_cmd) {
	case TUNSIFINFO:
		tunp = (struct tuninfo *)ap->a_data;
		if (tunp->mtu < IF_MINMTU)
			return (EINVAL);
		tp->tun_if.if_mtu = tunp->mtu;
		tp->tun_if.if_type = tunp->type;
		tp->tun_if.if_baudrate = tunp->baudrate;
		break;
	case TUNGIFINFO:
		tunp = (struct tuninfo *)ap->a_data;
		tunp->mtu = tp->tun_if.if_mtu;
		tunp->type = tp->tun_if.if_type;
		tunp->baudrate = tp->tun_if.if_baudrate;
		break;
	case TUNSDEBUG:
		tundebug = *(int *)ap->a_data;
		break;
	case TUNGDEBUG:
		*(int *)ap->a_data = tundebug;
		break;
	case TUNSLMODE:
		if (*(int *)ap->a_data) {
			tp->tun_flags |= TUN_LMODE;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_LMODE;
		break;
	case TUNSIFHEAD:
		if (*(int *)ap->a_data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_LMODE;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		break;
	case TUNGIFHEAD:
		*(int *)ap->a_data = (tp->tun_flags & TUN_IFHEAD) ? 1 : 0;
		break;
	case TUNSIFMODE:
		/* deny this if UP */
		if (tp->tun_if.if_flags & IFF_UP)
			return(EBUSY);

		switch (*(int *)ap->a_data & ~IFF_MULTICAST) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			tp->tun_if.if_flags &= ~(IFF_BROADCAST|IFF_POINTOPOINT);
			tp->tun_if.if_flags |= *(int *)ap->a_data;
			break;
		default:
			return(EINVAL);
		}
		break;
	case TUNSIFPID:
		tp->tun_pid = curproc->p_pid;
		break;
	case FIOASYNC:
		if (*(int *)ap->a_data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;
	case FIONREAD:
		*(int *)ap->a_data = ifsq_poll_pktlen(
		    ifq_get_subq_default(&tp->tun_if.if_snd));
		break;
	case FIOSETOWN:
		return (fsetown(*(int *)ap->a_data, &tp->tun_sigio));

	case FIOGETOWN:
		*(int *)ap->a_data = fgetown(&tp->tun_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)ap->a_data), &tp->tun_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)ap->a_data = -fgetown(&tp->tun_sigio);
		return (0);

	default:
		return (ENOTTY);
	}
	return (0);
}

/*
 * The ops read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
static	int
tunread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet	*ifp = &tp->tun_if;
	struct ifaltq_subque *ifsq = ifq_get_subq_default(&ifp->if_snd);
	struct mbuf	*m0;
	int		error=0, len;

	TUNDEBUG(ifp, "read\n");
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);
		return EHOSTDOWN;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	ifnet_serialize_all(ifp);

	while ((m0 = ifsq_dequeue(ifsq)) == NULL) {
		if (ap->a_ioflag & IO_NDELAY) {
			ifnet_deserialize_all(ifp);
			return EWOULDBLOCK;
		}
		tp->tun_flags |= TUN_RWAIT;
		ifnet_deserialize_all(ifp);
		if ((error = tsleep(tp, PCATCH, "tunread", 0)) != 0)
			return error;
		ifnet_serialize_all(ifp);
	}

	ifnet_deserialize_all(ifp);

	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = (int)szmin(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), (size_t)len, uio);
		m0 = m_free(m0);
	}

	if (m0) {
		TUNDEBUG(ifp, "Dropping mbuf\n");
		m_freem(m0);
	}
	return error;
}

/*
 * the ops write interface - an atomic write is a packet - or else!
 */
static	int
tunwrite(struct dev_write_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet	*ifp = &tp->tun_if;
	struct mbuf	*top, **mp, *m;
	int		error=0;
	size_t		tlen, mlen;
	uint32_t	family;
	int		isr;

	TUNDEBUG(ifp, "tunwrite\n");

	if (uio->uio_resid == 0)
		return 0;

	if (uio->uio_resid > TUNMRU) {
		TUNDEBUG(ifp, "len=%zd!\n", uio->uio_resid);
		return EIO;
	}
	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, MB_WAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	mlen = MHLEN;

	top = NULL;
	mp = &top;
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = (int)szmin(mlen, uio->uio_resid);
		error = uiomove(mtod (m, caddr_t), (size_t)m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (uio->uio_resid > 0) {
			MGET (m, MB_WAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		if (top)
			m_freem (top);
		IFNET_STAT_INC(ifp, ierrors, 1);
		return error;
	}

	top->m_pkthdr.len = (int)tlen;
	top->m_pkthdr.rcvif = ifp;

	if (ifp->if_bpf) {
		bpf_gettoken();

		if (ifp->if_bpf) {
			if (tp->tun_flags & TUN_IFHEAD) {
				/*
				 * Conveniently, we already have a 4-byte
				 * address family prepended to our packet !
				 * Inconveniently, it's in the wrong byte
				 * order !
				 */
				if ((top = m_pullup(top, sizeof(family)))
				    == NULL) {
					bpf_reltoken();
					return ENOBUFS;
				}
				*mtod(top, u_int32_t *) =
				    ntohl(*mtod(top, u_int32_t *));
				bpf_mtap(ifp->if_bpf, top);
				*mtod(top, u_int32_t *) =
				    htonl(*mtod(top, u_int32_t *));
			} else {
				/*
				 * We need to prepend the address family as
				 * a four byte field.
				 */
				static const uint32_t af = AF_INET;

				bpf_ptap(ifp->if_bpf, top, &af, sizeof(af));
			}
		}

		bpf_reltoken();
	}

	if (tp->tun_flags & TUN_IFHEAD) {
		if (top->m_len < sizeof(family) &&
		    (top = m_pullup(top, sizeof(family))) == NULL)
				return ENOBUFS;
		family = ntohl(*mtod(top, u_int32_t *));
		m_adj(top, sizeof(family));
	} else
		family = AF_INET;

	IFNET_STAT_INC(ifp, ibytes, top->m_pkthdr.len);
	IFNET_STAT_INC(ifp, ipackets, 1);

	switch (family) {
#ifdef INET
	case AF_INET:
		isr = NETISR_IP;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		isr = NETISR_IPV6;
		break;
#endif
#ifdef IPX
	case AF_IPX:
		isr = NETISR_IPX;
		break;
#endif
	default:
		m_freem(top);
		return (EAFNOSUPPORT);
	}

	netisr_queue(isr, top);
	return (0);
}

static struct filterops tun_read_filtops =
	{ FILTEROP_ISFD, NULL, tun_filter_detach, tun_filter_read };
static struct filterops tun_write_filtops =
	{ FILTEROP_ISFD, NULL, tun_filter_detach, tun_filter_write };

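/*
 * kqueue attach: hook the knote up to the matching filter ops.  Both
 * read and write knotes are kept on the read kqinfo's note list.
 */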
static int
tunkqfilter(struct dev_kqfilter_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp = dev->si_drv1;
	struct knote *kn = ap->a_kn;
	struct klist *klist;

	ap->a_result = 0;
	ifnet_serialize_all(&tp->tun_if);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &tun_read_filtops;
		kn->kn_hook = (caddr_t)tp;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &tun_write_filtops;
		kn->kn_hook = (caddr_t)tp;
		break;
	default:
		ifnet_deserialize_all(&tp->tun_if);
		ap->a_result = EOPNOTSUPP;
		return (0);
	}

	klist = &tp->tun_rkq.ki_note;
	knote_insert(klist, kn);
	ifnet_deserialize_all(&tp->tun_if);

	return (0);
}

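/*
 * kqueue filters: a read knote fires when the send queue is non-empty,
 * a write knote is always ready, and detach removes the knote from the
 * shared note list.
 */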
static void
tun_filter_detach(struct knote *kn)
{
	struct tun_softc *tp = (struct tun_softc *)kn->kn_hook;
	struct klist *klist = &tp->tun_rkq.ki_note;

	knote_remove(klist, kn);
}

static int
tun_filter_write(struct knote *kn, long hint)
{
	/* Always ready for a write */
	return (1);
}

static int
tun_filter_read(struct knote *kn, long hint)
{
	struct tun_softc *tp = (struct tun_softc *)kn->kn_hook;
	int ready = 0;

	ifnet_serialize_all(&tp->tun_if);
	if (!ifsq_is_empty(ifq_get_subq_default(&tp->tun_if.if_snd)))
		ready = 1;
	ifnet_deserialize_all(&tp->tun_if);

	return (ready);
}

/*
 * Start packet transmission on the interface.  When the interface
 * queue is rate-limited by ALTQ, if_start is needed to drain packets
 * from the queue in order to notify readers when outgoing packets
 * become ready.
 */
static void
tunstart(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct tun_softc *tp = ifp->if_softc;
	struct mbuf *m;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);

	if (!ifq_is_enabled(&ifp->if_snd))
		return;

	m = ifsq_poll(ifsq);
	if (m != NULL) {
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((caddr_t)tp);
		}
		if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
			pgsigio(tp->tun_sigio, SIGIO, 0);
		ifsq_deserialize_hw(ifsq);
		KNOTE(&tp->tun_rkq.ki_note, 0);
		ifsq_serialize_hw(ifsq);
	}
}