xref: /dragonfly/sys/net/tap/if_tap.c (revision f02303f9)
/*
 * Copyright (C) 1999-2000 by Maksim Yevmenkin <m_evmenkin@yahoo.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * BASED ON:
 * -------------------------------------------------------------------------
 *
 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
 * Nottingham University 1987.
 */

/*
 * $FreeBSD: src/sys/net/if_tap.c,v 1.3.2.3 2002/04/14 21:41:48 luigi Exp $
 * $DragonFly: src/sys/net/tap/if_tap.c,v 1.35 2007/01/15 00:51:43 dillon Exp $
 * $Id: if_tap.c,v 0.21 2000/07/23 21:46:02 max Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/thread2.h>
#include <sys/ttycom.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/serialize.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/ifq_var.h>
#include <net/if_arp.h>
#include <net/route.h>

#include <netinet/in.h>

#include "if_tapvar.h"
#include "if_tap.h"


#define CDEV_NAME	"tap"
#define CDEV_MAJOR	149
#define TAPDEBUG	if (tapdebug) if_printf

#define TAP		"tap"
#define VMNET		"vmnet"
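/* minor numbers with this bit set select the vmnet flavour of the driver (see tapcreate()) */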
#define VMNET_DEV_MASK	0x00010000

/* module */
static int 		tapmodevent	(module_t, int, void *);

/* device */
static void		tapcreate	(cdev_t);

/* network interface */
static void		tapifstart	(struct ifnet *);
static int		tapifioctl	(struct ifnet *, u_long, caddr_t,
					 struct ucred *);
static void		tapifinit	(void *);

/* character device */
static d_open_t		tapopen;
static d_close_t	tapclose;
static d_read_t		tapread;
static d_write_t	tapwrite;
static d_ioctl_t	tapioctl;
static d_poll_t		tappoll;
static d_kqfilter_t	tapkqfilter;

static struct dev_ops	tap_ops = {
	{ CDEV_NAME, CDEV_MAJOR, 0 },
	.d_open =	tapopen,
	.d_close =	tapclose,
	.d_read =	tapread,
	.d_write =	tapwrite,
	.d_ioctl =	tapioctl,
	.d_poll =	tappoll,
	.d_kqfilter =	tapkqfilter
};

static int		taprefcnt = 0;		/* module ref. counter   */
static int		taplastunit = -1;	/* max. open unit number */
static int		tapdebug = 0;		/* debug flag            */

MALLOC_DECLARE(M_TAP);
MALLOC_DEFINE(M_TAP, CDEV_NAME, "Ethernet tunnel interface");
SYSCTL_INT(_debug, OID_AUTO, if_tap_debug, CTLFLAG_RW, &tapdebug, 0, "");
DEV_MODULE(if_tap, tapmodevent, NULL);

/*
 * tapmodevent
 *
 * module event handler
 */
static int
tapmodevent(module_t mod, int type, void *data)
{
	static int		 attached = 0;
	struct ifnet		*ifp = NULL;
	int			 unit;

	switch (type) {
	case MOD_LOAD:
		if (attached)
			return (EEXIST);

		dev_ops_add(&tap_ops, 0, 0);
		attached = 1;
	break;

	case MOD_UNLOAD:
		if (taprefcnt > 0)
			return (EBUSY);

		dev_ops_remove(&tap_ops, 0, 0);

		/* XXX: maintain tap ifs in a local list */
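		/* scan all units and detach every remaining tap/vmnet interface */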
		unit = 0;
		while (unit <= taplastunit) {
			TAILQ_FOREACH(ifp, &ifnet, if_link) {
				if ((strcmp(ifp->if_dname, TAP) == 0) ||
				    (strcmp(ifp->if_dname, VMNET) == 0)) {
					if (ifp->if_dunit == unit)
						break;
				}
			}

			if (ifp != NULL) {
				struct tap_softc	*tp = ifp->if_softc;

				TAPDEBUG(ifp, "detached. minor = %#x, " \
					"taplastunit = %d\n",
					minor(tp->tap_dev), taplastunit);

				ether_ifdetach(ifp);
				destroy_dev(tp->tap_dev);
				kfree(tp, M_TAP);
			}
			else
				unit ++;
		}

		attached = 0;
	break;

	default:
		return (EOPNOTSUPP);
	}

	return (0);
} /* tapmodevent */


/*
 * tapcreate
 *
 * create the interface
 */
static void
tapcreate(cdev_t dev)
{
	struct ifnet		*ifp = NULL;
	struct tap_softc	*tp = NULL;
	uint8_t			ether_addr[ETHER_ADDR_LEN];
	int			 unit;
	char			*name = NULL;

	/* allocate driver storage and create device */
	MALLOC(tp, struct tap_softc *, sizeof(*tp), M_TAP, M_WAITOK);
	bzero(tp, sizeof(*tp));

	/* select device: tap or vmnet */
	if (minor(dev) & VMNET_DEV_MASK) {
		name = VMNET;
		unit = lminor(dev) & 0xff;
		tp->tap_flags |= TAP_VMNET;
	}
	else {
		name = TAP;
		unit = lminor(dev);
	}

	tp->tap_dev = make_dev(&tap_ops, minor(dev), UID_ROOT, GID_WHEEL,
						0600, "%s%d", name, unit);
	tp->tap_dev->si_drv1 = dev->si_drv1 = tp;
	reference_dev(tp->tap_dev);	/* so we can destroy it later */

	/* generate fake MAC address: 00 bd xx xx xx unit_no */
	ether_addr[0] = 0x00;
	ether_addr[1] = 0xbd;
	bcopy(&ticks, &ether_addr[2], 3);
	ether_addr[5] = (u_char)unit;

	/* fill the rest and attach interface */
	ifp = &tp->tap_if;
	ifp->if_softc = tp;

	if_initname(ifp, name, unit);
	if (unit > taplastunit)
		taplastunit = unit;

	ifp->if_init = tapifinit;
	ifp->if_start = tapifstart;
	ifp->if_ioctl = tapifioctl;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, ether_addr, NULL);

	tp->tap_flags |= TAP_INITED;

	TAPDEBUG(ifp, "created. minor = %#x\n", minor(tp->tap_dev));
} /* tapcreate */


/*
 * tapopen
 *
 * open the tunnel device; the caller must be superuser
 */
static int
tapopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tap_softc *tp = NULL;
	struct ifnet *ifp = NULL;
	int error;

	if ((error = suser_cred(ap->a_cred, 0)) != 0)
		return (error);

	tp = dev->si_drv1;
	if (tp == NULL) {
		tapcreate(dev);
		tp = dev->si_drv1;
		ifp = &tp->arpcom.ac_if;
	} else {
		ifp = &tp->arpcom.ac_if;

		EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);

		/* Announce the return of the interface. */
		rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
	}

	if (tp->tap_flags & TAP_OPEN)
		return (EBUSY);

	bcopy(tp->arpcom.ac_enaddr, tp->ether_addr, sizeof(tp->ether_addr));

	tp->tap_td = curthread;
	tp->tap_flags |= TAP_OPEN;
	taprefcnt ++;

	TAPDEBUG(ifp, "opened. minor = %#x, refcnt = %d, taplastunit = %d\n",
		 minor(tp->tap_dev), taprefcnt, taplastunit);

	return (0);
} /* tapopen */


/*
 * tapclose
 *
 * close the device - mark i/f down & delete routing info
 */
static int
tapclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = &tp->tap_if;

	/* junk all pending output */

	lwkt_serialize_enter(ifp->if_serializer);
	ifq_purge(&ifp->if_snd);
	lwkt_serialize_exit(ifp->if_serializer);

	/*
	 * do not bring the interface down and do not touch the interface
	 * at all if we are in VMnet mode; just close the device.
	 */

	if (((tp->tap_flags & TAP_VMNET) == 0) && (ifp->if_flags & IFF_UP)) {
		EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);

		/* Announce the departure of the interface. */
		rt_ifannouncemsg(ifp, IFAN_DEPARTURE);

		if_down(ifp);
		lwkt_serialize_enter(ifp->if_serializer);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			struct ifaddr	*ifa = NULL;

			TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
				if (ifa->ifa_addr->sa_family == AF_INET) {
					rtinit(ifa, (int)RTM_DELETE, 0);

					/* remove address from interface */
					bzero(ifa->ifa_addr,
						   sizeof(*(ifa->ifa_addr)));
					bzero(ifa->ifa_dstaddr,
						   sizeof(*(ifa->ifa_dstaddr)));
					bzero(ifa->ifa_netmask,
						   sizeof(*(ifa->ifa_netmask)));
				}
			}

			ifp->if_flags &= ~IFF_RUNNING;
		}
		lwkt_serialize_exit(ifp->if_serializer);
	}

	funsetown(tp->tap_sigio);
	selwakeup(&tp->tap_rsel);

	tp->tap_flags &= ~TAP_OPEN;
	tp->tap_td = NULL;

	taprefcnt --;
	if (taprefcnt < 0) {
		taprefcnt = 0;
		if_printf(ifp, "minor = %#x, refcnt = %d is out of sync. "
			"set refcnt to 0\n", minor(tp->tap_dev), taprefcnt);
	}

	TAPDEBUG(ifp, "closed. minor = %#x, refcnt = %d, taplastunit = %d\n",
		 minor(tp->tap_dev), taprefcnt, taplastunit);

	return (0);
} /* tapclose */


/*
 * tapifinit
 *
 * network interface initialization function
 */
static void
tapifinit(void *xtp)
{
	struct tap_softc	*tp = (struct tap_softc *)xtp;
	struct ifnet		*ifp = &tp->tap_if;

	TAPDEBUG(ifp, "initializing, minor = %#x\n", minor(tp->tap_dev));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* attempt to start output */
	tapifstart(ifp);
} /* tapifinit */


/*
 * tapifioctl
 *
 * Process an ioctl request on network interface
 *
 * MPSAFE
 */
int
tapifioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct tap_softc 	*tp = (struct tap_softc *)(ifp->if_softc);
	struct ifstat		*ifs = NULL;
	int			 dummy;

	switch (cmd) {
		case SIOCSIFADDR:
		case SIOCGIFADDR:
		case SIOCSIFMTU:
			dummy = ether_ioctl(ifp, cmd, data);
			return (dummy);

		case SIOCSIFFLAGS:
			if ((tp->tap_flags & TAP_VMNET) == 0) {
				/*
				 * Only for non-vmnet tap(4)
				 */
				if (ifp->if_flags & IFF_UP) {
					if ((ifp->if_flags & IFF_RUNNING) == 0)
						tapifinit(tp);
				}
			}
			break;
		case SIOCADDMULTI: /* XXX -- just like vmnet does */
		case SIOCDELMULTI:
			break;

		case SIOCGIFSTATUS:
			ifs = (struct ifstat *)data;
			dummy = strlen(ifs->ascii);
			if (tp->tap_td != NULL && dummy < sizeof(ifs->ascii)) {
				if (tp->tap_td->td_proc) {
				    ksnprintf(ifs->ascii + dummy,
					sizeof(ifs->ascii) - dummy,
					"\tOpened by pid %d\n",
					(int)tp->tap_td->td_proc->p_pid);
				} else {
				    ksnprintf(ifs->ascii + dummy,
					sizeof(ifs->ascii) - dummy,
					"\tOpened by td %p\n", tp->tap_td);
				}
			}
			break;

		default:
			return (EINVAL);
	}

	return (0);
} /* tapifioctl */


/*
 * tapifstart
 *
 * queue packets handed down from the higher level, ready to be put out
 */
static void
tapifstart(struct ifnet *ifp)
{
	struct tap_softc	*tp = ifp->if_softc;

	TAPDEBUG(ifp, "starting, minor = %#x\n", minor(tp->tap_dev));

	/*
	 * do not junk pending output if we are in VMnet mode.
	 * XXX: can this do any harm because of queue overflow?
	 */

	if (((tp->tap_flags & TAP_VMNET) == 0) &&
	    ((tp->tap_flags & TAP_READY) != TAP_READY)) {
		TAPDEBUG(ifp, "not ready. minor = %#x, tap_flags = 0x%x\n",
			 minor(tp->tap_dev), tp->tap_flags);

		ifq_purge(&ifp->if_snd);
		return;
	}

	ifp->if_flags |= IFF_OACTIVE;

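	/*
	 * Packets stay queued on if_snd; wake up any reader sleeping in
	 * tapread() and notify poll/kqueue/async listeners.
	 */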
	if (!ifq_is_empty(&ifp->if_snd)) {
		if (tp->tap_flags & TAP_RWAIT) {
			tp->tap_flags &= ~TAP_RWAIT;
			wakeup((caddr_t)tp);
		}
		KNOTE(&tp->tap_rsel.si_note, 0);

		if ((tp->tap_flags & TAP_ASYNC) && (tp->tap_sigio != NULL))
			pgsigio(tp->tap_sigio, SIGIO, 0);

		/*
		 * selwakeup is not MPSAFE.  tapifstart is.
		 */
		get_mplock();
		selwakeup(&tp->tap_rsel);
		rel_mplock();
		ifp->if_opackets ++; /* obytes are counted in ether_output */
	}

	ifp->if_flags &= ~IFF_OACTIVE;
} /* tapifstart */


/*
 * tapioctl
 *
 * the ops interface is now pretty minimal
 */
static int
tapioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	caddr_t data = ap->a_data;
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = &tp->tap_if;
	struct tapinfo		*tapp = NULL;
	struct mbuf *mb;
	short f;
	int error;

	lwkt_serialize_enter(ifp->if_serializer);
	error = 0;

	switch (ap->a_cmd) {
	case TAPSIFINFO:
		tapp = (struct tapinfo *)data;
		ifp->if_mtu = tapp->mtu;
		ifp->if_type = tapp->type;
		ifp->if_baudrate = tapp->baudrate;
		break;

	case TAPGIFINFO:
		tapp = (struct tapinfo *)data;
		tapp->mtu = ifp->if_mtu;
		tapp->type = ifp->if_type;
		tapp->baudrate = ifp->if_baudrate;
		break;

	case TAPSDEBUG:
		tapdebug = *(int *)data;
		break;

	case TAPGDEBUG:
		*(int *)data = tapdebug;
		break;

	case FIOASYNC:
		if (*(int *)data)
			tp->tap_flags |= TAP_ASYNC;
		else
			tp->tap_flags &= ~TAP_ASYNC;
		break;

	case FIONREAD:
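		/* report the number of bytes in the next packet queued on if_snd */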
		*(int *)data = 0;
		if ((mb = ifq_poll(&ifp->if_snd)) != NULL) {
			for(; mb != NULL; mb = mb->m_next)
				*(int *)data += mb->m_len;
		}
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)data, &tp->tap_sigio);
		break;

	case FIOGETOWN:
		*(int *)data = fgetown(tp->tap_sigio);
		break;

	/* this is deprecated, FIOSETOWN should be used instead */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)data), &tp->tap_sigio);
		break;

	/* this is deprecated, FIOGETOWN should be used instead */
	case TIOCGPGRP:
		*(int *)data = -fgetown(tp->tap_sigio);
		break;

	/* VMware/VMnet port ioctl's */

	case SIOCGIFFLAGS:	/* get ifnet flags */
		bcopy(&ifp->if_flags, data, sizeof(ifp->if_flags));
		break;

	case VMIO_SIOCSIFFLAGS: /* VMware/VMnet SIOCSIFFLAGS */
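		/*
		 * take only the lower 12 bits from userland, keep the
		 * IFF_CANTCHANGE bits and force the interface up
		 */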
		f = *(short *)data;
		f &= 0x0fff;
		f &= ~IFF_CANTCHANGE;
		f |= IFF_UP;
		ifp->if_flags = f | (ifp->if_flags & IFF_CANTCHANGE);
		break;

	case OSIOCGIFADDR:	/* get MAC address of the remote side */
	case SIOCGIFADDR:
		bcopy(tp->ether_addr, data, sizeof(tp->ether_addr));
		break;

	case SIOCSIFADDR:	/* set MAC address of the remote side */
		bcopy(data, tp->ether_addr, sizeof(tp->ether_addr));
		break;

	default:
		error = ENOTTY;
		break;
	}
	lwkt_serialize_exit(ifp->if_serializer);
	return (error);
} /* tapioctl */


/*
 * tapread
 *
 * the ops read interface - reads a packet at a time, or at
 * least as much of a packet as can be read
 */
static int
tapread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = &tp->tap_if;
	struct mbuf		*m0 = NULL;
	int			 error = 0, len;

	TAPDEBUG(ifp, "reading, minor = %#x\n", minor(tp->tap_dev));

	if ((tp->tap_flags & TAP_READY) != TAP_READY) {
		TAPDEBUG(ifp, "not ready. minor = %#x, tap_flags = 0x%x\n",
			 minor(tp->tap_dev), tp->tap_flags);

		return (EHOSTDOWN);
	}

	tp->tap_flags &= ~TAP_RWAIT;

	/* sleep until we get a packet */
	do {
		lwkt_serialize_enter(ifp->if_serializer);
		m0 = ifq_dequeue(&ifp->if_snd, NULL);
		if (m0 == NULL) {
			if (ap->a_ioflag & IO_NDELAY) {
				lwkt_serialize_exit(ifp->if_serializer);
				return (EWOULDBLOCK);
			}
			tp->tap_flags |= TAP_RWAIT;
			crit_enter();
			tsleep_interlock(tp);
			lwkt_serialize_exit(ifp->if_serializer);
			error = tsleep(tp, PCATCH, "taprd", 0);
			crit_exit();
			if (error)
				return (error);
		} else {
			lwkt_serialize_exit(ifp->if_serializer);
		}
	} while (m0 == NULL);

	BPF_MTAP(ifp, m0);

	/* xfer packet to user space */
	while ((m0 != NULL) && (uio->uio_resid > 0) && (error == 0)) {
		len = min(uio->uio_resid, m0->m_len);
		if (len == 0)
			break;

		error = uiomove(mtod(m0, caddr_t), len, uio);
		m0 = m_free(m0);
	}

	if (m0 != NULL) {
		TAPDEBUG(ifp, "dropping mbuf, minor = %#x\n",
			 minor(tp->tap_dev));
		m_freem(m0);
	}

	return (error);
} /* tapread */


/*
 * tapwrite
 *
 * the ops write interface - an atomic write is a packet - or else!
 */
static int
tapwrite(struct dev_write_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = &tp->tap_if;
	struct mbuf		*top = NULL, **mp = NULL, *m = NULL;
	int		 	 error = 0, tlen, mlen;

	TAPDEBUG(ifp, "writing, minor = %#x\n", minor(tp->tap_dev));

	if (uio->uio_resid == 0)
		return (0);

	if ((uio->uio_resid < 0) || (uio->uio_resid > TAPMRU)) {
		TAPDEBUG(ifp, "invalid packet len = %d, minor = %#x\n",
			 uio->uio_resid, minor(tp->tap_dev));

		return (EIO);
	}
	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	mlen = MHLEN;

	top = 0;
	mp = &top;
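	/* copy the user buffer into a chain of mbufs */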
	while ((error == 0) && (uio->uio_resid > 0)) {
		m->m_len = min(mlen, uio->uio_resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (uio->uio_resid > 0) {
			MGET(m, MB_DONTWAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		ifp->if_ierrors ++;
		if (top)
			m_freem(top);
		return (error);
	}

	top->m_pkthdr.len = tlen;
	top->m_pkthdr.rcvif = ifp;

	/*
	 * Ethernet bridge and bpf are handled in ether_input
	 *
	 * adjust mbuf and give packet to the ether_input
	 */
	lwkt_serialize_enter(ifp->if_serializer);
	ifp->if_input(ifp, top);
	ifp->if_ipackets ++; /* ibytes are counted in ether_input */
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
} /* tapwrite */


/*
 * tappoll
 *
 * the poll interface; this is really only useful for reads. the
 * write detect always returns true because a write never blocks:
 * it either accepts the packet or drops it
 */
static int
tappoll(struct dev_poll_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = &tp->tap_if;
	int		 	 revents = 0;

	TAPDEBUG(ifp, "polling, minor = %#x\n", minor(tp->tap_dev));

	lwkt_serialize_enter(ifp->if_serializer);
	if (ap->a_events & (POLLIN | POLLRDNORM)) {
		if (!ifq_is_empty(&ifp->if_snd)) {
			TAPDEBUG(ifp,
				 "has data in queue. minor = %#x\n",
				 minor(tp->tap_dev));

			revents |= (ap->a_events & (POLLIN | POLLRDNORM));
		}
		else {
			TAPDEBUG(ifp, "waiting for data, minor = %#x\n",
				 minor(tp->tap_dev));

			selrecord(curthread, &tp->tap_rsel);
		}
	}
	lwkt_serialize_exit(ifp->if_serializer);

	if (ap->a_events & (POLLOUT | POLLWRNORM))
		revents |= (ap->a_events & (POLLOUT | POLLWRNORM));
	ap->a_events = revents;
	return(0);
} /* tappoll */

static int filt_tapread(struct knote *kn, long hint);
static void filt_tapdetach(struct knote *kn);
static struct filterops tapread_filtops =
	{ 1, NULL, filt_tapdetach, filt_tapread };

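/*
 * tapkqfilter
 *
 * the kqueue interface - only EVFILT_READ is supported; any other
 * filter is rejected
 */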
int
tapkqfilter(struct dev_kqfilter_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct knote *kn = ap->a_kn;
	struct tap_softc *tp;
	struct klist *list;
	struct ifnet *ifp;

	get_mplock();
	tp = dev->si_drv1;
	ifp = &tp->tap_if;
	ap->a_result = 0;

	switch(kn->kn_filter) {
	case EVFILT_READ:
		list = &tp->tap_rsel.si_note;
		kn->kn_fop = &tapread_filtops;
		kn->kn_hook = (void *)tp;
		break;
	case EVFILT_WRITE:
		/* fall through */
	default:
		ap->a_result = 1;
		rel_mplock();
		return(0);
	}
	crit_enter();
	SLIST_INSERT_HEAD(list, kn, kn_selnext);
	crit_exit();
	rel_mplock();
	return(0);
}

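/*
 * filt_tapread
 *
 * return true when at least one packet is queued on if_snd
 */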
static int
filt_tapread(struct knote *kn, long hint)
{
	struct tap_softc *tp = (void *)kn->kn_hook;
	struct ifnet *ifp = &tp->tap_if;

	if (ifq_is_empty(&ifp->if_snd) == 0) {
		return(1);
	} else {
		return(0);
	}
}

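/*
 * filt_tapdetach
 *
 * remove the knote from the read selinfo note list
 */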
static void
filt_tapdetach(struct knote *kn)
{
	struct tap_softc *tp = (void *)kn->kn_hook;

	SLIST_REMOVE(&tp->tap_rsel.si_note, kn, knote, kn_selnext);
}
858