xref: /dragonfly/sys/net/bpf.c (revision b3e108b2)
1 /*
2  * Copyright (c) 1990, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from the Stanford/CMU enet packet filter,
6  * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7  * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8  * Berkeley Laboratory.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *      @(#)bpf.c	8.2 (Berkeley) 3/28/94
39  *
40  * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.12 2002/04/14 21:41:48 luigi Exp $
41  * $DragonFly: src/sys/net/bpf.c,v 1.20 2004/12/21 02:54:14 hsu Exp $
42  */
43 
44 #include "use_bpf.h"
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/conf.h>
49 #include <sys/malloc.h>
50 #include <sys/mbuf.h>
51 #include <sys/time.h>
52 #include <sys/proc.h>
53 #include <sys/signalvar.h>
54 #include <sys/filio.h>
55 #include <sys/sockio.h>
56 #include <sys/ttycom.h>
57 #include <sys/filedesc.h>
58 
59 #include <sys/poll.h>
60 
61 #include <sys/socket.h>
62 #include <sys/vnode.h>
63 
64 #include <net/if.h>
65 #include <net/bpf.h>
66 #include <net/bpfdesc.h>
67 
68 #include <netinet/in.h>
69 #include <netinet/if_ether.h>
70 #include <sys/kernel.h>
71 #include <sys/sysctl.h>
72 
73 MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
74 
75 #if NBPF > 0
76 
77 /*
78  * The default read buffer size is patchable.
79  */
80 static int bpf_bufsize = BPF_DEFAULTBUFSIZE;
81 SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
82 	   &bpf_bufsize, 0, "");
83 static int bpf_maxbufsize = BPF_MAXBUFSIZE;
84 SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
85 	   &bpf_maxbufsize, 0, "");
86 
87 /*
88  *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
89  */
90 static struct bpf_if	*bpf_iflist;
91 
92 static int	bpf_allocbufs(struct bpf_d *);
93 static void	bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
94 static void	bpf_detachd(struct bpf_d *d);
95 static void	bpf_freed(struct bpf_d *);
96 static void	bpf_mcopy(const void *, void *, size_t);
97 static int	bpf_movein(struct uio *, int, struct mbuf **,
98 			   struct sockaddr *, int *);
99 static int	bpf_setif(struct bpf_d *, struct ifreq *);
100 static void	bpf_timed_out(void *);
101 static void	bpf_wakeup(struct bpf_d *);
102 static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
103 			    void (*)(const void *, void *, size_t));
104 static void	reset_d(struct bpf_d *);
105 static int	bpf_setf(struct bpf_d *, struct bpf_program *);
106 static void	bpf_drvinit(void *unused);
107 
108 static d_open_t		bpfopen;
109 static d_close_t	bpfclose;
110 static d_read_t		bpfread;
111 static d_write_t	bpfwrite;
112 static d_ioctl_t	bpfioctl;
113 static d_poll_t		bpfpoll;
114 
115 #define CDEV_MAJOR 23
116 static struct cdevsw bpf_cdevsw = {
117 	/* name */	"bpf",
118 	/* maj */	CDEV_MAJOR,
119 	/* flags */	0,
120 	/* port */	NULL,
121 	/* clone */	NULL,
122 
123 	/* open */	bpfopen,
124 	/* close */	bpfclose,
125 	/* read */	bpfread,
126 	/* write */	bpfwrite,
127 	/* ioctl */	bpfioctl,
128 	/* poll */	bpfpoll,
129 	/* mmap */	nommap,
130 	/* strategy */	nostrategy,
131 	/* dump */	nodump,
132 	/* psize */	nopsize
133 };
134 
135 
136 static int
137 bpf_movein(struct uio *uio, int linktype, struct mbuf **mp,
138 	   struct sockaddr *sockp, int *datlen)
139 {
140 	struct mbuf *m;
141 	int error;
142 	int len;
143 	int hlen;
144 
145 	/*
146 	 * Build a sockaddr based on the data link layer type.
147 	 * We do this at this level because the ethernet header
148 	 * is copied directly into the data field of the sockaddr.
149 	 * In the case of SLIP, there is no header and the packet
150 	 * is forwarded as is.
151 	 * Also, we are careful to leave room at the front of the mbuf
152 	 * for the link level header.
153 	 */
154 	switch (linktype) {
155 
156 	case DLT_SLIP:
157 		sockp->sa_family = AF_INET;
158 		hlen = 0;
159 		break;
160 
161 	case DLT_EN10MB:
162 		sockp->sa_family = AF_UNSPEC;
163 		/* XXX Would MAXLINKHDR be better? */
164 		hlen = sizeof(struct ether_header);
165 		break;
166 
167 	case DLT_FDDI:
168 		sockp->sa_family = AF_IMPLINK;
169 		hlen = 0;
170 		break;
171 
172 	case DLT_RAW:
173 	case DLT_NULL:
174 		sockp->sa_family = AF_UNSPEC;
175 		hlen = 0;
176 		break;
177 
178 	case DLT_ATM_RFC1483:
179 		/*
180 		 * en atm driver requires 4-byte atm pseudo header.
181 		 * though it isn't standard, vpi:vci needs to be
182 		 * specified anyway.
183 		 */
184 		sockp->sa_family = AF_UNSPEC;
185 		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
186 		break;
187 
188 	case DLT_PPP:
189 		sockp->sa_family = AF_UNSPEC;
190 		hlen = 4;	/* This should match PPP_HDRLEN */
191 		break;
192 
193 	default:
194 		return(EIO);
195 	}
196 
197 	len = uio->uio_resid;
198 	*datlen = len - hlen;
199 	if ((unsigned)len > MCLBYTES)
200 		return(EIO);
201 
202 	MGETHDR(m, MB_WAIT, MT_DATA);
203 	if (m == NULL)
204 		return(ENOBUFS);
205 	if (len > MHLEN) {
206 		MCLGET(m, MB_WAIT);
207 		if (!(m->m_flags & M_EXT)) {
208 			error = ENOBUFS;
209 			goto bad;
210 		}
211 	}
212 	m->m_pkthdr.len = m->m_len = len;
213 	m->m_pkthdr.rcvif = NULL;
214 	*mp = m;
215 	/*
216 	 * Make room for link header.
217 	 */
218 	if (hlen != 0) {
219 		m->m_pkthdr.len -= hlen;
220 		m->m_len -= hlen;
221 		m->m_data += hlen; /* XXX */
222 		error = uiomove(sockp->sa_data, hlen, uio);
223 		if (error)
224 			goto bad;
225 	}
226 	error = uiomove(mtod(m, caddr_t), len - hlen, uio);
227 	if (!error)
228 		return(0);
229 bad:
230 	m_freem(m);
231 	return(error);
232 }
233 
234 /*
235  * Attach file to the bpf interface, i.e. make d listen on bp.
236  * Must be called at splimp.
237  */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	SLIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
	/* A non-NULL if_bpf is what makes drivers start calling bpf_tap(). */
	bp->bif_ifp->if_bpf = bp;
}
250 
251 /*
252  * Detach a file from its interface.
253  */
static void
bpf_detachd(struct bpf_d *d)
{
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0)) {
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
		}
	}
	/* Remove d from the interface's descriptor list. */
	SLIST_REMOVE(&bp->bif_dlist, d, bpf_d, bd_next);

	if (SLIST_EMPTY(&bp->bif_dlist)) {
		/*
		 * Let the driver know that there are no more listeners.
		 * Clearing if_bpf stops the driver's bpf_tap() calls.
		 */
		d->bd_bif->bif_ifp->if_bpf = NULL;
	}
	d->bd_bif = NULL;
}
286 
287 /*
288  * Open ethernet device.  Returns ENXIO for illegal minor device number,
289  * EBUSY if file is open by another process.
290  */
291 /* ARGSUSED */
static int
bpfopen(dev_t dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d;
	struct proc *p = td->td_proc;

	KKASSERT(p != NULL);

	/* Jailed processes may not sniff traffic. */
	if (p->p_ucred->cr_prison)
		return(EPERM);

	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d != NULL)
		return(EBUSY);
	/*
	 * NOTE(review): looks like this (re)creates the device node for
	 * this minor on first open -- presumably part of the clone
	 * scheme; confirm against the cdevsw clone handling.
	 */
	make_dev(&bpf_cdevsw, minor(dev), 0, 0, 0600, "bpf%d", lminor(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	/* Defaults: patchable buffer size, SIGIO for async I/O,
	 * and "see sent packets" enabled. */
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;
	callout_init(&d->bd_callout);
	return(0);
}
319 
320 /*
321  * Close the descriptor by detaching it from its interface,
322  * deallocating its buffers, and marking it free.
323  */
324 /* ARGSUSED */
static int
bpfclose(dev_t dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d = dev->si_drv1;
	int s;

	/* Release SIGIO ownership before tearing the descriptor down. */
	funsetown(d->bd_sigio);
	s = splimp();
	/* Cancel any outstanding read timeout. */
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	if (d->bd_bif != NULL)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);
	/* Mark the minor free again (see the EBUSY check in bpfopen). */
	dev->si_drv1 = NULL;
	free(d, M_BPF);

	return(0);
}
345 
346 /*
347  * Rotate the packet buffers in descriptor d.  Move the store buffer
348  * into the hold slot, and the free buffer into the store slot.
349  * Zero the length of the new store buffer.
350  */
/*
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and is safe inside unbraced if/else bodies.
 */
#define ROTATE_BUFFERS(d) do { \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL; \
} while (0)
357 /*
358  *  bpfread - read next chunk of packets from buffers
359  */
static int
bpfread(dev_t dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return(EINVAL);

	s = splimp();
	/* A read supersedes any pending poll() timeout. */
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			splx(s);
			return(ENXIO);
		}

		if (ioflag & IO_NDELAY) {
			splx(s);
			return(EWOULDBLOCK);
		}
		/* Sleep until a packet arrives or the read timeout fires. */
		error = tsleep(d, PCATCH, "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return(error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return(0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	s = splimp();
	/* The hold buffer has been drained; recycle it as the free buffer. */
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	splx(s);

	return(error);
}
458 
459 
460 /*
461  * If there are processes sleeping on this descriptor, wake them up.
462  */
static void
bpf_wakeup(struct bpf_d *d)
{
	/* A pending read timeout is now moot; cancel it. */
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	/* Wake sleepers in bpfread()... */
	wakeup(d);
	/* ...deliver the async signal, if armed... */
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

	/* ...and notify select()/poll() waiters. */
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
}
478 
479 static void
480 bpf_timed_out(void *arg)
481 {
482 	struct bpf_d *d = (struct bpf_d *)arg;
483 	int s;
484 
485 	s = splimp();
486 	if (d->bd_state == BPF_WAITING) {
487 		d->bd_state = BPF_TIMED_OUT;
488 		if (d->bd_slen != 0)
489 			bpf_wakeup(d);
490 	}
491 	splx(s);
492 }
493 
494 static	int
495 bpfwrite(dev_t dev, struct uio *uio, int ioflag)
496 {
497 	struct bpf_d *d = dev->si_drv1;
498 	struct ifnet *ifp;
499 	struct mbuf *m;
500 	int error, s;
501 	static struct sockaddr dst;
502 	int datlen;
503 
504 	if (d->bd_bif == NULL)
505 		return(ENXIO);
506 
507 	ifp = d->bd_bif->bif_ifp;
508 
509 	if (uio->uio_resid == 0)
510 		return(0);
511 
512 	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
513 	if (error)
514 		return(error);
515 
516 	if (datlen > ifp->if_mtu)
517 		return(EMSGSIZE);
518 
519 	if (d->bd_hdrcmplt)
520 		dst.sa_family = pseudo_AF_HDRCMPLT;
521 
522 	s = splnet();
523 	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)NULL);
524 	splx(s);
525 	/*
526 	 * The driver frees the mbuf.
527 	 */
528 	return(error);
529 }
530 
531 /*
532  * Reset a descriptor by flushing its packet buffer and clearing the
533  * receive and drop counts.  Should be called at splimp.
534  */
535 static void
536 reset_d(struct bpf_d *d)
537 {
538 	if (d->bd_hbuf) {
539 		/* Free the hold buffer. */
540 		d->bd_fbuf = d->bd_hbuf;
541 		d->bd_hbuf = NULL;
542 	}
543 	d->bd_slen = 0;
544 	d->bd_hlen = 0;
545 	d->bd_rcount = 0;
546 	d->bd_dcount = 0;
547 }
548 
549 /*
550  *  FIONREAD		Check for read packet available.
551  *  SIOCGIFADDR		Get interface address - convenient hook to driver.
552  *  BIOCGBLEN		Get buffer len [for read()].
553  *  BIOCSETF		Set ethernet read filter.
554  *  BIOCFLUSH		Flush read packet buffer.
555  *  BIOCPROMISC		Put interface into promiscuous mode.
556  *  BIOCGDLT		Get link layer type.
557  *  BIOCGETIF		Get interface name.
558  *  BIOCSETIF		Set interface.
559  *  BIOCSRTIMEOUT	Set read timeout.
560  *  BIOCGRTIMEOUT	Get read timeout.
561  *  BIOCGSTATS		Get packet stats.
562  *  BIOCIMMEDIATE	Set immediate mode.
563  *  BIOCVERSION		Get filter language version.
564  *  BIOCGHDRCMPLT	Get "header already complete" flag
565  *  BIOCSHDRCMPLT	Set "header already complete" flag
566  *  BIOCGSEESENT	Get "see packets sent" flag
567  *  BIOCSSEESENT	Set "see packets sent" flag
568  */
569 /* ARGSUSED */
/*
 * Dispatch table for the BIOC*/FIO*/TIOC* ioctls documented in the
 * comment block above.  Any pending read timeout is cancelled first:
 * an ioctl from userland means the process is no longer blocked in
 * poll()/select() waiting on this descriptor.
 */
static int
bpfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct bpf_d *d = dev->si_drv1;
	int s, error = 0;

	s = splimp();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	splx(s);

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			/* Bytes buffered = store buffer + hold buffer. */
			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			/* Pass through to the bound interface's ioctl. */
			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr,
							 td->td_proc->p_ucred);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is
	 * bound (the buffers are allocated at BIOCSETIF time); the
	 * request is clamped to [BPF_MINBUFSIZE, bpf_maxbufsize].
	 */
	case BIOCSBLEN:
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		/* Only the first request per descriptor bumps the
		 * interface's promiscuous count. */
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz_low(tv);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/* Convert the tick count back to a timeval. */
			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return(error);
}
838 
839 /*
840  * Set d's packet filter program to fp.  If this file already has a filter,
841  * free it and replace it.  Returns EINVAL for bogus requests.
842  */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp)
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == NULL) {
		/* A NULL program with a nonzero length is nonsense. */
		if (fp->bf_len != 0)
			return(EINVAL);
		s = splimp();
		/* Removing the filter; flush any buffered packets. */
		d->bd_filter = NULL;
		reset_d(d);
		splx(s);
		if (old != 0)
			free(old, M_BPF);
		return(0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return(EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	/* Install only programs that copy in cleanly and pass the
	 * bpf verifier; otherwise reject and free the copy. */
	if (copyin(fp->bf_insns, fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free(old, M_BPF);

		return(0);
	}
	free(fcode, M_BPF);
	return(EINVAL);
}
882 
883 /*
884  * Detach a file from its current interface (if attached at all) and attach
885  * to the interface indicated by the name stored in ifr.
886  * Return an errno or 0.
887  */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL)
		return(ENXIO);

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == NULL || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if (!(ifp->if_flags & IFF_UP))
			return(ENETDOWN);

		/* First bind on this descriptor: allocate the buffers. */
		if (d->bd_sbuf == NULL) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return(error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif != NULL) {
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);
			}

			bpf_attachd(d, bp);
		}
		/* Flush stale packets from any previous binding. */
		reset_d(d);
		splx(s);
		return(0);
	}

	/* Not found. */
	return(ENXIO);
}
941 
942 /*
943  * Support for select() and poll() system calls
944  *
945  * Return true iff the specific operation will not block indefinitely.
946  * Otherwise, return false but make a note that a selwakeup() must be done.
947  */
948 int
949 bpfpoll(dev_t dev, int events, struct thread *td)
950 {
951 	struct bpf_d *d;
952 	int s;
953 	int revents;
954 
955 	d = dev->si_drv1;
956 	if (d->bd_bif == NULL)
957 		return(ENXIO);
958 
959 	revents = events & (POLLOUT | POLLWRNORM);
960 	s = splimp();
961 	if (events & (POLLIN | POLLRDNORM)) {
962 		/*
963 		 * An imitation of the FIONREAD ioctl code.
964 		 * XXX not quite.  An exact imitation:
965 		 *	if (d->b_slen != 0 ||
966 		 *	    (d->bd_hbuf != NULL && d->bd_hlen != 0)
967 		 */
968 		if (d->bd_hlen != 0 ||
969 		    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
970 		    d->bd_slen != 0))
971 			revents |= events & (POLLIN | POLLRDNORM);
972 		else {
973 			selrecord(td, &d->bd_sel);
974 			/* Start the read timeout if necessary. */
975 			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
976 				callout_reset(&d->bd_callout, d->bd_rtout,
977 				    bpf_timed_out, d);
978 				d->bd_state = BPF_WAITING;
979 			}
980 		}
981 	}
982 	splx(s);
983 	return(revents);
984 }
985 
986 /*
987  * Incoming linkage from device drivers.  Process the packet pkt, of length
988  * pktlen, which is stored in a contiguous buffer.  The packet is parsed
989  * by each process' filter, and if accepted, stashed into the corresponding
990  * buffer.
991  */
992 void
993 bpf_tap(struct ifnet *ifp, u_char *pkt, u_int pktlen)
994 {
995 	struct bpf_if *bp = ifp->if_bpf;
996 	struct bpf_d *d;
997 	u_int slen;
998 
999 	/*
1000 	 * Note that the ipl does not have to be raised at this point.
1001 	 * The only problem that could arise here is that if two different
1002 	 * interfaces shared any data.  This is not the case.
1003 	 */
1004 	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
1005 		++d->bd_rcount;
1006 		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
1007 		if (slen != 0)
1008 			catchpacket(d, pkt, pktlen, slen, ovbcopy);
1009 	}
1010 }
1011 
1012 /*
1013  * Copy data from an mbuf chain into a buffer.  This code is derived
1014  * from m_copydata in sys/uipc_mbuf.c.
1015  */
1016 static void
1017 bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
1018 {
1019 	const struct mbuf *m;
1020 	u_int count;
1021 	u_char *dst;
1022 
1023 	m = src_arg;
1024 	dst = dst_arg;
1025 	while (len > 0) {
1026 		if (m == NULL)
1027 			panic("bpf_mcopy");
1028 		count = min(m->m_len, len);
1029 		bcopy(mtod(m, void *), dst, count);
1030 		m = m->m_next;
1031 		dst += count;
1032 		len -= count;
1033 	}
1034 }
1035 
1036 /*
1037  * Incoming linkage from device drivers, when packet is in an mbuf chain.
1038  */
1039 void
1040 bpf_mtap(struct ifnet *ifp, struct mbuf *m)
1041 {
1042 	struct bpf_if *bp = ifp->if_bpf;
1043 	struct bpf_d *d;
1044 	u_int pktlen, slen;
1045 	struct mbuf *m0;
1046 
1047 	pktlen = 0;
1048 	for (m0 = m; m0 != NULL; m0 = m0->m_next)
1049 		pktlen += m0->m_len;
1050 
1051 	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
1052 		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
1053 			continue;
1054 		++d->bd_rcount;
1055 		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
1056 		if (slen != 0)
1057 			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
1058 	}
1059 }
1060 
1061 /*
1062  * Move the packet data from interface memory (pkt) into the
1063  * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
1064  * otherwise 0.  "copy" is the routine called to do the actual data
1065  * transfer.  bcopy is passed in to copy contiguous chunks, while
1066  * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
1067  * pkt is really an mbuf.
1068  */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
	    void (*cpfn)(const void *, void *, size_t))
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	microtime(&hp->bh_tstamp);
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 * bh_caplen is set to the payload bytes actually captured.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}
1129 
1130 /*
1131  * Initialize all nonzero fields of a descriptor.
1132  */
1133 static int
1134 bpf_allocbufs(struct bpf_d *d)
1135 {
1136 	d->bd_fbuf = malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1137 	if (d->bd_fbuf == NULL)
1138 		return(ENOBUFS);
1139 
1140 	d->bd_sbuf = malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1141 	if (d->bd_sbuf == NULL) {
1142 		free(d->bd_fbuf, M_BPF);
1143 		return(ENOBUFS);
1144 	}
1145 	d->bd_slen = 0;
1146 	d->bd_hlen = 0;
1147 	return(0);
1148 }
1149 
1150 /*
1151  * Free buffers currently in use by a descriptor.
1152  * Called on close.
1153  */
1154 static void
1155 bpf_freed(struct bpf_d *d)
1156 {
1157 	/*
1158 	 * We don't need to lock out interrupts since this descriptor has
1159 	 * been detached from its interface and it yet hasn't been marked
1160 	 * free.
1161 	 */
1162 	if (d->bd_sbuf != NULL) {
1163 		free(d->bd_sbuf, M_BPF);
1164 		if (d->bd_hbuf != NULL)
1165 			free(d->bd_hbuf, M_BPF);
1166 		if (d->bd_fbuf != NULL)
1167 			free(d->bd_fbuf, M_BPF);
1168 	}
1169 	if (d->bd_filter)
1170 		free(d->bd_filter, M_BPF);
1171 }
1172 
1173 /*
1174  * Attach an interface to bpf.  ifp is a pointer to the structure
1175  * defining the interface to be attached, dlt is the link layer type,
1176  * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
1178  */
1179 void
1180 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
1181 {
1182 	struct bpf_if *bp;
1183 
1184 	bp = malloc(sizeof(*bp), M_BPF, M_WAITOK | M_ZERO);
1185 
1186 	bp->bif_ifp = ifp;
1187 	bp->bif_dlt = dlt;
1188 
1189 	bp->bif_next = bpf_iflist;
1190 	bpf_iflist = bp;
1191 
1192 	bp->bif_ifp->if_bpf = NULL;
1193 
1194 	/*
1195 	 * Compute the length of the bpf header.  This is not necessarily
1196 	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1197 	 * that the network layer header begins on a longword boundary (for
1198 	 * performance reasons and to alleviate alignment restrictions).
1199 	 */
1200 	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1201 
1202 	if (bootverbose)
1203 		printf("bpf: %s attached\n", ifp->if_xname);
1204 }
1205 
1206 /*
1207  * Detach bpf from an interface.  This involves detaching each descriptor
1208  * associated with the interface, and leaving bd_bif NULL.  Notify each
1209  * descriptor as it's detached so that any sleepers wake up and get
1210  * ENXIO.
1211  */
1212 void
1213 bpfdetach(struct ifnet *ifp)
1214 {
1215 	struct bpf_if *bp, *bp_prev;
1216 	struct bpf_d *d;
1217 	int s;
1218 
1219 	s = splimp();
1220 
1221 	/* Locate BPF interface information */
1222 	bp_prev = NULL;
1223 	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1224 		if (ifp == bp->bif_ifp)
1225 			break;
1226 		bp_prev = bp;
1227 	}
1228 
1229 	/* Interface wasn't attached */
1230 	if (bp->bif_ifp == NULL) {
1231 		splx(s);
1232 		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
1233 		return;
1234 	}
1235 
1236 	while ((d = SLIST_FIRST(&bp->bif_dlist)) != NULL) {
1237 		bpf_detachd(d);
1238 		bpf_wakeup(d);
1239 	}
1240 
1241 	if (bp_prev != NULL)
1242 		bp_prev->bif_next = bp->bif_next;
1243 	else
1244 		bpf_iflist = bp->bif_next;
1245 
1246 	free(bp, M_BPF);
1247 
1248 	splx(s);
1249 }
1250 
static void
bpf_drvinit(void *unused)
{
	/* Register the bpf character device switch at boot (SYSINIT below). */
	cdevsw_add(&bpf_cdevsw, 0, 0);
}
1256 
1257 SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
1258 
1259 #else /* !BPF */
1260 /*
1261  * NOP stubs to allow bpf-using drivers to load and function.
1262  *
1263  * A 'better' implementation would allow the core bpf functionality
1264  * to be loaded at runtime.
1265  */
1266 
void
bpf_tap(struct ifnet *ifp, u_char *pkt, u_int pktlen)
{
	/* NBPF == 0: bpf not configured; silently discard. */
}
1271 
void
bpf_mtap(struct ifnet *ifp, struct mbuf *m)
{
	/* NBPF == 0: bpf not configured; silently discard. */
}
1276 
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
	/* NBPF == 0: nothing to register. */
}
1281 
void
bpfdetach(struct ifnet *ifp)
{
	/* NBPF == 0: nothing to unregister. */
}
1286 
u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
	/* NBPF == 0: accept every packet in full. */
	return -1;	/* "no filter" behaviour */
}
1292 
1293 #endif /* !BPF */
1294