xref: /dragonfly/sys/net/bpf.c (revision 984263bc)
1 /*
2  * Copyright (c) 1990, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from the Stanford/CMU enet packet filter,
6  * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7  * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8  * Berkeley Laboratory.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *      @(#)bpf.c	8.2 (Berkeley) 3/28/94
39  *
40  * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.12 2002/04/14 21:41:48 luigi Exp $
41  */
42 
43 #include "bpf.h"
44 
45 #ifndef __GNUC__
46 #define inline
47 #else
48 #define inline __inline
49 #endif
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/conf.h>
54 #include <sys/malloc.h>
55 #include <sys/mbuf.h>
56 #include <sys/time.h>
57 #include <sys/proc.h>
58 #include <sys/signalvar.h>
59 #include <sys/filio.h>
60 #include <sys/sockio.h>
61 #include <sys/ttycom.h>
62 #include <sys/filedesc.h>
63 
64 #if defined(sparc) && BSD < 199103
65 #include <sys/stream.h>
66 #endif
67 #include <sys/poll.h>
68 
69 #include <sys/socket.h>
70 #include <sys/vnode.h>
71 
72 #include <net/if.h>
73 #include <net/bpf.h>
74 #include <net/bpfdesc.h>
75 
76 #include <netinet/in.h>
77 #include <netinet/if_ether.h>
78 #include <sys/kernel.h>
79 #include <sys/sysctl.h>
80 
81 MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
82 
83 #if NBPF > 0
84 
85 /*
86  * Older BSDs don't have kernel malloc.
87  */
88 #if BSD < 199103
89 extern bcopy();
90 static caddr_t bpf_alloc();
91 #include <net/bpf_compat.h>
92 #define BPF_BUFSIZE (MCLBYTES-8)
93 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
94 #else
95 #define BPF_BUFSIZE 4096
96 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
97 #endif
98 
99 #define PRINET  26			/* interruptible */
100 
101 /*
102  * The default read buffer size is patchable.
103  */
104 static int bpf_bufsize = BPF_BUFSIZE;
105 SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
106 	&bpf_bufsize, 0, "");
107 static int bpf_maxbufsize = BPF_MAXBUFSIZE;
108 SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
109 	&bpf_maxbufsize, 0, "");
110 
111 /*
112  *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
113  */
114 static struct bpf_if	*bpf_iflist;
115 
116 static int	bpf_allocbufs __P((struct bpf_d *));
117 static void	bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
118 static void	bpf_detachd __P((struct bpf_d *d));
119 static void	bpf_freed __P((struct bpf_d *));
120 static void	bpf_mcopy __P((const void *, void *, size_t));
121 static int	bpf_movein __P((struct uio *, int,
122 		    struct mbuf **, struct sockaddr *, int *));
123 static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
124 static void	bpf_timed_out __P((void *));
125 static inline void
126 		bpf_wakeup __P((struct bpf_d *));
127 static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
128 		    u_int, void (*)(const void *, void *, size_t)));
129 static void	reset_d __P((struct bpf_d *));
130 static int	 bpf_setf __P((struct bpf_d *, struct bpf_program *));
131 
132 static	d_open_t	bpfopen;
133 static	d_close_t	bpfclose;
134 static	d_read_t	bpfread;
135 static	d_write_t	bpfwrite;
136 static	d_ioctl_t	bpfioctl;
137 static	d_poll_t	bpfpoll;
138 
#define CDEV_MAJOR 23
/*
 * Character device switch for /dev/bpf*.  Entry points not supported by
 * bpf (mmap, strategy, dump, psize) use the standard no-op handlers.
 */
static struct cdevsw bpf_cdevsw = {
	/* open */	bpfopen,
	/* close */	bpfclose,
	/* read */	bpfread,
	/* write */	bpfwrite,
	/* ioctl */	bpfioctl,
	/* poll */	bpfpoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"bpf",
	/* maj */	CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};
156 
157 
158 static int
159 bpf_movein(uio, linktype, mp, sockp, datlen)
160 	register struct uio *uio;
161 	int linktype, *datlen;
162 	register struct mbuf **mp;
163 	register struct sockaddr *sockp;
164 {
165 	struct mbuf *m;
166 	int error;
167 	int len;
168 	int hlen;
169 
170 	/*
171 	 * Build a sockaddr based on the data link layer type.
172 	 * We do this at this level because the ethernet header
173 	 * is copied directly into the data field of the sockaddr.
174 	 * In the case of SLIP, there is no header and the packet
175 	 * is forwarded as is.
176 	 * Also, we are careful to leave room at the front of the mbuf
177 	 * for the link level header.
178 	 */
179 	switch (linktype) {
180 
181 	case DLT_SLIP:
182 		sockp->sa_family = AF_INET;
183 		hlen = 0;
184 		break;
185 
186 	case DLT_EN10MB:
187 		sockp->sa_family = AF_UNSPEC;
188 		/* XXX Would MAXLINKHDR be better? */
189 		hlen = sizeof(struct ether_header);
190 		break;
191 
192 	case DLT_FDDI:
193 #if defined(__FreeBSD__) || defined(__bsdi__)
194 		sockp->sa_family = AF_IMPLINK;
195 		hlen = 0;
196 #else
197 		sockp->sa_family = AF_UNSPEC;
198 		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
199 		hlen = 24;
200 #endif
201 		break;
202 
203 	case DLT_RAW:
204 	case DLT_NULL:
205 		sockp->sa_family = AF_UNSPEC;
206 		hlen = 0;
207 		break;
208 
209 #ifdef __FreeBSD__
210 	case DLT_ATM_RFC1483:
211 		/*
212 		 * en atm driver requires 4-byte atm pseudo header.
213 		 * though it isn't standard, vpi:vci needs to be
214 		 * specified anyway.
215 		 */
216 		sockp->sa_family = AF_UNSPEC;
217 		hlen = 12; 	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
218 		break;
219 #endif
220 	case DLT_PPP:
221 		sockp->sa_family = AF_UNSPEC;
222 		hlen = 4;	/* This should match PPP_HDRLEN */
223 		break;
224 
225 	default:
226 		return (EIO);
227 	}
228 
229 	len = uio->uio_resid;
230 	*datlen = len - hlen;
231 	if ((unsigned)len > MCLBYTES)
232 		return (EIO);
233 
234 	MGETHDR(m, M_WAIT, MT_DATA);
235 	if (m == 0)
236 		return (ENOBUFS);
237 	if (len > MHLEN) {
238 #if BSD >= 199103
239 		MCLGET(m, M_WAIT);
240 		if ((m->m_flags & M_EXT) == 0) {
241 #else
242 		MCLGET(m);
243 		if (m->m_len != MCLBYTES) {
244 #endif
245 			error = ENOBUFS;
246 			goto bad;
247 		}
248 	}
249 	m->m_pkthdr.len = m->m_len = len;
250 	m->m_pkthdr.rcvif = NULL;
251 	*mp = m;
252 	/*
253 	 * Make room for link header.
254 	 */
255 	if (hlen != 0) {
256 		m->m_pkthdr.len -= hlen;
257 		m->m_len -= hlen;
258 #if BSD >= 199103
259 		m->m_data += hlen; /* XXX */
260 #else
261 		m->m_off += hlen;
262 #endif
263 		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
264 		if (error)
265 			goto bad;
266 	}
267 	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
268 	if (!error)
269 		return (0);
270  bad:
271 	m_freem(m);
272 	return (error);
273 }
274 
275 /*
276  * Attach file to the bpf interface, i.e. make d listen on bp.
277  * Must be called at splimp.
278  */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 *
	 * NOTE: the list insertion (bd_next before bif_dlist) and setting
	 * if_bpf last mean the driver only starts diverting packets once
	 * the listener list is fully linked.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	bp->bif_ifp->if_bpf = bp;
}
295 
296 /*
297  * Detach a file from its interface.
298  */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/*
	 * Remove d from the interface's descriptor list.  Walking with a
	 * pointer-to-pointer lets the head and interior cases unlink the
	 * same way.
	 */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		d->bd_bif->bif_ifp->if_bpf = 0;
	d->bd_bif = 0;
}
336 
337 /*
338  * Open ethernet device.  Returns ENXIO for illegal minor device number,
339  * EBUSY if file is open by another process.
340  */
341 /* ARGSUSED */
static	int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

	/* bpf grants raw network visibility; never allow it from a jail. */
	if (p->p_prison)
		return (EPERM);

	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d)
		return (EBUSY);
	/* NOTE(review): make_dev on every first open appears to (re)create
	 * the device node for this minor — confirm against cdevsw cloning
	 * conventions of this kernel. */
	make_dev(&bpf_cdevsw, minor(dev), 0, 0, 0600, "bpf%d", lminor(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	/* Descriptor starts unbound: buffers are allocated at BIOCSETIF. */
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;
	callout_init(&d->bd_callout);
	return (0);
}
370 
371 /*
372  * Close the descriptor by detaching it from its interface,
373  * deallocating its buffers, and marking it free.
374  */
375 /* ARGSUSED */
static	int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d = dev->si_drv1;
	register int s;

	/* Drop SIGIO ownership before tearing the descriptor down. */
	funsetown(d->bd_sigio);
	s = splimp();
	/* Cancel any pending read-timeout callout. */
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	/* Safe outside splimp: d is already off the interface's list. */
	bpf_freed(d);
	dev->si_drv1 = 0;
	free(d, M_BPF);

	return (0);
}
400 
401 /*
402  * Support for SunOS, which does not have tsleep.
403  */
#if BSD < 199103
/* Callout handler: flag the timeout and wake the sleeping reader. */
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

/*
 * Emulate tsleep-with-timeout on systems that only have sleep():
 * arm a timeout before sleeping and distinguish "timer fired"
 * (EWOULDBLOCK) from "signal caught" (EINTR) afterwards.
 */
int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif
439 
440 /*
441  * Rotate the packet buffers in descriptor d.  Move the store buffer
442  * into the hold slot, and the free buffer into the store slot.
443  * Zero the length of the new store buffer.
444  */
/*
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and stays safe inside an unbraced if/else.
 */
#define ROTATE_BUFFERS(d) do { \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0; \
} while (0)
451 /*
452  *  bpfread - read next chunk of packets from buffers
453  */
454 static	int
455 bpfread(dev, uio, ioflag)
456 	dev_t dev;
457 	register struct uio *uio;
458 	int ioflag;
459 {
460 	register struct bpf_d *d = dev->si_drv1;
461 	int timed_out;
462 	int error;
463 	int s;
464 
465 	/*
466 	 * Restrict application to use a buffer the same size as
467 	 * as kernel buffers.
468 	 */
469 	if (uio->uio_resid != d->bd_bufsize)
470 		return (EINVAL);
471 
472 	s = splimp();
473 	if (d->bd_state == BPF_WAITING)
474 		callout_stop(&d->bd_callout);
475 	timed_out = (d->bd_state == BPF_TIMED_OUT);
476 	d->bd_state = BPF_IDLE;
477 	/*
478 	 * If the hold buffer is empty, then do a timed sleep, which
479 	 * ends when the timeout expires or when enough packets
480 	 * have arrived to fill the store buffer.
481 	 */
482 	while (d->bd_hbuf == 0) {
483 		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
484 			/*
485 			 * A packet(s) either arrived since the previous
486 			 * read or arrived while we were asleep.
487 			 * Rotate the buffers and return what's here.
488 			 */
489 			ROTATE_BUFFERS(d);
490 			break;
491 		}
492 
493 		/*
494 		 * No data is available, check to see if the bpf device
495 		 * is still pointed at a real interface.  If not, return
496 		 * ENXIO so that the userland process knows to rebind
497 		 * it before using it again.
498 		 */
499 		if (d->bd_bif == NULL) {
500 			splx(s);
501 			return (ENXIO);
502 		}
503 
504 		if (ioflag & IO_NDELAY) {
505 			splx(s);
506 			return (EWOULDBLOCK);
507 		}
508 		error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
509 				  d->bd_rtout);
510 		if (error == EINTR || error == ERESTART) {
511 			splx(s);
512 			return (error);
513 		}
514 		if (error == EWOULDBLOCK) {
515 			/*
516 			 * On a timeout, return what's in the buffer,
517 			 * which may be nothing.  If there is something
518 			 * in the store buffer, we can rotate the buffers.
519 			 */
520 			if (d->bd_hbuf)
521 				/*
522 				 * We filled up the buffer in between
523 				 * getting the timeout and arriving
524 				 * here, so we don't need to rotate.
525 				 */
526 				break;
527 
528 			if (d->bd_slen == 0) {
529 				splx(s);
530 				return (0);
531 			}
532 			ROTATE_BUFFERS(d);
533 			break;
534 		}
535 	}
536 	/*
537 	 * At this point, we know we have something in the hold slot.
538 	 */
539 	splx(s);
540 
541 	/*
542 	 * Move data from hold buffer into user space.
543 	 * We know the entire buffer is transferred since
544 	 * we checked above that the read buffer is bpf_bufsize bytes.
545 	 */
546 	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);
547 
548 	s = splimp();
549 	d->bd_fbuf = d->bd_hbuf;
550 	d->bd_hbuf = 0;
551 	d->bd_hlen = 0;
552 	splx(s);
553 
554 	return (error);
555 }
556 
557 
558 /*
559  * If there are processes sleeping on this descriptor, wake them up.
560  */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	/* Data is ready, so the read-timeout callout is no longer needed. */
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	/* Wake blocking readers ... */
	wakeup((caddr_t)d);
	/* ... notify async (FIOASYNC) consumers via signal ... */
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

	/* ... and wake select()/poll() waiters. */
#if BSD >= 199103
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}
585 
586 static void
587 bpf_timed_out(arg)
588 	void *arg;
589 {
590 	struct bpf_d *d = (struct bpf_d *)arg;
591 	int s;
592 
593 	s = splimp();
594 	if (d->bd_state == BPF_WAITING) {
595 		d->bd_state = BPF_TIMED_OUT;
596 		if (d->bd_slen != 0)
597 			bpf_wakeup(d);
598 	}
599 	splx(s);
600 }
601 
602 static	int
603 bpfwrite(dev, uio, ioflag)
604 	dev_t dev;
605 	struct uio *uio;
606 	int ioflag;
607 {
608 	register struct bpf_d *d = dev->si_drv1;
609 	struct ifnet *ifp;
610 	struct mbuf *m;
611 	int error, s;
612 	static struct sockaddr dst;
613 	int datlen;
614 
615 	if (d->bd_bif == 0)
616 		return (ENXIO);
617 
618 	ifp = d->bd_bif->bif_ifp;
619 
620 	if (uio->uio_resid == 0)
621 		return (0);
622 
623 	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
624 	if (error)
625 		return (error);
626 
627 	if (datlen > ifp->if_mtu)
628 		return (EMSGSIZE);
629 
630 	if (d->bd_hdrcmplt)
631 		dst.sa_family = pseudo_AF_HDRCMPLT;
632 
633 	s = splnet();
634 #if BSD >= 199103
635 	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
636 #else
637 	error = (*ifp->if_output)(ifp, m, &dst);
638 #endif
639 	splx(s);
640 	/*
641 	 * The driver frees the mbuf.
642 	 */
643 	return (error);
644 }
645 
646 /*
647  * Reset a descriptor by flushing its packet buffer and clearing the
648  * receive and drop counts.  Should be called at splimp.
649  */
650 static void
651 reset_d(d)
652 	struct bpf_d *d;
653 {
654 	if (d->bd_hbuf) {
655 		/* Free the hold buffer. */
656 		d->bd_fbuf = d->bd_hbuf;
657 		d->bd_hbuf = 0;
658 	}
659 	d->bd_slen = 0;
660 	d->bd_hlen = 0;
661 	d->bd_rcount = 0;
662 	d->bd_dcount = 0;
663 }
664 
665 /*
666  *  FIONREAD		Check for read packet available.
667  *  SIOCGIFADDR		Get interface address - convenient hook to driver.
668  *  BIOCGBLEN		Get buffer len [for read()].
669  *  BIOCSETF		Set ethernet read filter.
670  *  BIOCFLUSH		Flush read packet buffer.
671  *  BIOCPROMISC		Put interface into promiscuous mode.
672  *  BIOCGDLT		Get link layer type.
673  *  BIOCGETIF		Get interface name.
674  *  BIOCSETIF		Set interface.
675  *  BIOCSRTIMEOUT	Set read timeout.
676  *  BIOCGRTIMEOUT	Get read timeout.
677  *  BIOCGSTATS		Get packet stats.
678  *  BIOCIMMEDIATE	Set immediate mode.
679  *  BIOCVERSION		Get filter language version.
680  *  BIOCGHDRCMPLT	Get "header already complete" flag
681  *  BIOCSHDRCMPLT	Set "header already complete" flag
682  *  BIOCGSEESENT	Get "see packets sent" flag
683  *  BIOCSSEESENT	Set "see packets sent" flag
684  */
685 /* ARGSUSED */
static	int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d = dev->si_drv1;
	int s, error = 0;

	/* Any ioctl cancels a pending read timeout. */
	s = splimp();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	splx(s);

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			/* Count both the store and (if any) hold buffer. */
			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				/* Forward directly to the bound interface. */
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is bound,
	 * since binding allocates the buffers; the request is clamped to
	 * [BPF_MINBUFSIZE, bpf_maxbufsize] and the clamped value is
	 * written back to the caller.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			/* bd_promisc remembers to undo this at detach. */
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			snprintf(ifr->ifr_name, sizeof(ifr->ifr_name),
			    "%s%d", ifp->if_name, ifp->if_unit);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
		 	u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}
962 
963 /*
964  * Set d's packet filter program to fp.  If this file already has a filter,
965  * free it and replace it.  Returns EINVAL for bogus requests.
966  */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	/* A null instruction pointer (with zero length) clears the filter. */
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_BPF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	/*
	 * Copy the program in and validate it before it is installed;
	 * the swap itself happens at splimp so taps never see a
	 * half-installed filter.
	 */
	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_BPF);

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}
1008 
1009 /*
1010  * Detach a file from its current interface (if attached at all) and attach
1011  * to the interface indicated by the name stored in ifr.
1012  * Return an errno or 0.
1013  */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		/* First bind on this descriptor: allocate its buffers. */
		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		/* Discard anything captured on the previous binding. */
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}
1067 
1068 /*
1069  * Support for select() and poll() system calls
1070  *
1071  * Return true iff the specific operation will not block indefinitely.
1072  * Otherwise, return false but make a note that a selwakeup() must be done.
1073  */
int
bpfpoll(dev, events, p)
	register dev_t dev;
	int events;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;
	int revents;

	d = dev->si_drv1;
	/* NOTE(review): an errno is returned here where callers expect a
	 * revents bitmask — verify against this kernel's poll framework. */
	if (d->bd_bif == NULL)
		return (ENXIO);

	/* Writes never block on a bpf descriptor. */
	revents = events & (POLLOUT | POLLWRNORM);
	s = splimp();
	if (events & (POLLIN | POLLRDNORM)) {
		/*
		 * An imitation of the FIONREAD ioctl code.
		 * XXX not quite.  An exact imitation:
		 *	if (d->b_slen != 0 ||
		 *	    (d->bd_hbuf != NULL && d->bd_hlen != 0)
		 */
		if (d->bd_hlen != 0 ||
		    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
		    d->bd_slen != 0))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(p, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	splx(s);
	return (revents);
}
1114 
1115 /*
1116  * Incoming linkage from device drivers.  Process the packet pkt, of length
1117  * pktlen, which is stored in a contiguous buffer.  The packet is parsed
1118  * by each process' filter, and if accepted, stashed into the corresponding
1119  * buffer.
1120  */
1121 void
1122 bpf_tap(ifp, pkt, pktlen)
1123 	struct ifnet *ifp;
1124 	register u_char *pkt;
1125 	register u_int pktlen;
1126 {
1127 	struct bpf_if *bp;
1128 	register struct bpf_d *d;
1129 	register u_int slen;
1130 	/*
1131 	 * Note that the ipl does not have to be raised at this point.
1132 	 * The only problem that could arise here is that if two different
1133 	 * interfaces shared any data.  This is not the case.
1134 	 */
1135 	bp = ifp->if_bpf;
1136 	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1137 		++d->bd_rcount;
1138 		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
1139 		if (slen != 0)
1140 			catchpacket(d, pkt, pktlen, slen, bcopy);
1141 	}
1142 }
1143 
1144 /*
1145  * Copy data from an mbuf chain into a buffer.  This code is derived
1146  * from m_copydata in sys/uipc_mbuf.c.
1147  */
1148 static void
1149 bpf_mcopy(src_arg, dst_arg, len)
1150 	const void *src_arg;
1151 	void *dst_arg;
1152 	register size_t len;
1153 {
1154 	register const struct mbuf *m;
1155 	register u_int count;
1156 	u_char *dst;
1157 
1158 	m = src_arg;
1159 	dst = dst_arg;
1160 	while (len > 0) {
1161 		if (m == 0)
1162 			panic("bpf_mcopy");
1163 		count = min(m->m_len, len);
1164 		bcopy(mtod(m, void *), dst, count);
1165 		m = m->m_next;
1166 		dst += count;
1167 		len -= count;
1168 	}
1169 }
1170 
1171 /*
1172  * Incoming linkage from device drivers, when packet is in an mbuf chain.
1173  */
1174 void
1175 bpf_mtap(ifp, m)
1176 	struct ifnet *ifp;
1177 	struct mbuf *m;
1178 {
1179 	struct bpf_if *bp = ifp->if_bpf;
1180 	struct bpf_d *d;
1181 	u_int pktlen, slen;
1182 	struct mbuf *m0;
1183 
1184 	pktlen = 0;
1185 	for (m0 = m; m0 != 0; m0 = m0->m_next)
1186 		pktlen += m0->m_len;
1187 
1188 	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1189 		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
1190 			continue;
1191 		++d->bd_rcount;
1192 		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
1193 		if (slen != 0)
1194 			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
1195 	}
1196 }
1197 
1198 /*
1199  * Move the packet data from interface memory (pkt) into the
1200  * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
1201  * otherwise 0.  "copy" is the routine called to do the actual data
1202  * transfer.  bcopy is passed in to copy contiguous chunks, while
1203  * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
1204  * pkt is really an mbuf.
1205  */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn) __P((const void *, void *, size_t));
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.  The timestamp source depends on what
	 * this BSD variant provides.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 * bh_caplen may be less than pktlen when truncated by snaplen or
	 * the buffer size.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}
1275 
/*
 * Allocate the free and store buffers for a descriptor and
 * reset its buffer lengths.
 */
1279 static int
1280 bpf_allocbufs(d)
1281 	register struct bpf_d *d;
1282 {
1283 	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1284 	if (d->bd_fbuf == 0)
1285 		return (ENOBUFS);
1286 
1287 	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1288 	if (d->bd_sbuf == 0) {
1289 		free(d->bd_fbuf, M_BPF);
1290 		return (ENOBUFS);
1291 	}
1292 	d->bd_slen = 0;
1293 	d->bd_hlen = 0;
1294 	return (0);
1295 }
1296 
1297 /*
1298  * Free buffers currently in use by a descriptor.
1299  * Called on close.
1300  */
1301 static void
1302 bpf_freed(d)
1303 	register struct bpf_d *d;
1304 {
1305 	/*
1306 	 * We don't need to lock out interrupts since this descriptor has
1307 	 * been detached from its interface and it yet hasn't been marked
1308 	 * free.
1309 	 */
1310 	if (d->bd_sbuf != 0) {
1311 		free(d->bd_sbuf, M_BPF);
1312 		if (d->bd_hbuf != 0)
1313 			free(d->bd_hbuf, M_BPF);
1314 		if (d->bd_fbuf != 0)
1315 			free(d->bd_fbuf, M_BPF);
1316 	}
1317 	if (d->bd_filter)
1318 		free((caddr_t)d->bd_filter, M_BPF);
1319 }
1320 
1321 /*
1322  * Attach an interface to bpf.  ifp is a pointer to the structure
1323  * defining the interface to be attached, dlt is the link layer type,
1324  * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
1326  */
1327 void
1328 bpfattach(ifp, dlt, hdrlen)
1329 	struct ifnet *ifp;
1330 	u_int dlt, hdrlen;
1331 {
1332 	struct bpf_if *bp;
1333 	bp = (struct bpf_if *)malloc(sizeof(*bp), M_BPF, M_DONTWAIT | M_ZERO);
1334 	if (bp == 0)
1335 		panic("bpfattach");
1336 
1337 	bp->bif_ifp = ifp;
1338 	bp->bif_dlt = dlt;
1339 
1340 	bp->bif_next = bpf_iflist;
1341 	bpf_iflist = bp;
1342 
1343 	bp->bif_ifp->if_bpf = 0;
1344 
1345 	/*
1346 	 * Compute the length of the bpf header.  This is not necessarily
1347 	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1348 	 * that the network layer header begins on a longword boundary (for
1349 	 * performance reasons and to alleviate alignment restrictions).
1350 	 */
1351 	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1352 
1353 	if (bootverbose)
1354 		printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
1355 }
1356 
1357 /*
1358  * Detach bpf from an interface.  This involves detaching each descriptor
1359  * associated with the interface, and leaving bd_bif NULL.  Notify each
1360  * descriptor as it's detached so that any sleepers wake up and get
1361  * ENXIO.
1362  */
1363 void
1364 bpfdetach(ifp)
1365 	struct ifnet *ifp;
1366 {
1367 	struct bpf_if	*bp, *bp_prev;
1368 	struct bpf_d	*d;
1369 	int	s;
1370 
1371 	s = splimp();
1372 
1373 	/* Locate BPF interface information */
1374 	bp_prev = NULL;
1375 	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1376 		if (ifp == bp->bif_ifp)
1377 			break;
1378 		bp_prev = bp;
1379 	}
1380 
1381 	/* Interface wasn't attached */
1382 	if (bp->bif_ifp == NULL) {
1383 		splx(s);
1384 		printf("bpfdetach: %s%d was not attached\n", ifp->if_name,
1385 		    ifp->if_unit);
1386 		return;
1387 	}
1388 
1389 	while ((d = bp->bif_dlist) != NULL) {
1390 		bpf_detachd(d);
1391 		bpf_wakeup(d);
1392 	}
1393 
1394 	if (bp_prev) {
1395 		bp_prev->bif_next = bp->bif_next;
1396 	} else {
1397 		bpf_iflist = bp->bif_next;
1398 	}
1399 
1400 	free(bp, M_BPF);
1401 
1402 	splx(s);
1403 }
1404 
1405 static void bpf_drvinit __P((void *unused));
1406 
1407 static void
1408 bpf_drvinit(unused)
1409 	void *unused;
1410 {
1411 
1412 	cdevsw_add(&bpf_cdevsw);
1413 }
1414 
1415 SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
1416 
1417 #else /* !BPF */
1418 /*
1419  * NOP stubs to allow bpf-using drivers to load and function.
1420  *
1421  * A 'better' implementation would allow the core bpf functionality
1422  * to be loaded at runtime.
1423  */
1424 
1425 void
1426 bpf_tap(ifp, pkt, pktlen)
1427 	struct ifnet *ifp;
1428 	register u_char *pkt;
1429 	register u_int pktlen;
1430 {
1431 }
1432 
1433 void
1434 bpf_mtap(ifp, m)
1435 	struct ifnet *ifp;
1436 	struct mbuf *m;
1437 {
1438 }
1439 
1440 void
1441 bpfattach(ifp, dlt, hdrlen)
1442 	struct ifnet *ifp;
1443 	u_int dlt, hdrlen;
1444 {
1445 }
1446 
1447 void
1448 bpfdetach(ifp)
1449 	struct ifnet *ifp;
1450 {
1451 }
1452 
1453 u_int
1454 bpf_filter(pc, p, wirelen, buflen)
1455 	register const struct bpf_insn *pc;
1456 	register u_char *p;
1457 	u_int wirelen;
1458 	register u_int buflen;
1459 {
1460 	return -1;	/* "no filter" behaviour */
1461 }
1462 
1463 #endif /* !BPF */
1464