xref: /original-bsd/sys/net/bpf.c (revision e59fb703)
1 /*
2  * Copyright (c) 1990, 1991 Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from the Stanford/CMU enet packet filter,
6  * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7  * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8  * Berkeley Laboratory.
9  *
10  * %sccs.include.redist.c%
11  *
12  *      @(#)bpf.c	7.7 (Berkeley) 10/29/91
13  *
14  * static char rcsid[] =
15  * "$Header: bpf.c,v 1.33 91/10/27 21:21:58 mccanne Exp $";
16  */
17 
18 #include "bpfilter.h"
19 
20 #if NBPFILTER > 0
21 
22 #ifndef __GNUC__
23 #define inline
24 #else
25 #define inline __inline__
26 #endif
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/mbuf.h>
31 #include <sys/buf.h>
32 #include <sys/dir.h>
33 #include <sys/proc.h>
34 #include <sys/user.h>
35 #include <sys/ioctl.h>
36 #include <sys/map.h>
37 
38 #include <sys/file.h>
39 #if defined(sparc) && BSD < 199103
40 #include <sys/stream.h>
41 #endif
42 #include <sys/tty.h>
43 #include <sys/uio.h>
44 
45 #include <sys/protosw.h>
46 #include <sys/socket.h>
47 #include <net/if.h>
48 
49 #include <net/bpf.h>
50 #include <net/bpfdesc.h>
51 
52 #include <sys/errno.h>
53 
54 #include <netinet/in.h>
55 #include <netinet/if_ether.h>
56 #include <sys/kernel.h>
57 
58 /*
59  * Older BSDs don't have kernel malloc.
60  */
61 #if BSD < 199103
62 extern bcopy();
63 static caddr_t bpf_alloc();
64 #define malloc(size, type, canwait) bpf_alloc(size, canwait)
65 #define free(cp, type) m_free(*(struct mbuf **)(cp - 8))
66 #define M_WAITOK M_WAIT
67 #define BPF_BUFSIZE (MCLBYTES-8)
68 #define ERESTART EINTR
69 #else
70 #define BPF_BUFSIZE 4096
71 #endif
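
/*
 * Note on the compatibility macros above: bpf_alloc() (at the end of this
 * file) carves each buffer out of an mbuf or mbuf cluster, stores the mbuf
 * pointer in the first 8 bytes of the data area, and returns a pointer just
 * past that slot.  The free() macro recovers the mbuf from (cp - 8), which
 * is also why BPF_BUFSIZE is MCLBYTES less those 8 bytes of bookkeeping.
 */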
72 
73 #define PRINET  26			/* interruptible */
74 
75 /*
76  * The default read buffer size is patchable.
77  */
78 int bpf_bufsize = BPF_BUFSIZE;
79 
80 /*
81  *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
82  *  bpf_dtab holds the descriptors, indexed by minor device #
83  *
84  * We really don't need NBPFILTER bpf_if entries, but this eliminates
85  * the need to account for all possible drivers here.
86  * This problem will go away when these structures are allocated dynamically.
87  */
88 static struct bpf_if 	*bpf_iflist;
89 static struct bpf_d	bpf_dtab[NBPFILTER];
90 
91 static void	bpf_ifname();
92 static void	catchpacket();
93 static int	bpf_setif();
94 static int	bpf_initd();
95 
96 static int
97 bpf_movein(uio, linktype, mp, sockp)
98 	register struct uio *uio;
99 	int linktype;
100 	register struct mbuf **mp;
101 	register struct sockaddr *sockp;
102 {
103 	struct mbuf *m;
104 	int error;
105 	int len;
106 	int hlen;
107 
108 	/*
109 	 * Build a sockaddr based on the data link layer type.
110 	 * We do this at this level because the ethernet header
111 	 * is copied directly into the data field of the sockaddr.
112 	 * In the case of SLIP, there is no header and the packet
113 	 * is forwarded as is.
114 	 * Also, we are careful to leave room at the front of the mbuf
115 	 * for the link level header.
116 	 */
117 	switch (linktype) {
118 	case DLT_SLIP:
119 		sockp->sa_family = AF_INET;
120 		hlen = 0;
121 		break;
122 
123 	case DLT_EN10MB:
124 		sockp->sa_family = AF_UNSPEC;
125 		/* XXX Would MAXLINKHDR be better? */
126 		hlen = sizeof(struct ether_header);
127 		break;
128 
129 	case DLT_FDDI:
130 		sockp->sa_family = AF_UNSPEC;
131 		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
132 		hlen = 24;
133 		break;
134 
135 	default:
136 		return (EIO);
137 	}
138 
139 	len = uio->uio_resid;
140 	if ((unsigned)len > MCLBYTES)
141 		return (EIO);
142 
143 	MGET(m, M_WAIT, MT_DATA);
144 	if (m == 0)
145 		return (ENOBUFS);
146 	if (len > MLEN) {
147 #if BSD >= 199103
148 		MCLGET(m, M_WAIT);
149 		if ((m->m_flags & M_EXT) == 0) {
150 #else
151 		MCLGET(m);
152 		if (m->m_len != MCLBYTES) {
153 #endif
154 			error = ENOBUFS;
155 			goto bad;
156 		}
157 	}
158 	m->m_len = len;
159 	*mp = m;
160 	/*
161 	 * Make room for link header.
162 	 */
163 	if (hlen) {
164 		m->m_len -= hlen;
165 #if BSD >= 199103
166 		m->m_data += hlen; /* XXX */
167 #else
168 		m->m_off += hlen;
169 #endif
170 		error = uiomove((caddr_t)sockp->sa_data, hlen, uio);
171 		if (error)
172 			goto bad;
173 	}
174 	error = uiomove(mtod(m, caddr_t), len - hlen, uio);
175 	if (!error)
176 		return (0);
177  bad:
178 	m_freem(m);
179 	return (error);
180 }
181 
182 /*
183  * Attach file to the bpf interface, i.e. make d listen on bp.
184  * Must be called at splimp.
185  */
186 static void
187 bpf_attachd(d, bp)
188 	struct bpf_d *d;
189 	struct bpf_if *bp;
190 {
191 	/*
192 	 * Point d at bp, and add d to the interface's list of listeners.
193 	 * Finally, point the driver's bpf cookie at the interface so
194 	 * it will divert packets to bpf.
195 	 */
196 	d->bd_bif = bp;
197 	d->bd_next = bp->bif_dlist;
198 	bp->bif_dlist = d;
199 
200 	*bp->bif_driverp = bp;
201 }
202 
203 /*
204  * Detach a file from its interface.
205  */
206 static void
207 bpf_detachd(d)
208 	struct bpf_d *d;
209 {
210 	struct bpf_d **p;
211 	struct bpf_if *bp;
212 
213 	bp = d->bd_bif;
214 	/*
215 	 * Check if this descriptor had requested promiscuous mode.
216 	 * If so, turn it off.
217 	 */
218 	if (d->bd_promisc) {
219 		d->bd_promisc = 0;
220 		if (ifpromisc(bp->bif_ifp, 0))
221 			/*
222 			 * Something is really wrong if we were able to put
223 			 * the driver into promiscuous mode, but can't
224 			 * take it out.
225 			 */
226 			panic("bpf: ifpromisc failed");
227 	}
228 	/* Remove d from the interface's descriptor list. */
229 	p = &bp->bif_dlist;
230 	while (*p != d) {
231 		p = &(*p)->bd_next;
232 		if (*p == 0)
233 			panic("bpf_detachd: descriptor not in list");
234 	}
235 	*p = (*p)->bd_next;
236 	if (bp->bif_dlist == 0)
237 		/*
238 		 * Let the driver know that there are no more listeners.
239 		 */
240 		*d->bd_bif->bif_driverp = 0;
241 	d->bd_bif = 0;
242 }
243 
244 
245 /*
246  * Mark a descriptor free by making it point to itself.
247  * This is probably cheaper than marking with a constant since
248  * the address should be in a register anyway.
249  */
250 #define D_ISFREE(d) ((d) == (d)->bd_next)
251 #define D_MARKFREE(d) ((d)->bd_next = (d))
252 #define D_MARKUSED(d) ((d)->bd_next = 0)
253 
254 /*
255  *  bpfopen - open ethernet device
256  *
257  *  Errors:	ENXIO	- illegal minor device number
258  *		EBUSY	- too many files open
259  */
260 /* ARGSUSED */
261 int
262 bpfopen(dev, flag)
263 	dev_t dev;
264 	int flag;
265 {
266 	int error, s;
267 	register struct bpf_d *d;
268 
269 	if (minor(dev) >= NBPFILTER)
270 		return (ENXIO);
271 
272 	/*
273 	 * Each minor can be opened by only one process.  If the requested
274 	 * minor is in use, return EBUSY.
275 	 */
276 	s = splimp();
277 	d = &bpf_dtab[minor(dev)];
278 	if (!D_ISFREE(d)) {
279 		splx(s);
280 		return (EBUSY);
281 	} else
282 		/* Mark "free" and do most initialization. */
283 		bzero((char *)d, sizeof(*d));
284 	splx(s);
285 
286 	error = bpf_initd(d);
287 	if (error) {
288 		D_MARKFREE(d);
289 		return (error);
290 	}
291 	return (0);
292 }
293 
294 /*
295  * Close the descriptor by detaching it from its interface,
296  * deallocating its buffers, and marking it free.
297  */
298 /* ARGSUSED */
299 bpfclose(dev, flag)
300 	dev_t dev;
301 	int flag;
302 {
303 	register struct bpf_d *d = &bpf_dtab[minor(dev)];
304 	int s;
305 
306 	s = splimp();
307 	if (d->bd_bif)
308 		bpf_detachd(d);
309 	splx(s);
310 
311 	bpf_freed(d);
312 }
313 
314 #if BSD < 199103
315 static
316 bpf_timeout(arg)
317 	caddr_t arg;
318 {
319 	struct bpf_d *d = (struct bpf_d *)arg;
320 	d->bd_timedout = 1;
321 	wakeup(arg);
322 }
323 
324 static int
325 tsleep(cp, pri, s, t)
326 	register caddr_t cp;
327 	register int pri;
328 	char *s;
329 	register int t;
330 {
331 	register struct bpf_d *d = (struct bpf_d *)cp;
332 	register int error;
333 
334 	if (t != 0) {
335 		d->bd_timedout = 0;
336 		timeout(bpf_timeout, cp, t);
337 	}
338 	error = sleep(cp, pri);
339 	if (t != 0) {
340 		if (d->bd_timedout != 0)
341 			return EWOULDBLOCK;
342 		untimeout(bpf_timeout, cp);
343 	}
344 	return error;
345 }
346 #endif
347 
348 /*
349  * Rotate the packet buffers in descriptor d.  Move the store buffer
350  * into the hold slot, and the free buffer into the store slot.
351  * Zero the length of the new store buffer.
352  */
353 #define ROTATE_BUFFERS(d) \
354 	(d)->bd_hbuf = (d)->bd_sbuf; \
355 	(d)->bd_hlen = (d)->bd_slen; \
356 	(d)->bd_sbuf = (d)->bd_fbuf; \
357 	(d)->bd_slen = 0; \
358 	(d)->bd_fbuf = 0;
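
/*
 * The three buffers rotated above work as follows: packets accumulate in
 * the store buffer (bd_sbuf/bd_slen); when a read becomes possible the
 * store buffer moves to the hold slot (bd_hbuf/bd_hlen) to be copied out,
 * the spare free buffer (bd_fbuf) becomes the new store buffer, and the
 * free slot stays empty until bpfread() returns the hold buffer to it.
 * If the store buffer fills while the hold buffer is still being read
 * (bd_fbuf == 0), catchpacket() drops the packet and bumps bd_dcount.
 */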
359 /*
360  *  bpfread - read next chunk of packets from buffers
361  */
362 int
363 bpfread(dev, uio)
364 	dev_t dev;
365 	register struct uio *uio;
366 {
367 	register struct bpf_d *d = &bpf_dtab[minor(dev)];
368 	int error;
369 	int s;
370 
371 	/*
372 	 * Restrict application to use a buffer the same size as
373 	 * the kernel buffers.
374 	 */
375 	if (uio->uio_resid != d->bd_bufsize)
376 		return (EINVAL);
377 
378 	s = splimp();
379 	/*
380 	 * If the hold buffer is empty, then set a timer and sleep
381 	 * until either the timeout has occurred or enough packets have
382 	 * arrived to fill the store buffer.
383 	 */
384 	while (d->bd_hbuf == 0) {
385 		if (d->bd_immediate && d->bd_slen != 0) {
386 			/*
387 			 * A packet(s) either arrived since the previous
388 			 * read or arrived while we were asleep.
389 			 * Rotate the buffers and return what's here.
390 			 */
391 			ROTATE_BUFFERS(d);
392 			break;
393 		}
394 		error = tsleep((caddr_t)d, PRINET|PCATCH, "bpf", d->bd_rtout);
395 		if (error == EINTR || error == ERESTART) {
396 			splx(s);
397 			return (error);
398 		}
399 		if (error == EWOULDBLOCK) {
400 			/*
401 			 * On a timeout, return what's in the buffer,
402 			 * which may be nothing.  If there is something
403 			 * in the store buffer, we can rotate the buffers.
404 			 */
405 			if (d->bd_hbuf)
406 				/*
407 				 * We filled up the buffer in between
408 				 * getting the timeout and arriving
409 				 * here, so we don't need to rotate.
410 				 */
411 				break;
412 
413 			if (d->bd_slen == 0) {
414 				splx(s);
415 				return (0);
416 			}
417 			ROTATE_BUFFERS(d);
418 			break;
419 		}
420 	}
421 	/*
422 	 * At this point, we know we have something in the hold slot.
423 	 */
424 	splx(s);
425 
426 	/*
427 	 * Move data from hold buffer into user space.
428 	 * We know the entire buffer is transferred since
429 	 * we checked above that the read buffer is bpf_bufsize bytes.
430 	 */
431 #if BSD >= 199103
432 	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);
433 #else
434 	error = uiomove(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);
435 #endif
436 	s = splimp();
437 	d->bd_fbuf = d->bd_hbuf;
438 	d->bd_hbuf = 0;
439 	splx(s);
440 
441 	return (error);
442 }
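
/*
 * A minimal user-level sketch of consuming one buffer returned by the read
 * above.  The read must be exactly the kernel buffer size (checked at the
 * top of bpfread()); each captured packet is prefixed by a struct bpf_hdr,
 * and the next packet starts at the BPF_WORDALIGN of the current header
 * length plus capture length.  The descriptor "fd" and the consumer
 * "use_packet()" are illustrative names, not part of this source.
 *
 *	u_int bufsize;
 *	u_char *buf, *p;
 *	int cc;
 *
 *	(void) ioctl(fd, BIOCGBLEN, (caddr_t)&bufsize);
 *	buf = (u_char *)malloc(bufsize);
 *	cc = read(fd, (char *)buf, bufsize);
 *	for (p = buf; p < buf + cc; ) {
 *		register struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *
 *		use_packet(p + bh->bh_hdrlen, bh->bh_caplen, bh->bh_datalen);
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */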
443 
444 
445 /*
446  * If there are processes sleeping on this descriptor, wake them up.
447  */
448 static inline void
449 bpf_wakeup(d)
450 	register struct bpf_d *d;
451 {
452 	wakeup((caddr_t)d);
453 	if (d->bd_selproc) {
454 		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
455 		d->bd_selcoll = 0;
456 		d->bd_selproc = 0;
457 	}
458 }
459 
460 int
461 bpfwrite(dev, uio)
462 	dev_t dev;
463 	struct uio *uio;
464 {
465 	register struct bpf_d *d = &bpf_dtab[minor(dev)];
466 	struct ifnet *ifp;
467 	struct mbuf *m;
468 	int error, s;
469 	static struct sockaddr dst;
470 
471 	if (d->bd_bif == 0)
472 		return (ENXIO);
473 
474 	ifp = d->bd_bif->bif_ifp;
475 
476 	if (uio->uio_resid == 0)
477 		return (0);
478 	if (uio->uio_resid > ifp->if_mtu)
479 		return (EMSGSIZE);
480 
481 	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst);
482 	if (error)
483 		return (error);
484 
485 	s = splnet();
486 #if BSD >= 199103
487 	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
488 #else
489 	error = (*ifp->if_output)(ifp, m, &dst);
490 #endif
491 	splx(s);
492 	/*
493 	 * The driver frees the mbuf.
494 	 */
495 	return (error);
496 }
497 
498 /*
499  * Reset a descriptor by flushing its packet buffer and clearing the receive
500  * and drop counts.  Should be called at splimp.
501  */
502 static void
503 reset_d(d)
504 	struct bpf_d *d;
505 {
506 	if (d->bd_hbuf) {
507 		/* Free the hold buffer. */
508 		d->bd_fbuf = d->bd_hbuf;
509 		d->bd_hbuf = 0;
510 	}
511 	d->bd_slen = 0;
512 	d->bd_rcount = 0;
513 	d->bd_dcount = 0;
514 }
515 
516 /*
517  *  FIONREAD		Check for read packet available.
518  *  SIOCGIFADDR		Get interface address - convenient hook to driver.
519  *  BIOCGBLEN		Get buffer len [for read()].
520  *  BIOCSETF		Set ethernet read filter.
521  *  BIOCFLUSH		Flush read packet buffer.
522  *  BIOCPROMISC		Put interface into promiscuous mode.
523  *  BIOCGDLT		Get link layer type.
524  *  BIOCGETIF		Get interface name.
525  *  BIOCSETIF		Set interface.
526  *  BIOCSRTIMEOUT	Set read timeout.
527  *  BIOCGRTIMEOUT	Get read timeout.
528  *  BIOCGSTATS		Get packet stats.
529  *  BIOCIMMEDIATE	Set immediate mode.
530  */
531 /* ARGSUSED */
532 int
533 bpfioctl(dev, cmd, addr, flag)
534 	dev_t dev;
535 	int cmd;
536 	caddr_t addr;
537 	int flag;
538 {
539 	register struct bpf_d *d = &bpf_dtab[minor(dev)];
540 	int s, error = 0;
541 
542 	switch (cmd) {
543 
544 	default:
545 		error = EINVAL;
546 		break;
547 
548 	/*
549 	 * Check for read packet available.
550 	 */
551 	case FIONREAD:
552 		{
553 			int n;
554 
555 			s = splimp();
556 			n = d->bd_slen;
557 			if (d->bd_hbuf)
558 				n += d->bd_hlen;
559 			splx(s);
560 
561 			*(int *)addr = n;
562 			break;
563 		}
564 
565 	case SIOCGIFADDR:
566 		{
567 			struct ifnet *ifp;
568 
569 			if (d->bd_bif == 0)
570 				error = EINVAL;
571 			else {
572 				ifp = d->bd_bif->bif_ifp;
573 				error =  (*ifp->if_ioctl)(ifp, cmd, addr);
574 			}
575 			break;
576 		}
577 
578 	/*
579 	 * Get buffer len [for read()].
580 	 */
581 	case BIOCGBLEN:
582 		*(u_int *)addr = d->bd_bufsize;
583 		break;
584 
585 	/*
586 	 * Set link layer read filter.
587 	 */
588 	case BIOCSETF:
589 		error = bpf_setf(d, (struct bpf_program *)addr);
590 		break;
591 
592 	/*
593 	 * Flush read packet buffer.
594 	 */
595 	case BIOCFLUSH:
596 		s = splimp();
597 		reset_d(d);
598 		splx(s);
599 		break;
600 
601 	/*
602 	 * Put interface into promiscuous mode.
603 	 */
604 	case BIOCPROMISC:
605 		if (d->bd_bif == 0) {
606 			/*
607 			 * No interface attached yet.
608 			 */
609 			error = EINVAL;
610 			break;
611 		}
612 		s = splimp();
613 		if (d->bd_promisc == 0) {
614 			error = ifpromisc(d->bd_bif->bif_ifp, 1);
615 			if (error == 0)
616 				d->bd_promisc = 1;
617 		}
618 		splx(s);
619 		break;
620 
621 	/*
622 	 * Get device parameters.
623 	 */
624 	case BIOCGDLT:
625 		if (d->bd_bif == 0)
626 			error = EINVAL;
627 		else
628 			*(u_int *)addr = d->bd_bif->bif_dlt;
629 		break;
630 
631 	/*
632 	 * Get interface name.
633 	 */
634 	case BIOCGETIF:
635 		if (d->bd_bif == 0)
636 			error = EINVAL;
637 		else
638 			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
639 		break;
640 
641 	/*
642 	 * Set interface.
643 	 */
644 	case BIOCSETIF:
645 		error = bpf_setif(d, (struct ifreq *)addr);
646 		break;
647 
648 	/*
649 	 * Set read timeout.
650 	 */
651  	case BIOCSRTIMEOUT:
652 		{
653 			struct timeval *tv = (struct timeval *)addr;
654 			u_long msec;
655 
656 			/* Compute number of milliseconds. */
657 			msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
658 			/* Scale milliseconds to ticks.  Assume hard
659 			   clock has millisecond or greater resolution
660 			   (i.e. tick >= 1000).  For 10ms hardclock,
661 			   tick/1000 = 10, so rtout<-msec/10. */
662 			d->bd_rtout = msec / (tick / 1000);
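			/*
			 * A worked instance of the scaling above, assuming
			 * hz = 100 (tick = 10000): tick / 1000 = 10, so a
			 * requested timeout of 2500 msec becomes
			 * bd_rtout = 250 ticks, i.e. 2.5 seconds.
			 */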
663 			break;
664 		}
665 
666 	/*
667 	 * Get read timeout.
668 	 */
669  	case BIOCGRTIMEOUT:
670 		{
671 			struct timeval *tv = (struct timeval *)addr;
672 			u_long msec = d->bd_rtout;
673 
674 			msec *= tick / 1000;
675 			tv->tv_sec = msec / 1000;
676 			tv->tv_usec = msec % 1000;
677 			break;
678 		}
679 
680 	/*
681 	 * Get packet stats.
682 	 */
683 	case BIOCGSTATS:
684 		{
685 			struct bpf_stat *bs = (struct bpf_stat *)addr;
686 
687 			bs->bs_recv = d->bd_rcount;
688 			bs->bs_drop = d->bd_dcount;
689 			break;
690 		}
691 
692 	/*
693 	 * Set immediate mode.
694 	 */
695 	case BIOCIMMEDIATE:
696 		d->bd_immediate = *(u_int *)addr;
697 		break;
698 	}
699 	return (error);
700 }
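
/*
 * A sketch of the typical ioctl sequence a user process issues after
 * opening the device: attach to an interface, enable immediate mode and
 * set a one second read timeout.  The descriptor "fd", the device path
 * and the interface name "le0" are illustrative.
 *
 *	struct ifreq ifr;
 *	struct timeval tv;
 *	u_int on = 1;
 *	int fd;
 *
 *	fd = open("/dev/bpf0", O_RDONLY);
 *	strncpy(ifr.ifr_name, "le0", sizeof(ifr.ifr_name));
 *	if (ioctl(fd, BIOCSETIF, (caddr_t)&ifr) < 0) {
 *		perror("BIOCSETIF");
 *		exit(1);
 *	}
 *	(void) ioctl(fd, BIOCIMMEDIATE, (caddr_t)&on);
 *	tv.tv_sec = 1;
 *	tv.tv_usec = 0;
 *	(void) ioctl(fd, BIOCSRTIMEOUT, (caddr_t)&tv);
 */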
701 
702 /*
703  * Set d's packet filter program to fp.  If this file already has a filter,
704  * free it and replace it.  Returns EINVAL for bogus requests.
705  */
706 int
707 bpf_setf(d, fp)
708 	struct bpf_d *d;
709 	struct bpf_program *fp;
710 {
711 	struct bpf_insn *fcode, *old;
712 	u_int flen, size;
713 	int s;
714 
715 	old = d->bd_filter;
716 	if (fp->bf_insns == 0) {
717 		if (fp->bf_len != 0)
718 			return (EINVAL);
719 		s = splimp();
720 		d->bd_filter = 0;
721 		reset_d(d);
722 		splx(s);
723 		if (old != 0)
724 			free((caddr_t)old, M_DEVBUF);
725 		return (0);
726 	}
727 	flen = fp->bf_len;
728 	if (flen > BPF_MAXINSNS)
729 		return (EINVAL);
730 
731 	size = flen * sizeof(*fp->bf_insns);
732 	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
733 	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
734 	    bpf_validate(fcode, (int)flen)) {
735 		s = splimp();
736 		d->bd_filter = fcode;
737 		reset_d(d);
738 		splx(s);
739 		if (old != 0)
740 			free((caddr_t)old, M_DEVBUF);
741 
742 		return (0);
743 	}
744 	free((caddr_t)fcode, M_DEVBUF);
745 	return (EINVAL);
746 }
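
/*
 * A sketch of a filter program a user process might hand to the BIOCSETF
 * ioctl handled above: accept only IP packets on an Ethernet, capturing
 * them in full, and reject everything else.  BPF_STMT and BPF_JUMP are the
 * initializer macros from net/bpf.h; the descriptor "fd" is illustrative.
 *
 *	static struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_LD+BPF_H+BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, ETHERTYPE_IP, 0, 1),
 *		BPF_STMT(BPF_RET+BPF_K, (u_int)-1),
 *		BPF_STMT(BPF_RET+BPF_K, 0),
 *	};
 *	static struct bpf_program prog = {
 *		sizeof(insns) / sizeof(insns[0]),
 *		insns
 *	};
 *
 *	if (ioctl(fd, BIOCSETF, (caddr_t)&prog) < 0)
 *		perror("BIOCSETF");
 */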
747 
748 /*
749  * Detach a file from its current interface (if attached at all) and attach
750  * to the interface indicated by the name stored in ifr.
751  * Return an errno or 0.
752  */
753 static int
754 bpf_setif(d, ifr)
755 	struct bpf_d *d;
756 	struct ifreq *ifr;
757 {
758 	struct bpf_if *bp;
759 	char *cp;
760 	int unit, s;
761 
762 	/*
763 	 * Separate string into name part and unit number.  Put a null
764 	 * byte at the end of the name part, and compute the number.
765 	 * If the unit number is unspecified, the default is 0,
766 	 * as initialized above.  XXX This should be common code.
767 	 */
768 	unit = 0;
769 	cp = ifr->ifr_name;
770 	cp[sizeof(ifr->ifr_name) - 1] = '\0';
771 	while (*cp++) {
772 		if (*cp >= '0' && *cp <= '9') {
773 			unit = *cp - '0';
774 			*cp++ = '\0';
775 			while (*cp)
776 				unit = 10 * unit + *cp++ - '0';
777 			break;
778 		}
779 	}
780 	/*
781 	 * Look through attached interfaces for the named one.
782 	 */
783 	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
784 		struct ifnet *ifp = bp->bif_ifp;
785 
786 		if (ifp == 0 || unit != ifp->if_unit
787 		    || strcmp(ifp->if_name, ifr->ifr_name) != 0)
788 			continue;
789 		/*
790 		 * We found the requested interface.  If we're
791 		 * already attached to it, just flush the buffer.
792 		 * If it's not up, return an error.
793 		 */
794 		if ((ifp->if_flags & IFF_UP) == 0)
795 			return (ENETDOWN);
796 		s = splimp();
797 		if (bp != d->bd_bif) {
798 			if (d->bd_bif)
799 				/*
800 				 * Detach if attached to something else.
801 				 */
802 				bpf_detachd(d);
803 
804 			bpf_attachd(d, bp);
805 		}
806 		reset_d(d);
807 		splx(s);
808 		return (0);
809 	}
810 	/* Not found. */
811 	return (ENXIO);
812 }
813 
814 /*
815  * Convert an interface name plus unit number of an ifp to a single
816  * name which is returned in the ifr.
817  */
818 static void
819 bpf_ifname(ifp, ifr)
820 	struct ifnet *ifp;
821 	struct ifreq *ifr;
822 {
823 	char *s = ifp->if_name;
824 	char *d = ifr->ifr_name;
825 
826 	while (*d++ = *s++)
827 		;
828 	/* XXX Assume that unit number is less than 10. */
829 	*d++ = ifp->if_unit + '0';
830 	*d = '\0';
831 }
832 
833 /*
834  * The new select interface passes down the proc pointer; the old select
835  * stubs had to grab it out of the user struct.  This glue allows either case.
836  */
837 #if BSD >= 199103
838 #define bpf_select bpfselect
839 #else
840 int
841 bpfselect(dev, rw)
842 	register dev_t dev;
843 	int rw;
844 {
845 	return (bpf_select(dev, rw, u.u_procp));
846 }
847 #endif
848 
849 /*
850  * Support for select() system call
851  * Inspired by the code in tty.c for the same purpose.
852  *
853  * bpfselect - returns true iff the specific operation
854  *	will not block indefinitely.  Otherwise, return
855  *	false but make a note that a selwakeup() must be done.
856  */
857 int
858 bpf_select(dev, rw, p)
859 	register dev_t dev;
860 	int rw;
861 	struct proc *p;
862 {
863 	register struct bpf_d *d;
864 	register int s;
865 
866 	if (rw != FREAD)
867 		return (0);
868 	/*
869 	 * An imitation of the FIONREAD ioctl code.
870 	 */
871 	d = &bpf_dtab[minor(dev)];
872 
873 	s = splimp();
874 	if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) {
875 		/*
876 		 * There is data waiting.
877 		 */
878 		splx(s);
879 		return (1);
880 	}
881 	/*
882 	 * No data ready.  If there's already a select() waiting on this
883 	 * minor device then this is a collision.  This shouldn't happen
884 	 * because minors really should not be shared, but if a process
885 	 * forks while one of these is open, it is possible that both
886 	 * processes could select on the same descriptor.
887 	 */
888 	if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
889 		d->bd_selcoll = 1;
890 	else
891 		d->bd_selproc = p;
892 
893 	splx(s);
894 	return (0);
895 }
896 
897 /*
898  * bpf_tap - incoming linkage from device drivers
899  */
900 void
901 bpf_tap(arg, pkt, pktlen)
902 	caddr_t arg;
903 	register u_char *pkt;
904 	register u_int pktlen;
905 {
906 	struct bpf_if *bp;
907 	register struct bpf_d *d;
908 	register u_int slen;
909 	/*
910 	 * Note that the ipl does not have to be raised at this point.
911 	 * The only problem that could arise here would be if two different
912 	 * interfaces shared any data, which is not the case.
913 	 */
914 	bp = (struct bpf_if *)arg;
915 	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
916 		++d->bd_rcount;
917 		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
918 		if (slen != 0)
919 			catchpacket(d, pkt, pktlen, slen, bcopy);
920 	}
921 }
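
/*
 * A sketch of the receive-interrupt hook a driver uses to reach bpf_tap(),
 * assuming a softc field "sc_bpf" that was handed to bpfattach() (the field
 * name is illustrative) and a contiguous copy of the received frame.  The
 * cookie is non-zero only while at least one descriptor is listening, so
 * the untapped case costs a single test.
 *
 *	if (sc->sc_bpf)
 *		bpf_tap(sc->sc_bpf, (u_char *)buf, (u_int)len);
 */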
922 
923 /*
924  * Copy data from an mbuf chain into a buffer.  This code is derived
925  * from m_copydata in sys/uipc_mbuf.c.
926  */
927 static void
928 bpf_mcopy(src, dst, len)
929 	u_char *src;
930 	u_char *dst;
931 	register int len;
932 {
933 	register struct mbuf *m = (struct mbuf *)src;
934 	register unsigned count;
935 
936 	while (len > 0) {
937 		if (m == 0)
938 			panic("bpf_mcopy");
939 		count = MIN(m->m_len, len);
940 		bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
941 		m = m->m_next;
942 		dst += count;
943 		len -= count;
944 	}
945 }
946 
947 /*
948  * bpf_mtap -	incoming linkage from device drivers, when packet
949  *		is in an mbuf chain
950  */
951 void
952 bpf_mtap(arg, m)
953 	caddr_t arg;
954 	struct mbuf *m;
955 {
956 	struct bpf_if *bp = (struct bpf_if *)arg;
957 	struct bpf_d *d;
958 	u_int pktlen, slen;
959 	struct mbuf *m0;
960 
961 	pktlen = 0;
962 	for (m0 = m; m0 != 0; m0 = m0->m_next)
963 		pktlen += m0->m_len;
964 
965 	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
966 		++d->bd_rcount;
967 		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
968 		if (slen != 0)
969 			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
970 	}
971 }
972 
973 /*
974  * Move the packet data from interface memory (pkt) into the
975  * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
976  * otherwise 0.  "copy" is the routine called to do the actual data
977  * transfer.  bcopy is passed in to copy contiguous chunks, while
978  * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
979  * pkt is really an mbuf.
980  */
981 static void
982 catchpacket(d, pkt, pktlen, snaplen, cpfn)
983 	register struct bpf_d *d;
984 	register u_char *pkt;
985 	register u_int pktlen, snaplen;
986 	register void (*cpfn)();
987 {
988 	register struct bpf_hdr *hp;
989 	register int totlen, curlen;
990 	register int hdrlen = d->bd_bif->bif_hdrlen;
991 	/*
992 	 * Figure out how many bytes to move.  If the packet is
993 	 * greater or equal to the snapshot length, transfer that
994 	 * much.  Otherwise, transfer the whole packet (unless
995 	 * we hit the buffer size limit).
996 	 */
997 	totlen = hdrlen + MIN(snaplen, pktlen);
998 	if (totlen > d->bd_bufsize)
999 		totlen = d->bd_bufsize;
1000 
1001 	/*
1002 	 * Round up the end of the previous packet to the next longword.
1003 	 */
1004 	curlen = BPF_WORDALIGN(d->bd_slen);
1005 	if (curlen + totlen > d->bd_bufsize) {
1006 		/*
1007 		 * This packet will overflow the storage buffer.
1008 		 * Rotate the buffers if we can, then wakeup any
1009 		 * pending reads.
1010 		 */
1011 		if (d->bd_fbuf == 0) {
1012 			/*
1013 			 * We haven't completed the previous read yet,
1014 			 * so drop the packet.
1015 			 */
1016 			++d->bd_dcount;
1017 			return;
1018 		}
1019 		ROTATE_BUFFERS(d);
1020 		bpf_wakeup(d);
1021 		curlen = 0;
1022 	}
1023 	else if (d->bd_immediate)
1024 		/*
1025 		 * Immediate mode is set.  A packet arrived so any
1026 		 * reads should be woken up.
1027 		 */
1028 		bpf_wakeup(d);
1029 
1030 	/*
1031 	 * Append the bpf header.
1032 	 */
1033 	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
1034 #ifdef sun
1035 	uniqtime(&hp->bh_tstamp);
1036 #else
1037 #if BSD >= 199103
1038 	microtime(&hp->bh_tstamp);
1039 #else
1040 	hp->bh_tstamp = time;
1041 #endif
1042 #endif
1043 	hp->bh_datalen = pktlen;
1044 	hp->bh_hdrlen = hdrlen;
1045 	/*
1046 	 * Copy the packet data into the store buffer and update its length.
1047 	 */
1048 	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
1049 	d->bd_slen = curlen + totlen;
1050 }
1051 
1052 /*
1053  * Initialize all nonzero fields of a descriptor.
1054  */
1055 static int
1056 bpf_initd(d)
1057 	register struct bpf_d *d;
1058 {
1059 	d->bd_bufsize = bpf_bufsize;
1060 	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
1061 	if (d->bd_fbuf == 0)
1062 		return (ENOBUFS);
1063 
1064 	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
1065 	if (d->bd_sbuf == 0) {
1066 		free(d->bd_fbuf, M_DEVBUF);
1067 		return (ENOBUFS);
1068 	}
1069 	d->bd_slen = 0;
1070 	d->bd_hlen = 0;
1071 	return (0);
1072 }
1073 
1074 /*
1075  * Free buffers currently in use by a descriptor.
1076  * Called on close.
1077  */
1078 bpf_freed(d)
1079 	register struct bpf_d *d;
1080 {
1081 	/*
1082 	 * We don't need to lock out interrupts since this descriptor has
1083 	 * been detached from its interface and has not yet been marked
1084 	 * free.
1085 	 */
1086 	if (d->bd_hbuf)
1087 		free(d->bd_hbuf, M_DEVBUF);
1088 	if (d->bd_fbuf)
1089 		free(d->bd_fbuf, M_DEVBUF);
1090 	free(d->bd_sbuf, M_DEVBUF);
1091 	if (d->bd_filter)
1092 		free((caddr_t)d->bd_filter, M_DEVBUF);
1093 
1094 	D_MARKFREE(d);
1095 }
1096 
1097 /*
1098  * Attach an interface to bpf.  driverp is a pointer to a (struct bpf_if *)
1099  * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
1100  * size of the link header (variable length headers not yet supported).
1101  */
1102 void
1103 bpfattach(driverp, ifp, dlt, hdrlen)
1104 	caddr_t *driverp;
1105 	struct ifnet *ifp;
1106 	u_int dlt, hdrlen;
1107 {
1108 	struct bpf_if *bp;
1109 	int i;
1110 #if BSD < 199103
1111 	static struct bpf_if bpf_ifs[NBPFILTER];
1112 	static int bpfifno;
1113 
1114 	bp = (bpfifno < NBPFILTER) ? &bpf_ifs[bpfifno++] : 0;
1115 #else
1116 	bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
1117 #endif
1118 	if (bp == 0)
1119 		panic("bpfattach");
1120 
1121 	bp->bif_dlist = 0;
1122 	bp->bif_driverp = (struct bpf_if **)driverp;
1123 	bp->bif_ifp = ifp;
1124 	bp->bif_dlt = dlt;
1125 
1126 	bp->bif_next = bpf_iflist;
1127 	bpf_iflist = bp;
1128 
1129 	*bp->bif_driverp = 0;
1130 
1131 	/*
1132 	 * Compute the length of the bpf header.  This is not necessarily
1133 	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1134 	 * that the network layer header begins on a longword boundary (for
1135 	 * performance reasons and to alleviate alignment restrictions).
1136 	 */
1137 	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
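	/*
	 * A worked instance, assuming 32-bit longs and an 8-byte struct
	 * timeval: for Ethernet, hdrlen is 14 and SIZEOF_BPF_HDR is 18,
	 * so BPF_WORDALIGN(14 + 18) = 32 and bif_hdrlen = 32 - 14 = 18.
	 * catchpacket() then writes 18 bytes of bpf_hdr followed by the
	 * 14-byte link header, leaving the network layer header at
	 * offset 32 from the (longword aligned) start of the capture.
	 */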
1138 
1139 	/*
1140 	 * Mark all the descriptors free if this hasn't been done.
1141 	 */
1142 	if (!D_ISFREE(&bpf_dtab[0]))
1143 		for (i = 0; i < NBPFILTER; ++i)
1144 			D_MARKFREE(&bpf_dtab[i]);
1145 
1146 	printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
1147 }
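
/*
 * A sketch of the corresponding attach-time call, typically made from a
 * driver's attach routine for an Ethernet interface; "sc" and its
 * sc_bpf/sc_if fields are illustrative.
 *
 *	bpfattach(&sc->sc_bpf, &sc->sc_if, DLT_EN10MB,
 *	    sizeof(struct ether_header));
 */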
1148 
1149 #if BSD >= 199103
1150 /* XXX This routine belongs in net/if.c. */
1151 /*
1152  * Set/clear promiscuous mode on interface ifp based on the truth value
1153  * of pswitch.  The calls are reference counted so that only the first
1154  * on request actually has an effect, as does the final off request.
1155  * Results are undefined if the off and on requests are not matched.
1156  */
1157 int
1158 ifpromisc(ifp, pswitch)
1159 	struct ifnet *ifp;
1160 	int pswitch;
1161 {
1162 	struct ifreq ifr;
1163 	/*
1164 	 * If the device is not configured up, we cannot put it in
1165 	 * promiscuous mode.
1166 	 */
1167 	if ((ifp->if_flags & IFF_UP) == 0)
1168 		return (ENETDOWN);
1169 
1170 	if (pswitch) {
1171 		if (ifp->if_pcount++ != 0)
1172 			return (0);
1173 		ifp->if_flags |= IFF_PROMISC;
1174 	} else {
1175 		if (--ifp->if_pcount > 0)
1176 			return (0);
1177 		ifp->if_flags &= ~IFF_PROMISC;
1178 	}
1179 	ifr.ifr_flags = ifp->if_flags;
1180 	return ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr));
1181 }
1182 #endif
1183 
1184 #if BSD < 199103
1185 /*
1186  * Allocate some memory for bpf.  This is temporary SunOS support, and
1187  * is admittedly a gross hack.
1188  * If resources are unavailable, return 0.
1189  */
1190 static caddr_t
1191 bpf_alloc(size, canwait)
1192 	register int size;
1193 	register int canwait;
1194 {
1195 	register struct mbuf *m;
1196 
1197 	if ((unsigned)size > (MCLBYTES-8))
1198 		return 0;
1199 
1200 	MGET(m, canwait, MT_DATA);
1201 	if (m == 0)
1202 		return 0;
1203 	if ((unsigned)size > (MLEN-8)) {
1204 		MCLGET(m);
1205 		if (m->m_len != MCLBYTES) {
1206 			m_freem(m);
1207 			return 0;
1208 		}
1209 	}
1210 	*mtod(m, struct mbuf **) = m;
1211 	return mtod(m, caddr_t) + 8;
1212 }
1213 #endif
1214 #endif
1215