/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)bpf.c	8.1 (Berkeley) 06/10/93
 *
 * static char rcsid[] =
 * "$Header: bpf.c,v 1.33 91/10/27 21:21:58 mccanne Exp $";
 */

#include "bpfilter.h"

#if NBPFILTER > 0

#ifndef __GNUC__
#define inline
#else
#define inline __inline
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include <sys/map.h>

#include <sys/file.h>
#if defined(sparc) && BSD < 199103
#include <sys/stream.h>
#endif
#include <sys/tty.h>
#include <sys/uio.h>

#include <sys/protosw.h>
#include <sys/socket.h>
#include <net/if.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/errno.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>

/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET	26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
int bpf_bufsize = BPF_BUFSIZE;

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet.
 * bpf_dtab holds the descriptors, indexed by minor device #.
 */
struct bpf_if	*bpf_iflist;
struct bpf_d	bpf_dtab[NBPFILTER];

#if BSD >= 199207
/*
 * bpfilterattach() is called at boot time in new systems.  We do
 * nothing here since old systems will not call this.
 */
/* ARGSUSED */
void
bpfilterattach(n)
	int n;
{
}
#endif

static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_ifname __P((struct ifnet *, struct ifreq *));
static void	bpf_mcopy __P((void *, void *, u_int));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
		bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(void *, void *, u_int)));
static void	reset_d __P((struct bpf_d *));

static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	register struct uio *uio;
	int linktype, *datlen;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
		break;

	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGET(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MLEN) {
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_len = len;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;
	}
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = 0;
	d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
int
bpfopen(dev, flag)
	dev_t dev;
	int flag;
{
	register struct bpf_d *d;

	if (minor(dev) >= NBPFILTER)
		return (ENXIO);
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d))
		return (EBUSY);

	/* Mark "in use" and do most initialization. */
	bzero((char *)d, sizeof(*d));
	d->bd_bufsize = bpf_bufsize;

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
int
bpfclose(dev, flag)
	dev_t dev;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	register int s;

	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);

	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 * bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev, uio)
	dev_t dev;
	register struct uio *uio;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
				  d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	wakeup((caddr_t)d);
#if BSD >= 199103
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}

int
bpfwrite(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	static struct sockaddr dst;
	int datlen;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
	if (error)
		return (error);

	if (datlen > ifp->if_mtu)
		return (EMSGSIZE);

	s = splnet();
#if BSD >= 199103
	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
#else
	error = (*ifp->if_output)(ifp, m, &dst);
#endif
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSBLEN		Set buffer len.
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 */
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flag)
	dev_t dev;
	int cmd;
	caddr_t addr;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > BPF_MAXBUFSIZE)
				*(u_int *)addr = size = BPF_MAXBUFSIZE;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec;

			/* Compute number of milliseconds. */
			msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
			/*
			 * Scale milliseconds to ticks.  Assume hard
			 * clock has millisecond or greater resolution
			 * (i.e. tick >= 1000).  For a 10ms hardclock,
			 * tick/1000 = 10, so rtout <- msec/10.
			 */
			d->bd_rtout = msec / (tick / 1000);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec = d->bd_rtout;

			msec *= tick / 1000;
			tv->tv_sec = msec / 1000;
			tv->tv_usec = (msec % 1000) * 1000;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	char *cp;
	int unit, s, error;

	/*
	 * Separate string into name part and unit number.  Put a null
	 * byte at the end of the name part, and compute the number.
	 * If a unit number is unspecified, the default is 0,
	 * as initialized above.  XXX This should be common code.
	 */
	unit = 0;
	cp = ifr->ifr_name;
	cp[sizeof(ifr->ifr_name) - 1] = '\0';
	while (*cp++) {
		if (*cp >= '0' && *cp <= '9') {
			unit = *cp - '0';
			*cp++ = '\0';
			while (*cp)
				unit = 10 * unit + *cp++ - '0';
			break;
		}
	}
	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || unit != ifp->if_unit
		    || strcmp(ifp->if_name, ifr->ifr_name) != 0)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Convert an interface name plus unit number of an ifp to a single
 * name which is returned in the ifr.
 */
static void
bpf_ifname(ifp, ifr)
	struct ifnet *ifp;
	struct ifreq *ifr;
{
	char *s = ifp->if_name;
	char *d = ifr->ifr_name;

	while (*s)
		*d++ = *s++;
	/* XXX Assume that unit number is less than 10. */
	*d++ = ifp->if_unit + '0';
	*d = '\0';
}

/*
 * The new select interface passes down the proc pointer; the old select
 * stubs had to grab it out of the user struct.  This glue allows either case.
 */
#if BSD >= 199103
#define bpf_select bpfselect
#else
int
bpfselect(dev, rw)
	register dev_t dev;
	int rw;
{
	return (bpf_select(dev, rw, u.u_procp));
}
#endif

/*
 * Support for select() system call
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpf_select(dev, rw, p)
	register dev_t dev;
	int rw;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;

	if (rw != FREAD)
		return (0);
	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	s = splimp();
	if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) {
		/*
		 * There is data waiting.
		 */
		splx(s);
		return (1);
	}
#if BSD >= 199103
	selrecord(p, &d->bd_sel);
#else
	/*
	 * No data ready.  If there's already a select() waiting on this
	 * minor device then this is a collision.  This shouldn't happen
	 * because minors really should not be shared, but if a process
	 * forks while one of these is open, it is possible that both
	 * processes could select on the same descriptor.
	 */
	if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
		d->bd_selcoll = 1;
	else
		d->bd_selproc = p;
#endif
	splx(s);
	return (0);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(arg, pkt, pktlen)
	caddr_t arg;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = (struct bpf_if *)arg;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	void *src_arg, *dst_arg;
	register u_int len;
{
	register struct mbuf *m;
	register u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(arg, m)
	caddr_t arg;
	struct mbuf *m;
{
	struct bpf_if *bp = (struct bpf_if *)arg;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Wake up pending reads if the buffer fills up or
 * immediate mode is set.  "cpfn" is the routine called to do the
 * actual data transfer.  bcopy is passed in to copy contiguous chunks,
 * while bpf_mcopy is passed in to copy mbuf chains.  In the latter
 * case, pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn)();
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Allocate the packet buffers for a descriptor.
 */
static int
bpf_allocbufs(d)
	register struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_DEVBUF);

	D_MARKFREE(d);
}

/*
 * Attach an interface to bpf.  driverp is a pointer to a (struct bpf_if *)
 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
 * size of the link header (variable length headers not yet supported).
 */
void
bpfattach(driverp, ifp, dlt, hdrlen)
	caddr_t *driverp;
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	int i;
#if BSD < 199103
	static struct bpf_if bpf_ifs[NBPFILTER];
	static int bpfifno;

	bp = (bpfifno < NBPFILTER) ? &bpf_ifs[bpfifno++] : 0;
#else
	bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
#endif
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_driverp = (struct bpf_if **)driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	*bp->bif_driverp = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	/*
	 * Mark all the descriptors free if this hasn't been done.
	 */
	if (!D_ISFREE(&bpf_dtab[0]))
		for (i = 0; i < NBPFILTER; ++i)
			D_MARKFREE(&bpf_dtab[i]);

	printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}

#if BSD >= 199103
/* XXX This routine belongs in net/if.c. */
/*
 * Set/clear promiscuous mode on interface ifp based on the truth value
 * of pswitch.  The calls are reference counted so that only the first
 * "on" request actually has an effect, as does the final "off" request.
 * Results are undefined if the "off" and "on" requests are not matched.
 */
int
ifpromisc(ifp, pswitch)
	struct ifnet *ifp;
	int pswitch;
{
	struct ifreq ifr;
	/*
	 * If the device is not configured up, we cannot put it in
	 * promiscuous mode.
	 */
	if ((ifp->if_flags & IFF_UP) == 0)
		return (ENETDOWN);

	if (pswitch) {
		if (ifp->if_pcount++ != 0)
			return (0);
		ifp->if_flags |= IFF_PROMISC;
	} else {
		if (--ifp->if_pcount > 0)
			return (0);
		ifp->if_flags &= ~IFF_PROMISC;
	}
	ifr.ifr_flags = ifp->if_flags;
	return ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr));
}
#endif

#if BSD < 199103
/*
 * Allocate some memory for bpf.  This is temporary SunOS support, and
 * is admittedly a hack.
 * If resources are unavailable, return 0.
 */
static caddr_t
bpf_alloc(size, canwait)
	register int size;
	register int canwait;
{
	register struct mbuf *m;

	if ((unsigned)size > (MCLBYTES-8))
		return 0;

	MGET(m, canwait, MT_DATA);
	if (m == 0)
		return 0;
	if ((unsigned)size > (MLEN-8)) {
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
			m_freem(m);
			return 0;
		}
	}
	*mtod(m, struct mbuf **) = m;
	return mtod(m, caddr_t) + 8;
}
#endif
#endif
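#if 0
/*
 * Not part of the driver, and kept under "#if 0" so it is never compiled
 * here: a minimal user-level sketch of how the ioctls and record layout
 * implemented above are meant to be driven.  The device path "/dev/bpf0"
 * and interface name "le0" are illustrative assumptions only.
 */
#include <sys/types.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/bpf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main()
{
	struct ifreq ifr;
	struct bpf_hdr *bh;
	u_int bufsize;
	char *buf, *p;
	int fd, cc;

	/* Each minor device can be opened by only one process (else EBUSY). */
	if ((fd = open("/dev/bpf0", O_RDONLY)) < 0)
		return (1);

	/* BIOCSETIF attaches to an interface and allocates the kernel buffers. */
	strncpy(ifr.ifr_name, "le0", sizeof(ifr.ifr_name));
	if (ioctl(fd, BIOCSETIF, (caddr_t)&ifr) < 0)
		return (1);

	/* bpfread() insists that reads use exactly the kernel buffer size. */
	if (ioctl(fd, BIOCGBLEN, (caddr_t)&bufsize) < 0)
		return (1);
	if ((buf = malloc(bufsize)) == 0)
		return (1);

	/* Each read returns a hold buffer of BPF_WORDALIGN'ed records. */
	while ((cc = read(fd, buf, bufsize)) > 0) {
		p = buf;
		while (p < buf + cc) {
			bh = (struct bpf_hdr *)p;
			printf("captured %lu of %lu bytes\n",
			    (u_long)bh->bh_caplen, (u_long)bh->bh_datalen);
			p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
		}
	}
	return (0);
}
#endif /* 0 */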