/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.12 2002/04/14 21:41:48 luigi Exp $
 * $DragonFly: src/sys/net/bpf.c,v 1.50 2008/09/23 11:28:49 sephe Exp $
 */

#include "use_bpf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>

#include <sys/poll.h>

#include <sys/socket.h>
#include <sys/vnode.h>

#include <sys/thread2.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>
#include <net/netmsg2.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <sys/devfs.h>

/*
 * Message used to forward a write()'s mbuf to the interface's output
 * routine on the proper protocol thread (dispatched via lwkt_domsg()).
 */
struct netmsg_bpf_output {
	struct netmsg	nm_netmsg;	/* embedded LWKT message header */
	struct mbuf	*nm_mbuf;	/* packet to transmit; consumed by driver */
	struct ifnet	*nm_ifp;	/* interface to transmit on */
	struct sockaddr	*nm_dst;	/* pseudo destination for if_output() */
};

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
DEVFS_DECLARE_CLONE_BITMAP(bpf);

/*
 * Number of /dev/bpfN nodes created up front; additional units are
 * allocated on demand through the devfs clone bitmap.
 */
#if NBPF <= 1
#define BPF_PREALLOCATED_UNITS	4
#else
#define BPF_PREALLOCATED_UNITS	NBPF
#endif

#if NBPF > 0

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_DEFAULTBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
    &bpf_bufsize, 0, "");
int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
    &bpf_maxbufsize, 0, "");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 */
static struct bpf_if	*bpf_iflist;

static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
static void	bpf_detachd(struct bpf_d *d);
static void	bpf_resetd(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static void	bpf_mcopy(const void *, void *, size_t);
static int	bpf_movein(struct uio *, int, struct mbuf **,
			   struct sockaddr *, int *, struct bpf_insn *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static void	bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
			    void (*)(const void *, void *, size_t),
			    const struct timeval *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	bpf_drvinit(void *unused);

static d_open_t		bpfopen;
static d_clone_t	bpfclone;
static d_close_t	bpfclose;
static d_read_t		bpfread;
static d_write_t	bpfwrite;
static d_ioctl_t	bpfioctl;
static d_poll_t		bpfpoll;

#define CDEV_MAJOR 23
static struct dev_ops bpf_ops = {
	{ "bpf", CDEV_MAJOR, 0 },
	.d_open =	bpfopen,
	.d_close =	bpfclose,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
};

/*
 * Convert the user's write() data (uio) into an mbuf suitable for
 * handing to the interface's output routine.
 *
 * linktype selects how much link-level header to strip into *sockp;
 * *datlen receives the payload length (excluding that header), *mp the
 * resulting mbuf.  The packet must also pass the descriptor's write
 * filter (wfilter) or EPERM is returned.  On error, no mbuf is leaked.
 */
static int
bpf_movein(struct uio *uio, int linktype, struct mbuf **mp,
	   struct sockaddr *sockp, int *datlen, struct bpf_insn *wfilter)
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	*datlen = 0;
	*mp = NULL;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {
	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	default:
		/* Unsupported link type for write(). */
		return(EIO);
	}

	len = uio->uio_resid;
	/*
	 * NOTE(review): *datlen may momentarily go negative when
	 * len < hlen; the short-packet case is rejected below via the
	 * m->m_len < hlen check before *datlen is ever used by callers.
	 */
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return(EIO);

	m = m_getl(len, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
	if (m == NULL)
		return(ENOBUFS);
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	/* Packet too short to even contain the link header. */
	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	/* Reject packets that do not pass the write filter. */
	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/*
	 * Make room for link header, and copy it to sockaddr.
	 */
	if (hlen != 0) {
		bcopy(m->m_data, sockp->sa_data, hlen);
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
		m->m_data += hlen; /* XXX */
	}
	return (0);
bad:
	m_freem(m);
	return(error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	SLIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	bp = d->bd_bif;
	ifp = bp->bif_ifp;

	/* Remove d from the interface's descriptor list. */
	SLIST_REMOVE(&bp->bif_dlist, d, bpf_d, bd_next);

	if (SLIST_EMPTY(&bp->bif_dlist)) {
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*bp->bif_driverp = NULL;
	}
	d->bd_bif = NULL;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged,
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(ifp, "bpf_detach: ifpromisc failed(%d)\n",
				  error);
		}
	}
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d;

	/* bpf is not available inside jails. */
	if (ap->a_cred->cr_prison)
		return(EPERM);

	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d != NULL)
		return(EBUSY);

	/* Allocate a zeroed descriptor and hang it off the device. */
	MALLOC(d, struct bpf_d *, sizeof *d, M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;	/* by default, see transmitted packets too */
	callout_init(&d->bd_callout);
	return(0);
}

/*
 * devfs clone handler: allocate the next free unit from the bitmap and
 * create a fresh /dev/bpfN node for it.
 */
static int
bpfclone(struct dev_clone_args *ap)
{
	int unit;

	unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(bpf), 0);
	ap->a_dev = make_only_dev(&bpf_ops, unit, 0, 0, 0600, "bpf%d", unit);

	return 0;
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpfclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;

	funsetown(d->bd_sigio);
	crit_enter();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	if (d->bd_bif != NULL)
		bpf_detachd(d);
	crit_exit();
	bpf_freed(d);
	dev->si_drv1 = NULL;
	/* Cloned (non-preallocated) units are destroyed on last close. */
	if (dev->si_uminor >= BPF_PREALLOCATED_UNITS) {
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(bpf), dev->si_uminor);
		destroy_dev(dev);
	}
	kfree(d, M_BPF);
	return(0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL;
/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpfread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;

	/*
	 * Restrict application to use a buffer the same size as
	 * as kernel buffers.
	 */
	if (ap->a_uio->uio_resid != d->bd_bufsize)
		return(EINVAL);

	crit_enter();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			crit_exit();
			return(ENXIO);
		}

		if (ap->a_ioflag & IO_NDELAY) {
			crit_exit();
			return(EWOULDBLOCK);
		}
		error = tsleep(d, PCATCH, "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			crit_exit();
			return(error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				crit_exit();
				return(0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	crit_exit();

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, ap->a_uio);

	/* Recycle the drained hold buffer as the new free buffer. */
	crit_enter();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	crit_exit();

	return(error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static void
bpf_wakeup(struct bpf_d *d)
{
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	/* Deliver SIGIO to async listeners, if requested. */
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

	get_mplock();
	selwakeup(&d->bd_sel);
	rel_mplock();
	/* XXX */
	d->bd_sel.si_pid = 0;
}

/*
 * Read-timeout callout handler: flag the descriptor as timed out and
 * wake any reader if there is buffered data.
 */
static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	crit_enter();
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
	crit_exit();
}

/*
 * LWKT message handler for bpfwrite(): transmit the mbuf on the target
 * interface and reply with if_output()'s error code.
 */
static void
bpf_output_dispatch(struct netmsg *nmsg)
{
	struct netmsg_bpf_output *bmsg = (struct netmsg_bpf_output *)nmsg;
	struct ifnet *ifp = bmsg->nm_ifp;
	int error;

	/*
	 * The driver frees the mbuf.
	 */
	error = ifp->if_output(ifp, bmsg->nm_mbuf, bmsg->nm_dst, NULL);
	lwkt_replymsg(&nmsg->nm_lmsg, error);
}

/*
 * Inject a packet supplied by userland onto the bound interface.
 * The write must fit in the interface MTU and pass the write filter.
 */
static int
bpfwrite(struct dev_write_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	struct ifnet *ifp;
	struct mbuf *m;
	int error;
	struct sockaddr dst;
	int datlen;
	struct netmsg_bpf_output bmsg;

	if (d->bd_bif == NULL)
		return(ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (ap->a_uio->uio_resid == 0)
		return(0);

	error = bpf_movein(ap->a_uio, (int)d->bd_bif->bif_dlt, &m,
			   &dst, &datlen, d->bd_wfilter);
	if (error)
		return(error);

	if (datlen > ifp->if_mtu) {
		m_freem(m);
		return(EMSGSIZE);
	}

	/* Caller supplied a complete link header; tell if_output so. */
	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	/*
	 * Dispatch synchronously to cpu 0's netisr port; dst and bmsg
	 * live on our stack, which is safe because lwkt_domsg blocks
	 * until the message is replied.
	 */
	netmsg_init(&bmsg.nm_netmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    bpf_output_dispatch);
	bmsg.nm_mbuf = m;
	bmsg.nm_ifp = ifp;
	bmsg.nm_dst = &dst;

	return lwkt_domsg(cpu_portfn(0), &bmsg.nm_netmsg.nm_lmsg, 0);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
bpf_resetd(struct bpf_d *d)
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCSETWF		Set ethernet write filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGSEESENT	Get "see packets sent" flag
 *  BIOCSSEESENT	Set "see packets sent" flag
 *  BIOCLOCK		Set "locked" flag
 */
/* ARGSUSED */
static int
bpfioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	int error = 0;

	/* Any ioctl cancels a pending read timeout. */
	crit_enter();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	crit_exit();

	/*
	 * A locked descriptor (BIOCLOCK) only admits the read-only /
	 * harmless subset of commands below; everything else is EPERM.
	 */
	if (d->bd_locked == 1) {
		switch (ap->a_cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
			break;
		default:
			return (EPERM);
		}
	}
	switch (ap->a_cmd) {
	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			crit_enter();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			crit_exit();

			*(int *)ap->a_data = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == NULL) {
				error = EINVAL;
			} else {
				ifp = d->bd_bif->bif_ifp;
				ifnet_serialize_all(ifp);
				error = ifp->if_ioctl(ifp, ap->a_cmd,
						      ap->a_data, ap->a_cred);
				ifnet_deserialize_all(ifp);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)ap->a_data = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is bound;
	 * the requested size is clamped to [BPF_MINBUFSIZE, bpf_maxbufsize]
	 * and the clamped value is passed back to the caller.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != NULL) {
			error = EINVAL;
		} else {
			u_int size = *(u_int *)ap->a_data;

			if (size > bpf_maxbufsize)
				*(u_int *)ap->a_data = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)ap->a_data = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
	case BIOCSETWF:
		error = bpf_setf(d, (struct bpf_program *)ap->a_data,
				 ap->a_cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		crit_enter();
		bpf_resetd(d);
		crit_exit();
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		crit_enter();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		crit_exit();
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)ap->a_data = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			error = bpf_getdltlist(d,
				(struct bpf_dltlist *)ap->a_data);
		}
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)ap->a_data);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)ap->a_data;

			strlcpy(ifr->ifr_name, ifp->if_xname,
				sizeof ifr->ifr_name);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)ap->a_data);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)ap->a_data;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz_low(tv);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)ap->a_data;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)ap->a_data;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)ap->a_data;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)ap->a_data;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)ap->a_data = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)ap->a_data ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)ap->a_data = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)ap->a_data;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)ap->a_data;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)ap->a_data, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)ap->a_data = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)ap->a_data), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)ap->a_data = -fgetown(d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)ap->a_data;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)ap->a_data = d->bd_sig;
		break;
	case BIOCLOCK:
		d->bd_locked = 1;
		break;
	}
	return(error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;

	/* BIOCSETWF installs the write filter, BIOCSETF the read filter. */
	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
	}
	/* NULL program with zero length means "remove the filter". */
	if (fp->bf_insns == NULL) {
		if (fp->bf_len != 0)
			return(EINVAL);
		crit_enter();
		if (wfilter)
			d->bd_wfilter = NULL;
		else
			d->bd_rfilter = NULL;
		bpf_resetd(d);
		crit_exit();
		if (old != NULL)
			kfree(old, M_BPF);
		return(0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return(EINVAL);

	/*
	 * Copy the program in and validate it before swapping it in;
	 * the old filter is only freed once the new one is live.
	 */
	size = flen * sizeof *fp->bf_insns;
	fcode = (struct bpf_insn *)kmalloc(size, M_BPF, M_WAITOK);
	if (copyin(fp->bf_insns, fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		crit_enter();
		if (wfilter)
			d->bd_wfilter = fcode;
		else
			d->bd_rfilter = fcode;
		bpf_resetd(d);
		crit_exit();
		if (old != NULL)
			kfree(old, M_BPF);

		return(0);
	}
	kfree(fcode, M_BPF);
	return(EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	int error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL)
		return(ENXIO);

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == NULL || ifp != theywant)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != &ifp->if_bpf)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if (!(ifp->if_flags & IFF_UP))
			return(ENETDOWN);

		if (d->bd_sbuf == NULL) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return(error);
		}
		crit_enter();
		if (bp != d->bd_bif) {
			if (d->bd_bif != NULL) {
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);
			}

			bpf_attachd(d, bp);
		}
		bpf_resetd(d);
		crit_exit();
		return(0);
	}

	/* Not found. */
	return(ENXIO);
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(struct dev_poll_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d;
	int revents;

	d = dev->si_drv1;
	if (d->bd_bif == NULL)
		return(ENXIO);

	/* Writes never block from poll's perspective. */
	revents = ap->a_events & (POLLOUT | POLLWRNORM);
	crit_enter();
	if (ap->a_events & (POLLIN | POLLRDNORM)) {
		/*
		 * An imitation of the FIONREAD ioctl code.
		 * XXX not quite.  An exact imitation:
		 *	if (d->b_slen != 0 ||
		 *	    (d->bd_hbuf != NULL && d->bd_hlen != 0)
		 */
		if (d->bd_hlen != 0 ||
		    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
		     d->bd_slen != 0)) {
			revents |= ap->a_events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(curthread, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
					      bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	crit_exit();
	ap->a_events = revents;
	return(0);
}

/*
 * Process the packet pkt of length pktlen.
 The packet is parsed
 * by each listener's filter, and if accepted, stashed into the
 * corresponding buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bpf_d *d;
	struct timeval tv;
	int gottime = 0;
	u_int slen;

	get_mplock();

	/* Re-check */
	if (bp == NULL) {
		rel_mplock();
		return;
	}

	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			/* Timestamp lazily, once per tap, shared by listeners. */
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			catchpacket(d, pkt, pktlen, slen, ovbcopy, &tv);
		}
	}

	rel_mplock();
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == NULL)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Process the packet in the mbuf chain m.  The packet is parsed by each
 * listener's filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bpf_d *d;
	u_int pktlen, slen;
	struct timeval tv;
	int gottime = 0;

	get_mplock();

	/* Re-check */
	if (bp == NULL) {
		rel_mplock();
		return;
	}

	/* Don't compute pktlen, if no descriptor is attached. */
	if (SLIST_EMPTY(&bp->bif_dlist)) {
		rel_mplock();
		return;
	}

	pktlen = m_lengthm(m, NULL);

	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		/* Skip outgoing packets if this listener opted out. */
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		++d->bd_rcount;
		/* buflen 0 tells bpf_filter the "packet" is an mbuf chain. */
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy,
				    &tv);
		}
	}

	rel_mplock();
}

/*
 * Tap an mbuf with a 4-byte address-family pseudo header prepended,
 * as used by DLT_NULL style interfaces.
 */
void
bpf_mtap_family(struct bpf_if *bp, struct mbuf *m, sa_family_t family)
{
	u_int family4;

	KKASSERT(family != AF_UNSPEC);

	family4 = (u_int)family;
	bpf_ptap(bp, m, &family4, sizeof(family4));
}

/*
 * Process the packet in the mbuf chain m with the header in m prepended.
 * The packet is parsed by each listener's filter, and if accepted,
 * stashed into the corresponding buffer.
 */
void
bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
{
	struct mbuf mb;

	/*
	 * Craft on-stack mbuf suitable for passing to bpf_mtap.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = __DECONST(void *, data); /* LINTED */
	mb.m_len = dlen;
	mb.m_pkthdr.rcvif = m->m_pkthdr.rcvif;

	bpf_mtap(bp, &mb);
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
	    void (*cpfn)(const void *, void *, size_t),
	    const struct timeval *tv)
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		bpf_wakeup(d);
	}

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	hp->bh_tstamp = *tv;
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
1326 */ 1327 static int 1328 bpf_allocbufs(struct bpf_d *d) 1329 { 1330 d->bd_fbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK); 1331 d->bd_sbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK); 1332 d->bd_slen = 0; 1333 d->bd_hlen = 0; 1334 return(0); 1335 } 1336 1337 /* 1338 * Free buffers and packet filter program currently in use by a descriptor. 1339 * Called on close. 1340 */ 1341 static void 1342 bpf_freed(struct bpf_d *d) 1343 { 1344 /* 1345 * We don't need to lock out interrupts since this descriptor has 1346 * been detached from its interface and it yet hasn't been marked 1347 * free. 1348 */ 1349 if (d->bd_sbuf != NULL) { 1350 kfree(d->bd_sbuf, M_BPF); 1351 if (d->bd_hbuf != NULL) 1352 kfree(d->bd_hbuf, M_BPF); 1353 if (d->bd_fbuf != NULL) 1354 kfree(d->bd_fbuf, M_BPF); 1355 } 1356 if (d->bd_rfilter) 1357 kfree(d->bd_rfilter, M_BPF); 1358 if (d->bd_wfilter) 1359 kfree(d->bd_wfilter, M_BPF); 1360 } 1361 1362 /* 1363 * Attach an interface to bpf. ifp is a pointer to the structure 1364 * defining the interface to be attached, dlt is the link layer type, 1365 * and hdrlen is the fixed size of the link header (variable length 1366 * headers are not yet supported). 1367 */ 1368 void 1369 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) 1370 { 1371 bpfattach_dlt(ifp, dlt, hdrlen, &ifp->if_bpf); 1372 } 1373 1374 void 1375 bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) 1376 { 1377 struct bpf_if *bp; 1378 1379 bp = kmalloc(sizeof *bp, M_BPF, M_WAITOK | M_ZERO); 1380 1381 SLIST_INIT(&bp->bif_dlist); 1382 bp->bif_ifp = ifp; 1383 bp->bif_dlt = dlt; 1384 bp->bif_driverp = driverp; 1385 *bp->bif_driverp = NULL; 1386 1387 bp->bif_next = bpf_iflist; 1388 bpf_iflist = bp; 1389 1390 /* 1391 * Compute the length of the bpf header. 
This is not necessarily 1392 * equal to SIZEOF_BPF_HDR because we want to insert spacing such 1393 * that the network layer header begins on a longword boundary (for 1394 * performance reasons and to alleviate alignment restrictions). 1395 */ 1396 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; 1397 1398 if (bootverbose) 1399 if_printf(ifp, "bpf attached\n"); 1400 } 1401 1402 /* 1403 * Detach bpf from an interface. This involves detaching each descriptor 1404 * associated with the interface, and leaving bd_bif NULL. Notify each 1405 * descriptor as it's detached so that any sleepers wake up and get 1406 * ENXIO. 1407 */ 1408 void 1409 bpfdetach(struct ifnet *ifp) 1410 { 1411 struct bpf_if *bp, *bp_prev; 1412 struct bpf_d *d; 1413 1414 crit_enter(); 1415 1416 /* Locate BPF interface information */ 1417 bp_prev = NULL; 1418 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1419 if (ifp == bp->bif_ifp) 1420 break; 1421 bp_prev = bp; 1422 } 1423 1424 /* Interface wasn't attached */ 1425 if (bp->bif_ifp == NULL) { 1426 crit_exit(); 1427 kprintf("bpfdetach: %s was not attached\n", ifp->if_xname); 1428 return; 1429 } 1430 1431 while ((d = SLIST_FIRST(&bp->bif_dlist)) != NULL) { 1432 bpf_detachd(d); 1433 bpf_wakeup(d); 1434 } 1435 1436 if (bp_prev != NULL) 1437 bp_prev->bif_next = bp->bif_next; 1438 else 1439 bpf_iflist = bp->bif_next; 1440 1441 kfree(bp, M_BPF); 1442 1443 crit_exit(); 1444 } 1445 1446 /* 1447 * Get a list of available data link type of the interface. 
1448 */ 1449 static int 1450 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl) 1451 { 1452 int n, error; 1453 struct ifnet *ifp; 1454 struct bpf_if *bp; 1455 1456 ifp = d->bd_bif->bif_ifp; 1457 n = 0; 1458 error = 0; 1459 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1460 if (bp->bif_ifp != ifp) 1461 continue; 1462 if (bfl->bfl_list != NULL) { 1463 if (n >= bfl->bfl_len) { 1464 return (ENOMEM); 1465 } 1466 error = copyout(&bp->bif_dlt, 1467 bfl->bfl_list + n, sizeof(u_int)); 1468 } 1469 n++; 1470 } 1471 bfl->bfl_len = n; 1472 return(error); 1473 } 1474 1475 /* 1476 * Set the data link type of a BPF instance. 1477 */ 1478 static int 1479 bpf_setdlt(struct bpf_d *d, u_int dlt) 1480 { 1481 int error, opromisc; 1482 struct ifnet *ifp; 1483 struct bpf_if *bp; 1484 1485 if (d->bd_bif->bif_dlt == dlt) 1486 return (0); 1487 ifp = d->bd_bif->bif_ifp; 1488 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1489 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) 1490 break; 1491 } 1492 if (bp != NULL) { 1493 opromisc = d->bd_promisc; 1494 crit_enter(); 1495 bpf_detachd(d); 1496 bpf_attachd(d, bp); 1497 bpf_resetd(d); 1498 if (opromisc) { 1499 error = ifpromisc(bp->bif_ifp, 1); 1500 if (error) { 1501 if_printf(bp->bif_ifp, 1502 "bpf_setdlt: ifpromisc failed (%d)\n", 1503 error); 1504 } else { 1505 d->bd_promisc = 1; 1506 } 1507 } 1508 crit_exit(); 1509 } 1510 return(bp == NULL ? 
EINVAL : 0); 1511 } 1512 1513 static void 1514 bpf_drvinit(void *unused) 1515 { 1516 int i; 1517 1518 make_autoclone_dev(&bpf_ops, &DEVFS_CLONE_BITMAP(bpf), 1519 bpfclone, 0, 0, 0600, "bpf"); 1520 for (i = 0; i < BPF_PREALLOCATED_UNITS; i++) { 1521 make_dev(&bpf_ops, i, 0, 0, 0600, "bpf%d", i); 1522 devfs_clone_bitmap_set(&DEVFS_CLONE_BITMAP(bpf), i); 1523 } 1524 } 1525 1526 static void 1527 bpf_drvuninit(void *unused) 1528 { 1529 devfs_clone_handler_del("bpf"); 1530 dev_ops_remove_all(&bpf_ops); 1531 devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(bpf)); 1532 } 1533 1534 SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL) 1535 SYSUNINIT(bpfdev, SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvuninit, NULL); 1536 1537 #else /* !BPF */ 1538 /* 1539 * NOP stubs to allow bpf-using drivers to load and function. 1540 * 1541 * A 'better' implementation would allow the core bpf functionality 1542 * to be loaded at runtime. 1543 */ 1544 1545 void 1546 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) 1547 { 1548 } 1549 1550 void 1551 bpf_mtap(struct bpf_if *bp, struct mbuf *m) 1552 { 1553 } 1554 1555 void 1556 bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen) 1557 { 1558 } 1559 1560 void 1561 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) 1562 { 1563 } 1564 1565 void 1566 bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) 1567 { 1568 } 1569 1570 void 1571 bpfdetach(struct ifnet *ifp) 1572 { 1573 } 1574 1575 u_int 1576 bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen) 1577 { 1578 return -1; /* "no filter" behaviour */ 1579 } 1580 1581 #endif /* !BPF */ 1582