1 /* 2 * Copyright (c) 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from the Stanford/CMU enet packet filter, 6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8 * Berkeley Laboratory. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.12 2002/04/14 21:41:48 luigi Exp $
 * $DragonFly: src/sys/net/bpf.c,v 1.50 2008/09/23 11:28:49 sephe Exp $
 */

#include "use_bpf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>

#include <sys/poll.h>

#include <sys/socket.h>
#include <sys/vnode.h>

#include <sys/thread2.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>
#include <net/netmsg2.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

/*
 * Message used by bpfwrite() to hand a packet to the protocol thread
 * (cpu0's netisr port) for transmission on the bound interface; see
 * bpf_output_dispatch().
 */
struct netmsg_bpf_output {
	struct netmsg	nm_netmsg;	/* base netmsg (reply carries errno) */
	struct mbuf	*nm_mbuf;	/* packet to transmit; driver frees it */
	struct ifnet	*nm_ifp;	/* outgoing interface */
	struct sockaddr	*nm_dst;	/* link-level destination address */
};

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if NBPF > 0

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_DEFAULTBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
    &bpf_bufsize, 0, "");
int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
    &bpf_maxbufsize, 0, "");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 */
static struct bpf_if	*bpf_iflist;

static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
static void	bpf_detachd(struct bpf_d *d);
static void	bpf_resetd(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static void	bpf_mcopy(const void *, void *, size_t);
static int	bpf_movein(struct uio *, int, struct mbuf **,
			   struct sockaddr *, int *, struct bpf_insn *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static void	bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
			    void (*)(const void *, void *, size_t),
			    const struct timeval *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	bpf_drvinit(void *unused);

static d_open_t		bpfopen;
static d_close_t	bpfclose;
static d_read_t		bpfread;
static d_write_t	bpfwrite;
static d_ioctl_t	bpfioctl;
static d_poll_t		bpfpoll;

#define CDEV_MAJOR 23
static struct dev_ops bpf_ops = {
	{ "bpf", CDEV_MAJOR, 0 },
	.d_open =	bpfopen,
	.d_close =	bpfclose,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
};


/*
 * Copy a packet written to /dev/bpf from userland (uio) into a newly
 * allocated mbuf, fill in a link-level sockaddr for it, and run the
 * descriptor's write filter over the data.
 *
 * On success, *mp holds the mbuf (with any link header stripped off into
 * sockp->sa_data) and *datlen is the payload length excluding the link
 * header.  Returns 0 or an errno; on failure the mbuf is freed.
 */
static int
bpf_movein(struct uio *uio, int linktype, struct mbuf **mp,
	   struct sockaddr *sockp, int *datlen, struct bpf_insn *wfilter)
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	*datlen = 0;
	*mp = NULL;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {
	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	default:
		return(EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return(EIO);

	m = m_getl(len, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
	if (m == NULL)
		return(ENOBUFS);
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	/* Packet must at least contain the link header. */
	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	/* Reject the packet if the write filter does not accept it. */
	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/*
	 * Make room for link header, and copy it to sockaddr.
	 */
	if (hlen != 0) {
		bcopy(m->m_data, sockp->sa_data, hlen);
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
		m->m_data += hlen; /* XXX */
	}
	return (0);
bad:
	m_freem(m);
	return(error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	SLIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	bp = d->bd_bif;
	ifp = bp->bif_ifp;

	/* Remove d from the interface's descriptor list. */
	SLIST_REMOVE(&bp->bif_dlist, d, bpf_d, bd_next);

	if (SLIST_EMPTY(&bp->bif_dlist)) {
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*bp->bif_driverp = NULL;
	}
	d->bd_bif = NULL;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged,
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(ifp, "bpf_detach: ifpromisc failed(%d)\n",
				  error);
		}
	}
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d;

	/* bpf is not available inside a jail */
	if (ap->a_cred->cr_prison)
		return(EPERM);

	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d != NULL)
		return(EBUSY);
	make_dev(&bpf_ops, minor(dev), 0, 0, 0600, "bpf%d", lminor(dev));
	MALLOC(d, struct bpf_d *, sizeof *d, M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;	/* by default, see locally transmitted packets */
	callout_init(&d->bd_callout);
	return(0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpfclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;

	funsetown(d->bd_sigio);
	crit_enter();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	if (d->bd_bif != NULL)
		bpf_detachd(d);
	crit_exit();
	bpf_freed(d);
	dev->si_drv1 = NULL;
	kfree(d, M_BPF);

	return(0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL;
/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpfread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;

	/*
	 * Restrict application to use a buffer the same size as
	 * as kernel buffers.
	 */
	if (ap->a_uio->uio_resid != d->bd_bufsize)
		return(EINVAL);

	crit_enter();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			crit_exit();
			return(ENXIO);
		}

		if (ap->a_ioflag & IO_NDELAY) {
			crit_exit();
			return(EWOULDBLOCK);
		}
		error = tsleep(d, PCATCH, "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			crit_exit();
			return(error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				crit_exit();
				return(0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	crit_exit();

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, ap->a_uio);

	crit_enter();
	/* Recycle the hold buffer as the new free buffer. */
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	crit_exit();

	return(error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static void
bpf_wakeup(struct bpf_d *d)
{
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

	get_mplock();
	selwakeup(&d->bd_sel);
	rel_mplock();
	/* XXX */
	d->bd_sel.si_pid = 0;
}

/*
 * Read-timeout callout: if the descriptor is still waiting, mark it
 * timed out and wake a reader if the store buffer holds data.
 */
static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	crit_enter();
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
	crit_exit();
}

/*
 * Runs in the protocol thread: transmit the packet carried by a
 * netmsg_bpf_output message and reply with the if_output() error.
 */
static void
bpf_output_dispatch(struct netmsg *nmsg)
{
	struct netmsg_bpf_output *bmsg = (struct netmsg_bpf_output *)nmsg;
	struct ifnet *ifp = bmsg->nm_ifp;
	int error;

	/*
	 * The driver frees the mbuf.
	 */
	error = ifp->if_output(ifp, bmsg->nm_mbuf, bmsg->nm_dst, NULL);
	lwkt_replymsg(&nmsg->nm_lmsg, error);
}

/*
 * Write a packet to the bound interface.  The packet is validated and
 * copied in by bpf_movein(), then dispatched synchronously to the
 * protocol thread for transmission.
 */
static int
bpfwrite(struct dev_write_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	struct ifnet *ifp;
	struct mbuf *m;
	int error;
	struct sockaddr dst;
	int datlen;
	struct netmsg_bpf_output bmsg;

	if (d->bd_bif == NULL)
		return(ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (ap->a_uio->uio_resid == 0)
		return(0);

	error = bpf_movein(ap->a_uio, (int)d->bd_bif->bif_dlt, &m,
			   &dst, &datlen, d->bd_wfilter);
	if (error)
		return(error);

	if (datlen > ifp->if_mtu) {
		m_freem(m);
		return(EMSGSIZE);
	}

	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	/*
	 * Synchronous dispatch: dst and bmsg live on this stack, which
	 * is safe because lwkt_domsg() blocks until the reply arrives.
	 */
	netmsg_init(&bmsg.nm_netmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    bpf_output_dispatch);
	bmsg.nm_mbuf = m;
	bmsg.nm_ifp = ifp;
	bmsg.nm_dst = &dst;

	return lwkt_domsg(cpu_portfn(0), &bmsg.nm_netmsg.nm_lmsg, 0);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
bpf_resetd(struct bpf_d *d)
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCSETWF		Set ethernet write filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGSEESENT	Get "see packets sent" flag
 *  BIOCSSEESENT	Set "see packets sent" flag
 *  BIOCLOCK		Set "locked" flag
 */
/* ARGSUSED */
static int
bpfioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	int error = 0;

	crit_enter();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	crit_exit();

	/*
	 * Once locked (BIOCLOCK), only a read-mostly subset of the
	 * ioctls is permitted; everything else returns EPERM.
	 */
	if (d->bd_locked == 1) {
		switch (ap->a_cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
			break;
		default:
			return (EPERM);
		}
	}
	switch (ap->a_cmd) {
	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			crit_enter();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			crit_exit();

			*(int *)ap->a_data = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == NULL) {
				error = EINVAL;
			} else {
				ifp = d->bd_bif->bif_ifp;
				lwkt_serialize_enter(ifp->if_serializer);
				error = ifp->if_ioctl(ifp, ap->a_cmd,
						      ap->a_data, ap->a_cred);
				lwkt_serialize_exit(ifp->if_serializer);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)ap->a_data = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		/* Only allowed before an interface is bound. */
		if (d->bd_bif != NULL) {
			error = EINVAL;
		} else {
			u_int size = *(u_int *)ap->a_data;

			/* Clamp to [BPF_MINBUFSIZE, bpf_maxbufsize]. */
			if (size > bpf_maxbufsize)
				*(u_int *)ap->a_data = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)ap->a_data = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
	case BIOCSETWF:
		error = bpf_setf(d, (struct bpf_program *)ap->a_data,
				 ap->a_cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		crit_enter();
		bpf_resetd(d);
		crit_exit();
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		crit_enter();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		crit_exit();
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)ap->a_data = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			error = bpf_getdltlist(d,
				(struct bpf_dltlist *)ap->a_data);
		}
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)ap->a_data);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)ap->a_data;

			strlcpy(ifr->ifr_name, ifp->if_xname,
				sizeof ifr->ifr_name);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)ap->a_data);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)ap->a_data;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz_low(tv);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)ap->a_data;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)ap->a_data;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)ap->a_data;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)ap->a_data;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)ap->a_data = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)ap->a_data ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)ap->a_data = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)ap->a_data;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)ap->a_data;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)ap->a_data, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)ap->a_data = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)ap->a_data), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)ap->a_data = -fgetown(d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)ap->a_data;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)ap->a_data = d->bd_sig;
		break;
	case BIOCLOCK:
		/* One-way latch; cleared only by closing the descriptor. */
		d->bd_locked = 1;
		break;
	}
	return(error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;

	/* cmd selects which filter slot (read or write) is replaced. */
	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
	}
	if (fp->bf_insns == NULL) {
		/* NULL program with nonzero length is bogus. */
		if (fp->bf_len != 0)
			return(EINVAL);
		crit_enter();
		if (wfilter)
			d->bd_wfilter = NULL;
		else
			d->bd_rfilter = NULL;
		bpf_resetd(d);
		crit_exit();
		if (old != NULL)
			kfree(old, M_BPF);
		return(0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return(EINVAL);

	size = flen * sizeof *fp->bf_insns;
	fcode = (struct bpf_insn *)kmalloc(size, M_BPF, M_WAITOK);
	/* Install the program only if it copies in and validates cleanly. */
	if (copyin(fp->bf_insns, fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		crit_enter();
		if (wfilter)
			d->bd_wfilter = fcode;
		else
			d->bd_rfilter = fcode;
		bpf_resetd(d);
		crit_exit();
		if (old != NULL)
			kfree(old, M_BPF);

		return(0);
	}
	kfree(fcode, M_BPF);
	return(EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	int error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL)
		return(ENXIO);

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == NULL || ifp != theywant)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != &ifp->if_bpf)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if (!(ifp->if_flags & IFF_UP))
			return(ENETDOWN);

		if (d->bd_sbuf == NULL) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return(error);
		}
		crit_enter();
		if (bp != d->bd_bif) {
			if (d->bd_bif != NULL) {
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);
			}

			bpf_attachd(d, bp);
		}
		bpf_resetd(d);
		crit_exit();
		return(0);
	}

	/* Not found. */
	return(ENXIO);
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(struct dev_poll_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d;
	int revents;

	d = dev->si_drv1;
	if (d->bd_bif == NULL)
		return(ENXIO);

	/* Writes never block on bpf. */
	revents = ap->a_events & (POLLOUT | POLLWRNORM);
	crit_enter();
	if (ap->a_events & (POLLIN | POLLRDNORM)) {
		/*
		 * An imitation of the FIONREAD ioctl code.
		 * XXX not quite.  An exact imitation:
		 *	if (d->b_slen != 0 ||
		 *	    (d->bd_hbuf != NULL && d->bd_hlen != 0)
		 */
		if (d->bd_hlen != 0 ||
		    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
		    d->bd_slen != 0)) {
			revents |= ap->a_events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(curthread, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	crit_exit();
	ap->a_events = revents;
	return(0);
}

/*
 * Process the packet pkt of length pktlen.
The packet is parsed
 * by each listener's filter, and if accepted, stashed into the
 * corresponding buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bpf_d *d;
	struct timeval tv;
	int gottime = 0;	/* take the timestamp lazily, at most once */
	u_int slen;

	get_mplock();

	/* Re-check */
	if (bp == NULL) {
		rel_mplock();
		return;
	}

	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			catchpacket(d, pkt, pktlen, slen, ovbcopy, &tv);
		}
	}

	rel_mplock();
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == NULL)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Process the packet in the mbuf chain m.  The packet is parsed by each
 * listener's filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bpf_d *d;
	u_int pktlen, slen;
	struct timeval tv;
	int gottime = 0;	/* take the timestamp lazily, at most once */

	get_mplock();

	/* Re-check */
	if (bp == NULL) {
		rel_mplock();
		return;
	}

	/* Don't compute pktlen, if no descriptor is attached.
	 */
	if (SLIST_EMPTY(&bp->bif_dlist)) {
		rel_mplock();
		return;
	}

	pktlen = m_lengthm(m, NULL);

	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		/* rcvif == NULL marks locally generated (sent) packets */
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy,
				    &tv);
		}
	}

	rel_mplock();
}

/*
 * Tap a packet, prepending a 4-byte address-family pseudo header as
 * used by DLT_NULL style interfaces.
 */
void
bpf_mtap_family(struct bpf_if *bp, struct mbuf *m, sa_family_t family)
{
	u_int family4;

	KKASSERT(family != AF_UNSPEC);

	family4 = (u_int)family;
	bpf_ptap(bp, m, &family4, sizeof(family4));
}

/*
 * Process the packet in the mbuf chain m with the header in m prepended.
 * The packet is parsed by each listener's filter, and if accepted,
 * stashed into the corresponding buffer.
 */
void
bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
{
	struct mbuf mb;

	/*
	 * Craft on-stack mbuf suitable for passing to bpf_mtap.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = __DECONST(void *, data); /* LINTED */
	mb.m_len = dlen;
	mb.m_pkthdr.rcvif = m->m_pkthdr.rcvif;

	bpf_mtap(bp, &mb);
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
	    void (*cpfn)(const void *, void *, size_t),
	    const struct timeval *tv)
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;

	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		bpf_wakeup(d);
	}

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	hp->bh_tstamp = *tv;
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
1302 */ 1303 static int 1304 bpf_allocbufs(struct bpf_d *d) 1305 { 1306 d->bd_fbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK); 1307 d->bd_sbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK); 1308 d->bd_slen = 0; 1309 d->bd_hlen = 0; 1310 return(0); 1311 } 1312 1313 /* 1314 * Free buffers and packet filter program currently in use by a descriptor. 1315 * Called on close. 1316 */ 1317 static void 1318 bpf_freed(struct bpf_d *d) 1319 { 1320 /* 1321 * We don't need to lock out interrupts since this descriptor has 1322 * been detached from its interface and it yet hasn't been marked 1323 * free. 1324 */ 1325 if (d->bd_sbuf != NULL) { 1326 kfree(d->bd_sbuf, M_BPF); 1327 if (d->bd_hbuf != NULL) 1328 kfree(d->bd_hbuf, M_BPF); 1329 if (d->bd_fbuf != NULL) 1330 kfree(d->bd_fbuf, M_BPF); 1331 } 1332 if (d->bd_rfilter) 1333 kfree(d->bd_rfilter, M_BPF); 1334 if (d->bd_wfilter) 1335 kfree(d->bd_wfilter, M_BPF); 1336 } 1337 1338 /* 1339 * Attach an interface to bpf. ifp is a pointer to the structure 1340 * defining the interface to be attached, dlt is the link layer type, 1341 * and hdrlen is the fixed size of the link header (variable length 1342 * headers are not yet supported). 1343 */ 1344 void 1345 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) 1346 { 1347 bpfattach_dlt(ifp, dlt, hdrlen, &ifp->if_bpf); 1348 } 1349 1350 void 1351 bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) 1352 { 1353 struct bpf_if *bp; 1354 1355 bp = kmalloc(sizeof *bp, M_BPF, M_WAITOK | M_ZERO); 1356 1357 SLIST_INIT(&bp->bif_dlist); 1358 bp->bif_ifp = ifp; 1359 bp->bif_dlt = dlt; 1360 bp->bif_driverp = driverp; 1361 *bp->bif_driverp = NULL; 1362 1363 bp->bif_next = bpf_iflist; 1364 bpf_iflist = bp; 1365 1366 /* 1367 * Compute the length of the bpf header. 
This is not necessarily 1368 * equal to SIZEOF_BPF_HDR because we want to insert spacing such 1369 * that the network layer header begins on a longword boundary (for 1370 * performance reasons and to alleviate alignment restrictions). 1371 */ 1372 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; 1373 1374 if (bootverbose) 1375 if_printf(ifp, "bpf attached\n"); 1376 } 1377 1378 /* 1379 * Detach bpf from an interface. This involves detaching each descriptor 1380 * associated with the interface, and leaving bd_bif NULL. Notify each 1381 * descriptor as it's detached so that any sleepers wake up and get 1382 * ENXIO. 1383 */ 1384 void 1385 bpfdetach(struct ifnet *ifp) 1386 { 1387 struct bpf_if *bp, *bp_prev; 1388 struct bpf_d *d; 1389 1390 crit_enter(); 1391 1392 /* Locate BPF interface information */ 1393 bp_prev = NULL; 1394 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1395 if (ifp == bp->bif_ifp) 1396 break; 1397 bp_prev = bp; 1398 } 1399 1400 /* Interface wasn't attached */ 1401 if (bp->bif_ifp == NULL) { 1402 crit_exit(); 1403 kprintf("bpfdetach: %s was not attached\n", ifp->if_xname); 1404 return; 1405 } 1406 1407 while ((d = SLIST_FIRST(&bp->bif_dlist)) != NULL) { 1408 bpf_detachd(d); 1409 bpf_wakeup(d); 1410 } 1411 1412 if (bp_prev != NULL) 1413 bp_prev->bif_next = bp->bif_next; 1414 else 1415 bpf_iflist = bp->bif_next; 1416 1417 kfree(bp, M_BPF); 1418 1419 crit_exit(); 1420 } 1421 1422 /* 1423 * Get a list of available data link type of the interface. 
1424 */ 1425 static int 1426 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl) 1427 { 1428 int n, error; 1429 struct ifnet *ifp; 1430 struct bpf_if *bp; 1431 1432 ifp = d->bd_bif->bif_ifp; 1433 n = 0; 1434 error = 0; 1435 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1436 if (bp->bif_ifp != ifp) 1437 continue; 1438 if (bfl->bfl_list != NULL) { 1439 if (n >= bfl->bfl_len) { 1440 return (ENOMEM); 1441 } 1442 error = copyout(&bp->bif_dlt, 1443 bfl->bfl_list + n, sizeof(u_int)); 1444 } 1445 n++; 1446 } 1447 bfl->bfl_len = n; 1448 return(error); 1449 } 1450 1451 /* 1452 * Set the data link type of a BPF instance. 1453 */ 1454 static int 1455 bpf_setdlt(struct bpf_d *d, u_int dlt) 1456 { 1457 int error, opromisc; 1458 struct ifnet *ifp; 1459 struct bpf_if *bp; 1460 1461 if (d->bd_bif->bif_dlt == dlt) 1462 return (0); 1463 ifp = d->bd_bif->bif_ifp; 1464 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1465 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) 1466 break; 1467 } 1468 if (bp != NULL) { 1469 opromisc = d->bd_promisc; 1470 crit_enter(); 1471 bpf_detachd(d); 1472 bpf_attachd(d, bp); 1473 bpf_resetd(d); 1474 if (opromisc) { 1475 error = ifpromisc(bp->bif_ifp, 1); 1476 if (error) { 1477 if_printf(bp->bif_ifp, 1478 "bpf_setdlt: ifpromisc failed (%d)\n", 1479 error); 1480 } else { 1481 d->bd_promisc = 1; 1482 } 1483 } 1484 crit_exit(); 1485 } 1486 return(bp == NULL ? EINVAL : 0); 1487 } 1488 1489 static void 1490 bpf_drvinit(void *unused) 1491 { 1492 dev_ops_add(&bpf_ops, 0, 0); 1493 } 1494 1495 SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL) 1496 1497 #else /* !BPF */ 1498 /* 1499 * NOP stubs to allow bpf-using drivers to load and function. 1500 * 1501 * A 'better' implementation would allow the core bpf functionality 1502 * to be loaded at runtime. 
1503 */ 1504 1505 void 1506 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) 1507 { 1508 } 1509 1510 void 1511 bpf_mtap(struct bpf_if *bp, struct mbuf *m) 1512 { 1513 } 1514 1515 void 1516 bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen) 1517 { 1518 } 1519 1520 void 1521 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) 1522 { 1523 } 1524 1525 void 1526 bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) 1527 { 1528 } 1529 1530 void 1531 bpfdetach(struct ifnet *ifp) 1532 { 1533 } 1534 1535 u_int 1536 bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen) 1537 { 1538 return -1; /* "no filter" behaviour */ 1539 } 1540 1541 #endif /* !BPF */ 1542