1 /* 2 * Copyright (c) 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from the Stanford/CMU enet packet filter, 6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8 * Berkeley Laboratory. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 *
 * @(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.12 2002/04/14 21:41:48 luigi Exp $
 */

#include "use_bpf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>

#include <sys/event.h>

#include <sys/socket.h>
#include <sys/vnode.h>

#include <sys/thread2.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <sys/devfs.h>

/*
 * Message used by bpfwrite() to hand a fully built packet to the
 * netisr thread, which performs the actual if_output() call.
 */
struct netmsg_bpf_output {
	struct netmsg_base base;	/* embedded lwkt/netisr message header */
	struct mbuf *nm_mbuf;		/* packet to transmit; driver consumes it */
	struct ifnet *nm_ifp;		/* interface to transmit on */
	struct sockaddr *nm_dst;	/* pseudo destination for if_output() */
};

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
DEVFS_DECLARE_CLONE_BITMAP(bpf);

/*
 * Number of /dev/bpfN nodes created up front; further units are cloned
 * on demand (bpfclone) and destroyed again on last close (bpfclose).
 */
#if NBPF <= 1
#define BPF_PREALLOCATED_UNITS	4
#else
#define BPF_PREALLOCATED_UNITS	NBPF
#endif

#if NBPF > 0

/*
 * The default read buffer size is patchable.
95 */ 96 static int bpf_bufsize = BPF_DEFAULTBUFSIZE; 97 SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW, 98 &bpf_bufsize, 0, "Current size of bpf buffer"); 99 int bpf_maxbufsize = BPF_MAXBUFSIZE; 100 SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW, 101 &bpf_maxbufsize, 0, "Maximum size of bpf buffer"); 102 103 /* 104 * bpf_iflist is the list of interfaces; each corresponds to an ifnet 105 */ 106 static struct bpf_if *bpf_iflist; 107 108 static struct lwkt_token bpf_token = LWKT_TOKEN_INITIALIZER(bpf_token); 109 110 static int bpf_allocbufs(struct bpf_d *); 111 static void bpf_attachd(struct bpf_d *d, struct bpf_if *bp); 112 static void bpf_detachd(struct bpf_d *d); 113 static void bpf_resetd(struct bpf_d *); 114 static void bpf_freed(struct bpf_d *); 115 static void bpf_mcopy(const void *, void *, size_t); 116 static int bpf_movein(struct uio *, int, struct mbuf **, 117 struct sockaddr *, int *, struct bpf_insn *); 118 static int bpf_setif(struct bpf_d *, struct ifreq *); 119 static void bpf_timed_out(void *); 120 static void bpf_wakeup(struct bpf_d *); 121 static void catchpacket(struct bpf_d *, u_char *, u_int, u_int, 122 void (*)(const void *, void *, size_t), 123 const struct timeval *); 124 static int bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd); 125 static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *); 126 static int bpf_setdlt(struct bpf_d *, u_int); 127 static void bpf_drvinit(void *unused); 128 static void bpf_filter_detach(struct knote *kn); 129 static int bpf_filter_read(struct knote *kn, long hint); 130 131 static d_open_t bpfopen; 132 static d_clone_t bpfclone; 133 static d_close_t bpfclose; 134 static d_read_t bpfread; 135 static d_write_t bpfwrite; 136 static d_ioctl_t bpfioctl; 137 static d_kqfilter_t bpfkqfilter; 138 139 #define CDEV_MAJOR 23 140 static struct dev_ops bpf_ops = { 141 { "bpf", 0, D_MPSAFE }, 142 .d_open = bpfopen, 143 .d_close = bpfclose, 144 .d_read = bpfread, 145 .d_write = bpfwrite, 146 
.d_ioctl = bpfioctl, 147 .d_kqfilter = bpfkqfilter 148 }; 149 150 151 static int 152 bpf_movein(struct uio *uio, int linktype, struct mbuf **mp, 153 struct sockaddr *sockp, int *datlen, struct bpf_insn *wfilter) 154 { 155 struct mbuf *m; 156 int error; 157 int len; 158 int hlen; 159 int slen; 160 161 *datlen = 0; 162 *mp = NULL; 163 164 /* 165 * Build a sockaddr based on the data link layer type. 166 * We do this at this level because the ethernet header 167 * is copied directly into the data field of the sockaddr. 168 * In the case of SLIP, there is no header and the packet 169 * is forwarded as is. 170 * Also, we are careful to leave room at the front of the mbuf 171 * for the link level header. 172 */ 173 switch (linktype) { 174 case DLT_SLIP: 175 sockp->sa_family = AF_INET; 176 hlen = 0; 177 break; 178 179 case DLT_EN10MB: 180 sockp->sa_family = AF_UNSPEC; 181 /* XXX Would MAXLINKHDR be better? */ 182 hlen = sizeof(struct ether_header); 183 break; 184 185 case DLT_RAW: 186 case DLT_NULL: 187 sockp->sa_family = AF_UNSPEC; 188 hlen = 0; 189 break; 190 191 case DLT_ATM_RFC1483: 192 /* 193 * en atm driver requires 4-byte atm pseudo header. 194 * though it isn't standard, vpi:vci needs to be 195 * specified anyway. 
196 */ 197 sockp->sa_family = AF_UNSPEC; 198 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */ 199 break; 200 201 case DLT_PPP: 202 sockp->sa_family = AF_UNSPEC; 203 hlen = 4; /* This should match PPP_HDRLEN */ 204 break; 205 206 default: 207 return(EIO); 208 } 209 210 len = uio->uio_resid; 211 *datlen = len - hlen; 212 if ((unsigned)len > MCLBYTES) 213 return(EIO); 214 215 m = m_getl(len, M_WAITOK, MT_DATA, M_PKTHDR, NULL); 216 if (m == NULL) 217 return(ENOBUFS); 218 m->m_pkthdr.len = m->m_len = len; 219 m->m_pkthdr.rcvif = NULL; 220 *mp = m; 221 222 if (m->m_len < hlen) { 223 error = EPERM; 224 goto bad; 225 } 226 227 error = uiomove(mtod(m, u_char *), len, uio); 228 if (error) 229 goto bad; 230 231 slen = bpf_filter(wfilter, mtod(m, u_char *), len, len); 232 if (slen == 0) { 233 error = EPERM; 234 goto bad; 235 } 236 237 /* 238 * Make room for link header, and copy it to sockaddr. 239 */ 240 if (hlen != 0) { 241 bcopy(m->m_data, sockp->sa_data, hlen); 242 m->m_pkthdr.len -= hlen; 243 m->m_len -= hlen; 244 m->m_data += hlen; /* XXX */ 245 } 246 return (0); 247 bad: 248 m_freem(m); 249 return(error); 250 } 251 252 /* 253 * Attach file to the bpf interface, i.e. make d listen on bp. 254 * Must be called at splimp. 255 */ 256 static void 257 bpf_attachd(struct bpf_d *d, struct bpf_if *bp) 258 { 259 /* 260 * Point d at bp, and add d to the interface's list of listeners. 261 * Finally, point the driver's bpf cookie at the interface so 262 * it will divert packets to bpf. 263 */ 264 lwkt_gettoken(&bpf_token); 265 d->bd_bif = bp; 266 SLIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next); 267 *bp->bif_driverp = bp; 268 269 EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1); 270 lwkt_reltoken(&bpf_token); 271 } 272 273 /* 274 * Detach a file from its interface. 
 */
static void
bpf_detachd(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	lwkt_gettoken(&bpf_token);
	bp = d->bd_bif;
	ifp = bp->bif_ifp;

	/* Remove d from the interface's descriptor list. */
	SLIST_REMOVE(&bp->bif_dlist, d, bpf_d, bd_next);

	if (SLIST_EMPTY(&bp->bif_dlist)) {
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*bp->bif_driverp = NULL;
	}
	d->bd_bif = NULL;

	EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged,
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(ifp, "bpf_detach: ifpromisc failed(%d)\n",
				  error);
		}
	}
	lwkt_reltoken(&bpf_token);
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d;

	lwkt_gettoken(&bpf_token);
	/* bpf devices are not usable from within a jail */
	if (ap->a_cred->cr_prison) {
		lwkt_reltoken(&bpf_token);
		return(EPERM);
	}

	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d != NULL) {
		lwkt_reltoken(&bpf_token);
		return(EBUSY);
	}

	/* Allocate and initialize the descriptor's nonzero defaults. */
	d = kmalloc(sizeof *d, M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;	/* patchable default (sysctl) */
	d->bd_sig = SIGIO;		/* default signal for FIOASYNC */
	d->bd_seesent = 1;		/* see transmitted packets by default */
	callout_init(&d->bd_callout);
	lwkt_reltoken(&bpf_token);

	return(0);
}

/*
 * Autoclone handler: allocate the next free bpf unit number and create
 * its device node on demand.
 */
static int
bpfclone(struct dev_clone_args *ap)
{
	int unit;

	unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(bpf), 0);
	ap->a_dev = make_only_dev(&bpf_ops, unit, 0, 0, 0600, "bpf%d", unit);

	return 0;
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpfclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;

	lwkt_gettoken(&bpf_token);
	funsetown(&d->bd_sigio);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	if (d->bd_bif != NULL)
		bpf_detachd(d);
	bpf_freed(d);
	dev->si_drv1 = NULL;
	/* Cloned (non-preallocated) units are destroyed on last close. */
	if (dev->si_uminor >= BPF_PREALLOCATED_UNITS) {
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(bpf), dev->si_uminor);
		destroy_dev(dev);
	}
	kfree(d, M_BPF);
	lwkt_reltoken(&bpf_token);

	return(0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
404 */ 405 #define ROTATE_BUFFERS(d) \ 406 (d)->bd_hbuf = (d)->bd_sbuf; \ 407 (d)->bd_hlen = (d)->bd_slen; \ 408 (d)->bd_sbuf = (d)->bd_fbuf; \ 409 (d)->bd_slen = 0; \ 410 (d)->bd_fbuf = NULL; 411 /* 412 * bpfread - read next chunk of packets from buffers 413 */ 414 static int 415 bpfread(struct dev_read_args *ap) 416 { 417 cdev_t dev = ap->a_head.a_dev; 418 struct bpf_d *d = dev->si_drv1; 419 int timed_out; 420 int error; 421 422 lwkt_gettoken(&bpf_token); 423 /* 424 * Restrict application to use a buffer the same size as 425 * as kernel buffers. 426 */ 427 if (ap->a_uio->uio_resid != d->bd_bufsize) { 428 lwkt_reltoken(&bpf_token); 429 return(EINVAL); 430 } 431 432 if (d->bd_state == BPF_WAITING) 433 callout_stop(&d->bd_callout); 434 timed_out = (d->bd_state == BPF_TIMED_OUT); 435 d->bd_state = BPF_IDLE; 436 /* 437 * If the hold buffer is empty, then do a timed sleep, which 438 * ends when the timeout expires or when enough packets 439 * have arrived to fill the store buffer. 440 */ 441 while (d->bd_hbuf == NULL) { 442 if ((d->bd_immediate || (ap->a_ioflag & IO_NDELAY) || timed_out) 443 && d->bd_slen != 0) { 444 /* 445 * A packet(s) either arrived since the previous, 446 * We're in immediate mode, or are reading 447 * in non-blocking mode, and a packet(s) 448 * either arrived since the previous 449 * read or arrived while we were asleep. 450 * Rotate the buffers and return what's here. 451 */ 452 ROTATE_BUFFERS(d); 453 break; 454 } 455 456 /* 457 * No data is available, check to see if the bpf device 458 * is still pointed at a real interface. If not, return 459 * ENXIO so that the userland process knows to rebind 460 * it before using it again. 
461 */ 462 if (d->bd_bif == NULL) { 463 lwkt_reltoken(&bpf_token); 464 return(ENXIO); 465 } 466 467 if (ap->a_ioflag & IO_NDELAY) { 468 lwkt_reltoken(&bpf_token); 469 return(EWOULDBLOCK); 470 } 471 error = tsleep(d, PCATCH, "bpf", d->bd_rtout); 472 if (error == EINTR || error == ERESTART) { 473 lwkt_reltoken(&bpf_token); 474 return(error); 475 } 476 if (error == EWOULDBLOCK) { 477 /* 478 * On a timeout, return what's in the buffer, 479 * which may be nothing. If there is something 480 * in the store buffer, we can rotate the buffers. 481 */ 482 if (d->bd_hbuf) 483 /* 484 * We filled up the buffer in between 485 * getting the timeout and arriving 486 * here, so we don't need to rotate. 487 */ 488 break; 489 490 if (d->bd_slen == 0) { 491 lwkt_reltoken(&bpf_token); 492 return(0); 493 } 494 ROTATE_BUFFERS(d); 495 break; 496 } 497 } 498 /* 499 * At this point, we know we have something in the hold slot. 500 */ 501 502 /* 503 * Move data from hold buffer into user space. 504 * We know the entire buffer is transferred since 505 * we checked above that the read buffer is bpf_bufsize bytes. 506 */ 507 error = uiomove(d->bd_hbuf, d->bd_hlen, ap->a_uio); 508 509 d->bd_fbuf = d->bd_hbuf; 510 d->bd_hbuf = NULL; 511 d->bd_hlen = 0; 512 lwkt_reltoken(&bpf_token); 513 514 return(error); 515 } 516 517 518 /* 519 * If there are processes sleeping on this descriptor, wake them up. 
520 */ 521 static void 522 bpf_wakeup(struct bpf_d *d) 523 { 524 if (d->bd_state == BPF_WAITING) { 525 callout_stop(&d->bd_callout); 526 d->bd_state = BPF_IDLE; 527 } 528 wakeup(d); 529 if (d->bd_async && d->bd_sig && d->bd_sigio) 530 pgsigio(d->bd_sigio, d->bd_sig, 0); 531 532 KNOTE(&d->bd_kq.ki_note, 0); 533 } 534 535 static void 536 bpf_timed_out(void *arg) 537 { 538 struct bpf_d *d = (struct bpf_d *)arg; 539 540 if (d->bd_state == BPF_WAITING) { 541 d->bd_state = BPF_TIMED_OUT; 542 if (d->bd_slen != 0) 543 bpf_wakeup(d); 544 } 545 } 546 547 static void 548 bpf_output_dispatch(netmsg_t msg) 549 { 550 struct netmsg_bpf_output *bmsg = (struct netmsg_bpf_output *)msg; 551 struct ifnet *ifp = bmsg->nm_ifp; 552 int error; 553 554 /* 555 * The driver frees the mbuf. 556 */ 557 error = ifp->if_output(ifp, bmsg->nm_mbuf, bmsg->nm_dst, NULL); 558 lwkt_replymsg(&msg->lmsg, error); 559 } 560 561 static int 562 bpfwrite(struct dev_write_args *ap) 563 { 564 cdev_t dev = ap->a_head.a_dev; 565 struct bpf_d *d = dev->si_drv1; 566 struct ifnet *ifp; 567 struct mbuf *m; 568 int error, ret; 569 struct sockaddr dst; 570 int datlen; 571 struct netmsg_bpf_output bmsg; 572 573 lwkt_gettoken(&bpf_token); 574 if (d->bd_bif == NULL) { 575 lwkt_reltoken(&bpf_token); 576 return(ENXIO); 577 } 578 579 ifp = d->bd_bif->bif_ifp; 580 581 if (ap->a_uio->uio_resid == 0) { 582 lwkt_reltoken(&bpf_token); 583 return(0); 584 } 585 586 error = bpf_movein(ap->a_uio, (int)d->bd_bif->bif_dlt, &m, 587 &dst, &datlen, d->bd_wfilter); 588 if (error) { 589 lwkt_reltoken(&bpf_token); 590 return(error); 591 } 592 593 if (datlen > ifp->if_mtu) { 594 m_freem(m); 595 lwkt_reltoken(&bpf_token); 596 return(EMSGSIZE); 597 } 598 599 if (d->bd_hdrcmplt) 600 dst.sa_family = pseudo_AF_HDRCMPLT; 601 602 netmsg_init(&bmsg.base, NULL, &curthread->td_msgport, 603 0, bpf_output_dispatch); 604 bmsg.nm_mbuf = m; 605 bmsg.nm_ifp = ifp; 606 bmsg.nm_dst = &dst; 607 608 ret = lwkt_domsg(netisr_cpuport(0), &bmsg.base.lmsg, 0); 609 
lwkt_reltoken(&bpf_token); 610 611 return ret; 612 } 613 614 /* 615 * Reset a descriptor by flushing its packet buffer and clearing the 616 * receive and drop counts. Should be called at splimp. 617 */ 618 static void 619 bpf_resetd(struct bpf_d *d) 620 { 621 if (d->bd_hbuf) { 622 /* Free the hold buffer. */ 623 d->bd_fbuf = d->bd_hbuf; 624 d->bd_hbuf = NULL; 625 } 626 d->bd_slen = 0; 627 d->bd_hlen = 0; 628 d->bd_rcount = 0; 629 d->bd_dcount = 0; 630 } 631 632 /* 633 * FIONREAD Check for read packet available. 634 * SIOCGIFADDR Get interface address - convenient hook to driver. 635 * BIOCGBLEN Get buffer len [for read()]. 636 * BIOCSETF Set ethernet read filter. 637 * BIOCSETWF Set ethernet write filter. 638 * BIOCFLUSH Flush read packet buffer. 639 * BIOCPROMISC Put interface into promiscuous mode. 640 * BIOCGDLT Get link layer type. 641 * BIOCGETIF Get interface name. 642 * BIOCSETIF Set interface. 643 * BIOCSRTIMEOUT Set read timeout. 644 * BIOCGRTIMEOUT Get read timeout. 645 * BIOCGSTATS Get packet stats. 646 * BIOCIMMEDIATE Set immediate mode. 647 * BIOCVERSION Get filter language version. 
 * BIOCGHDRCMPLT	Get "header already complete" flag
 * BIOCSHDRCMPLT	Set "header already complete" flag
 * BIOCGSEESENT		Get "see packets sent" flag
 * BIOCSSEESENT		Set "see packets sent" flag
 * BIOCLOCK		Set "locked" flag
 */
/* ARGSUSED */
static int
bpfioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	int error = 0;

	lwkt_gettoken(&bpf_token);
	/* Any ioctl cancels a pending read timeout. */
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;

	/*
	 * A locked descriptor (see BIOCLOCK below) may only issue the
	 * harmless / read-only subset of commands listed here.
	 */
	if (d->bd_locked == 1) {
		switch (ap->a_cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
			break;
		default:
			lwkt_reltoken(&bpf_token);
			return (EPERM);
		}
	}
	switch (ap->a_cmd) {
	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			/* Bytes readable: store buffer plus held buffer. */
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;

			*(int *)ap->a_data = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == NULL) {
				error = EINVAL;
			} else {
				/* Pass straight through to the driver. */
				ifp = d->bd_bif->bif_ifp;
				ifnet_serialize_all(ifp);
				error = ifp->if_ioctl(ifp, ap->a_cmd,
						      ap->a_data, ap->a_cred);
				ifnet_deserialize_all(ifp);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)ap->a_data = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before binding to an
	 * interface (the buffers are allocated at bind time); the
	 * value is clamped to [BPF_MINBUFSIZE, bpf_maxbufsize] and
	 * the clamped result is written back to userland.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != NULL) {
			error = EINVAL;
		} else {
			u_int size = *(u_int *)ap->a_data;

			if (size > bpf_maxbufsize)
				*(u_int *)ap->a_data = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)ap->a_data = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter (BIOCSETF) or write filter
	 * (BIOCSETWF).
	 */
	case BIOCSETF:
	case BIOCSETWF:
		error = bpf_setf(d, (struct bpf_program *)ap->a_data,
				 ap->a_cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		bpf_resetd(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)ap->a_data = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			error = bpf_getdltlist(d,
				(struct bpf_dltlist *)ap->a_data);
		}
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)ap->a_data);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)ap->a_data;

			strlcpy(ifr->ifr_name, ifp->if_xname,
				sizeof ifr->ifr_name);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)ap->a_data);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)ap->a_data;

			/*
			 * tvtohz_low() (the non-rounding conversion,
			 * historically "tvtohz() minus one tick") is used
			 * since this isn't a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz_low(tv);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)ap->a_data;

			/* Convert ticks back to seconds/microseconds. */
			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * ustick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)ap->a_data;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)ap->a_data;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)ap->a_data;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)ap->a_data = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)ap->a_data ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)ap->a_data = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)ap->a_data;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)ap->a_data;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)ap->a_data, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)ap->a_data = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)ap->a_data), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)ap->a_data = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)ap->a_data;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)ap->a_data = d->bd_sig;
		break;
	case BIOCLOCK:
		/* One-way: restricts this descriptor to the safe subset. */
		d->bd_locked = 1;
		break;
	}
	lwkt_reltoken(&bpf_token);

	return(error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;

	/* BIOCSETWF replaces the write filter, anything else the read one. */
	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
	}
	if (fp->bf_insns == NULL) {
		/* A NULL program with a nonzero length is malformed. */
		if (fp->bf_len != 0)
			return(EINVAL);
		/* Clearing the filter: every packet is accepted again. */
		if (wfilter)
			d->bd_wfilter = NULL;
		else
			d->bd_rfilter = NULL;
		bpf_resetd(d);
		if (old != NULL)
			kfree(old, M_BPF);
		return(0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return(EINVAL);

	/* Copy in and install only programs that validate cleanly. */
	size = flen * sizeof *fp->bf_insns;
	fcode = (struct bpf_insn *)kmalloc(size, M_BPF, M_WAITOK);
	if (copyin(fp->bf_insns, fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		if (wfilter)
			d->bd_wfilter = fcode;
		else
			d->bd_rfilter = fcode;
		bpf_resetd(d);
		if (old != NULL)
			kfree(old, M_BPF);

		return(0);
	}
	kfree(fcode, M_BPF);
	return(EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	int error;
	struct ifnet *theywant;

	ifnet_lock();

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL) {
		ifnet_unlock();
		return(ENXIO);
	}

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == NULL || ifp != theywant)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != &ifp->if_bpf)
			continue;
		/*
		 * We found the requested interface.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if (d->bd_sbuf == NULL) {
			error = bpf_allocbufs(d);
			if (error != 0) {
				ifnet_unlock();
				return(error);
			}
		}
		if (bp != d->bd_bif) {
			if (d->bd_bif != NULL) {
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);
			}

			bpf_attachd(d, bp);
		}
		bpf_resetd(d);

		ifnet_unlock();
		return(0);
	}

	ifnet_unlock();

	/* Not found. */
	return(ENXIO);
}

/* kqueue(2) read filter ops for bpf descriptors. */
static struct filterops bpf_read_filtops =
	{ FILTEROP_ISFD, NULL, bpf_filter_detach, bpf_filter_read };

static int
bpfkqfilter(struct dev_kqfilter_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct knote *kn = ap->a_kn;
	struct klist *klist;
	struct bpf_d *d;

	lwkt_gettoken(&bpf_token);
	d = dev->si_drv1;
	/* Not bound to an interface yet: report ready immediately. */
	if (d->bd_bif == NULL) {
		ap->a_result = 1;
		lwkt_reltoken(&bpf_token);
		return (0);
	}

	ap->a_result = 0;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &bpf_read_filtops;
		kn->kn_hook = (caddr_t)d;
		break;
	default:
		ap->a_result = EOPNOTSUPP;
		lwkt_reltoken(&bpf_token);
		return (0);
	}

	klist = &d->bd_kq.ki_note;
	knote_insert(klist, kn);
	lwkt_reltoken(&bpf_token);

	return (0);
}

static void
bpf_filter_detach(struct knote *kn)
{
	struct klist *klist;
	struct bpf_d *d;

	d = (struct bpf_d *)kn->kn_hook;
	klist = &d->bd_kq.ki_note;
	knote_remove(klist, kn);
}

/*
 * Event filter: ready when the hold buffer has data or, in immediate /
 * timed-out mode, when the store buffer has data.  As a side effect,
 * arms the read timeout callout on an idle descriptor.
 */
static int
bpf_filter_read(struct knote *kn, long hint)
{
	struct bpf_d *d;
	int ready = 0;

	d = (struct bpf_d *)kn->kn_hook;
	if (d->bd_hlen != 0 ||
	    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
	    d->bd_slen != 0)) {
		ready = 1;
	} else {
		/* Start the read timeout if necessary. */
		if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
			callout_reset(&d->bd_callout, d->bd_rtout,
			    bpf_timed_out, d);
			d->bd_state = BPF_WAITING;
		}
	}

	return (ready);
}


/*
 * Process the packet pkt of length pktlen.  The packet is parsed
 * by each listener's filter, and if accepted, stashed into the
 * corresponding buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bpf_d *d;
	struct timeval tv;
	int gottime = 0;
	u_int slen;

	lwkt_gettoken(&bpf_token);
	/* Re-check */
	if (bp == NULL) {
		lwkt_reltoken(&bpf_token);
		return;
	}

	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			/* Timestamp lazily, at most once per packet. */
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			catchpacket(d, pkt, pktlen, slen, ovbcopy, &tv);
		}
	}
	lwkt_reltoken(&bpf_token);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == NULL)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Process the packet in the mbuf chain m.  The packet is parsed by each
 * listener's filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bpf_d *d;
	u_int pktlen, slen;
	struct timeval tv;
	int gottime = 0;

	lwkt_gettoken(&bpf_token);
	/* Re-check */
	if (bp == NULL) {
		lwkt_reltoken(&bpf_token);
		return;
	}

	/* Don't compute pktlen, if no descriptor is attached. */
	if (SLIST_EMPTY(&bp->bif_dlist)) {
		lwkt_reltoken(&bpf_token);
		return;
	}

	pktlen = m_lengthm(m, NULL);

	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		/* Skip outgoing packets (rcvif == NULL) unless BIOCSSEESENT. */
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			/* Timestamp lazily, at most once per packet. */
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy,
				    &tv);
		}
	}
	lwkt_reltoken(&bpf_token);
}

/*
 * Incoming linkage from device drivers, where we have a mbuf chain
 * but need to prepend some arbitrary header from a linear buffer.
 *
 * Con up a minimal dummy header to pacify bpf.  Allocate (only) a
 * struct m_hdr on the stack.  This is safe as bpf only reads from the
 * fields in this header that we initialize, and will not try to free
 * it or keep a pointer to it.
 *
 * NOTE(review): the 'direction' argument is currently unused here —
 * confirm whether callers expect directional filtering.
 */
void
bpf_mtap_hdr(struct bpf_if *arg, caddr_t data, u_int dlen, struct mbuf *m,
	     u_int direction)
{
	struct m_hdr mh;

	mh.mh_flags = 0;
	mh.mh_next = m;
	mh.mh_len = dlen;
	mh.mh_data = data;

	bpf_mtap(arg, (struct mbuf *) &mh);
}

/*
 * Tap a packet, prepending a 4-byte address family pseudo header
 * ahead of the mbuf chain (DLT_NULL style).
 */
void
bpf_mtap_family(struct bpf_if *bp, struct mbuf *m, sa_family_t family)
{
	u_int family4;

	KKASSERT(family != AF_UNSPEC);

	family4 = (u_int)family;
	bpf_ptap(bp, m, &family4, sizeof(family4));
}

/*
 * Process the packet in the mbuf chain m with the header in m prepended.
 * The packet is parsed by each listener's filter, and if accepted,
 * stashed into the corresponding buffer.
 */
void
bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
{
	struct mbuf mb;

	/*
	 * Craft on-stack mbuf suitable for passing to bpf_mtap.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = __DECONST(void *, data); /* LINTED */
	mb.m_len = dlen;
	mb.m_pkthdr.rcvif = m->m_pkthdr.rcvif;

	bpf_mtap(bp, &mb);
}

/*
 * Move the packet data from interface memory (pkt) into the store
 * buffer, waking up pending reads when the buffers had to be rotated
 * or the descriptor is in immediate/timed-out mode.  "cpfn" is the
 * routine called to do the actual data transfer.  bcopy/ovbcopy is
 * passed in to copy contiguous chunks, while bpf_mcopy is passed in
 * to copy mbuf chains.  In the latter case, pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
	    void (*cpfn)(const void *, void *, size_t),
	    const struct timeval *tv)
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	int wakeup = 0;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		wakeup = 1;
	}

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	hp->bh_tstamp = *tv;
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;

	if (wakeup)
		bpf_wakeup(d);
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(struct bpf_d *d)
{
	/* M_WAITOK allocations do not fail; hence the unconditional 0. */
	d->bd_fbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK);
	d->bd_sbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK);
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return(0);
}

/*
 * Free buffers and packet filter program currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it yet hasn't been marked
	 * free.
	 */
	if (d->bd_sbuf != NULL) {
		kfree(d->bd_sbuf, M_BPF);
		if (d->bd_hbuf != NULL)
			kfree(d->bd_hbuf, M_BPF);
		if (d->bd_fbuf != NULL)
			kfree(d->bd_fbuf, M_BPF);
	}
	if (d->bd_rfilter)
		kfree(d->bd_rfilter, M_BPF);
	if (d->bd_wfilter)
		kfree(d->bd_wfilter, M_BPF);
}

/*
 * Attach an interface to bpf.
ifp is a pointer to the structure 1437 * defining the interface to be attached, dlt is the link layer type, 1438 * and hdrlen is the fixed size of the link header (variable length 1439 * headers are not yet supported). 1440 */ 1441 void 1442 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) 1443 { 1444 bpfattach_dlt(ifp, dlt, hdrlen, &ifp->if_bpf); 1445 } 1446 1447 void 1448 bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) 1449 { 1450 struct bpf_if *bp; 1451 1452 bp = kmalloc(sizeof *bp, M_BPF, M_WAITOK | M_ZERO); 1453 1454 lwkt_gettoken(&bpf_token); 1455 1456 SLIST_INIT(&bp->bif_dlist); 1457 bp->bif_ifp = ifp; 1458 bp->bif_dlt = dlt; 1459 bp->bif_driverp = driverp; 1460 *bp->bif_driverp = NULL; 1461 1462 bp->bif_next = bpf_iflist; 1463 bpf_iflist = bp; 1464 1465 /* 1466 * Compute the length of the bpf header. This is not necessarily 1467 * equal to SIZEOF_BPF_HDR because we want to insert spacing such 1468 * that the network layer header begins on a longword boundary (for 1469 * performance reasons and to alleviate alignment restrictions). 1470 */ 1471 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; 1472 1473 lwkt_reltoken(&bpf_token); 1474 1475 if (bootverbose) 1476 if_printf(ifp, "bpf attached\n"); 1477 } 1478 1479 /* 1480 * Detach bpf from an interface. This involves detaching each descriptor 1481 * associated with the interface, and leaving bd_bif NULL. Notify each 1482 * descriptor as it's detached so that any sleepers wake up and get 1483 * ENXIO. 
1484 */ 1485 void 1486 bpfdetach(struct ifnet *ifp) 1487 { 1488 struct bpf_if *bp, *bp_prev; 1489 struct bpf_d *d; 1490 1491 lwkt_gettoken(&bpf_token); 1492 1493 /* Locate BPF interface information */ 1494 bp_prev = NULL; 1495 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1496 if (ifp == bp->bif_ifp) 1497 break; 1498 bp_prev = bp; 1499 } 1500 1501 /* Interface wasn't attached */ 1502 if (bp->bif_ifp == NULL) { 1503 lwkt_reltoken(&bpf_token); 1504 kprintf("bpfdetach: %s was not attached\n", ifp->if_xname); 1505 return; 1506 } 1507 1508 while ((d = SLIST_FIRST(&bp->bif_dlist)) != NULL) { 1509 bpf_detachd(d); 1510 bpf_wakeup(d); 1511 } 1512 1513 if (bp_prev != NULL) 1514 bp_prev->bif_next = bp->bif_next; 1515 else 1516 bpf_iflist = bp->bif_next; 1517 1518 kfree(bp, M_BPF); 1519 1520 lwkt_reltoken(&bpf_token); 1521 } 1522 1523 /* 1524 * Get a list of available data link type of the interface. 1525 */ 1526 static int 1527 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl) 1528 { 1529 int n, error; 1530 struct ifnet *ifp; 1531 struct bpf_if *bp; 1532 1533 ifp = d->bd_bif->bif_ifp; 1534 n = 0; 1535 error = 0; 1536 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1537 if (bp->bif_ifp != ifp) 1538 continue; 1539 if (bfl->bfl_list != NULL) { 1540 if (n >= bfl->bfl_len) { 1541 return (ENOMEM); 1542 } 1543 error = copyout(&bp->bif_dlt, 1544 bfl->bfl_list + n, sizeof(u_int)); 1545 } 1546 n++; 1547 } 1548 bfl->bfl_len = n; 1549 return(error); 1550 } 1551 1552 /* 1553 * Set the data link type of a BPF instance. 
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	/* Already using the requested DLT: nothing to do. */
	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;
	/*
	 * Look for an attachment point on the same interface that
	 * provides the requested DLT.
	 */
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	if (bp != NULL) {
		opromisc = d->bd_promisc;
		/* Move the descriptor; this resets its buffers and state. */
		bpf_detachd(d);
		bpf_attachd(d, bp);
		bpf_resetd(d);
		/*
		 * Re-enable promiscuous mode if it was on before the
		 * move (bpf_detachd dropped it).
		 */
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error) {
				if_printf(bp->bif_ifp,
				    "bpf_setdlt: ifpromisc failed (%d)\n",
				    error);
			} else {
				d->bd_promisc = 1;
			}
		}
	}
	/* EINVAL when the interface has no attachment with that DLT. */
	return(bp == NULL ? EINVAL : 0);
}

/* Acquire the global bpf serializing token (for external callers). */
void
bpf_gettoken(void)
{
	lwkt_gettoken(&bpf_token);
}

/* Release the global bpf serializing token. */
void
bpf_reltoken(void)
{
	lwkt_reltoken(&bpf_token);
}

/*
 * Driver init: register the autoclone handler for /dev/bpf and
 * pre-create a fixed number of /dev/bpf%d units, marking their
 * minors used in the clone bitmap.
 */
static void
bpf_drvinit(void *unused)
{
	int i;

	make_autoclone_dev(&bpf_ops, &DEVFS_CLONE_BITMAP(bpf),
		bpfclone, 0, 0, 0600, "bpf");
	for (i = 0; i < BPF_PREALLOCATED_UNITS; i++) {
		make_dev(&bpf_ops, i, 0, 0, 0600, "bpf%d", i);
		devfs_clone_bitmap_set(&DEVFS_CLONE_BITMAP(bpf), i);
	}
}

/* Driver uninit: tear down clone handler, dev ops, and the bitmap. */
static void
bpf_drvuninit(void *unused)
{
	devfs_clone_handler_del("bpf");
	dev_ops_remove_all(&bpf_ops);
	devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(bpf));
}

SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
SYSUNINIT(bpfdev, SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvuninit, NULL);

#else /* !BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */

/* Stub: discard tap of a linear buffer. */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}

/* Stub: discard tap of an mbuf chain. */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}

/* Stub: discard tap of an mbuf chain with a prepended header. */
void
bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
{
}

/* Stub: attaching an interface is a no-op without BPF. */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
}

/* Stub: attaching with an explicit driver hook is a no-op. */
void
bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
}

/* Stub: detaching an interface is a no-op without BPF. */
void
bpfdetach(struct ifnet *ifp)
{
}

/*
 * Stub filter: -1 converted to u_int is the maximum snap length,
 * i.e. "accept the entire packet".
 */
u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
	return -1;	/* "no filter" behaviour */
}

/* Stub: no token to acquire without BPF. */
void
bpf_gettoken(void)
{
}

/* Stub: no token to release without BPF. */
void
bpf_reltoken(void)
{
}

#endif /* !BPF */