1 /* 2 * Copyright (c) 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from the Stanford/CMU enet packet filter, 6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8 * Berkeley Laboratory. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 *
 * @(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.12 2002/04/14 21:41:48 luigi Exp $
 */

#include "use_bpf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>

#include <sys/event.h>

#include <sys/socket.h>
#include <sys/vnode.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <netproto/802_11/ieee80211_dragonfly.h>

#include <sys/devfs.h>

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
DEVFS_DEFINE_CLONE_BITMAP(bpf);

/*
 * Number of /dev/bpfN units preallocated at attach time; cloned units
 * beyond this range are created on demand and destroyed on last close
 * (see bpfclone()/bpfclose()).
 */
#if NBPF <= 1
#define BPF_PREALLOCATED_UNITS	4
#else
#define BPF_PREALLOCATED_UNITS	NBPF
#endif

#if NBPF > 0

/*
 * Message used to hand a packet written through bpfwrite() over to the
 * netisr0 thread for transmission (dispatched to bpf_output_dispatch()).
 */
struct netmsg_bpf_output {
	struct netmsg_base base;	/* lwkt netmsg header */
	struct mbuf *nm_mbuf;		/* packet to transmit (consumed) */
	struct ifnet *nm_ifp;		/* interface to transmit on */
	struct sockaddr *nm_dst;	/* link-level destination */
	boolean_t nm_feedback;		/* also loop a copy back to input */
};

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_DEFAULTBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
    &bpf_bufsize, 0, "Current size of bpf buffer");
int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
    &bpf_maxbufsize, 0, "Maximum size of bpf buffer");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 */
static struct bpf_if *bpf_iflist;

/* Token serializing access to all bpf descriptor and interface state. */
static struct lwkt_token bpf_token = LWKT_TOKEN_INITIALIZER(bpf_token);

static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
static void	bpf_detachd(struct bpf_d *d);
static void	bpf_resetd(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static void	bpf_mcopy(volatile const void *, volatile void *, size_t);
static int	bpf_movein(struct uio *, int, struct mbuf **,
		    struct sockaddr *, int *, struct bpf_insn *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static void	bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
		    void (*)(volatile const void *,
			     volatile void *, size_t),
		    const struct timeval *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	bpf_drvinit(void *unused);
static void	bpf_filter_detach(struct knote *kn);
static int	bpf_filter_read(struct knote *kn, long hint);

static d_open_t		bpfopen;
static d_clone_t	bpfclone;
static d_close_t	bpfclose;
static d_read_t		bpfread;
static d_write_t	bpfwrite;
static d_ioctl_t	bpfioctl;
static d_kqfilter_t	bpfkqfilter;

#define CDEV_MAJOR 23
static struct dev_ops bpf_ops = {
	{ "bpf", 0, D_MPSAFE },
	.d_open =	bpfopen,
	.d_close =	bpfclose,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_kqfilter =	bpfkqfilter
};

/*
 * Copy a packet written from userland (uio) into a newly allocated mbuf
 * and construct a link-level sockaddr appropriate for the interface's
 * data link type.  On success *mp holds the mbuf (with any link header
 * stripped into sockp->sa_data) and *datlen the payload length.
 * Returns EIO for an unsupported linktype or oversized packet, EPERM if
 * the packet is shorter than the link header or is rejected by the
 * write filter, or an errno from uiomove().
 */
static int
bpf_movein(struct uio *uio, int linktype, struct mbuf **mp,
	   struct sockaddr *sockp, int *datlen, struct bpf_insn *wfilter)
{
	const struct ieee80211_bpf_params *p;
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	*datlen = 0;
	*mp = NULL;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {
	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sockp->sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
		sockp->sa_family = AF_IEEE80211;
		sockp->sa_len = 12;	/* XXX != 0 */
		hlen = sizeof(struct ieee80211_bpf_params);
		break;

	default:
		return(EIO);
	}

	len = uio->uio_resid;
	/* NOTE: may go momentarily negative if len < hlen; the short
	 * packet is rejected below via the m_len < hlen check. */
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return(EIO);

	m = m_getl(len, M_WAITOK, MT_DATA, M_PKTHDR, NULL);
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	/* Reject packets too short to hold the link-level header. */
	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	/* Run the write filter; a return of 0 means "drop". */
	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/*
	 * Make room for link header, and copy it to sockaddr.
	 */
	if (hlen != 0) {
		if (sockp->sa_family == AF_IEEE80211) {
			/*
			 * Collect true length from the parameter header
			 * NB: sockp is known to be zero'd so if we do a
			 *     short copy unspecified parameters will be
			 *     zero.
			 * NB: packet may not be aligned after stripping
			 *     bpf params
			 * XXX check ibp_vers
			 */
			p = mtod(m, const struct ieee80211_bpf_params *);
			hlen = p->ibp_len;
			if (hlen > sizeof(sockp->sa_data)) {
				error = EINVAL;
				goto bad;
			}
		}
		bcopy(m->m_data, sockp->sa_data, hlen);
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
		m->m_data += hlen; /* XXX */
	}
	return (0);
bad:
	m_freem(m);
	return(error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
284 * Must be called at splimp. 285 */ 286 static void 287 bpf_attachd(struct bpf_d *d, struct bpf_if *bp) 288 { 289 /* 290 * Point d at bp, and add d to the interface's list of listeners. 291 * Finally, point the driver's bpf cookie at the interface so 292 * it will divert packets to bpf. 293 */ 294 lwkt_gettoken(&bpf_token); 295 d->bd_bif = bp; 296 SLIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next); 297 *bp->bif_driverp = bp; 298 299 EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1); 300 lwkt_reltoken(&bpf_token); 301 } 302 303 /* 304 * Detach a file from its interface. 305 */ 306 static void 307 bpf_detachd(struct bpf_d *d) 308 { 309 int error; 310 struct bpf_if *bp; 311 struct ifnet *ifp; 312 313 lwkt_gettoken(&bpf_token); 314 bp = d->bd_bif; 315 ifp = bp->bif_ifp; 316 317 /* Remove d from the interface's descriptor list. */ 318 SLIST_REMOVE(&bp->bif_dlist, d, bpf_d, bd_next); 319 320 if (SLIST_EMPTY(&bp->bif_dlist)) { 321 /* 322 * Let the driver know that there are no more listeners. 323 */ 324 *bp->bif_driverp = NULL; 325 } 326 d->bd_bif = NULL; 327 328 EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0); 329 330 /* 331 * Check if this descriptor had requested promiscuous mode. 332 * If so, turn it off. 333 */ 334 if (d->bd_promisc) { 335 d->bd_promisc = 0; 336 error = ifpromisc(ifp, 0); 337 if (error != 0 && error != ENXIO) { 338 /* 339 * ENXIO can happen if a pccard is unplugged, 340 * Something is really wrong if we were able to put 341 * the driver into promiscuous mode, but can't 342 * take it out. 343 */ 344 if_printf(ifp, "bpf_detach: ifpromisc failed(%d)\n", 345 error); 346 } 347 } 348 lwkt_reltoken(&bpf_token); 349 } 350 351 /* 352 * Open ethernet device. Returns ENXIO for illegal minor device number, 353 * EBUSY if file is open by another process. 
354 */ 355 /* ARGSUSED */ 356 static int 357 bpfopen(struct dev_open_args *ap) 358 { 359 cdev_t dev = ap->a_head.a_dev; 360 struct bpf_d *d; 361 362 lwkt_gettoken(&bpf_token); 363 if (ap->a_cred->cr_prison) { 364 lwkt_reltoken(&bpf_token); 365 return(EPERM); 366 } 367 368 d = dev->si_drv1; 369 /* 370 * Each minor can be opened by only one process. If the requested 371 * minor is in use, return EBUSY. 372 */ 373 if (d != NULL) { 374 lwkt_reltoken(&bpf_token); 375 return(EBUSY); 376 } 377 378 d = kmalloc(sizeof *d, M_BPF, M_WAITOK | M_ZERO); 379 dev->si_drv1 = d; 380 d->bd_bufsize = bpf_bufsize; 381 d->bd_sig = SIGIO; 382 d->bd_seesent = 1; 383 d->bd_feedback = 0; 384 callout_init(&d->bd_callout); 385 lwkt_reltoken(&bpf_token); 386 387 return(0); 388 } 389 390 static int 391 bpfclone(struct dev_clone_args *ap) 392 { 393 int unit; 394 395 unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(bpf), 0); 396 ap->a_dev = make_only_dev(&bpf_ops, unit, 0, 0, 0600, "bpf%d", unit); 397 398 return 0; 399 } 400 401 /* 402 * Close the descriptor by detaching it from its interface, 403 * deallocating its buffers, and marking it free. 404 */ 405 /* ARGSUSED */ 406 static int 407 bpfclose(struct dev_close_args *ap) 408 { 409 cdev_t dev = ap->a_head.a_dev; 410 struct bpf_d *d = dev->si_drv1; 411 int unit; 412 413 lwkt_gettoken(&bpf_token); 414 funsetown(&d->bd_sigio); 415 if (d->bd_state == BPF_WAITING) 416 callout_stop(&d->bd_callout); 417 d->bd_state = BPF_IDLE; 418 if (d->bd_bif != NULL) 419 bpf_detachd(d); 420 bpf_freed(d); 421 dev->si_drv1 = NULL; 422 423 unit = dev->si_uminor; 424 if (unit >= BPF_PREALLOCATED_UNITS) { 425 destroy_dev(dev); 426 devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(bpf), unit); 427 } 428 kfree(d, M_BPF); 429 lwkt_reltoken(&bpf_token); 430 431 return(0); 432 } 433 434 /* 435 * Rotate the packet buffers in descriptor d. Move the store buffer 436 * into the hold slot, and the free buffer into the store slot. 437 * Zero the length of the new store buffer. 
 */
/* NOTE: multi-statement macro; only used as a full statement below. */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL;
/*
 * bpfread - read next chunk of packets from buffers
 *
 * Returns EINVAL if the user buffer is not exactly bd_bufsize bytes,
 * ENXIO if the descriptor lost its interface while waiting,
 * EWOULDBLOCK for a non-blocking read with no held data, or the result
 * of uiomove() once a hold buffer is available.
 */
static int
bpfread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;

	lwkt_gettoken(&bpf_token);
	/*
	 * Restrict application to use a buffer the same size as
	 * as kernel buffers.
	 */
	if (ap->a_uio->uio_resid != d->bd_bufsize) {
		lwkt_reltoken(&bpf_token);
		return(EINVAL);
	}

	/* Cancel any read timeout armed by bpf_filter_read(). */
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if ((d->bd_immediate || (ap->a_ioflag & IO_NDELAY) || timed_out)
		    && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous,
			 * We're in immediate mode, or are reading
			 * in non-blocking mode, and a packet(s)
			 * either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			lwkt_reltoken(&bpf_token);
			return(ENXIO);
		}

		if (ap->a_ioflag & IO_NDELAY) {
			lwkt_reltoken(&bpf_token);
			return(EWOULDBLOCK);
		}
		/* Sleep until woken by catchpacket()/bpf_wakeup() or timeout. */
		error = tsleep(d, PCATCH, "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			lwkt_reltoken(&bpf_token);
			return(error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				lwkt_reltoken(&bpf_token);
				return(0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, ap->a_uio);

	/* The drained hold buffer becomes the new free buffer. */
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	lwkt_reltoken(&bpf_token);

	return(error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
554 */ 555 static void 556 bpf_wakeup(struct bpf_d *d) 557 { 558 if (d->bd_state == BPF_WAITING) { 559 callout_stop(&d->bd_callout); 560 d->bd_state = BPF_IDLE; 561 } 562 wakeup(d); 563 if (d->bd_async && d->bd_sig && d->bd_sigio) 564 pgsigio(d->bd_sigio, d->bd_sig, 0); 565 566 KNOTE(&d->bd_kq.ki_note, 0); 567 } 568 569 static void 570 bpf_timed_out(void *arg) 571 { 572 struct bpf_d *d = (struct bpf_d *)arg; 573 574 if (d->bd_state == BPF_WAITING) { 575 d->bd_state = BPF_TIMED_OUT; 576 if (d->bd_slen != 0) 577 bpf_wakeup(d); 578 } 579 } 580 581 static void 582 bpf_output_dispatch(netmsg_t msg) 583 { 584 struct netmsg_bpf_output *bmsg = (struct netmsg_bpf_output *)msg; 585 struct ifnet *ifp = bmsg->nm_ifp; 586 struct mbuf *mc = NULL; 587 int error; 588 589 if (bmsg->nm_feedback) { 590 mc = m_dup(bmsg->nm_mbuf, M_NOWAIT); 591 if (mc != NULL) 592 mc->m_pkthdr.rcvif = ifp; 593 } 594 595 /* 596 * The driver frees the mbuf. 597 */ 598 error = ifp->if_output(ifp, bmsg->nm_mbuf, bmsg->nm_dst, NULL); 599 lwkt_replymsg(&msg->lmsg, error); 600 601 if (mc != NULL) { 602 if (error == 0) { 603 mc->m_flags &= ~M_HASH; 604 (*ifp->if_input)(ifp, mc, NULL, -1); 605 } else { 606 m_freem(mc); 607 } 608 } 609 } 610 611 static int 612 bpfwrite(struct dev_write_args *ap) 613 { 614 cdev_t dev = ap->a_head.a_dev; 615 struct bpf_d *d = dev->si_drv1; 616 struct ifnet *ifp; 617 struct mbuf *m; 618 int error, ret; 619 struct sockaddr dst; 620 int datlen; 621 struct netmsg_bpf_output bmsg; 622 623 lwkt_gettoken(&bpf_token); 624 if (d->bd_bif == NULL) { 625 lwkt_reltoken(&bpf_token); 626 return(ENXIO); 627 } 628 629 ifp = d->bd_bif->bif_ifp; 630 631 if (ap->a_uio->uio_resid == 0) { 632 lwkt_reltoken(&bpf_token); 633 return(0); 634 } 635 636 error = bpf_movein(ap->a_uio, (int)d->bd_bif->bif_dlt, &m, 637 &dst, &datlen, d->bd_wfilter); 638 if (error) { 639 lwkt_reltoken(&bpf_token); 640 return(error); 641 } 642 643 if (datlen > ifp->if_mtu) { 644 m_freem(m); 645 lwkt_reltoken(&bpf_token); 646 
return(EMSGSIZE); 647 } 648 649 if (d->bd_hdrcmplt) 650 dst.sa_family = pseudo_AF_HDRCMPLT; 651 652 netmsg_init(&bmsg.base, NULL, &curthread->td_msgport, 653 0, bpf_output_dispatch); 654 bmsg.nm_mbuf = m; 655 bmsg.nm_ifp = ifp; 656 bmsg.nm_dst = &dst; 657 658 if (d->bd_feedback) 659 bmsg.nm_feedback = TRUE; 660 else 661 bmsg.nm_feedback = FALSE; 662 663 ret = lwkt_domsg(netisr_cpuport(0), &bmsg.base.lmsg, 0); 664 665 lwkt_reltoken(&bpf_token); 666 667 return ret; 668 } 669 670 /* 671 * Reset a descriptor by flushing its packet buffer and clearing the 672 * receive and drop counts. Should be called at splimp. 673 */ 674 static void 675 bpf_resetd(struct bpf_d *d) 676 { 677 if (d->bd_hbuf) { 678 /* Free the hold buffer. */ 679 d->bd_fbuf = d->bd_hbuf; 680 d->bd_hbuf = NULL; 681 } 682 d->bd_slen = 0; 683 d->bd_hlen = 0; 684 d->bd_rcount = 0; 685 d->bd_dcount = 0; 686 } 687 688 /* 689 * FIONREAD Check for read packet available. 690 * SIOCGIFADDR Get interface address - convenient hook to driver. 691 * BIOCGBLEN Get buffer len [for read()]. 692 * BIOCSETF Set ethernet read filter. 693 * BIOCSETWF Set ethernet write filter. 694 * BIOCFLUSH Flush read packet buffer. 695 * BIOCPROMISC Put interface into promiscuous mode. 696 * BIOCGDLT Get link layer type. 697 * BIOCGETIF Get interface name. 698 * BIOCSETIF Set interface. 699 * BIOCSRTIMEOUT Set read timeout. 700 * BIOCGRTIMEOUT Get read timeout. 701 * BIOCGSTATS Get packet stats. 702 * BIOCIMMEDIATE Set immediate mode. 703 * BIOCVERSION Get filter language version. 704 * BIOCGHDRCMPLT Get "header already complete" flag 705 * BIOCSHDRCMPLT Set "header already complete" flag 706 * BIOCSFEEDBACK Set packet feedback mode. 707 * BIOCGFEEDBACK Get packet feedback mode. 
 * BIOCGSEESENT		Get "see packets sent" flag
 * BIOCSSEESENT		Set "see packets sent" flag
 * BIOCLOCK		Set "locked" flag
 */
/* ARGSUSED */
static int
bpfioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	int error = 0;

	lwkt_gettoken(&bpf_token);
	/* Any ioctl cancels a pending read timeout. */
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;

	/*
	 * Once BIOCLOCK has been issued, only the read-only/harmless
	 * subset of commands below is permitted; everything else fails
	 * with EPERM.
	 */
	if (d->bd_locked == 1) {
		switch (ap->a_cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
			break;
		default:
			lwkt_reltoken(&bpf_token);
			return (EPERM);
		}
	}
	switch (ap->a_cmd) {
	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;

			*(int *)ap->a_data = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == NULL) {
				error = EINVAL;
			} else {
				ifp = d->bd_bif->bif_ifp;
				ifnet_serialize_all(ifp);
				error = ifp->if_ioctl(ifp, ap->a_cmd,
						      ap->a_data, ap->a_cred);
				ifnet_deserialize_all(ifp);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)ap->a_data = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is
	 * attached (buffers are allocated at attach); the value is
	 * clamped to [BPF_MINBUFSIZE, bpf_maxbufsize] and the clamped
	 * value is copied back out to the caller.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != NULL) {
			error = EINVAL;
		} else {
			u_int size = *(u_int *)ap->a_data;

			if (size > bpf_maxbufsize)
				*(u_int *)ap->a_data = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)ap->a_data = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
	case BIOCSETWF:
		error = bpf_setf(d, (struct bpf_program *)ap->a_data,
				 ap->a_cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		bpf_resetd(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)ap->a_data = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			error = bpf_getdltlist(d,
				(struct bpf_dltlist *)ap->a_data);
		}
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)ap->a_data);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)ap->a_data;

			strlcpy(ifr->ifr_name, ifp->if_xname,
				sizeof ifr->ifr_name);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)ap->a_data);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)ap->a_data;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz_low(tv);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)ap->a_data;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * ustick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)ap->a_data;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)ap->a_data;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)ap->a_data;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)ap->a_data = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)ap->a_data ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)ap->a_data = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)ap->a_data;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)ap->a_data;
		break;

	/*
	 * Set "feed packets from bpf back to input" mode
	 */
	case BIOCSFEEDBACK:
		d->bd_feedback = *(int *)ap->a_data;
		break;

	/*
	 * Get "feed packets from bpf back to input" mode
	 */
	case BIOCGFEEDBACK:
		*(u_int *)ap->a_data = d->bd_feedback;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)ap->a_data, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)ap->a_data = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)ap->a_data), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)ap->a_data = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)ap->a_data;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)ap->a_data = d->bd_sig;
		break;
	case BIOCLOCK:
		/* One-way latch; see the permission filter above. */
		d->bd_locked = 1;
		break;
	}
	lwkt_reltoken(&bpf_token);

	return(error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;

	/* BIOCSETWF installs the write filter; anything else the read one. */
	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
	}
	if (fp->bf_insns == NULL) {
		/* NULL program with nonzero length is bogus. */
		if (fp->bf_len != 0)
			return(EINVAL);
		/* Clear the filter: accept everything. */
		if (wfilter)
			d->bd_wfilter = NULL;
		else
			d->bd_rfilter = NULL;
		bpf_resetd(d);
		if (old != NULL)
			kfree(old, M_BPF);
		return(0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return(EINVAL);

	/* Copy the program in from userland and validate it before use. */
	size = flen * sizeof *fp->bf_insns;
	fcode = (struct bpf_insn *)kmalloc(size, M_BPF, M_WAITOK);
	if (copyin(fp->bf_insns, fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		if (wfilter)
			d->bd_wfilter = fcode;
		else
			d->bd_rfilter = fcode;
		bpf_resetd(d);
		if (old != NULL)
			kfree(old, M_BPF);

		return(0);
	}
	kfree(fcode, M_BPF);
	return(EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	int error;
	struct ifnet *theywant;

	ifnet_lock();

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL) {
		ifnet_unlock();
		return(ENXIO);
	}

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == NULL || ifp != theywant)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != &ifp->if_bpf)
			continue;
		/*
		 * We found the requested interface.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if (d->bd_sbuf == NULL) {
			error = bpf_allocbufs(d);
			if (error != 0) {
				ifnet_unlock();
				return(error);
			}
		}
		if (bp != d->bd_bif) {
			if (d->bd_bif != NULL) {
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);
			}

			bpf_attachd(d, bp);
		}
		bpf_resetd(d);

		ifnet_unlock();
		return(0);
	}

	ifnet_unlock();

	/* Not found. */
	return(ENXIO);
}

static struct filterops bpf_read_filtops =
	{ FILTEROP_ISFD, NULL, bpf_filter_detach, bpf_filter_read };

/*
 * kqueue attach: only EVFILT_READ is supported.
 * NOTE(review): with no interface attached, a_result is set to 1 with
 * no filter registered — looks like "always ready"; confirm intent.
 */
static int
bpfkqfilter(struct dev_kqfilter_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct knote *kn = ap->a_kn;
	struct klist *klist;
	struct bpf_d *d;

	lwkt_gettoken(&bpf_token);
	d = dev->si_drv1;
	if (d->bd_bif == NULL) {
		ap->a_result = 1;
		lwkt_reltoken(&bpf_token);
		return (0);
	}

	ap->a_result = 0;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &bpf_read_filtops;
		kn->kn_hook = (caddr_t)d;
		break;
	default:
		ap->a_result = EOPNOTSUPP;
		lwkt_reltoken(&bpf_token);
		return (0);
	}

	klist = &d->bd_kq.ki_note;
	knote_insert(klist, kn);
	lwkt_reltoken(&bpf_token);

	return (0);
}

/*
 * kqueue detach: remove the knote from the descriptor's note list.
 */
static void
bpf_filter_detach(struct knote *kn)
{
	struct klist *klist;
	struct bpf_d *d;

	d = (struct bpf_d *)kn->kn_hook;
	klist = &d->bd_kq.ki_note;
	knote_remove(klist, kn);
}

/*
 * kqueue read filter: ready when the hold buffer has data, or when the
 * store buffer has data in immediate/timed-out mode.  Otherwise arm the
 * read timeout (fired by bpf_timed_out()) if one is configured.
 */
static int
bpf_filter_read(struct knote *kn, long hint)
{
	struct bpf_d *d;
	int ready = 0;

	d = (struct bpf_d *)kn->kn_hook;
	if (d->bd_hlen != 0 ||
	    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
	    d->bd_slen != 0)) {
		ready = 1;
	} else {
		/* Start the read timeout if necessary. */
		if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
			callout_reset(&d->bd_callout, d->bd_rtout,
			    bpf_timed_out, d);
			d->bd_state = BPF_WAITING;
		}
	}

	return (ready);
}


/*
 * Process the packet pkt of length pktlen.  The packet is parsed
 * by each listener's filter, and if accepted, stashed into the
 * corresponding buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bpf_d *d;
	struct timeval tv;
	int gottime = 0;
	u_int slen;

	lwkt_gettoken(&bpf_token);
	/* Re-check */
	if (bp == NULL) {
		lwkt_reltoken(&bpf_token);
		return;
	}

	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			/* Timestamp lazily, once, only if someone matches. */
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			catchpacket(d, pkt, pktlen, slen, _bcopy, &tv);
		}
	}
	lwkt_reltoken(&bpf_token);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(volatile const void *src_arg, volatile void *dst_arg, size_t len)
{
	volatile const struct mbuf *m;
	u_int count;
	volatile u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == NULL)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Process the packet in the mbuf chain m.
 * The packet is parsed by each listener's filter, and if accepted,
 * stashed into the corresponding buffer.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bpf_d *d;
	u_int pktlen, slen;
	struct timeval tv;
	int gottime = 0;

	lwkt_gettoken(&bpf_token);
	/* Re-check */
	if (bp == NULL) {
		lwkt_reltoken(&bpf_token);
		return;
	}

	/* Don't compute pktlen, if no descriptor is attached. */
	if (SLIST_EMPTY(&bp->bif_dlist)) {
		lwkt_reltoken(&bpf_token);
		return;
	}

	pktlen = m_lengthm(m, NULL);

	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		/* Outbound packets (rcvif == NULL) are skipped unless
		 * the descriptor asked to see sent packets. */
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			/* Timestamp lazily, once, only if someone matches. */
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy,
				    &tv);
		}
	}
	lwkt_reltoken(&bpf_token);
}

/*
 * Incoming linkage from device drivers, where we have a mbuf chain
 * but need to prepend some arbitrary header from a linear buffer.
 *
 * Con up a minimal dummy header to pacify bpf.  Allocate (only) a
 * struct m_hdr on the stack.  This is safe as bpf only reads from the
 * fields in this header that we initialize, and will not try to free
 * it or keep a pointer to it.
 */
void
bpf_mtap_hdr(struct bpf_if *arg, caddr_t data, u_int dlen, struct mbuf *m,
	     u_int direction)
{
	/* NOTE: 'direction' is accepted but unused here. */
	struct m_hdr mh;

	mh.mh_flags = 0;
	mh.mh_next = m;
	mh.mh_len = dlen;
	mh.mh_data = data;

	bpf_mtap(arg, (struct mbuf *) &mh);
}

/*
 * Tap a packet prepending its address family (must not be AF_UNSPEC)
 * as a 4-byte pseudo header, as used by DLT_NULL style interfaces.
 */
void
bpf_mtap_family(struct bpf_if *bp, struct mbuf *m, sa_family_t family)
{
	u_int family4;

	KKASSERT(family != AF_UNSPEC);

	family4 = (u_int)family;
	bpf_ptap(bp, m, &family4, sizeof(family4));
}

/*
 * Process the packet in the mbuf chain m with the header in m prepended.
 * The packet is parsed by each listener's filter, and if accepted,
 * stashed into the corresponding buffer.
 */
void
bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
{
	struct mbuf mb;

	/*
	 * Craft on-stack mbuf suitable for passing to bpf_mtap.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = __DECONST(void *, data); /* LINTED */
	mb.m_len = dlen;
	mb.m_pkthdr.rcvif = m->m_pkthdr.rcvif;

	bpf_mtap(bp, &mb);
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
	    void (*cpfn)(volatile const void *, volatile void *, size_t),
	    const struct timeval *tv)
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	/* NOTE: local shadows the kernel wakeup() function name. */
	int wakeup = 0;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		wakeup = 1;
	}

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	hp->bh_tstamp = *tv;
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;

	if (wakeup)
		bpf_wakeup(d);
}

/*
 * Initialize all nonzero fields of a descriptor.
1471 */ 1472 static int 1473 bpf_allocbufs(struct bpf_d *d) 1474 { 1475 d->bd_fbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK); 1476 d->bd_sbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK); 1477 d->bd_slen = 0; 1478 d->bd_hlen = 0; 1479 return(0); 1480 } 1481 1482 /* 1483 * Free buffers and packet filter program currently in use by a descriptor. 1484 * Called on close. 1485 */ 1486 static void 1487 bpf_freed(struct bpf_d *d) 1488 { 1489 /* 1490 * We don't need to lock out interrupts since this descriptor has 1491 * been detached from its interface and it yet hasn't been marked 1492 * free. 1493 */ 1494 if (d->bd_sbuf != NULL) { 1495 kfree(d->bd_sbuf, M_BPF); 1496 if (d->bd_hbuf != NULL) 1497 kfree(d->bd_hbuf, M_BPF); 1498 if (d->bd_fbuf != NULL) 1499 kfree(d->bd_fbuf, M_BPF); 1500 } 1501 if (d->bd_rfilter) 1502 kfree(d->bd_rfilter, M_BPF); 1503 if (d->bd_wfilter) 1504 kfree(d->bd_wfilter, M_BPF); 1505 } 1506 1507 /* 1508 * Attach an interface to bpf. ifp is a pointer to the structure 1509 * defining the interface to be attached, dlt is the link layer type, 1510 * and hdrlen is the fixed size of the link header (variable length 1511 * headers are not yet supported). 1512 */ 1513 void 1514 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) 1515 { 1516 bpfattach_dlt(ifp, dlt, hdrlen, &ifp->if_bpf); 1517 } 1518 1519 void 1520 bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) 1521 { 1522 struct bpf_if *bp; 1523 1524 bp = kmalloc(sizeof *bp, M_BPF, M_WAITOK | M_ZERO); 1525 1526 lwkt_gettoken(&bpf_token); 1527 1528 SLIST_INIT(&bp->bif_dlist); 1529 bp->bif_ifp = ifp; 1530 bp->bif_dlt = dlt; 1531 bp->bif_driverp = driverp; 1532 *bp->bif_driverp = NULL; 1533 1534 bp->bif_next = bpf_iflist; 1535 bpf_iflist = bp; 1536 1537 /* 1538 * Compute the length of the bpf header. 
This is not necessarily 1539 * equal to SIZEOF_BPF_HDR because we want to insert spacing such 1540 * that the network layer header begins on a longword boundary (for 1541 * performance reasons and to alleviate alignment restrictions). 1542 */ 1543 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; 1544 1545 lwkt_reltoken(&bpf_token); 1546 1547 if (bootverbose) 1548 if_printf(ifp, "bpf attached\n"); 1549 } 1550 1551 /* 1552 * Detach bpf from an interface. This involves detaching each descriptor 1553 * associated with the interface, and leaving bd_bif NULL. Notify each 1554 * descriptor as it's detached so that any sleepers wake up and get 1555 * ENXIO. 1556 */ 1557 void 1558 bpfdetach(struct ifnet *ifp) 1559 { 1560 struct bpf_if *bp, *bp_prev; 1561 struct bpf_d *d; 1562 1563 lwkt_gettoken(&bpf_token); 1564 1565 /* Locate BPF interface information */ 1566 bp_prev = NULL; 1567 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1568 if (ifp == bp->bif_ifp) 1569 break; 1570 bp_prev = bp; 1571 } 1572 1573 /* Interface wasn't attached */ 1574 if (bp->bif_ifp == NULL) { 1575 lwkt_reltoken(&bpf_token); 1576 kprintf("bpfdetach: %s was not attached\n", ifp->if_xname); 1577 return; 1578 } 1579 1580 while ((d = SLIST_FIRST(&bp->bif_dlist)) != NULL) { 1581 bpf_detachd(d); 1582 bpf_wakeup(d); 1583 } 1584 1585 if (bp_prev != NULL) 1586 bp_prev->bif_next = bp->bif_next; 1587 else 1588 bpf_iflist = bp->bif_next; 1589 1590 kfree(bp, M_BPF); 1591 1592 lwkt_reltoken(&bpf_token); 1593 } 1594 1595 /* 1596 * Get a list of available data link type of the interface. 
1597 */ 1598 static int 1599 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl) 1600 { 1601 int n, error; 1602 struct ifnet *ifp; 1603 struct bpf_if *bp; 1604 1605 ifp = d->bd_bif->bif_ifp; 1606 n = 0; 1607 error = 0; 1608 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1609 if (bp->bif_ifp != ifp) 1610 continue; 1611 if (bfl->bfl_list != NULL) { 1612 if (n >= bfl->bfl_len) { 1613 return (ENOMEM); 1614 } 1615 error = copyout(&bp->bif_dlt, 1616 bfl->bfl_list + n, sizeof(u_int)); 1617 } 1618 n++; 1619 } 1620 bfl->bfl_len = n; 1621 return(error); 1622 } 1623 1624 /* 1625 * Set the data link type of a BPF instance. 1626 */ 1627 static int 1628 bpf_setdlt(struct bpf_d *d, u_int dlt) 1629 { 1630 int error, opromisc; 1631 struct ifnet *ifp; 1632 struct bpf_if *bp; 1633 1634 if (d->bd_bif->bif_dlt == dlt) 1635 return (0); 1636 ifp = d->bd_bif->bif_ifp; 1637 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1638 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) 1639 break; 1640 } 1641 if (bp != NULL) { 1642 opromisc = d->bd_promisc; 1643 bpf_detachd(d); 1644 bpf_attachd(d, bp); 1645 bpf_resetd(d); 1646 if (opromisc) { 1647 error = ifpromisc(bp->bif_ifp, 1); 1648 if (error) { 1649 if_printf(bp->bif_ifp, 1650 "bpf_setdlt: ifpromisc failed (%d)\n", 1651 error); 1652 } else { 1653 d->bd_promisc = 1; 1654 } 1655 } 1656 } 1657 return(bp == NULL ? 
EINVAL : 0); 1658 } 1659 1660 void 1661 bpf_gettoken(void) 1662 { 1663 lwkt_gettoken(&bpf_token); 1664 } 1665 1666 void 1667 bpf_reltoken(void) 1668 { 1669 lwkt_reltoken(&bpf_token); 1670 } 1671 1672 static void 1673 bpf_drvinit(void *unused) 1674 { 1675 int i; 1676 1677 make_autoclone_dev(&bpf_ops, &DEVFS_CLONE_BITMAP(bpf), 1678 bpfclone, 0, 0, 0600, "bpf"); 1679 for (i = 0; i < BPF_PREALLOCATED_UNITS; i++) { 1680 make_dev(&bpf_ops, i, 0, 0, 0600, "bpf%d", i); 1681 devfs_clone_bitmap_set(&DEVFS_CLONE_BITMAP(bpf), i); 1682 } 1683 } 1684 1685 static void 1686 bpf_drvuninit(void *unused) 1687 { 1688 devfs_clone_handler_del("bpf"); 1689 dev_ops_remove_all(&bpf_ops); 1690 devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(bpf)); 1691 } 1692 1693 SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE+CDEV_MAJOR, bpf_drvinit, NULL); 1694 SYSUNINIT(bpfdev, SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvuninit, NULL); 1695 1696 #else /* !BPF */ 1697 /* 1698 * NOP stubs to allow bpf-using drivers to load and function. 1699 * 1700 * A 'better' implementation would allow the core bpf functionality 1701 * to be loaded at runtime. 1702 */ 1703 1704 void 1705 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) 1706 { 1707 } 1708 1709 void 1710 bpf_mtap(struct bpf_if *bp, struct mbuf *m) 1711 { 1712 } 1713 1714 void 1715 bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen) 1716 { 1717 } 1718 1719 void 1720 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) 1721 { 1722 } 1723 1724 void 1725 bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) 1726 { 1727 } 1728 1729 void 1730 bpfdetach(struct ifnet *ifp) 1731 { 1732 } 1733 1734 u_int 1735 bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen) 1736 { 1737 return -1; /* "no filter" behaviour */ 1738 } 1739 1740 void 1741 bpf_gettoken(void) 1742 { 1743 } 1744 1745 void 1746 bpf_reltoken(void) 1747 { 1748 } 1749 1750 #endif /* !BPF */ 1751