1 /* 2 * Copyright (c) 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from the Stanford/CMU enet packet filter, 6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8 * Berkeley Laboratory. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * @(#)bpf.c 8.2 (Berkeley) 3/28/94 35 * 36 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.12 2002/04/14 21:41:48 luigi Exp $ 37 */ 38 39 #include "use_bpf.h" 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/conf.h> 44 #include <sys/device.h> 45 #include <sys/malloc.h> 46 #include <sys/mbuf.h> 47 #include <sys/time.h> 48 #include <sys/proc.h> 49 #include <sys/signalvar.h> 50 #include <sys/filio.h> 51 #include <sys/sockio.h> 52 #include <sys/ttycom.h> 53 #include <sys/filedesc.h> 54 55 #include <sys/event.h> 56 57 #include <sys/socket.h> 58 #include <sys/vnode.h> 59 60 #include <net/if.h> 61 #include <net/bpf.h> 62 #include <net/bpfdesc.h> 63 #include <net/netmsg2.h> 64 #include <net/netisr2.h> 65 66 #include <netinet/in.h> 67 #include <netinet/if_ether.h> 68 #include <sys/kernel.h> 69 #include <sys/sysctl.h> 70 71 #include <netproto/802_11/ieee80211_dragonfly.h> 72 73 #include <sys/devfs.h> 74 75 struct netmsg_bpf_output { 76 struct netmsg_base base; 77 struct mbuf *nm_mbuf; 78 struct ifnet *nm_ifp; 79 struct sockaddr *nm_dst; 80 boolean_t nm_feedback; 81 }; 82 83 MALLOC_DEFINE(M_BPF, "BPF", "BPF data"); 84 DEVFS_DEFINE_CLONE_BITMAP(bpf); 85 86 #if NBPF <= 1 87 #define BPF_PREALLOCATED_UNITS 4 88 #else 89 #define BPF_PREALLOCATED_UNITS NBPF 90 #endif 91 92 #if NBPF > 0 93 94 /* 95 * The default read buffer size is patchable. 
 */
static int bpf_bufsize = BPF_DEFAULTBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
    &bpf_bufsize, 0, "Current size of bpf buffer");
/* Upper bound a descriptor may set via BIOCSBLEN (non-static: shared). */
int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
    &bpf_maxbufsize, 0, "Maximum size of bpf buffer");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 */
static struct bpf_if *bpf_iflist;

/* Serializes access to bpf_iflist and every descriptor's state. */
static struct lwkt_token bpf_token = LWKT_TOKEN_INITIALIZER(bpf_token);

static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
static void	bpf_detachd(struct bpf_d *d);
static void	bpf_resetd(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static void	bpf_mcopy(volatile const void *, volatile void *, size_t);
static int	bpf_movein(struct uio *, int, struct mbuf **,
		    struct sockaddr *, int *, struct bpf_insn *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static void	bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
		    void (*)(volatile const void *,
			     volatile void *, size_t),
		    const struct timeval *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	bpf_drvinit(void *unused);
static void	bpf_filter_detach(struct knote *kn);
static int	bpf_filter_read(struct knote *kn, long hint);

static d_open_t		bpfopen;
static d_clone_t	bpfclone;
static d_close_t	bpfclose;
static d_read_t		bpfread;
static d_write_t	bpfwrite;
static d_ioctl_t	bpfioctl;
static d_kqfilter_t	bpfkqfilter;

#define CDEV_MAJOR 23
static struct dev_ops bpf_ops = {
	{ "bpf", 0, D_MPSAFE },
	.d_open =	bpfopen,
	.d_close =	bpfclose,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_kqfilter =	bpfkqfilter
};


/*
 * Copy a packet written by userland (uio) into a freshly allocated
 * mbuf, run the descriptor's write filter over it, and build a
 * sockaddr for the given link type.  The link-level header (if any)
 * is copied into sockp->sa_data and stripped from the mbuf.
 *
 * On success returns 0 with the mbuf in *mp and the payload length
 * (uio length minus link header) in *datlen; on failure returns an
 * errno and frees any allocated mbuf (*mp is left NULL on the early
 * error paths, and callers must not use it when an error is returned).
 */
static int
bpf_movein(struct uio *uio, int linktype, struct mbuf **mp,
	   struct sockaddr *sockp, int *datlen, struct bpf_insn *wfilter)
{
	const struct ieee80211_bpf_params *p;
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	*datlen = 0;
	*mp = NULL;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {
	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sockp->sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
		sockp->sa_family = AF_IEEE80211;
		sockp->sa_len = 12;	/* XXX != 0 */
		hlen = sizeof(struct ieee80211_bpf_params);
		break;

	default:
		return(EIO);
	}

	len = uio->uio_resid;
	/*
	 * NOTE(review): *datlen is computed here with the table-derived
	 * hlen; for DLT_IEEE802_11_RADIO the real header length is read
	 * from the packet's parameter header further down, and *datlen
	 * is not recomputed -- confirm callers only use it for the MTU
	 * check in bpfwrite().
	 */
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return(EIO);

	m = m_getl(len, M_WAITOK, MT_DATA, M_PKTHDR, NULL);
	if (m == NULL)
		return(ENOBUFS);
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	/* Packet must at least contain the full link header. */
	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	/* Write filter rejected the packet (historical errno is EPERM). */
	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/*
	 * Make room for link header, and copy it to sockaddr.
	 */
	if (hlen != 0) {
		if (sockp->sa_family == AF_IEEE80211) {
			/*
			 * Collect true length from the parameter header
			 * NB: sockp is known to be zero'd so if we do a
			 *     short copy unspecified parameters will be
			 *     zero.
			 * NB: packet may not be aligned after stripping
			 *     bpf params
			 * XXX check ibp_vers
			 */
			p = mtod(m, const struct ieee80211_bpf_params *);
			hlen = p->ibp_len;
			if (hlen > sizeof(sockp->sa_data)) {
				error = EINVAL;
				goto bad;
			}
		}
		bcopy(m->m_data, sockp->sa_data, hlen);
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
		m->m_data += hlen; /* XXX */
	}
	return (0);
bad:
	m_freem(m);
	return(error);
}

/*
 * Attach file to the bpf interface, i.e.
 make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	lwkt_gettoken(&bpf_token);
	d->bd_bif = bp;
	SLIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
	*bp->bif_driverp = bp;

	/* Tell interested parties (e.g. drivers) a listener arrived. */
	EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
	lwkt_reltoken(&bpf_token);
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	lwkt_gettoken(&bpf_token);
	bp = d->bd_bif;
	ifp = bp->bif_ifp;

	/* Remove d from the interface's descriptor list. */
	SLIST_REMOVE(&bp->bif_dlist, d, bpf_d, bd_next);

	if (SLIST_EMPTY(&bp->bif_dlist)) {
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*bp->bif_driverp = NULL;
	}
	d->bd_bif = NULL;

	EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged,
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(ifp, "bpf_detach: ifpromisc failed(%d)\n",
				  error);
		}
	}
	lwkt_reltoken(&bpf_token);
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d;

	lwkt_gettoken(&bpf_token);
	/* Jailed processes may not open bpf devices. */
	if (ap->a_cred->cr_prison) {
		lwkt_reltoken(&bpf_token);
		return(EPERM);
	}

	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d != NULL) {
		lwkt_reltoken(&bpf_token);
		return(EBUSY);
	}

	/* Allocate and initialize a fresh descriptor for this minor. */
	d = kmalloc(sizeof *d, M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;	/* by default see transmitted packets too */
	d->bd_feedback = 0;
	callout_init(&d->bd_callout);
	lwkt_reltoken(&bpf_token);

	return(0);
}

/*
 * devfs clone handler: hand out the next free /dev/bpf unit.
 */
static int
bpfclone(struct dev_clone_args *ap)
{
	int unit;

	unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(bpf), 0);
	ap->a_dev = make_only_dev(&bpf_ops, unit, 0, 0, 0600, "bpf%d", unit);

	return 0;
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpfclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;

	lwkt_gettoken(&bpf_token);
	funsetown(&d->bd_sigio);
	/* Cancel any pending read timeout before tearing down. */
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	if (d->bd_bif != NULL)
		bpf_detachd(d);
	bpf_freed(d);
	dev->si_drv1 = NULL;
	/* Cloned (non-preallocated) units are destroyed on last close. */
	if (dev->si_uminor >= BPF_PREALLOCATED_UNITS) {
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(bpf), dev->si_uminor);
		destroy_dev(dev);
	}
	kfree(d, M_BPF);
	lwkt_reltoken(&bpf_token);

	return(0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
/*
 * NOTE(review): multi-statement macro without do { } while (0); all
 * current call sites use it as a full statement, so this is safe here,
 * but it must not be used as the body of an unbraced if/else.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL;
/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpfread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;

	lwkt_gettoken(&bpf_token);
	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (ap->a_uio->uio_resid != d->bd_bufsize) {
		lwkt_reltoken(&bpf_token);
		return(EINVAL);
	}

	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if ((d->bd_immediate || (ap->a_ioflag & IO_NDELAY) || timed_out)
		    && d->bd_slen != 0) {
			/*
			 * We're in immediate mode, or are reading in
			 * non-blocking mode, or the timeout already
			 * expired, and a packet(s) either arrived since
			 * the previous read or arrived while we were
			 * asleep.  Rotate the buffers and return what's
			 * here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			lwkt_reltoken(&bpf_token);
			return(ENXIO);
		}

		if (ap->a_ioflag & IO_NDELAY) {
			lwkt_reltoken(&bpf_token);
			return(EWOULDBLOCK);
		}
		/* Sleep until packets arrive, signal, or read timeout. */
		error = tsleep(d, PCATCH, "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			lwkt_reltoken(&bpf_token);
			return(error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				lwkt_reltoken(&bpf_token);
				return(0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, ap->a_uio);

	/* Recycle the hold buffer as the new free buffer. */
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	lwkt_reltoken(&bpf_token);

	return(error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static void
bpf_wakeup(struct bpf_d *d)
{
	/* A wakeup cancels any pending read-timeout callout. */
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	/* Deliver SIGIO if async mode was requested via FIOASYNC. */
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

	KNOTE(&d->bd_kq.ki_note, 0);
}

/*
 * Callout handler for the BIOCSRTIMEOUT read timeout: flag the
 * descriptor as timed out and wake the reader if data is pending.
 */
static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
}

/*
 * Netisr-side handler for bpfwrite(): transmit the packet on the
 * interface and, in feedback mode, loop a copy back to the input path.
 */
static void
bpf_output_dispatch(netmsg_t msg)
{
	struct netmsg_bpf_output *bmsg = (struct netmsg_bpf_output *)msg;
	struct ifnet *ifp = bmsg->nm_ifp;
	struct mbuf *mc = NULL;
	int error;

	/* Duplicate the packet first; if_output consumes the original. */
	if (bmsg->nm_feedback) {
		mc = m_dup(bmsg->nm_mbuf, M_NOWAIT);
		if (mc != NULL)
			mc->m_pkthdr.rcvif = ifp;
	}

	/*
	 * The driver frees the mbuf.
	 */
	error = ifp->if_output(ifp, bmsg->nm_mbuf, bmsg->nm_dst, NULL);
	lwkt_replymsg(&msg->lmsg, error);

	/* Feed the copy back into the interface's input path on success. */
	if (mc != NULL) {
		if (error == 0) {
			mc->m_flags &= ~M_HASH;
			(*ifp->if_input)(ifp, mc, NULL, -1);
		} else {
			m_freem(mc);
		}
	}
}

/*
 * Write handler: copy the user packet into an mbuf, run the write
 * filter, then hand it to netisr0 for transmission on the bound
 * interface.  Returns ENXIO if no interface is bound.
 */
static int
bpfwrite(struct dev_write_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	struct ifnet *ifp;
	struct mbuf *m;
	int error, ret;
	struct sockaddr dst;
	int datlen;
	struct netmsg_bpf_output bmsg;

	lwkt_gettoken(&bpf_token);
	if (d->bd_bif == NULL) {
		lwkt_reltoken(&bpf_token);
		return(ENXIO);
	}

	ifp = d->bd_bif->bif_ifp;

	/* Zero-length writes succeed trivially. */
	if (ap->a_uio->uio_resid == 0) {
		lwkt_reltoken(&bpf_token);
		return(0);
	}

	error = bpf_movein(ap->a_uio, (int)d->bd_bif->bif_dlt, &m,
			   &dst, &datlen, d->bd_wfilter);
	if (error) {
		lwkt_reltoken(&bpf_token);
		return(error);
	}

	if (datlen > ifp->if_mtu) {
		m_freem(m);
		lwkt_reltoken(&bpf_token);
		return(EMSGSIZE);
	}

	/* "Header complete": don't let the output path rewrite the src. */
	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	/*
	 * NB: dst and bmsg live on this stack frame; this is safe only
	 * because lwkt_domsg() below is synchronous.
	 */
	netmsg_init(&bmsg.base, NULL, &curthread->td_msgport,
		    0, bpf_output_dispatch);
	bmsg.nm_mbuf = m;
	bmsg.nm_ifp = ifp;
	bmsg.nm_dst = &dst;

	if (d->bd_feedback)
		bmsg.nm_feedback = TRUE;
	else
		bmsg.nm_feedback = FALSE;

	ret = lwkt_domsg(netisr_cpuport(0), &bmsg.base.lmsg, 0);

	lwkt_reltoken(&bpf_token);

	return ret;
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
bpf_resetd(struct bpf_d *d)
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCSETWF		Set ethernet write filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCSFEEDBACK	Set packet feedback mode.
 *  BIOCGFEEDBACK	Get packet feedback mode.
 *  BIOCGSEESENT	Get "see packets sent" flag
 *  BIOCSSEESENT	Set "see packets sent" flag
 *  BIOCLOCK		Set "locked" flag
 */
/* ARGSUSED */
static int
bpfioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	int error = 0;

	lwkt_gettoken(&bpf_token);
	/* Any ioctl cancels a pending read timeout. */
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;

	/*
	 * Once BIOCLOCK has been issued, only this whitelist of
	 * (mostly read-only) commands remains permitted.
	 */
	if (d->bd_locked == 1) {
		switch (ap->a_cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
			break;
		default:
			lwkt_reltoken(&bpf_token);
			return (EPERM);
		}
	}
	switch (ap->a_cmd) {
	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;

			*(int *)ap->a_data = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == NULL) {
				error = EINVAL;
			} else {
				/* Pass straight through to the driver. */
				ifp = d->bd_bif->bif_ifp;
				ifnet_serialize_all(ifp);
				error = ifp->if_ioctl(ifp, ap->a_cmd,
						      ap->a_data, ap->a_cred);
				ifnet_deserialize_all(ifp);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)ap->a_data = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is
	 * bound (the buffers are allocated at BIOCSETIF time).
	 * The value is clamped to [BPF_MINBUFSIZE, bpf_maxbufsize]
	 * and the clamped value is reported back to the caller.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != NULL) {
			error = EINVAL;
		} else {
			u_int size = *(u_int *)ap->a_data;

			if (size > bpf_maxbufsize)
				*(u_int *)ap->a_data = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)ap->a_data = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
	case BIOCSETWF:
		error = bpf_setf(d, (struct bpf_program *)ap->a_data,
				 ap->a_cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		bpf_resetd(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)ap->a_data = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			error = bpf_getdltlist(d,
				(struct bpf_dltlist *)ap->a_data);
		}
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)ap->a_data);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)ap->a_data;

			strlcpy(ifr->ifr_name, ifp->if_xname,
				sizeof ifr->ifr_name);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)ap->a_data);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)ap->a_data;

			/*
			 * NOTE(review): the historical comment said
			 * "subtract 1 tick from tvtohz()", but no tick
			 * is subtracted here; tvtohz_low() is used
			 * directly.  Confirm this is intentional.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz_low(tv);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)ap->a_data;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * ustick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)ap->a_data;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)ap->a_data;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)ap->a_data;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)ap->a_data = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)ap->a_data ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)ap->a_data = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)ap->a_data;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)ap->a_data;
		break;

	/*
	 * Set "feed packets from bpf back to input" mode
	 */
	case BIOCSFEEDBACK:
		d->bd_feedback = *(int *)ap->a_data;
		break;

	/*
	 * Get "feed packets from bpf back to input" mode
	 */
	case BIOCGFEEDBACK:
		*(u_int *)ap->a_data = d->bd_feedback;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)ap->a_data, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)ap->a_data = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)ap->a_data), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)ap->a_data = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)ap->a_data;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)ap->a_data = d->bd_sig;
		break;
	case BIOCLOCK:
		d->bd_locked = 1;
		break;
	}
	lwkt_reltoken(&bpf_token);

	return(error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;

	/* BIOCSETWF installs the write filter, anything else the read one. */
	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
	}
	/* A NULL program with zero length removes the current filter. */
	if (fp->bf_insns == NULL) {
		if (fp->bf_len != 0)
			return(EINVAL);
		if (wfilter)
			d->bd_wfilter = NULL;
		else
			d->bd_rfilter = NULL;
		bpf_resetd(d);
		if (old != NULL)
			kfree(old, M_BPF);
		return(0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return(EINVAL);

	/* Copy the program in from userland and validate it. */
	size = flen * sizeof *fp->bf_insns;
	fcode = (struct bpf_insn *)kmalloc(size, M_BPF, M_WAITOK);
	if (copyin(fp->bf_insns, fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		if (wfilter)
			d->bd_wfilter = fcode;
		else
			d->bd_rfilter = fcode;
		bpf_resetd(d);
		if (old != NULL)
			kfree(old, M_BPF);

		return(0);
	}
	kfree(fcode, M_BPF);
	return(EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	int error;
	struct ifnet *theywant;

	ifnet_lock();

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL) {
		ifnet_unlock();
		return(ENXIO);
	}

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == NULL || ifp != theywant)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != &ifp->if_bpf)
			continue;
		/*
		 * We found the requested interface.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if (d->bd_sbuf == NULL) {
			error = bpf_allocbufs(d);
			if (error != 0) {
				ifnet_unlock();
				return(error);
			}
		}
		if (bp != d->bd_bif) {
			if (d->bd_bif != NULL) {
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);
			}

			bpf_attachd(d, bp);
		}
		bpf_resetd(d);

		ifnet_unlock();
		return(0);
	}

	ifnet_unlock();

	/* Not found. */
	return(ENXIO);
}

static struct filterops bpf_read_filtops =
	{ FILTEROP_ISFD, NULL, bpf_filter_detach, bpf_filter_read };

/*
 * kqueue attach handler: only EVFILT_READ is supported.
 */
static int
bpfkqfilter(struct dev_kqfilter_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct knote *kn = ap->a_kn;
	struct klist *klist;
	struct bpf_d *d;

	lwkt_gettoken(&bpf_token);
	d = dev->si_drv1;
	/* No interface bound: report immediately ready (reads fail ENXIO). */
	if (d->bd_bif == NULL) {
		ap->a_result = 1;
		lwkt_reltoken(&bpf_token);
		return (0);
	}

	ap->a_result = 0;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &bpf_read_filtops;
		kn->kn_hook = (caddr_t)d;
		break;
	default:
		ap->a_result = EOPNOTSUPP;
		lwkt_reltoken(&bpf_token);
		return (0);
	}

	klist = &d->bd_kq.ki_note;
	knote_insert(klist, kn);
	lwkt_reltoken(&bpf_token);

	return (0);
}

static void
bpf_filter_detach(struct knote *kn)
{
	struct klist *klist;
	struct bpf_d *d;

	d = (struct bpf_d *)kn->kn_hook;
	klist = &d->bd_kq.ki_note;
	knote_remove(klist, kn);
}

/*
 * kqueue read event filter.  Ready when the hold buffer has data, or
 * when immediate/timed-out mode exposes the store buffer.  As a side
 * effect, arms the read-timeout callout when one is configured.
 */
static int
bpf_filter_read(struct knote *kn, long hint)
{
	struct bpf_d *d;
	int ready = 0;

	d = (struct bpf_d *)kn->kn_hook;
	if (d->bd_hlen != 0 ||
	    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
	    d->bd_slen != 0)) {
		ready = 1;
	} else {
		/* Start the read timeout if necessary. */
		if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
			callout_reset(&d->bd_callout, d->bd_rtout,
			    bpf_timed_out, d);
			d->bd_state = BPF_WAITING;
		}
	}

	return (ready);
}


/*
 * Process the packet pkt of length pktlen.  The packet is parsed
 * by each listener's filter, and if accepted, stashed into the
 * corresponding buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bpf_d *d;
	struct timeval tv;
	int gottime = 0;
	u_int slen;

	lwkt_gettoken(&bpf_token);
	/* Re-check */
	if (bp == NULL) {
		lwkt_reltoken(&bpf_token);
		return;
	}

	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			/* Timestamp lazily, once per tapped packet. */
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			catchpacket(d, pkt, pktlen, slen, _bcopy, &tv);
		}
	}
	lwkt_reltoken(&bpf_token);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(volatile const void *src_arg, volatile void *dst_arg, size_t len)
{
	volatile const struct mbuf *m;
	u_int count;
	volatile u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == NULL)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Process the packet in the mbuf chain m.
  The packet is parsed by each
 * listener's filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bpf_d *d;
	u_int pktlen, slen;
	struct timeval tv;
	int gottime = 0;

	lwkt_gettoken(&bpf_token);
	/* Re-check */
	if (bp == NULL) {
		lwkt_reltoken(&bpf_token);
		return;
	}

	/* Don't compute pktlen, if no descriptor is attached. */
	if (SLIST_EMPTY(&bp->bif_dlist)) {
		lwkt_reltoken(&bpf_token);
		return;
	}

	pktlen = m_lengthm(m, NULL);

	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		/* Skip locally generated packets unless BIOCSSEESENT. */
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		++d->bd_rcount;
		/* buflen 0 tells bpf_filter the "packet" is an mbuf chain. */
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			/* Timestamp lazily, once per tapped packet. */
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy,
				    &tv);
		}
	}
	lwkt_reltoken(&bpf_token);
}

/*
 * Incoming linkage from device drivers, where we have a mbuf chain
 * but need to prepend some arbitrary header from a linear buffer.
 *
 * Con up a minimal dummy header to pacify bpf.  Allocate (only) a
 * struct m_hdr on the stack.  This is safe as bpf only reads from the
 * fields in this header that we initialize, and will not try to free
 * it or keep a pointer to it.
 */
void
bpf_mtap_hdr(struct bpf_if *arg, caddr_t data, u_int dlen, struct mbuf *m,
	     u_int direction)
{
	struct m_hdr mh;

	mh.mh_flags = 0;
	mh.mh_next = m;
	mh.mh_len = dlen;
	mh.mh_data = data;

	bpf_mtap(arg, (struct mbuf *) &mh);
}

/*
 * Tap a packet with a 4-byte address-family pseudo header prepended,
 * as used by DLT_NULL style interfaces.
 */
void
bpf_mtap_family(struct bpf_if *bp, struct mbuf *m, sa_family_t family)
{
	u_int family4;

	KKASSERT(family != AF_UNSPEC);

	family4 = (u_int)family;
	bpf_ptap(bp, m, &family4, sizeof(family4));
}

/*
 * Process the packet in the mbuf chain m with the header in m prepended.
 * The packet is parsed by each listener's filter, and if accepted,
 * stashed into the corresponding buffer.
 */
void
bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
{
	struct mbuf mb;

	/*
	 * Craft on-stack mbuf suitable for passing to bpf_mtap.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = __DECONST(void *, data); /* LINTED */
	mb.m_len = dlen;
	mb.m_pkthdr.rcvif = m->m_pkthdr.rcvif;

	bpf_mtap(bp, &mb);
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
	    void (*cpfn)(volatile const void *, volatile void *, size_t),
	    const struct timeval *tv)
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	int wakeup = 0;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		wakeup = 1;
	}

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	hp->bh_tstamp = *tv;
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 * NB: bh_caplen is assigned inside the call's third argument.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;

	if (wakeup)
		bpf_wakeup(d);
}

/*
 * Initialize all nonzero fields of a descriptor.
1469 */ 1470 static int 1471 bpf_allocbufs(struct bpf_d *d) 1472 { 1473 d->bd_fbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK); 1474 d->bd_sbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK); 1475 d->bd_slen = 0; 1476 d->bd_hlen = 0; 1477 return(0); 1478 } 1479 1480 /* 1481 * Free buffers and packet filter program currently in use by a descriptor. 1482 * Called on close. 1483 */ 1484 static void 1485 bpf_freed(struct bpf_d *d) 1486 { 1487 /* 1488 * We don't need to lock out interrupts since this descriptor has 1489 * been detached from its interface and it yet hasn't been marked 1490 * free. 1491 */ 1492 if (d->bd_sbuf != NULL) { 1493 kfree(d->bd_sbuf, M_BPF); 1494 if (d->bd_hbuf != NULL) 1495 kfree(d->bd_hbuf, M_BPF); 1496 if (d->bd_fbuf != NULL) 1497 kfree(d->bd_fbuf, M_BPF); 1498 } 1499 if (d->bd_rfilter) 1500 kfree(d->bd_rfilter, M_BPF); 1501 if (d->bd_wfilter) 1502 kfree(d->bd_wfilter, M_BPF); 1503 } 1504 1505 /* 1506 * Attach an interface to bpf. ifp is a pointer to the structure 1507 * defining the interface to be attached, dlt is the link layer type, 1508 * and hdrlen is the fixed size of the link header (variable length 1509 * headers are not yet supported). 1510 */ 1511 void 1512 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) 1513 { 1514 bpfattach_dlt(ifp, dlt, hdrlen, &ifp->if_bpf); 1515 } 1516 1517 void 1518 bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) 1519 { 1520 struct bpf_if *bp; 1521 1522 bp = kmalloc(sizeof *bp, M_BPF, M_WAITOK | M_ZERO); 1523 1524 lwkt_gettoken(&bpf_token); 1525 1526 SLIST_INIT(&bp->bif_dlist); 1527 bp->bif_ifp = ifp; 1528 bp->bif_dlt = dlt; 1529 bp->bif_driverp = driverp; 1530 *bp->bif_driverp = NULL; 1531 1532 bp->bif_next = bpf_iflist; 1533 bpf_iflist = bp; 1534 1535 /* 1536 * Compute the length of the bpf header. 
This is not necessarily 1537 * equal to SIZEOF_BPF_HDR because we want to insert spacing such 1538 * that the network layer header begins on a longword boundary (for 1539 * performance reasons and to alleviate alignment restrictions). 1540 */ 1541 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; 1542 1543 lwkt_reltoken(&bpf_token); 1544 1545 if (bootverbose) 1546 if_printf(ifp, "bpf attached\n"); 1547 } 1548 1549 /* 1550 * Detach bpf from an interface. This involves detaching each descriptor 1551 * associated with the interface, and leaving bd_bif NULL. Notify each 1552 * descriptor as it's detached so that any sleepers wake up and get 1553 * ENXIO. 1554 */ 1555 void 1556 bpfdetach(struct ifnet *ifp) 1557 { 1558 struct bpf_if *bp, *bp_prev; 1559 struct bpf_d *d; 1560 1561 lwkt_gettoken(&bpf_token); 1562 1563 /* Locate BPF interface information */ 1564 bp_prev = NULL; 1565 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1566 if (ifp == bp->bif_ifp) 1567 break; 1568 bp_prev = bp; 1569 } 1570 1571 /* Interface wasn't attached */ 1572 if (bp->bif_ifp == NULL) { 1573 lwkt_reltoken(&bpf_token); 1574 kprintf("bpfdetach: %s was not attached\n", ifp->if_xname); 1575 return; 1576 } 1577 1578 while ((d = SLIST_FIRST(&bp->bif_dlist)) != NULL) { 1579 bpf_detachd(d); 1580 bpf_wakeup(d); 1581 } 1582 1583 if (bp_prev != NULL) 1584 bp_prev->bif_next = bp->bif_next; 1585 else 1586 bpf_iflist = bp->bif_next; 1587 1588 kfree(bp, M_BPF); 1589 1590 lwkt_reltoken(&bpf_token); 1591 } 1592 1593 /* 1594 * Get a list of available data link type of the interface. 
1595 */ 1596 static int 1597 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl) 1598 { 1599 int n, error; 1600 struct ifnet *ifp; 1601 struct bpf_if *bp; 1602 1603 ifp = d->bd_bif->bif_ifp; 1604 n = 0; 1605 error = 0; 1606 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1607 if (bp->bif_ifp != ifp) 1608 continue; 1609 if (bfl->bfl_list != NULL) { 1610 if (n >= bfl->bfl_len) { 1611 return (ENOMEM); 1612 } 1613 error = copyout(&bp->bif_dlt, 1614 bfl->bfl_list + n, sizeof(u_int)); 1615 } 1616 n++; 1617 } 1618 bfl->bfl_len = n; 1619 return(error); 1620 } 1621 1622 /* 1623 * Set the data link type of a BPF instance. 1624 */ 1625 static int 1626 bpf_setdlt(struct bpf_d *d, u_int dlt) 1627 { 1628 int error, opromisc; 1629 struct ifnet *ifp; 1630 struct bpf_if *bp; 1631 1632 if (d->bd_bif->bif_dlt == dlt) 1633 return (0); 1634 ifp = d->bd_bif->bif_ifp; 1635 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { 1636 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) 1637 break; 1638 } 1639 if (bp != NULL) { 1640 opromisc = d->bd_promisc; 1641 bpf_detachd(d); 1642 bpf_attachd(d, bp); 1643 bpf_resetd(d); 1644 if (opromisc) { 1645 error = ifpromisc(bp->bif_ifp, 1); 1646 if (error) { 1647 if_printf(bp->bif_ifp, 1648 "bpf_setdlt: ifpromisc failed (%d)\n", 1649 error); 1650 } else { 1651 d->bd_promisc = 1; 1652 } 1653 } 1654 } 1655 return(bp == NULL ? 
EINVAL : 0); 1656 } 1657 1658 void 1659 bpf_gettoken(void) 1660 { 1661 lwkt_gettoken(&bpf_token); 1662 } 1663 1664 void 1665 bpf_reltoken(void) 1666 { 1667 lwkt_reltoken(&bpf_token); 1668 } 1669 1670 static void 1671 bpf_drvinit(void *unused) 1672 { 1673 int i; 1674 1675 make_autoclone_dev(&bpf_ops, &DEVFS_CLONE_BITMAP(bpf), 1676 bpfclone, 0, 0, 0600, "bpf"); 1677 for (i = 0; i < BPF_PREALLOCATED_UNITS; i++) { 1678 make_dev(&bpf_ops, i, 0, 0, 0600, "bpf%d", i); 1679 devfs_clone_bitmap_set(&DEVFS_CLONE_BITMAP(bpf), i); 1680 } 1681 } 1682 1683 static void 1684 bpf_drvuninit(void *unused) 1685 { 1686 devfs_clone_handler_del("bpf"); 1687 dev_ops_remove_all(&bpf_ops); 1688 devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(bpf)); 1689 } 1690 1691 SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE+CDEV_MAJOR, bpf_drvinit, NULL); 1692 SYSUNINIT(bpfdev, SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvuninit, NULL); 1693 1694 #else /* !BPF */ 1695 /* 1696 * NOP stubs to allow bpf-using drivers to load and function. 1697 * 1698 * A 'better' implementation would allow the core bpf functionality 1699 * to be loaded at runtime. 1700 */ 1701 1702 void 1703 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) 1704 { 1705 } 1706 1707 void 1708 bpf_mtap(struct bpf_if *bp, struct mbuf *m) 1709 { 1710 } 1711 1712 void 1713 bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen) 1714 { 1715 } 1716 1717 void 1718 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) 1719 { 1720 } 1721 1722 void 1723 bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) 1724 { 1725 } 1726 1727 void 1728 bpfdetach(struct ifnet *ifp) 1729 { 1730 } 1731 1732 u_int 1733 bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen) 1734 { 1735 return -1; /* "no filter" behaviour */ 1736 } 1737 1738 void 1739 bpf_gettoken(void) 1740 { 1741 } 1742 1743 void 1744 bpf_reltoken(void) 1745 { 1746 } 1747 1748 #endif /* !BPF */ 1749