/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2019 Andrey V. Elsukov <ae@FreeBSD.org>
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.4 (Berkeley) 1/9/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bpf.h"
#include "opt_ddb.h"
#include "opt_netgraph.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/uio.h>
#include <sys/sysent.h>
#include <sys/systm.h>

#include <sys/event.h>
#include <sys/file.h>
#include <sys/poll.h>
#include <sys/proc.h>

#include <sys/socket.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_vlan_var.h>
#include <net/if_dl.h>
#include <net/bpf.h>
#include <net/bpf_buffer.h>
#ifdef BPF_JITTER
#include <net/bpf_jitter.h>
#endif
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <net80211/ieee80211_freebsd.h>

#include <security/mac/mac_framework.h>

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

static struct bpf_if_ext dead_bpf_if = {
	.bif_dlist = CK_LIST_HEAD_INITIALIZER()
};

struct bpf_if {
#define	bif_next	bif_ext.bif_next
#define	bif_dlist	bif_ext.bif_dlist
	struct bpf_if_ext bif_ext;	/* public members */
	u_int		bif_dlt;	/* link layer type */
	u_int		bif_hdrlen;	/* length of link header */
	struct bpfd_list bif_wlist;	/* writer-only list */
	struct ifnet	*bif_ifp;	/* corresponding interface */
	struct bpf_if	**bif_bpf;	/* Pointer to pointer to us */
	volatile u_int	bif_refcnt;
	struct epoch_context epoch_ctx;
};

CTASSERT(offsetof(struct bpf_if, bif_ext) == 0);

struct bpf_program_buffer {
	struct epoch_context	epoch_ctx;
#ifdef BPF_JITTER
	bpf_jit_filter		*func;
#endif
	void			*buffer[0];
};

#if defined(DEV_BPF) || defined(NETGRAPH_BPF)

#define	PRINET		26	/* interruptible */
#define	BPF_PRIO_MAX	7

#define	SIZEOF_BPF_HDR(type)	\
    (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))

#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>
#define	BPF_ALIGNMENT32 sizeof(int32_t)
#define	BPF_WORDALIGN32(x) roundup2(x, BPF_ALIGNMENT32)

#ifndef BURN_BRIDGES
/*
 * 32-bit version of structure prepended to each packet.  We use this header
 * instead of the standard one for 32-bit streams.  We mark a stream as
 * 32-bit the first time we see a 32-bit compat ioctl request.
 */
struct bpf_hdr32 {
	struct timeval32 bh_tstamp;	/* time stamp */
	uint32_t	bh_caplen;	/* length of captured portion */
	uint32_t	bh_datalen;	/* original length of packet */
	uint16_t	bh_hdrlen;	/* length of bpf header (this struct
					   plus alignment padding) */
};
#endif

struct bpf_program32 {
	u_int bf_len;
	uint32_t bf_insns;
};

struct bpf_dltlist32 {
	u_int	bfl_len;
	u_int	bfl_list;
};

#define	BIOCSETF32	_IOW('B', 103, struct bpf_program32)
#define	BIOCSRTIMEOUT32	_IOW('B', 109, struct timeval32)
#define	BIOCGRTIMEOUT32	_IOR('B', 110, struct timeval32)
#define	BIOCGDLTLIST32	_IOWR('B', 121, struct bpf_dltlist32)
#define	BIOCSETWF32	_IOW('B', 123, struct bpf_program32)
#define	BIOCSETFNR32	_IOW('B', 130, struct bpf_program32)
#endif

#define	BPF_LOCK()		sx_xlock(&bpf_sx)
#define	BPF_UNLOCK()		sx_xunlock(&bpf_sx)
#define	BPF_LOCK_ASSERT()	sx_assert(&bpf_sx, SA_XLOCKED)

/*
 * bpf_iflist is a list of BPF interface structures, each corresponding to a
 * specific DLT.  The same network interface might have several BPF interface
 * structures registered by different layers in the stack (i.e., 802.11
 * frames, ethernet frames, etc).
 */
CK_LIST_HEAD(bpf_iflist, bpf_if);
static struct bpf_iflist bpf_iflist;
static struct sx	bpf_sx;		/* bpf global lock */
static int		bpf_bpfd_cnt;

static void	bpfif_ref(struct bpf_if *);
static void	bpfif_rele(struct bpf_if *);

static void	bpfd_ref(struct bpf_d *);
static void	bpfd_rele(struct bpf_d *);
static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static void	bpf_detachd_locked(struct bpf_d *, bool);
static void	bpfd_free(epoch_context_t);
static int	bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
		    struct sockaddr *, int *, struct bpf_d *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static __inline void
		bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
		    void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
		    struct bintime *);
static void	reset_d(struct bpf_d *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	filt_bpfdetach(struct knote *);
static int	filt_bpfread(struct knote *, long);
static void	bpf_drvinit(void *);
static int	bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "bpf sysctl");
int bpf_maxinsns = BPF_MAXINSNS;
SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
    &bpf_maxinsns, 0, "Maximum bpf program instructions");
static int bpf_zerocopy_enable = 0;
SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
    &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
    bpf_stats_sysctl, "bpf statistics portal");

VNET_DEFINE_STATIC(int, bpf_optimize_writers) = 0;
#define	V_bpf_optimize_writers VNET(bpf_optimize_writers)
SYSCTL_INT(_net_bpf, OID_AUTO, optimize_writers, CTLFLAG_VNET | CTLFLAG_RWTUN,
    &VNET_NAME(bpf_optimize_writers), 0,
    "Do not send packets until BPF program is set");
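
/*
 * Note: the knobs above are exported through sysctl(8); with OID_AUTO under
 * _net_bpf they appear as net.bpf.maxinsns, net.bpf.zerocopy_enable,
 * net.bpf.stats and net.bpf.optimize_writers (the latter is also a loader
 * tunable, per CTLFLAG_RWTUN).
 */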

static d_open_t		bpfopen;
static d_read_t		bpfread;
static d_write_t	bpfwrite;
static d_ioctl_t	bpfioctl;
static d_poll_t		bpfpoll;
static d_kqfilter_t	bpfkqfilter;

static struct cdevsw bpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	bpfopen,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
	.d_name =	"bpf",
	.d_kqfilter =	bpfkqfilter,
};

static struct filterops bpfread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_bpfdetach,
	.f_event = filt_bpfread,
};

/*
 * LOCKING MODEL USED BY BPF
 *
 * Locks:
 * 1) global lock (BPF_LOCK).  Sx, used to protect some global counters,
 * every bpf_iflist change, and to serialize ioctl access to bpf descriptors.
 * 2) Descriptor lock.  Mutex, used to protect BPF buffers and various
 * structure fields used by bpf_*tap* code.
 *
 * Lock order: global lock, then descriptor lock.
 *
 * There are several possible consumers:
 *
 * 1. The kernel registers an interface pointer with bpfattach().
 * Each call allocates a new bpf_if structure, references the ifnet pointer
 * and links the bpf_if into the bpf_iflist chain.  This is protected with
 * the global lock.
 *
 * 2. A userland application uses ioctl() calls on a bpf_d descriptor.
 * All such calls are serialized with the global lock.  BPF filters can be
 * changed, but the pointer to the old filter will be freed using
 * NET_EPOCH_CALL().  Thus it should be safe for bpf_tap/bpf_mtap* code to
 * access filter pointers, even if a change happens during bpf_tap execution.
 * Destruction of a bpf_d descriptor is also done using NET_EPOCH_CALL().
 *
 * 3. A userland application can write packets into a bpf_d descriptor.
 * Here we need to be sure that the ifnet won't disappear during bpfwrite().
 *
 * 4. The kernel invokes the bpf_tap/bpf_mtap* functions.  Access to
 * bif_dlist is protected by a net_epoch_preempt section, so it should
 * be safe to access the bpf_d descriptor inside the section.
 *
 * 5. The kernel invokes bpfdetach() on interface destruction.  All lists
 * are modified with the global lock held and the actual free() is done
 * using NET_EPOCH_CALL().
 */
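
/*
 * Epoch callback: invoked via NET_EPOCH_CALL() once all readers have left
 * the current network epoch, so this bpf_if can no longer be reached by
 * bif_dlist walkers.  Drops the ifnet reference and frees the structure.
 */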
static void
bpfif_free(epoch_context_t ctx)
{
	struct bpf_if *bp;

	bp = __containerof(ctx, struct bpf_if, epoch_ctx);
	if_rele(bp->bif_ifp);
	free(bp, M_BPF);
}

static void
bpfif_ref(struct bpf_if *bp)
{

	refcount_acquire(&bp->bif_refcnt);
}

static void
bpfif_rele(struct bpf_if *bp)
{

	if (!refcount_release(&bp->bif_refcnt))
		return;
	NET_EPOCH_CALL(bpfif_free, &bp->epoch_ctx);
}

static void
bpfd_ref(struct bpf_d *d)
{

	refcount_acquire(&d->bd_refcnt);
}

static void
bpfd_rele(struct bpf_d *d)
{

	if (!refcount_release(&d->bd_refcnt))
		return;
	NET_EPOCH_CALL(bpfd_free, &d->epoch_ctx);
}

static struct bpf_program_buffer*
bpf_program_buffer_alloc(size_t size, int flags)
{

	return (malloc(sizeof(struct bpf_program_buffer) + size,
	    M_BPF, flags));
}

static void
bpf_program_buffer_free(epoch_context_t ctx)
{
	struct bpf_program_buffer *ptr;

	ptr = __containerof(ctx, struct bpf_program_buffer, epoch_ctx);
#ifdef BPF_JITTER
	if (ptr->func != NULL)
		bpf_destroy_jit_filter(ptr->func);
#endif
	free(ptr, M_BPF);
}

/*
 * Wrapper functions for various buffering methods.  If the set of buffer
 * modes expands, we will probably want to introduce a switch data structure
 * similar to protosw, etc.
 */
static void
bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_bytes(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		counter_u64_add(d->bd_zcopy, 1);
		return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_bytes");
	}
}

static void
bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_mbuf(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		counter_u64_add(d->bd_zcopy, 1);
		return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_mbuf");
	}
}

/*
 * This function gets called when the free buffer is re-assigned.
 */
static void
bpf_buf_reclaimed(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return;

	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buf_reclaimed(d);
		return;

	default:
		panic("bpf_buf_reclaimed");
	}
}

/*
 * If the buffer mechanism has a way to decide that a held buffer can be made
 * free, then it is exposed via the bpf_canfreebuf() interface.  (1) is
 * returned if the buffer can be discarded, (0) is returned if it cannot.
 */
static int
bpf_canfreebuf(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canfreebuf(d));
	}
	return (0);
}
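
/*
 * Note: each descriptor cycles packets through three buffers: the store
 * buffer (bd_sbuf) that catchpacket() fills, the hold buffer (bd_hbuf)
 * that read(2) drains, and the free buffer (bd_fbuf) waiting for reuse;
 * ROTATE_BUFFERS() advances all three.
 */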
449 */ 450 static int 451 bpf_canwritebuf(struct bpf_d *d) 452 { 453 BPFD_LOCK_ASSERT(d); 454 455 switch (d->bd_bufmode) { 456 case BPF_BUFMODE_ZBUF: 457 return (bpf_zerocopy_canwritebuf(d)); 458 } 459 return (1); 460 } 461 462 /* 463 * Notify buffer model that an attempt to write to the store buffer has 464 * resulted in a dropped packet, in which case the buffer may be considered 465 * full. 466 */ 467 static void 468 bpf_buffull(struct bpf_d *d) 469 { 470 471 BPFD_LOCK_ASSERT(d); 472 473 switch (d->bd_bufmode) { 474 case BPF_BUFMODE_ZBUF: 475 bpf_zerocopy_buffull(d); 476 break; 477 } 478 } 479 480 /* 481 * Notify the buffer model that a buffer has moved into the hold position. 482 */ 483 void 484 bpf_bufheld(struct bpf_d *d) 485 { 486 487 BPFD_LOCK_ASSERT(d); 488 489 switch (d->bd_bufmode) { 490 case BPF_BUFMODE_ZBUF: 491 bpf_zerocopy_bufheld(d); 492 break; 493 } 494 } 495 496 static void 497 bpf_free(struct bpf_d *d) 498 { 499 500 switch (d->bd_bufmode) { 501 case BPF_BUFMODE_BUFFER: 502 return (bpf_buffer_free(d)); 503 504 case BPF_BUFMODE_ZBUF: 505 return (bpf_zerocopy_free(d)); 506 507 default: 508 panic("bpf_buf_free"); 509 } 510 } 511 512 static int 513 bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio) 514 { 515 516 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) 517 return (EOPNOTSUPP); 518 return (bpf_buffer_uiomove(d, buf, len, uio)); 519 } 520 521 static int 522 bpf_ioctl_sblen(struct bpf_d *d, u_int *i) 523 { 524 525 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) 526 return (EOPNOTSUPP); 527 return (bpf_buffer_ioctl_sblen(d, i)); 528 } 529 530 static int 531 bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i) 532 { 533 534 if (d->bd_bufmode != BPF_BUFMODE_ZBUF) 535 return (EOPNOTSUPP); 536 return (bpf_zerocopy_ioctl_getzmax(td, d, i)); 537 } 538 539 static int 540 bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz) 541 { 542 543 if (d->bd_bufmode != BPF_BUFMODE_ZBUF) 544 return (EOPNOTSUPP); 545 return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz)); 546 } 547 548 static int 549 bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz) 550 { 551 552 if (d->bd_bufmode != BPF_BUFMODE_ZBUF) 553 return (EOPNOTSUPP); 554 return (bpf_zerocopy_ioctl_setzbuf(td, d, bz)); 555 } 556 557 /* 558 * General BPF functions. 559 */ 560 static int 561 bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp, 562 struct sockaddr *sockp, int *hdrlen, struct bpf_d *d) 563 { 564 const struct ieee80211_bpf_params *p; 565 struct ether_header *eh; 566 struct mbuf *m; 567 int error; 568 int len; 569 int hlen; 570 int slen; 571 572 /* 573 * Build a sockaddr based on the data link layer type. 574 * We do this at this level because the ethernet header 575 * is copied directly into the data field of the sockaddr. 576 * In the case of SLIP, there is no header and the packet 577 * is forwarded as is. 578 * Also, we are careful to leave room at the front of the mbuf 579 * for the link level header. 580 */ 581 switch (linktype) { 582 case DLT_SLIP: 583 sockp->sa_family = AF_INET; 584 hlen = 0; 585 break; 586 587 case DLT_EN10MB: 588 sockp->sa_family = AF_UNSPEC; 589 /* XXX Would MAXLINKHDR be better? 
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
		break;

	case DLT_RAW:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_NULL:
		/*
		 * null interface types require a 4 byte pseudo header which
		 * corresponds to the address family of the packet.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sockp->sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
		sockp->sa_family = AF_IEEE80211;
		sockp->sa_len = 12;	/* XXX != 0 */
		hlen = sizeof(struct ieee80211_bpf_params);
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	if (len < hlen || len - hlen > ifp->if_mtu)
		return (EMSGSIZE);

	/*
	 * Allocate a mbuf for our write.  Since m_get2() fails if
	 * len >= MJUMPAGESIZE, use m_getjcl() for bigger buffers.
	 */
	if (len < MJUMPAGESIZE)
		m = m_get2(len, M_WAITOK, MT_DATA, M_PKTHDR);
	else if (len <= MJUM9BYTES)
		m = m_getjcl(M_WAITOK, MT_DATA, M_PKTHDR, MJUM9BYTES);
	else if (len <= MJUM16BYTES)
		m = m_getjcl(M_WAITOK, MT_DATA, M_PKTHDR, MJUM16BYTES);
	else
		m = NULL;
	if (m == NULL)
		return (EIO);
	m->m_pkthdr.len = m->m_len = len;
	*mp = m;

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	slen = bpf_filter(d->bd_wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/* Check for multicast destination */
	switch (linktype) {
	case DLT_EN10MB:
		eh = mtod(m, struct ether_header *);
		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
			    ETHER_ADDR_LEN) == 0)
				m->m_flags |= M_BCAST;
			else
				m->m_flags |= M_MCAST;
		}
		if (d->bd_hdrcmplt == 0) {
			memcpy(eh->ether_shost, IF_LLADDR(ifp),
			    sizeof(eh->ether_shost));
		}
		break;
	}

	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		if (sockp->sa_family == AF_IEEE80211) {
			/*
			 * Collect true length from the parameter header
			 * NB: sockp is known to be zero'd so if we do a
			 *     short copy unspecified parameters will be
			 *     zero.
			 * NB: packet may not be aligned after stripping
			 *     bpf params
			 * XXX check ibp_vers
			 */
			p = mtod(m, const struct ieee80211_bpf_params *);
			hlen = p->ibp_len;
			if (hlen > sizeof(sockp->sa_data)) {
				error = EINVAL;
				goto bad;
			}
		}
		bcopy(mtod(m, const void *), sockp->sa_data, hlen);
	}
	*hdrlen = hlen;

	return (0);
bad:
	m_freem(m);
	return (error);
}

/*
 * Attach descriptor to the bpf interface, i.e. make d listen on bp,
 * then reset its buffers and counters with reset_d().
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	int op_w;

	BPF_LOCK_ASSERT();

	/*
	 * Save sysctl value to protect from sysctl change
	 * between reads.
	 */
	op_w = V_bpf_optimize_writers || d->bd_writer;

	if (d->bd_bif != NULL)
		bpf_detachd_locked(d, false);
	/*
	 * Point d at bp, and add d to the interface's list.
	 * Since there are many applications using BPF for
	 * sending raw packets only (dhcpd, cdpd are good examples)
	 * we can delay adding d to the list of active listeners until
	 * some filter is configured.
	 */

	BPFD_LOCK(d);
	/*
	 * Hold reference to bpif while descriptor uses this interface.
	 */
	bpfif_ref(bp);
	d->bd_bif = bp;
	if (op_w != 0) {
		/* Add to writers-only list */
		CK_LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
		/*
		 * We decrement bd_writer on every filter set operation.
		 * First BIOCSETF is done by pcap_open_live() to set up
		 * snap length.  After that the application usually sets
		 * its own filter.
		 */
		d->bd_writer = 2;
	} else
		CK_LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	reset_d(d);
	BPFD_UNLOCK(d);
	bpf_bpfd_cnt++;

	CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
	    __func__, d->bd_pid, d->bd_writer ? "writer" : "active");

	if (op_w == 0)
		EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
}

/*
 * Check if we need to upgrade our descriptor @d from write-only mode.
 */
static int
bpf_check_upgrade(u_long cmd, struct bpf_d *d, struct bpf_insn *fcode,
    int flen)
{
	int is_snap, need_upgrade;

	/*
	 * Check if we've already upgraded or the new filter is empty.
	 */
	if (d->bd_writer == 0 || fcode == NULL)
		return (0);

	need_upgrade = 0;

	/*
	 * Check if cmd looks like a snaplen setting from
	 * pcap_bpf.c:pcap_open_live().
	 * Note we're not checking .k value here:
	 * while pcap_open_live() definitely sets it to a non-zero value,
	 * we'd prefer to treat the k=0 (deny ALL) case the same way: e.g.
	 * do not consider upgrading immediately.
	 */
	if (cmd == BIOCSETF && flen == 1 &&
	    fcode[0].code == (BPF_RET | BPF_K))
		is_snap = 1;
	else
		is_snap = 0;

	if (is_snap == 0) {
		/*
		 * We're setting the first filter and it doesn't look like
		 * setting snaplen.  We're probably using bpf directly.
		 * Upgrade immediately.
		 */
		need_upgrade = 1;
	} else {
		/*
		 * Do not require upgrade by the first BIOCSETF
		 * (used to set snaplen) by pcap_open_live().
		 */

		if (--d->bd_writer == 0) {
			/*
			 * First snaplen filter has already
			 * been set.  This is probably a catch-all
			 * filter.
			 */
			need_upgrade = 1;
		}
	}

	CTR5(KTR_NET,
	    "%s: filter function set by pid %d, "
	    "bd_writer counter %d, snap %d upgrade %d",
	    __func__, d->bd_pid, d->bd_writer,
	    is_snap, need_upgrade);

	return (need_upgrade);
}
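
/*
 * For example, libpcap's pcap_open_live() first issues BIOCSETF with a
 * single BPF_RET|BPF_K instruction to apply the snap length, and only
 * later installs the real filter.  bpf_attachd() therefore starts
 * bd_writer at 2, and bpf_check_upgrade() counts the snaplen-looking
 * program down before treating the next filter as a request to become
 * an active reader.
 */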

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	BPF_LOCK();
	bpf_detachd_locked(d, false);
	BPF_UNLOCK();
}

static void
bpf_detachd_locked(struct bpf_d *d, bool detached_ifp)
{
	struct bpf_if *bp;
	struct ifnet *ifp;
	int error;

	BPF_LOCK_ASSERT();
	CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);

	/* Check if descriptor is attached */
	if ((bp = d->bd_bif) == NULL)
		return;

	BPFD_LOCK(d);
	/* Remove d from the interface's descriptor list. */
	CK_LIST_REMOVE(d, bd_next);
	/* Save bd_writer value */
	error = d->bd_writer;
	ifp = bp->bif_ifp;
	d->bd_bif = NULL;
	if (detached_ifp) {
		/*
		 * Notify descriptor that it's detached, so that any
		 * sleepers wake up and get ENXIO.
		 */
		bpf_wakeup(d);
	}
	BPFD_UNLOCK(d);
	bpf_bpfd_cnt--;

	/* Call event handler iff d is attached */
	if (error == 0)
		EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so and ifnet is not detached, turn it off.
	 */
	if (d->bd_promisc && !detached_ifp) {
		d->bd_promisc = 0;
		CURVNET_SET(ifp->if_vnet);
		error = ifpromisc(ifp, 0);
		CURVNET_RESTORE();
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged.
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
			    "bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
	bpfif_rele(bp);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
static void
bpf_dtor(void *data)
{
	struct bpf_d *d = data;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	funsetown(&d->bd_sigio);
	bpf_detachd(d);
#ifdef MAC
	mac_bpfdesc_destroy(d);
#endif /* MAC */
	seldrain(&d->bd_sel);
	knlist_destroy(&d->bd_sel.si_note);
	callout_drain(&d->bd_callout);
	bpfd_rele(d);
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d;
	int error;

	d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(d, bpf_dtor);
	if (error != 0) {
		free(d, M_BPF);
		return (error);
	}

	/* Setup counters */
	d->bd_rcount = counter_u64_alloc(M_WAITOK);
	d->bd_dcount = counter_u64_alloc(M_WAITOK);
	d->bd_fcount = counter_u64_alloc(M_WAITOK);
	d->bd_wcount = counter_u64_alloc(M_WAITOK);
	d->bd_wfcount = counter_u64_alloc(M_WAITOK);
	d->bd_wdcount = counter_u64_alloc(M_WAITOK);
	d->bd_zcopy = counter_u64_alloc(M_WAITOK);

	/*
	 * For historical reasons, perform a one-time initialization call to
	 * the buffer routines, even though we're not yet committed to a
	 * particular buffer method.
	 */
	bpf_buffer_init(d);
	if ((flags & FREAD) == 0)
		d->bd_writer = 2;
	d->bd_hbuf_in_use = 0;
	d->bd_bufmode = BPF_BUFMODE_BUFFER;
	d->bd_sig = SIGIO;
	d->bd_direction = BPF_D_INOUT;
	d->bd_refcnt = 1;
	BPF_PID_REFRESH(d, td);
#ifdef MAC
	mac_bpfdesc_init(d);
	mac_bpfdesc_create(td->td_ucred, d);
#endif
	mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
	knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);

	/* Disable VLAN pcp tagging. */
	d->bd_pcp = 0;

	return (0);
}

/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpfread(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	int error;
	int non_block;
	int timed_out;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	non_block = ((ioflag & O_NONBLOCK) != 0);

	BPFD_LOCK(d);
	BPF_PID_REFRESH_CUR(d);
	if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
		BPFD_UNLOCK(d);
		return (EOPNOTSUPP);
	}
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	while (d->bd_hbuf_in_use) {
		error = mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
		    PRINET|PCATCH, "bd_hbuf", 0);
		if (error != 0) {
			BPFD_UNLOCK(d);
			return (error);
		}
	}
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if (d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 */
			if (d->bd_immediate || non_block || timed_out) {
				/*
				 * Rotate the buffers and return what's here
				 * if we are in immediate mode, non-blocking
				 * flag is set, or this descriptor timed out.
				 */
				ROTATE_BUFFERS(d);
				break;
			}
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (non_block) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		error = msleep(d, &d->bd_lock, PRINET|PCATCH,
		    "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_UNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
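	/*
	 * Flag the hold buffer as busy so that FIONREAD, reset_d() and
	 * concurrent readers sleep on bd_hbuf_in_use until we are done.
	 */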
	d->bd_hbuf_in_use = 1;
	BPFD_UNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 *
	 * We do not have to worry about simultaneous reads because
	 * we waited for sole access to the hold buffer above.
	 */
	error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);

	BPFD_LOCK(d);
	KASSERT(d->bd_hbuf != NULL, ("bpfread: lost bd_hbuf"));
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	bpf_buf_reclaimed(d);
	d->bd_hbuf_in_use = 0;
	wakeup(&d->bd_hbuf_in_use);
	BPFD_UNLOCK(d);

	return (error);
}

/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	selwakeuppri(&d->bd_sel, PRINET);
	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
}

static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	BPFD_LOCK_ASSERT(d);

	if (callout_pending(&d->bd_callout) ||
	    !callout_active(&d->bd_callout))
		return;
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
}

static int
bpf_ready(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
		return (1);
	if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
	    d->bd_slen != 0)
		return (1);
	return (0);
}

static int
bpf_setpcp(struct mbuf *m, u_int8_t prio)
{
	struct m_tag *mtag;

	KASSERT(prio <= BPF_PRIO_MAX,
	    ("%s with invalid pcp", __func__));

	mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_OUT, NULL);
	if (mtag == NULL) {
		mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_OUT,
		    sizeof(uint8_t), M_NOWAIT);
		if (mtag == NULL)
			return (ENOMEM);
		m_tag_prepend(m, mtag);
	}

	*(uint8_t *)(mtag + 1) = prio;
	return (0);
}

static int
bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct route ro;
	struct sockaddr dst;
	struct epoch_tracker et;
	struct bpf_if *bp;
	struct bpf_d *d;
	struct ifnet *ifp;
	struct mbuf *m, *mc;
	int error, hlen;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	NET_EPOCH_ENTER(et);
	BPFD_LOCK(d);
	BPF_PID_REFRESH_CUR(d);
	counter_u64_add(d->bd_wcount, 1);
	if ((bp = d->bd_bif) == NULL) {
		error = ENXIO;
		goto out_locked;
	}

	ifp = bp->bif_ifp;
	if ((ifp->if_flags & IFF_UP) == 0) {
		error = ENETDOWN;
		goto out_locked;
	}

	if (uio->uio_resid == 0)
		goto out_locked;

	bzero(&dst, sizeof(dst));
	m = NULL;
	hlen = 0;

	/*
	 * Take extra reference, unlock d and exit from epoch section,
	 * since bpf_movein() can sleep.
	 */
	bpfd_ref(d);
	NET_EPOCH_EXIT(et);
	BPFD_UNLOCK(d);

	error = bpf_movein(uio, (int)bp->bif_dlt, ifp,
	    &m, &dst, &hlen, d);

	if (error != 0) {
		counter_u64_add(d->bd_wdcount, 1);
		bpfd_rele(d);
		return (error);
	}

	BPFD_LOCK(d);
	/*
	 * Check that descriptor is still attached to the interface.
	 * This can happen on bpfdetach().  To avoid access to detached
	 * ifnet, free mbuf and return ENXIO.
	 */
	if (d->bd_bif == NULL) {
		counter_u64_add(d->bd_wdcount, 1);
		BPFD_UNLOCK(d);
		bpfd_rele(d);
		m_freem(m);
		return (ENXIO);
	}
	counter_u64_add(d->bd_wfcount, 1);
	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	if (d->bd_feedback) {
		mc = m_dup(m, M_NOWAIT);
		if (mc != NULL)
			mc->m_pkthdr.rcvif = ifp;
		/* Set M_PROMISC for outgoing packets to be discarded. */
		if (d->bd_direction == BPF_D_INOUT)
			m->m_flags |= M_PROMISC;
	} else
		mc = NULL;

	m->m_pkthdr.len -= hlen;
	m->m_len -= hlen;
	m->m_data += hlen;	/* XXX */

	CURVNET_SET(ifp->if_vnet);
#ifdef MAC
	mac_bpfdesc_create_mbuf(d, m);
	if (mc != NULL)
		mac_bpfdesc_create_mbuf(d, mc);
#endif

	bzero(&ro, sizeof(ro));
	if (hlen != 0) {
		ro.ro_prepend = (u_char *)&dst.sa_data;
		ro.ro_plen = hlen;
		ro.ro_flags = RT_HAS_HEADER;
	}

	if (d->bd_pcp != 0)
		bpf_setpcp(m, d->bd_pcp);

	/* Avoid possible recursion on BPFD_LOCK(). */
	NET_EPOCH_ENTER(et);
	BPFD_UNLOCK(d);
	error = (*ifp->if_output)(ifp, m, &dst, &ro);
	if (error)
		counter_u64_add(d->bd_wdcount, 1);

	if (mc != NULL) {
		if (error == 0)
			(*ifp->if_input)(ifp, mc);
		else
			m_freem(mc);
	}
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();
	bpfd_rele(d);
	return (error);

out_locked:
	counter_u64_add(d->bd_wdcount, 1);
	NET_EPOCH_EXIT(et);
	BPFD_UNLOCK(d);
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the receive
 * and drop counts.  This is doable for kernel-only buffers, but with
 * zero-copy buffers, we can't write to (or rotate) buffers that are
 * currently owned by userspace.  It would be nice if we could encapsulate
 * this logic in the buffer code rather than here.
 */
static void
reset_d(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	while (d->bd_hbuf_in_use)
		mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, PRINET,
		    "bd_hbuf", 0);
	if ((d->bd_hbuf != NULL) &&
	    (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}
	if (bpf_canwritebuf(d))
		d->bd_slen = 0;
	counter_u64_zero(d->bd_rcount);
	counter_u64_zero(d->bd_dcount);
	counter_u64_zero(d->bd_fcount);
	counter_u64_zero(d->bd_wcount);
	counter_u64_zero(d->bd_wfcount);
	counter_u64_zero(d->bd_wdcount);
	counter_u64_zero(d->bd_zcopy);
}

/*
 *  FIONREAD		Check for read packet available.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set read filter.
 *  BIOCSETFNR		Set read filter without resetting descriptor.
 *  BIOCSETWF		Set write filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGDIRECTION	Get packet direction flag
 *  BIOCSDIRECTION	Set packet direction flag
 *  BIOCGTSTAMP		Get time stamp format and resolution.
 *  BIOCSTSTAMP		Set time stamp format and resolution.
 *  BIOCLOCK		Set "locked" flag
 *  BIOCFEEDBACK	Set packet feedback mode.
 *  BIOCSETZBUF		Set current zero-copy buffer locations.
 *  BIOCGETZMAX		Get maximum zero-copy buffer size.
 *  BIOCROTZBUF		Force rotation of zero-copy buffer
 *  BIOCSETBUFMODE	Set buffer mode.
 *  BIOCGETBUFMODE	Get current buffer mode.
 *  BIOCSETVLANPCP	Set VLAN PCP tag.
 */
/* ARGSUSED */
static int
bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct bpf_d *d;
	int error;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	BPF_PID_REFRESH(d, td);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);

	if (d->bd_locked == 1) {
		switch (cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
#ifdef COMPAT_FREEBSD32
		case BIOCGDLTLIST32:
#endif
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && defined(__amd64__)
		case BIOCGRTIMEOUT32:
#endif
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case BIOCSTSTAMP:
		case BIOCFEEDBACK:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && defined(__amd64__)
		case BIOCSRTIMEOUT32:
#endif
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
		case BIOCROTZBUF:
			break;
		default:
			return (EPERM);
		}
	}
#ifdef COMPAT_FREEBSD32
	/*
	 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so
	 * that it will get 32-bit packet headers.
	 */
	switch (cmd) {
	case BIOCSETF32:
	case BIOCSETFNR32:
	case BIOCSETWF32:
	case BIOCGDLTLIST32:
	case BIOCGRTIMEOUT32:
	case BIOCSRTIMEOUT32:
		if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
			BPFD_LOCK(d);
			d->bd_compat32 = 1;
			BPFD_UNLOCK(d);
		}
	}
#endif

	CURVNET_SET(TD_TO_VNET(td));
	switch (cmd) {
	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			BPFD_LOCK(d);
			n = d->bd_slen;
			while (d->bd_hbuf_in_use)
				mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
				    PRINET, "bd_hbuf", 0);
			if (d->bd_hbuf)
				n += d->bd_hlen;
			BPFD_UNLOCK(d);

			*(int *)addr = n;
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		BPFD_LOCK(d);
		*(u_int *)addr = d->bd_bufsize;
		BPFD_UNLOCK(d);
		break;

	/*
	 * Set buffer length.
	 */
1503 */ 1504 case BIOCSBLEN: 1505 error = bpf_ioctl_sblen(d, (u_int *)addr); 1506 break; 1507 1508 /* 1509 * Set link layer read filter. 1510 */ 1511 case BIOCSETF: 1512 case BIOCSETFNR: 1513 case BIOCSETWF: 1514 #ifdef COMPAT_FREEBSD32 1515 case BIOCSETF32: 1516 case BIOCSETFNR32: 1517 case BIOCSETWF32: 1518 #endif 1519 error = bpf_setf(d, (struct bpf_program *)addr, cmd); 1520 break; 1521 1522 /* 1523 * Flush read packet buffer. 1524 */ 1525 case BIOCFLUSH: 1526 BPFD_LOCK(d); 1527 reset_d(d); 1528 BPFD_UNLOCK(d); 1529 break; 1530 1531 /* 1532 * Put interface into promiscuous mode. 1533 */ 1534 case BIOCPROMISC: 1535 if (d->bd_bif == NULL) { 1536 /* 1537 * No interface attached yet. 1538 */ 1539 error = EINVAL; 1540 break; 1541 } 1542 if (d->bd_promisc == 0) { 1543 error = ifpromisc(d->bd_bif->bif_ifp, 1); 1544 if (error == 0) 1545 d->bd_promisc = 1; 1546 } 1547 break; 1548 1549 /* 1550 * Get current data link type. 1551 */ 1552 case BIOCGDLT: 1553 BPF_LOCK(); 1554 if (d->bd_bif == NULL) 1555 error = EINVAL; 1556 else 1557 *(u_int *)addr = d->bd_bif->bif_dlt; 1558 BPF_UNLOCK(); 1559 break; 1560 1561 /* 1562 * Get a list of supported data link types. 1563 */ 1564 #ifdef COMPAT_FREEBSD32 1565 case BIOCGDLTLIST32: 1566 { 1567 struct bpf_dltlist32 *list32; 1568 struct bpf_dltlist dltlist; 1569 1570 list32 = (struct bpf_dltlist32 *)addr; 1571 dltlist.bfl_len = list32->bfl_len; 1572 dltlist.bfl_list = PTRIN(list32->bfl_list); 1573 BPF_LOCK(); 1574 if (d->bd_bif == NULL) 1575 error = EINVAL; 1576 else { 1577 error = bpf_getdltlist(d, &dltlist); 1578 if (error == 0) 1579 list32->bfl_len = dltlist.bfl_len; 1580 } 1581 BPF_UNLOCK(); 1582 break; 1583 } 1584 #endif 1585 1586 case BIOCGDLTLIST: 1587 BPF_LOCK(); 1588 if (d->bd_bif == NULL) 1589 error = EINVAL; 1590 else 1591 error = bpf_getdltlist(d, (struct bpf_dltlist *)addr); 1592 BPF_UNLOCK(); 1593 break; 1594 1595 /* 1596 * Set data link type. 1597 */ 1598 case BIOCSDLT: 1599 BPF_LOCK(); 1600 if (d->bd_bif == NULL) 1601 error = EINVAL; 1602 else 1603 error = bpf_setdlt(d, *(u_int *)addr); 1604 BPF_UNLOCK(); 1605 break; 1606 1607 /* 1608 * Get interface name. 1609 */ 1610 case BIOCGETIF: 1611 BPF_LOCK(); 1612 if (d->bd_bif == NULL) 1613 error = EINVAL; 1614 else { 1615 struct ifnet *const ifp = d->bd_bif->bif_ifp; 1616 struct ifreq *const ifr = (struct ifreq *)addr; 1617 1618 strlcpy(ifr->ifr_name, ifp->if_xname, 1619 sizeof(ifr->ifr_name)); 1620 } 1621 BPF_UNLOCK(); 1622 break; 1623 1624 /* 1625 * Set interface. 1626 */ 1627 case BIOCSETIF: 1628 { 1629 int alloc_buf, size; 1630 1631 /* 1632 * Behavior here depends on the buffering model. If 1633 * we're using kernel memory buffers, then we can 1634 * allocate them here. If we're using zero-copy, 1635 * then the user process must have registered buffers 1636 * by the time we get here. 1637 */ 1638 alloc_buf = 0; 1639 BPFD_LOCK(d); 1640 if (d->bd_bufmode == BPF_BUFMODE_BUFFER && 1641 d->bd_sbuf == NULL) 1642 alloc_buf = 1; 1643 BPFD_UNLOCK(d); 1644 if (alloc_buf) { 1645 size = d->bd_bufsize; 1646 error = bpf_buffer_ioctl_sblen(d, &size); 1647 if (error != 0) 1648 break; 1649 } 1650 BPF_LOCK(); 1651 error = bpf_setif(d, (struct ifreq *)addr); 1652 BPF_UNLOCK(); 1653 break; 1654 } 1655 1656 /* 1657 * Set read timeout. 
1658 */ 1659 case BIOCSRTIMEOUT: 1660 #if defined(COMPAT_FREEBSD32) && defined(__amd64__) 1661 case BIOCSRTIMEOUT32: 1662 #endif 1663 { 1664 struct timeval *tv = (struct timeval *)addr; 1665 #if defined(COMPAT_FREEBSD32) && !defined(__mips__) 1666 struct timeval32 *tv32; 1667 struct timeval tv64; 1668 1669 if (cmd == BIOCSRTIMEOUT32) { 1670 tv32 = (struct timeval32 *)addr; 1671 tv = &tv64; 1672 tv->tv_sec = tv32->tv_sec; 1673 tv->tv_usec = tv32->tv_usec; 1674 } else 1675 #endif 1676 tv = (struct timeval *)addr; 1677 1678 /* 1679 * Subtract 1 tick from tvtohz() since this isn't 1680 * a one-shot timer. 1681 */ 1682 if ((error = itimerfix(tv)) == 0) 1683 d->bd_rtout = tvtohz(tv) - 1; 1684 break; 1685 } 1686 1687 /* 1688 * Get read timeout. 1689 */ 1690 case BIOCGRTIMEOUT: 1691 #if defined(COMPAT_FREEBSD32) && defined(__amd64__) 1692 case BIOCGRTIMEOUT32: 1693 #endif 1694 { 1695 struct timeval *tv; 1696 #if defined(COMPAT_FREEBSD32) && defined(__amd64__) 1697 struct timeval32 *tv32; 1698 struct timeval tv64; 1699 1700 if (cmd == BIOCGRTIMEOUT32) 1701 tv = &tv64; 1702 else 1703 #endif 1704 tv = (struct timeval *)addr; 1705 1706 tv->tv_sec = d->bd_rtout / hz; 1707 tv->tv_usec = (d->bd_rtout % hz) * tick; 1708 #if defined(COMPAT_FREEBSD32) && defined(__amd64__) 1709 if (cmd == BIOCGRTIMEOUT32) { 1710 tv32 = (struct timeval32 *)addr; 1711 tv32->tv_sec = tv->tv_sec; 1712 tv32->tv_usec = tv->tv_usec; 1713 } 1714 #endif 1715 1716 break; 1717 } 1718 1719 /* 1720 * Get packet stats. 1721 */ 1722 case BIOCGSTATS: 1723 { 1724 struct bpf_stat *bs = (struct bpf_stat *)addr; 1725 1726 /* XXXCSJP overflow */ 1727 bs->bs_recv = (u_int)counter_u64_fetch(d->bd_rcount); 1728 bs->bs_drop = (u_int)counter_u64_fetch(d->bd_dcount); 1729 break; 1730 } 1731 1732 /* 1733 * Set immediate mode. 1734 */ 1735 case BIOCIMMEDIATE: 1736 BPFD_LOCK(d); 1737 d->bd_immediate = *(u_int *)addr; 1738 BPFD_UNLOCK(d); 1739 break; 1740 1741 case BIOCVERSION: 1742 { 1743 struct bpf_version *bv = (struct bpf_version *)addr; 1744 1745 bv->bv_major = BPF_MAJOR_VERSION; 1746 bv->bv_minor = BPF_MINOR_VERSION; 1747 break; 1748 } 1749 1750 /* 1751 * Get "header already complete" flag 1752 */ 1753 case BIOCGHDRCMPLT: 1754 BPFD_LOCK(d); 1755 *(u_int *)addr = d->bd_hdrcmplt; 1756 BPFD_UNLOCK(d); 1757 break; 1758 1759 /* 1760 * Set "header already complete" flag 1761 */ 1762 case BIOCSHDRCMPLT: 1763 BPFD_LOCK(d); 1764 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0; 1765 BPFD_UNLOCK(d); 1766 break; 1767 1768 /* 1769 * Get packet direction flag 1770 */ 1771 case BIOCGDIRECTION: 1772 BPFD_LOCK(d); 1773 *(u_int *)addr = d->bd_direction; 1774 BPFD_UNLOCK(d); 1775 break; 1776 1777 /* 1778 * Set packet direction flag 1779 */ 1780 case BIOCSDIRECTION: 1781 { 1782 u_int direction; 1783 1784 direction = *(u_int *)addr; 1785 switch (direction) { 1786 case BPF_D_IN: 1787 case BPF_D_INOUT: 1788 case BPF_D_OUT: 1789 BPFD_LOCK(d); 1790 d->bd_direction = direction; 1791 BPFD_UNLOCK(d); 1792 break; 1793 default: 1794 error = EINVAL; 1795 } 1796 } 1797 break; 1798 1799 /* 1800 * Get packet timestamp format and resolution. 1801 */ 1802 case BIOCGTSTAMP: 1803 BPFD_LOCK(d); 1804 *(u_int *)addr = d->bd_tstamp; 1805 BPFD_UNLOCK(d); 1806 break; 1807 1808 /* 1809 * Set packet timestamp format and resolution. 
1810 */ 1811 case BIOCSTSTAMP: 1812 { 1813 u_int func; 1814 1815 func = *(u_int *)addr; 1816 if (BPF_T_VALID(func)) 1817 d->bd_tstamp = func; 1818 else 1819 error = EINVAL; 1820 } 1821 break; 1822 1823 case BIOCFEEDBACK: 1824 BPFD_LOCK(d); 1825 d->bd_feedback = *(u_int *)addr; 1826 BPFD_UNLOCK(d); 1827 break; 1828 1829 case BIOCLOCK: 1830 BPFD_LOCK(d); 1831 d->bd_locked = 1; 1832 BPFD_UNLOCK(d); 1833 break; 1834 1835 case FIONBIO: /* Non-blocking I/O */ 1836 break; 1837 1838 case FIOASYNC: /* Send signal on receive packets */ 1839 BPFD_LOCK(d); 1840 d->bd_async = *(int *)addr; 1841 BPFD_UNLOCK(d); 1842 break; 1843 1844 case FIOSETOWN: 1845 /* 1846 * XXX: Add some sort of locking here? 1847 * fsetown() can sleep. 1848 */ 1849 error = fsetown(*(int *)addr, &d->bd_sigio); 1850 break; 1851 1852 case FIOGETOWN: 1853 BPFD_LOCK(d); 1854 *(int *)addr = fgetown(&d->bd_sigio); 1855 BPFD_UNLOCK(d); 1856 break; 1857 1858 /* This is deprecated, FIOSETOWN should be used instead. */ 1859 case TIOCSPGRP: 1860 error = fsetown(-(*(int *)addr), &d->bd_sigio); 1861 break; 1862 1863 /* This is deprecated, FIOGETOWN should be used instead. */ 1864 case TIOCGPGRP: 1865 *(int *)addr = -fgetown(&d->bd_sigio); 1866 break; 1867 1868 case BIOCSRSIG: /* Set receive signal */ 1869 { 1870 u_int sig; 1871 1872 sig = *(u_int *)addr; 1873 1874 if (sig >= NSIG) 1875 error = EINVAL; 1876 else { 1877 BPFD_LOCK(d); 1878 d->bd_sig = sig; 1879 BPFD_UNLOCK(d); 1880 } 1881 break; 1882 } 1883 case BIOCGRSIG: 1884 BPFD_LOCK(d); 1885 *(u_int *)addr = d->bd_sig; 1886 BPFD_UNLOCK(d); 1887 break; 1888 1889 case BIOCGETBUFMODE: 1890 BPFD_LOCK(d); 1891 *(u_int *)addr = d->bd_bufmode; 1892 BPFD_UNLOCK(d); 1893 break; 1894 1895 case BIOCSETBUFMODE: 1896 /* 1897 * Allow the buffering mode to be changed as long as we 1898 * haven't yet committed to a particular mode. Our 1899 * definition of commitment, for now, is whether or not a 1900 * buffer has been allocated or an interface attached, since 1901 * that's the point where things get tricky. 1902 */ 1903 switch (*(u_int *)addr) { 1904 case BPF_BUFMODE_BUFFER: 1905 break; 1906 1907 case BPF_BUFMODE_ZBUF: 1908 if (bpf_zerocopy_enable) 1909 break; 1910 /* FALLSTHROUGH */ 1911 1912 default: 1913 CURVNET_RESTORE(); 1914 return (EINVAL); 1915 } 1916 1917 BPFD_LOCK(d); 1918 if (d->bd_sbuf != NULL || d->bd_hbuf != NULL || 1919 d->bd_fbuf != NULL || d->bd_bif != NULL) { 1920 BPFD_UNLOCK(d); 1921 CURVNET_RESTORE(); 1922 return (EBUSY); 1923 } 1924 d->bd_bufmode = *(u_int *)addr; 1925 BPFD_UNLOCK(d); 1926 break; 1927 1928 case BIOCGETZMAX: 1929 error = bpf_ioctl_getzmax(td, d, (size_t *)addr); 1930 break; 1931 1932 case BIOCSETZBUF: 1933 error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr); 1934 break; 1935 1936 case BIOCROTZBUF: 1937 error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr); 1938 break; 1939 1940 case BIOCSETVLANPCP: 1941 { 1942 u_int pcp; 1943 1944 pcp = *(u_int *)addr; 1945 if (pcp > BPF_PRIO_MAX || pcp < 0) { 1946 error = EINVAL; 1947 break; 1948 } 1949 d->bd_pcp = pcp; 1950 break; 1951 } 1952 } 1953 CURVNET_RESTORE(); 1954 return (error); 1955 } 1956 1957 /* 1958 * Set d's packet filter program to fp. If this file already has a filter, 1959 * free it and replace it. Returns EINVAL for bogus requests. 1960 * 1961 * Note we use global lock here to serialize bpf_setf() and bpf_setif() 1962 * calls. 
1963 */ 1964 static int 1965 bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd) 1966 { 1967 #ifdef COMPAT_FREEBSD32 1968 struct bpf_program fp_swab; 1969 struct bpf_program32 *fp32; 1970 #endif 1971 struct bpf_program_buffer *fcode; 1972 struct bpf_insn *filter; 1973 #ifdef BPF_JITTER 1974 bpf_jit_filter *jfunc; 1975 #endif 1976 size_t size; 1977 u_int flen; 1978 bool track_event; 1979 1980 #ifdef COMPAT_FREEBSD32 1981 switch (cmd) { 1982 case BIOCSETF32: 1983 case BIOCSETWF32: 1984 case BIOCSETFNR32: 1985 fp32 = (struct bpf_program32 *)fp; 1986 fp_swab.bf_len = fp32->bf_len; 1987 fp_swab.bf_insns = 1988 (struct bpf_insn *)(uintptr_t)fp32->bf_insns; 1989 fp = &fp_swab; 1990 switch (cmd) { 1991 case BIOCSETF32: 1992 cmd = BIOCSETF; 1993 break; 1994 case BIOCSETWF32: 1995 cmd = BIOCSETWF; 1996 break; 1997 } 1998 break; 1999 } 2000 #endif 2001 2002 filter = NULL; 2003 #ifdef BPF_JITTER 2004 jfunc = NULL; 2005 #endif 2006 /* 2007 * Check new filter validness before acquiring any locks. 2008 * Allocate memory for new filter, if needed. 2009 */ 2010 flen = fp->bf_len; 2011 if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0)) 2012 return (EINVAL); 2013 size = flen * sizeof(*fp->bf_insns); 2014 if (size > 0) { 2015 /* We're setting up new filter. Copy and check actual data. */ 2016 fcode = bpf_program_buffer_alloc(size, M_WAITOK); 2017 filter = (struct bpf_insn *)fcode->buffer; 2018 if (copyin(fp->bf_insns, filter, size) != 0 || 2019 !bpf_validate(filter, flen)) { 2020 free(fcode, M_BPF); 2021 return (EINVAL); 2022 } 2023 #ifdef BPF_JITTER 2024 if (cmd != BIOCSETWF) { 2025 /* 2026 * Filter is copied inside fcode and is 2027 * perfectly valid. 2028 */ 2029 jfunc = bpf_jitter(filter, flen); 2030 } 2031 #endif 2032 } 2033 2034 track_event = false; 2035 fcode = NULL; 2036 2037 BPF_LOCK(); 2038 BPFD_LOCK(d); 2039 /* Set up new filter. */ 2040 if (cmd == BIOCSETWF) { 2041 if (d->bd_wfilter != NULL) { 2042 fcode = __containerof((void *)d->bd_wfilter, 2043 struct bpf_program_buffer, buffer); 2044 #ifdef BPF_JITTER 2045 fcode->func = NULL; 2046 #endif 2047 } 2048 d->bd_wfilter = filter; 2049 } else { 2050 if (d->bd_rfilter != NULL) { 2051 fcode = __containerof((void *)d->bd_rfilter, 2052 struct bpf_program_buffer, buffer); 2053 #ifdef BPF_JITTER 2054 fcode->func = d->bd_bfilter; 2055 #endif 2056 } 2057 d->bd_rfilter = filter; 2058 #ifdef BPF_JITTER 2059 d->bd_bfilter = jfunc; 2060 #endif 2061 if (cmd == BIOCSETF) 2062 reset_d(d); 2063 2064 if (bpf_check_upgrade(cmd, d, filter, flen) != 0) { 2065 /* 2066 * Filter can be set several times without 2067 * specifying interface. In this case just mark d 2068 * as reader. 2069 */ 2070 d->bd_writer = 0; 2071 if (d->bd_bif != NULL) { 2072 /* 2073 * Remove descriptor from writers-only list 2074 * and add it to active readers list. 2075 */ 2076 CK_LIST_REMOVE(d, bd_next); 2077 CK_LIST_INSERT_HEAD(&d->bd_bif->bif_dlist, 2078 d, bd_next); 2079 CTR2(KTR_NET, 2080 "%s: upgrade required by pid %d", 2081 __func__, d->bd_pid); 2082 track_event = true; 2083 } 2084 } 2085 } 2086 BPFD_UNLOCK(d); 2087 2088 if (fcode != NULL) 2089 NET_EPOCH_CALL(bpf_program_buffer_free, &fcode->epoch_ctx); 2090 2091 if (track_event) 2092 EVENTHANDLER_INVOKE(bpf_track, 2093 d->bd_bif->bif_ifp, d->bd_bif->bif_dlt, 1); 2094 2095 BPF_UNLOCK(); 2096 return (0); 2097 } 2098 2099 /* 2100 * Detach a file from its current interface (if attached at all) and attach 2101 * to the interface indicated by the name stored in ifr. 2102 * Return an errno or 0. 
2103 */ 2104 static int 2105 bpf_setif(struct bpf_d *d, struct ifreq *ifr) 2106 { 2107 struct bpf_if *bp; 2108 struct ifnet *theywant; 2109 2110 BPF_LOCK_ASSERT(); 2111 2112 theywant = ifunit(ifr->ifr_name); 2113 if (theywant == NULL || theywant->if_bpf == NULL) 2114 return (ENXIO); 2115 2116 bp = theywant->if_bpf; 2117 /* 2118 * At this point, we expect the buffer is already allocated. If not, 2119 * return an error. 2120 */ 2121 switch (d->bd_bufmode) { 2122 case BPF_BUFMODE_BUFFER: 2123 case BPF_BUFMODE_ZBUF: 2124 if (d->bd_sbuf == NULL) 2125 return (EINVAL); 2126 break; 2127 2128 default: 2129 panic("bpf_setif: bufmode %d", d->bd_bufmode); 2130 } 2131 if (bp != d->bd_bif) 2132 bpf_attachd(d, bp); 2133 else { 2134 BPFD_LOCK(d); 2135 reset_d(d); 2136 BPFD_UNLOCK(d); 2137 } 2138 return (0); 2139 } 2140 2141 /* 2142 * Support for select() and poll() system calls 2143 * 2144 * Return true iff the specific operation will not block indefinitely. 2145 * Otherwise, return false but make a note that a selwakeup() must be done. 2146 */ 2147 static int 2148 bpfpoll(struct cdev *dev, int events, struct thread *td) 2149 { 2150 struct bpf_d *d; 2151 int revents; 2152 2153 if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL) 2154 return (events & 2155 (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM)); 2156 2157 /* 2158 * Refresh PID associated with this descriptor. 2159 */ 2160 revents = events & (POLLOUT | POLLWRNORM); 2161 BPFD_LOCK(d); 2162 BPF_PID_REFRESH(d, td); 2163 if (events & (POLLIN | POLLRDNORM)) { 2164 if (bpf_ready(d)) 2165 revents |= events & (POLLIN | POLLRDNORM); 2166 else { 2167 selrecord(td, &d->bd_sel); 2168 /* Start the read timeout if necessary. */ 2169 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) { 2170 callout_reset(&d->bd_callout, d->bd_rtout, 2171 bpf_timed_out, d); 2172 d->bd_state = BPF_WAITING; 2173 } 2174 } 2175 } 2176 BPFD_UNLOCK(d); 2177 return (revents); 2178 } 2179 2180 /* 2181 * Support for kevent() system call. Register EVFILT_READ filters and 2182 * reject all others. 2183 */ 2184 int 2185 bpfkqfilter(struct cdev *dev, struct knote *kn) 2186 { 2187 struct bpf_d *d; 2188 2189 if (devfs_get_cdevpriv((void **)&d) != 0 || 2190 kn->kn_filter != EVFILT_READ) 2191 return (1); 2192 2193 /* 2194 * Refresh PID associated with this descriptor. 2195 */ 2196 BPFD_LOCK(d); 2197 BPF_PID_REFRESH_CUR(d); 2198 kn->kn_fop = &bpfread_filtops; 2199 kn->kn_hook = d; 2200 knlist_add(&d->bd_sel.si_note, kn, 1); 2201 BPFD_UNLOCK(d); 2202 2203 return (0); 2204 } 2205 2206 static void 2207 filt_bpfdetach(struct knote *kn) 2208 { 2209 struct bpf_d *d = (struct bpf_d *)kn->kn_hook; 2210 2211 knlist_remove(&d->bd_sel.si_note, kn, 0); 2212 } 2213 2214 static int 2215 filt_bpfread(struct knote *kn, long hint) 2216 { 2217 struct bpf_d *d = (struct bpf_d *)kn->kn_hook; 2218 int ready; 2219 2220 BPFD_LOCK_ASSERT(d); 2221 ready = bpf_ready(d); 2222 if (ready) { 2223 kn->kn_data = d->bd_slen; 2224 /* 2225 * Ignore the hold buffer if it is being copied to user space. 
		 */
		if (!d->bd_hbuf_in_use && d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}

	return (ready);
}

#define	BPF_TSTAMP_NONE		0
#define	BPF_TSTAMP_FAST		1
#define	BPF_TSTAMP_NORMAL	2
#define	BPF_TSTAMP_EXTERN	3

static int
bpf_ts_quality(int tstype)
{

	if (tstype == BPF_T_NONE)
		return (BPF_TSTAMP_NONE);
	if ((tstype & BPF_T_FAST) != 0)
		return (BPF_TSTAMP_FAST);

	return (BPF_TSTAMP_NORMAL);
}

static int
bpf_gettime(struct bintime *bt, int tstype, struct mbuf *m)
{
	struct m_tag *tag;
	int quality;

	quality = bpf_ts_quality(tstype);
	if (quality == BPF_TSTAMP_NONE)
		return (quality);

	if (m != NULL) {
		tag = m_tag_locate(m, MTAG_BPF, MTAG_BPF_TIMESTAMP, NULL);
		if (tag != NULL) {
			*bt = *(struct bintime *)(tag + 1);
			return (BPF_TSTAMP_EXTERN);
		}
	}
	if (quality == BPF_TSTAMP_NORMAL)
		binuptime(bt);
	else
		getbinuptime(bt);

	return (quality);
}

/*
 * Incoming linkage from device drivers. Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer. The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct epoch_tracker et;
	struct bintime bt;
	struct bpf_d *d;
#ifdef BPF_JITTER
	bpf_jit_filter *bf;
#endif
	u_int slen;
	int gottime;

	gottime = BPF_TSTAMP_NONE;
	NET_EPOCH_ENTER(et);
	CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		counter_u64_add(d->bd_rcount, 1);
		/*
		 * NB: We don't call BPF_CHECK_DIRECTION() here since there
		 * is no way for the caller to indicate to us whether this
		 * packet is inbound or outbound. In the bpf_mtap() routines,
		 * we use the interface pointers on the mbuf to figure it out.
		 */
#ifdef BPF_JITTER
		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
		if (bf != NULL)
			slen = (*(bf->func))(pkt, pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			/*
			 * Filter matches. Acquire the descriptor lock.
			 */
			BPFD_LOCK(d);
			counter_u64_add(d->bd_fcount, 1);
			if (gottime < bpf_ts_quality(d->bd_tstamp))
				gottime = bpf_gettime(&bt, d->bd_tstamp,
				    NULL);
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen,
				    bpf_append_bytes, &bt);
			BPFD_UNLOCK(d);
		}
	}
	NET_EPOCH_EXIT(et);
}

#define	BPF_CHECK_DIRECTION(d, r, i)				\
	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||	\
	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 * Locking model is explained in bpf_tap().
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct epoch_tracker et;
	struct bintime bt;
	struct bpf_d *d;
#ifdef BPF_JITTER
	bpf_jit_filter *bf;
#endif
	u_int pktlen, slen;
	int gottime;

	/*
	 * Skip outgoing duplicate packets.
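	 * An mbuf carrying M_PROMISC with no receive interface is a
	 * transmit-side copy that an earlier tap point has already seen;
	 * clear the flag and drop the duplicate here.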
	 */
	if ((m->m_flags & M_PROMISC) != 0 && m_rcvif(m) == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m, NULL);
	gottime = BPF_TSTAMP_NONE;

	NET_EPOCH_ENTER(et);
	CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m_rcvif(m), bp->bif_ifp))
			continue;
		counter_u64_add(d->bd_rcount, 1);
#ifdef BPF_JITTER
		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
		/* XXX We cannot handle multiple mbufs. */
		if (bf != NULL && m->m_next == NULL)
			slen = (*(bf->func))(mtod(m, u_char *), pktlen,
			    pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			BPFD_LOCK(d);

			counter_u64_add(d->bd_fcount, 1);
			if (gottime < bpf_ts_quality(d->bd_tstamp))
				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_append_mbuf, &bt);
			BPFD_UNLOCK(d);
		}
	}
	NET_EPOCH_EXIT(et);
}

/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	struct epoch_tracker et;
	struct bintime bt;
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m, NULL);
	/*
	 * Craft an on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only set up what's
	 * absolutely needed; this mbuf should never go anywhere else.
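	 * Passing a zero buflen to bpf_filter() below selects its
	 * mbuf-walking mode, so only m_next, m_data, m_len, and m_flags
	 * need to be initialized.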
	 */
	mb.m_flags = 0;
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	gottime = BPF_TSTAMP_NONE;

	NET_EPOCH_ENTER(et);
	CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		counter_u64_add(d->bd_rcount, 1);
		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
		if (slen != 0) {
			BPFD_LOCK(d);

			counter_u64_add(d->bd_fcount, 1);
			if (gottime < bpf_ts_quality(d->bd_tstamp))
				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_append_mbuf, &bt);
			BPFD_UNLOCK(d);
		}
	}
	NET_EPOCH_EXIT(et);
}

#undef	BPF_CHECK_DIRECTION
#undef	BPF_TSTAMP_NONE
#undef	BPF_TSTAMP_FAST
#undef	BPF_TSTAMP_NORMAL
#undef	BPF_TSTAMP_EXTERN

static int
bpf_hdrlen(struct bpf_d *d)
{
	int hdrlen;

	hdrlen = d->bd_bif->bif_hdrlen;
#ifndef BURN_BRIDGES
	if (d->bd_tstamp == BPF_T_NONE ||
	    BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME)
#ifdef COMPAT_FREEBSD32
		if (d->bd_compat32)
			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr32);
		else
#endif
			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr);
	else
#endif
		hdrlen += SIZEOF_BPF_HDR(struct bpf_xhdr);
#ifdef COMPAT_FREEBSD32
	if (d->bd_compat32)
		hdrlen = BPF_WORDALIGN32(hdrlen);
	else
#endif
		hdrlen = BPF_WORDALIGN(hdrlen);

	return (hdrlen - d->bd_bif->bif_hdrlen);
}

static void
bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype)
{
	struct bintime bt2, boottimebin;
	struct timeval tsm;
	struct timespec tsn;

	if ((tstype & BPF_T_MONOTONIC) == 0) {
		bt2 = *bt;
		getboottimebin(&boottimebin);
		bintime_add(&bt2, &boottimebin);
		bt = &bt2;
	}
	switch (BPF_T_FORMAT(tstype)) {
	case BPF_T_MICROTIME:
		bintime2timeval(bt, &tsm);
		ts->bt_sec = tsm.tv_sec;
		ts->bt_frac = tsm.tv_usec;
		break;
	case BPF_T_NANOTIME:
		bintime2timespec(bt, &tsn);
		ts->bt_sec = tsn.tv_sec;
		ts->bt_frac = tsn.tv_nsec;
		break;
	case BPF_T_BINTIME:
		ts->bt_sec = bt->sec;
		ts->bt_frac = bt->frac;
		break;
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer. "cpfn" is the routine called to do the actual data
 * transfer. bpf_append_bytes is passed in to copy contiguous chunks,
 * while bpf_append_mbuf is passed in to copy mbuf chains. In the
 * latter case, pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
    struct bintime *bt)
{
	struct bpf_xhdr hdr;
#ifndef BURN_BRIDGES
	struct bpf_hdr hdr_old;
#ifdef COMPAT_FREEBSD32
	struct bpf_hdr32 hdr32_old;
#endif
#endif
	int caplen, curlen, hdrlen, totlen;
	int do_wakeup = 0;
	int do_timestamp;
	int tstype;

	BPFD_LOCK_ASSERT(d);
	if (d->bd_bif == NULL) {
		/* Descriptor was detached by a concurrent thread. */
		counter_u64_add(d->bd_dcount, 1);
		return;
	}

	/*
	 * Detect whether user space has released a buffer back to us, and if
	 * so, move it from being a hold buffer to a free buffer.
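	 * (bpf_canfreebuf() is a buffer-model hook; it can report a released
	 * buffer only for modes such as zero-copy, where user space can hand
	 * one back without entering the kernel.)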
	 * This may not be the best place to do it (for example, we might
	 * only want to run this check if we need the space), but for now
	 * it's a reliable spot to do it.
	 */
	if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}

	/*
	 * Figure out how many bytes to move. If the packet is
	 * greater than or equal to the snapshot length, transfer that
	 * much. Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	hdrlen = bpf_hdrlen(d);
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 *
	 * Drop the packet if there's no room and no hope of room.
	 * If the packet would overflow the storage buffer or the storage
	 * buffer is considered immutable by the buffer model, try to rotate
	 * the buffer and wake up pending processes.
	 */
#ifdef COMPAT_FREEBSD32
	if (d->bd_compat32)
		curlen = BPF_WORDALIGN32(d->bd_slen);
	else
#endif
		curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
		if (d->bd_fbuf == NULL) {
			/*
			 * There's no room in the store buffer, and no
			 * prospect of room, so drop the packet. Notify the
			 * buffer model.
			 */
			bpf_buffull(d);
			counter_u64_add(d->bd_dcount, 1);
			return;
		}
		KASSERT(!d->bd_hbuf_in_use, ("hold buffer is in use"));
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has already
		 * expired during a select call. A packet arrived, so the
		 * reader should be woken up.
		 */
		do_wakeup = 1;
	caplen = totlen - hdrlen;
	tstype = d->bd_tstamp;
	do_timestamp = tstype != BPF_T_NONE;
#ifndef BURN_BRIDGES
	if (tstype == BPF_T_NONE || BPF_T_FORMAT(tstype) == BPF_T_MICROTIME) {
		struct bpf_ts ts;
		if (do_timestamp)
			bpf_bintime2ts(bt, &ts, tstype);
#ifdef COMPAT_FREEBSD32
		if (d->bd_compat32) {
			bzero(&hdr32_old, sizeof(hdr32_old));
			if (do_timestamp) {
				hdr32_old.bh_tstamp.tv_sec = ts.bt_sec;
				hdr32_old.bh_tstamp.tv_usec = ts.bt_frac;
			}
			hdr32_old.bh_datalen = pktlen;
			hdr32_old.bh_hdrlen = hdrlen;
			hdr32_old.bh_caplen = caplen;
			bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
			    sizeof(hdr32_old));
			goto copy;
		}
#endif
		bzero(&hdr_old, sizeof(hdr_old));
		if (do_timestamp) {
			hdr_old.bh_tstamp.tv_sec = ts.bt_sec;
			hdr_old.bh_tstamp.tv_usec = ts.bt_frac;
		}
		hdr_old.bh_datalen = pktlen;
		hdr_old.bh_hdrlen = hdrlen;
		hdr_old.bh_caplen = caplen;
		bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
		    sizeof(hdr_old));
		goto copy;
	}
#endif

	/*
	 * Append the bpf header. Note we append the actual header size, but
	 * move forward the length of the header plus padding.
	 */
	bzero(&hdr, sizeof(hdr));
	if (do_timestamp)
		bpf_bintime2ts(bt, &hdr.bh_tstamp, tstype);
	hdr.bh_datalen = pktlen;
	hdr.bh_hdrlen = hdrlen;
	hdr.bh_caplen = caplen;
	bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));

	/*
	 * Copy the packet data into the store buffer and update its length.
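	 * cpfn resolves to bpf_append_bytes() for contiguous packets and to
	 * bpf_append_mbuf() for mbuf chains; either way bd_slen grows by the
	 * word-aligned header plus capture length computed above.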
	 */
#ifndef BURN_BRIDGES
copy:
#endif
	(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
	d->bd_slen = curlen + totlen;

	if (do_wakeup)
		bpf_wakeup(d);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpfd_free(epoch_context_t ctx)
{
	struct bpf_d *d;
	struct bpf_program_buffer *p;

	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	d = __containerof(ctx, struct bpf_d, epoch_ctx);
	bpf_free(d);
	if (d->bd_rfilter != NULL) {
		p = __containerof((void *)d->bd_rfilter,
		    struct bpf_program_buffer, buffer);
#ifdef BPF_JITTER
		p->func = d->bd_bfilter;
#endif
		bpf_program_buffer_free(&p->epoch_ctx);
	}
	if (d->bd_wfilter != NULL) {
		p = __containerof((void *)d->bd_wfilter,
		    struct bpf_program_buffer, buffer);
#ifdef BPF_JITTER
		p->func = NULL;
#endif
		bpf_program_buffer_free(&p->epoch_ctx);
	}

	mtx_destroy(&d->bd_lock);
	counter_u64_free(d->bd_rcount);
	counter_u64_free(d->bd_dcount);
	counter_u64_free(d->bd_fcount);
	counter_u64_free(d->bd_wcount);
	counter_u64_free(d->bd_wfcount);
	counter_u64_free(d->bd_wdcount);
	counter_u64_free(d->bd_zcopy);
	free(d, M_BPF);
}

/*
 * Attach an interface to bpf. dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

/*
 * Attach an interface to bpf. ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen,
    struct bpf_if **driverp)
{
	struct bpf_if *bp;

	KASSERT(*driverp == NULL,
	    ("bpfattach2: driverp already initialized"));

	bp = malloc(sizeof(*bp), M_BPF, M_WAITOK | M_ZERO);

	CK_LIST_INIT(&bp->bif_dlist);
	CK_LIST_INIT(&bp->bif_wlist);
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;
	bp->bif_hdrlen = hdrlen;
	bp->bif_bpf = driverp;
	bp->bif_refcnt = 1;
	*driverp = bp;
	/*
	 * Reference the ifnet pointer, so it won't be freed until
	 * we release it.
	 */
	if_ref(ifp);
	BPF_LOCK();
	CK_LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
	BPF_UNLOCK();

	if (bootverbose && IS_DEFAULT_VNET(curvnet))
		if_printf(ifp, "bpf attached\n");
}

#ifdef VIMAGE
/*
 * When moving interfaces between vnet instances we need a way to
 * query the dlt and hdrlen before detach so we can re-attach the if_bpf
 * after the vmove. We unfortunately have no device driver infrastructure
 * to query the interface for these values after creation/attach, thus
 * add this as a workaround.
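 *
 * A hedged sketch of the expected caller sequence around a vmove (the
 * surrounding steps are illustrative, not the exact if_vmove() code):
 *
 *	u_int dlt = 0, hdrlen = 0;
 *
 *	if (bpf_get_bp_params(ifp->if_bpf, &dlt, &hdrlen) == 0) {
 *		bpfdetach(ifp);
 *		... move ifp into the target vnet ...
 *		bpfattach(ifp, dlt, hdrlen);
 *	}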
 */
int
bpf_get_bp_params(struct bpf_if *bp, u_int *bif_dlt, u_int *bif_hdrlen)
{

	if (bp == NULL)
		return (ENXIO);
	if (bif_dlt == NULL && bif_hdrlen == NULL)
		return (0);

	if (bif_dlt != NULL)
		*bif_dlt = bp->bif_dlt;
	if (bif_hdrlen != NULL)
		*bif_hdrlen = bp->bif_hdrlen;

	return (0);
}
#endif

/*
 * Detach bpf from an interface. This involves detaching each descriptor
 * associated with the interface. Notify each descriptor as it's detached
 * so that any sleepers wake up and get ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp, *bp_temp;
	struct bpf_d *d;

	BPF_LOCK();
	/* Find all bpf_if structs which reference ifp and detach them. */
	CK_LIST_FOREACH_SAFE(bp, &bpf_iflist, bif_next, bp_temp) {
		if (ifp != bp->bif_ifp)
			continue;

		CK_LIST_REMOVE(bp, bif_next);
		*bp->bif_bpf = (struct bpf_if *)&dead_bpf_if;

		CTR4(KTR_NET,
		    "%s: scheduling free for encap %d (%p) for if %p",
		    __func__, bp->bif_dlt, bp, ifp);

		/* Detach common descriptors */
		while ((d = CK_LIST_FIRST(&bp->bif_dlist)) != NULL) {
			bpf_detachd_locked(d, true);
		}

		/* Detach writer-only descriptors */
		while ((d = CK_LIST_FIRST(&bp->bif_wlist)) != NULL) {
			bpf_detachd_locked(d, true);
		}
		bpfif_rele(bp);
	}
	BPF_UNLOCK();
}

/*
 * Get a list of the available data link types for the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	struct ifnet *ifp;
	struct bpf_if *bp;
	u_int *lst;
	int error, n, n1;

	BPF_LOCK_ASSERT();

	ifp = d->bd_bif->bif_ifp;
	n1 = 0;
	CK_LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp == ifp)
			n1++;
	}
	if (bfl->bfl_list == NULL) {
		bfl->bfl_len = n1;
		return (0);
	}
	if (n1 > bfl->bfl_len)
		return (ENOMEM);

	lst = malloc(n1 * sizeof(u_int), M_TEMP, M_WAITOK);
	n = 0;
	CK_LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		lst[n++] = bp->bif_dlt;
	}
	error = copyout(lst, bfl->bfl_list, sizeof(u_int) * n);
	free(lst, M_TEMP);
	bfl->bfl_len = n;
	return (error);
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	BPF_LOCK_ASSERT();
	MPASS(d->bd_bif != NULL);

	/*
	 * It is safe to check bd_bif without BPFD_LOCK; it cannot be
	 * changed while we hold the global lock.
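	 * (bd_bif only changes in the attach/detach paths, which also run
	 * under the global lock.) From user space the available types are
	 * read with BIOCGDLTLIST and changed with BIOCSDLT; a hedged
	 * userland sketch:
	 *
	 *	struct bpf_dltlist bfl = { 0, NULL };
	 *	u_int dlt;
	 *
	 *	ioctl(fd, BIOCGDLTLIST, &bfl);	(query the count)
	 *	bfl.bfl_list = calloc(bfl.bfl_len, sizeof(u_int));
	 *	ioctl(fd, BIOCGDLTLIST, &bfl);	(fetch the DLTs)
	 *	dlt = bfl.bfl_list[0];
	 *	ioctl(fd, BIOCSDLT, &dlt);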
	 */
	if (d->bd_bif->bif_dlt == dlt)
		return (0);

	ifp = d->bd_bif->bif_ifp;
	CK_LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	if (bp == NULL)
		return (EINVAL);

	opromisc = d->bd_promisc;
	bpf_attachd(d, bp);
	if (opromisc) {
		error = ifpromisc(bp->bif_ifp, 1);
		if (error)
			if_printf(bp->bif_ifp, "%s: ifpromisc failed (%d)\n",
			    __func__, error);
		else
			d->bd_promisc = 1;
	}
	return (0);
}

static void
bpf_drvinit(void *unused)
{
	struct cdev *dev;

	sx_init(&bpf_sx, "bpf global lock");
	CK_LIST_INIT(&bpf_iflist);

	dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
	/* For compatibility */
	make_dev_alias(dev, "bpf0");
}

/*
 * Zero out the various packet counters associated with all of the bpf
 * descriptors. At some point, we will probably want to get a bit more
 * granular and allow the user to specify descriptors to be zeroed.
 */
static void
bpf_zero_counters(void)
{
	struct bpf_if *bp;
	struct bpf_d *bd;

	BPF_LOCK();
	/*
	 * We are protected by the global lock here; interfaces and
	 * descriptors cannot be deleted while we hold it.
	 */
	CK_LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		CK_LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			counter_u64_zero(bd->bd_rcount);
			counter_u64_zero(bd->bd_dcount);
			counter_u64_zero(bd->bd_fcount);
			counter_u64_zero(bd->bd_wcount);
			counter_u64_zero(bd->bd_wfcount);
			counter_u64_zero(bd->bd_zcopy);
		}
	}
	BPF_UNLOCK();
}

/*
 * Fill filter statistics.
 */
static void
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
{

	BPF_LOCK_ASSERT();
	bzero(d, sizeof(*d));
	d->bd_structsize = sizeof(*d);
	d->bd_immediate = bd->bd_immediate;
	d->bd_promisc = bd->bd_promisc;
	d->bd_hdrcmplt = bd->bd_hdrcmplt;
	d->bd_direction = bd->bd_direction;
	d->bd_feedback = bd->bd_feedback;
	d->bd_async = bd->bd_async;
	d->bd_rcount = counter_u64_fetch(bd->bd_rcount);
	d->bd_dcount = counter_u64_fetch(bd->bd_dcount);
	d->bd_fcount = counter_u64_fetch(bd->bd_fcount);
	d->bd_sig = bd->bd_sig;
	d->bd_slen = bd->bd_slen;
	d->bd_hlen = bd->bd_hlen;
	d->bd_bufsize = bd->bd_bufsize;
	d->bd_pid = bd->bd_pid;
	strlcpy(d->bd_ifname,
	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
	d->bd_locked = bd->bd_locked;
	d->bd_wcount = counter_u64_fetch(bd->bd_wcount);
	d->bd_wdcount = counter_u64_fetch(bd->bd_wdcount);
	d->bd_wfcount = counter_u64_fetch(bd->bd_wfcount);
	d->bd_zcopy = counter_u64_fetch(bd->bd_zcopy);
	d->bd_bufmode = bd->bd_bufmode;
}

/*
 * Handle `netstat -B' stats request.
 */
static int
bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	static const struct xbpf_d zerostats;
	struct xbpf_d *xbdbuf, *xbd, tempstats;
	int index, error;
	struct bpf_if *bp;
	struct bpf_d *bd;

	/*
	 * XXX This is not technically correct. It is possible for
	 * non-privileged users to open bpf devices. It would make sense
	 * if the users who opened the devices were able to retrieve
	 * the statistics for them, too.
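	 *
	 * A hedged sketch of how a consumer such as netstat(1) would read
	 * these stats (the OID name assumes the SYSCTL_PROC registration
	 * for this handler; it is not shown here):
	 *
	 *	size_t len = 0;
	 *	struct xbpf_d *buf;
	 *
	 *	sysctlbyname("net.bpf.stats", NULL, &len, NULL, 0);
	 *	buf = malloc(len);
	 *	sysctlbyname("net.bpf.stats", buf, &len, NULL, 0);
	 *	for (size_t i = 0; i < len / sizeof(*buf); i++)
	 *		printf("%s: %ju pkts\n", buf[i].bd_ifname,
	 *		    (uintmax_t)buf[i].bd_rcount);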
	 */
	error = priv_check(req->td, PRIV_NET_BPF);
	if (error)
		return (error);
	/*
	 * Check to see if the user is requesting that the counters be
	 * zeroed out. Explicitly check that the supplied data is zeroed,
	 * as we aren't allowing the user to set the counters currently.
	 */
	if (req->newptr != NULL) {
		if (req->newlen != sizeof(tempstats))
			return (EINVAL);
		memset(&tempstats, 0, sizeof(tempstats));
		error = SYSCTL_IN(req, &tempstats, sizeof(tempstats));
		if (error)
			return (error);
		if (bcmp(&tempstats, &zerostats, sizeof(tempstats)) != 0)
			return (EINVAL);
		bpf_zero_counters();
		return (0);
	}
	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
	if (bpf_bpfd_cnt == 0)
		return (SYSCTL_OUT(req, 0, 0));
	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
	BPF_LOCK();
	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
		BPF_UNLOCK();
		free(xbdbuf, M_BPF);
		return (ENOMEM);
	}
	index = 0;
	CK_LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		/* Send writers-only first */
		CK_LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
			xbd = &xbdbuf[index++];
			bpfstats_fill_xbpf(xbd, bd);
		}
		CK_LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			xbd = &xbdbuf[index++];
			bpfstats_fill_xbpf(xbd, bd);
		}
	}
	BPF_UNLOCK();
	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
	free(xbdbuf, M_BPF);
	return (error);
}

SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);

#else /* !DEV_BPF && !NETGRAPH_BPF */

/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */

void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}

void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}

void
bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
{
}

void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{

	*driverp = (struct bpf_if *)&dead_bpf_if;
}

void
bpfdetach(struct ifnet *ifp)
{
}

u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
	return (-1);	/* "no filter" behaviour */
}

int
bpf_validate(const struct bpf_insn *f, int len)
{
	return (0);	/* false */
}

#endif /* !DEV_BPF && !NETGRAPH_BPF */

#ifdef DDB
static void
bpf_show_bpf_if(struct bpf_if *bpf_if)
{

	if (bpf_if == NULL)
		return;
	db_printf("%p:\n", bpf_if);
#define	BPF_DB_PRINTF(f, e)	db_printf(" %s = " f "\n", #e, bpf_if->e);
	/* bif_ext.bif_next */
	/* bif_ext.bif_dlist */
	BPF_DB_PRINTF("%#x", bif_dlt);
	BPF_DB_PRINTF("%u", bif_hdrlen);
	/* bif_wlist */
	BPF_DB_PRINTF("%p", bif_ifp);
	BPF_DB_PRINTF("%p", bif_bpf);
	BPF_DB_PRINTF("%u", bif_refcnt);
}

DB_SHOW_COMMAND(bpf_if, db_show_bpf_if)
{

	if (!have_addr) {
		db_printf("usage: show bpf_if <struct bpf_if *>\n");
		return;
	}

	bpf_show_bpf_if((struct bpf_if *)addr);
}
#endif