/*	$OpenBSD: if_bridge.c,v 1.368 2023/05/16 14:32:54 jan Exp $	*/

/*
 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include "bpfilter.h"
#include "gif.h"
#include "pf.h"
#include "carp.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_llc.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip_icmp.h>

#ifdef IPSEC
#include <netinet/ip_ipsp.h>
#include <net/if_enc.h>
#endif

#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif

#if NPF > 0
#include <net/pfvar.h>
#define	BRIDGE_IN	PF_IN
#define	BRIDGE_OUT	PF_OUT
#else
#define	BRIDGE_IN	0
#define	BRIDGE_OUT	1
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NCARP > 0
#include <netinet/ip_carp.h>
#endif

#if NVLAN > 0
#include <net/if_vlan_var.h>
#endif

#include <net/if_bridge.h>

/*
 * Maximum number of addresses to cache
 */
#ifndef	BRIDGE_RTABLE_MAX
#define	BRIDGE_RTABLE_MAX	100
#endif

/*
 * Timeout (in seconds) for entries learned dynamically
 */
#ifndef	BRIDGE_RTABLE_TIMEOUT
#define	BRIDGE_RTABLE_TIMEOUT	240
#endif

void	bridgeattach(int);
int	bridge_ioctl(struct ifnet *, u_long, caddr_t);
void	bridge_ifdetach(void *);
void	bridge_spandetach(void *);
int	bridge_ifremove(struct bridge_iflist *);
void	bridge_spanremove(struct bridge_iflist *);
struct mbuf *
	bridge_input(struct ifnet *, struct mbuf *, uint64_t, void *);
void	bridge_process(struct ifnet *, struct mbuf *);
void	bridgeintr_frame(struct ifnet *, struct ifnet *, struct mbuf *);
void	bridge_bifgetstp(struct bridge_softc *, struct bridge_iflist *,
    struct ifbreq *);
void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
    struct ether_header *, struct mbuf *);
int	bridge_localbroadcast(struct ifnet *, struct ether_header *,
    struct mbuf *);
void	bridge_span(struct ifnet *, struct mbuf *);
void	bridge_stop(struct bridge_softc *);
void	bridge_init(struct bridge_softc *);
int	bridge_bifconf(struct bridge_softc *, struct ifbifconf *);
int	bridge_blocknonip(struct ether_header *, struct mbuf *);
void	bridge_ifinput(struct ifnet *, struct mbuf *);
int	bridge_dummy_output(struct ifnet *, struct mbuf *, struct sockaddr *,
    struct rtentry *);
void	bridge_send_icmp_err(struct ifnet *, struct ether_header *,
    struct mbuf *, int, struct llc *, int, int, int);
int	bridge_ifenqueue(struct ifnet *, struct ifnet *, struct mbuf *);
struct mbuf *bridge_ip(struct ifnet *, int, struct ifnet *,
    struct ether_header *, struct mbuf *);
#ifdef IPSEC
int	bridge_ipsec(struct ifnet *, struct ether_header *, int, struct llc *,
    int, int, int, struct mbuf *);
#endif
int	bridge_clone_create(struct if_clone *, int);
int	bridge_clone_destroy(struct ifnet *);
void	bridge_take(void *);
void	bridge_rele(void *);

#define	ETHERADDR_IS_IP_MCAST(a) \
	/* struct etheraddr *a;	*/				\
	((a)->ether_addr_octet[0] == 0x01 &&			\
	 (a)->ether_addr_octet[1] == 0x00 &&			\
	 (a)->ether_addr_octet[2] == 0x5e)

struct niqueue bridgeintrq = NIQUEUE_INITIALIZER(1024, NETISR_BRIDGE);

struct if_clone bridge_cloner =
    IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);

const struct ether_brport bridge_brport = {
	bridge_input,
	bridge_take,
	bridge_rele,
	NULL,
};

void
bridgeattach(int n)
{
	if_clone_attach(&bridge_cloner);
}

int
bridge_clone_create(struct if_clone *ifc, int unit)
{
	struct bridge_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	sc->sc_stp = bstp_create();
	if (!sc->sc_stp) {
		free(sc, M_DEVBUF, sizeof *sc);
		return (ENOMEM);
	}

	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
	timeout_set(&sc->sc_brtimeout, bridge_rtage, sc);
	SMR_SLIST_INIT(&sc->sc_iflist);
	SMR_SLIST_INIT(&sc->sc_spanlist);
	mtx_init(&sc->sc_mtx, IPL_MPFLOOR);
	for (i = 0; i < BRIDGE_RTABLE_SIZE; i++)
		LIST_INIT(&sc->sc_rts[i]);
	arc4random_buf(&sc->sc_hashkey, sizeof(sc->sc_hashkey));
	ifp = &sc->sc_if;
	snprintf(ifp->if_xname, sizeof ifp->if_xname, "%s%d", ifc->ifc_name,
	    unit);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_ioctl = bridge_ioctl;
	ifp->if_output = bridge_dummy_output;
	ifp->if_xflags = IFXF_CLONED;
	ifp->if_start = NULL;
	ifp->if_type = IFT_BRIDGE;
	ifp->if_hdrlen = ETHER_HDR_LEN;

	if_attach(ifp);
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(&sc->sc_if.if_bpf, ifp,
	    DLT_EN10MB, ETHER_HDR_LEN);
#endif

	return (0);
}

int
bridge_dummy_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	m_freem(m);
	return (EAFNOSUPPORT);
}

int
bridge_clone_destroy(struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct bridge_iflist *bif;

	/*
	 * bridge(4) detach hook doesn't need the NET_LOCK(); worse, the
	 * use of smr_barrier() while holding the lock might lead to a
	 * deadlock situation.
	 */
	NET_ASSERT_UNLOCKED();

	bridge_stop(sc);
	bridge_rtflush(sc, IFBF_FLUSHALL);
	while ((bif = SMR_SLIST_FIRST_LOCKED(&sc->sc_iflist)) != NULL)
		bridge_ifremove(bif);
	while ((bif = SMR_SLIST_FIRST_LOCKED(&sc->sc_spanlist)) != NULL)
		bridge_spanremove(bif);

	bstp_destroy(sc->sc_stp);

	if_detach(ifp);

	free(sc, M_DEVBUF, sizeof *sc);
	return (0);
}

int
bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifp->if_softc;
	struct ifbreq *req = (struct ifbreq *)data;
	struct ifbropreq *brop = (struct ifbropreq *)data;
	struct ifnet *ifs;
	struct bridge_iflist *bif;
	struct bstp_port *bp;
	struct bstp_state *bs = sc->sc_stp;
	int error = 0;

	/*
	 * bridge(4) data structures aren't protected by the NET_LOCK().
	 * Ideally it shouldn't be taken before calling `ifp->if_ioctl'
	 * but we aren't there yet.  Media ioctls run without the netlock.
	 */
	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return (ENOTTY);
	}
	NET_UNLOCK();

	switch (cmd) {
	case SIOCBRDGADD:
	/* bridge(4) does not distinguish between routing/forwarding ports */
	case SIOCBRDGADDL:
		if ((error = suser(curproc)) != 0)
			break;

		ifs = if_unit(req->ifbr_ifsname);
		if (ifs == NULL) {			/* no such interface */
			error = ENOENT;
			break;
		}
		if (ifs->if_type != IFT_ETHER) {
			if_put(ifs);
			error = EINVAL;
			break;
		}
		if (ifs->if_bridgeidx != 0) {
			if (ifs->if_bridgeidx == ifp->if_index)
				error = EEXIST;
			else
				error = EBUSY;
			if_put(ifs);
			break;
		}

		error = ether_brport_isset(ifs);
		if (error != 0) {
			if_put(ifs);
			break;
		}

		/* If it's in the span list, it can't be a member. */
		SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_spanlist, bif_next) {
			if (bif->ifp == ifs)
				break;
		}
		if (bif != NULL) {
			if_put(ifs);
			error = EBUSY;
			break;
		}

		bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
		if (bif == NULL) {
			if_put(ifs);
			error = ENOMEM;
			break;
		}

		NET_LOCK();
		error = ifpromisc(ifs, 1);
		NET_UNLOCK();
		if (error != 0) {
			if_put(ifs);
			free(bif, M_DEVBUF, sizeof(*bif));
			break;
		}

		/*
		 * XXX If the NET_LOCK() or ifpromisc() calls above
		 * had to sleep, then something else could have come
		 * along and taken over ifs while the kernel lock was
		 * released.
		 */

		NET_LOCK();
		ifsetlro(ifs, 0);
		NET_UNLOCK();

		bif->bridge_sc = sc;
		bif->ifp = ifs;
		bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
		SIMPLEQ_INIT(&bif->bif_brlin);
		SIMPLEQ_INIT(&bif->bif_brlout);
		ifs->if_bridgeidx = ifp->if_index;
		task_set(&bif->bif_dtask, bridge_ifdetach, bif);
		if_detachhook_add(ifs, &bif->bif_dtask);
		ether_brport_set(bif->ifp, &bridge_brport);
		SMR_SLIST_INSERT_HEAD_LOCKED(&sc->sc_iflist, bif, bif_next);
		break;
	case SIOCBRDGDEL:
		if ((error = suser(curproc)) != 0)
			break;
		error = bridge_findbif(sc, req->ifbr_ifsname, &bif);
		if (error != 0)
			break;
		bridge_ifremove(bif);
		break;
	case SIOCBRDGIFS:
		error = bridge_bifconf(sc, (struct ifbifconf *)data);
		break;
	case SIOCBRDGADDS:
		if ((error = suser(curproc)) != 0)
			break;
		ifs = if_unit(req->ifbr_ifsname);
		if (ifs == NULL) {			/* no such interface */
			error = ENOENT;
			break;
		}
		if (ifs->if_type != IFT_ETHER) {
			if_put(ifs);
			error = EINVAL;
			break;
		}
		if (ifs->if_bridgeidx != 0) {
			if (ifs->if_bridgeidx == ifp->if_index)
				error = EEXIST;
			else
				error = EBUSY;
			if_put(ifs);
			break;
		}
		SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_spanlist, bif_next) {
			if (bif->ifp == ifs)
				break;
		}
		if (bif != NULL) {
			if_put(ifs);
			error = EEXIST;
			break;
		}
		bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
		if (bif == NULL) {
			if_put(ifs);
			error = ENOMEM;
			break;
		}

		NET_LOCK();
		ifsetlro(ifs, 0);
		NET_UNLOCK();

		bif->bridge_sc = sc;
		bif->ifp = ifs;
		bif->bif_flags = IFBIF_SPAN;
		SIMPLEQ_INIT(&bif->bif_brlin);
		SIMPLEQ_INIT(&bif->bif_brlout);
		task_set(&bif->bif_dtask, bridge_spandetach, bif);
		if_detachhook_add(ifs, &bif->bif_dtask);
		SMR_SLIST_INSERT_HEAD_LOCKED(&sc->sc_spanlist, bif, bif_next);
		break;
	case SIOCBRDGDELS:
		if ((error = suser(curproc)) != 0)
			break;
		ifs = if_unit(req->ifbr_ifsname);
		if (ifs == NULL) {
			error = ENOENT;
			break;
		}
		SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_spanlist, bif_next) {
			if (bif->ifp == ifs)
				break;
		}
		if_put(ifs);
		if (bif == NULL) {
			error = ESRCH;
			break;
		}
		bridge_spanremove(bif);
		break;
	case SIOCBRDGGIFFLGS:
		error = bridge_findbif(sc, req->ifbr_ifsname, &bif);
		if (error != 0)
			break;
		req->ifbr_ifsflags = bif->bif_flags;
		req->ifbr_portno = bif->ifp->if_index & 0xfff;
		req->ifbr_protected = bif->bif_protected;
		if (bif->bif_flags & IFBIF_STP)
			bridge_bifgetstp(sc, bif, req);
		break;
	case SIOCBRDGSIFFLGS:
		if (req->ifbr_ifsflags & IFBIF_RO_MASK) {
			error = EINVAL;
			break;
		}
		if ((error = suser(curproc)) != 0)
			break;
		error = bridge_findbif(sc, req->ifbr_ifsname, &bif);
		if (error != 0)
			break;
		if (req->ifbr_ifsflags & IFBIF_STP) {
			if ((bif->bif_flags & IFBIF_STP) == 0) {
				/* Enable STP */
				if ((bif->bif_stp = bstp_add(sc->sc_stp,
				    bif->ifp)) == NULL) {
					error = ENOMEM;
					break;
				}
			} else {
				/* Update STP flags */
				bstp_ifsflags(bif->bif_stp, req->ifbr_ifsflags);
			}
		} else if (bif->bif_flags & IFBIF_STP) {
			bstp_delete(bif->bif_stp);
			bif->bif_stp = NULL;
		}
		bif->bif_flags = req->ifbr_ifsflags;
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == IFF_UP)
			bridge_init(sc);

		if ((ifp->if_flags & IFF_UP) == 0)
			bridge_stop(sc);

		break;
	case SIOCBRDGGPARAM:
		if ((bp = bs->bs_root_port) == NULL)
			brop->ifbop_root_port = 0;
		else
			brop->ifbop_root_port = bp->bp_ifindex;
		brop->ifbop_maxage = bs->bs_bridge_max_age >> 8;
		brop->ifbop_hellotime = bs->bs_bridge_htime >> 8;
		brop->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
		brop->ifbop_holdcount = bs->bs_txholdcount;
		brop->ifbop_priority = bs->bs_bridge_priority;
		brop->ifbop_protocol = bs->bs_protover;
		brop->ifbop_root_bridge = bs->bs_root_pv.pv_root_id;
		brop->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
		brop->ifbop_root_port = bs->bs_root_pv.pv_port_id;
		brop->ifbop_desg_bridge = bs->bs_root_pv.pv_dbridge_id;
		brop->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
		brop->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
		break;
	case SIOCBRDGSIFPROT:
		error = bridge_findbif(sc, req->ifbr_ifsname, &bif);
		if (error != 0)
			break;
		bif->bif_protected = req->ifbr_protected;
		break;
	case SIOCBRDGRTS:
	case SIOCBRDGGCACHE:
	case SIOCBRDGGPRI:
	case SIOCBRDGGMA:
	case SIOCBRDGGHT:
	case SIOCBRDGGFD:
	case SIOCBRDGGTO:
	case SIOCBRDGGRL:
		break;
	case SIOCBRDGFLUSH:
	case SIOCBRDGSADDR:
	case SIOCBRDGDADDR:
	case SIOCBRDGSCACHE:
	case SIOCBRDGSTO:
	case SIOCBRDGARL:
	case SIOCBRDGFRL:
	case SIOCBRDGSPRI:
	case SIOCBRDGSFD:
	case SIOCBRDGSMA:
	case SIOCBRDGSHT:
	case SIOCBRDGSTXHC:
	case SIOCBRDGSPROTO:
	case SIOCBRDGSIFPRIO:
	case SIOCBRDGSIFCOST:
		error = suser(curproc);
		break;
	default:
		error = ENOTTY;
		break;
	}

	if (!error)
		error = bridgectl_ioctl(ifp, cmd, data);

	if (!error)
		error = bstp_ioctl(ifp, cmd, data);

	NET_LOCK();
	return (error);
}

/* Detach an interface from a bridge. */
int
bridge_ifremove(struct bridge_iflist *bif)
{
	struct bridge_softc *sc = bif->bridge_sc;
	int error;

	SMR_SLIST_REMOVE_LOCKED(&sc->sc_iflist, bif, bridge_iflist, bif_next);
	if_detachhook_del(bif->ifp, &bif->bif_dtask);
	ether_brport_clr(bif->ifp);

	smr_barrier();

	if (bif->bif_flags & IFBIF_STP) {
		bstp_delete(bif->bif_stp);
		bif->bif_stp = NULL;
	}

	bif->ifp->if_bridgeidx = 0;
	NET_LOCK();
	error = ifpromisc(bif->ifp, 0);
	NET_UNLOCK();

	bridge_rtdelete(sc, bif->ifp, 0);
	bridge_flushrule(bif);

	if_put(bif->ifp);
	bif->ifp = NULL;
	free(bif, M_DEVBUF, sizeof(*bif));

	return (error);
}

void
bridge_spanremove(struct bridge_iflist *bif)
{
	struct bridge_softc *sc = bif->bridge_sc;

	SMR_SLIST_REMOVE_LOCKED(&sc->sc_spanlist, bif, bridge_iflist, bif_next);
	if_detachhook_del(bif->ifp, &bif->bif_dtask);

	smr_barrier();

	if_put(bif->ifp);
	bif->ifp = NULL;
	free(bif, M_DEVBUF, sizeof(*bif));
}

void
bridge_ifdetach(void *xbif)
{
	struct bridge_iflist *bif = xbif;

	/*
	 * bridge(4) detach hook doesn't need the NET_LOCK(); worse, the
	 * use of smr_barrier() while holding the lock might lead to a
	 * deadlock situation.
	 */
	NET_UNLOCK();
	bridge_ifremove(bif);
	NET_LOCK();
}

void
bridge_spandetach(void *xbif)
{
	struct bridge_iflist *bif = xbif;

	/*
	 * bridge(4) detach hook doesn't need the NET_LOCK(); worse, the
	 * use of smr_barrier() while holding the lock might lead to a
	 * deadlock situation.
	 */
	NET_UNLOCK();
	bridge_spanremove(bif);
	NET_LOCK();
}

void
bridge_bifgetstp(struct bridge_softc *sc, struct bridge_iflist *bif,
    struct ifbreq *breq)
{
	struct bstp_state *bs = sc->sc_stp;
	struct bstp_port *bp = bif->bif_stp;

	breq->ifbr_state = bstp_getstate(bs, bp);
	breq->ifbr_priority = bp->bp_priority;
	breq->ifbr_path_cost = bp->bp_path_cost;
	breq->ifbr_proto = bp->bp_protover;
	breq->ifbr_role = bp->bp_role;
	breq->ifbr_stpflags = bp->bp_flags;
	breq->ifbr_fwd_trans = bp->bp_forward_transitions;
	breq->ifbr_root_bridge = bs->bs_root_pv.pv_root_id;
	breq->ifbr_root_cost = bs->bs_root_pv.pv_cost;
	breq->ifbr_root_port = bs->bs_root_pv.pv_port_id;
	breq->ifbr_desg_bridge = bs->bs_root_pv.pv_dbridge_id;
	breq->ifbr_desg_port = bs->bs_root_pv.pv_dport_id;

	/* Copy STP state options as flags */
	if (bp->bp_operedge)
		breq->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
	if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
		breq->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
	if (bp->bp_ptp_link)
		breq->ifbr_ifsflags |= IFBIF_BSTP_PTP;
	if (bp->bp_flags & BSTP_PORT_AUTOPTP)
		breq->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
}

int
bridge_bifconf(struct bridge_softc *sc, struct ifbifconf *bifc)
{
	struct bridge_iflist *bif;
	u_int32_t total = 0, i = 0;
	int error = 0;
	struct ifbreq *breq, *breqs = NULL;

	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next)
		total++;

	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_spanlist, bif_next)
		total++;

	if (bifc->ifbic_len == 0) {
		i = total;
		goto done;
	}

	breqs = mallocarray(total, sizeof(*breqs), M_TEMP, M_NOWAIT|M_ZERO);
	if (breqs == NULL)
		goto done;

	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
		if (bifc->ifbic_len < (i + 1) * sizeof(*breqs))
			break;
		breq = &breqs[i];
		strlcpy(breq->ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
		strlcpy(breq->ifbr_ifsname, bif->ifp->if_xname, IFNAMSIZ);
		breq->ifbr_ifsflags = bif->bif_flags;
		breq->ifbr_portno = bif->ifp->if_index & 0xfff;
		breq->ifbr_protected = bif->bif_protected;
		if (bif->bif_flags & IFBIF_STP)
			bridge_bifgetstp(sc, bif, breq);
		i++;
	}
	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_spanlist, bif_next) {
		if (bifc->ifbic_len < (i + 1) * sizeof(*breqs))
			break;
		breq = &breqs[i];
		strlcpy(breq->ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
		strlcpy(breq->ifbr_ifsname, bif->ifp->if_xname, IFNAMSIZ);
		breq->ifbr_ifsflags = bif->bif_flags | IFBIF_SPAN;
		breq->ifbr_portno = bif->ifp->if_index & 0xfff;
		i++;
	}

	error = copyout(breqs, bifc->ifbic_req, i * sizeof(*breqs));
done:
	free(breqs, M_TEMP, total * sizeof(*breq));
	bifc->ifbic_len = i * sizeof(*breq);
	return (error);
}

int
bridge_findbif(struct bridge_softc *sc, const char *name,
    struct bridge_iflist **rbif)
{
	struct ifnet *ifp;
	struct bridge_iflist *bif;
	int error = 0;

	KERNEL_ASSERT_LOCKED();

	if ((ifp = if_unit(name)) == NULL)
		return (ENOENT);

	if (ifp->if_bridgeidx != sc->sc_if.if_index) {
		error = ESRCH;
		goto put;
	}

	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
		if (bif->ifp == ifp)
			break;
	}

	if (bif == NULL) {
		error = ENOENT;
		goto put;
	}

	*rbif = bif;
put:
	if_put(ifp);

	return (error);
}

struct bridge_iflist *
bridge_getbif(struct ifnet *ifp)
{
	struct bridge_iflist *bif;
	struct bridge_softc *sc;
	struct ifnet *bifp;

	KERNEL_ASSERT_LOCKED();

	bifp = if_get(ifp->if_bridgeidx);
	if (bifp == NULL)
		return (NULL);

	sc = bifp->if_softc;
	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
		if (bif->ifp == ifp)
			break;
	}

	if_put(bifp);

	return (bif);
}

void
bridge_init(struct bridge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;

	if (ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	bstp_enable(sc->sc_stp, ifp->if_index);

	if (sc->sc_brttimeout != 0)
		timeout_add_sec(&sc->sc_brtimeout, sc->sc_brttimeout);

	SET(ifp->if_flags, IFF_RUNNING);
}

/*
 * Stop the bridge and deallocate the routing table.
 */
void
bridge_stop(struct bridge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	CLR(ifp->if_flags, IFF_RUNNING);

	bstp_disable(sc->sc_stp);

	timeout_del_barrier(&sc->sc_brtimeout);

	bridge_rtflush(sc, IFBF_FLUSHDYN);
}

/*
 * Send output from the bridge.  The mbuf has the ethernet header
 * already attached.  We must enqueue or free the mbuf before exiting.
 */
int
bridge_enqueue(struct ifnet *ifp, struct mbuf *m)
{
	struct ifnet *brifp;
	struct ether_header *eh;
	struct ifnet *dst_if = NULL;
	unsigned int dst_ifidx = 0;
#if NBPFILTER > 0
	caddr_t if_bpf;
#endif
	int error = 0;

	if (m->m_len < sizeof(*eh)) {
		m = m_pullup(m, sizeof(*eh));
		if (m == NULL)
			return (ENOBUFS);
	}

	/* ifp must be a member interface of the bridge. */
	brifp = if_get(ifp->if_bridgeidx);
	if (brifp == NULL) {
		m_freem(m);
		return (EINVAL);
	}

	/*
	 * If bridge is down, but original output interface is up,
	 * go ahead and send out that interface.  Otherwise the packet
	 * is dropped below.
	 */
	if (!ISSET(brifp->if_flags, IFF_RUNNING)) {
		/* Loop prevention. */
		m->m_flags |= M_PROTO1;
		error = if_enqueue(ifp, m);
		if_put(brifp);
		return (error);
	}

#if NBPFILTER > 0
	if_bpf = brifp->if_bpf;
	if (if_bpf)
		bpf_mtap(if_bpf, m, BPF_DIRECTION_OUT);
#endif
	ifp->if_opackets++;
	ifp->if_obytes += m->m_pkthdr.len;

	bridge_span(brifp, m);

	eh = mtod(m, struct ether_header *);
	if (!ETHER_IS_MULTICAST(eh->ether_dhost)) {
		struct ether_addr *dst;

		dst = (struct ether_addr *)&eh->ether_dhost[0];
		dst_ifidx = bridge_rtlookup(brifp, dst, m);
	}

	/*
	 * If the packet is a broadcast or we don't know a better way to
	 * get there, send to all interfaces.
	 */
	if (dst_ifidx == 0) {
		struct bridge_softc *sc = brifp->if_softc;
		struct bridge_iflist *bif;
		struct mbuf *mc;

		smr_read_enter();
		SMR_SLIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
			dst_if = bif->ifp;
			if ((dst_if->if_flags & IFF_RUNNING) == 0)
				continue;

			/*
			 * If this is not the original output interface,
			 * and the interface is participating in spanning
			 * tree, make sure the port is in a state that
			 * allows forwarding.
			 */
			if (dst_if != ifp &&
			    (bif->bif_flags & IFBIF_STP) &&
			    (bif->bif_state == BSTP_IFSTATE_DISCARDING))
				continue;
			if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
			    (m->m_flags & (M_BCAST | M_MCAST)) == 0)
				continue;

			if (bridge_filterrule(&bif->bif_brlout, eh, m) ==
			    BRL_ACTION_BLOCK)
				continue;

			mc = m_dup_pkt(m, ETHER_ALIGN, M_NOWAIT);
			if (mc == NULL) {
				brifp->if_oerrors++;
				continue;
			}

			error = bridge_ifenqueue(brifp, dst_if, mc);
			if (error)
				continue;
		}
		smr_read_leave();
		m_freem(m);
		goto out;
	}

	dst_if = if_get(dst_ifidx);
	if ((dst_if == NULL) || !ISSET(dst_if->if_flags, IFF_RUNNING)) {
		m_freem(m);
		if_put(dst_if);
		error = ENETDOWN;
		goto out;
	}

	bridge_ifenqueue(brifp, dst_if, m);
	if_put(dst_if);
out:
	if_put(brifp);
	return (error);
}

/*
 * Loop through each bridge interface and process their input queues.
 */
void
bridgeintr(void)
{
	struct mbuf_list ml;
	struct mbuf *m;
	struct ifnet *ifp;

	niq_delist(&bridgeintrq, &ml);
	if (ml_empty(&ml))
		return;

	KERNEL_LOCK();
	while ((m = ml_dequeue(&ml)) != NULL) {

		ifp = if_get(m->m_pkthdr.ph_ifidx);
		if (ifp == NULL) {
			m_freem(m);
			continue;
		}

		bridge_process(ifp, m);

		if_put(ifp);
	}
	KERNEL_UNLOCK();
}

/*
 * Process a single frame.  Frame must be freed or queued before returning.
 */
void
bridgeintr_frame(struct ifnet *brifp, struct ifnet *src_if, struct mbuf *m)
{
	struct bridge_softc *sc = brifp->if_softc;
	struct ifnet *dst_if = NULL;
	struct bridge_iflist *bif;
	struct ether_addr *dst, *src;
	struct ether_header eh;
	unsigned int dst_ifidx;
	u_int32_t protected;
	int len;

	sc->sc_if.if_ipackets++;
	sc->sc_if.if_ibytes += m->m_pkthdr.len;

	bif = bridge_getbif(src_if);
	KASSERT(bif != NULL);

	m_copydata(m, 0, ETHER_HDR_LEN, &eh);
	dst = (struct ether_addr *)&eh.ether_dhost[0];
	src = (struct ether_addr *)&eh.ether_shost[0];

	/*
	 * If interface is learning, and if source address
	 * is not broadcast or multicast, record its address.
	 */
	if ((bif->bif_flags & IFBIF_LEARNING) &&
	    !ETHER_IS_MULTICAST(eh.ether_shost) &&
	    !ETHER_IS_ANYADDR(eh.ether_shost))
		bridge_rtupdate(sc, src, src_if, 0, IFBAF_DYNAMIC, m);

	if ((bif->bif_flags & IFBIF_STP) &&
	    (bif->bif_state == BSTP_IFSTATE_LEARNING)) {
		m_freem(m);
		return;
	}

	/*
	 * At this point, the port either doesn't participate in stp or
	 * it's in the forwarding state
	 */

	/*
	 * If packet is unicast, destined for someone on "this"
	 * side of the bridge, drop it.
	 */
	if (!ETHER_IS_MULTICAST(eh.ether_dhost)) {
		dst_ifidx = bridge_rtlookup(brifp, dst, NULL);
		if (dst_ifidx == src_if->if_index) {
			m_freem(m);
			return;
		}
	} else {
		if (ETHER_IS_BROADCAST(eh.ether_dhost))
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;
	}

	/*
	 * Multicast packets get handled a little differently:
	 * If interface is:
	 *	-link0,-link1	(default) Forward all multicast
	 *			as broadcast.
	 *	-link0,link1	Drop non-IP multicast, forward
	 *			as broadcast IP multicast.
	 *	link0,-link1	Drop IP multicast, forward as
	 *			broadcast non-IP multicast.
	 *	link0,link1	Drop all multicast.
	 */
	if (m->m_flags & M_MCAST) {
		if ((sc->sc_if.if_flags &
		    (IFF_LINK0 | IFF_LINK1)) ==
		    (IFF_LINK0 | IFF_LINK1)) {
			m_freem(m);
			return;
		}
		if (sc->sc_if.if_flags & IFF_LINK0 &&
		    ETHERADDR_IS_IP_MCAST(dst)) {
			m_freem(m);
			return;
		}
		if (sc->sc_if.if_flags & IFF_LINK1 &&
		    !ETHERADDR_IS_IP_MCAST(dst)) {
			m_freem(m);
			return;
		}
	}

	if (bif->bif_flags & IFBIF_BLOCKNONIP && bridge_blocknonip(&eh, m)) {
		m_freem(m);
		return;
	}

	if (bridge_filterrule(&bif->bif_brlin, &eh, m) == BRL_ACTION_BLOCK) {
		m_freem(m);
		return;
	}
	m = bridge_ip(&sc->sc_if, BRIDGE_IN, src_if, &eh, m);
	if (m == NULL)
		return;
	/*
	 * If the packet is a multicast or broadcast OR if we don't
	 * know any better, forward it to all interfaces.
	 */
	if ((m->m_flags & (M_BCAST | M_MCAST)) || dst_ifidx == 0) {
		sc->sc_if.if_imcasts++;
		bridge_broadcast(sc, src_if, &eh, m);
		return;
	}
	protected = bif->bif_protected;

	dst_if = if_get(dst_ifidx);
	if (dst_if == NULL)
		goto bad;

	/*
	 * At this point, we're dealing with a unicast frame going to a
	 * different interface
	 */
	if (!ISSET(dst_if->if_flags, IFF_RUNNING))
		goto bad;
	bif = bridge_getbif(dst_if);
	if ((bif == NULL) || ((bif->bif_flags & IFBIF_STP) &&
	    (bif->bif_state == BSTP_IFSTATE_DISCARDING)))
		goto bad;
	/*
	 * Do not transmit if both ports are part of the same protected
	 * domain.
	 */
	if (protected != 0 && (protected & bif->bif_protected))
		goto bad;
	if (bridge_filterrule(&bif->bif_brlout, &eh, m) == BRL_ACTION_BLOCK)
		goto bad;
	m = bridge_ip(&sc->sc_if, BRIDGE_OUT, dst_if, &eh, m);
	if (m == NULL)
		goto bad;

	len = m->m_pkthdr.len;
#if NVLAN > 0
	if ((m->m_flags & M_VLANTAG) &&
	    (dst_if->if_capabilities & IFCAP_VLAN_HWTAGGING) == 0)
		len += ETHER_VLAN_ENCAP_LEN;
#endif
	if ((len - ETHER_HDR_LEN) > dst_if->if_mtu)
		bridge_fragment(&sc->sc_if, dst_if, &eh, m);
	else {
		bridge_ifenqueue(&sc->sc_if, dst_if, m);
	}
	m = NULL;
bad:
	if_put(dst_if);
	m_freem(m);
}

/*
 * Return 1 if `ena' belongs to `ifp', 0 otherwise.
 */
int
bridge_ourether(struct ifnet *ifp, uint8_t *ena)
{
	struct arpcom *ac = (struct arpcom *)ifp;

	if (memcmp(ac->ac_enaddr, ena, ETHER_ADDR_LEN) == 0)
		return (1);

#if NCARP > 0
	if (carp_ourether(ifp, ena))
		return (1);
#endif

	return (0);
}

/*
 * Receive input from an interface.  Queue the packet for bridging if it's
 * not for us, and schedule an interrupt.
 */
struct mbuf *
bridge_input(struct ifnet *ifp, struct mbuf *m, uint64_t dst, void *null)
{
	KASSERT(m->m_flags & M_PKTHDR);

	if (m->m_flags & M_PROTO1) {
		m->m_flags &= ~M_PROTO1;
		return (m);
	}

	niq_enqueue(&bridgeintrq, m);

	return (NULL);
}

void
bridge_process(struct ifnet *ifp, struct mbuf *m)
{
	struct ifnet *brifp;
	struct bridge_softc *sc;
	struct bridge_iflist *bif = NULL, *bif0 = NULL;
	struct ether_header *eh;
	struct mbuf *mc;
#if NBPFILTER > 0
	caddr_t if_bpf;
#endif

	KERNEL_ASSERT_LOCKED();

	brifp = if_get(ifp->if_bridgeidx);
	if ((brifp == NULL) || !ISSET(brifp->if_flags, IFF_RUNNING))
		goto reenqueue;

	if (m->m_pkthdr.len < sizeof(*eh))
		goto bad;

#if NVLAN > 0
	/*
	 * If the underlying interface removed the VLAN header itself,
	 * add it back.
	 */
	if (ISSET(m->m_flags, M_VLANTAG)) {
		m = vlan_inject(m, ETHERTYPE_VLAN, m->m_pkthdr.ether_vtag);
		if (m == NULL)
			goto bad;
	}
#endif

#if NBPFILTER > 0
	if_bpf = brifp->if_bpf;
	if (if_bpf)
		bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_IN);
#endif

	eh = mtod(m, struct ether_header *);

	sc = brifp->if_softc;
	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
		struct arpcom *ac = (struct arpcom *)bif->ifp;
		if (memcmp(ac->ac_enaddr, eh->ether_shost, ETHER_ADDR_LEN) == 0)
			goto bad;
		if (bif->ifp == ifp)
			bif0 = bif;
	}
	if (bif0 == NULL)
		goto reenqueue;

	bridge_span(brifp, m);

	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
		/*
		 * Reserved destination MAC addresses (01:80:C2:00:00:0x)
		 * should not be forwarded to bridge members according to
		 * section 7.12.6 of the 802.1D-2004 specification.  The
		 * STP destination address (as stored in bstp_etheraddr)
		 * is the first of these.
		 */
		if (memcmp(eh->ether_dhost, bstp_etheraddr,
		    ETHER_ADDR_LEN - 1) == 0) {
			if (eh->ether_dhost[ETHER_ADDR_LEN - 1] == 0) {
				/* STP traffic */
				m = bstp_input(sc->sc_stp, bif0->bif_stp, eh,
				    m);
				if (m == NULL)
					goto bad;
			} else if (eh->ether_dhost[ETHER_ADDR_LEN - 1] <= 0xf)
				goto bad;
		}

		/*
		 * No need to process frames for ifs in the discarding state
		 */
		if ((bif0->bif_flags & IFBIF_STP) &&
		    (bif0->bif_state == BSTP_IFSTATE_DISCARDING))
			goto reenqueue;

		mc = m_dup_pkt(m, ETHER_ALIGN, M_NOWAIT);
		if (mc == NULL)
			goto reenqueue;

		bridge_ifinput(ifp, mc);

		bridgeintr_frame(brifp, ifp, m);
		if_put(brifp);
		return;
	}

	/*
	 * Unicast, make sure it's not for us.
	 */
	if (bridge_ourether(bif0->ifp, eh->ether_dhost)) {
		bif = bif0;
	} else {
		SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
			if (bif->ifp == ifp)
				continue;
			if (bridge_ourether(bif->ifp, eh->ether_dhost))
				break;
		}
	}
	if (bif != NULL) {
		if (bif0->bif_flags & IFBIF_LEARNING)
			bridge_rtupdate(sc,
			    (struct ether_addr *)&eh->ether_shost,
			    ifp, 0, IFBAF_DYNAMIC, m);
		if (bridge_filterrule(&bif0->bif_brlin, eh, m) ==
		    BRL_ACTION_BLOCK) {
			goto bad;
		}

		/* Count for the bridge */
		brifp->if_ipackets++;
		brifp->if_ibytes += m->m_pkthdr.len;

		ifp = bif->ifp;
		goto reenqueue;
	}

	bridgeintr_frame(brifp, ifp, m);
	if_put(brifp);
	return;

reenqueue:
	bridge_ifinput(ifp, m);
	m = NULL;
bad:
	m_freem(m);
	if_put(brifp);
}

/*
 * Send a frame to all interfaces that are members of the bridge
 * (except the one it came in on).
 */
void
bridge_broadcast(struct bridge_softc *sc, struct ifnet *ifp,
    struct ether_header *eh, struct mbuf *m)
{
	struct bridge_iflist *bif;
	struct mbuf *mc;
	struct ifnet *dst_if;
	int len, used = 0;
	u_int32_t protected;

	bif = bridge_getbif(ifp);
	KASSERT(bif != NULL);
	protected = bif->bif_protected;

	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
		dst_if = bif->ifp;

		if ((dst_if->if_flags & IFF_RUNNING) == 0)
			continue;

		if ((bif->bif_flags & IFBIF_STP) &&
		    (bif->bif_state == BSTP_IFSTATE_DISCARDING))
			continue;

		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
		    (m->m_flags & (M_BCAST | M_MCAST)) == 0)
			continue;

		/* Drop non-IP frames if the appropriate flag is set. */
		if (bif->bif_flags & IFBIF_BLOCKNONIP &&
		    bridge_blocknonip(eh, m))
			continue;

		/*
		 * Do not transmit if both ports are part of the same
		 * protected domain.
		 */
		if (protected != 0 && (protected & bif->bif_protected))
			continue;

		if (bridge_filterrule(&bif->bif_brlout, eh, m) ==
		    BRL_ACTION_BLOCK)
			continue;

		/*
		 * Don't retransmit out of the same interface where
		 * the packet was received from.
		 */
		if (dst_if->if_index == ifp->if_index)
			continue;

		if (bridge_localbroadcast(dst_if, eh, m))
			sc->sc_if.if_oerrors++;

		/* If last one, reuse the passed-in mbuf */
		if (SMR_SLIST_NEXT_LOCKED(bif, bif_next) == NULL) {
			mc = m;
			used = 1;
		} else {
			mc = m_dup_pkt(m, ETHER_ALIGN, M_NOWAIT);
			if (mc == NULL) {
				sc->sc_if.if_oerrors++;
				continue;
			}
		}

		mc = bridge_ip(&sc->sc_if, BRIDGE_OUT, dst_if, eh, mc);
		if (mc == NULL)
			continue;

		len = mc->m_pkthdr.len;
#if NVLAN > 0
		if ((mc->m_flags & M_VLANTAG) &&
		    (dst_if->if_capabilities & IFCAP_VLAN_HWTAGGING) == 0)
			len += ETHER_VLAN_ENCAP_LEN;
#endif
		if ((len - ETHER_HDR_LEN) > dst_if->if_mtu)
			bridge_fragment(&sc->sc_if, dst_if, eh, mc);
		else {
			bridge_ifenqueue(&sc->sc_if, dst_if, mc);
		}
	}

	if (!used)
		m_freem(m);
}

int
bridge_localbroadcast(struct ifnet *ifp, struct ether_header *eh,
    struct mbuf *m)
{
	struct mbuf *m1;
	u_int16_t etype;

	/*
	 * quick optimisation, don't send packets up the stack if no
	 * corresponding address has been specified.
	 */
	etype = ntohs(eh->ether_type);
	if (!(m->m_flags & M_VLANTAG) && etype == ETHERTYPE_IP) {
		struct ifaddr *ifa;
		TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
			if (ifa->ifa_addr->sa_family == AF_INET)
				break;
		}
		if (ifa == NULL)
			return (0);
	}

	m1 = m_dup_pkt(m, ETHER_ALIGN, M_NOWAIT);
	if (m1 == NULL)
		return (1);

#if NPF > 0
	pf_pkt_addr_changed(m1);
#endif	/* NPF */

	bridge_ifinput(ifp, m1);

	return (0);
}

void
bridge_span(struct ifnet *brifp, struct mbuf *m)
{
	struct bridge_softc *sc = brifp->if_softc;
	struct bridge_iflist *bif;
	struct ifnet *ifp;
	struct mbuf *mc;
	int error;

	smr_read_enter();
	SMR_SLIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
		ifp = bif->ifp;

		if ((ifp->if_flags & IFF_RUNNING) == 0)
			continue;

		mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
		if (mc == NULL) {
			brifp->if_oerrors++;
			continue;
		}

		error = bridge_ifenqueue(brifp, ifp, mc);
		if (error)
			continue;
	}
	smr_read_leave();
}

/*
 * Block non-ip frames:
 * Returns 0 if frame is ip, and 1 if it should be dropped.
 */
int
bridge_blocknonip(struct ether_header *eh, struct mbuf *m)
{
	struct llc llc;
	u_int16_t etype;

	if (m->m_pkthdr.len < ETHER_HDR_LEN)
		return (1);

#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		return (1);
#endif

	etype = ntohs(eh->ether_type);
	switch (etype) {
	case ETHERTYPE_ARP:
	case ETHERTYPE_REVARP:
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		return (0);
	}

	if (etype > ETHERMTU)
		return (1);

	if (m->m_pkthdr.len <
	    (ETHER_HDR_LEN + LLC_SNAPFRAMELEN))
		return (1);

	m_copydata(m, ETHER_HDR_LEN, LLC_SNAPFRAMELEN, &llc);

	etype = ntohs(llc.llc_snap.ether_type);
	if (llc.llc_dsap == LLC_SNAP_LSAP &&
	    llc.llc_ssap == LLC_SNAP_LSAP &&
	    llc.llc_control == LLC_UI &&
	    llc.llc_snap.org_code[0] == 0 &&
	    llc.llc_snap.org_code[1] == 0 &&
	    llc.llc_snap.org_code[2] == 0 &&
	    (etype == ETHERTYPE_ARP || etype == ETHERTYPE_REVARP ||
	    etype == ETHERTYPE_IP || etype == ETHERTYPE_IPV6)) {
		return (0);
	}

	return (1);
}

#ifdef IPSEC
int
bridge_ipsec(struct ifnet *ifp, struct ether_header *eh, int hassnap,
    struct llc *llc, int dir, int af, int hlen, struct mbuf *m)
{
	union sockaddr_union dst;
	struct tdb *tdb;
	u_int32_t spi;
	u_int16_t cpi;
	int error, off, prot;
	u_int8_t proto = 0;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif /* INET6 */
#if NPF > 0
	struct ifnet *encif;
#endif

	if (dir == BRIDGE_IN) {
		switch (af) {
		case AF_INET:
			if (m->m_pkthdr.len - hlen < 2 * sizeof(u_int32_t))
				goto skiplookup;

			ip = mtod(m, struct ip *);
			proto = ip->ip_p;
			off = offsetof(struct ip, ip_p);

			if (proto != IPPROTO_ESP && proto != IPPROTO_AH &&
			    proto != IPPROTO_IPCOMP)
				goto skiplookup;

			bzero(&dst, sizeof(union sockaddr_union));
			dst.sa.sa_family = AF_INET;
			dst.sin.sin_len = sizeof(struct sockaddr_in);
			m_copydata(m, offsetof(struct ip, ip_dst),
			    sizeof(struct in_addr), &dst.sin.sin_addr);

			break;
#ifdef INET6
		case AF_INET6:
			if (m->m_pkthdr.len - hlen < 2 * sizeof(u_int32_t))
				goto skiplookup;

			ip6 = mtod(m, struct ip6_hdr *);

			/* XXX We should chase down the header chain */
			proto = ip6->ip6_nxt;
			off = offsetof(struct ip6_hdr, ip6_nxt);

			if (proto != IPPROTO_ESP && proto != IPPROTO_AH &&
			    proto != IPPROTO_IPCOMP)
				goto skiplookup;

			bzero(&dst, sizeof(union sockaddr_union));
			dst.sa.sa_family = AF_INET6;
			dst.sin6.sin6_len = sizeof(struct sockaddr_in6);
			m_copydata(m, offsetof(struct ip6_hdr, ip6_dst),
			    sizeof(struct in6_addr), &dst.sin6.sin6_addr);

			break;
#endif /* INET6 */
		default:
			return (0);
		}

		switch (proto) {
		case IPPROTO_ESP:
			m_copydata(m, hlen, sizeof(u_int32_t), &spi);
			break;
		case IPPROTO_AH:
			m_copydata(m, hlen + sizeof(u_int32_t),
			    sizeof(u_int32_t), &spi);
			break;
		case IPPROTO_IPCOMP:
			m_copydata(m, hlen + sizeof(u_int16_t),
			    sizeof(u_int16_t), &cpi);
			spi = htonl(ntohs(cpi));
			break;
		}

		NET_ASSERT_LOCKED();

		tdb = gettdb(ifp->if_rdomain, spi, &dst, proto);
		if (tdb != NULL && (tdb->tdb_flags & TDBF_INVALID) == 0 &&
		    tdb->tdb_xform != NULL) {
			if (tdb->tdb_first_use == 0) {
				tdb->tdb_first_use = gettime();
				if (tdb->tdb_flags & TDBF_FIRSTUSE) {
					if (timeout_add_sec(
					    &tdb->tdb_first_tmo,
					    tdb->tdb_exp_first_use))
						tdb_ref(tdb);
				}
				if (tdb->tdb_flags & TDBF_SOFT_FIRSTUSE) {
					if (timeout_add_sec(
					    &tdb->tdb_sfirst_tmo,
					    tdb->tdb_soft_first_use))
						tdb_ref(tdb);
				}
			}

			prot = (*(tdb->tdb_xform->xf_input))(&m, tdb, hlen,
			    off);
			tdb_unref(tdb);
			if (prot != IPPROTO_DONE)
				ip_deliver(&m, &hlen, prot, af);
			return (1);
		} else {
			tdb_unref(tdb);
 skiplookup:
			/* XXX do an input policy lookup */
			return (0);
		}
	} else { /* Outgoing from the bridge. */
		error = ipsp_spd_lookup(m, af, hlen, IPSP_DIRECTION_OUT,
		    NULL, NULL, &tdb, NULL);
		if (error == 0 && tdb != NULL) {
			/*
			 * We don't need to do loop detection, the
			 * bridge will do that for us.
			 */
#if NPF > 0
			if ((encif = enc_getif(tdb->tdb_rdomain,
			    tdb->tdb_tap)) == NULL ||
			    pf_test(af, dir, encif, &m) != PF_PASS) {
				m_freem(m);
				tdb_unref(tdb);
				return (1);
			}
			if (m == NULL) {
				tdb_unref(tdb);
				return (1);
			}
			if (af == AF_INET)
				in_proto_cksum_out(m, encif);
#ifdef INET6
			else if (af == AF_INET6)
				in6_proto_cksum_out(m, encif);
#endif /* INET6 */
#endif /* NPF */

			ip = mtod(m, struct ip *);
			if ((af == AF_INET) &&
			    ip_mtudisc && (ip->ip_off & htons(IP_DF)) &&
			    tdb->tdb_mtu && ntohs(ip->ip_len) > tdb->tdb_mtu &&
			    tdb->tdb_mtutimeout > gettime()) {
				bridge_send_icmp_err(ifp, eh, m,
				    hassnap, llc, tdb->tdb_mtu,
				    ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG);
			} else {
				KERNEL_LOCK();
				error = ipsp_process_packet(m, tdb, af, 0);
				KERNEL_UNLOCK();
			}
			tdb_unref(tdb);
			return (1);
		} else
			return (0);
	}

	return (0);
}
#endif /* IPSEC */

/*
 * Filter IP packets by peeking into the ethernet frame.  This violates
 * the ISO model, but allows us to act as an IP filter at the data link
 * layer.  As a result, most of this code will look familiar to those
 * who've read net/if_ethersubr.c and netinet/ip_input.c.
 */
struct mbuf *
bridge_ip(struct ifnet *brifp, int dir, struct ifnet *ifp,
    struct ether_header *eh, struct mbuf *m)
{
	struct llc llc;
	int hassnap = 0;
	struct ip *ip;
	int hlen;
	u_int16_t etype;

#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		return (m);
#endif

	etype = ntohs(eh->ether_type);

	if (etype != ETHERTYPE_IP && etype != ETHERTYPE_IPV6) {
		if (etype > ETHERMTU ||
		    m->m_pkthdr.len < (LLC_SNAPFRAMELEN +
		    ETHER_HDR_LEN))
			return (m);

		m_copydata(m, ETHER_HDR_LEN, LLC_SNAPFRAMELEN, &llc);

		if (llc.llc_dsap != LLC_SNAP_LSAP ||
		    llc.llc_ssap != LLC_SNAP_LSAP ||
		    llc.llc_control != LLC_UI ||
		    llc.llc_snap.org_code[0] ||
		    llc.llc_snap.org_code[1] ||
		    llc.llc_snap.org_code[2])
			return (m);

		etype = ntohs(llc.llc_snap.ether_type);
		if (etype != ETHERTYPE_IP && etype != ETHERTYPE_IPV6)
			return (m);
		hassnap = 1;
	}

	m_adj(m, ETHER_HDR_LEN);
	if (hassnap)
		m_adj(m, LLC_SNAPFRAMELEN);

	switch (etype) {

	case ETHERTYPE_IP:
		m = ipv4_check(ifp, m);
		if (m == NULL)
			return (NULL);

		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;

#ifdef IPSEC
		if ((brifp->if_flags & IFF_LINK2) == IFF_LINK2 &&
		    bridge_ipsec(ifp, eh, hassnap, &llc, dir, AF_INET, hlen, m))
			return (NULL);
#endif /* IPSEC */
#if NPF > 0
		/* Finally, we get to filter the packet! */
		if (pf_test(AF_INET, dir, ifp, &m) != PF_PASS)
			goto dropit;
		if (m == NULL)
			goto dropit;
#endif /* NPF > 0 */

		/* Rebuild the IP header */
		if (m->m_len < hlen && ((m = m_pullup(m, hlen)) == NULL))
			return (NULL);
		if (m->m_len < sizeof(struct ip))
			goto dropit;
		in_hdr_cksum_out(m, ifp);
		in_proto_cksum_out(m, ifp);

#if NPF > 0
		if (dir == BRIDGE_IN &&
		    m->m_pkthdr.pf.flags & PF_TAG_DIVERTED) {
			m_resethdr(m);
			m->m_pkthdr.ph_ifidx = ifp->if_index;
			m->m_pkthdr.ph_rtableid = ifp->if_rdomain;
			ipv4_input(ifp, m);
			return (NULL);
		}
#endif /* NPF > 0 */

		break;

#ifdef INET6
	case ETHERTYPE_IPV6:
		m = ipv6_check(ifp, m);
		if (m == NULL)
			return (NULL);

#ifdef IPSEC
		hlen = sizeof(struct ip6_hdr);

		if ((brifp->if_flags & IFF_LINK2) == IFF_LINK2 &&
		    bridge_ipsec(ifp, eh, hassnap, &llc, dir, AF_INET6, hlen,
		    m))
			return (NULL);
#endif /* IPSEC */

#if NPF > 0
		if (pf_test(AF_INET6, dir, ifp, &m) != PF_PASS)
			goto dropit;
		if (m == NULL)
			return (NULL);
#endif /* NPF > 0 */
		in6_proto_cksum_out(m, ifp);

#if NPF > 0
		if (dir == BRIDGE_IN &&
		    m->m_pkthdr.pf.flags & PF_TAG_DIVERTED) {
			m_resethdr(m);
			m->m_pkthdr.ph_ifidx = ifp->if_index;
			m->m_pkthdr.ph_rtableid = ifp->if_rdomain;
			ipv6_input(ifp, m);
			return (NULL);
		}
#endif /* NPF > 0 */

		break;
#endif /* INET6 */

	default:
		goto dropit;
		break;
	}

	/* Reattach SNAP header */
	if (hassnap) {
		M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
		if (m == NULL)
			goto dropit;
		bcopy(&llc, mtod(m, caddr_t), LLC_SNAPFRAMELEN);
	}

	/* Reattach ethernet header */
	M_PREPEND(m, sizeof(*eh), M_DONTWAIT);
	if (m == NULL)
		goto dropit;
	bcopy(eh, mtod(m, caddr_t), sizeof(*eh));

	return (m);

dropit:
	m_freem(m);
	return (NULL);
}

void
bridge_fragment(struct ifnet *brifp, struct ifnet *ifp, struct ether_header *eh,
    struct mbuf *m)
{
	struct llc llc;
	struct mbuf_list ml;
	int error = 0;
	int hassnap = 0;
	u_int16_t etype;
	struct ip *ip;

	etype = ntohs(eh->ether_type);
#if NVLAN > 0
	if ((m->m_flags & M_VLANTAG) || etype == ETHERTYPE_VLAN ||
	    etype == ETHERTYPE_QINQ) {
		int len = m->m_pkthdr.len;

		if (m->m_flags & M_VLANTAG)
			len += ETHER_VLAN_ENCAP_LEN;
		if ((ifp->if_capabilities & IFCAP_VLAN_MTU) &&
		    (len - sizeof(struct ether_vlan_header) <= ifp->if_mtu)) {
			bridge_ifenqueue(brifp, ifp, m);
			return;
		}
		goto dropit;
	}
#endif
	if (etype != ETHERTYPE_IP) {
		if (etype > ETHERMTU ||
		    m->m_pkthdr.len < (LLC_SNAPFRAMELEN +
		    ETHER_HDR_LEN))
			goto dropit;

		m_copydata(m, ETHER_HDR_LEN, LLC_SNAPFRAMELEN, &llc);

		if (llc.llc_dsap != LLC_SNAP_LSAP ||
		    llc.llc_ssap != LLC_SNAP_LSAP ||
		    llc.llc_control != LLC_UI ||
		    llc.llc_snap.org_code[0] ||
		    llc.llc_snap.org_code[1] ||
		    llc.llc_snap.org_code[2] ||
		    llc.llc_snap.ether_type != htons(ETHERTYPE_IP))
			goto dropit;

		hassnap = 1;
	}

	m_adj(m, ETHER_HDR_LEN);
	if (hassnap)
		m_adj(m, LLC_SNAPFRAMELEN);

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		goto dropit;
	ip = mtod(m, struct ip *);

	/* Respect IP_DF, return an ICMP_UNREACH_NEEDFRAG. */
	if (ip->ip_off & htons(IP_DF)) {
		bridge_send_icmp_err(ifp, eh, m, hassnap, &llc,
		    ifp->if_mtu, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG);
		return;
	}

	error = ip_fragment(m, &ml, ifp, ifp->if_mtu);
	if (error)
		return;

	while ((m = ml_dequeue(&ml)) != NULL) {
		if (hassnap) {
			M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			bcopy(&llc, mtod(m, caddr_t), LLC_SNAPFRAMELEN);
		}
		M_PREPEND(m, sizeof(*eh), M_DONTWAIT);
		if (m == NULL) {
			error = ENOBUFS;
			break;
		}
		bcopy(eh, mtod(m, caddr_t), sizeof(*eh));
		error = bridge_ifenqueue(brifp, ifp, m);
		if (error)
			break;
	}
	if (error)
		ml_purge(&ml);
	else
		ipstat_inc(ips_fragmented);

	return;
 dropit:
	m_freem(m);
}

int
bridge_ifenqueue(struct ifnet *brifp, struct ifnet *ifp, struct mbuf *m)
{
	int error, len;

	/* Loop prevention. */
	m->m_flags |= M_PROTO1;

	len = m->m_pkthdr.len;

	error = if_enqueue(ifp, m);
	if (error) {
		brifp->if_oerrors++;
		return (error);
	}

	brifp->if_opackets++;
	brifp->if_obytes += len;

	return (0);
}

void
bridge_ifinput(struct ifnet *ifp, struct mbuf *m)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();

	m->m_flags |= M_PROTO1;

	ml_enqueue(&ml, m);
	if_input(ifp, &ml);
}

void
bridge_send_icmp_err(struct ifnet *ifp,
    struct ether_header *eh, struct mbuf *n, int hassnap, struct llc *llc,
    int mtu, int type, int code)
{
	struct ip *ip;
	struct icmp *icp;
	struct in_addr t;
	struct mbuf *m, *n2;
	int hlen;
	u_int8_t ether_tmp[ETHER_ADDR_LEN];

	n2 = m_copym(n, 0, M_COPYALL, M_DONTWAIT);
	if (!n2) {
		m_freem(n);
		return;
	}
	m = icmp_do_error(n, type, code, 0, mtu);
	if (m == NULL) {
		m_freem(n2);
		return;
	}

	n = n2;

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;
	t = ip->ip_dst;
	ip->ip_dst = ip->ip_src;
	ip->ip_src = t;

	m->m_data += hlen;
	m->m_len -= hlen;
	icp = mtod(m, struct icmp *);
	icp->icmp_cksum = 0;
	icp->icmp_cksum = in_cksum(m, ntohs(ip->ip_len) - hlen);
	m->m_data -= hlen;
	m->m_len += hlen;

	ip->ip_v = IPVERSION;
	ip->ip_off &= htons(IP_DF);
	ip->ip_id = htons(ip_randomid());
	ip->ip_ttl = MAXTTL;
	in_hdr_cksum_out(m, NULL);

	/* Swap ethernet addresses */
	bcopy(&eh->ether_dhost, &ether_tmp, sizeof(ether_tmp));
	bcopy(&eh->ether_shost, &eh->ether_dhost, sizeof(ether_tmp));
	bcopy(&ether_tmp, &eh->ether_shost, sizeof(ether_tmp));

	/* Reattach SNAP header */
	if (hassnap) {
		M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
		if (m == NULL)
			goto dropit;
		bcopy(llc, mtod(m, caddr_t), LLC_SNAPFRAMELEN);
	}

	/* Reattach ethernet header */
	M_PREPEND(m, sizeof(*eh), M_DONTWAIT);
	if (m == NULL)
		goto dropit;
	bcopy(eh, mtod(m, caddr_t), sizeof(*eh));

	bridge_enqueue(ifp, m);
	m_freem(n);
	return;

 dropit:
	m_freem(n);
}

void
bridge_take(void *unused)
{
	return;
}

void
bridge_rele(void *unused)
{
	return;
}